From 51621f462afe704c2368ca84d27c8c7b7269766e Mon Sep 17 00:00:00 2001
From: C4 Patino
Date: Tue, 19 Aug 2025 16:36:45 -0500
Subject: [PATCH 01/66] core(datetime): added implementation of ceiling
modifier to datetime
---
core/functions/datetime.rs | 198 ++++++++++++++++++-------
testing/scalar-functions-datetime.test | 62 +++++++-
2 files changed, 206 insertions(+), 54 deletions(-)
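Note: SQLite's '+N months' arithmetic overflows into the next month by default (the "ceiling" behavior), while 'floor' clamps to the last valid day of the target month; the patch implements this by accumulating the spilled days in `n_floor` and subtracting them when a 'floor' modifier follows. A minimal standalone sketch of that bookkeeping using chrono (`add_one_month` here is an illustrative stand-in, not the patch's function):

    use chrono::{Datelike, Days, NaiveDate};

    // Adding one month to Jan 31 lands on an invalid date; count how many
    // days spill past the end of the target month, then either keep them
    // (default/'ceiling') or drop them ('floor').
    fn add_one_month(date: NaiveDate, floor: bool) -> NaiveDate {
        let (y, m) = if date.month() == 12 {
            (date.year() + 1, 1)
        } else {
            (date.year(), date.month() + 1)
        };
        let mut day = date.day();
        let mut spill = 0u64;
        while NaiveDate::from_ymd_opt(y, m, day).is_none() {
            day -= 1;
            spill += 1;
        }
        let clamped = NaiveDate::from_ymd_opt(y, m, day).unwrap();
        if floor {
            clamped // 'floor': clamp to the last day of the month
        } else {
            clamped + Days::new(spill) // 'ceiling': spill into the next month
        }
    }

    fn main() {
        let d = NaiveDate::from_ymd_opt(2024, 1, 31).unwrap();
        assert_eq!(add_one_month(d, false).to_string(), "2024-03-02");
        assert_eq!(add_one_month(d, true).to_string(), "2024-02-29");
    }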
diff --git a/core/functions/datetime.rs b/core/functions/datetime.rs
index a41dad656..2d3b4896d 100644
--- a/core/functions/datetime.rs
+++ b/core/functions/datetime.rs
@@ -62,24 +62,35 @@ fn exec_datetime(values: &[Register], output_type: DateTimeOutput) -> Value {
}
fn modify_dt(dt: &mut NaiveDateTime, mods: &[Register], output_type: DateTimeOutput) -> Value {
+ let mut n_floor: i64 = 0;
let mut subsec_requested = false;
-
for modifier in mods {
if let Value::Text(ref text_rc) = modifier.get_value() {
// TODO: to prevent double conversion and properly support 'utc'/'localtime', we also
// need to keep track of the current timezone and apply it to the modifier.
- match apply_modifier(dt, text_rc.as_str()) {
+ let parsed = parse_modifier(text_rc.as_str());
+ if !matches!(parsed, Ok(Modifier::Floor) | Ok(Modifier::Ceiling)) {
+ n_floor = 0;
+ }
+
+ match apply_modifier(dt, text_rc.as_str(), &mut n_floor) {
Ok(true) => subsec_requested = true,
Ok(false) => {}
Err(_) => return Value::build_text(""),
}
+
+ if matches!(parsed, Ok(Modifier::Floor) | Ok(Modifier::Ceiling)) {
+ n_floor = 0;
+ }
} else {
return Value::build_text("");
}
}
+
if is_leap_second(dt) || *dt > get_max_datetime_exclusive() {
return Value::build_text("");
}
+
format_dt(*dt, output_type, subsec_requested)
}
@@ -95,7 +106,7 @@ fn format_dt(dt: NaiveDateTime, output_type: DateTimeOutput, subsec: bool) -> Va
Value::from_text(t.as_str())
}
DateTimeOutput::DateTime => {
- let t = if subsec {
+ let t = if subsec && dt.nanosecond() != 0 {
dt.format("%Y-%m-%d %H:%M:%S%.3f").to_string()
} else {
dt.format("%Y-%m-%d %H:%M:%S").to_string()
@@ -134,9 +145,7 @@ fn strftime_format(dt: &NaiveDateTime, format_str: &str) -> String {
}
}
-// to prevent stripping the modifier string and comparing multiple times, this returns
-// whether the modifier was a subsec modifier because it impacts the format string
-fn apply_modifier(dt: &mut NaiveDateTime, modifier: &str) -> Result<bool> {
+fn apply_modifier(dt: &mut NaiveDateTime, modifier: &str, n_floor: &mut i64) -> Result<bool> {
let parsed_modifier = parse_modifier(modifier)?;
match parsed_modifier {
@@ -148,10 +157,10 @@ fn apply_modifier(dt: &mut NaiveDateTime, modifier: &str) -> Result<bool> {
// Convert months to years + leftover months
let years = m / 12;
let leftover = m % 12;
- add_years_and_months(dt, years, leftover)?;
+ add_years_and_months(dt, years, leftover, n_floor)?;
}
Modifier::Years(y) => {
- add_years_and_months(dt, y, 0)?;
+ add_years_and_months(dt, y, 0, n_floor)?;
}
Modifier::TimeOffset(offset) => *dt += offset,
Modifier::DateOffset {
@@ -159,9 +168,7 @@ fn apply_modifier(dt: &mut NaiveDateTime, modifier: &str) -> Result<bool> {
months,
days,
} => {
- *dt = dt
- .checked_add_months(chrono::Months::new((years * 12 + months) as u32))
- .ok_or_else(|| InvalidModifier("Invalid date offset".to_string()))?;
+ add_years_and_months(dt, years, months, n_floor)?;
*dt += TimeDelta::days(days as i64);
}
Modifier::DateTimeOffset {
@@ -170,12 +177,20 @@ fn apply_modifier(dt: &mut NaiveDateTime, modifier: &str) -> Result<bool> {
days,
seconds,
} => {
- add_years_and_months(dt, years, months)?;
+ add_years_and_months(dt, years, months, n_floor)?;
*dt += chrono::Duration::days(days as i64);
*dt += chrono::Duration::seconds(seconds.into());
}
- Modifier::Ceiling => todo!(),
- Modifier::Floor => todo!(),
+ Modifier::Floor => {
+ if *n_floor <= 0 {
+ return Ok(false);
+ }
+
+ *dt -= TimeDelta::days(*n_floor);
+ }
+ Modifier::Ceiling => {
+ *n_floor = 0;
+ }
Modifier::StartOfMonth => {
*dt = NaiveDate::from_ymd_opt(dt.year(), dt.month(), 1)
.unwrap()
@@ -222,16 +237,22 @@ fn is_julian_day_value(value: f64) -> bool {
(0.0..5373484.5).contains(&value)
}
-fn add_years_and_months(dt: &mut NaiveDateTime, years: i32, months: i32) -> Result<()> {
- add_whole_years(dt, years)?;
- add_months_in_increments(dt, months)?;
+fn add_years_and_months(
+ dt: &mut NaiveDateTime,
+ years: i32,
+ months: i32,
+ n_floor: &mut i64,
+) -> Result<()> {
+ add_whole_years(dt, years, n_floor)?;
+ add_months_in_increments(dt, months, n_floor)?;
Ok(())
}
-fn add_whole_years(dt: &mut NaiveDateTime, years: i32) -> Result<()> {
+fn add_whole_years(dt: &mut NaiveDateTime, years: i32, n_floor: &mut i64) -> Result<()> {
if years == 0 {
return Ok(());
}
+
let target_year = dt.year() + years;
let (m, d, hh, mm, ss) = (dt.month(), dt.day(), dt.hour(), dt.minute(), dt.second());
@@ -255,16 +276,17 @@ fn add_whole_years(dt: &mut NaiveDateTime, years: i32) -> Result<()> {
.ok_or_else(|| InvalidModifier("Invalid time format".to_string()))?;
*dt = base_date + chrono::Duration::days(leftover as i64);
+ *n_floor += leftover as i64;
} else {
// do we fall back here?
}
Ok(())
}
-fn add_months_in_increments(dt: &mut NaiveDateTime, months: i32) -> Result<()> {
+fn add_months_in_increments(dt: &mut NaiveDateTime, months: i32, n_floor: &mut i64) -> Result<()> {
let step = if months >= 0 { 1 } else { -1 };
for _ in 0..months.abs() {
- add_one_month(dt, step)?;
+ add_one_month(dt, step, n_floor)?;
}
Ok(())
}
@@ -275,7 +297,7 @@ fn add_months_in_increments(dt: &mut NaiveDateTime, months: i32) -> Result<()> {
//
// the modifiers 'ceiling' and 'floor' will determine behavior, so we'll need to eagerly
// evaluate modifiers in the future to support those, and 'julianday'/'unixepoch'
-fn add_one_month(dt: &mut NaiveDateTime, step: i32) -> Result<()> {
+fn add_one_month(dt: &mut NaiveDateTime, step: i32, n_floor: &mut i64) -> Result<()> {
let (y0, m0, d0) = (dt.year(), dt.month(), dt.day());
let (hh, mm, ss) = (dt.hour(), dt.minute(), dt.second());
@@ -304,6 +326,7 @@ fn add_one_month(dt: &mut NaiveDateTime, step: i32) -> Result<()> {
.ok_or_else(|| InvalidModifier("Invalid Auto format".to_string()))?;
*dt = base_date + chrono::Duration::days(leftover as i64);
+ *n_floor += leftover as i64;
}
Ok(())
}
@@ -1143,6 +1166,12 @@ mod tests {
assert_eq!(parse_modifier("WEEKDAY 6").unwrap(), Modifier::Weekday(6));
}
+ #[test]
+ fn test_parse_ceiling_modifier() {
+ assert_eq!(parse_modifier("ceiling").unwrap(), Modifier::Ceiling);
+ assert_eq!(parse_modifier("CEILING").unwrap(), Modifier::Ceiling);
+ }
+
#[test]
fn test_parse_other_modifiers() {
assert_eq!(parse_modifier("unixepoch").unwrap(), Modifier::UnixEpoch);
@@ -1191,89 +1220,106 @@ mod tests {
#[test]
fn test_apply_modifier_days() {
let mut dt = setup_datetime();
- apply_modifier(&mut dt, "5 days").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "5 days", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 20, 12, 30, 45));
dt = setup_datetime();
- apply_modifier(&mut dt, "-3 days").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "-3 days", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 12, 12, 30, 45));
}
#[test]
fn test_apply_modifier_hours() {
let mut dt = setup_datetime();
- apply_modifier(&mut dt, "6 hours").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "6 hours", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 18, 30, 45));
dt = setup_datetime();
- apply_modifier(&mut dt, "-2 hours").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "-2 hours", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 10, 30, 45));
}
#[test]
fn test_apply_modifier_minutes() {
let mut dt = setup_datetime();
- apply_modifier(&mut dt, "45 minutes").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "45 minutes", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 13, 15, 45));
dt = setup_datetime();
- apply_modifier(&mut dt, "-15 minutes").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "-15 minutes", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 12, 15, 45));
}
#[test]
fn test_apply_modifier_seconds() {
let mut dt = setup_datetime();
- apply_modifier(&mut dt, "30 seconds").unwrap();
+
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "30 seconds", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 12, 31, 15));
dt = setup_datetime();
- apply_modifier(&mut dt, "-20 seconds").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "-20 seconds", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 12, 30, 25));
}
#[test]
fn test_apply_modifier_time_offset() {
let mut dt = setup_datetime();
- apply_modifier(&mut dt, "+01:30").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "+01:30", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 14, 0, 45));
dt = setup_datetime();
- apply_modifier(&mut dt, "-00:45").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "-00:45", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 11, 45, 45));
}
#[test]
fn test_apply_modifier_date_time_offset() {
let mut dt = setup_datetime();
- apply_modifier(&mut dt, "+0001-01-01 01:01").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "+0001-01-01 01:01", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2024, 7, 16, 13, 31, 45));
dt = setup_datetime();
- apply_modifier(&mut dt, "-0001-01-01 01:01").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "-0001-01-01 01:01", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2022, 5, 14, 11, 29, 45));
// Test with larger offsets
dt = setup_datetime();
- apply_modifier(&mut dt, "+0002-03-04 05:06").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "+0002-03-04 05:06", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2025, 9, 19, 17, 36, 45));
dt = setup_datetime();
- apply_modifier(&mut dt, "-0002-03-04 05:06").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "-0002-03-04 05:06", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2021, 3, 11, 7, 24, 45));
}
#[test]
fn test_apply_modifier_start_of_year() {
let mut dt = setup_datetime();
- apply_modifier(&mut dt, "start of year").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "start of year", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 1, 1, 0, 0, 0));
}
#[test]
fn test_apply_modifier_start_of_day() {
let mut dt = setup_datetime();
- apply_modifier(&mut dt, "start of day").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "start of day", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 15, 0, 0, 0));
}
@@ -1452,7 +1498,8 @@ mod tests {
fn test_already_on_weekday_no_change() {
// 2023-01-01 is a Sunday => weekday 0
let mut dt = create_datetime(2023, 1, 1, 12, 0, 0);
- apply_modifier(&mut dt, "weekday 0").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "weekday 0", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 1, 1, 12, 0, 0));
assert_eq!(weekday_sunday_based(&dt), 0);
}
@@ -1462,14 +1509,16 @@ mod tests {
// 2023-01-01 is a Sunday => weekday 0
// "weekday 1" => next Monday => 2023-01-02
let mut dt = create_datetime(2023, 1, 1, 12, 0, 0);
- apply_modifier(&mut dt, "weekday 1").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "weekday 1", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 1, 2, 12, 0, 0));
assert_eq!(weekday_sunday_based(&dt), 1);
// 2023-01-03 is a Tuesday => weekday 2
// "weekday 5" => next Friday => 2023-01-06
let mut dt = create_datetime(2023, 1, 3, 12, 0, 0);
- apply_modifier(&mut dt, "weekday 5").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "weekday 5", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 1, 6, 12, 0, 0));
assert_eq!(weekday_sunday_based(&dt), 5);
}
@@ -1479,12 +1528,13 @@ mod tests {
// 2023-01-06 is a Friday => weekday 5
// "weekday 0" => next Sunday => 2023-01-08
let mut dt = create_datetime(2023, 1, 6, 12, 0, 0);
- apply_modifier(&mut dt, "weekday 0").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "weekday 0", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 1, 8, 12, 0, 0));
assert_eq!(weekday_sunday_based(&dt), 0);
// Now confirm that being on Sunday (weekday 0) and asking for "weekday 0" stays put
- apply_modifier(&mut dt, "weekday 0").unwrap();
+ apply_modifier(&mut dt, "weekday 0", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 1, 8, 12, 0, 0));
assert_eq!(weekday_sunday_based(&dt), 0);
}
@@ -1494,7 +1544,8 @@ mod tests {
// 2023-01-05 is a Thursday => weekday 4
// Asking for weekday 4 => no change
let mut dt = create_datetime(2023, 1, 5, 12, 0, 0);
- apply_modifier(&mut dt, "weekday 4").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "weekday 4", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 1, 5, 12, 0, 0));
assert_eq!(weekday_sunday_based(&dt), 4);
}
@@ -1504,7 +1555,8 @@ mod tests {
// 2023-01-06 is a Friday => weekday 5
// Asking for weekday 5 => no change if already on Friday
let mut dt = create_datetime(2023, 1, 6, 12, 0, 0);
- apply_modifier(&mut dt, "weekday 5").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "weekday 5", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 1, 6, 12, 0, 0));
assert_eq!(weekday_sunday_based(&dt), 5);
}
@@ -1526,7 +1578,8 @@ mod tests {
#[test]
fn test_apply_modifier_start_of_month() {
let mut dt = create_datetime(2023, 6, 15, 12, 30, 45);
- apply_modifier(&mut dt, "start of month").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "start of month", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 1, 0, 0, 0));
}
@@ -1535,15 +1588,48 @@ mod tests {
let mut dt = create_datetime(2023, 6, 15, 12, 30, 45);
let dt_with_nanos = dt.with_nanosecond(123_456_789).unwrap();
dt = dt_with_nanos;
- apply_modifier(&mut dt, "subsec").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "subsec", &mut n_floor).unwrap();
assert_eq!(dt, dt_with_nanos);
}
+ #[test]
+ fn test_apply_modifier_floor_modifier_n_floor_gt_0() {
+ let mut dt = create_datetime(2023, 6, 15, 12, 30, 45);
+ let mut n_floor = 3;
+
+ apply_modifier(&mut dt, "floor", &mut n_floor).unwrap();
+ assert_eq!(dt, create_datetime(2023, 6, 12, 12, 30, 45));
+ }
+
+ #[test]
+ fn test_apply_modifier_floor_modifier_n_floor_le_0() {
+ let mut dt = create_datetime(2023, 6, 15, 12, 30, 45);
+ let mut n_floor = 0;
+
+ apply_modifier(&mut dt, "floor", &mut n_floor).unwrap();
+ assert_eq!(dt, create_datetime(2023, 6, 15, 12, 30, 45));
+
+ n_floor = 2;
+ apply_modifier(&mut dt, "floor", &mut n_floor).unwrap();
+ assert_eq!(dt, create_datetime(2023, 6, 13, 12, 30, 45));
+ }
+
+ #[test]
+ fn test_apply_modifier_ceiling_modifier_sets_n_floor_to_zero() {
+ let mut dt = create_datetime(2023, 6, 15, 12, 30, 45);
+ let mut n_floor = 5;
+
+ apply_modifier(&mut dt, "ceiling", &mut n_floor).unwrap();
+ assert_eq!(n_floor, 0);
+ }
+
#[test]
fn test_apply_modifier_start_of_month_basic() {
// Basic check: from mid-month to the 1st at 00:00:00.
let mut dt = create_datetime(2023, 6, 15, 12, 30, 45);
- apply_modifier(&mut dt, "start of month").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "start of month", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 1, 0, 0, 0));
}
@@ -1551,7 +1637,8 @@ mod tests {
fn test_apply_modifier_start_of_month_already_at_first() {
// If we're already at the start of the month, no change.
let mut dt = create_datetime(2023, 6, 1, 0, 0, 0);
- apply_modifier(&mut dt, "start of month").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "start of month", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 6, 1, 0, 0, 0));
}
@@ -1559,7 +1646,8 @@ mod tests {
fn test_apply_modifier_start_of_month_edge_case() {
// edge case: month boundary. 2023-07-31 -> start of July.
let mut dt = create_datetime(2023, 7, 31, 23, 59, 59);
- apply_modifier(&mut dt, "start of month").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "start of month", &mut n_floor).unwrap();
assert_eq!(dt, create_datetime(2023, 7, 1, 0, 0, 0));
}
@@ -1568,7 +1656,8 @@ mod tests {
let mut dt = create_datetime(2023, 6, 15, 12, 30, 45);
let dt_with_nanos = dt.with_nanosecond(123_456_789).unwrap();
dt = dt_with_nanos;
- apply_modifier(&mut dt, "subsec").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "subsec", &mut n_floor).unwrap();
assert_eq!(dt, dt_with_nanos);
}
@@ -1577,7 +1666,8 @@ mod tests {
let mut dt = create_datetime(2025, 1, 2, 4, 12, 21)
.with_nanosecond(891_000_000) // 891 milliseconds
.unwrap();
- apply_modifier(&mut dt, "subsec").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "subsec", &mut n_floor).unwrap();
let formatted = dt.format("%Y-%m-%d %H:%M:%S%.3f").to_string();
assert_eq!(formatted, "2025-01-02 04:12:21.891");
@@ -1586,7 +1676,8 @@ mod tests {
#[test]
fn test_apply_modifier_subsec_no_fractional_seconds() {
let mut dt = create_datetime(2025, 1, 2, 4, 12, 21);
- apply_modifier(&mut dt, "subsec").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "subsec", &mut n_floor).unwrap();
let formatted = dt.format("%Y-%m-%d %H:%M:%S%.3f").to_string();
assert_eq!(formatted, "2025-01-02 04:12:21.000");
@@ -1597,7 +1688,8 @@ mod tests {
let mut dt = create_datetime(2025, 1, 2, 4, 12, 21)
.with_nanosecond(891_123_456)
.unwrap();
- apply_modifier(&mut dt, "subsec").unwrap();
+ let mut n_floor = 0;
+ apply_modifier(&mut dt, "subsec", &mut n_floor).unwrap();
let formatted = dt.format("%Y-%m-%d %H:%M:%S%.3f").to_string();
assert_eq!(formatted, "2025-01-02 04:12:21.891");
diff --git a/testing/scalar-functions-datetime.test b/testing/scalar-functions-datetime.test
index 33caf52c2..4a3e4888c 100755
--- a/testing/scalar-functions-datetime.test
+++ b/testing/scalar-functions-datetime.test
@@ -251,6 +251,46 @@ do_execsql_test date-with-modifier-add-months {
SELECT date('2023-05-18', '+2 months');
} {2023-07-18}
+do_execsql_test datetime-default-ceiling {
+ SELECT date('2024-01-31', '+1 month'); -- default ceiling
+} {2024-03-02}
+
+do_execsql_test datetime-floor-keeps-time {
+ SELECT datetime('2024-01-31 10:20:30', '+1 month', 'floor');
+} {{2024-02-29 10:20:30}}
+
+do_execsql_test datetime-ceiling-keeps-time {
+ SELECT datetime('2024-01-31 10:20:30', '+1 month', 'ceiling');
+} {{2024-03-02 10:20:30}}
+
+do_execsql_test date-ceiling-floor-2 {
+ SELECT date('2024-01-31', '+1 month', 'floor');
+} {2024-02-29}
+
+do_execsql_test date-ceiling-floor-3 {
+ SELECT date('2023-01-31', '+1 month', 'floor');
+} {2023-02-28}
+
+do_execsql_test date-ceiling-floor-4 {
+ SELECT date('2024-03-31', '-1 month', 'floor');
+} {2024-02-29}
+
+do_execsql_test date-ceiling-floor-5 {
+ SELECT date('2024-01-31', '+1 month', '+1 month', 'floor');
+} {2024-04-02}
+
+do_execsql_test date-ceiling-floor-6 {
+ SELECT date('2024-01-31', '+1 month', 'ceiling');
+} {2024-03-02}
+
+do_execsql_test date-ceiling-floor-7 {
+ SELECT date('2024-01-31', '+1 month', 'floor', 'ceiling');
+} {2024-02-29}
+
+do_execsql_test date-ceiling-floor-8 {
+ SELECT date('2024-01-31', '+1 month', '+1 day', 'floor');
+} {2024-03-03}
+
do_execsql_test date-with-modifier-subtract-months {
SELECT date('2023-05-18', '-3 months');
} {2023-02-18}
@@ -371,6 +411,26 @@ do_execsql_test datetime-with-multiple-modifiers {
select datetime('2024-01-31', '+1 month', '+13 hours', '+5 minutes', '+62 seconds');
} {{2024-03-02 13:06:02}}
+do_execsql_test datetime-with-modifier-ceiling {
+ SELECT datetime('2023-05-18 15:30:45', 'ceiling');
+} {{2023-05-18 15:30:45}}
+
+do_execsql_test datetime-with-modifier-ceiling-already-ceiled {
+ SELECT datetime('2023-05-18 23:59:59', 'ceiling');
+} {{2023-05-18 23:59:59}}
+
+do_execsql_test datetime-with-ceiling-modifier-invalid-input {
+ SELECT datetime('not-a-date', 'ceiling');
+} {{}}
+
+do_execsql_test datetime-with-ceiling-modifier-stacked {
+ SELECT datetime('2023-05-18 15:30:45', '+1 day', 'ceiling');
+} {{2023-05-19 15:30:45}}
+
+do_execsql_test date-with-ceiling-modifier-basic {
+ SELECT date('2023-05-18 15:30:45', 'ceiling');
+} {2023-05-18}
+
do_execsql_test datetime-with-weekday {
SELECT datetime('2023-05-18', 'weekday 3');
} {{2023-05-24 00:00:00}}
@@ -666,4 +726,4 @@ do_execsql_test timediff-julian-day {
do_execsql_test timediff-different-time-formats {
SELECT timediff('23:59:59', '00:00:00');
-} {"+0000-00-00 23:59:59.000"}
\ No newline at end of file
+} {"+0000-00-00 23:59:59.000"}
From 9bda8971189ccdd78ac985cf9100005adefa8840 Mon Sep 17 00:00:00 2001
From: C4 Patino
Date: Tue, 19 Aug 2025 20:17:46 -0500
Subject: [PATCH 02/66] chore: marked datetime ceiling modifier as completed in
COMPAT.md
---
COMPAT.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/COMPAT.md b/COMPAT.md
index a81641055..06c6f9fc5 100644
--- a/COMPAT.md
+++ b/COMPAT.md
@@ -343,7 +343,7 @@ Modifiers:
| TimeOffset | Yes | |
| DateOffset | Yes | |
| DateTimeOffset | Yes | |
-| Ceiling | No | |
+| Ceiling | Yes | |
| Floor | No | |
| StartOfMonth | Yes | |
| StartOfYear | Yes | |
From b6e64587cb2bdab6c83a866bb1e341a1f1ce64d9 Mon Sep 17 00:00:00 2001
From: themixednuts
Date: Wed, 27 Aug 2025 23:49:59 -0500
Subject: [PATCH 03/66] fix: jsonb functions to check if binary is json string
chore: match sqlite error
chore: use existing slice variable
add better parsing logic and validation
see sqlite source code @ sqlite/src/json.c -> static int jsonArgIsJsonb
chore: clippy
---
core/json/jsonb.rs | 113 ++++++++++++++++++++++++++++++++++++++++++++-
core/json/mod.rs | 53 ++++++++++++++++-----
testing/json.test | 29 +++++++++++-
3 files changed, 181 insertions(+), 14 deletions(-)
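Note: the detection logic leans on the SQLite JSONB on-disk format, where the first byte packs the element type into the low nibble and a payload-size code into the high nibble; that is what makes an ASCII '{' (0x7B) ambiguous between JSON text and a JSONB array header. A simplified decoder for orientation (the patch's real counterpart is `JsonbHeader::from_slice`; the 8-byte size form is omitted here):

    // Decode a JSONB header: returns (element_type, payload_size, header_len).
    // Low nibble: 0=NULL, 1=TRUE, 2=FALSE, 3..=6 numbers, 7..=10 text,
    // 11=ARRAY, 12=OBJECT. High nibble: the payload size itself if <= 11,
    // or 12/13/14 meaning the size follows in 1/2/4 big-endian bytes.
    fn read_header(buf: &[u8]) -> Option<(u8, usize, usize)> {
        let b = *buf.first()?;
        let elem_type = b & 0x0F;
        if elem_type > 12 {
            return None; // 13..=15 are reserved element types
        }
        match (b >> 4) as usize {
            n @ 0..=11 => Some((elem_type, n, 1)),
            12 => Some((elem_type, *buf.get(1)? as usize, 2)),
            13 => Some((
                elem_type,
                u16::from_be_bytes([*buf.get(1)?, *buf.get(2)?]) as usize,
                3,
            )),
            14 => Some((
                elem_type,
                u32::from_be_bytes([*buf.get(1)?, *buf.get(2)?, *buf.get(3)?, *buf.get(4)?]) as usize,
                5,
            )),
            _ => None, // 15 (8-byte size) omitted in this sketch
        }
    }

    fn main() {
        // b'{' is 0x7B: type 11 (ARRAY) with inline payload size 7, so an
        // 8-byte blob starting with '{' can be a valid JSONB array rather
        // than JSON text -- exactly the ambiguity the new tests exercise.
        assert_eq!(read_header(&[0x7B]), Some((11, 7, 1)));
    }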
diff --git a/core/json/jsonb.rs b/core/json/jsonb.rs
index c4c95aac0..ee3f34cef 100644
--- a/core/json/jsonb.rs
+++ b/core/json/jsonb.rs
@@ -1,4 +1,5 @@
use crate::json::error::{Error as PError, Result as PResult};
+use crate::json::Conv;
use crate::{bail_parse_error, LimboError, Result};
use std::{
borrow::Cow,
@@ -742,7 +743,15 @@ impl JsonbHeader {
Self(ElementType::OBJECT, 0)
}
- fn from_slice(cursor: usize, slice: &[u8]) -> Result<(Self, usize)> {
+ pub(super) fn element_type(&self) -> ElementType {
+ self.0
+ }
+
+ pub(super) fn payload_size(&self) -> PayloadSize {
+ self.1
+ }
+
+ pub(super) fn from_slice(cursor: usize, slice: &[u8]) -> Result<(Self, usize)> {
match slice.get(cursor) {
Some(header_byte) => {
// Extract first 4 bits (values 0-15)
@@ -909,6 +918,96 @@ impl Jsonb {
}
}
+ pub fn is_valid(&self) -> bool {
+ self.validate_element(0, self.data.len(), 0).is_ok()
+ }
+
+ fn validate_element(&self, start: usize, end: usize, depth: usize) -> Result<()> {
+ if depth > MAX_JSON_DEPTH {
+ bail_parse_error!("Too deep");
+ }
+
+ if start >= end {
+ bail_parse_error!("Empty element");
+ }
+
+ let (header, header_offset) = self.read_header(start)?;
+ let payload_start = start + header_offset;
+ let payload_size = header.payload_size();
+ let payload_end = payload_start + payload_size;
+
+ if payload_end != end {
+ bail_parse_error!("Size mismatch");
+ }
+
+ match header.element_type() {
+ ElementType::NULL | ElementType::TRUE | ElementType::FALSE => {
+ if payload_size == 0 {
+ Ok(())
+ } else {
+ bail_parse_error!("Invalid payload for primitive")
+ }
+ }
+ ElementType::INT | ElementType::INT5 | ElementType::FLOAT | ElementType::FLOAT5 => {
+ if payload_size > 0 {
+ Ok(())
+ } else {
+ bail_parse_error!("Empty number payload")
+ }
+ }
+ ElementType::TEXT | ElementType::TEXTJ | ElementType::TEXT5 | ElementType::TEXTRAW => {
+ let payload = &self.data[payload_start..payload_end];
+ std::str::from_utf8(payload).map_err(|_| {
+ LimboError::ParseError("Invalid UTF-8 in text payload".to_string())
+ })?;
+ Ok(())
+ }
+ ElementType::ARRAY => {
+ let mut pos = payload_start;
+ while pos < payload_end {
+ if pos >= self.data.len() {
+ bail_parse_error!("Array element out of bounds");
+ }
+ let (elem_header, elem_header_size) = self.read_header(pos)?;
+ let elem_end = pos + elem_header_size + elem_header.payload_size();
+ if elem_end > payload_end {
+ bail_parse_error!("Array element exceeds bounds");
+ }
+ self.validate_element(pos, elem_end, depth + 1)?;
+ pos = elem_end;
+ }
+ Ok(())
+ }
+ ElementType::OBJECT => {
+ let mut pos = payload_start;
+ let mut count = 0;
+ while pos < payload_end {
+ if pos >= self.data.len() {
+ bail_parse_error!("Object element out of bounds");
+ }
+ let (elem_header, elem_header_size) = self.read_header(pos)?;
+ if count % 2 == 0 && !elem_header.element_type().is_valid_key() {
+ bail_parse_error!("Object key must be text");
+ }
+
+ let elem_end = pos + elem_header_size + elem_header.payload_size();
+ if elem_end > payload_end {
+ bail_parse_error!("Object element exceeds bounds");
+ }
+ self.validate_element(pos, elem_end, depth + 1)?;
+ pos = elem_end;
+ count += 1;
+ }
+
+ if count % 2 != 0 {
+ bail_parse_error!("Object must have even number of elements");
+ }
+ Ok(())
+ }
+ _ => bail_parse_error!("Invalid element type"),
+ }
+ }
+
#[expect(clippy::inherent_to_string)]
pub fn to_string(&self) -> String {
let mut result = String::with_capacity(self.data.len() * 2);
@@ -2158,6 +2257,18 @@ impl Jsonb {
Ok(result)
}
+ pub fn from_str_with_mode(input: &str, mode: Conv) -> PResult<Self> {
+ // Parse directly as JSON if it's already JSON subtype or strict mode is on
+ if matches!(mode, Conv::ToString) {
+ let mut str = input.replace('"', "\\\"");
+ str.insert(0, '"');
+ str.push('"');
+ Jsonb::from_str(&str)
+ } else {
+ Jsonb::from_str(input)
+ }
+ }
+
pub fn from_raw_data(data: &[u8]) -> Self {
Self::new(data.len(), Some(data))
}
diff --git a/core/json/mod.rs b/core/json/mod.rs
index caa1b28a0..663778c34 100644
--- a/core/json/mod.rs
+++ b/core/json/mod.rs
@@ -117,24 +117,20 @@ pub fn convert_dbtype_to_jsonb(val: &Value, strict: Conv) -> crate::Result<Jsonb> {
+fn parse_as_json_text(slice: &[u8]) -> crate::Result<Jsonb> {
+ let str = std::str::from_utf8(slice)
+ .map_err(|_| LimboError::ParseError("malformed JSON".to_string()))?;
+ Jsonb::from_str_with_mode(str, Conv::Strict).map_err(Into::into)
+}
+
pub fn convert_ref_dbtype_to_jsonb(val: &RefValue, strict: Conv) -> crate::Result<Jsonb> {
match val {
RefValue::Text(text) => {
let res = if text.subtype == TextSubtype::Json || matches!(strict, Conv::Strict) {
- // Parse directly as JSON if it's already JSON subtype or strict mode is on
- let json = if matches!(strict, Conv::ToString) {
- let mut str = text.as_str().replace('"', "\\\"");
- str.insert(0, '"');
- str.push('"');
- Jsonb::from_str(&str)
- } else {
- Jsonb::from_str(text.as_str())
- };
- json
+ Jsonb::from_str_with_mode(text.as_str(), strict)
} else {
// Handle as a string literal otherwise
let mut str = text.as_str().replace('"', "\\\"");
-
// Quote the string to make it a JSON string
str.insert(0, '"');
str.push('"');
@@ -143,7 +139,40 @@ pub fn convert_ref_dbtype_to_jsonb(val: &RefValue, strict: Conv) -> crate::Resul
res.map_err(|_| LimboError::ParseError("malformed JSON".to_string()))
}
RefValue::Blob(blob) => {
- let json = Jsonb::from_raw_data(blob.to_slice());
+ let bytes = blob.to_slice();
+ // Valid JSON can start with these whitespace characters
+ let index = bytes
+ .iter()
+ .position(|&b| !matches!(b, b' ' | b'\t' | b'\n' | b'\r'))
+ .unwrap_or(bytes.len());
+ let slice = &bytes[index..];
+ let json = match slice {
+ // branch with no overlapping initial byte
+ [b'"', ..] | [b'-', ..] | [b'0'..=b'2', ..] => parse_as_json_text(slice)?,
+ _ => match JsonbHeader::from_slice(0, slice) {
+ Ok((header, header_offset)) => {
+ let payload_size = header.payload_size();
+ let total_expected = header_offset + payload_size;
+
+ if total_expected != slice.len() {
+ parse_as_json_text(slice)?
+ } else {
+ let jsonb = Jsonb::from_raw_data(slice);
+ let is_valid_json = if payload_size <= 7 {
+ jsonb.is_valid()
+ } else {
+ jsonb.element_type().is_ok()
+ };
+ if is_valid_json {
+ jsonb
+ } else {
+ parse_as_json_text(slice)?
+ }
+ }
+ }
+ Err(_) => parse_as_json_text(slice)?,
+ },
+ };
json.element_type()?;
Ok(json)
}
diff --git a/testing/json.test b/testing/json.test
index 4f16732ca..6a7b53a4e 100755
--- a/testing/json.test
+++ b/testing/json.test
@@ -279,6 +279,34 @@ do_execsql_test json_arrow_object {
SELECT '{"a": [1,2,3]}' -> '$.a'
} {{[1,2,3]}}
+do_execsql_test json_arrow_blob_object {
+ SELECT cast('{"age":30,"name":"John"}' as blob) -> '$.age'
+} {{30}}
+
+# Tests against valid jsonb [b'{',.., b'}'] vs json text '{..}'
+# b'{' = ElementType::Array, PayloadSize of 7.
+# b'}' = last element in array ends in '}'
+# x'7B0707070707177D' = jsonb(["", "", "", "", "", "}"])
+do_execsql_test json_arrow_blob_array {
+ SELECT x'7B0707070707177D' -> '$[5]'
+} {\"\}\"}
+
+do_execsql_test json_arrow_blob_number {
+ SELECT cast('4' as blob) -> '$'
+} {{4}}
+
+do_execsql_test json_arrow_blob_number_2 {
+ SELECT cast(33 as blob) -> '$'
+} {{33}}
+
+# jsonb(333)
+do_execsql_test json_arrow_blob_number_3 {
+ SELECT x'33333333' -> '$'
+} {{333}}
+
+do_execsql_test json_arrow_blob_negative_number {
+ SELECT cast('-4' as blob) -> '$'
+} {{-4}}
do_execsql_test json_arrow_shift_object {
SELECT '{"a": [1,2,3]}' ->> '$.a'
} {{[1,2,3]}}
@@ -291,7 +319,6 @@ do_execsql_test json_extract_object_3 {
SELECT json_extract('{"a": [1,2,3]}', '$.a', '$.a[0]', '$.a[1]', null, '$.a[3]')
} {{}}
-
# \x61 is the ASCII code for 'a'
do_execsql_test json_extract_with_escaping {
SELECT json_extract('{"\x61": 1}', '$.a')
From fa19ba499344e6114b6a8a910aa56a6175cb6df2 Mon Sep 17 00:00:00 2001
From: themixednuts
Date: Mon, 1 Sep 2025 20:36:24 -0500
Subject: [PATCH 04/66] chore: add tests
---
testing/json.test | 45 ++++++++++++++++++++++++++++++++-------------
1 file changed, 32 insertions(+), 13 deletions(-)
diff --git a/testing/json.test b/testing/json.test
index 6a7b53a4e..97d1d175e 100755
--- a/testing/json.test
+++ b/testing/json.test
@@ -291,6 +291,14 @@ do_execsql_test json_arrow_blob_array {
SELECT x'7B0707070707177D' -> '$[5]'
} {\"\}\"}
+# Tests against valid jsonb [b'[',.., b']'] vs json text '[..]'
+# b'[' = ElementType::Array, PayloadSize of 5.
+# b']' = last element in array ends in ']'
+# x'5B070707175D' = jsonb(["", "", "", "]"])
+do_execsql_test json_arrow_blob_array_2 {
+ SELECT x'5B070707175D' -> '$[3]'
+} {\"\]\"}
+
do_execsql_test json_arrow_blob_number {
SELECT cast('4' as blob) -> '$'
} {{4}}
@@ -307,9 +315,10 @@ do_execsql_test json_arrow_blob_number_3 {
do_execsql_test json_arrow_blob_negative_number {
SELECT cast('-4' as blob) -> '$'
} {{-4}}
-do_execsql_test json_arrow_shift_object {
- SELECT '{"a": [1,2,3]}' ->> '$.a'
-} {{[1,2,3]}}
+
+do_execsql_test json_arrow_shift_blob {
+ SELECT cast('{"age":30,"name":"John"}' as blob) ->> '$.age'
+} {{30}}
do_execsql_test json_extract_object_2 {
SELECT json_extract('{"a": [1,2,3]}', '$.a', '$.a[0]', '$.a[1]', '$.a[3]')
@@ -537,10 +546,9 @@ do_execsql_test json_extract_overflow_int64 {
# SELECT json_extract('[1, 2, 3]', '$[170141183460469231731687303715884105729]');
#} {{2}}
-# TODO: fix me - this passes on SQLite and needs to be fixed in Limbo.
-#do_execsql_test json_extract_blob {
-# select json_extract(CAST('[1,2,3]' as BLOB), '$[1]')
-#} {{2}}
+do_execsql_test json_extract_blob {
+ select json_extract(CAST('[1,2,3]' as BLOB), '$[1]')
+} {{2}}
do_execsql_test json_array_length {
SELECT json_array_length('[1,2,3,4]');
@@ -707,12 +715,11 @@ do_execsql_test json_valid_1 {
do_execsql_test json_valid_2 {
SELECT json_valid('["a",55,"b",72]');
} {1}
-#
-# Unimplemented
-#do_execsql_test json_valid_3 {
-# SELECT json_valid( CAST('{"a":"1}' AS BLOB) );
-#} {0}
-#
+
+do_execsql_test json_valid_3 {
+ SELECT json_valid( CAST('{"a":"1}' AS BLOB) );
+} {0}
+
do_execsql_test json_valid_4 {
SELECT json_valid(123);
} {1}
@@ -857,6 +864,14 @@ do_execsql_test json-remove-7 {
SELECT json_remove('{"a": 1, "b": [1,2], "c": {"d": 3}}', '$.a', '$.b[0]', '$.c.d');
} {{{"b":[2],"c":{}}}}
+do_execsql_test json-remove-8 {
+ SELECT json_remove(cast('{"age":30,"name":"John"}' as blob), '$.age');
+} {{{"name":"John"}}}
+
+do_execsql_test json-remove-9 {
+ SELECT json_remove(cast('{"user":{"id":123,"profile":{"name":"Alice","age":25}}}' as blob), '$.user.id');
+} {{{"user":{"profile":{"name":"Alice","age":25}}}}}
+
do_execsql_test json_set_field_empty_object {
SELECT json_set('{}', '$.field', 'value');
} {{{"field":"value"}}}
@@ -865,6 +880,10 @@ do_execsql_test json_set_replace_field {
SELECT json_set('{"field":"old_value"}', '$.field', 'new_value');
} {{{"field":"new_value"}}}
+do_execsql_test json_set_replace_field_2 {
+ SELECT json_set(cast('{"age":30,"name":"John"}' as blob), '$.age', 40);
+} {{{"age":40,"name":"John"}}}
+
do_execsql_test json_set_set_deeply_nested_key {
SELECT json_set('{}', '$.object.doesnt.exist', 'value');
} {{{"object":{"doesnt":{"exist":"value"}}}}}
From b85233059aed924929dfb5b6b21b98dfe30fe24a Mon Sep 17 00:00:00 2001
From: Pavan-Nambi
Date: Thu, 4 Sep 2025 08:59:18 +0530
Subject: [PATCH 05/66] use extract_if
---
simulator/runner/file.rs | 17 ++---------------
1 file changed, 2 insertions(+), 15 deletions(-)
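Note: the removed TODO dates from before Rust 1.87, which stabilized `Vec::extract_if`; it drains exactly the elements matching a predicate from the given range while preserving the order of the rest. A tiny usage sketch:

    fn main() {
        let mut queued = vec![1, 5, 2, 6, 3];
        // Drain elements matching the predicate (here: <= 3) from the given
        // range, preserving the relative order of what remains.
        let due: Vec<i32> = queued.extract_if(.., |t| *t <= 3).collect();
        assert_eq!(due, vec![1, 2, 3]);
        assert_eq!(queued, vec![5, 6]);
    }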
diff --git a/simulator/runner/file.rs b/simulator/runner/file.rs
index 440323e74..635de2bd2 100644
--- a/simulator/runner/file.rs
+++ b/simulator/runner/file.rs
@@ -111,21 +111,8 @@ impl SimulatorFile {
#[instrument(skip_all, level = Level::DEBUG)]
pub fn run_queued_io(&self, now: turso_core::Instant) -> Result<()> {
let mut queued_io = self.queued_io.borrow_mut();
- // TODO: as we are not in version 1.87 we cannot use `extract_if`
- // so we have to do something different to achieve the same thing
- // This code was acquired from: https://doc.rust-lang.org/beta/std/vec/struct.Vec.html#method.extract_if
- let range = 0..queued_io.len();
- let mut i = range.start;
- let end_items = queued_io.len() - range.end;
-
- while i < queued_io.len() - end_items {
- if queued_io[i].time <= now {
- let io = queued_io.remove(i);
- // your code here
- let _c = (io.op)(self)?;
- } else {
- i += 1;
- }
+ for io in queued_io.extract_if(.., |item| item.time <= now) {
+ let _c = (io.op)(self)?;
}
Ok(())
}
From d757a330eeed539269e76e83ecb9bf2718faa8ff Mon Sep 17 00:00:00 2001
From: Pavan-Nambi
Date: Mon, 8 Sep 2025 16:26:37 +0530
Subject: [PATCH 06/66] use sqlite_int_float_compare
---
core/types.rs | 8 ++++++--
testing/compare.test | 20 +++++++++++++++++++-
2 files changed, 25 insertions(+), 3 deletions(-)
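Note: routing integer/float comparisons through `sqlite_int_float_compare` mirrors SQLite's `sqlite3IntFloatCompare`: compare exactly rather than casting the i64 to f64 (which is lossy above 2^53), with the side effect that -0.0 compares equal to 0, which is what the tests below pin down. A sketch of that algorithm (assumes a non-NaN float; SQLite routes NaN through NULL handling first):

    use std::cmp::Ordering;

    // Compare an i64 with an f64 exactly, without converting the integer
    // to f64 first. Assumes `r` is not NaN.
    fn int_float_cmp(i: i64, r: f64) -> Ordering {
        // Outside i64's range the answer is immediate.
        if r < -9223372036854775808.0 {
            return Ordering::Greater;
        }
        if r >= 9223372036854775808.0 {
            return Ordering::Less;
        }
        let y = r as i64; // in range by the checks above; truncates toward zero
        match i.cmp(&y) {
            Ordering::Equal if r > y as f64 => Ordering::Less,
            Ordering::Equal if r < y as f64 => Ordering::Greater,
            ord => ord, // 0 vs -0.0 lands here as Equal
        }
    }

    fn main() {
        assert_eq!(int_float_cmp(0, -0.0), Ordering::Equal); // why `i <= -0.0` matches 0
        assert_eq!(int_float_cmp(-1, -0.0), Ordering::Less);
        assert_eq!(int_float_cmp(1, -0.0), Ordering::Greater);
    }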
diff --git a/core/types.rs b/core/types.rs
index 6bddb0b2b..8e428aa55 100644
--- a/core/types.rs
+++ b/core/types.rs
@@ -706,8 +706,12 @@ impl PartialOrd for Value {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
match (self, other) {
(Self::Integer(int_left), Self::Integer(int_right)) => int_left.partial_cmp(int_right),
- (Self::Float(float), Self::Integer(int)) => Some(int_float_cmp(*int, *float).reverse()),
- (Self::Integer(int), Self::Float(float)) => Some(int_float_cmp(*int, *float)),
+ (Self::Float(float), Self::Integer(int)) => {
+ Some(sqlite_int_float_compare(*int, *float).reverse())
+ }
+ (Self::Integer(int), Self::Float(float)) => {
+ Some(sqlite_int_float_compare(*int, *float))
+ }
(Self::Float(float_left), Self::Float(float_right)) => {
float_left.partial_cmp(float_right)
}
diff --git a/testing/compare.test b/testing/compare.test
index 54633c93d..dd09ffb92 100644
--- a/testing/compare.test
+++ b/testing/compare.test
@@ -253,4 +253,22 @@ foreach {testname lhs rhs ans} {
text-text-2 'a' 'a' 0
} {
do_execsql_test compare-is-not-$testname "SELECT $lhs is not $rhs" $::ans
-}
\ No newline at end of file
+}
+
+# github-issue: 2957.
+do_execsql_test compare-int-float-setup {
+ CREATE TABLE t1(i INTEGER);
+ INSERT INTO t1 VALUES (0), (-1), (1);
+} {}
+
+do_execsql_test compare-int-float-lte-negative-zero {
+ SELECT i FROM t1 WHERE i <= -0.0 ORDER BY i;
+} {-1 0}
+
+do_execsql_test compare-int-float-lt-negative-zero {
+ SELECT i FROM t1 WHERE i < -0.0 ORDER BY i;
+} {-1}
+
+do_execsql_test compare-int-float-cleanup {
+ DROP TABLE t1;
+} {}
\ No newline at end of file
From 02df3728112e217d6bd879e2ad31b0e2a51d03c3 Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Mon, 25 Aug 2025 16:19:05 -0400
Subject: [PATCH 07/66] Add cancel and drain methods to IO trait
---
core/io/mod.rs | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
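Note: the intended error path for callers is abort-then-wait: `cancel` marks in-flight completions aborted and `drain` blocks until the backend has quiesced, after which the error can be surfaced safely. A self-contained sketch of the trait shape with stand-in types (`Completion` here is a toy, not core's type):

    use std::sync::atomic::{AtomicBool, Ordering};

    // Stand-in for core's Completion: just an abort flag here.
    struct Completion(AtomicBool);

    impl Completion {
        fn abort(&self) {
            self.0.store(true, Ordering::Release);
        }
    }

    trait IO {
        // Default: best-effort abort of each completion; backends may also
        // cancel in-flight kernel operations (as the io_uring impl does).
        fn cancel(&self, cs: &[Completion]) -> std::io::Result<()> {
            cs.iter().for_each(|c| c.abort());
            Ok(())
        }
        // Default: nothing queued, nothing to drain. Queue-based backends
        // override this to block until all submitted operations complete.
        fn drain(&self) -> std::io::Result<()> {
            Ok(())
        }
    }

    struct SyncIO;
    impl IO for SyncIO {}

    fn main() -> std::io::Result<()> {
        let io = SyncIO;
        let pending = [Completion(AtomicBool::new(false))];
        io.cancel(&pending)?; // error path: abort, then drain, then return Err
        io.drain()
    }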
diff --git a/core/io/mod.rs b/core/io/mod.rs
index 2922004d2..15028f7eb 100644
--- a/core/io/mod.rs
+++ b/core/io/mod.rs
@@ -50,9 +50,6 @@ pub trait File: Send + Sync {
})
};
if let Err(e) = self.pwrite(pos, buf.clone(), child_c) {
- // best-effort: mark as abort so caller won't wait forever
- // TODO: when we have `pwrite` and other I/O methods return CompletionError
- // instead of LimboError, store the error inside
c.abort();
return Err(e);
}
@@ -91,6 +88,15 @@ pub trait IO: Clock + Send + Sync {
Ok(())
}
+ fn cancel(&self, c: &[Completion]) -> Result<()> {
+ c.iter().for_each(|c| c.abort());
+ Ok(())
+ }
+
+ fn drain(&self) -> Result<()> {
+ Ok(())
+ }
+
fn wait_for_completion(&self, c: Completion) -> Result<()> {
while !c.finished() {
self.run_once()?
From eb0e0694453768af99659b974b5371767b72cf6b Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Mon, 25 Aug 2025 16:21:22 -0400
Subject: [PATCH 08/66] Add ShortWrite to CompletionError
---
core/error.rs | 2 ++
1 file changed, 2 insertions(+)
diff --git a/core/error.rs b/core/error.rs
index 80e810833..ebbb6f7a2 100644
--- a/core/error.rs
+++ b/core/error.rs
@@ -124,6 +124,8 @@ pub enum CompletionError {
Aborted,
#[error("Decryption failed for page={page_idx}")]
DecryptionError { page_idx: usize },
+ #[error("I/O error: partial write")]
+ ShortWrite,
}
#[macro_export]
From a750505762f8b7f8d42e48d043b876893bc26c89 Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Mon, 25 Aug 2025 16:22:29 -0400
Subject: [PATCH 09/66] Impl cancel and drain methods for io_uring
---
core/io/io_uring.rs | 73 ++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 66 insertions(+), 7 deletions(-)
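Note: a writev completion can report fewer bytes than were submitted, so the resubmission state must skip fully written buffers, trim a partially written one, and treat a zero-byte result with data remaining as a hard failure (the new `ShortWrite`) rather than respinning forever. A simplified sketch of that advance step over (offset, len) pairs (illustrative, not the patch's `writev_states` machinery):

    // Advance a queue of (file_offset, remaining_len) buffers by `written`
    // bytes; returns Err on a zero-progress write with data still pending.
    fn advance(iovs: &mut Vec<(u64, usize)>, mut written: usize) -> Result<(), &'static str> {
        if written == 0 && !iovs.is_empty() {
            return Err("short write"); // avoid a no-progress resubmit loop
        }
        while written > 0 && !iovs.is_empty() {
            let (off, len) = iovs[0];
            if written >= len {
                written -= len;
                iovs.remove(0); // buffer fully flushed
            } else {
                iovs[0] = (off + written as u64, len - written); // trim and resubmit
                written = 0;
            }
        }
        Ok(())
    }

    fn main() {
        let mut iovs = vec![(0, 4096), (4096, 4096)];
        advance(&mut iovs, 6000).unwrap(); // first buffer done, second trimmed
        assert_eq!(iovs, vec![(4096 + 1904, 4096 - 1904)]);
        assert!(advance(&mut iovs, 0).is_err()); // zero progress => ShortWrite
    }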
diff --git a/core/io/io_uring.rs b/core/io/io_uring.rs
index 80f8c8444..69bee24e3 100644
--- a/core/io/io_uring.rs
+++ b/core/io/io_uring.rs
@@ -3,7 +3,7 @@
use super::{common, Completion, CompletionInner, File, OpenFlags, IO};
use crate::io::clock::{Clock, Instant};
use crate::storage::wal::CKPT_BATCH_PAGES;
-use crate::{turso_assert, LimboError, Result};
+use crate::{turso_assert, CompletionError, LimboError, Result};
use parking_lot::Mutex;
use rustix::fs::{self, FlockOperation, OFlags};
use std::ptr::NonNull;
@@ -48,6 +48,9 @@ const ARENA_COUNT: usize = 2;
/// writing a commit frame.
const BARRIER_USER_DATA: u64 = 1;
+/// user_data tag for cancellation operations
+const CANCEL_TAG: u64 = 1;
+
pub struct UringIO {
inner: Arc<Mutex<InnerUringIO>>,
}
@@ -317,6 +320,18 @@ impl WrappedIOUring {
self.ring.submit().expect("submiting when full");
}
+ fn submit_cancel_urgent(&mut self, entry: &io_uring::squeue::Entry) -> Result<()> {
+ let pushed = unsafe { self.ring.submission().push(entry).is_ok() };
+ if pushed {
+ self.pending_ops += 1;
+ return Ok(());
+ }
+ // place cancel op at the front, if overflowed
+ self.overflow.push_front(entry.clone());
+ self.ring.submit()?;
+ Ok(())
+ }
+
/// Flush overflow entries to submission queue when possible
fn flush_overflow(&mut self) -> Result<()> {
while !self.overflow.is_empty() {
@@ -468,10 +483,18 @@ impl WrappedIOUring {
}
let written = result;
- state.advance(written as u64);
+
+ // guard against no-progress loop
+ if written == 0 && state.remaining() > 0 {
+ state.free_last_iov(&mut self.iov_pool);
+ completion_from_key(user_data).error(CompletionError::ShortWrite);
+ return;
+ }
+ state.advance(written);
+
match state.remaining() {
0 => {
- tracing::info!(
+ tracing::debug!(
"writev operation completed: wrote {} bytes",
state.total_written
);
@@ -546,6 +569,32 @@ impl IO for UringIO {
Ok(())
}
+ fn drain(&self) -> Result<()> {
+ trace!("drain()");
+ loop {
+ {
+ let inner = self.inner.borrow();
+ if inner.ring.empty() {
+ break;
+ }
+ }
+ self.run_once()?;
+ }
+ Ok(())
+ }
+
+ fn cancel(&self, completions: &[Completion]) -> Result<()> {
+ let mut inner = self.inner.borrow_mut();
+ for c in completions {
+ c.abort();
+ let e = io_uring::opcode::AsyncCancel::new(get_key(c.clone()))
+ .build()
+ .user_data(CANCEL_TAG);
+ inner.ring.submit_cancel_urgent(&e)?;
+ }
+ Ok(())
+ }
+
fn run_once(&self) -> Result<()> {
trace!("run_once()");
let mut inner = self.inner.lock();
@@ -561,11 +610,15 @@ impl IO for UringIO {
};
ring.pending_ops -= 1;
let user_data = cqe.user_data();
+ if user_data == CANCEL_TAG {
+ // ignore if this is a cancellation CQE
+ continue;
+ }
let result = cqe.result();
turso_assert!(
- user_data != 0,
- "user_data must not be zero, we dont submit linked timeouts or cancelations that would cause this"
- );
+ user_data != 0,
+ "user_data must not be zero, we dont submit linked timeouts that would cause this"
+ );
if let Some(state) = ring.writev_states.remove(&user_data) {
// if we have ongoing writev state, handle it separately and don't call completion
ring.handle_writev_completion(state, user_data, result);
@@ -579,7 +632,13 @@ impl IO for UringIO {
}
continue;
}
- completion_from_key(user_data).complete(result)
+ if result < 0 {
+ let errno = -result;
+ let err = std::io::Error::from_raw_os_error(errno);
+ completion_from_key(user_data).error(err.into());
+ } else {
+ completion_from_key(user_data).complete(result)
+ }
}
}
From ccae3ab0f2f0f5f7976184833e509de273c523e1 Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Mon, 25 Aug 2025 16:23:00 -0400
Subject: [PATCH 10/66] Change callsites to cancel any further IO when an error
occurs and drain
---
core/io/io_uring.rs | 45 +++++++++++++++++++++++------
core/storage/btree.rs | 26 +++++++++--------
core/storage/pager.rs | 52 +++++++++++++++++-----------------
core/storage/sqlite3_ondisk.rs | 6 ++--
core/vdbe/sorter.rs | 13 +++++----
5 files changed, 88 insertions(+), 54 deletions(-)
diff --git a/core/io/io_uring.rs b/core/io/io_uring.rs
index 69bee24e3..e56221b65 100644
--- a/core/io/io_uring.rs
+++ b/core/io/io_uring.rs
@@ -490,7 +490,7 @@ impl WrappedIOUring {
completion_from_key(user_data).error(CompletionError::ShortWrite);
return;
}
- state.advance(written);
+ state.advance(written as u64);
match state.remaining() {
0 => {
@@ -569,22 +569,51 @@ impl IO for UringIO {
Ok(())
}
+ /// Drain runs the completion loop until the ring is empty.
+ /// To avoid re-locking the mutex just to check `ring.empty()` on each
+ /// iteration, we duplicate `run_once`'s loop body here instead of calling it.
fn drain(&self) -> Result<()> {
trace!("drain()");
+ let mut inner = self.inner.lock();
+ let ring = &mut inner.ring;
loop {
- {
- let inner = self.inner.borrow();
- if inner.ring.empty() {
- break;
+ ring.flush_overflow()?;
+ if ring.empty() {
+ return Ok(());
+ }
+ ring.submit_and_wait()?;
+ 'inner: loop {
+ let Some(cqe) = ring.ring.completion().next() else {
+ break 'inner;
+ };
+ ring.pending_ops -= 1;
+ let user_data = cqe.user_data();
+ if user_data == CANCEL_TAG {
+ // ignore if this is a cancellation CQE
+ continue 'inner;
+ }
+ let result = cqe.result();
+ turso_assert!(
+ user_data != 0,
+ "user_data must not be zero, we dont submit linked timeouts that would cause this"
+ );
+ if let Some(state) = ring.writev_states.remove(&user_data) {
+ // if we have ongoing writev state, handle it separately and don't call completion
+ ring.handle_writev_completion(state, user_data, result);
+ continue 'inner;
+ }
+ if result < 0 {
+ let errno = -result;
+ let err = std::io::Error::from_raw_os_error(errno);
+ completion_from_key(user_data).error(err.into());
+ } else {
+ completion_from_key(user_data).complete(result)
}
}
- self.run_once()?;
}
- Ok(())
}
fn cancel(&self, completions: &[Completion]) -> Result<()> {
- let mut inner = self.inner.borrow_mut();
+ let mut inner = self.inner.lock();
for c in completions {
c.abort();
let e = io_uring::opcode::AsyncCancel::new(get_key(c.clone()))
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index 9f90f9472..a89341974 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -2633,20 +2633,22 @@ impl BTreeCursor {
let current_sibling = sibling_pointer;
let mut completions: Vec<Completion> = Vec::with_capacity(current_sibling + 1);
for i in (0..=current_sibling).rev() {
- let (page, c) =
- btree_read_page(&self.pager, pgno as usize).inspect_err(|_| {
- for c in completions.iter() {
- c.abort();
+ match btree_read_page(&self.pager, pgno as usize) {
+ Err(e) => {
+ tracing::error!("error reading page {}: {}", pgno, e);
+ self.pager.io.cancel(&completions)?;
+ self.pager.io.drain()?;
+ return Err(e);
+ }
+ Ok((page, c)) => {
+ // mark as dirty
+ self.pager.add_dirty(&page);
+ pages_to_balance[i].replace(page);
+ if let Some(c) = c {
+ completions.push(c);
}
- })?;
- {
- // mark as dirty
- self.pager.add_dirty(&page);
+ }
}
- if let Some(c) = c {
- completions.push(c);
- }
- pages_to_balance[i].replace(page);
if i == 0 {
break;
}
diff --git a/core/storage/pager.rs b/core/storage/pager.rs
index 31eb980cd..c77aeb677 100644
--- a/core/storage/pager.rs
+++ b/core/storage/pager.rs
@@ -1275,34 +1275,36 @@ impl Pager {
};
pages.push(page);
if pages.len() == IOV_MAX {
- let c = wal
- .borrow_mut()
- .append_frames_vectored(
- std::mem::replace(
- &mut pages,
- Vec::with_capacity(std::cmp::min(IOV_MAX, dirty_pages.len() - idx)),
- ),
- page_sz,
- commit_frame,
- )
- .inspect_err(|_| {
- for c in completions.iter() {
- c.abort();
- }
- })?;
- completions.push(c);
+ match wal.borrow_mut().append_frames_vectored(
+ std::mem::replace(
+ &mut pages,
+ Vec::with_capacity(std::cmp::min(IOV_MAX, dirty_pages.len() - idx)),
+ ),
+ page_sz,
+ commit_frame,
+ ) {
+ Err(e) => {
+ self.io.cancel(&completions)?;
+ self.io.drain()?;
+ return Err(e);
+ }
+ Ok(c) => completions.push(c),
+ }
}
}
if !pages.is_empty() {
- let c = wal
+ match wal
.borrow_mut()
.append_frames_vectored(pages, page_sz, commit_frame)
- .inspect_err(|_| {
- for c in completions.iter() {
- c.abort();
- }
- })?;
- completions.push(c);
+ {
+ Ok(c) => completions.push(c),
+ Err(e) => {
+ tracing::error!("cacheflush: error appending frames: {e}");
+ self.io.cancel(&completions)?;
+ self.io.drain()?;
+ return Err(e);
+ }
+ }
}
Ok(completions)
}
@@ -1379,9 +1381,7 @@ impl Pager {
match r {
Ok(c) => completions.push(c),
Err(e) => {
- for c in &completions {
- c.abort();
- }
+ self.io.cancel(&completions)?;
return Err(e);
}
}
diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs
index 696d10d05..54854b516 100644
--- a/core/storage/sqlite3_ondisk.rs
+++ b/core/storage/sqlite3_ondisk.rs
@@ -1086,9 +1086,9 @@ pub fn write_pages_vectored(
if runs_left.fetch_sub(1, Ordering::AcqRel) == 1 {
done.store(true, Ordering::Release);
}
- for c in completions {
- c.abort();
- }
+ // cancel any submitted completions and drain the IO before returning an error
+ pager.io.cancel(&completions)?;
+ pager.io.drain()?;
return Err(e);
}
}
diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs
index d8a325dae..c44c36b16 100644
--- a/core/vdbe/sorter.rs
+++ b/core/vdbe/sorter.rs
@@ -238,12 +238,15 @@ impl Sorter {
InitChunkHeapState::Start => {
let mut completions: Vec<Completion> = Vec::with_capacity(self.chunks.len());
for chunk in self.chunks.iter_mut() {
- let c = chunk.read().inspect_err(|_| {
- for c in completions.iter() {
- c.abort();
+ match chunk.read() {
+ Err(e) => {
+ tracing::error!("Failed to read chunk: {e}");
+ self.io.cancel(&completions)?;
+ self.io.drain()?;
+ return Err(e);
}
- })?;
- completions.push(c);
+ Ok(c) => completions.push(c),
+ };
}
self.init_chunk_heap_state = InitChunkHeapState::PushChunk;
io_yield_many!(completions);
From 8a6667a82982cb880cb6a65ef6a9a9579f54f122 Mon Sep 17 00:00:00 2001
From: TcMits
Date: Tue, 9 Sep 2025 16:23:08 +0700
Subject: [PATCH 11/66] refactor cli: will write to
---
cli/app.rs | 77 ++++++++++++++++++++++-------------------------------
cli/main.rs | 14 +++++++---
2 files changed, 42 insertions(+), 49 deletions(-)
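Note: the refactor splits the REPL into `readline` (append raw input to `input_buff`) and `consume` (decide what the buffer now holds): a leading '.' runs a dot-command, a trailing ';' runs the SQL, and anything else keeps buffering behind the multiline prompt; the follow-up patch adds a `flush` flag so EOF executes whatever remains. A minimal model of that contract (hypothetical free function, not the CLI's method):

    // Returns Some(command) when the buffer is ready to execute, clearing it;
    // None means "keep buffering and show the multiline prompt".
    fn consume(input_buff: &mut String, flush: bool) -> Option<String> {
        let value = input_buff.trim();
        if value.is_empty() {
            return None;
        }
        if value.starts_with('.') || value.ends_with(';') || flush {
            let cmd = value.to_string();
            input_buff.clear(); // reset_input()
            return Some(cmd);
        }
        None // incomplete statement: set_multiline_prompt()
    }

    fn main() {
        let mut buf = String::from("SELECT 1 ");
        assert_eq!(consume(&mut buf, false), None); // keep reading lines
        buf.push_str("+ 1; ");
        assert_eq!(consume(&mut buf, false).as_deref(), Some("SELECT 1 + 1;"));
        let mut rest = String::from("SELECT 2 ");
        assert_eq!(consume(&mut rest, true).as_deref(), Some("SELECT 2")); // EOF flush
    }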
diff --git a/cli/app.rs b/cli/app.rs
index f72e2a6c6..54c282749 100644
--- a/cli/app.rs
+++ b/cli/app.rs
@@ -375,11 +375,6 @@ impl Limbo {
self.writer.as_mut().unwrap().write_all(b"\n")
}
- fn buffer_input(&mut self, line: &str) {
- self.input_buff.push_str(line);
- self.input_buff.push(' ');
- }
-
fn run_query(&mut self, input: &str) {
let echo = self.opts.echo;
if echo {
@@ -481,34 +476,38 @@ impl Limbo {
}
}
- fn reset_line(&mut self, _line: &str) -> rustyline::Result<()> {
+ fn reset_line(&mut self) -> rustyline::Result<()> {
// Entry is auto added to history
// self.rl.add_history_entry(line.to_owned())?;
self.interrupt_count.store(0, Ordering::Release);
Ok(())
}
- pub fn handle_input_line(&mut self, line: &str) -> anyhow::Result<()> {
- if self.input_buff.is_empty() {
- if line.is_empty() {
- return Ok(());
// consume processes whatever is buffered in `input_buff`
+ pub fn consume(&mut self) -> anyhow::Result<()> {
+ if self.input_buff.trim().is_empty() {
+ return Ok(());
+ }
+
+ self.reset_line()?;
+
+ // SAFETY: `input_buff` is not reset until after the command has been handled
+ let value: &'static str =
+ unsafe { std::mem::transmute::<&str, &'static str>(self.input_buff.as_str()) }.trim();
+ match (value.starts_with('.'), value.ends_with(';')) {
+ (true, _) => {
+ self.handle_dot_command(value.strip_prefix('.').unwrap());
+ self.reset_input();
}
- if let Some(command) = line.strip_prefix('.') {
- self.handle_dot_command(command);
- let _ = self.reset_line(line);
- return Ok(());
+ (false, true) => {
+ self.run_query(value);
+ self.reset_input();
+ }
+ (false, false) => {
+ self.set_multiline_prompt();
}
}
- self.reset_line(line)?;
- if line.ends_with(';') {
- self.buffer_input(line);
- let buff = self.input_buff.clone();
- self.run_query(buff.as_str());
- } else {
- self.buffer_input(format!("{line}\n").as_str());
- self.set_multiline_prompt();
- }
Ok(())
}
@@ -1331,35 +1330,23 @@ impl Limbo {
Ok(())
}
- pub fn handle_remaining_input(&mut self) {
- if self.input_buff.is_empty() {
- return;
- }
+ // readline reads input from rustyline or stdin
+ // and appends it to input_buff.
+ pub fn readline(&mut self) -> Result<(), ReadlineError> {
+ use std::fmt::Write;
- let buff = self.input_buff.clone();
- self.run_query(buff.as_str());
- self.reset_input();
- }
-
- pub fn readline(&mut self) -> Result<String, ReadlineError> {
if let Some(rl) = &mut self.rl {
- Ok(rl.readline(&self.prompt)?)
+ let result = rl.readline(&self.prompt)?;
+ let _ = self.input_buff.write_str(result.as_str());
} else {
- let mut input = String::new();
let mut reader = std::io::stdin().lock();
- if reader.read_line(&mut input)? == 0 {
+ if reader.read_line(&mut self.input_buff)? == 0 {
return Err(ReadlineError::Eof);
}
- // Remove trailing newline
- if input.ends_with('\n') {
- input.pop();
- if input.ends_with('\r') {
- input.pop();
- }
- }
-
- Ok(input)
}
+
+ let _ = self.input_buff.write_char(' ');
+ Ok(())
}
pub fn dump_database_from_conn(
diff --git a/cli/main.rs b/cli/main.rs
index a2df75cba..d1ea807e3 100644
--- a/cli/main.rs
+++ b/cli/main.rs
@@ -63,9 +63,8 @@ fn main() -> anyhow::Result<()> {
}
loop {
- let readline = app.readline();
- match readline {
- Ok(line) => match app.handle_input_line(line.trim()) {
+ match app.readline() {
+ Ok(_) => match app.consume() {
Ok(_) => {}
Err(e) => {
eprintln!("{e}");
@@ -83,7 +82,14 @@ fn main() -> anyhow::Result<()> {
continue;
}
Err(ReadlineError::Eof) => {
- app.handle_remaining_input();
+ // consume remaining input before exit
+ match app.consume() {
+ Ok(_) => {}
+ Err(e) => {
+ eprintln!("{e}");
+ }
+ };
+
let _ = app.close_conn();
break;
}
From 048e72abf517fd9fb7d7ae66c1eb533d2897d797 Mon Sep 17 00:00:00 2001
From: TcMits
Date: Tue, 9 Sep 2025 16:27:31 +0700
Subject: [PATCH 12/66] consume remaining
---
cli/app.rs | 6 +++++-
cli/main.rs | 4 ++--
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/cli/app.rs b/cli/app.rs
index 54c282749..92371f5b1 100644
--- a/cli/app.rs
+++ b/cli/app.rs
@@ -484,7 +484,7 @@ impl Limbo {
}
// consume processes whatever is buffered in `input_buff`
- pub fn consume(&mut self) -> anyhow::Result<()> {
+ pub fn consume(&mut self, flush: bool) -> anyhow::Result<()> {
if self.input_buff.trim().is_empty() {
return Ok(());
}
@@ -503,6 +503,10 @@ impl Limbo {
self.run_query(value);
self.reset_input();
}
+ (false, false) if flush => {
+ self.run_query(value);
+ self.reset_input();
+ }
(false, false) => {
self.set_multiline_prompt();
}
diff --git a/cli/main.rs b/cli/main.rs
index d1ea807e3..affb73888 100644
--- a/cli/main.rs
+++ b/cli/main.rs
@@ -64,7 +64,7 @@ fn main() -> anyhow::Result<()> {
loop {
match app.readline() {
- Ok(_) => match app.consume() {
+ Ok(_) => match app.consume(false) {
Ok(_) => {}
Err(e) => {
eprintln!("{e}");
@@ -83,7 +83,7 @@ fn main() -> anyhow::Result<()> {
}
Err(ReadlineError::Eof) => {
// consume remaining input before exit
- match app.consume() {
+ match app.consume(true) {
Ok(_) => {}
Err(e) => {
eprintln!("{e}");
From 745feb131f65517ebd0cc67509bd60636d5a7ac4 Mon Sep 17 00:00:00 2001
From: Mayank Verma
Date: Wed, 10 Sep 2025 02:17:12 +0530
Subject: [PATCH 13/66] add Connection.reconnect() to serverless driver
---
packages/turso-serverless/src/connection.ts | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/packages/turso-serverless/src/connection.ts b/packages/turso-serverless/src/connection.ts
index 0c0082eb3..efafcf20d 100644
--- a/packages/turso-serverless/src/connection.ts
+++ b/packages/turso-serverless/src/connection.ts
@@ -223,6 +223,17 @@ export class Connection {
this.isOpen = false;
await this.session.close();
}
+
+ reconnect(): void {
+ try {
+ if (this.isOpen) {
+ this.close();
+ }
+ } finally {
+ this.session = new Session(this.config);
+ this.isOpen = true;
+ }
+ }
}
/**
From aa38c9e099e0ab18a5ee7620cf4d4378c361bb5f Mon Sep 17 00:00:00 2001
From: Mayank Verma
Date: Wed, 10 Sep 2025 13:08:06 +0530
Subject: [PATCH 14/66] make reconnect() async
---
packages/turso-serverless/src/connection.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/packages/turso-serverless/src/connection.ts b/packages/turso-serverless/src/connection.ts
index efafcf20d..7b825cc33 100644
--- a/packages/turso-serverless/src/connection.ts
+++ b/packages/turso-serverless/src/connection.ts
@@ -224,7 +224,7 @@ export class Connection {
await this.session.close();
}
- reconnect(): void {
+ async reconnect(): Promise<void> {
try {
if (this.isOpen) {
this.close();
From 8161badbf42fa3c36cac0c242fa2ccca31fedc48 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Wed, 10 Sep 2025 07:22:17 +0300
Subject: [PATCH 15/66] core/vdbe: Don't rollback transaction when write
upgrade fails
If the upgrade from a read to a write transaction fails, don't roll back
the transaction. Instead, restore the transaction to its original state,
which allows deferred transactions that have not read anything to
restart automatically.
Fixes #2984
---
core/vdbe/execute.rs | 11 ++-
tests/integration/fuzz_transaction/mod.rs | 27 +----
.../query_processing/test_transactions.rs | 99 +++++++++++++++++++
3 files changed, 108 insertions(+), 29 deletions(-)
diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs
index d5697b73c..86ae514a4 100644
--- a/core/vdbe/execute.rs
+++ b/core/vdbe/execute.rs
@@ -2187,9 +2187,14 @@ pub fn op_transaction(
match pager.begin_write_tx()? {
IOResult::Done(r) => {
if let LimboResult::Busy = r {
- pager.end_read_tx()?;
- conn.transaction_state.replace(TransactionState::None);
- conn.auto_commit.replace(true);
+ // We failed to upgrade to write transaction so put the transaction into its original state.
+ // That is, if the transaction had not started, end the read transaction so that next time we
+ // start a new one.
+ if matches!(current_state, TransactionState::None) {
+ pager.end_read_tx()?;
+ conn.transaction_state.replace(TransactionState::None);
+ }
+ assert_eq!(conn.transaction_state.get(), current_state);
return Ok(InsnFunctionStepResult::Busy);
}
}
diff --git a/tests/integration/fuzz_transaction/mod.rs b/tests/integration/fuzz_transaction/mod.rs
index 1b61daac0..006ae7225 100644
--- a/tests/integration/fuzz_transaction/mod.rs
+++ b/tests/integration/fuzz_transaction/mod.rs
@@ -572,11 +572,6 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
}
Err(e) => {
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
- if let Some(tx_id) = *current_tx_id {
- shared_shadow_db.rollback_transaction(tx_id);
- *current_tx_id = None;
- }
-
// Check if it's an acceptable error
if !e.to_string().contains("database is locked") {
panic!("Unexpected error during commit: {e}");
@@ -597,9 +592,6 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
}
Err(e) => {
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
- shared_shadow_db.rollback_transaction(tx_id);
- *current_tx_id = None;
-
// Check if it's an acceptable error
if !e.to_string().contains("Busy")
&& !e.to_string().contains("database is locked")
@@ -646,10 +638,6 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
}
Err(e) => {
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
- if let Some(tx_id) = *current_tx_id {
- shared_shadow_db.rollback_transaction(tx_id);
- *current_tx_id = None;
- }
// Check if it's an acceptable error
if !e.to_string().contains("database is locked") {
panic!("Unexpected error during insert: {e}");
@@ -687,10 +675,6 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
}
Err(e) => {
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
- if let Some(tx_id) = *current_tx_id {
- shared_shadow_db.rollback_transaction(tx_id);
- *current_tx_id = None;
- }
// Check if it's an acceptable error
if !e.to_string().contains("database is locked") {
panic!("Unexpected error during update: {e}");
@@ -723,10 +707,6 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
}
Err(e) => {
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
- if let Some(tx_id) = *current_tx_id {
- shared_shadow_db.rollback_transaction(tx_id);
- *current_tx_id = None;
- }
// Check if it's an acceptable error
if !e.to_string().contains("database is locked") {
panic!("Unexpected error during delete: {e}");
@@ -803,12 +783,7 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
Err(e) => {
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
// Check if it's an acceptable error
- if e.to_string().contains("database is locked") {
- if let Some(tx_id) = *current_tx_id {
- shared_shadow_db.rollback_transaction(tx_id);
- *current_tx_id = None;
- }
- } else {
+ if !e.to_string().contains("database is locked") {
panic!("Unexpected error during alter table: {e}");
}
}
diff --git a/tests/integration/query_processing/test_transactions.rs b/tests/integration/query_processing/test_transactions.rs
index d621edf45..bb1f2e77b 100644
--- a/tests/integration/query_processing/test_transactions.rs
+++ b/tests/integration/query_processing/test_transactions.rs
@@ -2,6 +2,105 @@ use turso_core::{LimboError, Result, StepResult, Value};
use crate::common::TempDatabase;
+// Test a scenario where there are two concurrent deferred transactions:
+//
+// 1. Both transactions T1 and T2 start at the same time.
+// 2. T1 writes to the database successfully, but does not commit.
+// 3. T2 attempts to write to the database, but gets a busy error.
+// 4. T1 commits
+// 5. T2 attempts to write again and succeeds. This is because the transaction
+// was still fresh (no reads or writes happened).
+#[test]
+fn test_deferred_transaction_restart() {
+ let tmp_db = TempDatabase::new("test_deferred_tx.db", true);
+ let conn1 = tmp_db.connect_limbo();
+ let conn2 = tmp_db.connect_limbo();
+
+ conn1
+ .execute("CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)")
+ .unwrap();
+
+ conn1.execute("BEGIN").unwrap();
+ conn2.execute("BEGIN").unwrap();
+
+ conn1
+ .execute("INSERT INTO test (id, value) VALUES (1, 'first')")
+ .unwrap();
+
+ let result = conn2.execute("INSERT INTO test (id, value) VALUES (2, 'second')");
+ assert!(matches!(result, Err(LimboError::Busy)));
+
+ conn1.execute("COMMIT").unwrap();
+
+ conn2
+ .execute("INSERT INTO test (id, value) VALUES (2, 'second')")
+ .unwrap();
+ conn2.execute("COMMIT").unwrap();
+
+ let mut stmt = conn1.query("SELECT COUNT(*) FROM test").unwrap().unwrap();
+ if let StepResult::Row = stmt.step().unwrap() {
+ let row = stmt.row().unwrap();
+ assert_eq!(*row.get::<&Value>(0).unwrap(), Value::Integer(2));
+ }
+}
+
+// Test a scenario where a deferred transaction cannot restart due to prior reads:
+//
+// 1. Both transactions T1 and T2 start at the same time.
+// 2. T2 performs a SELECT (establishes a read snapshot).
+// 3. T1 writes to the database successfully, but does not commit.
+// 4. T2 attempts to write to the database, but gets a busy error.
+// 5. T1 commits (invalidating T2's snapshot).
+// 6. T2 attempts to write again but still gets BUSY - it cannot restart
+// because it has performed reads and has a committed snapshot.
+#[test]
+fn test_deferred_transaction_no_restart() {
+ let tmp_db = TempDatabase::new("test_deferred_tx_no_restart.db", true);
+ let conn1 = tmp_db.connect_limbo();
+ let conn2 = tmp_db.connect_limbo();
+
+ conn1
+ .execute("CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)")
+ .unwrap();
+
+ conn1.execute("BEGIN").unwrap();
+ conn2.execute("BEGIN").unwrap();
+
+ // T2 performs a read - this establishes a snapshot and prevents restart
+ let mut stmt = conn2.query("SELECT COUNT(*) FROM test").unwrap().unwrap();
+ if let StepResult::Row = stmt.step().unwrap() {
+ let row = stmt.row().unwrap();
+ assert_eq!(*row.get::<&Value>(0).unwrap(), Value::Integer(0));
+ }
+
+ conn1
+ .execute("INSERT INTO test (id, value) VALUES (1, 'first')")
+ .unwrap();
+
+ let result = conn2.execute("INSERT INTO test (id, value) VALUES (2, 'second')");
+ assert!(matches!(result, Err(LimboError::Busy)));
+
+ conn1.execute("COMMIT").unwrap();
+
+ // T2 still cannot write because its snapshot is stale and it cannot restart
+ let result = conn2.execute("INSERT INTO test (id, value) VALUES (2, 'second')");
+ assert!(matches!(result, Err(LimboError::Busy)));
+
+ // T2 must rollback and start fresh
+ conn2.execute("ROLLBACK").unwrap();
+ conn2.execute("BEGIN").unwrap();
+ conn2
+ .execute("INSERT INTO test (id, value) VALUES (2, 'second')")
+ .unwrap();
+ conn2.execute("COMMIT").unwrap();
+
+ let mut stmt = conn1.query("SELECT COUNT(*) FROM test").unwrap().unwrap();
+ if let StepResult::Row = stmt.step().unwrap() {
+ let row = stmt.row().unwrap();
+ assert_eq!(*row.get::<&Value>(0).unwrap(), Value::Integer(2));
+ }
+}
+
#[test]
fn test_txn_error_doesnt_rollback_txn() -> Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table t (x);", false);
From 12104df16a3356c602c87c2eea8ed1c05e923385 Mon Sep 17 00:00:00 2001
From: Mayank Verma
Date: Wed, 10 Sep 2025 13:42:21 +0530
Subject: [PATCH 16/66] await on .close()
---
packages/turso-serverless/src/connection.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/packages/turso-serverless/src/connection.ts b/packages/turso-serverless/src/connection.ts
index 7b825cc33..40eea184e 100644
--- a/packages/turso-serverless/src/connection.ts
+++ b/packages/turso-serverless/src/connection.ts
@@ -227,7 +227,7 @@ export class Connection {
async reconnect(): Promise<void> {
try {
if (this.isOpen) {
- this.close();
+ await this.close();
}
} finally {
this.session = new Session(this.config);
From a874530db8205c06182207848eac6f2e144a3e99 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 11:01:18 +0300
Subject: [PATCH 17/66] Fix semantics of transaction isolation test
- Busy errors do not roll back the transaction
- A transaction takes its snapshot of the DB state at its first successful
access of the DB, not before its first attempt to access the DB
---
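Note: BEGIN DEFERRED semantics mean the shadow DB must take a transaction's
snapshot lazily, at its first successful database access, and the call has to
be idempotent because any of several operations may happen to be that first
access. A simplified sketch of the rule, with stand-in types for the fuzzer's
ShadowDb:

use std::collections::HashMap;

// Stand-ins for the fuzzer's state; names and fields are illustrative.
struct Snapshot {
    visible_rows: Vec<i64>,
}
struct ShadowDb {
    committed_rows: Vec<i64>,
    // None until the transaction's first successful access.
    transactions: HashMap<usize, Option<Snapshot>>,
}

impl ShadowDb {
    // Mirrors take_snapshot_if_not_exists below: idempotent, and called
    // after every successful DB access instead of at BEGIN.
    fn take_snapshot_if_not_exists(&mut self, tx_id: usize) {
        if let Some(tx_state) = self.transactions.get_mut(&tx_id) {
            if tx_state.is_none() {
                *tx_state = Some(Snapshot {
                    visible_rows: self.committed_rows.clone(),
                });
            }
        }
    }
}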
tests/integration/fuzz_transaction/mod.rs | 46 ++++++++++++++---------
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/tests/integration/fuzz_transaction/mod.rs b/tests/integration/fuzz_transaction/mod.rs
index 006ae7225..2d83b36ac 100644
--- a/tests/integration/fuzz_transaction/mod.rs
+++ b/tests/integration/fuzz_transaction/mod.rs
@@ -86,9 +86,12 @@ impl ShadowDb {
);
}
- fn take_snapshot(&mut self, tx_id: usize) {
+ fn take_snapshot_if_not_exists(&mut self, tx_id: usize) {
if let Some(tx_state) = self.transactions.get_mut(&tx_id) {
- assert!(tx_state.is_none());
+ if tx_state.is_some() {
+ // tx already has snapshot
+ return;
+ }
tx_state.replace(TransactionState {
schema: self.schema.clone(),
visible_rows: self.committed_rows.clone(),
@@ -547,7 +550,11 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
let (operation, visible_rows) =
generate_operation(&mut rng, *current_tx_id, &mut shared_shadow_db);
- println!("Connection {conn_id}(op={op_num}): {operation}");
+ let is_in_tx = current_tx_id.is_some();
+ let has_snapshot = current_tx_id.is_some_and(|tx_id| {
+ shared_shadow_db.transactions.get(&tx_id).unwrap().is_some()
+ });
+ println!("Connection {conn_id}(op={op_num}): {operation}, is_in_tx={is_in_tx}, has_snapshot={has_snapshot}");
match operation {
Operation::Begin => {
@@ -622,6 +629,7 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
Ok(_) => {
// Success - update shadow DB
if let Some(tx_id) = *current_tx_id {
+ shared_shadow_db.take_snapshot_if_not_exists(tx_id);
// In transaction - update transaction's view
shared_shadow_db
.insert(tx_id, id, other_columns.clone())
@@ -659,6 +667,7 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
Ok(_) => {
// Success - update shadow DB
if let Some(tx_id) = *current_tx_id {
+ shared_shadow_db.take_snapshot_if_not_exists(tx_id);
// In transaction - update transaction's view
shared_shadow_db
.update(tx_id, id, other_columns.clone())
@@ -695,6 +704,7 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
Ok(_) => {
// Success - update shadow DB
if let Some(tx_id) = *current_tx_id {
+ shared_shadow_db.take_snapshot_if_not_exists(tx_id);
// In transaction - update transaction's view
shared_shadow_db.delete(tx_id, id).unwrap();
} else {
@@ -720,6 +730,10 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
let columns = stmt.columns();
let mut rows = stmt.query(()).await.unwrap();
+ if let Some(tx_id) = *current_tx_id {
+ shared_shadow_db.take_snapshot_if_not_exists(tx_id);
+ }
+
let mut real_rows = Vec::new();
while let Some(row) = rows.next().await.unwrap() {
let Value::Integer(id) = row.get_value(0).unwrap() else {
@@ -768,6 +782,7 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
match result {
Ok(_) => {
if let Some(tx_id) = *current_tx_id {
+ shared_shadow_db.take_snapshot_if_not_exists(tx_id);
// In transaction - update transaction's view
shared_shadow_db.alter_table(tx_id, op).unwrap();
} else {
@@ -838,13 +853,8 @@ fn generate_operation(
} else {
shadow_db.schema.clone()
};
- let mut get_visible_rows = |accesses_db: bool| {
+ let get_visible_rows = || {
if let Some(tx_id) = current_tx_id {
- let tx_state = shadow_db.transactions.get(&tx_id).unwrap();
- // Take snapshot during first operation that accesses the DB after a BEGIN, not immediately at BEGIN (the semantics is BEGIN DEFERRED)
- if accesses_db && tx_state.is_none() {
- shadow_db.take_snapshot(tx_id);
- }
shadow_db.get_visible_rows(Some(tx_id))
} else {
shadow_db.get_visible_rows(None) // No transaction
@@ -853,9 +863,9 @@ fn generate_operation(
match rng.random_range(0..100) {
0..=9 => {
if !in_transaction {
- (Operation::Begin, get_visible_rows(false))
+ (Operation::Begin, get_visible_rows())
} else {
- let visible_rows = get_visible_rows(true);
+ let visible_rows = get_visible_rows();
(
generate_data_operation(rng, &visible_rows, &schema_clone),
visible_rows,
@@ -864,9 +874,9 @@ fn generate_operation(
}
10..=14 => {
if in_transaction {
- (Operation::Commit, get_visible_rows(false))
+ (Operation::Commit, get_visible_rows())
} else {
- let visible_rows = get_visible_rows(true);
+ let visible_rows = get_visible_rows();
(
generate_data_operation(rng, &visible_rows, &schema_clone),
visible_rows,
@@ -875,9 +885,9 @@ fn generate_operation(
}
15..=19 => {
if in_transaction {
- (Operation::Rollback, get_visible_rows(false))
+ (Operation::Rollback, get_visible_rows())
} else {
- let visible_rows = get_visible_rows(true);
+ let visible_rows = get_visible_rows();
(
generate_data_operation(rng, &visible_rows, &schema_clone),
visible_rows,
@@ -892,7 +902,7 @@ fn generate_operation(
3 => CheckpointMode::Full,
_ => unreachable!(),
};
- (Operation::Checkpoint { mode }, get_visible_rows(false))
+ (Operation::Checkpoint { mode }, get_visible_rows())
}
23..=26 => {
let op = match rng.random_range(0..6) {
@@ -942,10 +952,10 @@ fn generate_operation(
}
_ => unreachable!(),
};
- (Operation::AlterTable { op }, get_visible_rows(true))
+ (Operation::AlterTable { op }, get_visible_rows())
}
_ => {
- let visible_rows = get_visible_rows(true);
+ let visible_rows = get_visible_rows();
(
generate_data_operation(rng, &visible_rows, &schema_clone),
visible_rows,
From dbcd01bf8bd0708b6b288fb1b91af82811d72cdc Mon Sep 17 00:00:00 2001
From: TcMits
Date: Wed, 10 Sep 2025 15:56:20 +0700
Subject: [PATCH 18/66] make consume safer
---
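Note: the old code transmuted &str to 'static to sidestep borrowing self
while a command handler ran. The obvious safe alternative is to move the
buffer out by value, sketched below with stand-in types (std::mem::take
leaves an empty String behind); this patch instead splits the raw allocation
in two so the buffer's capacity survives the call, guarded by ManuallyDrop:

// Minimal stand-in for the CLI app; fields and methods are illustrative.
struct App {
    input_buff: String,
}

impl App {
    fn handle_dot_command(&mut self, _cmd: &str) {}
    fn run_query(&mut self, _sql: &str) {}

    // Move the buffer out with std::mem::take so no borrow of self is
    // outstanding while the handler runs; the allocation is dropped at
    // the end, which is the cost the raw-split approach in this patch avoids.
    fn consume(&mut self) {
        let owned = std::mem::take(&mut self.input_buff);
        let value = owned.trim();
        match value.strip_prefix('.') {
            Some(cmd) => self.handle_dot_command(cmd),
            None => self.run_query(value),
        }
    }
}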
cli/app.rs | 58 +++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 47 insertions(+), 11 deletions(-)
diff --git a/cli/app.rs b/cli/app.rs
index 92371f5b1..d1dcefa70 100644
--- a/cli/app.rs
+++ b/cli/app.rs
@@ -19,6 +19,7 @@ use comfy_table::{Attribute, Cell, CellAlignment, ContentArrangement, Row, Table
use rustyline::{error::ReadlineError, history::DefaultHistory, Editor};
use std::{
io::{self, BufRead as _, IsTerminal, Write},
+ mem::{forget, ManuallyDrop},
path::PathBuf,
sync::{
atomic::{AtomicUsize, Ordering},
@@ -82,7 +83,7 @@ pub struct Limbo {
writer: Option>,
conn: Arc,
pub interrupt_count: Arc<AtomicUsize>,
- input_buff: String,
+ input_buff: ManuallyDrop<String>,
opts: Settings,
pub rl: Option>,
config: Option,
@@ -157,7 +158,7 @@ impl Limbo {
writer: Some(get_writer(&opts.output)),
conn,
interrupt_count,
- input_buff: String::new(),
+ input_buff: ManuallyDrop::new(String::new()),
opts: Settings::from(opts),
rl: None,
config: Some(config),
@@ -431,8 +432,6 @@ impl Limbo {
let _ = self.writeln(output);
}
}
-
- self.reset_input();
}
fn print_query_performance_stats(&mut self, start: Instant, stats: QueryStatistics) {
@@ -491,20 +490,54 @@ impl Limbo {
self.reset_line()?;
- // SAFETY: we don't reset input after we handle the command
- let value: &'static str =
- unsafe { std::mem::transmute::<&str, &'static str>(self.input_buff.as_str()) }.trim();
+ // we take ownership of input_buff here;
+ // it's always safe because we split the string into two parts
+ fn take_usable_part(app: &mut Limbo) -> (String, usize) {
+ let ptr = app.input_buff.as_mut_ptr();
+ let (len, cap) = (app.input_buff.len(), app.input_buff.capacity());
+ app.input_buff =
+ ManuallyDrop::new(unsafe { String::from_raw_parts(ptr.add(len), 0, cap - len) });
+ (unsafe { String::from_raw_parts(ptr, len, len) }, unsafe {
+ ptr.add(len).addr()
+ })
+ }
+
+ fn concat_usable_part(app: &mut Limbo, mut part: String, old_address: usize) {
+ let ptr = app.input_buff.as_mut_ptr();
+ let (len, cap) = (app.input_buff.len(), app.input_buff.capacity());
+
+ // if the address is not the same, the string has been reallocated,
+ // so we just drop the part we took earlier
+ if ptr.addr() != old_address {
+ return;
+ }
+
+ let head_ptr = part.as_mut_ptr();
+ let (head_len, head_cap) = (part.len(), part.capacity());
+ forget(part); // move this part into `input_buff`
+ app.input_buff = ManuallyDrop::new(unsafe {
+ String::from_raw_parts(head_ptr, head_len + len, head_cap + cap)
+ });
+ }
+
+ let value = self.input_buff.trim();
match (value.starts_with('.'), value.ends_with(';')) {
(true, _) => {
- self.handle_dot_command(value.strip_prefix('.').unwrap());
+ let (owned_value, old_address) = take_usable_part(self);
+ self.handle_dot_command(owned_value.trim().strip_prefix('.').unwrap());
+ concat_usable_part(self, owned_value, old_address);
self.reset_input();
}
(false, true) => {
- self.run_query(value);
+ let (owned_value, old_address) = take_usable_part(self);
+ self.run_query(owned_value.trim());
+ concat_usable_part(self, owned_value, old_address);
self.reset_input();
}
(false, false) if flush => {
- self.run_query(value);
+ let (owned_value, old_address) = take_usable_part(self);
+ self.run_query(owned_value.trim());
+ concat_usable_part(self, owned_value, old_address);
self.reset_input();
}
(false, false) => {
@@ -1645,6 +1678,9 @@ fn sql_quote_string(s: &str) -> String {
}
impl Drop for Limbo {
fn drop(&mut self) {
- self.save_history()
+ self.save_history();
+ unsafe {
+ ManuallyDrop::drop(&mut self.input_buff);
+ }
}
}
From 65f5fbd1f64c1cca2dc64d71ccf83fc79358de73 Mon Sep 17 00:00:00 2001
From: TcMits
Date: Wed, 10 Sep 2025 16:31:12 +0700
Subject: [PATCH 19/66] no errors in consume
---
cli/app.rs | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/cli/app.rs b/cli/app.rs
index 298891f38..2f0df3d61 100644
--- a/cli/app.rs
+++ b/cli/app.rs
@@ -546,20 +546,19 @@ impl Limbo {
}
}
- fn reset_line(&mut self) -> rustyline::Result<()> {
+ fn reset_line(&mut self) {
// Entry is auto added to history
// self.rl.add_history_entry(line.to_owned())?;
self.interrupt_count.store(0, Ordering::Release);
- Ok(())
}
// consume will consume `input_buff`
- pub fn consume(&mut self, flush: bool) -> anyhow::Result<()> {
+ pub fn consume(&mut self, flush: bool) {
if self.input_buff.trim().is_empty() {
- return Ok(());
+ return;
}
- self.reset_line()?;
+ self.reset_line();
// we take ownership of input_buff here;
// it's always safe because we split the string into two parts
@@ -615,8 +614,6 @@ impl Limbo {
self.set_multiline_prompt();
}
}
-
- Ok(())
}
pub fn handle_dot_command(&mut self, line: &str) {
From 688dc6dde32d06d2032e7ef1abd56a0d8d2ba88c Mon Sep 17 00:00:00 2001
From: TcMits
Date: Wed, 10 Sep 2025 16:31:57 +0700
Subject: [PATCH 20/66] minor
---
cli/main.rs | 15 ++-------------
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/cli/main.rs b/cli/main.rs
index affb73888..de4c6c681 100644
--- a/cli/main.rs
+++ b/cli/main.rs
@@ -64,12 +64,7 @@ fn main() -> anyhow::Result<()> {
loop {
match app.readline() {
- Ok(_) => match app.consume(false) {
- Ok(_) => {}
- Err(e) => {
- eprintln!("{e}");
- }
- },
+ Ok(_) => app.consume(false),
Err(ReadlineError::Interrupted) => {
// At prompt, increment interrupt count
if app.interrupt_count.fetch_add(1, Ordering::SeqCst) >= 1 {
@@ -83,13 +78,7 @@ fn main() -> anyhow::Result<()> {
}
Err(ReadlineError::Eof) => {
// consume remaining input before exit
- match app.consume(true) {
- Ok(_) => {}
- Err(e) => {
- eprintln!("{e}");
- }
- };
-
+ app.consume(true);
let _ = app.close_conn();
break;
}
From eeef8b85fa5fefa1dda780ab59b6f5d292ea1127 Mon Sep 17 00:00:00 2001
From: TcMits
Date: Wed, 10 Sep 2025 16:54:51 +0700
Subject: [PATCH 21/66] always use consume instead of run_query,
handle_dot_command
---
cli/app.rs | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/cli/app.rs b/cli/app.rs
index 2f0df3d61..bcaf6bbf1 100644
--- a/cli/app.rs
+++ b/cli/app.rs
@@ -150,7 +150,7 @@ macro_rules! row_step_result_query {
impl Limbo {
pub fn new() -> anyhow::Result<(Self, WorkerGuard)> {
- let opts = Opts::parse();
+ let mut opts = Opts::parse();
let guard = Self::init_tracing(&opts)?;
let db_file = opts
@@ -203,7 +203,8 @@ impl Limbo {
})
.expect("Error setting Ctrl-C handler");
}
- let sql = opts.sql.clone();
+ let sql = opts.sql.take();
+ let has_sql = sql.is_some();
let quiet = opts.quiet;
let config = Config::for_output_mode(opts.output_mode);
let mut app = Self {
@@ -212,12 +213,12 @@ impl Limbo {
writer: Some(get_writer(&opts.output)),
conn,
interrupt_count,
- input_buff: ManuallyDrop::new(String::new()),
+ input_buff: ManuallyDrop::new(sql.unwrap_or_default()),
opts: Settings::from(opts),
rl: None,
config: Some(config),
};
- app.first_run(sql, quiet)?;
+ app.first_run(has_sql, quiet)?;
Ok((app, guard))
}
@@ -236,14 +237,14 @@ impl Limbo {
self
}
- fn first_run(&mut self, sql: Option<String>, quiet: bool) -> Result<(), LimboError> {
+ fn first_run(&mut self, has_sql: bool, quiet: bool) -> Result<(), LimboError> {
// Skip startup messages and SQL execution in MCP mode
if self.is_mcp_mode() {
return Ok(());
}
- if let Some(sql) = sql {
- self.handle_first_input(&sql)?;
+ if has_sql {
+ self.handle_first_input()?;
}
if !quiet {
self.writeln_fmt(format_args!("Turso v{}", env!("CARGO_PKG_VERSION")))?;
@@ -256,12 +257,8 @@ impl Limbo {
Ok(())
}
- fn handle_first_input(&mut self, cmd: &str) -> Result<(), LimboError> {
- if cmd.trim().starts_with('.') {
- self.handle_dot_command(&cmd[1..]);
- } else {
- self.run_query(cmd);
- }
+ fn handle_first_input(&mut self) -> Result<(), LimboError> {
+ self.consume(true);
self.close_conn()?;
std::process::exit(0);
}
From f3d1c8c1a4da1cda74569a45c290c88f93f17464 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Wed, 10 Sep 2025 13:30:04 +0300
Subject: [PATCH 22/66] Update CHANGELOG
---
CHANGELOG.md | 180 +++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 180 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5bd406681..405138435 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,185 @@
# Changelog
+## 0.1.5 -- 2025-09-10
+
+### Added
+
+* add missing module type for browser package (Nikita Sivukhin)
+* Implement 2-args json_each (Mikaël Francoeur)
+* Add OPFS support to JavaScript bindings (Nikita Sivukhin)
+* test/fuzz: add UPDATE/DELETE fuzz test (Jussi Saurio)
+* add gen-bigass-database.py (Jussi Saurio)
+* Add assertion: we read a page with the correct id (Jussi Saurio)
+* support float without fractional part (Lâm Hoàng Phúc)
+* expr: use more efficient implementation for binary condition exprs (Jussi Saurio)
+* Add json_each table-valued function (1-arg only) (Mikaël Francoeur)
+* Add io_uring support to stress (Pekka Enberg)
+* Refactor LIMIT/OFFSET handling to support expressions (bit-aloo)
+* Encryption: add support for other AEGIS and AES-GCM cipher variants (Frank Denis)
+* introduce package.json for separate *-browser package (both database and sync) (Nikita Sivukhin)
+* introduce `eq/contains/starts_with/ends_with_ignore_ascii_case` macros (Lâm Hoàng Phúc)
+* introduce `match_ignore_ascii_case` macro (Lâm Hoàng Phúc)
+* core: Make strict schema support experimental (Pekka Enberg)
+* core/printf: support for more basic substitution types (Luiz Gustavo)
+* Return sqlite_version() without being initialized (Preston Thorpe)
+* Support encryption for raw WAL frames (Gaurav Sarma)
+* bindings/java: Implement date, time related methods under JDBC4PreparedStatement (Kim Seon Woo)
+* Support cipher and encryption key URI options (William Souza)
+* Implement UPSERT (Preston Thorpe)
+* CLI: implement `Line` output .mode (Andrey Oskin)
+* add sqlite integrity check back (Pedro Muniz)
+* core: Initial pass on synchronous pragma (Pekka Enberg)
+* Introduce and propagate `IOContext` as required (Avinash Sajjanshetty)
+* Add some docs on encryption (Avinash Sajjanshetty)
+* sqlite3: Implement sqlite3_malloc() and sqlite3_free() (Pekka Enberg)
+* sqlite3: Implement sqlite3_next_stmt() (Pekka Enberg)
+* core/translate: Add support (Pekka Enberg)
+* sqlite3: Implement sqlite3_db_filename() (Pekka Enberg)
+* flake.nix: add uv dependency to nativeBuildInputs (Ceferino Patino)
+* sqlite3: Implement sqlite3_bind_parameter_index() (Pekka Enberg)
+* sqlite3: Implement sqlite3_clear_bindings() (Pekka Enberg)
+* sqlite3: Implement sqlite3_get_autocommit() (Pekka Enberg)
+* Add support for AEGIS encryption algorithm (Avinash Sajjanshetty)
+* bindings/java: Implement batch operations for JDBC4Statement (Kim Seon Woo)
+* Add syntax highlighting for EXPLAIN and ANALYZE (Alex Miller)
+* Add basic support for ANALYZE statement (Alex Miller)
+* correctly implement offset() in parser (Lâm Hoàng Phúc)
+* Switch to new parser in core (Levy A.)
+* github: Remove Intel Mac support (Pekka Enberg)
+* add remove_file method to the IO (Nikita Sivukhin)
+* Add libc fault injection to Antithesis (Pekka Enberg)
+* core/mvcc: support for MVCC (Pere Diaz Bou)
+* SQLite C API improvements: add column type and column decltype (Danawan Bimantoro)
+* Initial pass to support per page encryption (Avinash Sajjanshetty)
+
+### Updated
+* clean `print_query_result` (Lâm Hoàng Phúc)
+* update update-script to properly handle JS workspace (Nikita Sivukhin)
+* no need `QueryStatistics` if `self.opts.timer` is not set (Lâm Hoàng Phúc)
+* optimizer: convert outer join to inner join if possible (Jussi Saurio)
+* Handle case where null flag is set in op_column (Jussi Saurio)
+* remove &1 (Lâm Hoàng Phúc)
+* reduce cloning `Arc` (Lâm Hoàng Phúc)
+* Evaluate left join seek key condition again after null row (Jussi Saurio)
+* use mlugg/setup-zig instead of unmaintained action (Kingsword)
+* Prevent setting of encryption keys if already set (Gaurav Sarma)
+* Remove RefCell from Cursor (Pedro Muniz)
+* Page Cache: optimize and use sieve/Gclock hybrid algorithm in place of LRU (Preston Thorpe)
+* core: handle edge cases for read_varint (Sonny)
+* Persistence for DBSP-based materialized views (Glauber Costa)
+* io_uring: prevent out of order operations that could interfere with durability (Preston Thorpe)
+* core: Simplify WalFileShared life cycle (Pekka Enberg)
+* prevent modification to system tables. (Glauber Costa)
+* mark completion as done only after callback will be executed (Nikita Sivukhin)
+* core/mvcc: make commit_txn return on I/O (Pere Diaz Bou)
+* windows iterator returns no values for shorter slice (Lâm Hoàng Phúc)
+* Unify resolution of aggregate functions (Piotr Rżysko)
+* replace some matches with `match_ignore_ascii_case` macro (Lâm Hoàng Phúc)
+* Make io_uring sound for connections on multiple threads (Preston Thorpe)
+* build native package for ARM64 (Nikita Sivukhin)
+* refactor parser fmt (Lâm Hoàng Phúc)
+* string sometimes used as identifier quoting (Lâm Hoàng Phúc)
+* CURRENT_TIMESTAMP can fall back to TK_ID (Lâm Hoàng Phúc)
+* remove `turso_sqlite3_parser` from `turso_parser` (Lâm Hoàng Phúc)
+* Simulate I/O in memory (Pedro)
+* Simulate I/O in memory (Pedro Muniz)
+* Refactor encryption to manage authentication tag internally (bit-aloo)
+* Unify handling of grouped and ungrouped aggregations (Piotr Rżysko)
+* Evict page from cache if page is unlocked and unloaded (Pedro Muniz)
+* Use u64 for file offsets in I/O and calculate such offsets in u64 (Preston Thorpe)
+* Document how to use CDC (Pavan Nambi)
+* Upgrade Rust version in simulator build Dockerfile (Preston Thorpe)
+* Parse booleans to integer literals in expressions (Preston Thorpe)
+* Simulator Profiles (Pedro Muniz)
+* Change views to use DBSP circuits (Glauber Costa)
+* core/wal: cache file size (Pere Diaz Bou)
+* Remove some code duplication in the CLI (Preston Thorpe)
+* core/translate: parse_table remove unnecessary clone of table name (Pere Diaz Bou)
+* Update COMPAT.md to remove CREATE INDEX default disabled (Preston Thorpe)
+* core/translate: remove unnecessary agg clones (Pere Diaz Bou)
+* core/vdbe: Micro-optimize "zero_or_null" opcode (Pekka Enberg)
+* translate: with_capacity insns (Pere Diaz Bou)
+* perf: avoid constructing PageType in helper methods (Jussi Saurio)
+* refactor/perf: remove BTreePageInner (Jussi Saurio)
+* Improve integrity check (Nikita Sivukhin)
+* translate/insert: Improve string format performance (Pere Diaz Bou)
+* core/schema: get_dependent_materialized_views_unnormalized (Pere Diaz Bou)
+* core/util: emit literal, cow instead of replace (Pere Diaz Bou)
+* core/translate: sanize_string fast path improvement (Pere Diaz Bou)
+* core/io: Switch Unix I/O to use libc::pwrite() (Pekka Enberg)
+* Update README.md for Go documentation (Preston Thorpe)
+* improve sync engine (Nikita Sivukhin)
+* Remove Go bindings (Preston Thorpe)
+* core/storage: Micro-optimize Pager::commit_dirty_pages() (Pekka Enberg)
+* Rename Go driver to `turso` to not conflict with sqlite3 (Preston Thorpe)
+* Refactor: `Cell` instead of `RefCell` to store `CipherMode` in connection (Avinash Sajjanshetty)
+* Improve documentation of page pinning (Jussi Saurio)
+* Remove double indirection in the Parser (Pedro Muniz)
+* Fail CI run if Turso output differs from SQLite in TPC-H queries (Jussi Saurio)
+* Decouple SQL generation from Simulator crate (Pedro Muniz)
+* Make fill_cell_payload() safe for async IO and cache spilling (Jussi Saurio)
+* Remove Windows IO in place of Generic IO (Preston Thorpe)
+* Improve encryption API (Avinash Sajjanshetty)
+* Update TPC-H running instructions in PERF.md (Alex Miller)
+* Truncate the WAL on last connection close (Preston Thorpe)
+* DBSP projection (Pekka Enberg)
+* Use vectored I/O for appending WAL frames (Preston Thorpe)
+* Remove unnecessary argument from Pager::end_tx() (Nikita Sivukhin)
+* refactor/btree: rewrite the find_free_cell() function (Jussi Saurio)
+* refactor/btree: rewrite the free_cell_range() function (Jussi Saurio)
+* Remove Result from signature (Mikaël Francoeur)
+* Remove duplicated attribute in (bit-aloo)
+* reduce cloning Token in parser (Lâm Hoàng Phúc)
+* refactor encryption module and make it configurable (Avinash Sajjanshetty)
+* Replace a couple refcells for types that trivially impl Copy (Preston Thorpe)
+* wal-api: allow to mix frames insert with SQL execution (Nikita Sivukhin)
+* move check code into parser (Lâm Hoàng Phúc)
+* Serialize compat tests and use Mutex::lock() instead of Mutex::try_lock() in UnixIO (Jussi Saurio)
+* sim: remove "run_once faults" (Jussi Saurio)
+* should not return a Completion when there is a page cache hit (Pedro Muniz)
+* github: Reduce Python build matrix (Pekka Enberg)
+* Page cache truncate (Nikita Sivukhin)
+* Wal api checkpoint seq (Nikita Sivukhin)
+* Use more structured approach in translate_insert (Jussi Saurio)
+* Remove hardcoded flag usage in DBHeader for encryption (Avinash Sajjanshetty)
+* properly execute pragmas - they may require some IO (Nikita Sivukhin)
+* Wal checkpoint upper bound (Nikita Sivukhin)
+* Improve WAL checkpointing performance (Preston Thorpe)
+* core/mvcc: store txid in conn and reset transaction state on commit (Pere Diaz Bou)
+* core/mvcc: start first rowid at 1 (Pere Diaz Bou)
+* refactor/vdbe: move insert-related seeking to VDBE from BTreeCursor (Jussi Saurio)
+
+### Fixed
+* Fix clear_page_cache method and rollback (Preston Thorpe)
+* Fix read_entire_wal_dumb: incrementally build the frame cache (Preston Thorpe)
+* Fix merge script to prompt if tests are still in progress (Preston Thorpe)
+* SQL generation fixes (Pekka Enberg)
+* Fix affinity handling in MakeRecord (Pekka Enberg)
+* Fix infinite loop when IO failure happens on allocating first page (Preston Thorpe)
+* Fix crash in Next opcode if cursor stack has no pages (Jussi Saurio)
+* cli: Fix dump compatibility in "PRAGMA foreign_keys" (Pekka Enberg)
+* Small fixes (Nikita Sivukhin)
+* Avoid allocating and then immediately fallbacking errors in affinity (Jussi Saurio)
+* Fix float formatting and comparison + Blob concat (Levy A.)
+* Fix infinite loop when query starts with a comment token ("--") (Lâm Hoàng Phúc)
+* Fix sqlite3 test cases (Pekka Enberg)
+* Fix non-determinism in simulator (Pedro Muniz)
+* Fix column count in ImmutableRow (Glauber Costa)
+* Fix memory leak in page cache during balancing (Preston Thorpe)
+* Fix `sim-schema` command (Pedro Muniz)
+* Propagate decryption error from the callback (Avinash Sajjanshetty)
+* Fix sorter column deduplication (Piotr Rżysko)
+* Fix missing functions after revert (Pedro Muniz)
+* ci: fix merge-pr issue to escape command-line backticks (Ceferino Patino)
+* Fix several issues with integrity_check (Jussi Saurio)
+* core/io: Fix build on Android and iOS (Pekka Enberg)
+* WAL txn: fix reads from DB file (Nikita Sivukhin)
+* Fix blob type handling in JavaScript (Pekka Enberg)
+* Fix: all indexes need to be updated if the rowid changes (Jussi Saurio)
+* Fix: in UPDATE, insert rowid into index instead of NULL (Jussi Saurio)
+* Fix: normalize table name in DELETE (Jussi Saurio)
+
## 0.1.4 -- 2025-08-20
### Added
From e04938eaf5194724eecf23b77996daf49411af3c Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Wed, 10 Sep 2025 13:30:18 +0300
Subject: [PATCH 23/66] Turso 0.1.5
---
Cargo.lock | 52 +++++++++----------
Cargo.toml | 34 ++++++------
bindings/javascript/package-lock.json | 14 ++---
bindings/javascript/package.json | 2 +-
.../javascript/packages/browser/package.json | 4 +-
.../javascript/packages/common/package.json | 2 +-
.../javascript/packages/native/package.json | 4 +-
bindings/javascript/yarn.lock | 6 +--
sync/javascript/package.json | 2 +-
9 files changed, 60 insertions(+), 60 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 3078152e6..e72c7b5f2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -667,7 +667,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "core_tester"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"anyhow",
"assert_cmd",
@@ -2126,7 +2126,7 @@ dependencies = [
[[package]]
name = "limbo_completion"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"mimalloc",
"turso_ext",
@@ -2134,7 +2134,7 @@ dependencies = [
[[package]]
name = "limbo_crypto"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"blake3",
"data-encoding",
@@ -2147,7 +2147,7 @@ dependencies = [
[[package]]
name = "limbo_csv"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"csv",
"mimalloc",
@@ -2157,7 +2157,7 @@ dependencies = [
[[package]]
name = "limbo_ipaddr"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"ipnetwork",
"mimalloc",
@@ -2166,7 +2166,7 @@ dependencies = [
[[package]]
name = "limbo_percentile"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"mimalloc",
"turso_ext",
@@ -2174,7 +2174,7 @@ dependencies = [
[[package]]
name = "limbo_regexp"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"mimalloc",
"regex",
@@ -2183,7 +2183,7 @@ dependencies = [
[[package]]
name = "limbo_sim"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"anyhow",
"chrono",
@@ -2216,7 +2216,7 @@ dependencies = [
[[package]]
name = "limbo_sqlite_test_ext"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"cc",
]
@@ -2971,7 +2971,7 @@ dependencies = [
[[package]]
name = "py-turso"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"anyhow",
"pyo3",
@@ -3666,7 +3666,7 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d"
[[package]]
name = "sql_generation"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"anarchist-readable-name-generator-lib 0.2.0",
"anyhow",
@@ -4176,7 +4176,7 @@ dependencies = [
[[package]]
name = "turso"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"rand 0.8.5",
"rand_chacha 0.3.1",
@@ -4188,7 +4188,7 @@ dependencies = [
[[package]]
name = "turso-java"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"jni",
"thiserror 2.0.12",
@@ -4197,7 +4197,7 @@ dependencies = [
[[package]]
name = "turso_cli"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"anyhow",
"cfg-if",
@@ -4230,7 +4230,7 @@ dependencies = [
[[package]]
name = "turso_core"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"aegis",
"aes",
@@ -4289,7 +4289,7 @@ dependencies = [
[[package]]
name = "turso_dart"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"flutter_rust_bridge",
"turso_core",
@@ -4297,7 +4297,7 @@ dependencies = [
[[package]]
name = "turso_ext"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"chrono",
"getrandom 0.3.2",
@@ -4306,7 +4306,7 @@ dependencies = [
[[package]]
name = "turso_ext_tests"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"env_logger 0.11.7",
"lazy_static",
@@ -4317,7 +4317,7 @@ dependencies = [
[[package]]
name = "turso_macros"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"proc-macro2",
"quote",
@@ -4326,7 +4326,7 @@ dependencies = [
[[package]]
name = "turso_node"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"napi",
"napi-build",
@@ -4338,7 +4338,7 @@ dependencies = [
[[package]]
name = "turso_parser"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"bitflags 2.9.0",
"criterion",
@@ -4354,7 +4354,7 @@ dependencies = [
[[package]]
name = "turso_sqlite3"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"env_logger 0.11.7",
"libc",
@@ -4367,7 +4367,7 @@ dependencies = [
[[package]]
name = "turso_sqlite3_parser"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"bitflags 2.9.0",
"cc",
@@ -4385,7 +4385,7 @@ dependencies = [
[[package]]
name = "turso_stress"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"anarchist-readable-name-generator-lib 0.1.2",
"antithesis_sdk",
@@ -4401,7 +4401,7 @@ dependencies = [
[[package]]
name = "turso_sync_engine"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"base64",
"bytes",
@@ -4427,7 +4427,7 @@ dependencies = [
[[package]]
name = "turso_sync_js"
-version = "0.1.5-pre.5"
+version = "0.1.5"
dependencies = [
"genawaiter",
"http",
diff --git a/Cargo.toml b/Cargo.toml
index e393d48e3..28059424e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -33,29 +33,29 @@ members = [
exclude = ["perf/latency/limbo"]
[workspace.package]
-version = "0.1.5-pre.5"
+version = "0.1.5"
authors = ["the Limbo authors"]
edition = "2021"
license = "MIT"
repository = "https://github.com/tursodatabase/turso"
[workspace.dependencies]
-turso = { path = "bindings/rust", version = "0.1.5-pre.5" }
-turso_node = { path = "bindings/javascript", version = "0.1.5-pre.5" }
-limbo_completion = { path = "extensions/completion", version = "0.1.5-pre.5" }
-turso_core = { path = "core", version = "0.1.5-pre.5" }
-turso_sync_engine = { path = "sync/engine", version = "0.1.5-pre.5" }
-limbo_crypto = { path = "extensions/crypto", version = "0.1.5-pre.5" }
-limbo_csv = { path = "extensions/csv", version = "0.1.5-pre.5" }
-turso_ext = { path = "extensions/core", version = "0.1.5-pre.5" }
-turso_ext_tests = { path = "extensions/tests", version = "0.1.5-pre.5" }
-limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.5-pre.5" }
-turso_macros = { path = "macros", version = "0.1.5-pre.5" }
-limbo_percentile = { path = "extensions/percentile", version = "0.1.5-pre.5" }
-limbo_regexp = { path = "extensions/regexp", version = "0.1.5-pre.5" }
-turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.5-pre.5" }
-limbo_uuid = { path = "extensions/uuid", version = "0.1.5-pre.5" }
-turso_parser = { path = "parser", version = "0.1.5-pre.5" }
+turso = { path = "bindings/rust", version = "0.1.5" }
+turso_node = { path = "bindings/javascript", version = "0.1.5" }
+limbo_completion = { path = "extensions/completion", version = "0.1.5" }
+turso_core = { path = "core", version = "0.1.5" }
+turso_sync_engine = { path = "sync/engine", version = "0.1.5" }
+limbo_crypto = { path = "extensions/crypto", version = "0.1.5" }
+limbo_csv = { path = "extensions/csv", version = "0.1.5" }
+turso_ext = { path = "extensions/core", version = "0.1.5" }
+turso_ext_tests = { path = "extensions/tests", version = "0.1.5" }
+limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.5" }
+turso_macros = { path = "macros", version = "0.1.5" }
+limbo_percentile = { path = "extensions/percentile", version = "0.1.5" }
+limbo_regexp = { path = "extensions/regexp", version = "0.1.5" }
+turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.5" }
+limbo_uuid = { path = "extensions/uuid", version = "0.1.5" }
+turso_parser = { path = "parser", version = "0.1.5" }
sql_generation = { path = "sql_generation" }
strum = { version = "0.26", features = ["derive"] }
strum_macros = "0.26"
diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json
index 551080310..d0dd32d5d 100644
--- a/bindings/javascript/package-lock.json
+++ b/bindings/javascript/package-lock.json
@@ -1,11 +1,11 @@
{
"name": "javascript",
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"workspaces": [
"packages/common",
"packages/native",
@@ -2485,11 +2485,11 @@
},
"packages/browser": {
"name": "@tursodatabase/database-browser",
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"license": "MIT",
"dependencies": {
"@napi-rs/wasm-runtime": "^1.0.3",
- "@tursodatabase/database-common": "^0.1.5-pre.5"
+ "@tursodatabase/database-common": "^0.1.5"
},
"devDependencies": {
"@napi-rs/cli": "^3.1.5",
@@ -2501,7 +2501,7 @@
},
"packages/common": {
"name": "@tursodatabase/database-common",
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"license": "MIT",
"devDependencies": {
"typescript": "^5.9.2"
@@ -2518,10 +2518,10 @@
},
"packages/native": {
"name": "@tursodatabase/database",
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"license": "MIT",
"dependencies": {
- "@tursodatabase/database-common": "^0.1.5-pre.5"
+ "@tursodatabase/database-common": "^0.1.5"
},
"devDependencies": {
"@napi-rs/cli": "^3.1.5",
diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json
index 70213de55..0145b2468 100644
--- a/bindings/javascript/package.json
+++ b/bindings/javascript/package.json
@@ -9,5 +9,5 @@
"packages/native",
"packages/browser"
],
- "version": "0.1.5-pre.5"
+ "version": "0.1.5"
}
diff --git a/bindings/javascript/packages/browser/package.json b/bindings/javascript/packages/browser/package.json
index 5475fd25b..ca9f38147 100644
--- a/bindings/javascript/packages/browser/package.json
+++ b/bindings/javascript/packages/browser/package.json
@@ -1,6 +1,6 @@
{
"name": "@tursodatabase/database-browser",
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"
@@ -40,6 +40,6 @@
},
"dependencies": {
"@napi-rs/wasm-runtime": "^1.0.3",
- "@tursodatabase/database-common": "^0.1.5-pre.5"
+ "@tursodatabase/database-common": "^0.1.5"
}
}
diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json
index 4a4af4d3c..f45a99ef2 100644
--- a/bindings/javascript/packages/common/package.json
+++ b/bindings/javascript/packages/common/package.json
@@ -1,6 +1,6 @@
{
"name": "@tursodatabase/database-common",
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"
diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json
index abd6cfe97..d2270e48c 100644
--- a/bindings/javascript/packages/native/package.json
+++ b/bindings/javascript/packages/native/package.json
@@ -1,6 +1,6 @@
{
"name": "@tursodatabase/database",
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"
@@ -44,7 +44,7 @@
]
},
"dependencies": {
- "@tursodatabase/database-common": "^0.1.5-pre.5"
+ "@tursodatabase/database-common": "^0.1.5"
},
"imports": {
"#index": "./index.js"
diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock
index f8d062830..fcd207aa2 100644
--- a/bindings/javascript/yarn.lock
+++ b/bindings/javascript/yarn.lock
@@ -1400,7 +1400,7 @@ __metadata:
dependencies:
"@napi-rs/cli": "npm:^3.1.5"
"@napi-rs/wasm-runtime": "npm:^1.0.3"
- "@tursodatabase/database-common": "npm:^0.1.5-pre.5"
+ "@tursodatabase/database-common": "npm:^0.1.5"
"@vitest/browser": "npm:^3.2.4"
playwright: "npm:^1.55.0"
typescript: "npm:^5.9.2"
@@ -1408,7 +1408,7 @@ __metadata:
languageName: unknown
linkType: soft
-"@tursodatabase/database-common@npm:^0.1.5-pre.5, @tursodatabase/database-common@workspace:packages/common":
+"@tursodatabase/database-common@npm:^0.1.5, @tursodatabase/database-common@workspace:packages/common":
version: 0.0.0-use.local
resolution: "@tursodatabase/database-common@workspace:packages/common"
dependencies:
@@ -1421,7 +1421,7 @@ __metadata:
resolution: "@tursodatabase/database@workspace:packages/native"
dependencies:
"@napi-rs/cli": "npm:^3.1.5"
- "@tursodatabase/database-common": "npm:^0.1.5-pre.5"
+ "@tursodatabase/database-common": "npm:^0.1.5"
"@types/node": "npm:^24.3.1"
typescript: "npm:^5.9.2"
vitest: "npm:^3.2.4"
diff --git a/sync/javascript/package.json b/sync/javascript/package.json
index 11a91c979..850aee631 100644
--- a/sync/javascript/package.json
+++ b/sync/javascript/package.json
@@ -1,6 +1,6 @@
{
"name": "@tursodatabase/sync",
- "version": "0.1.5-pre.5",
+ "version": "0.1.5",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"
From a2f0725a62d4cdcaead1b1e49b46a194a87a6203 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Wed, 10 Sep 2025 13:49:55 +0300
Subject: [PATCH 24/66] Fix publish-create.sh script
---
scripts/publish-crates.sh | 1 +
1 file changed, 1 insertion(+)
diff --git a/scripts/publish-crates.sh b/scripts/publish-crates.sh
index 4b379cc7b..7d32697cf 100755
--- a/scripts/publish-crates.sh
+++ b/scripts/publish-crates.sh
@@ -2,6 +2,7 @@
cargo publish -p turso_macros
cargo publish -p turso_ext
+cargo publish -p turso_sqlite3_parser
cargo publish -p turso_parser
cargo publish -p turso_core
cargo publish -p turso
From 9655b455ed03e86eaf6bee0bf13ab759a3609818 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Wed, 10 Sep 2025 13:54:42 +0300
Subject: [PATCH 25/66] Clippy you are so smart
---
stress/main.rs | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/stress/main.rs b/stress/main.rs
index de9aaf5fa..8178e8e13 100644
--- a/stress/main.rs
+++ b/stress/main.rs
@@ -519,7 +519,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut conn = db.lock().await.connect()?;
println!("\rExecuting queries...");
for query_index in 0..nr_iterations {
- if gen_bool(0.001) && false {
+ if gen_bool(0.0) {
+ // disabled
if opts.verbose {
println!("Reopening database");
}
@@ -531,7 +532,8 @@ async fn main() -> Result<(), Box> {
}
*db_guard = builder.build().await?;
conn = db_guard.connect()?;
- } else if gen_bool(0.01) && false {
+ } else if gen_bool(0.0) {
+ // disabled
// Reconnect to the database
if opts.verbose {
println!("Reconnecting to database");
From 5c8afc5caf69fbe91c1e6935742c9a24619d8dab Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 14:01:34 +0300
Subject: [PATCH 26/66] pager: fix incorrect freelist page count bookkeeping
---
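Note on the sign flip: when the freelist trunk page itself is handed out as
the newly allocated page, the freelist shrinks by one, so the counter must be
decremented; the old code incremented it. A minimal sketch of the invariant,
with illustrative names:

// Illustrative only: the counter must mirror the list operation.
// Reusing the trunk page as a fresh page removes one page from the
// freelist, so freelist_pages goes down, not up.
fn allocate_trunk_as_page(
    freelist_pages: &mut u32,
    first_trunk: &mut u32,
    next_trunk: u32,
) -> u32 {
    let reused = *first_trunk; // hand the trunk page itself to the caller
    *first_trunk = next_trunk; // the next trunk becomes the list head
    *freelist_pages -= 1;      // the fix: decrement, not increment
    reused
}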
core/storage/pager.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/core/storage/pager.rs b/core/storage/pager.rs
index f2fb9d160..8fcb1d26f 100644
--- a/core/storage/pager.rs
+++ b/core/storage/pager.rs
@@ -1984,7 +1984,7 @@ impl Pager {
// Freelist is not empty, so we can reuse the trunk itself as a new page
// and update the database's first freelist trunk page to the next trunk page.
header.freelist_trunk_page = next_trunk_page_id.into();
- header.freelist_pages = (header.freelist_pages.get() + 1).into();
+ header.freelist_pages = (header.freelist_pages.get() - 1).into();
self.add_dirty(trunk_page);
// zero out the page
turso_assert!(
From f3ee355fa1c4107c8f6d73e9218fc66540773bff Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Wed, 10 Sep 2025 14:14:46 +0300
Subject: [PATCH 27/66] testing/compare.test: Clean up tests by using in-memory
database
Suggested by Jussi
---
testing/compare.test | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/testing/compare.test b/testing/compare.test
index dd09ffb92..ab262aed0 100644
--- a/testing/compare.test
+++ b/testing/compare.test
@@ -256,19 +256,14 @@ foreach {testname lhs rhs ans} {
}
# github-issue: 2957.
-do_execsql_test compare-int-float-setup {
+do_execsql_test_on_specific_db {:memory:} compare-int-float-lte-negative-zero {
CREATE TABLE t1(i INTEGER);
INSERT INTO t1 VALUES (0), (-1), (1);
-} {}
-
-do_execsql_test compare-int-float-lte-negative-zero {
SELECT i FROM t1 WHERE i <= -0.0 ORDER BY i;
} {-1 0}
-do_execsql_test compare-int-float-lt-negative-zero {
+do_execsql_test_on_specific_db {:memory:} compare-int-float-lt-negative-zero {
+ CREATE TABLE t1(i INTEGER);
+ INSERT INTO t1 VALUES (0), (-1), (1);
SELECT i FROM t1 WHERE i < -0.0 ORDER BY i;
-} {-1}
-
-do_execsql_test compare-int-float-cleanup {
- DROP TABLE t1;
-} {}
\ No newline at end of file
+} {-1}
\ No newline at end of file
From 2131a04b7d2f1697e14858f51de8b3a83ae73efb Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 9 Sep 2025 13:43:36 +0300
Subject: [PATCH 28/66] core: Rename IO::run_once() to IO::step()
The `run_once()` name is just a historical accident. Furthermore, it has
now started to appear elsewhere as well, so let's just call it IO::step(),
as we should have from the beginning.
---
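Note: the contract is unchanged by the rename. Whenever a statement yields
StepResult::IO, the caller pumps the event loop once via IO::step()
(formerly run_once()), as the updated call sites below show. A self-contained
sketch of that driving loop, with stand-in types:

// Stand-ins mirroring turso_core's StepResult and the IO::step() contract.
enum StepResult {
    Row,
    IO,
    Done,
}

trait Io {
    fn step(&self) -> Result<(), String>;
}

// Drive a statement to completion: perform pending I/O whenever the
// statement reports StepResult::IO (these calls were run_once() before).
fn drive(
    io: &dyn Io,
    mut step_stmt: impl FnMut() -> Result<StepResult, String>,
) -> Result<(), String> {
    loop {
        match step_stmt()? {
            StepResult::Row => { /* consume the row */ }
            StepResult::IO => io.step()?,
            StepResult::Done => return Ok(()),
        }
    }
}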
bindings/javascript/src/lib.rs | 4 ++--
core/io/generic.rs | 2 +-
core/io/io_uring.rs | 4 ++--
core/io/mod.rs | 4 ++--
core/io/unix.rs | 2 +-
core/io/vfs.rs | 2 +-
core/io/windows.rs | 2 +-
core/lib.rs | 2 +-
core/mvcc/database/tests.rs | 2 +-
core/storage/btree.rs | 18 +++++++++---------
simulator/runner/io.rs | 4 ++--
simulator/runner/memory/io.rs | 2 +-
sync/engine/src/database_tape.rs | 18 +++++++++---------
sync/javascript/src/lib.rs | 2 +-
tests/integration/functions/test_wal_api.rs | 12 ++++++------
.../integration/query_processing/test_btree.rs | 2 +-
.../query_processing/test_write_path.rs | 2 +-
17 files changed, 42 insertions(+), 42 deletions(-)
diff --git a/bindings/javascript/src/lib.rs b/bindings/javascript/src/lib.rs
index 928b475ef..0911c116b 100644
--- a/bindings/javascript/src/lib.rs
+++ b/bindings/javascript/src/lib.rs
@@ -338,7 +338,7 @@ impl Database {
#[napi]
pub fn io_loop_sync(&self) -> Result<()> {
self.io
- .run_once()
+ .step()
.map_err(|e| Error::new(Status::GenericFailure, format!("IO error: {e}")))?;
Ok(())
}
@@ -631,7 +631,7 @@ impl Task for IoLoopTask {
type JsValue = ();
fn compute(&mut self) -> napi::Result {
- self.io.run_once().map_err(|e| {
+ self.io.step().map_err(|e| {
napi::Error::new(napi::Status::GenericFailure, format!("IO error: {e}"))
})?;
Ok(())
diff --git a/core/io/generic.rs b/core/io/generic.rs
index 7b7fcf99b..8eef59d3b 100644
--- a/core/io/generic.rs
+++ b/core/io/generic.rs
@@ -37,7 +37,7 @@ impl IO for GenericIO {
}
#[instrument(err, skip_all, level = Level::TRACE)]
- fn run_once(&self) -> Result<()> {
+ fn step(&self) -> Result<()> {
Ok(())
}
}
diff --git a/core/io/io_uring.rs b/core/io/io_uring.rs
index e56221b65..03f8dc3a0 100644
--- a/core/io/io_uring.rs
+++ b/core/io/io_uring.rs
@@ -624,8 +624,8 @@ impl IO for UringIO {
Ok(())
}
- fn run_once(&self) -> Result<()> {
- trace!("run_once()");
+ fn step(&self) -> Result<()> {
+ trace!("step()");
let mut inner = self.inner.lock();
let ring = &mut inner.ring;
ring.flush_overflow()?;
diff --git a/core/io/mod.rs b/core/io/mod.rs
index 15028f7eb..139d99221 100644
--- a/core/io/mod.rs
+++ b/core/io/mod.rs
@@ -84,7 +84,7 @@ pub trait IO: Clock + Send + Sync {
// remove_file is used in the sync-engine
fn remove_file(&self, path: &str) -> Result<()>;
- fn run_once(&self) -> Result<()> {
+ fn step(&self) -> Result<()> {
Ok(())
}
@@ -99,7 +99,7 @@ pub trait IO: Clock + Send + Sync {
fn wait_for_completion(&self, c: Completion) -> Result<()> {
while !c.finished() {
- self.run_once()?
+ self.step()?
}
if let Some(Some(err)) = c.inner.result.get().copied() {
return Err(err.into());
diff --git a/core/io/unix.rs b/core/io/unix.rs
index 1f7fbb8c7..174a3941c 100644
--- a/core/io/unix.rs
+++ b/core/io/unix.rs
@@ -120,7 +120,7 @@ impl IO for UnixIO {
}
#[instrument(err, skip_all, level = Level::TRACE)]
- fn run_once(&self) -> Result<()> {
+ fn step(&self) -> Result<()> {
Ok(())
}
}
diff --git a/core/io/vfs.rs b/core/io/vfs.rs
index a0eb7aec5..9c7b116c0 100644
--- a/core/io/vfs.rs
+++ b/core/io/vfs.rs
@@ -45,7 +45,7 @@ impl IO for VfsMod {
Ok(())
}
- fn run_once(&self) -> Result<()> {
+ fn step(&self) -> Result<()> {
if self.ctx.is_null() {
return Err(LimboError::ExtensionError("VFS is null".to_string()));
}
diff --git a/core/io/windows.rs b/core/io/windows.rs
index 77662bce2..a884cc922 100644
--- a/core/io/windows.rs
+++ b/core/io/windows.rs
@@ -37,7 +37,7 @@ impl IO for WindowsIO {
}
#[instrument(err, skip_all, level = Level::TRACE)]
- fn run_once(&self) -> Result<()> {
+ fn step(&self) -> Result<()> {
Ok(())
}
}
diff --git a/core/lib.rs b/core/lib.rs
index 544b81cbf..cb3184625 100644
--- a/core/lib.rs
+++ b/core/lib.rs
@@ -2212,7 +2212,7 @@ impl Statement {
}
pub fn run_once(&self) -> Result<()> {
- let res = self.pager.io.run_once();
+ let res = self.pager.io.step();
if self.program.connection.is_nested_stmt.get() {
return res;
}
diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs
index be3a29a5d..481f9cdaf 100644
--- a/core/mvcc/database/tests.rs
+++ b/core/mvcc/database/tests.rs
@@ -1309,7 +1309,7 @@ fn test_concurrent_writes() {
}
}
}
- db.get_db().io.run_once().unwrap();
+ db.get_db().io.step().unwrap();
if all_finished {
break;
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index a89341974..92db4ae85 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -7606,7 +7606,7 @@ mod tests {
let cursor = BTreeCursor::new_table(None, pager.clone(), page_idx, num_columns);
let (page, _c) = cursor.read_page(page_idx).unwrap();
while page.is_locked() {
- pager.io.run_once().unwrap();
+ pager.io.step().unwrap();
}
// Pin page in order to not drop it in between
@@ -7626,7 +7626,7 @@ mod tests {
}) => {
let (child_page, _c) = cursor.read_page(left_child_page as usize).unwrap();
while child_page.is_locked() {
- pager.io.run_once().unwrap();
+ pager.io.step().unwrap();
}
child_pages.push(child_page);
if left_child_page == page.get().id as u32 {
@@ -7685,7 +7685,7 @@ mod tests {
*p = new_page;
}
while p.is_locked() {
- pager.io.run_once().unwrap();
+ pager.io.step().unwrap();
}
p.get_contents().page_type()
});
@@ -7696,7 +7696,7 @@ mod tests {
*page = new_page;
}
while page.is_locked() {
- pager.io.run_once().unwrap();
+ pager.io.step().unwrap();
}
if page.get_contents().page_type() != child_type {
tracing::error!("child pages have different types");
@@ -7717,7 +7717,7 @@ mod tests {
let cursor = BTreeCursor::new_table(None, pager.clone(), page_idx, num_columns);
let (page, _c) = cursor.read_page(page_idx).unwrap();
while page.is_locked() {
- pager.io.run_once().unwrap();
+ pager.io.step().unwrap();
}
// Pin page in order to not drop it in between loading of different pages. If not contents will be a dangling reference.
@@ -8711,7 +8711,7 @@ mod tests {
.unwrap(),
);
- pager.io.run_once().unwrap();
+ pager.io.step().unwrap();
let _ = run_until_done(|| pager.allocate_page1(), &pager);
for _ in 0..(database_size - 1) {
@@ -8763,11 +8763,11 @@ mod tests {
&IOContext::default(),
c,
)?;
- pager.io.run_once()?;
+ pager.io.step()?;
let (page, _c) = cursor.read_page(current_page as usize)?;
while page.is_locked() {
- cursor.pager.io.run_once()?;
+ cursor.pager.io.step()?;
}
{
@@ -8786,7 +8786,7 @@ mod tests {
current_page += 1;
}
- pager.io.run_once()?;
+ pager.io.step()?;
// Create leaf cell pointing to start of overflow chain
let leaf_cell = BTreeCell::TableLeafCell(TableLeafCell {
diff --git a/simulator/runner/io.rs b/simulator/runner/io.rs
index dbb0b3953..63c350ae2 100644
--- a/simulator/runner/io.rs
+++ b/simulator/runner/io.rs
@@ -121,12 +121,12 @@ impl IO for SimulatorIO {
Ok(())
}
- fn run_once(&self) -> Result<()> {
+ fn step(&self) -> Result<()> {
let now = self.now();
for file in self.files.borrow().iter() {
file.run_queued_io(now)?;
}
- self.inner.run_once()?;
+ self.inner.step()?;
Ok(())
}
diff --git a/simulator/runner/memory/io.rs b/simulator/runner/memory/io.rs
index 46f272500..124ff2caf 100644
--- a/simulator/runner/memory/io.rs
+++ b/simulator/runner/memory/io.rs
@@ -227,7 +227,7 @@ impl IO for MemorySimIO {
Ok(file)
}
- fn run_once(&self) -> Result<()> {
+ fn step(&self) -> Result<()> {
let mut callbacks = self.callbacks.lock();
let mut timeouts = self.timeouts.lock();
tracing::trace!(
diff --git a/sync/engine/src/database_tape.rs b/sync/engine/src/database_tape.rs
index d0ee1eed0..daeb6c3d9 100644
--- a/sync/engine/src/database_tape.rs
+++ b/sync/engine/src/database_tape.rs
@@ -716,7 +716,7 @@ mod tests {
});
let rows = loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(result) => break result,
}
};
@@ -750,7 +750,7 @@ mod tests {
});
let changes = loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(result) => break result,
}
};
@@ -841,7 +841,7 @@ mod tests {
});
let rows = loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(rows) => break rows,
}
};
@@ -921,7 +921,7 @@ mod tests {
});
let rows = loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(rows) => break rows,
}
};
@@ -992,7 +992,7 @@ mod tests {
});
let rows = loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(rows) => break rows,
}
};
@@ -1129,7 +1129,7 @@ mod tests {
});
loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(result) => {
result.unwrap();
break;
@@ -1214,7 +1214,7 @@ mod tests {
});
loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(result) => {
result.unwrap();
break;
@@ -1289,7 +1289,7 @@ mod tests {
});
loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(result) => {
result.unwrap();
break;
@@ -1385,7 +1385,7 @@ mod tests {
});
loop {
match gen.resume_with(Ok(())) {
- genawaiter::GeneratorState::Yielded(..) => io.run_once().unwrap(),
+ genawaiter::GeneratorState::Yielded(..) => io.step().unwrap(),
genawaiter::GeneratorState::Complete(result) => {
result.unwrap();
break;
diff --git a/sync/javascript/src/lib.rs b/sync/javascript/src/lib.rs
index 290d9235f..7ebc535d4 100644
--- a/sync/javascript/src/lib.rs
+++ b/sync/javascript/src/lib.rs
@@ -252,7 +252,7 @@ impl SyncEngine {
#[napi]
pub fn io_loop_sync(&self) -> napi::Result<()> {
- self.io.run_once().map_err(|e| {
+ self.io.step().map_err(|e| {
napi::Error::new(napi::Status::GenericFailure, format!("IO error: {e}"))
})?;
Ok(())
diff --git a/tests/integration/functions/test_wal_api.rs b/tests/integration/functions/test_wal_api.rs
index 4f3444cc8..1a3f13fa6 100644
--- a/tests/integration/functions/test_wal_api.rs
+++ b/tests/integration/functions/test_wal_api.rs
@@ -509,7 +509,7 @@ fn test_wal_upper_bound_passive() {
Ok(StepResult::Row) => {
rows.push(stmt.row().unwrap().get_values().cloned().collect())
}
- Ok(StepResult::IO) => db_copy.io.run_once().unwrap(),
+ Ok(StepResult::IO) => db_copy.io.step().unwrap(),
Ok(StepResult::Done) => break,
result => panic!("unexpected step result: {result:?}"),
}
@@ -702,7 +702,7 @@ fn test_wal_api_exec_commit() {
let result = stmt.step();
match result {
Ok(StepResult::Row) => rows.push(stmt.row().unwrap().get_values().cloned().collect()),
- Ok(StepResult::IO) => db.io.run_once().unwrap(),
+ Ok(StepResult::IO) => db.io.step().unwrap(),
Ok(StepResult::Done) => break,
result => panic!("unexpected step result: {result:?}"),
}
@@ -749,7 +749,7 @@ fn test_wal_api_exec_rollback() {
let result = stmt.step();
match result {
Ok(StepResult::Row) => rows.push(stmt.row().unwrap().get_values().cloned().collect()),
- Ok(StepResult::IO) => db.io.run_once().unwrap(),
+ Ok(StepResult::IO) => db.io.step().unwrap(),
Ok(StepResult::Done) => break,
result => panic!("unexpected step result: {result:?}"),
}
@@ -813,7 +813,7 @@ fn test_wal_api_insert_exec_mix() {
let result = stmt.step();
match result {
Ok(StepResult::Row) => rows.push(stmt.row().unwrap().get_values().cloned().collect()),
- Ok(StepResult::IO) => db.io.run_once().unwrap(),
+ Ok(StepResult::IO) => db.io.step().unwrap(),
Ok(StepResult::Done) => break,
result => panic!("unexpected step result: {result:?}"),
}
@@ -839,7 +839,7 @@ fn test_wal_api_insert_exec_mix() {
let result = stmt.step();
match result {
Ok(StepResult::Row) => rows.push(stmt.row().unwrap().get_values().cloned().collect()),
- Ok(StepResult::IO) => db.io.run_once().unwrap(),
+ Ok(StepResult::IO) => db.io.step().unwrap(),
Ok(StepResult::Done) => break,
result => panic!("unexpected step result: {result:?}"),
}
@@ -908,7 +908,7 @@ fn test_db_share_same_file() {
let result = stmt.step();
match result {
Ok(StepResult::Row) => rows.push(stmt.row().unwrap().get_values().cloned().collect()),
- Ok(StepResult::IO) => db2.io.run_once().unwrap(),
+ Ok(StepResult::IO) => db2.io.step().unwrap(),
Ok(StepResult::Done) => break,
result => panic!("unexpected step result: {result:?}"),
}
diff --git a/tests/integration/query_processing/test_btree.rs b/tests/integration/query_processing/test_btree.rs
index 1355d81e8..5a4fa79c8 100644
--- a/tests/integration/query_processing/test_btree.rs
+++ b/tests/integration/query_processing/test_btree.rs
@@ -436,7 +436,7 @@ fn write_at(io: &impl IO, file: Arc<dyn File>, offset: usize, data: &[u8]) {
});
let result = file.pwrite(offset as u64, buffer, completion).unwrap();
while !result.is_completed() {
- io.run_once().unwrap();
+ io.step().unwrap();
}
}
diff --git a/tests/integration/query_processing/test_write_path.rs b/tests/integration/query_processing/test_write_path.rs
index 222d2deb4..2bd4263c0 100644
--- a/tests/integration/query_processing/test_write_path.rs
+++ b/tests/integration/query_processing/test_write_path.rs
@@ -317,7 +317,7 @@ fn test_wal_restart() -> anyhow::Result<()> {
let insert_query = format!("INSERT INTO test VALUES ({i})");
run_query(tmp_db, conn, &insert_query)?;
debug!("inserted {i}");
- tmp_db.io.run_once()?;
+ tmp_db.io.step()?;
Ok(())
}
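
The rename from run_once to step is mechanical, but it is worth seeing the pattern all of these call sites share: step the statement, and whenever it reports pending I/O, advance the event loop one step before retrying. A minimal sketch of that loop, assuming the Statement, StepResult, and IO types visible in the hunks above (the helper name exec_collect is hypothetical, and StepResult may have further variants than shown here):

```rust
// Hypothetical driver for the cooperative stepping pattern used above.
fn exec_collect(stmt: &mut Statement, io: &dyn IO) -> Result<Vec<Vec<Value>>> {
    let mut rows = Vec::new();
    loop {
        match stmt.step()? {
            // A row is ready: snapshot its values and keep stepping.
            StepResult::Row => {
                rows.push(stmt.row().unwrap().get_values().cloned().collect())
            }
            // The statement is blocked on asynchronous I/O: run one step of
            // the event loop (formerly `run_once`, now `step`) and retry.
            StepResult::IO => io.step()?,
            // The statement has finished.
            StepResult::Done => break,
            other => panic!("unexpected step result: {other:?}"),
        }
    }
    Ok(rows)
}
```
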
From df83b560831498b34c691854f3117b3978b67e87 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 14:40:12 +0300
Subject: [PATCH 29/66] check freelist count in integrity check
---
core/storage/btree.rs | 32 ++++++++++++++++++++++++++++----
core/vdbe/execute.rs | 11 +++++++++++
2 files changed, 39 insertions(+), 4 deletions(-)
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index a89341974..e92156f0e 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -5487,6 +5487,13 @@ pub enum IntegrityCheckError {
references: Vec,
page_category: PageCategory,
},
+ #[error(
+ "Freelist count mismatch. actual_count={actual_count}, expected_count={expected_count}"
+ )]
+ FreelistCountMismatch {
+ actual_count: usize,
+ expected_count: usize,
+ },
}
#[derive(Debug, Clone, Copy, PartialEq)]
@@ -5497,6 +5504,12 @@ pub(crate) enum PageCategory {
FreePage,
}
+#[derive(Clone)]
+pub struct CheckFreelist {
+ pub expected_count: usize,
+ pub actual_count: usize,
+}
+
#[derive(Clone)]
struct IntegrityCheckPageEntry {
page_idx: usize,
@@ -5509,6 +5522,7 @@ pub struct IntegrityCheckState {
first_leaf_level: Option,
page_reference: HashMap,
page: Option,
+ pub freelist_count: CheckFreelist,
}
impl IntegrityCheckState {
@@ -5518,9 +5532,17 @@ impl IntegrityCheckState {
page_reference: HashMap::new(),
first_leaf_level: None,
page: None,
+ freelist_count: CheckFreelist {
+ expected_count: 0,
+ actual_count: 0,
+ },
}
}
+ pub fn set_expected_freelist_count(&mut self, count: usize) {
+ self.freelist_count.expected_count = count;
+ }
+
pub fn start(
&mut self,
page_idx: usize,
@@ -5554,10 +5576,7 @@ impl IntegrityCheckState {
) {
let page_id = entry.page_idx as u64;
let Some(previous) = self.page_reference.insert(page_id, referenced_by) else {
- // do not traverse free pages as they have no meaingful structured content
- if entry.page_category != PageCategory::FreePage {
- self.page_stack.push(entry);
- }
+ self.page_stack.push(entry);
return;
};
errors.push(IntegrityCheckError::PageReferencedMultipleTimes {
@@ -5616,6 +5635,7 @@ pub fn integrity_check(
let contents = page.get_contents();
if page_category == PageCategory::FreeListTrunk {
+ state.freelist_count.actual_count += 1;
let next_freelist_trunk_page = contents.read_u32_no_offset(0);
if next_freelist_trunk_page != 0 {
state.push_page(
@@ -5645,6 +5665,10 @@ pub fn integrity_check(
}
continue;
}
+ if page_category == PageCategory::FreePage {
+ state.freelist_count.actual_count += 1;
+ continue;
+ }
if page_category == PageCategory::Overflow {
let next_overflow_page = contents.read_u32_no_offset(0);
if next_overflow_page != 0 {
diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs
index 86ae514a4..d9fa306e2 100644
--- a/core/vdbe/execute.rs
+++ b/core/vdbe/execute.rs
@@ -7349,6 +7349,9 @@ pub fn op_integrity_check(
let mut current_root_idx = 0;
// check freelist pages first, if there are any for database
if freelist_trunk_page > 0 {
+ let expected_freelist_count =
+ return_if_io!(pager.with_header(|header| header.freelist_pages.get()));
+ integrity_check_state.set_expected_freelist_count(expected_freelist_count as usize);
integrity_check_state.start(
freelist_trunk_page as usize,
PageCategory::FreeListTrunk,
@@ -7375,6 +7378,14 @@ pub fn op_integrity_check(
*current_root_idx += 1;
return Ok(InsnFunctionStepResult::Step);
} else {
+ if integrity_check_state.freelist_count.actual_count
+ != integrity_check_state.freelist_count.expected_count
+ {
+ errors.push(IntegrityCheckError::FreelistCountMismatch {
+ actual_count: integrity_check_state.freelist_count.actual_count,
+ expected_count: integrity_check_state.freelist_count.expected_count,
+ });
+ }
let message = if errors.is_empty() {
"ok".to_string()
} else {
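
For context on what is being counted: a SQLite-format freelist is a chain of trunk pages, each of which begins with a 4-byte pointer to the next trunk and a 4-byte count of the leaf page numbers stored on that trunk, and both trunks and leaves contribute to the header's freelist page count. A simplified, synchronous sketch of the tally the check performs (the `read_page` helper is hypothetical; the real code is incremental and driven by the integrity-check page stack):

```rust
// Walk the freelist trunk chain and count trunk + leaf pages, to be
// compared against the `freelist_pages` field of the database header.
fn count_freelist_pages(pager: &Pager, mut trunk: u32) -> Result<usize> {
    let mut count = 0;
    while trunk != 0 {
        count += 1; // the trunk page itself is a freelist page
        let page = pager.read_page(trunk as usize)?;
        let contents = page.get_contents();
        let next_trunk = contents.read_u32_no_offset(0); // bytes [0..4)
        let n_leaves = contents.read_u32_no_offset(4) as usize; // bytes [4..8)
        count += n_leaves; // leaf pages listed on this trunk
        trunk = next_trunk;
    }
    Ok(count)
}
```
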
From 6d43bdbf7135ef1403e9f36143040e8e3bde2773 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Mon, 18 Aug 2025 10:50:32 +0300
Subject: [PATCH 30/66] emit the Delete instruction for the iteration index
cursor, and do it last
---
core/translate/emitter.rs | 98 +++++++++++++++++++++++----------------
1 file changed, 59 insertions(+), 39 deletions(-)
diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs
index 9e233a2b5..bf0c008fe 100644
--- a/core/translate/emitter.rs
+++ b/core/translate/emitter.rs
@@ -530,48 +530,57 @@ fn emit_delete_insns(
.schema
.indexes
.get(table_reference.table.get_name());
- let index_refs_opt = indexes.map(|indexes| {
- indexes
- .iter()
- .map(|index| {
- (
- index.clone(),
- program.resolve_cursor_id(&CursorKey::index(
- table_reference.internal_id,
- index.clone(),
- )),
- )
- })
- .collect::<Vec<_>>()
- });
- if let Some(index_refs) = index_refs_opt {
- for (index, index_cursor_id) in index_refs {
- let num_regs = index.columns.len() + 1;
- let start_reg = program.alloc_registers(num_regs);
- // Emit columns that are part of the index
- index
- .columns
+ // Get the index that is being used to iterate the deletion loop, if there is one.
+ let iteration_index = table_reference.op.index();
+ // Get all indexes that are not the iteration index.
+ let other_indexes = indexes
+ .map(|indexes| {
+ indexes
.iter()
- .enumerate()
- .for_each(|(reg_offset, column_index)| {
- program.emit_column_or_rowid(
- main_table_cursor_id,
- column_index.pos_in_table,
- start_reg + reg_offset,
- );
- });
- program.emit_insn(Insn::RowId {
- cursor_id: main_table_cursor_id,
- dest: start_reg + num_regs - 1,
+ .filter(|index| {
+ iteration_index
+ .as_ref()
+ .is_none_or(|it_idx| !Arc::ptr_eq(it_idx, index))
+ })
+ .map(|index| {
+ (
+ index.clone(),
+ program.resolve_cursor_id(&CursorKey::index(
+ table_reference.internal_id,
+ index.clone(),
+ )),
+ )
+ })
+ .collect::<Vec<_>>()
+ })
+ .unwrap_or_default();
+
+ for (index, index_cursor_id) in other_indexes {
+ let num_regs = index.columns.len() + 1;
+ let start_reg = program.alloc_registers(num_regs);
+ // Emit columns that are part of the index
+ index
+ .columns
+ .iter()
+ .enumerate()
+ .for_each(|(reg_offset, column_index)| {
+ program.emit_column_or_rowid(
+ main_table_cursor_id,
+ column_index.pos_in_table,
+ start_reg + reg_offset,
+ );
});
- program.emit_insn(Insn::IdxDelete {
- start_reg,
- num_regs,
- cursor_id: index_cursor_id,
- raise_error_if_no_matching_entry: true,
- });
- }
+ program.emit_insn(Insn::RowId {
+ cursor_id: main_table_cursor_id,
+ dest: start_reg + num_regs - 1,
+ });
+ program.emit_insn(Insn::IdxDelete {
+ start_reg,
+ num_regs,
+ cursor_id: index_cursor_id,
+ raise_error_if_no_matching_entry: true,
+ });
}
// Emit update in the CDC table if necessary (before DELETE updated the table)
@@ -636,6 +645,17 @@ fn emit_delete_insns(
cursor_id: main_table_cursor_id,
table_name: table_reference.table.get_name().to_string(),
});
+
+ if let Some(index) = iteration_index {
+ let iteration_index_cursor = program.resolve_cursor_id(&CursorKey::index(
+ table_reference.internal_id,
+ index.clone(),
+ ));
+ program.emit_insn(Insn::Delete {
+ cursor_id: iteration_index_cursor,
+ table_name: index.name.clone(),
+ });
+ }
}
if let Some(limit_ctx) = t_ctx.limit_ctx {
program.emit_insn(Insn::DecrJumpZero {
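
To summarize the new ordering: entries in every index other than the iteration index are removed first via IdxDelete probes keyed on values rebuilt from the table row, then the table row itself, and only then the iteration index's own entry, using a positioned Delete on its cursor rather than a key probe, so the loop can keep advancing from that cursor position. A hypothetical sketch of the emitted sequence for a DELETE driven by index i1 on a table t that also has an index i2 (operands elided; only the order matters):

```rust
// Hypothetical emission order; i1/i2/t are illustrative names.
//   IdxDelete(i2)  -- probe i2 by the key rebuilt from the table row
//   Delete(t)      -- remove the row from the table b-tree
//   Delete(i1)     -- last: remove the iteration index entry through the
//                     cursor's current position, not a key lookup
```
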
From e0ca0cf8af7e7c55e45cb868d31736289e7a33f3 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Mon, 18 Aug 2025 10:52:13 +0300
Subject: [PATCH 31/66] Enable access path optimizer for DELETE
---
core/translate/optimizer/mod.rs | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/core/translate/optimizer/mod.rs b/core/translate/optimizer/mod.rs
index e0b1c6b73..f2222975c 100644
--- a/core/translate/optimizer/mod.rs
+++ b/core/translate/optimizer/mod.rs
@@ -88,7 +88,7 @@ pub fn optimize_select_plan(plan: &mut SelectPlan, schema: &Schema) -> Result<()
Ok(())
}
-fn optimize_delete_plan(plan: &mut DeletePlan, _schema: &Schema) -> Result<()> {
+fn optimize_delete_plan(plan: &mut DeletePlan, schema: &Schema) -> Result<()> {
rewrite_exprs_delete(plan)?;
if let ConstantConditionEliminationResult::ImpossibleCondition =
eliminate_constant_conditions(&mut plan.where_clause)?
@@ -97,15 +97,14 @@ fn optimize_delete_plan(plan: &mut DeletePlan, _schema: &Schema) -> Result<()> {
return Ok(());
}
- // FIXME: don't use indexes for delete right now because it's buggy. See for example:
- // https://github.com/tursodatabase/turso/issues/1714
- // let _ = optimize_table_access(
- // &mut plan.table_references,
- // &schema.indexes,
- // &mut plan.where_clause,
- // &mut plan.order_by,
- // &mut None,
- // )?;
+ let _ = optimize_table_access(
+ schema,
+ &mut plan.table_references,
+ &schema.indexes,
+ &mut plan.where_clause,
+ &mut plan.order_by,
+ &mut None,
+ )?;
Ok(())
}
From f469113d9fba42006a7fd30cd3be738354060426 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Tue, 9 Sep 2025 11:32:04 +0300
Subject: [PATCH 32/66] Don't crash if DELETE uses index
---
core/translate/display.rs | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/core/translate/display.rs b/core/translate/display.rs
index 384cf7f54..8a83f9a76 100644
--- a/core/translate/display.rs
+++ b/core/translate/display.rs
@@ -143,9 +143,24 @@ impl Display for DeletePlan {
writeln!(f, "{indent}DELETE FROM {table_name}")?;
}
- Operation::Search { .. } => {
- panic!("DELETE plans should not contain search operations");
- }
+ Operation::Search(search) => match search {
+ Search::RowidEq { .. } | Search::Seek { index: None, .. } => {
+ writeln!(
+ f,
+ "{}SEARCH {} USING INTEGER PRIMARY KEY (rowid=?)",
+ indent, reference.identifier
+ )?;
+ }
+ Search::Seek {
+ index: Some(index), ..
+ } => {
+ writeln!(
+ f,
+ "{}SEARCH {} USING INDEX {}",
+ indent, reference.identifier, index.name
+ )?;
+ }
+ },
}
}
Ok(())
From 813bdc088bd89f83cec96356295609a5dfdddd1e Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Tue, 9 Sep 2025 16:01:15 +0300
Subject: [PATCH 33/66] Adjust fuzz test to do a WHERE-less update or delete
sometimes
---
tests/integration/fuzz/mod.rs | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs
index 283dab940..1e4818c5e 100644
--- a/tests/integration/fuzz/mod.rs
+++ b/tests/integration/fuzz/mod.rs
@@ -515,7 +515,7 @@ mod tests {
let (mut rng, seed) = rng_from_time();
println!("index_scan_single_key_mutation_fuzz seed: {seed}");
- const OUTER_ITERATIONS: usize = 30;
+ const OUTER_ITERATIONS: usize = 100;
for i in 0..OUTER_ITERATIONS {
println!(
"table_index_mutation_fuzz iteration {}/{}",
@@ -580,7 +580,7 @@ mod tests {
limbo_exec_rows(&limbo_db, &limbo_conn, &insert);
const COMPARISONS: [&str; 3] = ["=", "<", ">"];
- const INNER_ITERATIONS: usize = 100;
+ const INNER_ITERATIONS: usize = 20;
for _ in 0..INNER_ITERATIONS {
let do_update = rng.random_range(0..2) == 0;
@@ -590,11 +590,23 @@ mod tests {
let predicate_col = rng.random_range(0..num_cols);
let predicate_value = rng.random_range(0..1000);
+ let omit_where = rng.random_bool(0.05);
+
let query = if do_update {
let new_y = rng.random_range(0..1000);
- format!("UPDATE t SET c{affected_col} = {new_y} WHERE c{predicate_col} {comparison} {predicate_value}")
+ if omit_where {
+ format!("UPDATE t SET c{affected_col} = {new_y}")
+ } else {
+ format!("UPDATE t SET c{affected_col} = {new_y} WHERE c{predicate_col} {comparison} {predicate_value}")
+ }
} else {
- format!("DELETE FROM t WHERE c{predicate_col} {comparison} {predicate_value}")
+ if omit_where {
+ "DELETE FROM t".to_string()
+ } else {
+ format!(
+ "DELETE FROM t WHERE c{predicate_col} {comparison} {predicate_value}"
+ )
+ }
};
dml_statements.push(query.clone());
From 36ec654631990b656397e6d2931556a2ad8e0703 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Tue, 9 Sep 2025 13:02:51 +0300
Subject: [PATCH 34/66] Seek with GE after delete balancing and skip next
advance
---
core/storage/btree.rs | 28 ++++++++++++++++++++++------
core/vdbe/execute.rs | 24 +++++++++++++++++-------
2 files changed, 39 insertions(+), 13 deletions(-)
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index a89341974..95f740ebb 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -481,7 +481,7 @@ pub struct BTreeCursor {
/// Page id of the root page used to go back up fast.
root_page: usize,
/// Rowid and record are stored before being consumed.
- has_record: Cell<bool>,
+ pub has_record: Cell<bool>,
null_flag: bool,
/// Index internal pages are consumed on the way up, so we store going upwards flag in case
/// we just moved to a parent page and the parent page is an internal index page which requires
@@ -543,6 +543,8 @@ pub struct BTreeCursor {
seek_end_state: SeekEndState,
/// State machine for [BTreeCursor::move_to]
move_to_state: MoveToState,
+ /// Whether the next call to [BTreeCursor::next()] should be a no-op
+ skip_advance: Cell<bool>,
}
/// We store the cell index and cell count for each page in the stack.
@@ -615,6 +617,7 @@ impl BTreeCursor {
count_state: CountState::Start,
seek_end_state: SeekEndState::Start,
move_to_state: MoveToState::Start,
+ skip_advance: Cell::new(false),
}
}
@@ -696,7 +699,7 @@ impl BTreeCursor {
/// Move the cursor to the previous record and return it.
/// Used in backwards iteration.
#[instrument(skip(self), level = Level::DEBUG, name = "prev")]
- fn get_prev_record(&mut self) -> Result<IOResult<bool>> {
+ pub fn get_prev_record(&mut self) -> Result<IOResult<bool>> {
loop {
let (old_top_idx, page_type, is_index, is_leaf, cell_count) = {
let page = self.stack.top_ref();
@@ -1202,7 +1205,7 @@ impl BTreeCursor {
/// Move the cursor to the next record and return it.
/// Used in forwards iteration, which is the default.
#[instrument(skip(self), level = Level::DEBUG, name = "next")]
- fn get_next_record(&mut self) -> Result<IOResult<bool>> {
+ pub fn get_next_record(&mut self) -> Result<IOResult<bool>> {
if let Some(mv_cursor) = &self.mv_cursor {
let mut mv_cursor = mv_cursor.borrow_mut();
mv_cursor.forward();
@@ -4241,6 +4244,7 @@ impl BTreeCursor {
if self.valid_state == CursorValidState::Invalid {
return Ok(IOResult::Done(()));
}
+ self.skip_advance.set(false);
loop {
match self.rewind_state {
RewindState::Start => {
@@ -4280,6 +4284,16 @@ impl BTreeCursor {
if self.valid_state == CursorValidState::Invalid {
return Ok(IOResult::Done(false));
}
+ if self.skip_advance.get() {
+ self.skip_advance.set(false);
+ let mem_page = self.stack.top_ref();
+ let contents = mem_page.get_contents();
+ let cell_idx = self.stack.current_cell_index();
+ let cell_count = contents.cell_count();
+ let has_record = cell_idx >= 0 && cell_idx < cell_count as i32;
+ self.has_record.set(has_record);
+ return Ok(IOResult::Done(has_record));
+ }
loop {
match self.advance_state {
AdvanceState::Start => {
@@ -4296,7 +4310,7 @@ impl BTreeCursor {
}
}
- fn invalidate_record(&mut self) {
+ pub fn invalidate_record(&mut self) {
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
@@ -4361,6 +4375,7 @@ impl BTreeCursor {
let mut mv_cursor = mv_cursor.borrow_mut();
return mv_cursor.seek(key, op);
}
+ self.skip_advance.set(false);
// Empty trace to capture the span information
tracing::trace!("");
// We need to clear the null flag for the table cursor before seeking,
@@ -4547,7 +4562,7 @@ impl BTreeCursor {
};
CursorContext {
key: CursorContextKey::IndexKeyRowId(record),
- seek_op: SeekOp::LT,
+ seek_op: SeekOp::GE { eq_only: true },
}
} else {
let Some(rowid) = return_if_io!(self.rowid()) else {
@@ -4555,7 +4570,7 @@ impl BTreeCursor {
};
CursorContext {
key: CursorContextKey::TableRowId(rowid),
- seek_op: SeekOp::LT,
+ seek_op: SeekOp::GE { eq_only: true },
}
};
@@ -4828,6 +4843,7 @@ impl BTreeCursor {
}
DeleteState::RestoreContextAfterBalancing => {
return_if_io!(self.restore_context());
+ self.skip_advance.set(true);
self.state = CursorState::None;
return Ok(IOResult::Done(()));
}
diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs
index 86ae514a4..4a19e5bf5 100644
--- a/core/vdbe/execute.rs
+++ b/core/vdbe/execute.rs
@@ -2701,6 +2701,7 @@ pub enum OpSeekKey {
IndexKeyFromRegister(usize),
}
+#[derive(Debug)]
pub enum OpSeekState {
/// Initial state
Start,
@@ -3012,11 +3013,16 @@ pub fn seek_internal(
// this same logic applies for indexes, but the next/prev record is expected to be found in the parent page's
// divider cell.
let result = match op {
- SeekOp::GT | SeekOp::GE { .. } => cursor.next()?,
- SeekOp::LT | SeekOp::LE { .. } => cursor.prev()?,
+ // deliberately call get_next_record() instead of next() to avoid triggering skip_advance unintentionally
+ SeekOp::GT | SeekOp::GE { .. } => cursor.get_next_record()?,
+ SeekOp::LT | SeekOp::LE { .. } => cursor.get_prev_record()?,
};
match result {
- IOResult::Done(found) => found,
+ IOResult::Done(found) => {
+ cursor.has_record.set(found);
+ cursor.invalidate_record();
+ found
+ }
IOResult::IO(io) => return Ok(SeekInternalResult::IO(io)),
}
};
@@ -5730,9 +5736,11 @@ pub fn op_idx_delete(
// If P5 is not zero, then raise an SQLITE_CORRUPT_INDEX error if no matching index entry is found
// Also, do not raise this (self-correcting and non-critical) error if in writable_schema mode.
if *raise_error_if_no_matching_entry {
- let record = make_record(&state.registers, start_reg, num_regs);
+ let reg_values = (*start_reg..*start_reg + *num_regs)
+ .map(|i| &state.registers[i])
+ .collect::<Vec<_>>();
return Err(LimboError::Corrupt(format!(
- "IdxDelete: no matching index entry found for record {record:?}"
+ "IdxDelete: no matching index entry found for key {reg_values:?}"
)));
}
state.pc += 1;
@@ -5749,9 +5757,11 @@ pub fn op_idx_delete(
};
if rowid.is_none() && *raise_error_if_no_matching_entry {
+ let reg_values = (*start_reg..*start_reg + *num_regs)
+ .map(|i| &state.registers[i])
+ .collect::<Vec<_>>();
return Err(LimboError::Corrupt(format!(
- "IdxDelete: no matching index entry found for record {:?}",
- make_record(&state.registers, start_reg, num_regs)
+ "IdxDelete: no matching index entry found for key {reg_values:?}"
)));
}
state.op_idx_delete_state = Some(OpIdxDeleteState::Deleting);
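
The hazard this patch addresses: deleting a row can trigger balancing, which invalidates the cursor; the position is restored by seeking GE { eq_only: true } to the deleted key, which lands on that key's successor. Without skip_advance, the next() at the top of the deletion loop would then step past the successor and drop one row per rebalance. A condensed, synchronous sketch of the loop-level effect (hypothetical driver; the real cursor API is IOResult-based):

```rust
// Hypothetical DELETE-in-a-loop driver illustrating why skip_advance exists.
fn delete_all(cursor: &mut BTreeCursor) -> Result<()> {
    let mut more = cursor.rewind()?;
    while more {
        cursor.delete()?; // may rebalance; the restore seek lands the cursor
                          // ON the successor of the deleted key
        // With skip_advance set, this first next() after a delete is a no-op
        // that merely reports whether the cursor still points at a valid
        // record; without it, the call would step PAST the successor.
        more = cursor.next()?;
    }
    Ok(())
}
```
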
From 53eaf56a63bf49bb33802702e48758cdd7f8345a Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Tue, 9 Sep 2025 17:26:11 +0300
Subject: [PATCH 35/66] let's apply clippy's suggestion that makes the code
less readable
---
tests/integration/fuzz/mod.rs | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs
index 1e4818c5e..59134a75b 100644
--- a/tests/integration/fuzz/mod.rs
+++ b/tests/integration/fuzz/mod.rs
@@ -599,14 +599,10 @@ mod tests {
} else {
format!("UPDATE t SET c{affected_col} = {new_y} WHERE c{predicate_col} {comparison} {predicate_value}")
}
+ } else if omit_where {
+ "DELETE FROM t".to_string()
} else {
- if omit_where {
- "DELETE FROM t".to_string()
- } else {
- format!(
- "DELETE FROM t WHERE c{predicate_col} {comparison} {predicate_value}"
- )
- }
+ format!("DELETE FROM t WHERE c{predicate_col} {comparison} {predicate_value}")
};
dml_statements.push(query.clone());
From 80f8794fda716b4338f4701619bd2cb68bdf8f70 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Tue, 9 Sep 2025 17:32:00 +0300
Subject: [PATCH 36/66] add comments
---
core/storage/btree.rs | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index 95f740ebb..394fed46a 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -543,7 +543,8 @@ pub struct BTreeCursor {
seek_end_state: SeekEndState,
/// State machine for [BTreeCursor::move_to]
move_to_state: MoveToState,
- /// Whether the next call to [BTreeCursor::next()] should be a no-op
+ /// Whether the next call to [BTreeCursor::next()] should be a no-op.
+ /// This is currently only used after a delete operation causes a rebalancing.
skip_advance: Cell<bool>,
}
@@ -4285,6 +4286,7 @@ impl BTreeCursor {
return Ok(IOResult::Done(false));
}
if self.skip_advance.get() {
+ // See DeleteState::RestoreContextAfterBalancing
self.skip_advance.set(false);
let mem_page = self.stack.top_ref();
let contents = mem_page.get_contents();
@@ -4843,6 +4845,11 @@ impl BTreeCursor {
}
DeleteState::RestoreContextAfterBalancing => {
return_if_io!(self.restore_context());
+
+ // We deleted key K and then performed a GE { eq_only: true } seek for K.
+ // The cursor is therefore now pointing to the next key after K.
+ // We need to make the next call to BTreeCursor::next() a no-op so that we don't skip over
+ // a row when deleting rows in a loop.
self.skip_advance.set(true);
self.state = CursorState::None;
return Ok(IOResult::Done(()));
From 618f51330ad90a4d61a877114e3bf40d0b3e4173 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 00:12:20 +0300
Subject: [PATCH 37/66] advance despite skip_advance flag if cursor not
pointing at record
---
core/storage/btree.rs | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index 394fed46a..a21c0c532 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -4293,8 +4293,14 @@ impl BTreeCursor {
let cell_idx = self.stack.current_cell_index();
let cell_count = contents.cell_count();
let has_record = cell_idx >= 0 && cell_idx < cell_count as i32;
- self.has_record.set(has_record);
- return Ok(IOResult::Done(has_record));
+ if has_record {
+ self.has_record.set(true);
+ // If we are positioned at a record, we stop here without advancing.
+ return Ok(IOResult::Done(true));
+ }
+ // But: if we aren't currently positioned at a record (for example, we are at the end of a page),
+ // we need to advance despite the skip_advance flag
+ // because the intent is to find the next record immediately after the one we just deleted.
}
loop {
match self.advance_state {
From 2ff5e15f58aeff0f8fe93dffb1802c46bcf16d6c Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 12:43:57 +0300
Subject: [PATCH 38/66] Add scripts that help debug bugs from simulator
1. Add script that cleans simulator logs into just the SQL statements
2. Add script that bisects a set of SQL statements to find the minimal
prefix set of statements that fails SQLite integrity check
---
.gitignore | 1 +
scripts/clean_interactions.sh | 39 +++++++
scripts/corruption_bisecter.py | 195 +++++++++++++++++++++++++++++++++
3 files changed, 235 insertions(+)
create mode 100755 scripts/clean_interactions.sh
create mode 100644 scripts/corruption_bisecter.py
diff --git a/.gitignore b/.gitignore
index b851e8025..294d5a6dc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -44,3 +44,4 @@ profile.json.gz
simulator-output/
&1
+bisected.sql
\ No newline at end of file
diff --git a/scripts/clean_interactions.sh b/scripts/clean_interactions.sh
new file mode 100755
index 000000000..360ee31e8
--- /dev/null
+++ b/scripts/clean_interactions.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Clean lines from simulator output by:
+# 1) Removing everything up to and including "interaction="
+# 2) Replacing everything from "}:" to the end with a single semicolon
+# 3) Only retaining lines containing CREATE/INSERT/UPDATE/DELETE/DROP (the rest are usually meaningless for debugging)
+#
+# The purpose of this is to transform the interaction plan into a list of executable SQL statements
+# in cases where:
+# 1. Shrinking the plan failed
+# 2. We know the point at which the simulator failure occurred.
+#
+# I use this script like this in the simulator directory:
+# cargo run &> raw_output.txt
+# manually edit out the shrinking parts and the WarGames intro graphics etc and save the file
+# then run:
+# ./clean_interactions.sh raw_output.txt > interactions.sql
+#
+# Usage:
+# clean_interactions.sh INPUT [OUTPUT]
+#
+# If OUTPUT is omitted, the result is written to stdout.
+
+if [[ $# -lt 1 || $# -gt 2 ]]; then
+ echo "Usage: $0 INPUT [OUTPUT]" >&2
+ exit 1
+fi
+
+input_path="$1"
+output_path="${2:-}"
+
+if [[ -z "${output_path}" ]]; then
+ awk '{ line=$0; sub(/^[^\n]*interaction=/, "", line); sub(/}:.*/, ";", line); print line }' "${input_path}" | grep -E 'CREATE|INSERT|UPDATE|DELETE|DROP'
+else
+ awk '{ line=$0; sub(/^[^\n]*interaction=/, "", line); sub(/}:.*/, ";", line); print line }' "${input_path}" | grep -E 'CREATE|INSERT|UPDATE|DELETE|DROP' > "${output_path}"
+fi
+
+
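
To illustrate the transformation, here is a hypothetical simulator log line and the statement the pipeline extracts from it (the exact trace layout varies; everything through "interaction=" is stripped, and everything from "}:" onward becomes a semicolon):

```
2025-09-10T12:00:01Z INFO execute{interaction=INSERT INTO t VALUES (1, 2)}: done
=> INSERT INTO t VALUES (1, 2);
```
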
diff --git a/scripts/corruption_bisecter.py b/scripts/corruption_bisecter.py
new file mode 100644
index 000000000..38f512a67
--- /dev/null
+++ b/scripts/corruption_bisecter.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+# Usage e.g.: uv run scripts/corruption_bisecter.py -i corruption.sql -o bisected.sql
+# To clean up input data for this script, consider using `scripts/clean_interactions.sh`
+import argparse
+import shutil
+import subprocess
+import sys
+import tempfile
+from pathlib import Path
+from typing import Callable, List, Literal, Sequence
+
+def read_statements(input_path: Path) -> List[str]:
+ with input_path.open("r", encoding="utf-8", errors="replace") as f:
+ lines = [line.rstrip("\n") for line in f]
+ return [line for line in lines if line.strip()]
+
+
+# Run a set of SQL statements using tursodb and then run integrity_check on the given db file using sqlite3.
+# Return whether the integrity check passed or failed.
+def run_sql_and_do_integrity_check(
+ workspace_root: Path,
+ db_path: Path,
+ statements: Sequence[str],
+) -> bool:
+ # Apply statements (if any) and then run integrity_check on the given db file
+ if statements:
+ sql_input = "\n".join(statements) + "\n"
+ run_cmd = ["cargo", "run", "--quiet", "--", str(db_path)]
+ run_proc = subprocess.run(
+ run_cmd,
+ input=sql_input,
+ text=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=str(workspace_root),
+ check=False,
+ )
+ if run_proc.returncode != 0:
+ raise RuntimeError(f"cargo run failed (code {run_proc.returncode}) for candidate with {len(statements)} statements")
+
+ sqlite_cmd = [
+ "sqlite3",
+ str(db_path),
+ "pragma integrity_check;",
+ ]
+ sqlite_proc = subprocess.run(
+ sqlite_cmd,
+ text=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=str(workspace_root),
+ check=False,
+ )
+
+ output = (sqlite_proc.stdout or "").strip()
+ if sqlite_proc.returncode != 0:
+ raise RuntimeError(f"sqlite3 returned code {sqlite_proc.returncode} with output: {output}")
+
+ return output.lower() == "ok"
+
+
+# Find a minimal subset of SQL statements that still fails integrity check.
+# This is done by binary searching for the minimal prefix.
+# We don't care about scenarios where some prefix P fails and then a larger prefix P' does not fail anymore;
+# We just want to find the minimal prefix that fails in some manner.
+def find_min_failing_prefix(
+ workspace_root: Path,
+ statements: Sequence[str],
+) -> List[str]:
+ # For performance reasons, reuse DB across attempts: keep last passing DB snapshot and apply only deltas.
+ with tempfile.TemporaryDirectory(prefix="limbo-bisect-") as tmpdir:
+ tmpdir_path = Path(tmpdir)
+ db_pass = tmpdir_path / "pass.db"
+ db_work = tmpdir_path / "work.db"
+
+ def delete_db(base: Path) -> None:
+ for suffix in ("", "-wal", "-shm"):
+ p = Path(str(base) + suffix)
+ if p.exists():
+ try:
+ p.unlink()
+ except FileNotFoundError:
+ pass
+
+ def copy_db(src: Path, dst: Path) -> None:
+ delete_db(dst)
+ for suffix in ("", "-wal", "-shm"):
+ s = Path(str(src) + suffix)
+ d = Path(str(dst) + suffix)
+ if s.exists():
+ d.parent.mkdir(parents=True, exist_ok=True)
+ shutil.copy2(s, d)
+
+ last_pass_len = 0
+
+ def check_prefix(k: int) -> bool:
+ nonlocal last_pass_len
+ # Prepare working DB starting from last passing snapshot when possible
+ if last_pass_len > 0 and k > last_pass_len and db_pass.exists():
+ copy_db(db_pass, db_work)
+ delta = statements[last_pass_len:k]
+ integrity_check_ok = run_sql_and_do_integrity_check(workspace_root, db_work, delta)
+ else:
+ delete_db(db_work)
+ initial = statements[:k]
+ integrity_check_ok = run_sql_and_do_integrity_check(workspace_root, db_work, initial)
+
+ sys.stderr.write(f"Test prefix {k} -> {integrity_check_ok}\n")
+ if integrity_check_ok:
+ copy_db(db_work, db_pass)
+ last_pass_len = k
+ return not integrity_check_ok
+
+ # Binary search minimal k such that prefix of length k FAILS.
+ low = 1
+ high = len(statements)
+ answer_k = None
+
+ # Initialize with empty DB as passing baseline
+ delete_db(db_pass)
+ delete_db(db_work)
+
+ while low <= high:
+ mid = (low + high) // 2
+ failed = check_prefix(mid)
+ if failed:
+ answer_k = mid
+ high = mid - 1
+ else:
+ low = mid + 1
+
+ if answer_k is None:
+ raise RuntimeError("Could not find a failing prefix despite full set failing.")
+
+ return list(statements[:answer_k])
+
+def main(argv: List[str]) -> int:
+ parser = argparse.ArgumentParser(
+ description=(
+ "Find a minimal subset of SQL statements that still FAILS pragma integrity_check."
+ )
+ )
+ parser.add_argument(
+ "-i",
+ "--input",
+ type=Path,
+ help="Path to input SQL file (one statement per line)",
+ required=True,
+ )
+ parser.add_argument(
+ "-o",
+ "--output",
+ type=Path,
+ default=Path("bisected.sql"),
+ help="Path to write the minimized failing prefix (default: bisected.sql)",
+ )
+
+ args = parser.parse_args(argv)
+
+ # Assume the script is placed under /scripts/ and use repo root as workspace
+ workspace_root = Path(__file__).resolve().parent.parent
+
+ if not args.input.exists():
+ raise RuntimeError(f"Input file not found: {args.input}")
+
+ statements = read_statements(args.input)
+ if not statements:
+ raise RuntimeError("Input file has no statements after filtering empty lines.")
+
+ # Confirm the full input fails integrity check on a fresh DB
+ with tempfile.TemporaryDirectory(prefix="limbo-bisect-precheck-") as pretmp:
+ pre_db = Path(pretmp) / "check.db"
+ integrity_check_ok = run_sql_and_do_integrity_check(workspace_root, pre_db, statements)
+ if integrity_check_ok:
+ raise RuntimeError("Full input did not FAIL integrity check")
+
+ result_lines = find_min_failing_prefix(workspace_root, statements)
+ summary = (
+ f"Reduced failing subset to {len(result_lines)} of {len(statements)} statements.\n"
+ )
+
+ args.output.parent.mkdir(parents=True, exist_ok=True)
+ with args.output.open("w", encoding="utf-8") as f:
+ for line in result_lines:
+ f.write(line + "\n")
+ sys.stderr.write(f"Wrote minimized failing prefix to {args.output}\n")
+
+ sys.stderr.write(summary)
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
+
+
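
The search maintains the usual bisection invariant: the longest known passing prefix sits below `low`, while `answer_k` tracks the shortest prefix seen to fail, so the loop converges on the minimal failing prefix length. The same idea in a compact Rust sketch, with a `fails(k)` closure standing in for run_sql_and_do_integrity_check:

```rust
// Minimal-failing-prefix bisection. `fails(k)` reports whether applying the
// first k statements corrupts the database. As noted above, monotonicity is
// not guaranteed in general; any failing prefix found is still useful.
fn min_failing_prefix(n: usize, fails: impl Fn(usize) -> bool) -> Option<usize> {
    let (mut low, mut high, mut answer) = (1, n, None);
    while low <= high {
        let mid = (low + high) / 2;
        if fails(mid) {
            answer = Some(mid); // candidate found; try shorter prefixes
            high = mid - 1;
        } else {
            low = mid + 1;
        }
    }
    answer
}
```
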
From 8b7c0334d4177c84e7c8a31ffeb428c8d86ff517 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 15:00:08 +0300
Subject: [PATCH 39/66] RUFFFffff
---
scripts/corruption_bisecter.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/scripts/corruption_bisecter.py b/scripts/corruption_bisecter.py
index 38f512a67..4485beda5 100644
--- a/scripts/corruption_bisecter.py
+++ b/scripts/corruption_bisecter.py
@@ -7,7 +7,8 @@ import subprocess
import sys
import tempfile
from pathlib import Path
-from typing import Callable, List, Literal, Sequence
+from typing import List, Sequence
+
def read_statements(input_path: Path) -> List[str]:
with input_path.open("r", encoding="utf-8", errors="replace") as f:
@@ -36,7 +37,7 @@ def run_sql_and_do_integrity_check(
check=False,
)
if run_proc.returncode != 0:
- raise RuntimeError(f"cargo run failed (code {run_proc.returncode}) for candidate with {len(statements)} statements")
+ raise RuntimeError(f"cargo run failed (code {run_proc.returncode})")
sqlite_cmd = [
"sqlite3",
@@ -63,7 +64,7 @@ def run_sql_and_do_integrity_check(
# This is done by binary searching for the minimal prefix.
# We don't care about scenarios where some prefix P fails and then a larger prefix P' does not fail anymore;
# We just want to find the minimal prefix that fails in some manner.
-def find_min_failing_prefix(
+def find_min_failing_prefix( # noqa: C901
workspace_root: Path,
statements: Sequence[str],
) -> List[str]:
From 4eb61a95275356c320f8a207585ef00e73aa5d47 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 15:30:26 +0300
Subject: [PATCH 40/66] Add gaps to update/delete fuzz test where clauses
---
tests/integration/fuzz/mod.rs | 29 ++++++++++++++++++++---------
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs
index 59134a75b..3cc0b1fe2 100644
--- a/tests/integration/fuzz/mod.rs
+++ b/tests/integration/fuzz/mod.rs
@@ -590,19 +590,30 @@ mod tests {
let predicate_col = rng.random_range(0..num_cols);
let predicate_value = rng.random_range(0..1000);
- let omit_where = rng.random_bool(0.05);
+ enum WhereClause {
+ Normal,
+ Gaps,
+ Omit,
+ }
+
+ let where_kind = match rng.random_range(0..10) {
+ 0..8 => WhereClause::Normal,
+ 8 => WhereClause::Gaps,
+ 9 => WhereClause::Omit,
+ _ => unreachable!(),
+ };
+
+ let where_clause = match where_kind {
+ WhereClause::Normal => format!("WHERE c{predicate_col} {comparison} {predicate_value}"),
+ WhereClause::Gaps => format!("WHERE c{predicate_col} {comparison} {predicate_value} AND c{predicate_col} % 2 = 0"),
+ WhereClause::Omit => "".to_string(),
+ };
let query = if do_update {
let new_y = rng.random_range(0..1000);
- if omit_where {
- format!("UPDATE t SET c{affected_col} = {new_y}")
- } else {
- format!("UPDATE t SET c{affected_col} = {new_y} WHERE c{predicate_col} {comparison} {predicate_value}")
- }
- } else if omit_where {
- "DELETE FROM t".to_string()
+ format!("UPDATE t SET c{affected_col} = {new_y} {where_clause}")
} else {
- format!("DELETE FROM t WHERE c{predicate_col} {comparison} {predicate_value}")
+ format!("DELETE FROM t {where_clause}")
};
dml_statements.push(query.clone());
From 32c4f5ce8180eed61a1ce70cf60289b69accad32 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 15:30:56 +0300
Subject: [PATCH 41/66] Assert that skip_advance is not set in the middle of a
seek
---
core/vdbe/execute.rs | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs
index 4a19e5bf5..b58ddcb41 100644
--- a/core/vdbe/execute.rs
+++ b/core/vdbe/execute.rs
@@ -3012,10 +3012,14 @@ pub fn seek_internal(
// this same logic applies for indexes, but the next/prev record is expected to be found in the parent page's
// divider cell.
+ turso_assert!(
+ !cursor.skip_advance.get(),
+ "skip_advance should not be true in the middle of a seek operation"
+ );
let result = match op {
- // deliberately call get_next_record() instead of next() to avoid triggering skip_advance unintentionally
- SeekOp::GT | SeekOp::GE { .. } => cursor.get_next_record()?,
- SeekOp::LT | SeekOp::LE { .. } => cursor.get_prev_record()?,
+ SeekOp::GT | SeekOp::GE { .. } => cursor.next()?,
+ SeekOp::LT | SeekOp::LE { .. } => cursor.prev()?,
};
match result {
IOResult::Done(found) => {
From e3594d0ae0a7c44b93e64b27db469b703a56b45c Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 15:31:10 +0300
Subject: [PATCH 42/66] make the comment for skip_advance more accurate
---
core/storage/btree.rs | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index a21c0c532..ea415d0ed 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -545,7 +545,9 @@ pub struct BTreeCursor {
move_to_state: MoveToState,
/// Whether the next call to [BTreeCursor::next()] should be a no-op.
/// This is currently only used after a delete operation causes a rebalancing.
- skip_advance: Cell<bool>,
+ /// Advancing is only skipped if the cursor is currently pointing to a valid record
+ /// when next() is called.
+ pub skip_advance: Cell<bool>,
}
/// We store the cell index and cell count for each page in the stack.
From eb2710438c33e2eb27322bb31849a4756c2c2468 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 15:46:52 +0300
Subject: [PATCH 43/66] translate: return parse error for unsupported join
types
---
core/translate/planner.rs | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/core/translate/planner.rs b/core/translate/planner.rs
index ea4cc8f53..3ffc68d14 100644
--- a/core/translate/planner.rs
+++ b/core/translate/planner.rs
@@ -996,6 +996,12 @@ fn parse_join(
let (outer, natural) = match join_operator {
ast::JoinOperator::TypedJoin(Some(join_type)) => {
+ if join_type.contains(JoinType::RIGHT) {
+ crate::bail_parse_error!("RIGHT JOIN is not supported");
+ }
+ if join_type.contains(JoinType::CROSS) {
+ crate::bail_parse_error!("CROSS JOIN is not supported");
+ }
let is_outer = join_type.contains(JoinType::OUTER);
let is_natural = join_type.contains(JoinType::NATURAL);
(is_outer, is_natural)
From 84ecef271822bc43a1bc439295f96587141ad21c Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Wed, 10 Sep 2025 16:10:21 +0300
Subject: [PATCH 44/66] Return parse error for unsupported exprs
---
core/translate/expr.rs | 80 ++++++++++++++++++++++++++++++++++--------
1 file changed, 66 insertions(+), 14 deletions(-)
diff --git a/core/translate/expr.rs b/core/translate/expr.rs
index 1afdabe0e..2cf3b7f96 100644
--- a/core/translate/expr.rs
+++ b/core/translate/expr.rs
@@ -283,8 +283,45 @@ pub fn translate_condition_expr(
resolver: &Resolver,
) -> Result<()> {
match expr {
+ ast::Expr::Register(_) => {
+ crate::bail_parse_error!("Register in WHERE clause is currently unused. Consider removing Resolver::expr_to_reg_cache and using Expr::Register instead");
+ }
+ ast::Expr::Collate(_, _) => {
+ crate::bail_parse_error!("Collate in WHERE clause is not supported");
+ }
+ ast::Expr::DoublyQualified(_, _, _) | ast::Expr::Id(_) | ast::Expr::Qualified(_, _) => {
+ crate::bail_parse_error!(
+ "DoublyQualified/Id/Qualified should have been rewritten in optimizer"
+ );
+ }
+ ast::Expr::Exists(_) => {
+ crate::bail_parse_error!("EXISTS in WHERE clause is not supported");
+ }
+ ast::Expr::Subquery(_) => {
+ crate::bail_parse_error!("Subquery in WHERE clause is not supported");
+ }
+ ast::Expr::InSelect { .. } => {
+ crate::bail_parse_error!("IN (...subquery) in WHERE clause is not supported");
+ }
+ ast::Expr::InTable { .. } => {
+ crate::bail_parse_error!("Table expression in WHERE clause is not supported");
+ }
+ ast::Expr::FunctionCallStar { .. } => {
+ crate::bail_parse_error!("FunctionCallStar in WHERE clause is not supported");
+ }
+ ast::Expr::Raise(_, _) => {
+ crate::bail_parse_error!("RAISE in WHERE clause is not supported");
+ }
ast::Expr::Between { .. } => {
- unreachable!("expression should have been rewritten in optmizer")
+ crate::bail_parse_error!("BETWEEN expression should have been rewritten in optmizer")
+ }
+ ast::Expr::Variable(_) => {
+ crate::bail_parse_error!(
+ "Variable as a direct predicate in WHERE clause is not supported"
+ );
+ }
+ ast::Expr::Name(_) => {
+ crate::bail_parse_error!("Name as a direct predicate in WHERE clause is not supported");
}
ast::Expr::Binary(lhs, ast::Operator::And, rhs) => {
// In a binary AND, never jump to the parent 'jump_target_when_true' label on the first condition, because
@@ -445,7 +482,6 @@ pub fn translate_condition_expr(
translate_expr(program, Some(referenced_tables), expr, expr_reg, resolver)?;
emit_cond_jump(program, condition_metadata, expr_reg);
}
- other => todo!("expression {:?} not implemented", other),
}
Ok(())
}
@@ -641,8 +677,10 @@ pub fn translate_expr(
program.set_collation(Some((collation, true)));
Ok(target_register)
}
- ast::Expr::DoublyQualified(_, _, _) => todo!(),
- ast::Expr::Exists(_) => todo!(),
+ ast::Expr::DoublyQualified(_, _, _) => {
+ crate::bail_parse_error!("DoublyQualified should have been rewritten in optimizer")
+ }
+ ast::Expr::Exists(_) => crate::bail_parse_error!("EXISTS in WHERE clause is not supported"),
ast::Expr::FunctionCall {
name,
distinctness: _,
@@ -1768,7 +1806,9 @@ pub fn translate_expr(
Func::AlterTable(_) => unreachable!(),
}
}
- ast::Expr::FunctionCallStar { .. } => todo!("{:?}", &expr),
+ ast::Expr::FunctionCallStar { .. } => {
+ crate::bail_parse_error!("FunctionCallStar in WHERE clause is not supported")
+ }
ast::Expr::Id(id) => {
// Treat double-quoted identifiers as string literals (SQLite compatibility)
program.emit_insn(Insn::String8 {
@@ -1979,8 +2019,12 @@ pub fn translate_expr(
Ok(result_reg)
}
- ast::Expr::InSelect { .. } => todo!(),
- ast::Expr::InTable { .. } => todo!(),
+ ast::Expr::InSelect { .. } => {
+ crate::bail_parse_error!("IN (...subquery) in WHERE clause is not supported")
+ }
+ ast::Expr::InTable { .. } => {
+ crate::bail_parse_error!("Table expression in WHERE clause is not supported")
+ }
ast::Expr::IsNull(expr) => {
let reg = program.alloc_register();
translate_expr(program, referenced_tables, expr, reg, resolver)?;
@@ -2016,7 +2060,9 @@ pub fn translate_expr(
Ok(target_register)
}
ast::Expr::Literal(lit) => emit_literal(program, lit, target_register),
- ast::Expr::Name(_) => todo!(),
+ ast::Expr::Name(_) => {
+ crate::bail_parse_error!("ast::Expr::Name in WHERE clause is not supported")
+ }
ast::Expr::NotNull(expr) => {
let reg = program.alloc_register();
translate_expr(program, referenced_tables, expr, reg, resolver)?;
@@ -2051,15 +2097,19 @@ pub fn translate_expr(
} else {
// Parenthesized expressions with multiple arguments are reserved for special cases
// like `(a, b) IN ((1, 2), (3, 4))`.
- todo!("TODO: parenthesized expression with multiple arguments not yet supported");
+ crate::bail_parse_error!(
+ "TODO: parenthesized expression with multiple arguments not yet supported"
+ );
}
Ok(target_register)
}
ast::Expr::Qualified(_, _) => {
unreachable!("Qualified should be resolved to a Column before translation")
}
- ast::Expr::Raise(_, _) => todo!(),
- ast::Expr::Subquery(_) => todo!(),
+ ast::Expr::Raise(_, _) => crate::bail_parse_error!("RAISE is not supported"),
+ ast::Expr::Subquery(_) => {
+ crate::bail_parse_error!("Subquery in WHERE clause is not supported")
+ }
ast::Expr::Unary(op, expr) => match (op, expr.as_ref()) {
(UnaryOperator::Positive, expr) => {
translate_expr(program, referenced_tables, expr, target_register, resolver)
@@ -2848,8 +2898,8 @@ fn translate_like_base(
},
});
}
- ast::LikeOperator::Match => todo!(),
- ast::LikeOperator::Regexp => todo!(),
+ ast::LikeOperator::Match => crate::bail_parse_error!("MATCH in LIKE is not supported"),
+ ast::LikeOperator::Regexp => crate::bail_parse_error!("REGEXP in LIKE is not supported"),
}
Ok(target_register)
@@ -3582,7 +3632,9 @@ pub fn emit_literal(
});
Ok(target_register)
}
- ast::Literal::Keyword(_) => todo!(),
+ ast::Literal::Keyword(_) => {
+ crate::bail_parse_error!("Keyword in WHERE clause is not supported")
+ }
ast::Literal::Null => {
program.emit_insn(Insn::Null {
dest: target_register,
From 5adb5862828c92910c1c6bdc0f5959e167f78edc Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Wed, 10 Sep 2025 16:39:11 +0300
Subject: [PATCH 45/66] s/2024/2025/
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 405138435..5135aef17 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,6 @@
# Changelog
-## 0.1.5 -- 2024-09-10
+## 0.1.5 -- 2025-09-10
### Added
From b93ad749a9d50b5fb85a52898a6617438bda5a48 Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Tue, 9 Sep 2025 21:40:34 -0400
Subject: [PATCH 46/66] Remove some traces in super hot paths in btree
---
core/storage/btree.rs | 31 ++++++++++++++++++-------------
core/vdbe/execute.rs | 1 +
2 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index 0c2baaffd..1098a0b39 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -3378,13 +3378,15 @@ impl BTreeCursor {
"left pointer is the same as parent page id"
);
#[cfg(debug_assertions)]
- pages_pointed_to.insert(left_pointer);
- tracing::debug!(
- "balance_non_root(insert_divider_cell, first_divider_cell={}, divider_cell={}, left_pointer={})",
- balance_info.first_divider_cell,
- sibling_page_idx,
- left_pointer
- );
+ {
+ pages_pointed_to.insert(left_pointer);
+ tracing::debug!(
+ "balance_non_root(insert_divider_cell, first_divider_cell={}, divider_cell={}, left_pointer={})",
+ balance_info.first_divider_cell,
+ sibling_page_idx,
+ left_pointer
+ );
+ }
turso_assert!(
left_pointer == page.get().id as u32,
"left pointer is not the same as page id"
@@ -4379,7 +4381,7 @@ impl BTreeCursor {
}
}
- #[instrument(skip(self), level = Level::DEBUG)]
+ #[instrument(skip(self, key), level = Level::DEBUG)]
pub fn seek(&mut self, key: SeekKey<'_>, op: SeekOp) -> Result> {
if let Some(mv_cursor) = &self.mv_cursor {
let mut mv_cursor = mv_cursor.borrow_mut();
@@ -4480,7 +4482,7 @@ impl BTreeCursor {
Ok(IOResult::Done(Some(record_ref)))
}
- #[instrument(skip(self), level = Level::DEBUG)]
+ #[instrument(skip_all, level = Level::DEBUG)]
pub fn insert(&mut self, key: &BTreeKey) -> Result> {
tracing::debug!(valid_state = ?self.valid_state, cursor_state = ?self.state, is_write_in_progress = self.is_write_in_progress());
match &self.mv_cursor {
@@ -6177,10 +6179,13 @@ impl PageStack {
#[instrument(skip(self), level = Level::DEBUG, name = "pagestack::retreat")]
fn retreat(&mut self) {
let current = self.current();
- tracing::trace!(
- curr_cell_index = self.node_states[current].cell_idx,
- node_states = ?self.node_states.iter().map(|state| state.cell_idx).collect::>(),
- );
+ #[cfg(debug_assertions)]
+ {
+ tracing::trace!(
+ curr_cell_index = self.node_states[current].cell_idx,
+ node_states = ?self.node_states.iter().map(|state| state.cell_idx).collect::>(),
+ );
+ }
self.node_states[current].cell_idx -= 1;
}
diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs
index 83e164adc..32ca663e5 100644
--- a/core/vdbe/execute.rs
+++ b/core/vdbe/execute.rs
@@ -5707,6 +5707,7 @@ pub fn op_idx_delete(
);
loop {
+ #[cfg(debug_assertions)]
tracing::debug!(
"op_idx_delete(cursor_id={}, start_reg={}, num_regs={}, rootpage={}, state={:?})",
cursor_id,
From f117b2c966d99fc7f23ec20d732cca9dc93cadc8 Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Wed, 10 Sep 2025 09:50:56 -0400
Subject: [PATCH 47/66] Remove unused lru dependency
---
Cargo.lock | 18 ------------------
core/Cargo.toml | 1 -
2 files changed, 19 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index e72c7b5f2..6f97d2837 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -104,12 +104,6 @@ dependencies = [
"backtrace",
]
-[[package]]
-name = "allocator-api2"
-version = "0.2.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
-
[[package]]
name = "anarchist-readable-name-generator-lib"
version = "0.1.2"
@@ -1554,8 +1548,6 @@ version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
dependencies = [
- "allocator-api2",
- "equivalent",
"foldhash",
]
@@ -2281,15 +2273,6 @@ version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
-[[package]]
-name = "lru"
-version = "0.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f8cc7106155f10bdf99a6f379688f543ad6596a415375b36a59a054ceda1198"
-dependencies = [
- "hashbrown 0.15.2",
-]
-
[[package]]
name = "matchers"
version = "0.1.0"
@@ -4252,7 +4235,6 @@ dependencies = [
"libc",
"libloading",
"libm",
- "lru",
"memory-stats",
"miette",
"mimalloc",
diff --git a/core/Cargo.toml b/core/Cargo.toml
index b53fcfbb0..88c209e44 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -101,7 +101,6 @@ rand = "0.8.5" # Required for quickcheck
rand_chacha = "0.9.0"
env_logger = "0.11.6"
test-log = { version = "0.2.17", features = ["trace"] }
-lru = "0.14.0"
sorted-vec = "0.8.6"
mimalloc = { version = "0.1.46", default-features = false }
From ba1ed72ed8684e12ce45616bf566617944d7e566 Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Wed, 10 Sep 2025 09:50:41 -0400
Subject: [PATCH 48/66] Add tracing_release feature for benchmarks to compile
tracing macros to no-ops
---
Makefile | 36 +++++++++++++++++-----------------
bindings/dart/rust/Cargo.toml | 3 +++
bindings/java/Cargo.toml | 3 ++-
bindings/javascript/Cargo.toml | 2 +-
bindings/python/Cargo.toml | 1 +
bindings/rust/Cargo.toml | 1 +
cli/Cargo.toml | 1 +
core/Cargo.toml | 1 +
8 files changed, 28 insertions(+), 20 deletions(-)
diff --git a/Makefile b/Makefile
index 30185f9e8..5275d201f 100644
--- a/Makefile
+++ b/Makefile
@@ -9,7 +9,7 @@ MINIMUM_TCL_VERSION := 8.6
SQLITE_EXEC ?= scripts/limbo-sqlite3
RUST_LOG := off
-all: check-rust-version limbo
+all: check-rust-version build
.PHONY: all
check-rust-version:
@@ -39,13 +39,13 @@ check-tcl-version:
| tclsh
.PHONY: check-tcl-version
-limbo:
+build: check-rust-version
cargo build
-.PHONY: limbo
+.PHONY: build
-limbo-c:
+turso-c:
cargo cbuild
-.PHONY: limbo-c
+.PHONY: turso-c
uv-sync:
uv sync --all-packages
@@ -55,14 +55,14 @@ uv-sync-test:
uv sync --all-extras --dev --package turso_test
.PHONE: uv-sync
-test: limbo uv-sync-test test-compat test-alter-column test-vector test-sqlite3 test-shell test-memory test-write test-update test-constraint test-collate test-extensions test-mvcc test-matviews
+test: build uv-sync-test test-compat test-alter-column test-vector test-sqlite3 test-shell test-memory test-write test-update test-constraint test-collate test-extensions test-mvcc test-matviews
.PHONY: test
-test-extensions: limbo uv-sync-test
+test-extensions: build uv-sync-test
RUST_LOG=$(RUST_LOG) uv run --project limbo_test test-extensions
.PHONY: test-extensions
-test-shell: limbo uv-sync-test
+test-shell: build uv-sync-test
RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-shell
.PHONY: test-shell
@@ -100,11 +100,11 @@ test-json:
RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) ./testing/json.test
.PHONY: test-json
-test-memory: limbo uv-sync-test
+test-memory: build uv-sync-test
RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-memory
.PHONY: test-memory
-test-write: limbo uv-sync-test
+test-write: build uv-sync-test
@if [ "$(SQLITE_EXEC)" != "scripts/limbo-sqlite3" ]; then \
RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-write; \
else \
@@ -112,7 +112,7 @@ test-write: limbo uv-sync-test
fi
.PHONY: test-write
-test-update: limbo uv-sync-test
+test-update: build uv-sync-test
@if [ "$(SQLITE_EXEC)" != "scripts/limbo-sqlite3" ]; then \
RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-update; \
else \
@@ -120,7 +120,7 @@ test-update: limbo uv-sync-test
fi
.PHONY: test-update
-test-collate: limbo uv-sync-test
+test-collate: build uv-sync-test
@if [ "$(SQLITE_EXEC)" != "scripts/limbo-sqlite3" ]; then \
RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-collate; \
else \
@@ -128,7 +128,7 @@ test-collate: limbo uv-sync-test
fi
.PHONY: test-collate
-test-constraint: limbo uv-sync-test
+test-constraint: build uv-sync-test
@if [ "$(SQLITE_EXEC)" != "scripts/limbo-sqlite3" ]; then \
RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-constraint; \
else \
@@ -136,22 +136,22 @@ test-constraint: limbo uv-sync-test
fi
.PHONY: test-constraint
-test-mvcc: limbo uv-sync-test
+test-mvcc: build uv-sync-test
RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-mvcc;
.PHONY: test-mvcc
-bench-vfs: uv-sync-test
- cargo build --release
+bench-vfs: uv-sync-test build-release
RUST_LOG=$(RUST_LOG) uv run --project limbo_test bench-vfs "$(SQL)" "$(N)"
-bench-sqlite: uv-sync-test
- cargo build --release
+bench-sqlite: uv-sync-test build-release
RUST_LOG=$(RUST_LOG) uv run --project limbo_test bench-sqlite "$(VFS)" "$(SQL)" "$(N)"
clickbench:
./perf/clickbench/benchmark.sh
.PHONY: clickbench
+build-release: check-rust-version
+ cargo build --bin tursodb --release --features=tracing_release
bench-exclude-tpc-h:
@benchmarks=$$(cargo bench --bench 2>&1 | grep -A 1000 '^Available bench targets:' | grep -v '^Available bench targets:' | grep -v '^ *$$' | grep -v 'tpc_h_benchmark' | xargs -I {} printf -- "--bench %s " {}); \
diff --git a/bindings/dart/rust/Cargo.toml b/bindings/dart/rust/Cargo.toml
index ace4c80a7..cbc0c6fbb 100644
--- a/bindings/dart/rust/Cargo.toml
+++ b/bindings/dart/rust/Cargo.toml
@@ -7,6 +7,9 @@ license.workspace = true
repository.workspace = true
publish = false
+[features]
+release = ["turso_core/tracing_release"]
+
[lib]
crate-type = ["cdylib", "staticlib"]
diff --git a/bindings/java/Cargo.toml b/bindings/java/Cargo.toml
index b0f69db94..93858e0a1 100644
--- a/bindings/java/Cargo.toml
+++ b/bindings/java/Cargo.toml
@@ -6,7 +6,8 @@ edition.workspace = true
license.workspace = true
repository.workspace = true
publish = false
-
+[features]
+tracing_release = ["turso_core/tracing_release"]
[lib]
name = "_turso_java"
crate-type = ["cdylib"]
diff --git a/bindings/javascript/Cargo.toml b/bindings/javascript/Cargo.toml
index a3b2384fe..836780122 100644
--- a/bindings/javascript/Cargo.toml
+++ b/bindings/javascript/Cargo.toml
@@ -20,6 +20,6 @@ tracing.workspace = true
[features]
encryption = ["turso_core/encryption"]
browser = []
-
+tracing_release = ["turso_core/tracing_release"]
[build-dependencies]
napi-build = "2.2.3"
diff --git a/bindings/python/Cargo.toml b/bindings/python/Cargo.toml
index 46023b643..2ffc62f8a 100644
--- a/bindings/python/Cargo.toml
+++ b/bindings/python/Cargo.toml
@@ -14,6 +14,7 @@ crate-type = ["cdylib"]
[features]
# must be enabled when building with `cargo build`, maturin enables this automatically
extension-module = ["pyo3/extension-module"]
+tracing_release = ["turso_core/tracing_release"]
[dependencies]
anyhow = "1.0"
diff --git a/bindings/rust/Cargo.toml b/bindings/rust/Cargo.toml
index 0f35d0b2f..63be50f42 100644
--- a/bindings/rust/Cargo.toml
+++ b/bindings/rust/Cargo.toml
@@ -14,6 +14,7 @@ default = ["experimental_indexes"]
conn_raw_api = ["turso_core/conn_raw_api"]
experimental_indexes = []
antithesis = ["turso_core/antithesis"]
+tracing_release = ["turso_core/tracing_release"]
[dependencies]
turso_core = { workspace = true, features = ["io_uring"] }
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index 74a37cd47..92f384c6f 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -51,6 +51,7 @@ mimalloc = { workspace = true }
[features]
default = ["io_uring"]
io_uring = ["turso_core/io_uring"]
+tracing_release = ["turso_core/tracing_release"]
[build-dependencies]
syntect = { git = "https://github.com/trishume/syntect.git", rev = "64644ffe064457265cbcee12a0c1baf9485ba6ee" }
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 88c209e44..fb79398c4 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -16,6 +16,7 @@ path = "lib.rs"
[features]
default = ["fs", "uuid", "time", "json", "series"]
antithesis = ["dep:antithesis_sdk"]
+tracing_release = ["tracing/release_max_level_info"]
conn_raw_api = []
fs = ["turso_ext/vfs"]
json = []
From 5caf9a26401980054da460efef559a85de9b02ca Mon Sep 17 00:00:00 2001
From: TcMits
Date: Thu, 11 Sep 2025 00:14:38 +0700
Subject: [PATCH 49/66] make it safer + clippy
---
cli/app.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cli/app.rs b/cli/app.rs
index bcaf6bbf1..c6be2336b 100644
--- a/cli/app.rs
+++ b/cli/app.rs
@@ -575,7 +575,7 @@ impl Limbo {
// if the address is not the same, the string has been reallocated,
// so we just drop the part we took earlier
- if ptr.addr() != old_address {
+ if ptr.addr() != old_address || !app.input_buff.is_empty() {
return;
}
From d55026f84f0a0d4bb0cc8b65c3ac697336ccf8cc Mon Sep 17 00:00:00 2001
From: Nikita Sivukhin
Date: Wed, 10 Sep 2025 03:01:37 +0400
Subject: [PATCH 50/66] opfs for sync in one commit!
---
Cargo.toml | 2 +-
.../javascript/examples/browser/index.html | 272 ++
.../javascript/examples/browser/package.json | 19 +
.../examples/browser/vite.config.js | 22 +
bindings/javascript/examples/wasm/index.html | 34 +
.../javascript/examples/wasm/package.json | 19 +
.../javascript/examples/wasm/vite.config.js | 26 +
bindings/javascript/package-lock.json | 19 +-
bindings/javascript/package.json | 6 +-
.../packages/browser-common/README.md | 8 +
.../packages/browser-common/index.ts | 239 ++
.../packages/browser-common/package.json | 25 +
.../packages/browser-common/tsconfig.json | 18 +
bindings/javascript/packages/browser/index.js | 13 +-
.../javascript/packages/browser/package.json | 1 +
.../javascript/packages/browser/promise.ts | 64 +-
.../javascript/packages/browser/tsconfig.json | 1 +
.../javascript/packages/browser/worker.mjs | 119 +-
bindings/javascript/packages/common/types.ts | 8 +-
.../javascript/packages/native/index.d.ts | 16 +
bindings/javascript/packages/native/index.js | 6 +-
bindings/javascript/replace.sh | 11 +
bindings/javascript/src/browser.rs | 10 +-
bindings/javascript/src/lib.rs | 24 +-
.../javascript/sync}/Cargo.toml | 3 +
.../javascript/sync}/README.md | 0
.../javascript/sync}/build.rs | 0
.../sync/packages/browser/README.md | 124 +
.../javascript/sync/packages/browser/index.js | 24 +-
.../sync/packages/browser/package.json | 46 +
.../sync/packages/browser/promise.test.ts | 281 ++
.../sync/packages/browser/promise.ts | 113 +
.../sync/packages/browser}/tsconfig.json | 8 +-
.../sync/packages/browser/vitest.config.ts | 23 +
.../sync/packages/browser/worker.mjs | 27 +-
.../javascript/sync/packages/common/README.md | 8 +
.../javascript/sync/packages/common/index.ts | 5 +
.../sync/packages/common/package.json | 25 +
.../javascript/sync/packages/common/run.ts | 127 +
.../sync/packages/common/tsconfig.json | 17 +
.../javascript/sync/packages/common/types.ts | 50 +
.../javascript/sync/packages/native/README.md | 125 +
.../sync/packages/native}/index.d.ts | 65 +-
.../javascript/sync/packages/native/index.js | 520 ++++
.../sync/packages/native/package.json | 53 +
.../sync/packages/native/promise.test.ts | 288 ++
.../sync/packages/native/promise.ts | 104 +
.../sync/packages/native/tsconfig.json | 21 +
bindings/javascript/sync/src/generator.rs | 102 +
.../javascript/sync}/src/js_protocol_io.rs | 126 +-
.../javascript/sync}/src/lib.rs | 218 +-
bindings/javascript/yarn.lock | 9 +
sync/engine/src/database_replay_generator.rs | 20 +-
sync/engine/src/database_sync_engine.rs | 244 +-
sync/engine/src/database_sync_operations.rs | 293 +-
sync/engine/src/database_tape.rs | 53 +-
sync/engine/src/protocol_io.rs | 26 +-
sync/engine/src/types.rs | 45 +-
sync/javascript/.github/renovate.json | 20 -
sync/javascript/.gitignore | 138 -
sync/javascript/.yarn/releases/yarn-4.9.2.cjs | 942 ------
sync/javascript/.yarnrc.yml | 5 -
sync/javascript/Makefile | 20 -
sync/javascript/browser.js | 1 -
sync/javascript/examples/cloud/example.js | 33 -
.../examples/cloud/package-lock.json | 39 -
sync/javascript/examples/cloud/package.json | 15 -
sync/javascript/index.js | 406 ---
sync/javascript/package.browser.json | 57 -
sync/javascript/package.json | 63 -
sync/javascript/src/generator.rs | 59 -
sync/javascript/sync_engine.ts | 174 --
sync/javascript/turso-sync-js.wasi.cjs | 120 -
sync/javascript/wasi-worker.mjs | 63 -
sync/javascript/yarn.lock | 2758 -----------------
75 files changed, 3553 insertions(+), 5535 deletions(-)
create mode 100644 bindings/javascript/examples/browser/index.html
create mode 100644 bindings/javascript/examples/browser/package.json
create mode 100644 bindings/javascript/examples/browser/vite.config.js
create mode 100644 bindings/javascript/examples/wasm/index.html
create mode 100644 bindings/javascript/examples/wasm/package.json
create mode 100644 bindings/javascript/examples/wasm/vite.config.js
create mode 100644 bindings/javascript/packages/browser-common/README.md
create mode 100644 bindings/javascript/packages/browser-common/index.ts
create mode 100644 bindings/javascript/packages/browser-common/package.json
create mode 100644 bindings/javascript/packages/browser-common/tsconfig.json
create mode 100644 bindings/javascript/replace.sh
rename {sync/javascript => bindings/javascript/sync}/Cargo.toml (93%)
rename {sync/javascript => bindings/javascript/sync}/README.md (100%)
rename {sync/javascript => bindings/javascript/sync}/build.rs (100%)
create mode 100644 bindings/javascript/sync/packages/browser/README.md
rename sync/javascript/turso-sync-js.wasi-browser.js => bindings/javascript/sync/packages/browser/index.js (68%)
create mode 100644 bindings/javascript/sync/packages/browser/package.json
create mode 100644 bindings/javascript/sync/packages/browser/promise.test.ts
create mode 100644 bindings/javascript/sync/packages/browser/promise.ts
rename {sync/javascript => bindings/javascript/sync/packages/browser}/tsconfig.json (79%)
create mode 100644 bindings/javascript/sync/packages/browser/vitest.config.ts
rename sync/javascript/wasi-worker-browser.mjs => bindings/javascript/sync/packages/browser/worker.mjs (50%)
create mode 100644 bindings/javascript/sync/packages/common/README.md
create mode 100644 bindings/javascript/sync/packages/common/index.ts
create mode 100644 bindings/javascript/sync/packages/common/package.json
create mode 100644 bindings/javascript/sync/packages/common/run.ts
create mode 100644 bindings/javascript/sync/packages/common/tsconfig.json
create mode 100644 bindings/javascript/sync/packages/common/types.ts
create mode 100644 bindings/javascript/sync/packages/native/README.md
rename {sync/javascript => bindings/javascript/sync/packages/native}/index.d.ts (74%)
create mode 100644 bindings/javascript/sync/packages/native/index.js
create mode 100644 bindings/javascript/sync/packages/native/package.json
create mode 100644 bindings/javascript/sync/packages/native/promise.test.ts
create mode 100644 bindings/javascript/sync/packages/native/promise.ts
create mode 100644 bindings/javascript/sync/packages/native/tsconfig.json
create mode 100644 bindings/javascript/sync/src/generator.rs
rename {sync/javascript => bindings/javascript/sync}/src/js_protocol_io.rs (53%)
rename {sync/javascript => bindings/javascript/sync}/src/lib.rs (68%)
delete mode 100644 sync/javascript/.github/renovate.json
delete mode 100644 sync/javascript/.gitignore
delete mode 100755 sync/javascript/.yarn/releases/yarn-4.9.2.cjs
delete mode 100644 sync/javascript/.yarnrc.yml
delete mode 100644 sync/javascript/Makefile
delete mode 100644 sync/javascript/browser.js
delete mode 100644 sync/javascript/examples/cloud/example.js
delete mode 100644 sync/javascript/examples/cloud/package-lock.json
delete mode 100644 sync/javascript/examples/cloud/package.json
delete mode 100644 sync/javascript/index.js
delete mode 100644 sync/javascript/package.browser.json
delete mode 100644 sync/javascript/package.json
delete mode 100644 sync/javascript/src/generator.rs
delete mode 100644 sync/javascript/sync_engine.ts
delete mode 100644 sync/javascript/turso-sync-js.wasi.cjs
delete mode 100644 sync/javascript/wasi-worker.mjs
delete mode 100644 sync/javascript/yarn.lock
diff --git a/Cargo.toml b/Cargo.toml
index 28059424e..8672dcdc2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,6 +6,7 @@ members = [
"bindings/dart/rust",
"bindings/java",
"bindings/javascript",
+ "bindings/javascript/sync",
"bindings/python",
"bindings/rust",
"cli",
@@ -27,7 +28,6 @@ members = [
"vendored/sqlite3-parser/sqlparser_bench",
"parser",
"sync/engine",
- "sync/javascript",
"sql_generation",
]
exclude = ["perf/latency/limbo"]
diff --git a/bindings/javascript/examples/browser/index.html b/bindings/javascript/examples/browser/index.html
new file mode 100644
index 000000000..540e86750
--- /dev/null
+++ b/bindings/javascript/examples/browser/index.html
@@ -0,0 +1,272 @@
+<!-- 272-line demo page "Brutal DB Viewer"; HTML markup elided in this excerpt -->
diff --git a/bindings/javascript/examples/browser/package.json b/bindings/javascript/examples/browser/package.json
new file mode 100644
index 000000000..f77eb2b32
--- /dev/null
+++ b/bindings/javascript/examples/browser/package.json
@@ -0,0 +1,19 @@
+{
+ "name": "wasm",
+ "version": "1.0.0",
+ "main": "index.js",
+ "scripts": {
+ "dev": "vite",
+ "build": "vite build",
+ "preview": "vite preview"
+ },
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "devDependencies": {
+ "vite": "^7.1.4"
+ },
+ "dependencies": {
+ "@tursodatabase/database-browser": "../../browser"
+ }
+}
diff --git a/bindings/javascript/examples/browser/vite.config.js b/bindings/javascript/examples/browser/vite.config.js
new file mode 100644
index 000000000..3d37c5172
--- /dev/null
+++ b/bindings/javascript/examples/browser/vite.config.js
@@ -0,0 +1,22 @@
+import { defineConfig, searchForWorkspaceRoot } from 'vite'
+
+export default defineConfig({
+  // `define` is a top-level Vite option, not a server option; keep it at the
+  // root so the build-time string replacement actually happens
+  define: {
+    'process.env.NODE_DEBUG_NATIVE': 'false', // string replace at build-time
+  },
+  server: {
+    fs: {
+      allow: ['.', '../../']
+    },
+    headers: {
+      'Cross-Origin-Opener-Policy': 'same-origin',
+      'Cross-Origin-Embedder-Policy': 'require-corp',
+    }
+  },
+  optimizeDeps: {
+    esbuildOptions: {
+      define: { 'process.env.NODE_DEBUG_NATIVE': 'false' },
+    },
+  },
+})
diff --git a/bindings/javascript/examples/wasm/index.html b/bindings/javascript/examples/wasm/index.html
new file mode 100644
index 000000000..efd7b0b7b
--- /dev/null
+++ b/bindings/javascript/examples/wasm/index.html
@@ -0,0 +1,34 @@
+<!-- 34-line demo page; HTML markup elided in this excerpt -->
diff --git a/bindings/javascript/examples/wasm/package.json b/bindings/javascript/examples/wasm/package.json
new file mode 100644
index 000000000..3cd63f705
--- /dev/null
+++ b/bindings/javascript/examples/wasm/package.json
@@ -0,0 +1,19 @@
+{
+ "name": "wasm",
+ "version": "1.0.0",
+ "main": "index.js",
+ "scripts": {
+ "dev": "vite",
+ "build": "vite build",
+ "preview": "vite preview"
+ },
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "devDependencies": {
+ "vite": "^7.1.4"
+ },
+ "dependencies": {
+ "@tursodatabase/database": "../.."
+ }
+}
diff --git a/bindings/javascript/examples/wasm/vite.config.js b/bindings/javascript/examples/wasm/vite.config.js
new file mode 100644
index 000000000..299f21d60
--- /dev/null
+++ b/bindings/javascript/examples/wasm/vite.config.js
@@ -0,0 +1,26 @@
+import { defineConfig, searchForWorkspaceRoot } from 'vite'
+
+export default defineConfig({
+ build: {
+ minify: false, // Set this to false to disable minification
+ },
+ resolve: {
+ alias: {
+ '@tursodatabase/database-wasm32-wasi': '../../turso.wasi-browser.js'
+ },
+ },
+ server: {
+ fs: {
+ allow: ['.']
+ },
+ headers: {
+ 'Cross-Origin-Opener-Policy': 'same-origin',
+ 'Cross-Origin-Embedder-Policy': 'require-corp',
+ }
+ },
+ optimizeDeps: {
+ exclude: [
+ "@tursodatabase/database-wasm32-wasi",
+ ]
+ },
+})
diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json
index d0dd32d5d..5185f2c63 100644
--- a/bindings/javascript/package-lock.json
+++ b/bindings/javascript/package-lock.json
@@ -9,7 +9,11 @@
"workspaces": [
"packages/common",
"packages/native",
- "packages/browser"
+ "packages/browser",
+ "packages/browser-common",
+ "packages/sync/common",
+ "packages/sync/native",
+ "packages/sync/browser"
]
},
"node_modules/@babel/code-frame": {
@@ -1103,6 +1107,10 @@
"resolved": "packages/browser",
"link": true
},
+ "node_modules/@tursodatabase/database-browser-common": {
+ "resolved": "packages/browser-common",
+ "link": true
+ },
"node_modules/@tursodatabase/database-common": {
"resolved": "packages/common",
"link": true
@@ -2489,6 +2497,7 @@
"license": "MIT",
"dependencies": {
"@napi-rs/wasm-runtime": "^1.0.3",
+ "@tursodatabase/database-browser-common": "^0.1.5",
"@tursodatabase/database-common": "^0.1.5"
},
"devDependencies": {
@@ -2499,6 +2508,14 @@
"vitest": "^3.2.4"
}
},
+ "packages/browser-common": {
+ "name": "@tursodatabase/database-browser-common",
+ "version": "0.1.5",
+ "license": "MIT",
+ "devDependencies": {
+ "typescript": "^5.9.2"
+ }
+ },
"packages/common": {
"name": "@tursodatabase/database-common",
"version": "0.1.5",
diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json
index 0145b2468..8ee630ce4 100644
--- a/bindings/javascript/package.json
+++ b/bindings/javascript/package.json
@@ -7,7 +7,11 @@
"workspaces": [
"packages/common",
"packages/native",
- "packages/browser"
+ "packages/browser",
+ "packages/browser-common",
+ "packages/sync/common",
+ "packages/sync/native",
+ "packages/sync/browser"
],
"version": "0.1.5"
}
diff --git a/bindings/javascript/packages/browser-common/README.md b/bindings/javascript/packages/browser-common/README.md
new file mode 100644
index 000000000..179123f7f
--- /dev/null
+++ b/bindings/javascript/packages/browser-common/README.md
@@ -0,0 +1,8 @@
+## About
+
+This package contains the common JS code for the Turso embedded database, shared between the final builds for Node and Browser.
+
+Do not use this package directly; use `@tursodatabase/database` or `@tursodatabase/database-browser` instead.
+
+> **⚠️ Warning:** This software is ALPHA, only use for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
+
diff --git a/bindings/javascript/packages/browser-common/index.ts b/bindings/javascript/packages/browser-common/index.ts
new file mode 100644
index 000000000..9fab8790f
--- /dev/null
+++ b/bindings/javascript/packages/browser-common/index.ts
@@ -0,0 +1,239 @@
+function getUint8ArrayFromMemory(memory: WebAssembly.Memory, ptr: number, len: number): Uint8Array {
+ ptr = ptr >>> 0;
+ return new Uint8Array(memory.buffer).subarray(ptr, ptr + len);
+}
+
+function getStringFromMemory(memory: WebAssembly.Memory, ptr: number, len: number): string {
+ const shared = getUint8ArrayFromMemory(memory, ptr, len);
+ // TextDecoder cannot decode views backed by a SharedArrayBuffer, so copy the
+ // bytes into a regular Uint8Array before decoding.
+ const copy = new Uint8Array(shared.length);
+ copy.set(shared);
+ const decoder = new TextDecoder('utf-8');
+ return decoder.decode(copy);
+}
+
+interface BrowserImports {
+ is_web_worker(): boolean;
+ lookup_file(ptr: number, len: number): number;
+ read(handle: number, ptr: number, len: number, offset: number): number;
+ write(handle: number, ptr: number, len: number, offset: number): number;
+ sync(handle: number): number;
+ truncate(handle: number, len: number): number;
+ size(handle: number): number;
+}
+
+function panic(name: string): never {
+ throw new Error(`method ${name} must be invoked only from the main thread`);
+}
+
+const MainDummyImports: BrowserImports = {
+ is_web_worker: function (): boolean {
+ return false;
+ },
+ lookup_file: function (ptr: number, len: number): number {
+ panic("lookup_file")
+ },
+ read: function (handle: number, ptr: number, len: number, offset: number): number {
+ panic("read")
+ },
+ write: function (handle: number, ptr: number, len: number, offset: number): number {
+ panic("write")
+ },
+ sync: function (handle: number): number {
+ panic("sync")
+ },
+ truncate: function (handle: number, len: number): number {
+ panic("truncate")
+ },
+ size: function (handle: number): number {
+ panic("size")
+ }
+};
+
+function workerImports(opfs: OpfsDirectory, memory: WebAssembly.Memory): BrowserImports {
+ return {
+ is_web_worker: function (): boolean {
+ return true;
+ },
+ lookup_file: function (ptr: number, len: number): number {
+ try {
+ const handle = opfs.lookupFileHandle(getStringFromMemory(memory, ptr, len));
+ return handle == null ? -404 : handle;
+ } catch (e) {
+ return -1;
+ }
+ },
+ read: function (handle: number, ptr: number, len: number, offset: number): number {
+ try {
+ return opfs.read(handle, getUint8ArrayFromMemory(memory, ptr, len), offset);
+ } catch (e) {
+ return -1;
+ }
+ },
+ write: function (handle: number, ptr: number, len: number, offset: number): number {
+ try {
+ return opfs.write(handle, getUint8ArrayFromMemory(memory, ptr, len), offset)
+ } catch (e) {
+ return -1;
+ }
+ },
+ sync: function (handle: number): number {
+ try {
+ opfs.sync(handle);
+ return 0;
+ } catch (e) {
+ return -1;
+ }
+ },
+ truncate: function (handle: number, len: number): number {
+ try {
+ opfs.truncate(handle, len);
+ return 0;
+ } catch (e) {
+ return -1;
+ }
+ },
+ size: function (handle: number): number {
+ try {
+ return opfs.size(handle);
+ } catch (e) {
+ return -1;
+ }
+ }
+ }
+}
+
+class OpfsDirectory {
+ fileByPath: Map<string, { handle: number, sync: FileSystemSyncAccessHandle }>;
+ fileByHandle: Map<number, FileSystemSyncAccessHandle>;
+ fileHandleNo: number;
+
+ constructor() {
+ this.fileByPath = new Map();
+ this.fileByHandle = new Map();
+ this.fileHandleNo = 0;
+ }
+
+ async registerFile(path: string) {
+ if (this.fileByPath.has(path)) {
+ return;
+ }
+ const opfsRoot = await navigator.storage.getDirectory();
+ const opfsHandle = await opfsRoot.getFileHandle(path, { create: true });
+ const opfsSync = await opfsHandle.createSyncAccessHandle();
+ this.fileHandleNo += 1;
+ this.fileByPath.set(path, { handle: this.fileHandleNo, sync: opfsSync });
+ this.fileByHandle.set(this.fileHandleNo, opfsSync);
+ }
+
+ async unregisterFile(path: string) {
+ const file = this.fileByPath.get(path);
+ if (file == null) {
+ return;
+ }
+ this.fileByPath.delete(path);
+ this.fileByHandle.delete(file.handle);
+ file.sync.close();
+ }
+ lookupFileHandle(path: string): number | null {
+ try {
+ const file = this.fileByPath.get(path);
+ if (file == null) {
+ return null;
+ }
+ return file.handle;
+ } catch (e) {
+ console.error('lookupFile', path, e);
+ throw e;
+ }
+ }
+ read(handle: number, buffer: Uint8Array, offset: number): number {
+ try {
+ const file = this.fileByHandle.get(handle);
+ const result = file.read(buffer, { at: Number(offset) });
+ return result;
+ } catch (e) {
+ console.error('read', handle, buffer.length, offset, e);
+ throw e;
+ }
+ }
+ write(handle: number, buffer: Uint8Array, offset: number): number {
+ try {
+ const file = this.fileByHandle.get(handle);
+ const result = file.write(buffer, { at: Number(offset) });
+ return result;
+ } catch (e) {
+ console.error('write', handle, buffer.length, offset, e);
+ throw e;
+ }
+ }
+ sync(handle: number) {
+ try {
+ const file = this.fileByHandle.get(handle);
+ file.flush();
+ } catch (e) {
+ console.error('sync', handle, e);
+ throw e;
+ }
+ }
+ truncate(handle: number, size: number) {
+ try {
+ const file = this.fileByHandle.get(handle);
+ const result = file.truncate(size);
+ return result;
+ } catch (e) {
+ console.error('truncate', handle, size, e);
+ throw e;
+ }
+ }
+ size(handle: number): number {
+ try {
+ const file = this.fileByHandle.get(handle);
+ const size = file.getSize()
+ return size;
+ } catch (e) {
+ console.error('size', handle, e);
+ throw e;
+ }
+ }
+}
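+// Usage sketch (worker side, illustrative only): the numeric handles returned
+// by lookupFileHandle are what the wasm imports above pass back into read/write.
+//
+//   const dir = new OpfsDirectory();
+//   await dir.registerFile("app.db");               // opens a sync access handle
+//   const handle = dir.lookupFileHandle("app.db");  // numeric handle for wasm
+//   const buf = new Uint8Array(4096);
+//   const n = dir.read(handle, buf, 0);             // bytes read at offset 0
+//   dir.sync(handle);                               // flush to storage
+//   await dir.unregisterFile("app.db");             // closes the handle
+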
+
+var workerRequestId = 0;
+function waitForWorkerResponse(worker: Worker, id: number): Promise<void> {
+ let waitResolve, waitReject;
+ const callback = msg => {
+ if (msg.data.id == id) {
+ if (msg.data.error != null) {
+ waitReject(msg.data.error)
+ } else {
+ waitResolve()
+ }
+ cleanup();
+ }
+ };
+ const cleanup = () => worker.removeEventListener("message", callback);
+
+ worker.addEventListener("message", callback);
+ const result = new Promise((resolve, reject) => {
+ waitResolve = resolve;
+ waitReject = reject;
+ });
+ return result;
+}
+
+function registerFileAtWorker(worker: Worker, path: string): Promise<void> {
+ workerRequestId += 1;
+ const currentId = workerRequestId;
+ const promise = waitForWorkerResponse(worker, currentId);
+ worker.postMessage({ __turso__: "register", path: path, id: currentId });
+ return promise;
+}
+
+function unregisterFileAtWorker(worker: Worker, path: string): Promise<void> {
+ workerRequestId += 1;
+ const currentId = workerRequestId;
+ const promise = waitForWorkerResponse(worker, currentId);
+ worker.postMessage({ __turso__: "unregister", path: path, id: currentId });
+ return promise;
+}
+
+export { OpfsDirectory, workerImports, MainDummyImports, waitForWorkerResponse, registerFileAtWorker, unregisterFileAtWorker }
\ No newline at end of file
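The two helpers above wrap a small postMessage protocol: the main thread sends `{ __turso__: "register" | "unregister", path, id }` and the worker replies `{ id }` on success or `{ id, error }` on failure. A minimal sketch of the expected call pattern, assuming `worker` is the `MainWorker` created by the runtime; both the database file and its `-wal` companion need registering, as `promise.ts` below does:

```typescript
import { registerFileAtWorker, unregisterFileAtWorker } from "@tursodatabase/database-browser-common";

// Open-use-close pattern for OPFS-backed files; `worker` is the WASI worker.
async function withDatabaseFiles(worker: Worker, path: string, body: () => Promise<void>) {
  await Promise.all([
    registerFileAtWorker(worker, path),
    registerFileAtWorker(worker, `${path}-wal`),
  ]);
  try {
    await body();
  } finally {
    await Promise.all([
      unregisterFileAtWorker(worker, path),
      unregisterFileAtWorker(worker, `${path}-wal`),
    ]);
  }
}
```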
diff --git a/bindings/javascript/packages/browser-common/package.json b/bindings/javascript/packages/browser-common/package.json
new file mode 100644
index 000000000..ad5daea1e
--- /dev/null
+++ b/bindings/javascript/packages/browser-common/package.json
@@ -0,0 +1,25 @@
+{
+ "name": "@tursodatabase/database-browser-common",
+ "version": "0.1.5",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/tursodatabase/turso"
+ },
+ "type": "module",
+ "license": "MIT",
+ "main": "dist/index.js",
+ "types": "dist/index.d.ts",
+ "packageManager": "yarn@4.9.2",
+ "files": [
+ "dist/**",
+ "README.md"
+ ],
+ "devDependencies": {
+ "typescript": "^5.9.2"
+ },
+ "scripts": {
+ "tsc-build": "npm exec tsc",
+ "build": "npm run tsc-build",
+ "test": "echo 'no tests'"
+ }
+}
diff --git a/bindings/javascript/packages/browser-common/tsconfig.json b/bindings/javascript/packages/browser-common/tsconfig.json
new file mode 100644
index 000000000..410aeee0e
--- /dev/null
+++ b/bindings/javascript/packages/browser-common/tsconfig.json
@@ -0,0 +1,18 @@
+{
+ "compilerOptions": {
+ "skipLibCheck": true,
+ "declaration": true,
+ "declarationMap": true,
+ "module": "esnext",
+ "target": "esnext",
+ "outDir": "dist/",
+ "lib": [
+ "es2020",
+ "DOM",
+ "WebWorker"
+ ],
+ },
+ "include": [
+ "*"
+ ]
+}
\ No newline at end of file
diff --git a/bindings/javascript/packages/browser/index.js b/bindings/javascript/packages/browser/index.js
index be8564969..84eb146aa 100644
--- a/bindings/javascript/packages/browser/index.js
+++ b/bindings/javascript/packages/browser/index.js
@@ -5,6 +5,7 @@ import {
WASI as __WASI,
} from '@napi-rs/wasm-runtime'
+import { MainDummyImports } from "@tursodatabase/database-browser-common";
const __wasi = new __WASI({
@@ -25,10 +26,6 @@ const __wasmFile = await fetch(__wasmUrl).then((res) => res.arrayBuffer())
export let MainWorker = null;
-function panic(name) {
- throw new Error(`method ${name} must be invoked only from the main thread`);
-}
-
const {
instance: __napiInstance,
module: __wasiModule,
@@ -49,14 +46,8 @@ const {
...importObject.env,
...importObject.napi,
...importObject.emnapi,
+ ...MainDummyImports,
memory: __sharedMemory,
- is_web_worker: () => false,
- lookup_file: () => panic("lookup_file"),
- read: () => panic("read"),
- write: () => panic("write"),
- sync: () => panic("sync"),
- truncate: () => panic("truncate"),
- size: () => panic("size"),
}
return importObject
},
diff --git a/bindings/javascript/packages/browser/package.json b/bindings/javascript/packages/browser/package.json
index ca9f38147..487ff57a4 100644
--- a/bindings/javascript/packages/browser/package.json
+++ b/bindings/javascript/packages/browser/package.json
@@ -40,6 +40,7 @@
},
"dependencies": {
"@napi-rs/wasm-runtime": "^1.0.3",
+ "@tursodatabase/database-browser-common": "^0.1.5",
"@tursodatabase/database-common": "^0.1.5"
}
}
diff --git a/bindings/javascript/packages/browser/promise.ts b/bindings/javascript/packages/browser/promise.ts
index 8f713f958..e5d0d3c9e 100644
--- a/bindings/javascript/packages/browser/promise.ts
+++ b/bindings/javascript/packages/browser/promise.ts
@@ -1,50 +1,24 @@
-import { DatabasePromise, NativeDatabase, DatabaseOpts, SqliteError } from "@tursodatabase/database-common"
+import { registerFileAtWorker, unregisterFileAtWorker } from "@tursodatabase/database-browser-common"
+import { DatabasePromise, NativeDatabase, DatabaseOpts, SqliteError, } from "@tursodatabase/database-common"
import { connect as nativeConnect, initThreadPool, MainWorker } from "#index";
-let workerRequestId = 0;
class Database extends DatabasePromise {
- files: string[];
- constructor(db: NativeDatabase, files: string[], opts: DatabaseOpts = {}) {
+ path: string | null;
+ constructor(db: NativeDatabase, fsPath: string | null, opts: DatabaseOpts = {}) {
super(db, opts)
- this.files = files;
+ this.path = fsPath;
}
async close() {
- let currentId = workerRequestId;
- workerRequestId += this.files.length;
-
- let tasks = [];
- for (const file of this.files) {
- (MainWorker as any).postMessage({ __turso__: "unregister", path: file, id: currentId });
- tasks.push(waitFor(currentId));
- currentId += 1;
+ if (this.path != null) {
+ await Promise.all([
+ unregisterFileAtWorker(MainWorker, this.path),
+ unregisterFileAtWorker(MainWorker, `${this.path}-wal`)
+ ]);
}
- await Promise.all(tasks);
this.db.close();
}
}
-function waitFor(id: number): Promise<void> {
- let waitResolve, waitReject;
- const callback = msg => {
- if (msg.data.id == id) {
- if (msg.data.error != null) {
- waitReject(msg.data.error)
- } else {
- waitResolve()
- }
- cleanup();
- }
- };
- const cleanup = () => (MainWorker as any).removeEventListener("message", callback);
-
- (MainWorker as any).addEventListener("message", callback);
- const result = new Promise((resolve, reject) => {
- waitResolve = resolve;
- waitReject = reject;
- });
- return result;
-}
-
/**
* Creates a new database connection asynchronously.
*
@@ -55,24 +29,18 @@ function waitFor(id: number): Promise {
async function connect(path: string, opts: DatabaseOpts = {}): Promise<Database> {
if (path == ":memory:") {
const db = await nativeConnect(path, { tracing: opts.tracing });
- return new Database(db, [], opts);
+ return new Database(db, null, opts);
}
await initThreadPool();
if (MainWorker == null) {
throw new Error("panic: MainWorker is not set");
}
-
- let currentId = workerRequestId;
- workerRequestId += 2;
-
- let dbHandlePromise = waitFor(currentId);
- let walHandlePromise = waitFor(currentId + 1);
- (MainWorker as any).postMessage({ __turso__: "register", path: `${path}`, id: currentId });
- (MainWorker as any).postMessage({ __turso__: "register", path: `${path}-wal`, id: currentId + 1 });
- await Promise.all([dbHandlePromise, walHandlePromise]);
+ await Promise.all([
+ registerFileAtWorker(MainWorker, path),
+ registerFileAtWorker(MainWorker, `${path}-wal`)
+ ]);
const db = await nativeConnect(path, { tracing: opts.tracing });
- const files = [path, `${path}-wal`];
- return new Database(db, files, opts);
+ return new Database(db, path, opts);
}
export { connect, Database, SqliteError }
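Call-site behavior after this refactor, for reference; the API is as documented in the package README, and `app.db` is an illustrative path:

```typescript
import { connect } from "@tursodatabase/database-browser";

const db = await connect("app.db"); // registers app.db and app.db-wal at the OPFS worker
await db.exec("CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v TEXT)");
await db.prepare("INSERT OR REPLACE INTO kv VALUES (?, ?)").run("greeting", "hello");
console.log(await db.prepare("SELECT * FROM kv").all());
await db.close(); // unregisters both files, releasing their sync access handles
```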
diff --git a/bindings/javascript/packages/browser/tsconfig.json b/bindings/javascript/packages/browser/tsconfig.json
index b46abc167..56b1bcc28 100644
--- a/bindings/javascript/packages/browser/tsconfig.json
+++ b/bindings/javascript/packages/browser/tsconfig.json
@@ -5,6 +5,7 @@
"declarationMap": true,
"module": "nodenext",
"target": "esnext",
+ "moduleResolution": "nodenext",
"outDir": "dist/",
"lib": [
"es2020"
diff --git a/bindings/javascript/packages/browser/worker.mjs b/bindings/javascript/packages/browser/worker.mjs
index 9c29d4390..104fd4c19 100644
--- a/bindings/javascript/packages/browser/worker.mjs
+++ b/bindings/javascript/packages/browser/worker.mjs
@@ -1,108 +1,9 @@
import { instantiateNapiModuleSync, MessageHandler, WASI } from '@napi-rs/wasm-runtime'
+import { OpfsDirectory, workerImports } from '@tursodatabase/database-browser-common';
-var fileByPath = new Map();
-var fileByHandle = new Map();
-let fileHandles = 0;
+var opfs = new OpfsDirectory();
var memory = null;
-function getUint8ArrayFromWasm(ptr, len) {
- ptr = ptr >>> 0;
- return new Uint8Array(memory.buffer).subarray(ptr, ptr + len);
-}
-
-
-async function registerFile(path) {
- if (fileByPath.has(path)) {
- return;
- }
- const opfsRoot = await navigator.storage.getDirectory();
- const opfsHandle = await opfsRoot.getFileHandle(path, { create: true });
- const opfsSync = await opfsHandle.createSyncAccessHandle();
- fileHandles += 1;
- fileByPath.set(path, { handle: fileHandles, sync: opfsSync });
- fileByHandle.set(fileHandles, opfsSync);
-}
-
-async function unregisterFile(path) {
- const file = fileByPath.get(path);
- if (file == null) {
- return;
- }
- fileByPath.delete(path);
- fileByHandle.delete(file.handle);
- file.sync.close();
-}
-
-function lookup_file(pathPtr, pathLen) {
- try {
- const buffer = getUint8ArrayFromWasm(pathPtr, pathLen);
- const notShared = new Uint8Array(buffer.length);
- notShared.set(buffer);
- const decoder = new TextDecoder('utf-8');
- const path = decoder.decode(notShared);
- const file = fileByPath.get(path);
- if (file == null) {
- return -404;
- }
- return file.handle;
- } catch (e) {
- console.error('lookupFile', pathPtr, pathLen, e);
- return -1;
- }
-}
-function read(handle, bufferPtr, bufferLen, offset) {
- try {
- const buffer = getUint8ArrayFromWasm(bufferPtr, bufferLen);
- const file = fileByHandle.get(Number(handle));
- const result = file.read(buffer, { at: Number(offset) });
- return result;
- } catch (e) {
- console.error('read', handle, bufferPtr, bufferLen, offset, e);
- return -1;
- }
-}
-function write(handle, bufferPtr, bufferLen, offset) {
- try {
- const buffer = getUint8ArrayFromWasm(bufferPtr, bufferLen);
- const file = fileByHandle.get(Number(handle));
- const result = file.write(buffer, { at: Number(offset) });
- return result;
- } catch (e) {
- console.error('write', handle, bufferPtr, bufferLen, offset, e);
- return -1;
- }
-}
-function sync(handle) {
- try {
- const file = fileByHandle.get(Number(handle));
- file.flush();
- return 0;
- } catch (e) {
- console.error('sync', handle, e);
- return -1;
- }
-}
-function truncate(handle, size) {
- try {
- const file = fileByHandle.get(Number(handle));
- const result = file.truncate(size);
- return result;
- } catch (e) {
- console.error('truncate', handle, size, e);
- return -1;
- }
-}
-function size(handle) {
- try {
- const file = fileByHandle.get(Number(handle));
- const size = file.getSize()
- return size;
- } catch (e) {
- console.error('size', handle, e);
- return -1;
- }
-}
-
const handler = new MessageHandler({
onLoad({ wasmModule, wasmMemory }) {
memory = wasmMemory;
@@ -124,14 +25,8 @@ const handler = new MessageHandler({
...importObject.env,
...importObject.napi,
...importObject.emnapi,
+ ...workerImports(opfs, memory),
memory: wasmMemory,
- is_web_worker: () => true,
- lookup_file: lookup_file,
- read: read,
- write: write,
- sync: sync,
- truncate: truncate,
- size: size,
}
},
})
@@ -141,16 +36,16 @@ const handler = new MessageHandler({
globalThis.onmessage = async function (e) {
if (e.data.__turso__ == 'register') {
try {
- await registerFile(e.data.path)
- self.postMessage({ id: e.data.id })
+ await opfs.registerFile(e.data.path);
+ self.postMessage({ id: e.data.id });
} catch (error) {
self.postMessage({ id: e.data.id, error: error });
}
return;
} else if (e.data.__turso__ == 'unregister') {
try {
- await unregisterFile(e.data.path)
- self.postMessage({ id: e.data.id })
+ await opfs.unregisterFile(e.data.path);
+ self.postMessage({ id: e.data.id });
} catch (error) {
self.postMessage({ id: e.data.id, error: error });
}
diff --git a/bindings/javascript/packages/common/types.ts b/bindings/javascript/packages/common/types.ts
index 2b843bb9f..3a2b075da 100644
--- a/bindings/javascript/packages/common/types.ts
+++ b/bindings/javascript/packages/common/types.ts
@@ -18,7 +18,6 @@ export interface NativeDatabase {
prepare(sql: string): NativeStatement;
- pluck(pluckMode: boolean);
defaultSafeIntegers(toggle: boolean);
totalChanges(): number;
changes(): number;
@@ -32,6 +31,11 @@ export const STEP_ROW = 1;
export const STEP_DONE = 2;
export const STEP_IO = 3;
+export interface TableColumn {
+ name: string,
+ type: string
+}
+
export interface NativeStatement {
stepAsync(): Promise<number>;
stepSync(): number;
@@ -39,7 +43,7 @@ export interface NativeStatement {
pluck(pluckMode: boolean);
safeIntegers(toggle: boolean);
raw(toggle: boolean);
- columns(): string[];
+ columns(): TableColumn[];
row(): any;
reset();
finalize();
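Since `columns()` now returns structured `TableColumn` entries and stepping reports the `STEP_*` codes above, here is a minimal driver sketch; it assumes the constants and types are exported from `@tursodatabase/database-common`, and takes the owning database's `ioLoopAsync` as a callback rather than assuming its exact shape:

```typescript
import { STEP_ROW, STEP_DONE, STEP_IO, NativeStatement, TableColumn } from "@tursodatabase/database-common";

// Drive a prepared statement to completion, running the I/O loop on demand.
async function allRows(stmt: NativeStatement, ioLoop: () => Promise<void>): Promise<any[]> {
  const cols: TableColumn[] = stmt.columns();
  console.log(cols.map(c => `${c.name}: ${c.type}`)); // structured column info
  const rows: any[] = [];
  for (;;) {
    const code = await stmt.stepAsync();
    if (code === STEP_ROW) rows.push(stmt.row());
    else if (code === STEP_IO) await ioLoop(); // resolve pending I/O, then step again
    else if (code === STEP_DONE) break;
  }
  stmt.reset();
  return rows;
}
```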
diff --git a/bindings/javascript/packages/native/index.d.ts b/bindings/javascript/packages/native/index.d.ts
index 1c510cfdc..915d4e073 100644
--- a/bindings/javascript/packages/native/index.d.ts
+++ b/bindings/javascript/packages/native/index.d.ts
@@ -91,6 +91,14 @@ export declare class Database {
ioLoopAsync(): Promise<void>
}
+export declare class Opfs {
+ constructor()
+}
+
+export declare class OpfsFile {
+
+}
+
/** A prepared statement. */
export declare class Statement {
reset(): void
@@ -144,6 +152,14 @@ export declare class Statement {
finalize(): void
}
+export declare function connect(path: string, opts?: DatabaseOpts | undefined | null): Promise<Database>
+
export interface DatabaseOpts {
tracing?: string
}
+
+/**
+ * turso-db in the browser requires explicit thread pool initialization,
+ * so we just put a no-op task on the thread pool and force emnapi to allocate a web worker
+ */
+export declare function initThreadPool(): Promise<void>
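A sketch of the initialization order that comment implies (package import illustrative; the browser `promise.ts` follows exactly this sequence):

```typescript
import { connect, initThreadPool } from "@tursodatabase/database-browser";

async function open(path: string) {
  // ':memory:' databases need no worker; file-backed databases require the
  // thread pool (and thus the web worker) to exist before any I/O happens.
  if (path !== ":memory:") {
    await initThreadPool();
  }
  return connect(path);
}
```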
diff --git a/bindings/javascript/packages/native/index.js b/bindings/javascript/packages/native/index.js
index d69167a1a..5ae272f81 100644
--- a/bindings/javascript/packages/native/index.js
+++ b/bindings/javascript/packages/native/index.js
@@ -508,6 +508,10 @@ if (!nativeBinding) {
throw new Error(`Failed to load native binding`)
}
-const { Database, Statement } = nativeBinding
+const { Database, Opfs, OpfsFile, Statement, connect, initThreadPool } = nativeBinding
export { Database }
+export { Opfs }
+export { OpfsFile }
export { Statement }
+export { connect }
+export { initThreadPool }
diff --git a/bindings/javascript/replace.sh b/bindings/javascript/replace.sh
new file mode 100644
index 000000000..323bed2f0
--- /dev/null
+++ b/bindings/javascript/replace.sh
@@ -0,0 +1,11 @@
+sed -i "s/$NAME_FROM/$NAME_TO/g" packages/common/package.json
+sed -i "s/$NAME_FROM/$NAME_TO/g" packages/native/package.json
+sed -i "s/$NAME_FROM/$NAME_TO/g" packages/browser/package.json
+
+sed -i "s/$VERSION_FROM/$VERSION_TO/g" packages/common/package.json
+sed -i "s/$VERSION_FROM/$VERSION_TO/g" packages/native/package.json
+sed -i "s/$VERSION_FROM/$VERSION_TO/g" packages/browser/package.json
+
+sed -i "s/$NAME_FROM\/database-common/$NAME_TO\/database-common/g" packages/native/promise.ts
+sed -i "s/$NAME_FROM\/database-common/$NAME_TO\/database-common/g" packages/native/compat.ts
+sed -i "s/$NAME_FROM\/database-common/$NAME_TO\/database-common/g" packages/browser/promise.ts
diff --git a/bindings/javascript/src/browser.rs b/bindings/javascript/src/browser.rs
index f9c6bffa9..b2c2047d2 100644
--- a/bindings/javascript/src/browser.rs
+++ b/bindings/javascript/src/browser.rs
@@ -28,7 +28,6 @@ pub fn init_thread_pool() -> napi::Result> {
pub struct ConnectTask {
path: String,
- is_memory: bool,
io: Arc,
}
@@ -70,7 +69,7 @@ impl Task for ConnectTask {
Some(result.db),
self.io.clone(),
result.conn,
- self.is_memory,
+ self.path.clone(),
))
}
}
@@ -88,16 +87,11 @@ pub fn connect(path: String, opts: Option) -> Result>,
io: Arc,
conn: Option>,
- is_memory: bool,
+ path: String,
is_open: Cell,
default_safe_integers: Cell,
}
@@ -186,20 +186,20 @@ impl Database {
.connect()
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to connect: {e}")))?;
- Ok(Self::create(Some(db), io, conn, is_memory(&path)))
+ Ok(Self::create(Some(db), io, conn, path))
}
pub fn create(
db: Option>,
io: Arc,
conn: Arc,
- is_memory: bool,
+ path: String,
) -> Self {
Database {
_db: db,
io,
conn: Some(conn),
- is_memory,
+ path,
is_open: Cell::new(true),
default_safe_integers: Cell::new(false),
}
@@ -218,7 +218,13 @@ impl Database {
/// Returns whether the database is in memory-only mode.
#[napi(getter)]
pub fn memory(&self) -> bool {
- self.is_memory
+ is_memory(&self.path)
+ }
+
+ /// Returns the path of the database file.
+ #[napi(getter)]
+ pub fn path(&self) -> String {
+ self.path.clone()
}
/// Returns whether the database connection is open.
@@ -246,7 +252,7 @@ impl Database {
/// * `sql` - The SQL statements to execute.
///
/// # Returns
- #[napi]
+ #[napi(ts_return_type = "Promise<void>")]
pub fn batch_async(&self, sql: String) -> Result<AsyncTask<DbTask>> {
Ok(AsyncTask::new(DbTask::Batch {
conn: self.conn()?.clone(),
@@ -319,7 +325,7 @@ impl Database {
#[napi]
pub fn close(&mut self) -> Result<()> {
self.is_open.set(false);
- let _ = self._db.take().unwrap();
+ let _ = self._db.take();
let _ = self.conn.take().unwrap();
Ok(())
}
@@ -482,7 +488,7 @@ impl Statement {
/// Step the statement and return result code (executed on the background thread):
/// 1 = Row available, 2 = Done, 3 = I/O needed
- #[napi]
+ #[napi(ts_return_type = "Promise<number>")]
pub fn step_async(&self) -> Result<AsyncTask<DbTask>> {
Ok(AsyncTask::new(DbTask::Step {
stmt: self.stmt.clone(),
@@ -577,7 +583,7 @@ impl Statement {
}
/// Get column information for the statement
- #[napi]
+ #[napi(ts_return_type = "Promise")]
pub fn columns<'env>(&self, env: &'env Env) -> Result> {
let stmt_ref = self.stmt.borrow();
let stmt = stmt_ref
diff --git a/sync/javascript/Cargo.toml b/bindings/javascript/sync/Cargo.toml
similarity index 93%
rename from sync/javascript/Cargo.toml
rename to bindings/javascript/sync/Cargo.toml
index e16f64891..029a04fb1 100644
--- a/sync/javascript/Cargo.toml
+++ b/bindings/javascript/sync/Cargo.toml
@@ -21,3 +21,6 @@ tracing-subscriber = "0.3.19"
[build-dependencies]
napi-build = "2.2.3"
+
+[features]
+browser = ["turso_node/browser"]
\ No newline at end of file
diff --git a/sync/javascript/README.md b/bindings/javascript/sync/README.md
similarity index 100%
rename from sync/javascript/README.md
rename to bindings/javascript/sync/README.md
diff --git a/sync/javascript/build.rs b/bindings/javascript/sync/build.rs
similarity index 100%
rename from sync/javascript/build.rs
rename to bindings/javascript/sync/build.rs
diff --git a/bindings/javascript/sync/packages/browser/README.md b/bindings/javascript/sync/packages/browser/README.md
new file mode 100644
index 000000000..e443f495e
--- /dev/null
+++ b/bindings/javascript/sync/packages/browser/README.md
@@ -0,0 +1,124 @@
+# Turso Database for JavaScript in Browser
+<!-- banner and badge markup elided in this excerpt -->
+---
+
+## About
+
+This package is the Turso embedded database library for JavaScript in Browser.
+
+> **⚠️ Warning:** This software is ALPHA, only use for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
+
+## Features
+
+- **SQLite compatible:** SQLite query language and file format support ([status](https://github.com/tursodatabase/turso/blob/main/COMPAT.md)).
+- **In-process**: No network overhead, runs directly in the browser
+- **TypeScript support**: Full TypeScript definitions included
+
+## Installation
+
+```bash
+npm install @tursodatabase/database-browser
+```
+
+## Getting Started
+
+### In-Memory Database
+
+```javascript
+import { connect } from '@tursodatabase/database-browser';
+
+// Create an in-memory database
+const db = await connect(':memory:');
+
+// Create a table
+await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');
+
+// Insert data
+const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+await insert.run('Alice', 'alice@example.com');
+await insert.run('Bob', 'bob@example.com');
+
+// Query data
+const users = await db.prepare('SELECT * FROM users').all();
+console.log(users);
+// Output: [
+// { id: 1, name: 'Alice', email: 'alice@example.com' },
+// { id: 2, name: 'Bob', email: 'bob@example.com' }
+// ]
+```
+
+### File-Based Database
+
+```javascript
+import { connect } from '@tursodatabase/database-browser';
+
+// Create or open a database file
+const db = await connect('my-database.db');
+
+// Create a table
+await db.exec(`
+ CREATE TABLE IF NOT EXISTS posts (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ title TEXT NOT NULL,
+ content TEXT,
+ created_at DATETIME DEFAULT CURRENT_TIMESTAMP
+ )
+`);
+
+// Insert a post
+const insertPost = db.prepare('INSERT INTO posts (title, content) VALUES (?, ?)');
+const result = await insertPost.run('Hello World', 'This is my first blog post!');
+
+console.log(`Inserted post with ID: ${result.lastInsertRowid}`);
+```
+
+### Transactions
+
+```javascript
+import { connect } from '@tursodatabase/database-browser';
+
+const db = await connect('transactions.db');
+
+// Using transactions for atomic operations
+const transaction = db.transaction(async (users) => {
+ const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+ for (const user of users) {
+ await insert.run(user.name, user.email);
+ }
+});
+
+// Execute transaction
+await transaction([
+ { name: 'Alice', email: 'alice@example.com' },
+ { name: 'Bob', email: 'bob@example.com' }
+]);
+```
+
+## API Reference
+
+For complete API documentation, see [JavaScript API Reference](../../../../docs/javascript-api-reference.md).
+
+## Related Packages
+
+* The [@tursodatabase/serverless](https://www.npmjs.com/package/@tursodatabase/serverless) package provides a serverless driver with the same API.
+* The [@tursodatabase/sync](https://www.npmjs.com/package/@tursodatabase/sync) package provides bidirectional sync between a local Turso database and Turso Cloud.
+
+## License
+
+This project is licensed under the [MIT license](../../LICENSE.md).
+
+## Support
+
+- [GitHub Issues](https://github.com/tursodatabase/turso/issues)
+- [Documentation](https://docs.turso.tech)
+- [Discord Community](https://tur.so/discord)
diff --git a/sync/javascript/turso-sync-js.wasi-browser.js b/bindings/javascript/sync/packages/browser/index.js
similarity index 68%
rename from sync/javascript/turso-sync-js.wasi-browser.js
rename to bindings/javascript/sync/packages/browser/index.js
index 55e6a698d..77e4d6567 100644
--- a/sync/javascript/turso-sync-js.wasi-browser.js
+++ b/bindings/javascript/sync/packages/browser/index.js
@@ -1,17 +1,18 @@
import {
createOnMessage as __wasmCreateOnMessageForFsProxy,
getDefaultContext as __emnapiGetDefaultContext,
- instantiateNapiModuleSync as __emnapiInstantiateNapiModuleSync,
+ instantiateNapiModule as __emnapiInstantiateNapiModule,
WASI as __WASI,
} from '@napi-rs/wasm-runtime'
+import { MainDummyImports } from "@tursodatabase/database-browser-common";
const __wasi = new __WASI({
version: 'preview1',
})
-const __wasmUrl = new URL('./turso-sync-js.wasm32-wasi.wasm', import.meta.url).href
+const __wasmUrl = new URL('./sync.wasm32-wasi.wasm', import.meta.url).href
const __emnapiContext = __emnapiGetDefaultContext()
@@ -23,19 +24,21 @@ const __sharedMemory = new WebAssembly.Memory({
const __wasmFile = await fetch(__wasmUrl).then((res) => res.arrayBuffer())
+export let MainWorker = null;
+
const {
instance: __napiInstance,
module: __wasiModule,
napiModule: __napiModule,
-} = __emnapiInstantiateNapiModuleSync(__wasmFile, {
+} = await __emnapiInstantiateNapiModule(__wasmFile, {
context: __emnapiContext,
- asyncWorkPoolSize: 4,
+ asyncWorkPoolSize: 1,
wasi: __wasi,
onCreateWorker() {
- const worker = new Worker(new URL('./wasi-worker-browser.mjs', import.meta.url), {
+ const worker = new Worker(new URL('./worker.mjs', import.meta.url), {
type: 'module',
})
-
+ MainWorker = worker;
return worker
},
overwriteImports(importObject) {
@@ -43,6 +46,7 @@ const {
...importObject.env,
...importObject.napi,
...importObject.emnapi,
+ ...MainDummyImports,
memory: __sharedMemory,
}
return importObject
@@ -58,11 +62,15 @@ const {
export default __napiModule.exports
export const Database = __napiModule.exports.Database
export const Statement = __napiModule.exports.Statement
+export const Opfs = __napiModule.exports.Opfs
+export const OpfsFile = __napiModule.exports.OpfsFile
+export const connect = __napiModule.exports.connect
+export const initThreadPool = __napiModule.exports.initThreadPool
export const GeneratorHolder = __napiModule.exports.GeneratorHolder
export const JsDataCompletion = __napiModule.exports.JsDataCompletion
-export const JsDataPollResult = __napiModule.exports.JsDataPollResult
export const JsProtocolIo = __napiModule.exports.JsProtocolIo
-export const JsProtocolRequestData = __napiModule.exports.JsProtocolRequestData
+export const JsProtocolRequestBytes = __napiModule.exports.JsProtocolRequestBytes
export const SyncEngine = __napiModule.exports.SyncEngine
export const DatabaseChangeTypeJs = __napiModule.exports.DatabaseChangeTypeJs
export const SyncEngineProtocolVersion = __napiModule.exports.SyncEngineProtocolVersion
+
diff --git a/bindings/javascript/sync/packages/browser/package.json b/bindings/javascript/sync/packages/browser/package.json
new file mode 100644
index 000000000..f7dde1ca7
--- /dev/null
+++ b/bindings/javascript/sync/packages/browser/package.json
@@ -0,0 +1,46 @@
+{
+ "name": "@tursodatabase/sync-browser",
+ "version": "0.1.5",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/tursodatabase/turso"
+ },
+ "type": "module",
+ "license": "MIT",
+ "main": "dist/promise.js",
+ "packageManager": "yarn@4.9.2",
+ "files": [
+ "index.js",
+ "worker.mjs",
+ "sync.wasm32-wasi.wasm",
+ "dist/**",
+ "README.md"
+ ],
+ "devDependencies": {
+ "@napi-rs/cli": "^3.1.5",
+ "@vitest/browser": "^3.2.4",
+ "playwright": "^1.55.0",
+ "typescript": "^5.9.2",
+ "vitest": "^3.2.4"
+ },
+ "scripts": {
+ "napi-build": "napi build --features browser --release --platform --target wasm32-wasip1-threads --no-js --manifest-path ../../Cargo.toml --output-dir . && rm index.d.ts sync.wasi* wasi* browser.js",
+ "tsc-build": "npm exec tsc",
+ "build": "npm run napi-build && npm run tsc-build",
+ "test": "VITE_TURSO_DB_URL=http://b--a--a.localhost:10000 CI=1 vitest --browser=chromium --run && VITE_TURSO_DB_URL=http://b--a--a.localhost:10000 CI=1 vitest --browser=firefox --run"
+ },
+ "napi": {
+ "binaryName": "sync",
+ "targets": [
+ "wasm32-wasip1-threads"
+ ]
+ },
+ "imports": {
+ "#index": "./index.js"
+ },
+ "dependencies": {
+ "@napi-rs/wasm-runtime": "^1.0.3",
+ "@tursodatabase/sync-common": "^0.1.5",
+ "@tursodatabase/database-common": "^0.1.5"
+ }
+}
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/browser/promise.test.ts b/bindings/javascript/sync/packages/browser/promise.test.ts
new file mode 100644
index 000000000..b602ce698
--- /dev/null
+++ b/bindings/javascript/sync/packages/browser/promise.test.ts
@@ -0,0 +1,281 @@
+import { expect, test } from 'vitest'
+import { connect, DatabaseRowMutation, DatabaseRowTransformResult } from './promise.js'
+
+const localeCompare = (a, b) => a.x.localeCompare(b.x);
+
+test('select-after-push', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS t(x)");
+ await db.exec("DELETE FROM t");
+ await db.push();
+ await db.close();
+ }
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("INSERT INTO t VALUES (1), (2), (3)");
+ await db.push();
+ }
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ const rows = await db.prepare('SELECT * FROM t').all();
+ expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }])
+ }
+})
+
+test('select-without-push', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS t(x)");
+ await db.exec("DELETE FROM t");
+ await db.push();
+ await db.close();
+ }
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("INSERT INTO t VALUES (1), (2), (3)");
+ }
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ const rows = await db.prepare('SELECT * FROM t').all();
+ expect(rows).toEqual([])
+ }
+})
+
+test('merge-non-overlapping-keys', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec("INSERT INTO q VALUES ('k1', 'value1'), ('k2', 'value2')");
+
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec("INSERT INTO q VALUES ('k3', 'value3'), ('k4', 'value4'), ('k5', 'value5')");
+
+ await Promise.all([db1.push(), db2.push()]);
+ await Promise.all([db1.pull(), db2.pull()]);
+
+ const rows1 = await db1.prepare('SELECT * FROM q').all();
+ const rows2 = await db2.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k1', y: 'value1' }, { x: 'k2', y: 'value2' }, { x: 'k3', y: 'value3' }, { x: 'k4', y: 'value4' }, { x: 'k5', y: 'value5' }];
+ expect(rows1.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+ expect(rows2.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+})
+
+test('last-push-wins', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec("INSERT INTO q VALUES ('k1', 'value1'), ('k2', 'value2'), ('k4', 'value4')");
+
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec("INSERT INTO q VALUES ('k1', 'value3'), ('k2', 'value4'), ('k3', 'value5')");
+
+ await db2.push();
+ await db1.push();
+ await Promise.all([db1.pull(), db2.pull()]);
+
+ const rows1 = await db1.prepare('SELECT * FROM q').all();
+ const rows2 = await db2.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k1', y: 'value1' }, { x: 'k2', y: 'value2' }, { x: 'k3', y: 'value5' }, { x: 'k4', y: 'value4' }];
+ expect(rows1.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+ expect(rows2.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+})
+
+test('last-push-wins-with-delete', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec("INSERT INTO q VALUES ('k1', 'value1'), ('k2', 'value2'), ('k4', 'value4')");
+ await db1.exec("DELETE FROM q")
+
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec("INSERT INTO q VALUES ('k1', 'value3'), ('k2', 'value4'), ('k3', 'value5')");
+
+ await db2.push();
+ await db1.push();
+ await Promise.all([db1.pull(), db2.pull()]);
+
+ const rows1 = await db1.prepare('SELECT * FROM q').all();
+ const rows2 = await db2.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k3', y: 'value5' }];
+ expect(rows1).toEqual(expected)
+ expect(rows2).toEqual(expected)
+})
+
+test('constraint-conflict', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS u(x TEXT PRIMARY KEY, y UNIQUE)");
+ await db.exec("DELETE FROM u");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec("INSERT INTO u VALUES ('k1', 'value1')");
+
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec("INSERT INTO u VALUES ('k2', 'value1')");
+
+ await db1.push();
+ await expect(async () => await db2.push()).rejects.toThrow('SQLite error: UNIQUE constraint failed: u.y');
+})
+
+test('checkpoint', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ for (let i = 0; i < 1000; i++) {
+ await db1.exec(`INSERT INTO q VALUES ('k${i}', 'v${i}')`);
+ }
+ expect((await db1.stats()).mainWal).toBeGreaterThan(4096 * 1000);
+ await db1.checkpoint();
+ expect((await db1.stats()).mainWal).toBe(0);
+ let revertWal = (await db1.stats()).revertWal;
+ expect(revertWal).toBeLessThan(4096 * 1000 / 100);
+
+ for (let i = 0; i < 1000; i++) {
+ await db1.exec(`UPDATE q SET y = 'u${i}' WHERE x = 'k${i}'`);
+ }
+ await db1.checkpoint();
+ expect((await db1.stats()).revertWal).toBe(revertWal);
+})
+
+test('persistence', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const path = `test-${(Math.random() * 10000) | 0}.db`;
+ {
+ const db1 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec(`INSERT INTO q VALUES ('k1', 'v1')`);
+ await db1.exec(`INSERT INTO q VALUES ('k2', 'v2')`);
+ await db1.close();
+ }
+
+ {
+ const db2 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec(`INSERT INTO q VALUES ('k3', 'v3')`);
+ await db2.exec(`INSERT INTO q VALUES ('k4', 'v4')`);
+ const stmt = db2.prepare('SELECT * FROM q');
+ const rows = await stmt.all();
+ const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }, { x: 'k3', y: 'v3' }, { x: 'k4', y: 'v4' }];
+ expect(rows).toEqual(expected)
+ stmt.close();
+ await db2.close();
+ }
+
+ {
+ const db3 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+ await db3.push();
+ await db3.close();
+ }
+
+ {
+ const db4 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+ const rows = await db4.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }, { x: 'k3', y: 'v3' }, { x: 'k4', y: 'v4' }];
+ expect(rows).toEqual(expected)
+ await db4.close();
+ }
+})
+
+test('transform', async () => {
+ {
+ const db = await connect({
+ path: ':memory:',
+ url: process.env.VITE_TURSO_DB_URL,
+ });
+ await db.exec("CREATE TABLE IF NOT EXISTS counter(key TEXT PRIMARY KEY, value INTEGER)");
+ await db.exec("DELETE FROM counter");
+ await db.exec("INSERT INTO counter VALUES ('1', 0)")
+ await db.push();
+ await db.close();
+ }
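+ // Rewrite each UPDATE as a relative increment so concurrent counter changes
+ // from different clients merge additively instead of last-push-wins.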
+ const transform = (m: DatabaseRowMutation) => ({
+ operation: 'rewrite',
+ stmt: {
+ sql: `UPDATE counter SET value = value + ? WHERE key = ?`,
+ values: [m.after.value - m.before.value, m.after.key]
+ }
+ } as DatabaseRowTransformResult);
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, transform: transform });
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, transform: transform });
+
+ await db1.exec("UPDATE counter SET value = value + 1 WHERE key = '1'");
+ await db2.exec("UPDATE counter SET value = value + 1 WHERE key = '1'");
+
+ await Promise.all([db1.push(), db2.push()]);
+ await Promise.all([db1.pull(), db2.pull()]);
+
+ const rows1 = await db1.prepare('SELECT * FROM counter').all();
+ const rows2 = await db2.prepare('SELECT * FROM counter').all();
+ expect(rows1).toEqual([{ key: '1', value: 2 }]);
+ expect(rows2).toEqual([{ key: '1', value: 2 }]);
+})
+
+test('transform-many', async () => {
+ {
+ const db = await connect({
+ path: ':memory:',
+ url: process.env.VITE_TURSO_DB_URL,
+ });
+ await db.exec("CREATE TABLE IF NOT EXISTS counter(key TEXT PRIMARY KEY, value INTEGER)");
+ await db.exec("DELETE FROM counter");
+ await db.exec("INSERT INTO counter VALUES ('1', 0)")
+ await db.push();
+ await db.close();
+ }
+ const transform = (m: DatabaseRowMutation) => ({
+ operation: 'rewrite',
+ stmt: {
+ sql: `UPDATE counter SET value = value + ? WHERE key = ?`,
+ values: [m.after.value - m.before.value, m.after.key]
+ }
+ } as DatabaseRowTransformResult);
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, transform: transform });
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, transform: transform });
+
+ for (let i = 0; i < 1002; i++) {
+ await db1.exec("UPDATE counter SET value = value + 1 WHERE key = '1'");
+ }
+ for (let i = 0; i < 1001; i++) {
+ await db2.exec("UPDATE counter SET value = value + 1 WHERE key = '1'");
+ }
+
+ let start = performance.now();
+ await Promise.all([db1.push(), db2.push()]);
+ console.info('push', performance.now() - start);
+
+ start = performance.now();
+ await Promise.all([db1.pull(), db2.pull()]);
+ console.info('pull', performance.now() - start);
+
+ const rows1 = await db1.prepare('SELECT * FROM counter').all();
+ const rows2 = await db2.prepare('SELECT * FROM counter').all();
+ expect(rows1).toEqual([{ key: '1', value: 1001 + 1002 }]);
+ expect(rows2).toEqual([{ key: '1', value: 1001 + 1002 }]);
+})
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/browser/promise.ts b/bindings/javascript/sync/packages/browser/promise.ts
new file mode 100644
index 000000000..8c28ad057
--- /dev/null
+++ b/bindings/javascript/sync/packages/browser/promise.ts
@@ -0,0 +1,113 @@
+import { registerFileAtWorker, unregisterFileAtWorker } from "@tursodatabase/database-browser-common"
+import { DatabasePromise, DatabaseOpts, NativeDatabase } from "@tursodatabase/database-common"
+import { ProtocolIo, run, SyncOpts, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult, memoryIO } from "@tursodatabase/sync-common";
+import { initThreadPool, MainWorker, SyncEngine } from "#index";
+
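+// The browser build persists the small sync-protocol metadata in localStorage;
+// the database files themselves live in OPFS and are registered with the WASM
+// worker in connect() below.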
+const BrowserIo: ProtocolIo = {
+ async read(path: string): Promise<Buffer | Uint8Array | null> {
+ const result = localStorage.getItem(path);
+ if (result == null) {
+ return null;
+ }
+ return new TextEncoder().encode(result);
+ },
+ async write(path: string, data: Buffer | Uint8Array): Promise<void> {
+ const array = new Uint8Array(data);
+ const value = new TextDecoder('utf-8').decode(array);
+ localStorage.setItem(path, value);
+ }
+};
+
+
+class Database extends DatabasePromise {
+ runOpts: RunOpts;
+ engine: any;
+ io: ProtocolIo;
+ fsPath: string | null;
+ constructor(db: NativeDatabase, io: ProtocolIo, runOpts: RunOpts, engine: any, fsPath: string | null, opts: DatabaseOpts = {}) {
+ super(db, opts)
+ this.runOpts = runOpts;
+ this.engine = engine;
+ this.fsPath = fsPath;
+ this.io = io;
+ }
+ async sync() {
+ await run(this.runOpts, this.io, this.engine, this.engine.sync());
+ }
+ async pull() {
+ await run(this.runOpts, this.io, this.engine, this.engine.pull());
+ }
+ async push() {
+ await run(this.runOpts, this.io, this.engine, this.engine.push());
+ }
+ async checkpoint() {
+ await run(this.runOpts, this.io, this.engine, this.engine.checkpoint());
+ }
+ async stats(): Promise<{ operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime: number | null }> {
+ return (await run(this.runOpts, this.io, this.engine, this.engine.stats()));
+ }
+ override async close(): Promise<void> {
+ this.db.close();
+ this.engine.close();
+ if (this.fsPath != null) {
+ await Promise.all([
+ unregisterFileAtWorker(MainWorker, this.fsPath),
+ unregisterFileAtWorker(MainWorker, `${this.fsPath}-wal`),
+ unregisterFileAtWorker(MainWorker, `${this.fsPath}-revert`),
+ unregisterFileAtWorker(MainWorker, `${this.fsPath}-info`),
+ unregisterFileAtWorker(MainWorker, `${this.fsPath}-changes`),
+ ]);
+ }
+ }
+}
+
+/**
+ * Creates a new database connection asynchronously.
+ *
+ * @param {SyncOpts} opts - Options controlling the local database path and sync behavior.
+ * @returns {Promise<Database>} - A promise that resolves to a Database instance.
+ */
+async function connect(opts: SyncOpts): Promise<Database> {
+ const engine = new SyncEngine({
+ path: opts.path,
+ clientName: opts.clientName,
+ tablesIgnore: opts.tablesIgnore,
+ useTransform: opts.transform != null,
+ tracing: opts.tracing,
+ protocolVersion: 1
+ });
+ const runOpts: RunOpts = {
+ url: opts.url,
+ headers: {
+ ...(opts.authToken != null && { "Authorization": `Bearer ${opts.authToken}` }),
+ ...(opts.encryptionKey != null && { "x-turso-encryption-key": opts.encryptionKey })
+ },
+ preemptionMs: 1,
+ transform: opts.transform,
+ };
+ const isMemory = opts.path == ':memory:';
+ let io = isMemory ? memoryIO() : BrowserIo;
+
+ await initThreadPool();
+ if (MainWorker == null) {
+ throw new Error("panic: MainWorker is not set");
+ }
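+ // Register every file the sync engine will touch with the OPFS worker so the
+ // WASM side can open them; in-memory databases skip this step.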
+ if (!isMemory) {
+ await Promise.all([
+ registerFileAtWorker(MainWorker, opts.path),
+ registerFileAtWorker(MainWorker, `${opts.path}-wal`),
+ registerFileAtWorker(MainWorker, `${opts.path}-revert`),
+ registerFileAtWorker(MainWorker, `${opts.path}-info`),
+ registerFileAtWorker(MainWorker, `${opts.path}-changes`),
+ ]);
+ }
+ await run(runOpts, io, engine, engine.init());
+
+ const nativeDb = engine.open();
+ return new Database(nativeDb as any, io, runOpts, engine, isMemory ? null : opts.path, {});
+}
+
+export { connect, Database, }
+export type { DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult }
diff --git a/sync/javascript/tsconfig.json b/bindings/javascript/sync/packages/browser/tsconfig.json
similarity index 79%
rename from sync/javascript/tsconfig.json
rename to bindings/javascript/sync/packages/browser/tsconfig.json
index ad8b905ec..b380f4fa5 100644
--- a/sync/javascript/tsconfig.json
+++ b/bindings/javascript/sync/packages/browser/tsconfig.json
@@ -1,17 +1,19 @@
{
"compilerOptions": {
"skipLibCheck": true,
+ "declaration": true,
+ "declarationMap": true,
"module": "nodenext",
"target": "esnext",
"moduleResolution": "nodenext",
- "declaration": true,
"outDir": "dist/",
"lib": [
"es2020",
- "dom",
+ "DOM",
+ "WebWorker"
],
"paths": {
- "#entry-point": [
+ "#index": [
"./index.js"
]
}
diff --git a/bindings/javascript/sync/packages/browser/vitest.config.ts b/bindings/javascript/sync/packages/browser/vitest.config.ts
new file mode 100644
index 000000000..deeaec485
--- /dev/null
+++ b/bindings/javascript/sync/packages/browser/vitest.config.ts
@@ -0,0 +1,23 @@
+import { defineConfig } from 'vitest/config'
+
+export default defineConfig({
+ define: {
+ 'process.env.NODE_DEBUG_NATIVE': 'false',
+ },
+ server: {
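+ // Cross-origin isolation headers, required for SharedArrayBuffer (WASM threads).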
+ headers: {
+ "Cross-Origin-Embedder-Policy": "require-corp",
+ "Cross-Origin-Opener-Policy": "same-origin"
+ },
+ },
+ test: {
+ browser: {
+ enabled: true,
+ provider: 'playwright',
+ instances: [
+ { browser: 'chromium' },
+ { browser: 'firefox' }
+ ],
+ },
+ },
+})
diff --git a/sync/javascript/wasi-worker-browser.mjs b/bindings/javascript/sync/packages/browser/worker.mjs
similarity index 50%
rename from sync/javascript/wasi-worker-browser.mjs
rename to bindings/javascript/sync/packages/browser/worker.mjs
index 8b1b17221..38c377f83 100644
--- a/sync/javascript/wasi-worker-browser.mjs
+++ b/bindings/javascript/sync/packages/browser/worker.mjs
@@ -1,13 +1,18 @@
import { instantiateNapiModuleSync, MessageHandler, WASI } from '@napi-rs/wasm-runtime'
+import { OpfsDirectory, workerImports } from "@tursodatabase/database-browser-common";
+
+const opfs = new OpfsDirectory();
+let memory = null;
const handler = new MessageHandler({
onLoad({ wasmModule, wasmMemory }) {
+ memory = wasmMemory;
const wasi = new WASI({
print: function () {
// eslint-disable-next-line no-console
console.log.apply(console, arguments)
},
- printErr: function() {
+ printErr: function () {
// eslint-disable-next-line no-console
console.error.apply(console, arguments)
},
@@ -20,6 +25,7 @@ const handler = new MessageHandler({
...importObject.env,
...importObject.napi,
...importObject.emnapi,
+ ...workerImports(opfs, memory),
memory: wasmMemory,
}
},
@@ -27,6 +33,23 @@ const handler = new MessageHandler({
},
})
-globalThis.onmessage = function (e) {
+globalThis.onmessage = async function (e) {
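+ // Handle turso-specific register/unregister control messages (used by the main
+ // thread to attach and detach OPFS-backed files) before delegating to NAPI.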
+ if (e.data.__turso__ == 'register') {
+ try {
+ await opfs.registerFile(e.data.path);
+ self.postMessage({ id: e.data.id });
+ } catch (error) {
+ self.postMessage({ id: e.data.id, error: error });
+ }
+ return;
+ } else if (e.data.__turso__ == 'unregister') {
+ try {
+ await opfs.unregisterFile(e.data.path);
+ self.postMessage({ id: e.data.id });
+ } catch (error) {
+ self.postMessage({ id: e.data.id, error: error });
+ }
+ return;
+ }
handler.handle(e)
}
diff --git a/bindings/javascript/sync/packages/common/README.md b/bindings/javascript/sync/packages/common/README.md
new file mode 100644
index 000000000..f9327f368
--- /dev/null
+++ b/bindings/javascript/sync/packages/common/README.md
@@ -0,0 +1,8 @@
+## About
+
+This package is the Turso Sync common JS library, shared between the final builds for Node and the browser.
+
+Do not use this package directly; instead, use `@tursodatabase/sync` or `@tursodatabase/sync-browser`.
+
+> **⚠️ Warning:** This software is ALPHA, only use for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
+
diff --git a/bindings/javascript/sync/packages/common/index.ts b/bindings/javascript/sync/packages/common/index.ts
new file mode 100644
index 000000000..1b264c80b
--- /dev/null
+++ b/bindings/javascript/sync/packages/common/index.ts
@@ -0,0 +1,5 @@
+import { run, memoryIO } from "./run.js"
+import { SyncOpts, ProtocolIo, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult } from "./types.js"
+
+export { run, memoryIO, }
+export type { SyncOpts, ProtocolIo, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult }
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json
new file mode 100644
index 000000000..68e1bfc02
--- /dev/null
+++ b/bindings/javascript/sync/packages/common/package.json
@@ -0,0 +1,25 @@
+{
+ "name": "@tursodatabase/sync-common",
+ "version": "0.1.5",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/tursodatabase/turso"
+ },
+ "type": "module",
+ "license": "MIT",
+ "main": "dist/index.js",
+ "types": "dist/index.d.ts",
+ "packageManager": "yarn@4.9.2",
+ "files": [
+ "dist/**",
+ "README.md"
+ ],
+ "devDependencies": {
+ "typescript": "^5.9.2"
+ },
+ "scripts": {
+ "tsc-build": "npm exec tsc",
+ "build": "npm run tsc-build",
+ "test": "echo 'no tests'"
+ }
+}
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/common/run.ts b/bindings/javascript/sync/packages/common/run.ts
new file mode 100644
index 000000000..f26333d4b
--- /dev/null
+++ b/bindings/javascript/sync/packages/common/run.ts
@@ -0,0 +1,127 @@
+"use strict";
+
+import { GeneratorResponse, ProtocolIo, RunOpts } from "./types.js";
+
+const GENERATOR_RESUME_IO = 0;
+const GENERATOR_RESUME_DONE = 1;
+
+interface TrackPromise {
+ promise: Promise<void>,
+ finished: boolean
+}
+
+function trackPromise(p: Promise<void>): TrackPromise {
+ const status = { promise: p, finished: false };
+ status.promise = p.finally(() => status.finished = true);
+ return status;
+}
+
+function timeoutMs(ms: number): Promise<void> {
+ return new Promise(resolve => setTimeout(resolve, ms))
+}
+
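+// Services one protocol request surfaced by the native sync engine: an HTTP
+// round-trip, a full read or write of a metadata file, or (when a transform
+// callback is configured) a rewrite pass over a batch of row mutations.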
+async function process(opts: RunOpts, io: ProtocolIo, request: any) {
+ const requestType = request.request();
+ const completion = request.completion();
+ if (requestType.type == 'Http') {
+ try {
+ let headers = opts.headers;
+ if (requestType.headers != null && requestType.headers.length > 0) {
+ headers = { ...opts.headers };
+ for (let header of requestType.headers) {
+ headers[header[0]] = header[1];
+ }
+ }
+ const response = await fetch(`${opts.url}${requestType.path}`, {
+ method: requestType.method,
+ headers: headers,
+ body: requestType.body != null ? new Uint8Array(requestType.body) : null,
+ });
+ completion.status(response.status);
+ const reader = response.body.getReader();
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ completion.done();
+ break;
+ }
+ completion.pushBuffer(value);
+ }
+ } catch (error) {
+ completion.poison(`fetch error: ${error}`);
+ }
+ } else if (requestType.type == 'FullRead') {
+ try {
+ const metadata = await io.read(requestType.path);
+ if (metadata != null) {
+ completion.pushBuffer(metadata);
+ }
+ completion.done();
+ } catch (error) {
+ completion.poison(`metadata read error: ${error}`);
+ }
+ } else if (requestType.type == 'FullWrite') {
+ try {
+ await io.write(requestType.path, requestType.content);
+ completion.done();
+ } catch (error) {
+ completion.poison(`metadata write error: ${error}`);
+ }
+ } else if (requestType.type == 'Transform') {
+ if (opts.transform == null) {
+ completion.poison("transform is not set");
+ return;
+ }
+ const results = [];
+ for (const mutation of requestType.mutations) {
+ const result = opts.transform(mutation);
+ if (result == null) {
+ results.push({ type: 'Keep' });
+ } else if (result.operation == 'skip') {
+ results.push({ type: 'Skip' });
+ } else if (result.operation == 'rewrite') {
+ results.push({ type: 'Rewrite', stmt: result.stmt });
+ } else {
+ completion.poison("unexpected transform operation");
+ return;
+ }
+ }
+ completion.pushTransform(results);
+ completion.done();
+ }
+}
+
+export function memoryIO(): ProtocolIo {
+ const values = new Map<string, Buffer | Uint8Array>();
+ return {
+ async read(path: string): Promise<Buffer | Uint8Array | null> {
+ return values.get(path) ?? null;
+ },
+ async write(path: string, data: Buffer | Uint8Array): Promise<void> {
+ values.set(path, data);
+ }
+ }
+}
+
+
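+// Drives a sync-engine generator to completion: resume the native state machine,
+// drain any protocol requests it queued, then race the in-flight requests against
+// a short preemption timeout so engine I/O keeps being polled while slow requests
+// (e.g. HTTP) are still pending.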
+export async function run(opts: RunOpts, io: ProtocolIo, engine: any, generator: any): Promise<any> {
+ let tasks = [];
+ while (true) {
+ const { type, ...rest }: GeneratorResponse = await generator.resumeAsync(null);
+ if (type == 'Done') {
+ return null;
+ }
+ if (type == 'SyncEngineStats') {
+ return rest;
+ }
+ for (let request = engine.protocolIo(); request != null; request = engine.protocolIo()) {
+ tasks.push(trackPromise(process(opts, io, request)));
+ }
+
+ const tasksRace = tasks.length == 0 ? Promise.resolve() : Promise.race([timeoutMs(opts.preemptionMs), ...tasks.map(t => t.promise)]);
+ await Promise.all([engine.ioLoopAsync(), tasksRace]);
+
+ tasks = tasks.filter(t => !t.finished);
+ }
+}
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/common/tsconfig.json b/bindings/javascript/sync/packages/common/tsconfig.json
new file mode 100644
index 000000000..9bc14edd3
--- /dev/null
+++ b/bindings/javascript/sync/packages/common/tsconfig.json
@@ -0,0 +1,17 @@
+{
+ "compilerOptions": {
+ "skipLibCheck": true,
+ "declaration": true,
+ "declarationMap": true,
+ "module": "esnext",
+ "target": "esnext",
+ "outDir": "dist/",
+ "lib": [
+ "es2020",
+ "dom"
+ ],
+ },
+ "include": [
+ "*"
+ ]
+}
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/common/types.ts b/bindings/javascript/sync/packages/common/types.ts
new file mode 100644
index 000000000..25fa1e47e
--- /dev/null
+++ b/bindings/javascript/sync/packages/common/types.ts
@@ -0,0 +1,50 @@
+export declare const enum DatabaseChangeType {
+ Insert = 0,
+ Update = 1,
+ Delete = 2
+}
+
+export interface DatabaseRowMutation {
+ changeTime: number
+ tableName: string
+ id: number
+ changeType: DatabaseChangeType
+ before?: Record<string, any>
+ after?: Record<string, any>
+ updates?: Record<string, any>
+}
+
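+// Result of a Transform callback: null keeps the mutation unchanged, 'skip'
+// drops it from the push, and 'rewrite' replaces it with the given statement.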
+export type DatabaseRowTransformResult = { operation: 'skip' } | { operation: 'rewrite', stmt: DatabaseRowStatement } | null;
+export type Transform = (arg: DatabaseRowMutation) => DatabaseRowTransformResult;
+export interface RunOpts {
+ preemptionMs: number,
+ url: string,
+ headers: { [K: string]: string }
+ transform?: Transform,
+}
+
+export interface ProtocolIo {
+ read(path: string): Promise<Buffer | Uint8Array | null>;
+ write(path: string, content: Buffer | Uint8Array): Promise<void>;
+}
+
+export interface SyncOpts {
+ path: string;
+ clientName?: string;
+ url: string;
+ authToken?: string;
+ encryptionKey?: string;
+ tablesIgnore?: string[],
+ transform?: Transform,
+ tracing?: string,
+}
+
+export interface DatabaseRowStatement {
+ sql: string
+ values: Array<unknown>
+}
+
+export type GeneratorResponse =
+ | { type: 'IO' }
+ | { type: 'Done' }
+ | { type: 'SyncEngineStats', operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime: number | null }
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/native/README.md b/bindings/javascript/sync/packages/native/README.md
new file mode 100644
index 000000000..d5444435c
--- /dev/null
+++ b/bindings/javascript/sync/packages/native/README.md
@@ -0,0 +1,125 @@
+# Turso Database for JavaScript in Node
+
+---
+
+## About
+
+This package is the Turso embedded database library for JavaScript in Node, with bidirectional sync between the local database and Turso Cloud.
+
+> **⚠️ Warning:** This software is ALPHA, only use for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
+
+## Features
+
+- **SQLite compatible:** SQLite query language and file format support ([status](https://github.com/tursodatabase/turso/blob/main/COMPAT.md)).
+- **In-process**: No network overhead, runs directly in your Node.js process
+- **TypeScript support**: Full TypeScript definitions included
+- **Cross-platform**: Supports Linux (x86 and arm64), macOS, and Windows (the browser is supported by the separate `@tursodatabase/sync-browser` package)
+
+## Installation
+
+```bash
+npm install @tursodatabase/sync
+```
+
+## Getting Started
+
+### In-Memory Database
+
+```javascript
+import { connect } from '@tursodatabase/sync';
+
+// Create an in-memory database (TURSO_DATABASE_URL is a placeholder for your remote database URL)
+const db = await connect({ path: ':memory:', url: process.env.TURSO_DATABASE_URL });
+
+// Create a table
+await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');
+
+// Insert data
+const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+await insert.run('Alice', 'alice@example.com');
+await insert.run('Bob', 'bob@example.com');
+
+// Query data
+const users = await db.prepare('SELECT * FROM users').all();
+console.log(users);
+// Output: [
+// { id: 1, name: 'Alice', email: 'alice@example.com' },
+// { id: 2, name: 'Bob', email: 'bob@example.com' }
+// ]
+```
+
+### File-Based Database
+
+```javascript
+import { connect } from '@tursodatabase/sync';
+
+// Create or open a database file (TURSO_DATABASE_URL is a placeholder)
+const db = await connect({ path: 'my-database.db', url: process.env.TURSO_DATABASE_URL });
+
+// Create a table
+await db.exec(`
+ CREATE TABLE IF NOT EXISTS posts (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ title TEXT NOT NULL,
+ content TEXT,
+ created_at DATETIME DEFAULT CURRENT_TIMESTAMP
+ )
+`);
+
+// Insert a post
+const insertPost = db.prepare('INSERT INTO posts (title, content) VALUES (?, ?)');
+const result = await insertPost.run('Hello World', 'This is my first blog post!');
+
+console.log(`Inserted post with ID: ${result.lastInsertRowid}`);
+```
+
+### Transactions
+
+```javascript
+import { connect } from '@tursodatabase/sync';
+
+const db = await connect({ path: 'transactions.db', url: process.env.TURSO_DATABASE_URL });
+
+// Using transactions for atomic operations
+const transaction = db.transaction(async (users) => {
+ const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+ for (const user of users) {
+ await insert.run(user.name, user.email);
+ }
+});
+
+// Execute transaction
+await transaction([
+ { name: 'Alice', email: 'alice@example.com' },
+ { name: 'Bob', email: 'bob@example.com' }
+]);
+```
+
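+### Syncing with Turso Cloud
+
+This package layers bidirectional sync on top of the embedded database. The sketch below follows the API exercised by this package's tests; the environment variable names are placeholders for your own database URL and token:
+
+```javascript
+import { connect } from '@tursodatabase/sync';
+
+const db = await connect({
+  path: 'local.db',                        // local database file (or ':memory:')
+  url: process.env.TURSO_DATABASE_URL,     // remote database URL (placeholder)
+  authToken: process.env.TURSO_AUTH_TOKEN, // optional auth token (placeholder)
+});
+
+await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+await db.exec("INSERT INTO q VALUES ('k1', 'v1')"); // regular local writes
+await db.push(); // send local changes to the remote database
+await db.pull(); // bring down changes made by other clients
+await db.close();
+```
+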
+## API Reference
+
+For complete API documentation, see [JavaScript API Reference](../../../../docs/javascript-api-reference.md).
+
+## Related Packages
+
+* The [@tursodatabase/serverless](https://www.npmjs.com/package/@tursodatabase/serverless) package provides a serverless driver with the same API.
+* The [@tursodatabase/sync](https://www.npmjs.com/package/@tursodatabase/sync) package provides bidirectional sync between a local Turso database and Turso Cloud.
+
+## License
+
+This project is licensed under the [MIT license](../../LICENSE.md).
+
+## Support
+
+- [GitHub Issues](https://github.com/tursodatabase/turso/issues)
+- [Documentation](https://docs.turso.tech)
+- [Discord Community](https://tur.so/discord)
diff --git a/sync/javascript/index.d.ts b/bindings/javascript/sync/packages/native/index.d.ts
similarity index 74%
rename from sync/javascript/index.d.ts
rename to bindings/javascript/sync/packages/native/index.d.ts
index 62d3b18ae..3ff5f0390 100644
--- a/sync/javascript/index.d.ts
+++ b/bindings/javascript/sync/packages/native/index.d.ts
@@ -8,13 +8,15 @@ export declare class Database {
* # Arguments
* * `path` - The path to the database file.
*/
- constructor(path: string)
+ constructor(path: string, opts?: DatabaseOpts | undefined | null)
/** Returns whether the database is in memory-only mode. */
get memory(): boolean
+ /** Returns the path to the database file. */
+ get path(): string
/** Returns whether the database connection is open. */
get open(): boolean
/**
- * Executes a batch of SQL statements.
+ * Executes a batch of SQL statements on the main thread.
*
* # Arguments
*
@@ -22,7 +24,17 @@ export declare class Database {
*
* # Returns
*/
- batch(sql: string): void
+ batchSync(sql: string): void
+ /**
+ * Executes a batch of SQL statements outside of the main thread.
+ *
+ * # Arguments
+ *
+ * * `sql` - The SQL statements to execute.
+ *
+ * # Returns
+ */
+ batchAsync(sql: string): Promise<void>
/**
* Prepares a statement for execution.
*
@@ -105,10 +117,15 @@ export declare class Statement {
*/
bindAt(index: number, value: unknown): void
/**
- * Step the statement and return result code:
+ * Step the statement and return result code (executed on the main thread):
* 1 = Row available, 2 = Done, 3 = I/O needed
*/
- step(): number
+ stepSync(): number
+ /**
+ * Step the statement and return result code (executed on the background thread):
+ * 1 = Row available, 2 = Done, 3 = I/O needed
+ */
+ stepAsync(): Promise<number>
/** Get the current row data according to the presentation mode */
row(): unknown
/** Sets the presentation mode to raw. */
@@ -124,31 +141,32 @@ export declare class Statement {
*/
safeIntegers(toggle?: boolean | undefined | null): void
/** Get column information for the statement */
- columns(): unknown[]
+ columns(): Promise<unknown[]>
/** Finalizes the statement. */
finalize(): void
}
+
+export interface DatabaseOpts {
+ tracing?: string
+}
export declare class GeneratorHolder {
- resume(error?: string | undefined | null): number
- take(): GeneratorResponse | null
+ resumeSync(error?: string | undefined | null): GeneratorResponse
+ resumeAsync(error?: string | undefined | null): Promise<GeneratorResponse>
}
export declare class JsDataCompletion {
poison(err: string): void
status(value: number): void
- push(value: Buffer): void
+ pushBuffer(value: Buffer): void
+ pushTransform(values: Array<DatabaseRowTransformResultJs>): void
done(): void
}
-export declare class JsDataPollResult {
-
-}
-
export declare class JsProtocolIo {
- takeRequest(): JsProtocolRequestData | null
+ takeRequest(): JsProtocolRequestBytes | null
}
-export declare class JsProtocolRequestData {
+export declare class JsProtocolRequestBytes {
request(): JsProtocolRequest
completion(): JsDataCompletion
}
@@ -159,13 +177,14 @@ export declare class SyncEngine {
ioLoopSync(): void
/** Runs the I/O loop asynchronously, returning a Promise. */
ioLoopAsync(): Promise
- protocolIo(): JsProtocolRequestData | null
+ protocolIo(): JsProtocolRequestBytes | null
sync(): GeneratorHolder
push(): GeneratorHolder
stats(): GeneratorHolder
pull(): GeneratorHolder
checkpoint(): GeneratorHolder
open(): Database
+ close(): void
}
export declare const enum DatabaseChangeTypeJs {
@@ -193,21 +212,29 @@ export interface DatabaseRowStatementJs {
values: Array<unknown>
}
+export type DatabaseRowTransformResultJs =
+ | { type: 'Keep' }
+ | { type: 'Skip' }
+ | { type: 'Rewrite', stmt: DatabaseRowStatementJs }
+
export type GeneratorResponse =
- | { type: 'SyncEngineStats', operations: number, wal: number }
+ | { type: 'IO' }
+ | { type: 'Done' }
+ | { type: 'SyncEngineStats', operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime?: number }
export type JsProtocolRequest =
| { type: 'Http', method: string, path: string, body?: Array<number>, headers: Array<[string, string]> }
| { type: 'FullRead', path: string }
| { type: 'FullWrite', path: string, content: Array<number> }
+ | { type: 'Transform', mutations: Array<DatabaseRowMutationJs> }
export interface SyncEngineOpts {
path: string
clientName?: string
walPullBatchSize?: number
- enableTracing?: string
+ tracing?: string
tablesIgnore?: Array<string>
- transform?: (arg: DatabaseRowMutationJs) => DatabaseRowStatementJs | null
+ useTransform: boolean
protocolVersion?: SyncEngineProtocolVersion
}
diff --git a/bindings/javascript/sync/packages/native/index.js b/bindings/javascript/sync/packages/native/index.js
new file mode 100644
index 000000000..fd71b4c77
--- /dev/null
+++ b/bindings/javascript/sync/packages/native/index.js
@@ -0,0 +1,520 @@
+// prettier-ignore
+/* eslint-disable */
+// @ts-nocheck
+/* auto-generated by NAPI-RS */
+
+import { createRequire } from 'node:module'
+const require = createRequire(import.meta.url)
+const __dirname = new URL('.', import.meta.url).pathname
+
+const { readFileSync } = require('node:fs')
+let nativeBinding = null
+const loadErrors = []
+
+const isMusl = () => {
+ let musl = false
+ if (process.platform === 'linux') {
+ musl = isMuslFromFilesystem()
+ if (musl === null) {
+ musl = isMuslFromReport()
+ }
+ if (musl === null) {
+ musl = isMuslFromChildProcess()
+ }
+ }
+ return musl
+}
+
+const isFileMusl = (f) => f.includes('libc.musl-') || f.includes('ld-musl-')
+
+const isMuslFromFilesystem = () => {
+ try {
+ return readFileSync('/usr/bin/ldd', 'utf-8').includes('musl')
+ } catch {
+ return null
+ }
+}
+
+const isMuslFromReport = () => {
+ let report = null
+ if (typeof process.report?.getReport === 'function') {
+ process.report.excludeNetwork = true
+ report = process.report.getReport()
+ }
+ if (!report) {
+ return null
+ }
+ if (report.header && report.header.glibcVersionRuntime) {
+ return false
+ }
+ if (Array.isArray(report.sharedObjects)) {
+ if (report.sharedObjects.some(isFileMusl)) {
+ return true
+ }
+ }
+ return false
+}
+
+const isMuslFromChildProcess = () => {
+ try {
+ return require('child_process').execSync('ldd --version', { encoding: 'utf8' }).includes('musl')
+ } catch (e) {
+ // If we reach this case, we don't know if the system is musl or not, so is better to just fallback to false
+ return false
+ }
+}
+
+function requireNative() {
+ if (process.env.NAPI_RS_NATIVE_LIBRARY_PATH) {
+ try {
+ nativeBinding = require(process.env.NAPI_RS_NATIVE_LIBRARY_PATH);
+ } catch (err) {
+ loadErrors.push(err)
+ }
+ } else if (process.platform === 'android') {
+ if (process.arch === 'arm64') {
+ try {
+ return require('./sync.android-arm64.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-android-arm64')
+ const bindingPackageVersion = require('@tursodatabase/sync-android-arm64/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else if (process.arch === 'arm') {
+ try {
+ return require('./sync.android-arm-eabi.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-android-arm-eabi')
+ const bindingPackageVersion = require('@tursodatabase/sync-android-arm-eabi/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ loadErrors.push(new Error(`Unsupported architecture on Android ${process.arch}`))
+ }
+ } else if (process.platform === 'win32') {
+ if (process.arch === 'x64') {
+ try {
+ return require('./sync.win32-x64-msvc.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-win32-x64-msvc')
+ const bindingPackageVersion = require('@tursodatabase/sync-win32-x64-msvc/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else if (process.arch === 'ia32') {
+ try {
+ return require('./sync.win32-ia32-msvc.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-win32-ia32-msvc')
+ const bindingPackageVersion = require('@tursodatabase/sync-win32-ia32-msvc/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else if (process.arch === 'arm64') {
+ try {
+ return require('./sync.win32-arm64-msvc.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-win32-arm64-msvc')
+ const bindingPackageVersion = require('@tursodatabase/sync-win32-arm64-msvc/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ loadErrors.push(new Error(`Unsupported architecture on Windows: ${process.arch}`))
+ }
+ } else if (process.platform === 'darwin') {
+ try {
+ return require('./sync.darwin-universal.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-darwin-universal')
+ const bindingPackageVersion = require('@tursodatabase/sync-darwin-universal/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ if (process.arch === 'x64') {
+ try {
+ return require('./sync.darwin-x64.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-darwin-x64')
+ const bindingPackageVersion = require('@tursodatabase/sync-darwin-x64/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else if (process.arch === 'arm64') {
+ try {
+ return require('./sync.darwin-arm64.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-darwin-arm64')
+ const bindingPackageVersion = require('@tursodatabase/sync-darwin-arm64/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ loadErrors.push(new Error(`Unsupported architecture on macOS: ${process.arch}`))
+ }
+ } else if (process.platform === 'freebsd') {
+ if (process.arch === 'x64') {
+ try {
+ return require('./sync.freebsd-x64.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-freebsd-x64')
+ const bindingPackageVersion = require('@tursodatabase/sync-freebsd-x64/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else if (process.arch === 'arm64') {
+ try {
+ return require('./sync.freebsd-arm64.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-freebsd-arm64')
+ const bindingPackageVersion = require('@tursodatabase/sync-freebsd-arm64/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ loadErrors.push(new Error(`Unsupported architecture on FreeBSD: ${process.arch}`))
+ }
+ } else if (process.platform === 'linux') {
+ if (process.arch === 'x64') {
+ if (isMusl()) {
+ try {
+ return require('./sync.linux-x64-musl.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-x64-musl')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-x64-musl/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ try {
+ return require('./sync.linux-x64-gnu.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-x64-gnu')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-x64-gnu/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ }
+ } else if (process.arch === 'arm64') {
+ if (isMusl()) {
+ try {
+ return require('./sync.linux-arm64-musl.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-arm64-musl')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-arm64-musl/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ try {
+ return require('./sync.linux-arm64-gnu.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-arm64-gnu')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-arm64-gnu/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ }
+ } else if (process.arch === 'arm') {
+ if (isMusl()) {
+ try {
+ return require('./sync.linux-arm-musleabihf.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-arm-musleabihf')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-arm-musleabihf/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ try {
+ return require('./sync.linux-arm-gnueabihf.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-arm-gnueabihf')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-arm-gnueabihf/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ }
+ } else if (process.arch === 'riscv64') {
+ if (isMusl()) {
+ try {
+ return require('./sync.linux-riscv64-musl.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-riscv64-musl')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-riscv64-musl/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ try {
+ return require('./sync.linux-riscv64-gnu.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-riscv64-gnu')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-riscv64-gnu/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ }
+ } else if (process.arch === 'ppc64') {
+ try {
+ return require('./sync.linux-ppc64-gnu.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-ppc64-gnu')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-ppc64-gnu/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else if (process.arch === 's390x') {
+ try {
+ return require('./sync.linux-s390x-gnu.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-linux-s390x-gnu')
+ const bindingPackageVersion = require('@tursodatabase/sync-linux-s390x-gnu/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ loadErrors.push(new Error(`Unsupported architecture on Linux: ${process.arch}`))
+ }
+ } else if (process.platform === 'openharmony') {
+ if (process.arch === 'arm64') {
+ try {
+ return require('./sync.openharmony-arm64.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-openharmony-arm64')
+ const bindingPackageVersion = require('@tursodatabase/sync-openharmony-arm64/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else if (process.arch === 'x64') {
+ try {
+ return require('./sync.openharmony-x64.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-openharmony-x64')
+ const bindingPackageVersion = require('@tursodatabase/sync-openharmony-x64/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else if (process.arch === 'arm') {
+ try {
+ return require('./sync.openharmony-arm.node')
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ try {
+ const binding = require('@tursodatabase/sync-openharmony-arm')
+ const bindingPackageVersion = require('@tursodatabase/sync-openharmony-arm/package.json').version
+ if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+ throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+ }
+ return binding
+ } catch (e) {
+ loadErrors.push(e)
+ }
+ } else {
+ loadErrors.push(new Error(`Unsupported architecture on OpenHarmony: ${process.arch}`))
+ }
+ } else {
+ loadErrors.push(new Error(`Unsupported OS: ${process.platform}, architecture: ${process.arch}`))
+ }
+}
+
+nativeBinding = requireNative()
+
+if (!nativeBinding || process.env.NAPI_RS_FORCE_WASI) {
+ try {
+ nativeBinding = require('./sync.wasi.cjs')
+ } catch (err) {
+ if (process.env.NAPI_RS_FORCE_WASI) {
+ loadErrors.push(err)
+ }
+ }
+ if (!nativeBinding) {
+ try {
+ nativeBinding = require('@tursodatabase/sync-wasm32-wasi')
+ } catch (err) {
+ if (process.env.NAPI_RS_FORCE_WASI) {
+ loadErrors.push(err)
+ }
+ }
+ }
+}
+
+if (!nativeBinding) {
+ if (loadErrors.length > 0) {
+ throw new Error(
+ `Cannot find native binding. ` +
+ `npm has a bug related to optional dependencies (https://github.com/npm/cli/issues/4828). ` +
+ 'Please try `npm i` again after removing both package-lock.json and node_modules directory.',
+ { cause: loadErrors }
+ )
+ }
+ throw new Error(`Failed to load native binding`)
+}
+
+const { Database, Statement, GeneratorHolder, JsDataCompletion, JsProtocolIo, JsProtocolRequestBytes, SyncEngine, DatabaseChangeTypeJs, SyncEngineProtocolVersion } = nativeBinding
+export { Database }
+export { Statement }
+export { GeneratorHolder }
+export { JsDataCompletion }
+export { JsProtocolIo }
+export { JsProtocolRequestBytes }
+export { SyncEngine }
+export { DatabaseChangeTypeJs }
+export { SyncEngineProtocolVersion }
diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json
new file mode 100644
index 000000000..8a7e387dc
--- /dev/null
+++ b/bindings/javascript/sync/packages/native/package.json
@@ -0,0 +1,53 @@
+{
+ "name": "@tursodatabase/sync",
+ "version": "0.1.5",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/tursodatabase/turso"
+ },
+ "license": "MIT",
+ "module": "./dist/promise.js",
+ "main": "./dist/promise.js",
+ "type": "module",
+ "exports": {
+ ".": "./dist/promise.js",
+ "./compat": "./dist/compat.js"
+ },
+ "files": [
+ "index.js",
+ "dist/**",
+ "README.md"
+ ],
+ "packageManager": "yarn@4.9.2",
+ "devDependencies": {
+ "@napi-rs/cli": "^3.1.5",
+ "@types/node": "^24.3.1",
+ "typescript": "^5.9.2",
+ "vitest": "^3.2.4"
+ },
+ "scripts": {
+ "napi-build": "napi build --platform --release --esm --manifest-path ../../Cargo.toml --output-dir .",
+ "napi-dirs": "napi create-npm-dirs",
+ "napi-artifacts": "napi artifacts --output-dir .",
+ "tsc-build": "npm exec tsc",
+ "build": "npm run napi-build && npm run tsc-build",
+ "test": "vitest --run",
+ "prepublishOnly": "npm run napi-dirs && npm run napi-artifacts && napi prepublish -t npm"
+ },
+ "napi": {
+ "binaryName": "sync",
+ "targets": [
+ "x86_64-unknown-linux-gnu",
+ "x86_64-pc-windows-msvc",
+ "universal-apple-darwin",
+ "aarch64-unknown-linux-gnu"
+ ]
+ },
+ "dependencies": {
+ "@tursodatabase/database-common": "^0.1.5",
+ "@tursodatabase/sync-common": "^0.1.5"
+ },
+ "imports": {
+ "#index": "./index.js"
+ }
+}
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/native/promise.test.ts b/bindings/javascript/sync/packages/native/promise.test.ts
new file mode 100644
index 000000000..ec8381190
--- /dev/null
+++ b/bindings/javascript/sync/packages/native/promise.test.ts
@@ -0,0 +1,288 @@
+import { unlinkSync } from "node:fs";
+import { expect, test } from 'vitest'
+import { connect, DatabaseRowMutation, DatabaseRowTransformResult } from './promise.js'
+
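+// These tests run against a live sync server: VITE_TURSO_DB_URL must point at a
+// (disposable) Turso database, since the tests create, mutate, and delete rows.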
+const localeCompare = (a, b) => a.x.localeCompare(b.x);
+
+test('select-after-push', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS t(x)");
+ await db.exec("DELETE FROM t");
+ await db.push();
+ await db.close();
+ }
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("INSERT INTO t VALUES (1), (2), (3)");
+ await db.push();
+ }
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ const rows = await db.prepare('SELECT * FROM t').all();
+ expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }])
+ }
+})
+
+test('select-without-push', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS t(x)");
+ await db.exec("DELETE FROM t");
+ await db.push();
+ await db.close();
+ }
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("INSERT INTO t VALUES (1), (2), (3)");
+ }
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ const rows = await db.prepare('SELECT * FROM t').all();
+ expect(rows).toEqual([])
+ }
+})
+
+test('merge-non-overlapping-keys', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec("INSERT INTO q VALUES ('k1', 'value1'), ('k2', 'value2')");
+
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec("INSERT INTO q VALUES ('k3', 'value3'), ('k4', 'value4'), ('k5', 'value5')");
+
+ await Promise.all([db1.push(), db2.push()]);
+ await Promise.all([db1.pull(), db2.pull()]);
+
+ const rows1 = await db1.prepare('SELECT * FROM q').all();
+ const rows2 = await db2.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k1', y: 'value1' }, { x: 'k2', y: 'value2' }, { x: 'k3', y: 'value3' }, { x: 'k4', y: 'value4' }, { x: 'k5', y: 'value5' }];
+ expect(rows1.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+ expect(rows2.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+})
+
+test('last-push-wins', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec("INSERT INTO q VALUES ('k1', 'value1'), ('k2', 'value2'), ('k4', 'value4')");
+
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec("INSERT INTO q VALUES ('k1', 'value3'), ('k2', 'value4'), ('k3', 'value5')");
+
+ await db2.push();
+ await db1.push();
+ await Promise.all([db1.pull(), db2.pull()]);
+
+ const rows1 = await db1.prepare('SELECT * FROM q').all();
+ const rows2 = await db2.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k1', y: 'value1' }, { x: 'k2', y: 'value2' }, { x: 'k3', y: 'value5' }, { x: 'k4', y: 'value4' }];
+ expect(rows1.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+ expect(rows2.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+})
+
+test('last-push-wins-with-delete', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec("INSERT INTO q VALUES ('k1', 'value1'), ('k2', 'value2'), ('k4', 'value4')");
+ await db1.exec("DELETE FROM q")
+
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec("INSERT INTO q VALUES ('k1', 'value3'), ('k2', 'value4'), ('k3', 'value5')");
+
+ await db2.push();
+ await db1.push();
+ await Promise.all([db1.pull(), db2.pull()]);
+
+ const rows1 = await db1.prepare('SELECT * FROM q').all();
+ const rows2 = await db2.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k3', y: 'value5' }];
+ expect(rows1).toEqual(expected)
+ expect(rows2).toEqual(expected)
+})
+
+test('constraint-conflict', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS u(x TEXT PRIMARY KEY, y UNIQUE)");
+ await db.exec("DELETE FROM u");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec("INSERT INTO u VALUES ('k1', 'value1')");
+
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec("INSERT INTO u VALUES ('k2', 'value1')");
+
+ await db1.push();
+ await expect(async () => await db2.push()).rejects.toThrow('SQLite error: UNIQUE constraint failed: u.y');
+})
+
+test('checkpoint', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ for (let i = 0; i < 1000; i++) {
+ await db1.exec(`INSERT INTO q VALUES ('k${i}', 'v${i}')`);
+ }
+ expect((await db1.stats()).mainWal).toBeGreaterThan(4096 * 1000);
+ await db1.checkpoint();
+ expect((await db1.stats()).mainWal).toBe(0);
+ let revertWal = (await db1.stats()).revertWal;
+ expect(revertWal).toBeLessThan(4096 * 1000 / 100);
+
+ for (let i = 0; i < 1000; i++) {
+ await db1.exec(`UPDATE q SET y = 'u${i}' WHERE x = 'k${i}'`);
+ }
+ await db1.checkpoint();
+ expect((await db1.stats()).revertWal).toBe(revertWal);
+})
+
+test('persistence', async () => {
+ {
+ const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+ await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+ await db.exec("DELETE FROM q");
+ await db.push();
+ await db.close();
+ }
+ const path = `test-${(Math.random() * 10000) | 0}.db`;
+ try {
+ {
+ const db1 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+ await db1.exec(`INSERT INTO q VALUES ('k1', 'v1')`);
+ await db1.exec(`INSERT INTO q VALUES ('k2', 'v2')`);
+ await db1.close();
+ }
+
+ {
+ const db2 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+ await db2.exec(`INSERT INTO q VALUES ('k3', 'v3')`);
+ await db2.exec(`INSERT INTO q VALUES ('k4', 'v4')`);
+ const rows = await db2.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }, { x: 'k3', y: 'v3' }, { x: 'k4', y: 'v4' }];
+ expect(rows).toEqual(expected)
+ await db2.close();
+ }
+
+ {
+ const db3 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+ await db3.push();
+ await db3.close();
+ }
+
+ {
+ const db4 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+ const rows = await db4.prepare('SELECT * FROM q').all();
+ const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }, { x: 'k3', y: 'v3' }, { x: 'k4', y: 'v4' }];
+ expect(rows).toEqual(expected)
+ await db4.close();
+ }
+ } finally {
+ unlinkSync(path);
+ unlinkSync(`${path}-wal`);
+ unlinkSync(`${path}-info`);
+ unlinkSync(`${path}-changes`);
+ try { unlinkSync(`${path}-revert`) } catch (e) { }
+ }
+})
+
+test('transform', async () => {
+ {
+ const db = await connect({
+ path: ':memory:',
+ url: process.env.VITE_TURSO_DB_URL,
+ });
+ await db.exec("CREATE TABLE IF NOT EXISTS counter(key TEXT PRIMARY KEY, value INTEGER)");
+ await db.exec("DELETE FROM counter");
+ await db.exec("INSERT INTO counter VALUES ('1', 0)")
+ await db.push();
+ await db.close();
+ }
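+ // Rewrite each UPDATE as a relative increment so concurrent counter changes
+ // from different clients merge additively instead of last-push-wins.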
+ const transform = (m: DatabaseRowMutation) => ({
+ operation: 'rewrite',
+ stmt: {
+ sql: `UPDATE counter SET value = value + ? WHERE key = ?`,
+ values: [m.after.value - m.before.value, m.after.key]
+ }
+ } as DatabaseRowTransformResult);
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, transform: transform });
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, transform: transform });
+
+ await db1.exec("UPDATE counter SET value = value + 1 WHERE key = '1'");
+ await db2.exec("UPDATE counter SET value = value + 1 WHERE key = '1'");
+
+ await Promise.all([db1.push(), db2.push()]);
+ await Promise.all([db1.pull(), db2.pull()]);
+
+ const rows1 = await db1.prepare('SELECT * FROM counter').all();
+ const rows2 = await db2.prepare('SELECT * FROM counter').all();
+ expect(rows1).toEqual([{ key: '1', value: 2 }]);
+ expect(rows2).toEqual([{ key: '1', value: 2 }]);
+})
+
+test('transform-many', async () => {
+ {
+ const db = await connect({
+ path: ':memory:',
+ url: process.env.VITE_TURSO_DB_URL,
+ });
+ await db.exec("CREATE TABLE IF NOT EXISTS counter(key TEXT PRIMARY KEY, value INTEGER)");
+ await db.exec("DELETE FROM counter");
+ await db.exec("INSERT INTO counter VALUES ('1', 0)")
+ await db.push();
+ await db.close();
+ }
+ const transform = (m: DatabaseRowMutation) => ({
+ operation: 'rewrite',
+ stmt: {
+ sql: `UPDATE counter SET value = value + ? WHERE key = ?`,
+ values: [m.after.value - m.before.value, m.after.key]
+ }
+ } as DatabaseRowTransformResult);
+ const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, transform: transform });
+ const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, transform: transform });
+
+ for (let i = 0; i < 1002; i++) {
+ await db1.exec("UPDATE counter SET value = value + 1 WHERE key = '1'");
+ }
+ for (let i = 0; i < 1001; i++) {
+ await db2.exec("UPDATE counter SET value = value + 1 WHERE key = '1'");
+ }
+
+ let start = performance.now();
+ await Promise.all([db1.push(), db2.push()]);
+ console.info('push', performance.now() - start);
+
+ start = performance.now();
+ await Promise.all([db1.pull(), db2.pull()]);
+ console.info('pull', performance.now() - start);
+
+ const rows1 = await db1.prepare('SELECT * FROM counter').all();
+ const rows2 = await db2.prepare('SELECT * FROM counter').all();
+ expect(rows1).toEqual([{ key: '1', value: 1001 + 1002 }]);
+ expect(rows2).toEqual([{ key: '1', value: 1001 + 1002 }]);
+})
\ No newline at end of file
diff --git a/bindings/javascript/sync/packages/native/promise.ts b/bindings/javascript/sync/packages/native/promise.ts
new file mode 100644
index 000000000..86f020109
--- /dev/null
+++ b/bindings/javascript/sync/packages/native/promise.ts
@@ -0,0 +1,104 @@
+import { DatabasePromise, DatabaseOpts, NativeDatabase } from "@tursodatabase/database-common"
+import { ProtocolIo, run, SyncOpts, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult } from "@tursodatabase/sync-common";
+import { Database as NativeDB, SyncEngine } from "#index";
+import { promises } from "node:fs";
+
+const NodeIO: ProtocolIo = {
+ async read(path: string): Promise<Buffer | null> {
+ try {
+ return await promises.readFile(path);
+ } catch (error) {
+ if (error.code === 'ENOENT') {
+ return null;
+ }
+ throw error;
+ }
+ },
+ async write(path: string, data: Buffer | Uint8Array): Promise<void> {
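+ // Write to a unique temp file first, then rename over the target: rename is
+ // atomic on POSIX filesystems, so readers never see a partially written file.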
+ const unix = Math.floor(Date.now() / 1000);
+ const nonce = Math.floor(Math.random() * 1000000000);
+ const tmp = `${path}.tmp.${unix}.${nonce}`;
+ await promises.writeFile(tmp, new Uint8Array(data));
+ try {
+ await promises.rename(tmp, path);
+ } catch (err) {
+ await promises.unlink(tmp);
+ throw err;
+ }
+ }
+};
+
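+// In-memory ProtocolIo used when the database path is ':memory:' (see connect
+// below): sync metadata is kept in a Map instead of on the filesystem.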
+function memoryIO(): ProtocolIo {
+ const values = new Map<string, Buffer | Uint8Array>();
+ return {
+ async read(path: string): Promise<Buffer | Uint8Array | null> {
+ return values.get(path) ?? null;
+ },
+ async write(path: string, data: Buffer | Uint8Array): Promise<void> {
+ values.set(path, data);
+ }
+ };
+}
+class Database extends DatabasePromise {
+ runOpts: RunOpts;
+ engine: any;
+ io: ProtocolIo;
+ constructor(db: NativeDatabase, io: ProtocolIo, runOpts: RunOpts, engine: any, opts: DatabaseOpts = {}) {
+ super(db, opts)
+ this.runOpts = runOpts;
+ this.engine = engine;
+ this.io = io;
+ }
+ async sync() {
+ await run(this.runOpts, this.io, this.engine, this.engine.sync());
+ }
+ async pull() {
+ await run(this.runOpts, this.io, this.engine, this.engine.pull());
+ }
+ async push() {
+ await run(this.runOpts, this.io, this.engine, this.engine.push());
+ }
+ async checkpoint() {
+ await run(this.runOpts, this.io, this.engine, this.engine.checkpoint());
+ }
+ async stats(): Promise<{ operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime: number | null }> {
+ return (await run(this.runOpts, this.io, this.engine, this.engine.stats()));
+ }
+ override async close(): Promise<void> {
+ this.engine.close();
+ }
+}
+
+/**
+ * Creates a new database connection asynchronously.
+ *
+ * @param {SyncOpts} opts - Options for the connection and sync behavior (path, url, authToken, transform, ...).
+ * @returns {Promise<Database>} - A promise that resolves to a Database instance.
+ */
+async function connect(opts: SyncOpts): Promise<Database> {
+ const engine = new SyncEngine({
+ path: opts.path,
+ clientName: opts.clientName,
+ tablesIgnore: opts.tablesIgnore,
+ useTransform: opts.transform != null,
+ tracing: opts.tracing,
+ protocolVersion: 1
+ });
+ const runOpts: RunOpts = {
+ url: opts.url,
+ headers: {
+ ...(opts.authToken != null && { "Authorization": `Bearer ${opts.authToken}` }),
+ ...(opts.encryptionKey != null && { "x-turso-encryption-key": opts.encryptionKey })
+ },
+ preemptionMs: 1,
+ transform: opts.transform,
+ };
+ const io = opts.path === ':memory:' ? memoryIO() : NodeIO;
+ await run(runOpts, io, engine, engine.init());
+
+ const nativeDb = engine.open();
+ return new Database(nativeDb as any, io, runOpts, engine, {});
+}
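+
+// Usage sketch (assumes a reachable sync server at `url`; the table is created
+// and pushed beforehand, as in the tests):
+//
+// const db = await connect({ path: 'local.db', url: process.env.VITE_TURSO_DB_URL });
+// await db.exec("INSERT INTO q VALUES ('k1', 'v1')"); // applied locally first
+// await db.push(); // replicate local changes to the remote
+// await db.pull(); // fetch and apply remote changes
+// await db.close();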
+
+export { connect, Database, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult }
diff --git a/bindings/javascript/sync/packages/native/tsconfig.json b/bindings/javascript/sync/packages/native/tsconfig.json
new file mode 100644
index 000000000..e40dd870e
--- /dev/null
+++ b/bindings/javascript/sync/packages/native/tsconfig.json
@@ -0,0 +1,21 @@
+{
+ "compilerOptions": {
+ "skipLibCheck": true,
+ "declaration": true,
+ "declarationMap": true,
+ "module": "nodenext",
+ "target": "esnext",
+ "outDir": "dist/",
+ "lib": [
+ "es2020"
+ ],
+ "paths": {
+ "#index": [
+ "./index.d.ts"
+ ]
+ }
+ },
+ "include": [
+ "*"
+ ]
+}
\ No newline at end of file
diff --git a/bindings/javascript/sync/src/generator.rs b/bindings/javascript/sync/src/generator.rs
new file mode 100644
index 000000000..89045e131
--- /dev/null
+++ b/bindings/javascript/sync/src/generator.rs
@@ -0,0 +1,102 @@
+use napi::{bindgen_prelude::AsyncTask, Env, Task};
+use napi_derive::napi;
+use std::{
+ future::Future,
+ sync::{Arc, Mutex},
+};
+
+use turso_sync_engine::types::ProtocolCommand;
+
+pub const GENERATOR_RESUME_IO: u32 = 0;
+pub const GENERATOR_RESUME_DONE: u32 = 1;
+
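+/// Bridges a suspended sync-engine coroutine to JS: each `resume` call either
+/// yields `GeneratorResponse::IO` (the JS side must service a pending protocol
+/// request and resume again) or reports that the operation completed.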
+pub trait Generator {
+ fn resume(&mut self, result: Option) -> napi::Result;
+}
+
+impl>> Generator
+ for genawaiter::sync::Gen, F>
+{
+ fn resume(&mut self, error: Option) -> napi::Result {
+ let result = match error {
+ Some(err) => Err(turso_sync_engine::errors::Error::DatabaseSyncEngineError(
+ format!("JsProtocolIo error: {err}"),
+ )),
+ None => Ok(()),
+ };
+ match self.resume_with(result) {
+ genawaiter::GeneratorState::Yielded(ProtocolCommand::IO) => Ok(GeneratorResponse::IO),
+ genawaiter::GeneratorState::Complete(Ok(())) => Ok(GeneratorResponse::Done),
+ genawaiter::GeneratorState::Complete(Err(err)) => Err(napi::Error::new(
+ napi::Status::GenericFailure,
+ format!("sync engine operation failed: {err}"),
+ )),
+ }
+ }
+}
+
+#[napi(discriminant = "type")]
+pub enum GeneratorResponse {
+ IO,
+ Done,
+ SyncEngineStats {
+ operations: i64,
+ main_wal: i64,
+ revert_wal: i64,
+ last_pull_unix_time: i64,
+ last_push_unix_time: Option<i64>,
+ },
+}
+
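+/// Pairs the suspended coroutine with an out-of-band slot for its final result;
+/// `resume_sync` drains `response` once the generator reports `Done`.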
+#[napi]
+#[derive(Clone)]
+pub struct GeneratorHolder {
+ pub(crate) generator: Arc<Mutex<dyn Generator>>,
+ pub(crate) response: Arc<Mutex<Option<GeneratorResponse>>>,
+}
+
+pub struct ResumeTask {
+ holder: GeneratorHolder,
+ error: Option<String>,
+}
+
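+// SAFETY: the generator is only ever resumed while the `GeneratorHolder` mutex
+// is held, so moving this task onto the worker thread pool cannot race; this is
+// the invariant the unsafe impl relies on.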
+unsafe impl Send for ResumeTask {}
+
+impl Task for ResumeTask {
+ type Output = GeneratorResponse;
+ type JsValue = GeneratorResponse;
+
+ fn compute(&mut self) -> napi::Result<Self::Output> {
+ resume_sync(&self.holder, self.error.take())
+ }
+
+ fn resolve(&mut self, _: Env, output: Self::Output) -> napi::Result<Self::JsValue> {
+ Ok(output)
+ }
+}
+
+fn resume_sync(holder: &GeneratorHolder, error: Option<String>) -> napi::Result<GeneratorResponse> {
+ let result = holder.generator.lock().unwrap().resume(error)?;
+ if let GeneratorResponse::Done = result {
+ let response = holder.response.lock().unwrap().take();
+ Ok(response.unwrap_or(GeneratorResponse::Done))
+ } else {
+ Ok(result)
+ }
+}
+
+#[napi]
+impl GeneratorHolder {
+ #[napi]
+ pub fn resume_sync(&self, error: Option<String>) -> napi::Result<GeneratorResponse> {
+ resume_sync(self, error)
+ }
+
+ #[napi]
+ pub fn resume_async(&self, error: Option<String>) -> napi::Result<AsyncTask<ResumeTask>> {
+ Ok(AsyncTask::new(ResumeTask {
+ holder: self.clone(),
+ error,
+ }))
+ }
+}
diff --git a/sync/javascript/src/js_protocol_io.rs b/bindings/javascript/sync/src/js_protocol_io.rs
similarity index 53%
rename from sync/javascript/src/js_protocol_io.rs
rename to bindings/javascript/sync/src/js_protocol_io.rs
index 429c85f7f..9208e4636 100644
--- a/sync/javascript/src/js_protocol_io.rs
+++ b/bindings/javascript/sync/src/js_protocol_io.rs
@@ -7,7 +7,15 @@ use std::{
use napi::bindgen_prelude::*;
use napi_derive::napi;
-use turso_sync_engine::protocol_io::{DataCompletion, DataPollResult, ProtocolIO};
+use turso_sync_engine::{
+ protocol_io::{DataCompletion, DataPollResult, ProtocolIO},
+ types::{DatabaseRowTransformResult, DatabaseStatementReplay},
+};
+
+use crate::{
+ core_change_type_to_js, core_values_map_to_js, js_value_to_core, DatabaseRowMutationJs,
+ DatabaseRowTransformResultJs,
+};
#[napi]
pub enum JsProtocolRequest {
@@ -24,15 +32,34 @@ pub enum JsProtocolRequest {
path: String,
content: Vec<u8>,
},
+ Transform {
+ mutations: Vec<DatabaseRowMutationJs>,
+ },
}
#[derive(Clone)]
#[napi]
pub struct JsDataCompletion(Arc<Mutex<JsDataCompletionInner>>);
+pub struct JsBytesPollResult(Buffer);
+
+impl DataPollResult<u8> for JsBytesPollResult {
+ fn data(&self) -> &[u8] {
+ &self.0
+ }
+}
+pub struct JsTransformPollResult(Vec<DatabaseRowTransformResult>);
+
+impl DataPollResult<DatabaseRowTransformResult> for JsTransformPollResult {
+ fn data(&self) -> &[DatabaseRowTransformResult] {
+ &self.0
+ }
+}
+
struct JsDataCompletionInner {
status: Option<u16>,
chunks: VecDeque<Buffer>,
+ transformed: VecDeque<DatabaseRowTransformResult>,
finished: bool,
err: Option<String>,
}
@@ -49,8 +76,8 @@ impl JsDataCompletion {
}
}
-impl DataCompletion for JsDataCompletion {
- type DataPollResult = JsDataPollResult;
+impl DataCompletion<u8> for JsDataCompletion {
+ type DataPollResult = JsBytesPollResult;
fn status(&self) -> turso_sync_engine::Result