Merge branch 'main' into static

This commit is contained in:
Pekka Enberg
2025-01-27 09:49:34 +02:00
committed by GitHub
90 changed files with 6101 additions and 3398 deletions

42
.github/workflows/push_only.yml vendored Normal file
View File

@@ -0,0 +1,42 @@
name: Benchmarks+Nyrkiö
# Pull request support isn't integrated into github-action-benchmark, so run only post-merge
on:
push:
branches: [ "main", "master", "notmain", "add-nyrkio" ]
env:
CARGO_TERM_COLOR: never
jobs:
bench:
runs-on: ubuntu-latest
environment: test
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v4
with:
node-version: 20
# cache: 'npm'
# - name: Install dependencies
# run: npm install && npm run build
- name: Bench
run: cargo bench 2>&1 | tee output.txt
- name: Analyze benchmark result with Nyrkiö
uses: nyrkio/github-action-benchmark@HEAD
with:
name: turso
tool: criterion
output-file-path: output.txt
fail-on-alert: true
# Nyrkiö configuration
nyrkio-enable: true
# Get yours from https://nyrkio.com/docs/getting-started
nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }}
# Old way...
# Explicitly set this to null. We don't want threshold based alerts today.
external-data-json-path: null
gh-repository: null

View File

@@ -4,16 +4,24 @@ This document describes the compatibility of Limbo with SQLite.
## Table of contents:
- [Features](#features)
- [SQLite query language](#sqlite-query-language)
- [Compatibility with SQLite](#compatibility-with-sqlite)
- [Table of contents:](#table-of-contents)
- [Features](#features)
- [SQLite query language](#sqlite-query-language)
- [Statements](#statements)
- [PRAGMA Statements](#pragma)
- [PRAGMA](#pragma)
- [Expressions](#expressions)
- [Functions](#functions)
- [SQLite C API](#sqlite-c-api)
- [SQLite VDBE opcodes](#sqlite-vdbe-opcodes)
- [Extensions](#extensions)
- [SQL functions](#sql-functions)
- [Scalar functions](#scalar-functions)
- [Mathematical functions](#mathematical-functions)
- [Aggregate functions](#aggregate-functions)
- [Date and time functions](#date-and-time-functions)
- [JSON functions](#json-functions)
- [SQLite C API](#sqlite-c-api)
- [SQLite VDBE opcodes](#sqlite-vdbe-opcodes)
- [Extensions](#extensions)
- [UUID](#uuid)
- [regexp](#regexp)
## Features
@@ -308,7 +316,7 @@ Feature support of [sqlite expr syntax](https://www.sqlite.org/lang_expr.html).
| datetime() | Yes | partially supports modifiers |
| julianday() | Partial | does not support modifiers |
| unixepoch() | Partial | does not support modifiers |
| strftime() | No | |
| strftime() | Yes | partially supports modifiers |
| timediff() | No | |
Modifiers:
@@ -367,7 +375,7 @@ Modifiers:
| jsonb_set(json,path,value,...) | | |
| json_type(json) | Yes | |
| json_type(json,path) | Yes | |
| json_valid(json) | | |
| json_valid(json) | Yes | |
| json_valid(json,flags) | | |
| json_quote(value) | | |
| json_group_array(value) | | |
@@ -400,7 +408,7 @@ Modifiers:
| AggFinal | Yes |
| AggStep | Yes |
| AggStep | Yes |
| And | No |
| And | Yes |
| AutoCommit | No |
| BitAnd | Yes |
| BitNot | Yes |
@@ -493,7 +501,7 @@ Modifiers:
| OpenWrite | No |
| OpenWriteAsync | Yes |
| OpenWriteAwait | Yes |
| Or | No |
| Or | Yes |
| Pagecount | No |
| Param | No |
| ParseSchema | No |

98
Cargo.lock generated
View File

@@ -782,6 +782,18 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "filetime"
version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
dependencies = [
"cfg-if",
"libc",
"libredox",
"windows-sys 0.59.0",
]
[[package]]
name = "findshlibs"
version = "0.10.2"
@@ -800,6 +812,15 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "fsevent-sys"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2"
dependencies = [
"libc",
]
[[package]]
name = "futures"
version = "0.3.31"
@@ -1056,6 +1077,26 @@ dependencies = [
"str_stack",
]
[[package]]
name = "inotify"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3"
dependencies = [
"bitflags 2.8.0",
"inotify-sys",
"libc",
]
[[package]]
name = "inotify-sys"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
dependencies = [
"libc",
]
[[package]]
name = "io-uring"
version = "0.6.4"
@@ -1170,6 +1211,26 @@ dependencies = [
"chrono",
]
[[package]]
name = "kqueue"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c"
dependencies = [
"kqueue-sys",
"libc",
]
[[package]]
name = "kqueue-sys"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b"
dependencies = [
"bitflags 1.3.2",
"libc",
]
[[package]]
name = "lazy_static"
version = "1.5.0"
@@ -1210,6 +1271,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
dependencies = [
"bitflags 2.8.0",
"libc",
"redox_syscall",
]
[[package]]
@@ -1239,6 +1301,13 @@ dependencies = [
"rustyline",
]
[[package]]
name = "limbo-go"
version = "0.0.13"
dependencies = [
"limbo_core",
]
[[package]]
name = "limbo-wasm"
version = "0.0.13"
@@ -1345,8 +1414,11 @@ dependencies = [
"env_logger 0.10.2",
"limbo_core",
"log",
"notify",
"rand",
"rand_chacha",
"serde",
"serde_json",
"tempfile",
]
@@ -1477,6 +1549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd"
dependencies = [
"libc",
"log",
"wasi",
"windows-sys 0.52.0",
]
@@ -1560,6 +1633,31 @@ dependencies = [
"minimal-lexical",
]
[[package]]
name = "notify"
version = "8.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fee8403b3d66ac7b26aee6e40a897d85dc5ce26f44da36b8b73e987cc52e943"
dependencies = [
"bitflags 2.8.0",
"filetime",
"fsevent-sys",
"inotify",
"kqueue",
"libc",
"log",
"mio",
"notify-types",
"walkdir",
"windows-sys 0.59.0",
]
[[package]]
name = "notify-types"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d"
[[package]]
name = "num-format"
version = "0.4.4"

View File

@@ -7,6 +7,7 @@ members = [
"bindings/python",
"bindings/rust",
"bindings/wasm",
"bindings/go",
"cli",
"core",
"extensions/core",
@@ -57,7 +58,7 @@ github-attestations = true
debug = "line-tables-only"
codegen-units = 1
panic = "abort"
lto = "off"
lto = true
[profile.bench-profile]
inherits = "release"
@@ -65,3 +66,4 @@ debug = true
[profile.dist]
inherits = "release"
lto = "thin"

View File

@@ -23,6 +23,11 @@ This product depends on logback, distributed by the logback authors:
* License: licenses/bindings/java/logback-license.md (Apache License v2.0)
* Homepage: https://github.com/qos-ch/logback?tab=License-1-ov-file
This product depends on spotless, distributed by the diffplug authors:
* License: licenses/bindings/java/spotless-license.md (Apache License v2.0)
* Homepage: https://github.com/diffplug/spotless
This product depends on serde, distributed by the serde-rs project:
* License: licenses/core/serde-apache-license.md (Apache License v2.0)

View File

@@ -17,7 +17,7 @@
<a title="Last Commit" target="_blank" href="https://github.com/tursodatabase/limbo/commits/main"><img src="https://img.shields.io/github/last-commit/tursodatabase/limbo.svg?style=flat-square&color=FF9900"></a>
</p>
<p align="center">
<a title="Discord" target="_blank" href="[https://discord.gg/dmMbCqVX7G](https://discord.gg/jgjmyYgHwB)"><img alt="Chat on Discord" src="https://img.shields.io/discord/1258658826257961020?label=Discord&logo=Discord&style=social"></a>
<a title="Discord" target="_blank" href="https://discord.gg/jgjmyYgHwB"><img alt="Chat on Discord" src="https://img.shields.io/discord/1258658826257961020?label=Discord&logo=Discord&style=social"></a>
</p>
---

23
bindings/go/Cargo.toml Normal file
View File

@@ -0,0 +1,23 @@
[package]
name = "limbo-go"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
[lib]
name = "_limbo_go"
crate-type = ["cdylib"]
path = "rs_src/lib.rs"
[features]
default = ["io_uring"]
io_uring = ["limbo_core/io_uring"]
[dependencies]
limbo_core = { path = "../../core/" }
[target.'cfg(target_os = "linux")'.dependencies]
limbo_core = { path = "../../core/", features = ["io_uring"] }

8
bindings/go/go.mod Normal file
View File

@@ -0,0 +1,8 @@
module limbo
go 1.23.4
require (
github.com/ebitengine/purego v0.8.2
golang.org/x/sys/windows v0.29.0
)

4
bindings/go/go.sum Normal file
View File

@@ -0,0 +1,4 @@
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=

141
bindings/go/limbo.go Normal file
View File

@@ -0,0 +1,141 @@
package limbo
import (
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"log/slog"
"os"
"runtime"
"sync"
"unsafe"
"github.com/ebitengine/purego"
"golang.org/x/sys/windows"
)
const limbo = "../../target/debug/lib_limbo_go"
const driverName = "limbo"
var limboLib uintptr
// getSystemLibrary loads the platform-specific limbo shared library and
// stores its handle in limboLib. Returns an error (instead of panicking,
// as before) on an unsupported GOOS, consistent with its error signature.
func getSystemLibrary() error {
	switch runtime.GOOS {
	case "darwin":
		slib, err := purego.Dlopen(fmt.Sprintf("%s.dylib", limbo), purego.RTLD_LAZY)
		if err != nil {
			return err
		}
		limboLib = slib
	case "linux":
		slib, err := purego.Dlopen(fmt.Sprintf("%s.so", limbo), purego.RTLD_LAZY)
		if err != nil {
			return err
		}
		limboLib = slib
	case "windows":
		slib, err := windows.LoadLibrary(fmt.Sprintf("%s.dll", limbo))
		if err != nil {
			return err
		}
		// windows.Handle is a distinct type; convert to the uintptr we store
		limboLib = uintptr(slib)
	default:
		return fmt.Errorf("GOOS=%s is not supported", runtime.GOOS)
	}
	return nil
}
// init loads the native library and registers the driver with database/sql.
func init() {
	if err := getSystemLibrary(); err != nil {
		// slog takes alternating key/value pairs after the message; the old
		// call passed err bare, producing a malformed ("!BADKEY") record.
		slog.Error("Error opening limbo library", "error", err)
		os.Exit(1)
	}
	sql.Register(driverName, &limboDriver{})
}
type limboDriver struct{}
func (d limboDriver) Open(name string) (driver.Conn, error) {
return openConn(name)
}
// toCString returns a pointer to a NUL-terminated copy of s for the FFI layer.
// NOTE(review): the backing array is Go-managed memory and only a uintptr
// escapes, so the GC is free to reclaim (or a future runtime to move) it —
// confirm the callee only reads it during the synchronous call and that the
// matching freeCString never passes this pointer to the Rust allocator.
func toCString(s string) uintptr {
	b := append([]byte(s), 0)
	return uintptr(unsafe.Pointer(&b[0]))
}
// helper to register an FFI function in the lib_limbo_go library.
// ptr must already be a pointer to a function variable (callers pass &fn);
// forward it as-is. The previous version passed &ptr — the address of the
// local interface{} copy — so the caller's function variable stayed nil and
// every subsequent call through it panicked.
func getFfiFunc(ptr interface{}, name string) {
	purego.RegisterLibFunc(ptr, limboLib, name)
}
// limboConn implements driver.Conn on top of a native connection handle.
type limboConn struct {
	ctx uintptr
	sync.Mutex
	prepare func(uintptr, uintptr) uintptr
}

// newConn wraps a native connection handle, registering the prepare FFI
// function eagerly so the connection is ready to use.
func newConn(ctx uintptr) *limboConn {
	var prepare func(uintptr, uintptr) uintptr
	getFfiFunc(&prepare, FfiDbPrepare)
	return &limboConn{
		ctx,
		sync.Mutex{},
		prepare,
	}
}
// openConn opens a database for the given DSN and returns a ready
// connection, or an error if the native open failed.
func openConn(dsn string) (*limboConn, error) {
	var dbOpen func(uintptr) uintptr
	getFfiFunc(&dbOpen, FfiDbOpen)

	cStr := toCString(dsn)
	defer freeCString(cStr)

	ctx := dbOpen(cStr)
	if ctx == 0 {
		return nil, fmt.Errorf("failed to open database for dsn=%q", dsn)
	}
	// construct through newConn so the prepare FFI pointer is registered;
	// the old bare &limboConn{ctx: ctx} left it nil
	return newConn(ctx), nil
}
// Close releases the native connection handle; closing twice is a no-op.
func (c *limboConn) Close() error {
	if c.ctx != 0 {
		var closeFn func(uintptr) uintptr
		getFfiFunc(&closeFn, FfiDbClose)
		closeFn(c.ctx)
		c.ctx = 0
	}
	return nil
}
// Prepare compiles the query on the native side and returns a driver.Stmt.
func (c *limboConn) Prepare(query string) (driver.Stmt, error) {
	if c.ctx == 0 {
		return nil, errors.New("connection closed")
	}
	// lazily register prepare if the connection was built without it
	if c.prepare == nil {
		var dbPrepare func(uintptr, uintptr) uintptr
		getFfiFunc(&dbPrepare, FfiDbPrepare)
		c.prepare = dbPrepare
	}
	qPtr := toCString(query)
	stmtPtr := c.prepare(c.ctx, qPtr)
	freeCString(qPtr)
	if stmtPtr == 0 {
		return nil, fmt.Errorf("prepare failed: %q", query)
	}
	// construct through initStmt so the statement's FFI function pointers
	// are registered — a bare &limboStmt{} leaves them nil, and any later
	// Exec/Query call would panic
	return initStmt(stmtPtr, query), nil
}
// Begin is required by driver.Conn; transactions are not yet implemented,
// so this always returns an error.
func (c *limboConn) Begin() (driver.Tx, error) {
	return nil, errors.New("transactions not implemented")
}

203
bindings/go/rs_src/lib.rs Normal file
View File

@@ -0,0 +1,203 @@
mod rows;
#[allow(dead_code)]
mod statement;
mod types;
use limbo_core::{Connection, Database, LimboError};
use std::{
ffi::{c_char, c_void},
rc::Rc,
str::FromStr,
sync::Arc,
};
/// Open a database for the DSN pointed to by `path` and return an opaque
/// connection pointer (null on any failure).
/// # Safety
/// Safe to be called from Go with null terminated DSN string.
/// performs null check on the path.
#[no_mangle]
pub unsafe extern "C" fn db_open(path: *const c_char) -> *mut c_void {
    if path.is_null() {
        println!("Path is null");
        return std::ptr::null_mut();
    }
    let path = unsafe { std::ffi::CStr::from_ptr(path) };
    // Don't panic across the FFI boundary on a non-UTF-8 path; the old
    // `to_str().unwrap()` would abort the Go process instead of failing.
    let Ok(path) = path.to_str() else {
        println!("Path is not valid UTF-8");
        return std::ptr::null_mut();
    };
    let db_options = parse_query_str(path);
    if let Ok(io) = get_io(&db_options.path) {
        match Database::open_file(io.clone(), &db_options.path.to_string()) {
            Ok(db) => {
                println!("Opened database: {}", path);
                let conn = db.connect();
                return LimboConn::new(conn, io).to_ptr();
            }
            Err(e) => {
                println!("Error opening database: {}", e);
                return std::ptr::null_mut();
            }
        }
    }
    std::ptr::null_mut()
}
#[allow(dead_code)]
struct LimboConn {
conn: Rc<Connection>,
io: Arc<dyn limbo_core::IO>,
}
impl LimboConn {
fn new(conn: Rc<Connection>, io: Arc<dyn limbo_core::IO>) -> Self {
LimboConn { conn, io }
}
#[allow(clippy::wrong_self_convention)]
fn to_ptr(self) -> *mut c_void {
Box::into_raw(Box::new(self)) as *mut c_void
}
fn from_ptr(ptr: *mut c_void) -> &'static mut LimboConn {
if ptr.is_null() {
panic!("Null pointer");
}
unsafe { &mut *(ptr as *mut LimboConn) }
}
}
/// Close the database connection
/// # Safety
/// safely frees the connection's memory
#[no_mangle]
pub unsafe extern "C" fn db_close(db: *mut c_void) {
    if db.is_null() {
        return;
    }
    // reconstruct the Box created by LimboConn::to_ptr and drop it
    drop(unsafe { Box::from_raw(db as *mut LimboConn) });
}
/// Choose an IO backend for the given location: MemoryIO for in-memory
/// databases, the platform default for everything else.
#[allow(clippy::arc_with_non_send_sync)]
fn get_io(db_location: &DbType) -> Result<Arc<dyn limbo_core::IO>, LimboError> {
    match db_location {
        DbType::Memory => Ok(Arc::new(limbo_core::MemoryIO::new()?)),
        _ => Ok(Arc::new(limbo_core::PlatformIO::new()?)),
    }
}
#[allow(dead_code)]
struct DbOptions {
path: DbType,
params: Parameters,
}
/// Location of the database: an on-disk file or the in-memory store.
#[derive(Default, Debug, Clone)]
enum DbType {
    File(String),
    #[default]
    Memory,
}

impl std::fmt::Display for DbType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let repr = match self {
            DbType::File(path) => path.as_str(),
            DbType::Memory => ":memory:",
        };
        f.write_str(repr)
    }
}
/// Query-string options parsed from a DSN tail (`?mode=...&cache=...`).
#[derive(Debug, Clone, Default)]
struct Parameters {
    mode: Mode,
    cache: Option<Cache>,
    vfs: Option<String>,
    nolock: bool,
    immutable: bool,
    modeof: Option<String>,
}

impl FromStr for Parameters {
    type Err = ();

    /// Parse the `?key=value&flag` portion of a DSN. Unknown keys are
    /// ignored. A key with a missing value is now skipped instead of
    /// panicking (the old code unwrapped `kv.next()` unconditionally, so
    /// e.g. `?mode` aborted the process).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let Some(query) = s.split('?').nth(1) else {
            return Ok(Parameters::default());
        };
        let mut params = Parameters::default();
        for param in query.split('&') {
            let mut kv = param.split('=');
            let key = kv.next();
            let value = kv.next();
            match key {
                Some("mode") => {
                    if let Some(v) = value {
                        params.mode = v.parse().unwrap_or_default();
                    }
                }
                Some("cache") => params.cache = value.and_then(|v| v.parse().ok()),
                Some("vfs") => params.vfs = value.map(str::to_string),
                Some("nolock") => params.nolock = true,
                Some("immutable") => params.immutable = true,
                Some("modeof") => params.modeof = value.map(str::to_string),
                _ => {}
            }
        }
        Ok(params)
    }
}
/// SQLite-style cache mode; any value other than "shared" maps to Private.
#[derive(Default, Debug, Clone, Copy)]
enum Cache {
    Shared,
    #[default]
    Private,
}

impl FromStr for Cache {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "shared" {
            Ok(Cache::Shared)
        } else {
            Ok(Cache::Private)
        }
    }
}
/// Open mode parsed from the DSN query string; unrecognized values fall
/// back to the default, read-write-create (matching SQLite's `rwc`).
#[allow(clippy::enum_variant_names)]
#[derive(Default, Debug, Clone, Copy)]
enum Mode {
    ReadOnly,
    ReadWrite,
    #[default]
    ReadWriteCreate,
}

impl FromStr for Mode {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mode = match s {
            "readonly" | "ro" => Mode::ReadOnly,
            "readwrite" | "rw" => Mode::ReadWrite,
            // "readwritecreate" / "rwc" and anything unknown both resolve
            // to the default variant, exactly as before
            _ => Mode::ReadWriteCreate,
        };
        Ok(mode)
    }
}
// At this point we don't have configurable parameters but many
// DSNs are going to have query parameters.
/// Split a DSN into its file location and query-string parameters.
fn parse_query_str(mut path: &str) -> DbOptions {
    if path == ":memory:" {
        return DbOptions {
            path: DbType::Memory,
            params: Parameters::default(),
        };
    }
    // strip_prefix fixes the previous off-by-one: "sqlite://" is 9 bytes,
    // but the old code sliced `[10..]`, silently dropping the first
    // character of the actual path.
    if let Some(stripped) = path.strip_prefix("sqlite://") {
        path = stripped;
    }
    // Parameters::from_str already returns the default when there is no
    // '?' in the string, so one call covers both branches of the old code.
    let params = Parameters::from_str(path).unwrap_or_default();
    let file = match path.find('?') {
        Some(idx) => &path[..idx],
        None => path,
    };
    DbOptions {
        path: DbType::File(file.to_string()),
        params,
    }
}

138
bindings/go/rs_src/rows.rs Normal file
View File

@@ -0,0 +1,138 @@
use crate::{
statement::LimboStatement,
types::{LimboValue, ResultCode},
};
use limbo_core::{Statement, StepResult, Value};
use std::ffi::{c_char, c_void};
pub struct LimboRows<'a> {
rows: Statement,
cursor: Option<Vec<Value<'a>>>,
stmt: Box<LimboStatement<'a>>,
}
impl<'a> LimboRows<'a> {
pub fn new(rows: Statement, stmt: Box<LimboStatement<'a>>) -> Self {
LimboRows {
rows,
stmt,
cursor: None,
}
}
#[allow(clippy::wrong_self_convention)]
pub fn to_ptr(self) -> *mut c_void {
Box::into_raw(Box::new(self)) as *mut c_void
}
pub fn from_ptr(ptr: *mut c_void) -> &'static mut LimboRows<'a> {
if ptr.is_null() {
panic!("Null pointer");
}
unsafe { &mut *(ptr as *mut LimboRows) }
}
}
#[no_mangle]
pub extern "C" fn rows_next(ctx: *mut c_void) -> ResultCode {
if ctx.is_null() {
return ResultCode::Error;
}
let ctx = LimboRows::from_ptr(ctx);
match ctx.rows.step() {
Ok(StepResult::Row(row)) => {
ctx.cursor = Some(row.values);
ResultCode::Row
}
Ok(StepResult::Done) => ResultCode::Done,
Ok(StepResult::IO) => {
let _ = ctx.stmt.conn.io.run_once();
ResultCode::Io
}
Ok(StepResult::Busy) => ResultCode::Busy,
Ok(StepResult::Interrupt) => ResultCode::Interrupt,
Err(_) => ResultCode::Error,
}
}
#[no_mangle]
pub extern "C" fn rows_get_value(ctx: *mut c_void, col_idx: usize) -> *const c_void {
if ctx.is_null() {
return std::ptr::null();
}
let ctx = LimboRows::from_ptr(ctx);
if let Some(ref cursor) = ctx.cursor {
if let Some(value) = cursor.get(col_idx) {
let val = LimboValue::from_value(value);
return val.to_ptr();
}
}
std::ptr::null()
}
/// Reclaim a C string previously handed out by this library; a null
/// pointer is a safe no-op.
#[no_mangle]
pub extern "C" fn free_string(s: *mut c_char) {
    if s.is_null() {
        return;
    }
    // SAFETY: the pointer originated from CString::into_raw on this side
    // of the FFI boundary, so reclaiming it with from_raw is sound.
    unsafe {
        drop(std::ffi::CString::from_raw(s));
    }
}
/// Return a heap-allocated, null-terminated array of C-string column names
/// and write the column count to `out_length`. Ownership of the array and
/// of every string passes to the caller, who must return them via
/// `free_columns` / `free_string`.
#[no_mangle]
pub extern "C" fn rows_get_columns(
    rows_ptr: *mut c_void,
    out_length: *mut usize,
) -> *mut *const c_char {
    if rows_ptr.is_null() || out_length.is_null() {
        return std::ptr::null_mut();
    }
    let rows = LimboRows::from_ptr(rows_ptr);
    let c_strings: Vec<std::ffi::CString> = rows
        .rows
        .columns()
        .iter()
        .map(|name| std::ffi::CString::new(name.as_str()).unwrap())
        .collect();
    unsafe {
        // the reported length excludes the trailing null sentinel
        *out_length = c_strings.len();
    }
    let mut c_ptrs: Vec<*const c_char> = c_strings.iter().map(|s| s.as_ptr()).collect();
    // Null-terminate the array: free_columns walks it until it hits a null
    // pointer, so without this sentinel it read past the end of the buffer.
    c_ptrs.push(std::ptr::null());
    // The strings are intentionally leaked here; free_columns reclaims them.
    std::mem::forget(c_strings);
    // Shrink to a boxed slice (capacity == length) before leaking, so the
    // pointer corresponds to a well-defined allocation.
    Box::into_raw(c_ptrs.into_boxed_slice()) as *mut *const c_char
}
/// Free a LimboRows previously leaked by `LimboRows::to_ptr`; a null
/// pointer is a safe no-op.
#[no_mangle]
pub extern "C" fn rows_close(rows_ptr: *mut c_void) {
    if rows_ptr.is_null() {
        return;
    }
    // SAFETY: pointer was produced by Box::into_raw in LimboRows::to_ptr
    unsafe {
        drop(Box::from_raw(rows_ptr as *mut LimboRows));
    }
}
/// Free a column-name array previously returned by `rows_get_columns`,
/// releasing each string and then the array itself.
/// NOTE(review): this walks the array until it finds a null pointer, but
/// `rows_get_columns` as written does not append a null sentinel — confirm
/// the array is actually terminated, otherwise this reads past the end.
/// NOTE(review): the final `Box::from_raw(columns)` reconstructs a
/// `Box<*const c_char>` (one element) from what was a `Vec` allocation —
/// verify the allocation/deallocation pairing.
#[no_mangle]
pub extern "C" fn free_columns(columns: *mut *const c_char) {
    if columns.is_null() {
        return;
    }
    unsafe {
        let mut idx = 0;
        // take back ownership of each CString so it is dropped
        while !(*columns.add(idx)).is_null() {
            let _ = std::ffi::CString::from_raw(*columns.add(idx) as *mut c_char);
            idx += 1;
        }
        let _ = Box::from_raw(columns);
    }
}
#[no_mangle]
pub extern "C" fn free_rows(rows: *mut c_void) {
if rows.is_null() {
return;
}
unsafe {
let _ = Box::from_raw(rows as *mut Statement);
}
}

View File

@@ -0,0 +1,139 @@
use crate::rows::LimboRows;
use crate::types::{AllocPool, LimboValue, ResultCode};
use crate::LimboConn;
use limbo_core::{Statement, StepResult};
use std::ffi::{c_char, c_void};
use std::num::NonZero;
/// Compile `query` on the given connection, returning an opaque pointer to
/// a LimboStatement (null on failure).
#[no_mangle]
pub extern "C" fn db_prepare(ctx: *mut c_void, query: *const c_char) -> *mut c_void {
    if ctx.is_null() || query.is_null() {
        return std::ptr::null_mut();
    }
    // Don't panic across the FFI boundary if the query bytes are not valid
    // UTF-8; the old `to_str().unwrap()` would abort the host process.
    let Ok(query_str) = unsafe { std::ffi::CStr::from_ptr(query) }.to_str() else {
        return std::ptr::null_mut();
    };
    let db = LimboConn::from_ptr(ctx);
    match db.conn.prepare(query_str.to_string()) {
        Ok(stmt) => LimboStatement::new(stmt, db).to_ptr(),
        Err(_) => std::ptr::null_mut(),
    }
}
#[no_mangle]
pub extern "C" fn stmt_execute(
ctx: *mut c_void,
args_ptr: *mut LimboValue,
arg_count: usize,
changes: *mut i64,
) -> ResultCode {
if ctx.is_null() {
return ResultCode::Error;
}
let stmt = LimboStatement::from_ptr(ctx);
let args = if !args_ptr.is_null() && arg_count > 0 {
unsafe { std::slice::from_raw_parts(args_ptr, arg_count) }
} else {
&[]
};
for (i, arg) in args.iter().enumerate() {
let val = arg.to_value(&mut stmt.pool);
stmt.statement.bind_at(NonZero::new(i + 1).unwrap(), val);
}
loop {
match stmt.statement.step() {
Ok(StepResult::Row(_)) => {
// unexpected row during execution, error out.
return ResultCode::Error;
}
Ok(StepResult::Done) => {
stmt.conn.conn.total_changes();
if !changes.is_null() {
unsafe {
*changes = stmt.conn.conn.total_changes();
}
}
return ResultCode::Done;
}
Ok(StepResult::IO) => {
let _ = stmt.conn.io.run_once();
}
Ok(StepResult::Busy) => {
return ResultCode::Busy;
}
Ok(StepResult::Interrupt) => {
return ResultCode::Interrupt;
}
Err(_) => {
return ResultCode::Error;
}
}
}
}
/// Number of bind parameters in the prepared statement, or -1 for a null
/// handle.
#[no_mangle]
pub extern "C" fn stmt_parameter_count(ctx: *mut c_void) -> i32 {
    if ctx.is_null() {
        return -1;
    }
    LimboStatement::from_ptr(ctx).statement.parameters_count() as i32
}
/// Bind the given arguments and run the statement as a query, returning an
/// opaque pointer to a LimboRows (null on failure).
#[no_mangle]
pub extern "C" fn stmt_query(
    ctx: *mut c_void,
    args_ptr: *mut LimboValue,
    args_count: usize,
) -> *mut c_void {
    if ctx.is_null() {
        return std::ptr::null_mut();
    }
    let stmt = LimboStatement::from_ptr(ctx);
    let args = if !args_ptr.is_null() && args_count > 0 {
        unsafe { std::slice::from_raw_parts(args_ptr, args_count) }
    } else {
        &[]
    };
    // bind parameters 1..=n (SQLite-style 1-based indices)
    for (i, arg) in args.iter().enumerate() {
        let val = arg.to_value(&mut stmt.pool);
        stmt.statement.bind_at(NonZero::new(i + 1).unwrap(), val);
    }
    match stmt.statement.query() {
        Ok(rows) => {
            // Re-box the statement (leaked earlier by to_ptr) so the rows
            // object takes ownership of it; after this, the caller's
            // statement pointer must not be used again.
            // NOTE(review): confirm the Go side never touches the stmt
            // handle after a successful stmt_query.
            let stmt = unsafe { Box::from_raw(stmt) };
            LimboRows::new(rows, stmt).to_ptr()
        }
        Err(_) => std::ptr::null_mut(),
    }
}
pub struct LimboStatement<'conn> {
pub statement: Statement,
pub conn: &'conn mut LimboConn,
pub pool: AllocPool,
}
impl<'conn> LimboStatement<'conn> {
pub fn new(statement: Statement, conn: &'conn mut LimboConn) -> Self {
LimboStatement {
statement,
conn,
pool: AllocPool::new(),
}
}
#[allow(clippy::wrong_self_convention)]
fn to_ptr(self) -> *mut c_void {
Box::into_raw(Box::new(self)) as *mut c_void
}
fn from_ptr(ptr: *mut c_void) -> &'static mut LimboStatement<'conn> {
if ptr.is_null() {
panic!("Null pointer");
}
unsafe { &mut *(ptr as *mut LimboStatement) }
}
}

190
bindings/go/rs_src/types.rs Normal file
View File

@@ -0,0 +1,190 @@
use std::ffi::{c_char, c_void};
#[allow(dead_code)]
#[repr(C)]
pub enum ResultCode {
Error = -1,
Ok = 0,
Row = 1,
Busy = 2,
Io = 3,
Interrupt = 4,
Invalid = 5,
Null = 6,
NoMem = 7,
ReadOnly = 8,
NoData = 9,
Done = 10,
}
#[repr(C)]
pub enum ValueType {
Integer = 0,
Text = 1,
Blob = 2,
Real = 3,
Null = 4,
}
#[repr(C)]
pub struct LimboValue {
pub value_type: ValueType,
pub value: ValueUnion,
}
#[repr(C)]
pub union ValueUnion {
pub int_val: i64,
pub real_val: f64,
pub text_ptr: *const c_char,
pub blob_ptr: *const c_void,
}
/// FFI-visible view of a byte buffer: raw data pointer plus length.
#[repr(C)]
pub struct Blob {
    pub data: *const u8,
    pub len: usize,
}

impl Blob {
    /// Reinterpret a borrowed Blob as an opaque pointer for the FFI layer.
    pub fn to_ptr(&self) -> *const c_void {
        let blob: *const Blob = self;
        blob as *const c_void
    }
}
/// Owns strings and blobs bound into a statement so that the borrowed
/// `limbo_core::Value` references handed out stay valid until the pool
/// itself is dropped.
pub struct AllocPool {
    strings: Vec<String>,
    blobs: Vec<Vec<u8>>,
}

impl AllocPool {
    pub fn new() -> Self {
        AllocPool {
            strings: Vec::new(),
            blobs: Vec::new(),
        }
    }

    /// Store `s` and return a reference tied to the pool's lifetime.
    pub fn add_string(&mut self, s: String) -> &String {
        self.strings.push(s);
        // just pushed, so last() cannot be None
        self.strings.last().expect("string was just pushed")
    }

    /// Store `b` and return a reference tied to the pool's lifetime.
    pub fn add_blob(&mut self, b: Vec<u8>) -> &Vec<u8> {
        self.blobs.push(b);
        // just pushed, so last() cannot be None
        self.blobs.last().expect("blob was just pushed")
    }
}
/// Reclaim a heap-allocated Blob handed across the FFI boundary; a null
/// pointer is a safe no-op.
#[no_mangle]
pub extern "C" fn free_blob(blob_ptr: *mut c_void) {
    if blob_ptr.is_null() {
        return;
    }
    // SAFETY: the pointer is expected to come from Box::into_raw(Box<Blob>)
    unsafe {
        drop(Box::from_raw(blob_ptr as *mut Blob));
    }
}
#[allow(dead_code)]
impl ValueUnion {
    // NOTE(review): stores a pointer to the str's bytes WITHOUT a NUL
    // terminator, but to_str() below reads it back with CStr::from_ptr,
    // which requires one — confirm text values never round-trip through
    // both sides, or this over-reads.
    fn from_str(s: &str) -> Self {
        ValueUnion {
            text_ptr: s.as_ptr() as *const c_char,
        }
    }
    // Wraps the byte slice in a stack-local Blob and stores its address;
    // NOTE(review): the Blob lives only as long as the enclosing LimboValue
    // expression — verify the pointer is consumed before it dangles.
    fn from_bytes(b: &[u8]) -> Self {
        ValueUnion {
            blob_ptr: Blob {
                data: b.as_ptr(),
                len: b.len(),
            }
            .to_ptr(),
        }
    }
    fn from_int(i: i64) -> Self {
        ValueUnion { int_val: i }
    }
    fn from_real(r: f64) -> Self {
        ValueUnion { real_val: r }
    }
    // Null is represented as an integer zero payload.
    fn from_null() -> Self {
        ValueUnion { int_val: 0 }
    }
    pub fn to_int(&self) -> i64 {
        unsafe { self.int_val }
    }
    pub fn to_real(&self) -> f64 {
        unsafe { self.real_val }
    }
    // Reads text_ptr as a NUL-terminated C string; see the from_str note.
    pub fn to_str(&self) -> &str {
        unsafe { std::ffi::CStr::from_ptr(self.text_ptr).to_str().unwrap() }
    }
    // Reinterprets blob_ptr as a Blob header and borrows its byte range.
    pub fn to_bytes(&self) -> &[u8] {
        let blob = unsafe { self.blob_ptr as *const Blob };
        let blob = unsafe { &*blob };
        unsafe { std::slice::from_raw_parts(blob.data, blob.len) }
    }
}
impl LimboValue {
pub fn new(value_type: ValueType, value: ValueUnion) -> Self {
LimboValue { value_type, value }
}
#[allow(clippy::wrong_self_convention)]
pub fn to_ptr(self) -> *const c_void {
Box::into_raw(Box::new(self)) as *const c_void
}
pub fn from_value(value: &limbo_core::Value<'_>) -> Self {
match value {
limbo_core::Value::Integer(i) => {
LimboValue::new(ValueType::Integer, ValueUnion::from_int(*i))
}
limbo_core::Value::Float(r) => {
LimboValue::new(ValueType::Real, ValueUnion::from_real(*r))
}
limbo_core::Value::Text(s) => LimboValue::new(ValueType::Text, ValueUnion::from_str(s)),
limbo_core::Value::Blob(b) => {
LimboValue::new(ValueType::Blob, ValueUnion::from_bytes(b))
}
limbo_core::Value::Null => LimboValue::new(ValueType::Null, ValueUnion::from_null()),
}
}
pub fn to_value<'pool>(&self, pool: &'pool mut AllocPool) -> limbo_core::Value<'pool> {
match self.value_type {
ValueType::Integer => limbo_core::Value::Integer(unsafe { self.value.int_val }),
ValueType::Real => limbo_core::Value::Float(unsafe { self.value.real_val }),
ValueType::Text => {
let cstr = unsafe { std::ffi::CStr::from_ptr(self.value.text_ptr) };
match cstr.to_str() {
Ok(utf8_str) => {
let owned = utf8_str.to_owned();
// statement needs to own these strings, will free when closed
let borrowed = pool.add_string(owned);
limbo_core::Value::Text(borrowed)
}
Err(_) => limbo_core::Value::Null,
}
}
ValueType::Blob => {
let blob_ptr = unsafe { self.value.blob_ptr as *const Blob };
if blob_ptr.is_null() {
limbo_core::Value::Null
} else {
let blob = unsafe { &*blob_ptr };
let data = unsafe { std::slice::from_raw_parts(blob.data, blob.len) };
let borrowed = pool.add_blob(data.to_vec());
limbo_core::Value::Blob(borrowed)
}
}
ValueType::Null => limbo_core::Value::Null,
}
}
}

194
bindings/go/stmt.go Normal file
View File

@@ -0,0 +1,194 @@
package limbo
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"io"
"unsafe"
)
// only construct limboStmt with initStmt function to ensure proper initialization
type limboStmt struct {
	ctx           uintptr
	sql           string
	query         stmtQueryFn
	execute       stmtExecuteFn
	getParamCount func(uintptr) int32
}

// Initialize/register the FFI function pointers for the statement methods.
// The previous version registered into copies held inside a temporary
// ExtFunc slice and then returned a struct whose function fields were never
// assigned, so every call through them panicked on nil.
func initStmt(ctx uintptr, sql string) *limboStmt {
	var query stmtQueryFn
	var execute stmtExecuteFn
	var getParamCount func(uintptr) int32
	getFfiFunc(&query, FfiStmtQuery)
	getFfiFunc(&execute, FfiStmtExec)
	getFfiFunc(&getParamCount, FfiStmtParameterCount)
	return &limboStmt{
		ctx:           ctx,
		sql:           sql,
		query:         query,
		execute:       execute,
		getParamCount: getParamCount,
	}
}
func (st *limboStmt) NumInput() int {
return int(st.getParamCount(st.ctx))
}
// Exec runs the statement with the given positional arguments and reports
// the number of affected rows.
func (st *limboStmt) Exec(args []driver.Value) (driver.Result, error) {
	argArray, err := buildArgs(args)
	if err != nil {
		return nil, err
	}
	argPtr := uintptr(0)
	argCount := uint64(len(argArray))
	if argCount > 0 {
		argPtr = uintptr(unsafe.Pointer(&argArray[0]))
	}
	var changes uint64
	rc := st.execute(st.ctx, argPtr, argCount, uintptr(unsafe.Pointer(&changes)))
	switch ResultCode(rc) {
	case Ok, Done:
		// the native layer reports successful completion as Done; the old
		// code only accepted Ok and mapped success to "unexpected status"
		return driver.RowsAffected(changes), nil
	case Error:
		return nil, errors.New("error executing statement")
	case Busy:
		return nil, errors.New("busy")
	case Interrupt:
		return nil, errors.New("interrupted")
	case Invalid:
		return nil, errors.New("invalid statement")
	default:
		return nil, fmt.Errorf("unexpected status: %d", rc)
	}
}
// Query runs the statement with the given positional arguments and returns
// an iterator over the result rows.
func (st *limboStmt) Query(args []driver.Value) (driver.Rows, error) {
	queryArgs, err := buildArgs(args)
	if err != nil {
		return nil, err
	}
	// guard the empty-args case: the old code indexed queryArgs[0]
	// unconditionally, panicking on statements with no parameters
	argPtr := uintptr(0)
	if len(queryArgs) > 0 {
		argPtr = uintptr(unsafe.Pointer(&queryArgs[0]))
	}
	rowsPtr := st.query(st.ctx, argPtr, uint64(len(queryArgs)))
	if rowsPtr == 0 {
		return nil, fmt.Errorf("query failed for: %q", st.sql)
	}
	return initRows(rowsPtr), nil
}
// ExecContext executes the statement with the provided named arguments.
// The query parameter is unused: the statement was prepared with its own SQL.
func (st *limboStmt) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
	stripped := namedValueToValue(args)
	argArray, err := getArgsPtr(stripped)
	if err != nil {
		return nil, err
	}
	// Pass a pointer so the native layer can write the change count into
	// it; the old code passed the zero value BY VALUE, so the write was
	// lost and the result always reported 0 affected rows.
	var changes uint64
	res := st.execute(st.ctx, argArray, uint64(len(args)), uintptr(unsafe.Pointer(&changes)))
	switch ResultCode(res) {
	case Ok, Done:
		// the native layer reports successful completion as Done
		return driver.RowsAffected(changes), nil
	case Error:
		return nil, errors.New("error executing statement")
	case Busy:
		return nil, errors.New("busy")
	case Interrupt:
		return nil, errors.New("interrupted")
	default:
		return nil, fmt.Errorf("unexpected status: %d", res)
	}
}
// QueryContext runs the statement with the provided named arguments and
// returns an iterator over the result rows.
func (st *limboStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	queryArgs, err := buildNamedArgs(args)
	if err != nil {
		return nil, err
	}
	// guard the empty-args case: the old code indexed queryArgs[0]
	// unconditionally, panicking on statements with no parameters
	argPtr := uintptr(0)
	if len(queryArgs) > 0 {
		argPtr = uintptr(unsafe.Pointer(&queryArgs[0]))
	}
	rowsPtr := st.query(st.ctx, argPtr, uint64(len(queryArgs)))
	if rowsPtr == 0 {
		return nil, fmt.Errorf("query failed for: %q", st.sql)
	}
	return initRows(rowsPtr), nil
}
// only construct limboRows with initRows function to ensure proper initialization
type limboRows struct {
ctx uintptr
columns []string
closed bool
getCols func(uintptr, *uint) uintptr
next func(uintptr) uintptr
getValue func(uintptr, int32) uintptr
closeRows func(uintptr) uintptr
freeCols func(uintptr) uintptr
}
// Initialize/register the FFI function pointers for the rows methods
// DO NOT construct 'limboRows' without this function
func initRows(ctx uintptr) *limboRows {
var getCols func(uintptr, *uint) uintptr
var getValue func(uintptr, int32) uintptr
var closeRows func(uintptr) uintptr
var freeCols func(uintptr) uintptr
var next func(uintptr) uintptr
methods := []ExtFunc{
{getCols, FfiRowsGetColumns},
{getValue, FfiRowsGetValue},
{closeRows, FfiRowsClose},
{freeCols, FfiFreeColumns},
{next, FfiRowsNext}}
for i := range methods {
methods[i].initFunc()
}
return &limboRows{
ctx: ctx,
getCols: getCols,
getValue: getValue,
closeRows: closeRows,
freeCols: freeCols,
next: next,
}
}
// Columns returns the result column names, fetching them from the native
// rows handle on first use and caching them for subsequent calls.
func (r *limboRows) Columns() []string {
	if r.columns == nil {
		var columnCount uint
		colArrayPtr := r.getCols(r.ctx, &columnCount)
		if colArrayPtr != 0 && columnCount > 0 {
			r.columns = cArrayToGoStrings(colArrayPtr, columnCount)
			// defensive: re-register freeCols in case construction skipped it
			if r.freeCols == nil {
				getFfiFunc(&r.freeCols, FfiFreeColumns)
			}
			// release the native array once the names are copied into Go strings
			defer r.freeCols(colArrayPtr)
		}
	}
	return r.columns
}
// Close releases the native rows handle; subsequent calls are no-ops.
func (r *limboRows) Close() error {
	if !r.closed {
		r.closed = true
		r.closeRows(r.ctx)
		r.ctx = 0
	}
	return nil
}
// Next advances to the next result row, filling dest with its values.
// Returns io.EOF when the result set is exhausted.
func (r *limboRows) Next(dest []driver.Value) error {
	for {
		status := r.next(r.ctx)
		switch ResultCode(status) {
		case Row:
			for i := range dest {
				valPtr := r.getValue(r.ctx, int32(i))
				dest[i] = toGoValue(valPtr)
			}
			return nil
		case Io:
			// the native layer ran pending I/O and asks us to step again;
			// the old code treated this as an "unexpected status" error
			continue
		case Done:
			return io.EOF
		default:
			return fmt.Errorf("unexpected status: %d", status)
		}
	}
}

248
bindings/go/types.go Normal file
View File

@@ -0,0 +1,248 @@
package limbo
import (
"database/sql/driver"
"fmt"
"unsafe"
)
type ResultCode int
const (
Error ResultCode = -1
Ok ResultCode = 0
Row ResultCode = 1
Busy ResultCode = 2
Io ResultCode = 3
Interrupt ResultCode = 4
Invalid ResultCode = 5
Null ResultCode = 6
NoMem ResultCode = 7
ReadOnly ResultCode = 8
NoData ResultCode = 9
Done ResultCode = 10
)
const (
FfiDbOpen string = "db_open"
FfiDbClose string = "db_close"
FfiDbPrepare string = "db_prepare"
FfiStmtExec string = "stmt_execute"
FfiStmtQuery string = "stmt_query"
FfiStmtParameterCount string = "stmt_parameter_count"
FfiRowsClose string = "rows_close"
FfiRowsGetColumns string = "rows_get_columns"
FfiRowsNext string = "rows_next"
FfiRowsGetValue string = "rows_get_value"
FfiFreeColumns string = "free_columns"
FfiFreeCString string = "free_string"
)
// convert a namedValue slice into normal values until named parameters are supported
func namedValueToValue(named []driver.NamedValue) []driver.Value {
out := make([]driver.Value, len(named))
for i, nv := range named {
out[i] = nv.Value
}
return out
}
// buildNamedArgs converts named driver arguments into limboValue FFI
// arguments. Named parameters are not yet supported, so only the
// positional values are used (same behavior as before; the value
// flattening is now delegated to namedValueToValue instead of being
// duplicated inline).
func buildNamedArgs(named []driver.NamedValue) ([]limboValue, error) {
	return buildArgs(namedValueToValue(named))
}
// ExtFunc pairs a function pointer with the name of the native FFI
// symbol that should back it.
type ExtFunc struct {
	funcPtr  interface{} // expected to reference a func variable to be bound
	funcName string      // exported symbol name in the limbo native library
}

// initFunc resolves the named FFI symbol and binds it via getFfiFunc.
// NOTE(review): this passes &ef.funcPtr, a *interface{}, whereas other
// call sites (e.g. limboRows.Columns) pass a typed *func directly —
// confirm getFfiFunc handles the interface indirection correctly.
func (ef *ExtFunc) initFunc() {
	getFfiFunc(&ef.funcPtr, ef.funcName)
}
// valueType tags which variant a limboValue payload carries. The
// ordering must stay in sync with the native side of the FFI boundary.
type valueType int

const (
	intVal valueType = iota
	textVal
	blobVal
	realVal
	nullVal
)
// limboValue is the C-compatible representation used to pass a single
// Go value over FFI.
type limboValue struct {
	Type  valueType
	Value [8]byte // 8-byte union: int64, float64, or a pointer, depending on Type
}
// Blob describes a byte buffer passed over FFI as a raw pointer plus
// length.
type Blob struct {
	Data uintptr // address of the first byte
	Len  uint    // number of bytes
}
// toGoValue converts a *limboValue (received as a raw pointer over FFI)
// into the corresponding native Go value. Null and any unrecognized
// type tag both map to nil.
func toGoValue(valPtr uintptr) interface{} {
	v := (*limboValue)(unsafe.Pointer(valPtr))
	switch v.Type {
	case intVal:
		return *(*int64)(unsafe.Pointer(&v.Value))
	case realVal:
		return *(*float64)(unsafe.Pointer(&v.Value))
	case textVal:
		return GoString(*(*uintptr)(unsafe.Pointer(&v.Value)))
	case blobVal:
		return toGoBlob(*(*uintptr)(unsafe.Pointer(&v.Value)))
	}
	return nil
}
// getArgsPtr builds the FFI argument array for args and returns a raw
// pointer to its first element. An empty argument list yields 0.
func getArgsPtr(args []driver.Value) (uintptr, error) {
	if len(args) == 0 {
		return 0, nil
	}
	built, err := buildArgs(args)
	if err != nil {
		return 0, err
	}
	first := &built[0]
	return uintptr(unsafe.Pointer(first)), nil
}
// makeBlob wraps a byte slice in a Blob descriptor suitable for
// sending over FFI. An empty slice yields nil.
func makeBlob(b []byte) *Blob {
	if len(b) == 0 {
		return nil
	}
	return &Blob{
		Data: uintptr(unsafe.Pointer(&b[0])),
		Len:  uint(len(b)),
	}
}
// toGoBlob converts a *Blob received over FFI into a Go byte slice
// that aliases the native memory. A zero pointer yields nil.
func toGoBlob(blobPtr uintptr) []byte {
	if blobPtr == 0 {
		return nil
	}
	b := (*Blob)(unsafe.Pointer(blobPtr))
	data := (*byte)(unsafe.Pointer(b.Data))
	return unsafe.Slice(data, b.Len)
}
// freeString is the lazily resolved FFI function that releases a C
// string allocated by the native library.
var freeString func(*byte)

// freeCString releases a native C string. A zero pointer is a no-op.
// The FFI symbol is resolved on first use.
func freeCString(cstr uintptr) {
	if cstr != 0 {
		if freeString == nil {
			getFfiFunc(&freeString, FfiFreeCString)
		}
		freeString((*byte)(unsafe.Pointer(cstr)))
	}
}
// cArrayToGoStrings copies a native array of NUL-terminated C string
// pointers into a Go string slice. A zero pointer or zero length
// yields nil.
func cArrayToGoStrings(arrayPtr uintptr, length uint) []string {
	if arrayPtr == 0 || length == 0 {
		return nil
	}
	cstrs := unsafe.Slice((**byte)(unsafe.Pointer(arrayPtr)), length)
	result := make([]string, length)
	for i, p := range cstrs {
		result[i] = GoString(uintptr(unsafe.Pointer(p)))
	}
	return result
}
// buildArgs converts a slice of driver.Value into limboValue structs
// that can be passed over FFI. Supported Go types are nil, int64,
// float64, string, and []byte; any other type produces an error.
//
// NOTE(review): text and blob entries store raw pointers into Go
// memory (via CString / makeBlob); the caller must keep args reachable
// until the native call returns — confirm against the FFI call sites.
func buildArgs(args []driver.Value) ([]limboValue, error) {
	argSlice := make([]limboValue, len(args))
	for i, v := range args {
		switch val := v.(type) {
		case nil:
			argSlice[i].Type = nullVal
		case int64:
			argSlice[i].Type = intVal
			storeInt64(&argSlice[i].Value, val)
		case float64:
			argSlice[i].Type = realVal
			storeFloat64(&argSlice[i].Value, val)
		case string:
			argSlice[i].Type = textVal
			storePointer(&argSlice[i].Value, CString(val))
		case []byte:
			argSlice[i].Type = blobVal
			// Store through storePointer for consistency with the text
			// case, instead of an inline unsafe write of the address.
			storePointer(&argSlice[i].Value, (*byte)(unsafe.Pointer(makeBlob(val))))
		default:
			return nil, fmt.Errorf("unsupported type: %T", v)
		}
	}
	return argSlice, nil
}
// storeInt64 writes val into the 8-byte limboValue payload union.
func storeInt64(data *[8]byte, val int64) {
	*(*int64)(unsafe.Pointer(data)) = val
}

// storeFloat64 writes val into the 8-byte limboValue payload union.
func storeFloat64(data *[8]byte, val float64) {
	*(*float64)(unsafe.Pointer(data)) = val
}

// storePointer writes ptr's address into the 8-byte limboValue payload
// union.
func storePointer(data *[8]byte, ptr *byte) {
	*(*uintptr)(unsafe.Pointer(data)) = uintptr(unsafe.Pointer(ptr))
}
// stmtExecuteFn is the FFI signature for executing a prepared
// statement. `changes` is an out-pointer — presumably the affected-row
// count; confirm against the native stmt_execute export.
type stmtExecuteFn func(stmtPtr uintptr, argsPtr uintptr, argCount uint64, changes uintptr) int32

// stmtQueryFn is the FFI signature for running a query; it returns a
// raw pointer to the native rows object.
type stmtQueryFn func(stmtPtr uintptr, argsPtr uintptr, argCount uint64) uintptr
/* Credit below (Apache2 License) to:
https://github.com/ebitengine/purego/blob/main/internal/strings/strings.go
*/
// hasSuffix reports whether s ends with suffix. Local copy kept to
// avoid pulling in the strings package.
func hasSuffix(s, suffix string) bool {
	n := len(s) - len(suffix)
	return n >= 0 && s[n:] == suffix
}
// CString returns a pointer to a NUL-terminated byte sequence holding
// name. If name already ends in NUL, it returns a pointer directly into
// the string's backing bytes to avoid a copy; otherwise it allocates a
// new buffer with a trailing NUL appended.
func CString(name string) *byte {
	if hasSuffix(name, "\x00") {
		// Reinterpret the string header as a slice header to reach the
		// backing array without copying. Safe only because the bytes
		// are never written through this pointer on the Go side.
		return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
	}
	b := make([]byte, len(name)+1)
	copy(b, name)
	return &b[0]
}
func GoString(c uintptr) string {
ptr := *(*unsafe.Pointer)(unsafe.Pointer(&c))
if ptr == nil {
return ""
}
var length int
for {
if *(*byte)(unsafe.Add(ptr, uintptr(length))) == '\x00' {
break
}
length++
}
return string(unsafe.Slice((*byte)(ptr), length))
}

View File

@@ -1,6 +1,12 @@
.PHONY: test build_test
.PHONY: java_lint test build_test
test: build_test
lint:
./gradlew spotlessCheck
lint_apply:
./gradlew spotlessApply
test: lint build_test
./gradlew test --info
build_test:

View File

@@ -7,6 +7,9 @@ plugins {
java
application
id("net.ltgt.errorprone") version "3.1.0"
// If you're stuck on JRE 8, use id 'com.diffplug.spotless' version '6.13.0' or older.
id("com.diffplug.spotless") version "6.13.0"
}
group = "org.github.tursodatabase"
@@ -111,3 +114,12 @@ tasks.withType<JavaCompile> {
}
}
}
spotless {
java {
target("**/*.java")
targetExclude(layout.buildDirectory.dir("**/*.java").get().asFile)
removeUnusedImports()
googleJavaFormat("1.7") // or use eclipse().configFile("path/to/eclipse-format.xml")
}
}

View File

@@ -9,16 +9,19 @@ use jni::sys::jlong;
use jni::JNIEnv;
use limbo_core::Connection;
use std::rc::Rc;
use std::sync::Arc;
#[derive(Clone)]
#[allow(dead_code)]
pub struct LimboConnection {
// Because java's LimboConnection is 1:1 mapped to limbo connection, we can use Rc
pub(crate) conn: Rc<Connection>,
pub(crate) io: Rc<dyn limbo_core::IO>,
// Because io is shared across multiple `LimboConnection`s, wrap it with Arc
pub(crate) io: Arc<dyn limbo_core::IO>,
}
impl LimboConnection {
pub fn new(conn: Rc<Connection>, io: Rc<dyn limbo_core::IO>) -> Self {
pub fn new(conn: Rc<Connection>, io: Arc<dyn limbo_core::IO>) -> Self {
LimboConnection { conn, io }
}
@@ -69,7 +72,7 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboConnection_prepar
};
match connection.conn.prepare(sql) {
Ok(stmt) => LimboStatement::new(stmt).to_ptr(),
Ok(stmt) => LimboStatement::new(stmt, connection.clone()).to_ptr(),
Err(e) => {
set_err_msg_and_throw_exception(
&mut env,

View File

@@ -5,16 +5,16 @@ use jni::objects::{JByteArray, JObject};
use jni::sys::{jint, jlong};
use jni::JNIEnv;
use limbo_core::Database;
use std::rc::Rc;
use std::sync::Arc;
struct LimboDB {
db: Arc<Database>,
io: Arc<dyn limbo_core::IO>,
}
impl LimboDB {
pub fn new(db: Arc<Database>) -> Self {
LimboDB { db }
pub fn new(db: Arc<Database>, io: Arc<dyn limbo_core::IO>) -> Self {
LimboDB { db, io }
}
pub fn to_ptr(self) -> jlong {
@@ -76,14 +76,13 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboDB_openUtf8<'loca
}
};
LimboDB::new(db).to_ptr()
LimboDB::new(db, io).to_ptr()
}
#[no_mangle]
pub extern "system" fn Java_org_github_tursodatabase_core_LimboDB_connect0<'local>(
mut env: JNIEnv<'local>,
obj: JObject<'local>,
file_path_byte_arr: JByteArray<'local>,
db_pointer: jlong,
) -> jlong {
let db = match to_limbo_db(db_pointer) {
@@ -94,41 +93,7 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboDB_connect0<'loca
}
};
let path = match env
.convert_byte_array(file_path_byte_arr)
.map_err(|e| e.to_string())
{
Ok(bytes) => match String::from_utf8(bytes) {
Ok(s) => s,
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
return 0;
}
},
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
return 0;
}
};
let io: Rc<dyn limbo_core::IO> = match path.as_str() {
":memory:" => match limbo_core::MemoryIO::new() {
Ok(io) => Rc::new(io),
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
return 0;
}
},
_ => match limbo_core::PlatformIO::new() {
Ok(io) => Rc::new(io),
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
return 0;
}
},
};
let conn = LimboConnection::new(db.db.connect(), io);
let conn = LimboConnection::new(db.db.connect(), db.io.clone());
conn.to_ptr()
}

View File

@@ -1,5 +1,6 @@
use crate::errors::Result;
use crate::errors::{LimboError, LIMBO_ETC};
use crate::limbo_connection::LimboConnection;
use crate::utils::set_err_msg_and_throw_exception;
use jni::objects::{JObject, JValue};
use jni::sys::jlong;
@@ -7,6 +8,7 @@ use jni::JNIEnv;
use limbo_core::{Statement, StepResult};
pub const STEP_RESULT_ID_ROW: i32 = 10;
#[allow(dead_code)]
pub const STEP_RESULT_ID_IO: i32 = 20;
pub const STEP_RESULT_ID_DONE: i32 = 30;
pub const STEP_RESULT_ID_INTERRUPT: i32 = 40;
@@ -15,11 +17,12 @@ pub const STEP_RESULT_ID_ERROR: i32 = 60;
pub struct LimboStatement {
pub(crate) stmt: Statement,
pub(crate) connection: LimboConnection,
}
impl LimboStatement {
pub fn new(stmt: Statement) -> Self {
LimboStatement { stmt }
pub fn new(stmt: Statement, connection: LimboConnection) -> Self {
LimboStatement { stmt, connection }
}
pub fn to_ptr(self) -> jlong {
@@ -50,30 +53,38 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboStatement_step<'l
Ok(stmt) => stmt,
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
return JObject::null();
return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None);
}
};
match stmt.stmt.step() {
Ok(StepResult::Row(row)) => match row_to_obj_array(&mut env, &row) {
Ok(row) => to_limbo_step_result(&mut env, STEP_RESULT_ID_ROW, Some(row)),
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None)
loop {
let step_result = match stmt.stmt.step() {
Ok(result) => result,
Err(_) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None),
};
match step_result {
StepResult::Row(row) => {
return match row_to_obj_array(&mut env, &row) {
Ok(row) => to_limbo_step_result(&mut env, STEP_RESULT_ID_ROW, Some(row)),
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None)
}
}
}
},
Ok(StepResult::IO) => match env.new_object_array(0, "java/lang/Object", JObject::null()) {
Ok(row) => to_limbo_step_result(&mut env, STEP_RESULT_ID_IO, Some(row.into())),
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None)
StepResult::IO => {
if let Err(e) = stmt.connection.io.run_once() {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());
return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None);
}
}
},
Ok(StepResult::Done) => to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None),
Ok(StepResult::Interrupt) => to_limbo_step_result(&mut env, STEP_RESULT_ID_INTERRUPT, None),
Ok(StepResult::Busy) => to_limbo_step_result(&mut env, STEP_RESULT_ID_BUSY, None),
_ => to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None),
StepResult::Done => return to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None),
StepResult::Interrupt => {
return to_limbo_step_result(&mut env, STEP_RESULT_ID_INTERRUPT, None)
}
StepResult::Busy => return to_limbo_step_result(&mut env, STEP_RESULT_ID_BUSY, None),
}
}
}

View File

@@ -1,79 +1,82 @@
package org.github.tursodatabase;
import org.github.tursodatabase.annotations.Nullable;
import org.github.tursodatabase.annotations.SkipNullableCheck;
import org.github.tursodatabase.core.LimboConnection;
import org.github.tursodatabase.jdbc4.JDBC4Connection;
import java.sql.*;
import java.util.Locale;
import java.util.Properties;
import java.util.logging.Logger;
import org.github.tursodatabase.annotations.Nullable;
import org.github.tursodatabase.annotations.SkipNullableCheck;
import org.github.tursodatabase.core.LimboConnection;
import org.github.tursodatabase.jdbc4.JDBC4Connection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class JDBC implements Driver {
private static final String VALID_URL_PREFIX = "jdbc:sqlite:";
private static final Logger logger = LoggerFactory.getLogger(JDBC.class);
static {
try {
DriverManager.registerDriver(new JDBC());
} catch (Exception e) {
// TODO: log
}
private static final String VALID_URL_PREFIX = "jdbc:sqlite:";
static {
try {
DriverManager.registerDriver(new JDBC());
} catch (Exception e) {
logger.error("Failed to register driver", e);
}
}
@Nullable
public static LimboConnection createConnection(String url, Properties properties) throws SQLException {
if (!isValidURL(url)) return null;
@Nullable
public static LimboConnection createConnection(String url, Properties properties)
throws SQLException {
if (!isValidURL(url)) return null;
url = url.trim();
return new JDBC4Connection(url, extractAddress(url), properties);
}
url = url.trim();
return new JDBC4Connection(url, extractAddress(url), properties);
}
private static boolean isValidURL(String url) {
return url != null && url.toLowerCase(Locale.ROOT).startsWith(VALID_URL_PREFIX);
}
private static boolean isValidURL(String url) {
return url != null && url.toLowerCase(Locale.ROOT).startsWith(VALID_URL_PREFIX);
}
private static String extractAddress(String url) {
return url.substring(VALID_URL_PREFIX.length());
}
private static String extractAddress(String url) {
return url.substring(VALID_URL_PREFIX.length());
}
@Nullable
@Override
public Connection connect(String url, Properties info) throws SQLException {
return createConnection(url, info);
}
@Nullable
@Override
public Connection connect(String url, Properties info) throws SQLException {
return createConnection(url, info);
}
@Override
public boolean acceptsURL(String url) throws SQLException {
return isValidURL(url);
}
@Override
public boolean acceptsURL(String url) throws SQLException {
return isValidURL(url);
}
@Override
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
return LimboConfig.getDriverPropertyInfo();
}
@Override
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
return LimboConfig.getDriverPropertyInfo();
}
@Override
public int getMajorVersion() {
// TODO
return 0;
}
@Override
public int getMajorVersion() {
// TODO
return 0;
}
@Override
public int getMinorVersion() {
// TODO
return 0;
}
@Override
public int getMinorVersion() {
// TODO
return 0;
}
@Override
public boolean jdbcCompliant() {
return false;
}
@Override
public boolean jdbcCompliant() {
return false;
}
@Override
@SkipNullableCheck
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public java.util.logging.Logger getParentLogger() throws SQLFeatureNotSupportedException {
// TODO
return null;
}
}

View File

@@ -4,48 +4,47 @@ import java.sql.DriverPropertyInfo;
import java.util.Arrays;
import java.util.Properties;
/**
* Limbo Configuration.
*/
/** Limbo Configuration. */
public class LimboConfig {
private final Properties pragma;
private final Properties pragma;
public LimboConfig(Properties properties) {
this.pragma = properties;
public LimboConfig(Properties properties) {
this.pragma = properties;
}
public static DriverPropertyInfo[] getDriverPropertyInfo() {
return Arrays.stream(Pragma.values())
.map(
p -> {
DriverPropertyInfo info = new DriverPropertyInfo(p.pragmaName, null);
info.description = p.description;
info.choices = p.choices;
info.required = false;
return info;
})
.toArray(DriverPropertyInfo[]::new);
}
public Properties toProperties() {
Properties copy = new Properties();
copy.putAll(pragma);
return copy;
}
public enum Pragma {
;
private final String pragmaName;
private final String description;
private final String[] choices;
Pragma(String pragmaName, String description, String[] choices) {
this.pragmaName = pragmaName;
this.description = description;
this.choices = choices;
}
public static DriverPropertyInfo[] getDriverPropertyInfo() {
return Arrays.stream(Pragma.values())
.map(p -> {
DriverPropertyInfo info = new DriverPropertyInfo(p.pragmaName, null);
info.description = p.description;
info.choices = p.choices;
info.required = false;
return info;
})
.toArray(DriverPropertyInfo[]::new);
}
public Properties toProperties() {
Properties copy = new Properties();
copy.putAll(pragma);
return copy;
}
public enum Pragma {
;
private final String pragmaName;
private final String description;
private final String[] choices;
Pragma(String pragmaName, String description, String[] choices) {
this.pragmaName = pragmaName;
this.description = description;
this.choices = choices;
}
public String getPragmaName() {
return pragmaName;
}
public String getPragmaName() {
return pragmaName;
}
}
}

View File

@@ -1,88 +1,87 @@
package org.github.tursodatabase;
import org.github.tursodatabase.annotations.Nullable;
import org.github.tursodatabase.annotations.SkipNullableCheck;
import javax.sql.DataSource;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.Properties;
import java.util.logging.Logger;
import javax.sql.DataSource;
import org.github.tursodatabase.annotations.Nullable;
import org.github.tursodatabase.annotations.SkipNullableCheck;
/**
* Provides {@link DataSource} API for configuring Limbo database connection.
*/
/** Provides {@link DataSource} API for configuring Limbo database connection. */
public class LimboDataSource implements DataSource {
private final LimboConfig limboConfig;
private final String url;
private final LimboConfig limboConfig;
private final String url;
/**
* Creates a datasource based on the provided configuration.
*
* @param limboConfig The configuration for the datasource.
*/
public LimboDataSource(LimboConfig limboConfig, String url) {
this.limboConfig = limboConfig;
this.url = url;
}
/**
* Creates a datasource based on the provided configuration.
*
* @param limboConfig The configuration for the datasource.
*/
public LimboDataSource(LimboConfig limboConfig, String url) {
this.limboConfig = limboConfig;
this.url = url;
}
@Override
@Nullable
public Connection getConnection() throws SQLException {
return getConnection(null, null);
}
@Override
@Nullable
public Connection getConnection() throws SQLException {
return getConnection(null, null);
}
@Override
@Nullable
public Connection getConnection(@Nullable String username, @Nullable String password) throws SQLException {
Properties properties = limboConfig.toProperties();
if (username != null) properties.put("user", username);
if (password != null) properties.put("pass", password);
return JDBC.createConnection(url, properties);
}
@Override
@SkipNullableCheck
public PrintWriter getLogWriter() throws SQLException {
// TODO
return null;
}
@Override
@Nullable
public Connection getConnection(@Nullable String username, @Nullable String password)
throws SQLException {
Properties properties = limboConfig.toProperties();
if (username != null) properties.put("user", username);
if (password != null) properties.put("pass", password);
return JDBC.createConnection(url, properties);
}
@Override
public void setLogWriter(PrintWriter out) throws SQLException {
// TODO
}
@Override
@SkipNullableCheck
public PrintWriter getLogWriter() throws SQLException {
// TODO
return null;
}
@Override
public void setLoginTimeout(int seconds) throws SQLException {
// TODO
}
@Override
public void setLogWriter(PrintWriter out) throws SQLException {
// TODO
}
@Override
public int getLoginTimeout() throws SQLException {
// TODO
return 0;
}
@Override
public void setLoginTimeout(int seconds) throws SQLException {
// TODO
}
@Override
@SkipNullableCheck
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
// TODO
return null;
}
@Override
public int getLoginTimeout() throws SQLException {
// TODO
return 0;
}
@Override
@SkipNullableCheck
public <T> T unwrap(Class<T> iface) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
// TODO
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
// TODO
return false;
}
@Override
@SkipNullableCheck
public <T> T unwrap(Class<T> iface) throws SQLException {
// TODO
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
// TODO
return false;
}
}

View File

@@ -2,72 +2,67 @@ package org.github.tursodatabase;
import org.github.tursodatabase.core.SqliteCode;
/**
* Limbo error code. Superset of SQLite3 error code.
*/
/** Limbo error code. Superset of SQLite3 error code. */
public enum LimboErrorCode {
SQLITE_OK(SqliteCode.SQLITE_OK, "Successful result"),
SQLITE_ERROR(SqliteCode.SQLITE_ERROR, "SQL error or missing database"),
SQLITE_INTERNAL(SqliteCode.SQLITE_INTERNAL, "An internal logic error in SQLite"),
SQLITE_PERM(SqliteCode.SQLITE_PERM, "Access permission denied"),
SQLITE_ABORT(SqliteCode.SQLITE_ABORT, "Callback routine requested an abort"),
SQLITE_BUSY(SqliteCode.SQLITE_BUSY, "The database file is locked"),
SQLITE_LOCKED(SqliteCode.SQLITE_LOCKED, "A table in the database is locked"),
SQLITE_NOMEM(SqliteCode.SQLITE_NOMEM, "A malloc() failed"),
SQLITE_READONLY(SqliteCode.SQLITE_READONLY, "Attempt to write a readonly database"),
SQLITE_INTERRUPT(SqliteCode.SQLITE_INTERRUPT, "Operation terminated by sqlite_interrupt()"),
SQLITE_IOERR(SqliteCode.SQLITE_IOERR, "Some kind of disk I/O error occurred"),
SQLITE_CORRUPT(SqliteCode.SQLITE_CORRUPT, "The database disk image is malformed"),
SQLITE_NOTFOUND(SqliteCode.SQLITE_NOTFOUND, "(Internal Only) Table or record not found"),
SQLITE_FULL(SqliteCode.SQLITE_FULL, "Insertion failed because database is full"),
SQLITE_CANTOPEN(SqliteCode.SQLITE_CANTOPEN, "Unable to open the database file"),
SQLITE_PROTOCOL(SqliteCode.SQLITE_PROTOCOL, "Database lock protocol error"),
SQLITE_EMPTY(SqliteCode.SQLITE_EMPTY, "(Internal Only) Database table is empty"),
SQLITE_SCHEMA(SqliteCode.SQLITE_SCHEMA, "The database schema changed"),
SQLITE_TOOBIG(SqliteCode.SQLITE_TOOBIG, "Too much data for one row of a table"),
SQLITE_CONSTRAINT(SqliteCode.SQLITE_CONSTRAINT, "Abort due to constraint violation"),
SQLITE_MISMATCH(SqliteCode.SQLITE_MISMATCH, "Data type mismatch"),
SQLITE_MISUSE(SqliteCode.SQLITE_MISUSE, "Library used incorrectly"),
SQLITE_NOLFS(SqliteCode.SQLITE_NOLFS, "Uses OS features not supported on host"),
SQLITE_AUTH(SqliteCode.SQLITE_AUTH, "Authorization denied"),
SQLITE_ROW(SqliteCode.SQLITE_ROW, "sqlite_step() has another row ready"),
SQLITE_DONE(SqliteCode.SQLITE_DONE, "sqlite_step() has finished executing"),
SQLITE_INTEGER(SqliteCode.SQLITE_INTEGER, "Integer type"),
SQLITE_FLOAT(SqliteCode.SQLITE_FLOAT, "Float type"),
SQLITE_TEXT(SqliteCode.SQLITE_TEXT, "Text type"),
SQLITE_BLOB(SqliteCode.SQLITE_BLOB, "Blob type"),
SQLITE_NULL(SqliteCode.SQLITE_NULL, "Null type"),
SQLITE_OK(SqliteCode.SQLITE_OK, "Successful result"),
SQLITE_ERROR(SqliteCode.SQLITE_ERROR, "SQL error or missing database"),
SQLITE_INTERNAL(SqliteCode.SQLITE_INTERNAL, "An internal logic error in SQLite"),
SQLITE_PERM(SqliteCode.SQLITE_PERM, "Access permission denied"),
SQLITE_ABORT(SqliteCode.SQLITE_ABORT, "Callback routine requested an abort"),
SQLITE_BUSY(SqliteCode.SQLITE_BUSY, "The database file is locked"),
SQLITE_LOCKED(SqliteCode.SQLITE_LOCKED, "A table in the database is locked"),
SQLITE_NOMEM(SqliteCode.SQLITE_NOMEM, "A malloc() failed"),
SQLITE_READONLY(SqliteCode.SQLITE_READONLY, "Attempt to write a readonly database"),
SQLITE_INTERRUPT(SqliteCode.SQLITE_INTERRUPT, "Operation terminated by sqlite_interrupt()"),
SQLITE_IOERR(SqliteCode.SQLITE_IOERR, "Some kind of disk I/O error occurred"),
SQLITE_CORRUPT(SqliteCode.SQLITE_CORRUPT, "The database disk image is malformed"),
SQLITE_NOTFOUND(SqliteCode.SQLITE_NOTFOUND, "(Internal Only) Table or record not found"),
SQLITE_FULL(SqliteCode.SQLITE_FULL, "Insertion failed because database is full"),
SQLITE_CANTOPEN(SqliteCode.SQLITE_CANTOPEN, "Unable to open the database file"),
SQLITE_PROTOCOL(SqliteCode.SQLITE_PROTOCOL, "Database lock protocol error"),
SQLITE_EMPTY(SqliteCode.SQLITE_EMPTY, "(Internal Only) Database table is empty"),
SQLITE_SCHEMA(SqliteCode.SQLITE_SCHEMA, "The database schema changed"),
SQLITE_TOOBIG(SqliteCode.SQLITE_TOOBIG, "Too much data for one row of a table"),
SQLITE_CONSTRAINT(SqliteCode.SQLITE_CONSTRAINT, "Abort due to constraint violation"),
SQLITE_MISMATCH(SqliteCode.SQLITE_MISMATCH, "Data type mismatch"),
SQLITE_MISUSE(SqliteCode.SQLITE_MISUSE, "Library used incorrectly"),
SQLITE_NOLFS(SqliteCode.SQLITE_NOLFS, "Uses OS features not supported on host"),
SQLITE_AUTH(SqliteCode.SQLITE_AUTH, "Authorization denied"),
SQLITE_ROW(SqliteCode.SQLITE_ROW, "sqlite_step() has another row ready"),
SQLITE_DONE(SqliteCode.SQLITE_DONE, "sqlite_step() has finished executing"),
SQLITE_INTEGER(SqliteCode.SQLITE_INTEGER, "Integer type"),
SQLITE_FLOAT(SqliteCode.SQLITE_FLOAT, "Float type"),
SQLITE_TEXT(SqliteCode.SQLITE_TEXT, "Text type"),
SQLITE_BLOB(SqliteCode.SQLITE_BLOB, "Blob type"),
SQLITE_NULL(SqliteCode.SQLITE_NULL, "Null type"),
UNKNOWN_ERROR(-1, "Unknown error"),
LIMBO_FAILED_TO_PARSE_BYTE_ARRAY(1100, "Failed to parse ut8 byte array"),
LIMBO_FAILED_TO_PREPARE_STATEMENT(1200, "Failed to prepare statement"),
LIMBO_ETC(9999, "Unclassified error");
UNKNOWN_ERROR(-1, "Unknown error"),
LIMBO_FAILED_TO_PARSE_BYTE_ARRAY(1100, "Failed to parse ut8 byte array"),
LIMBO_FAILED_TO_PREPARE_STATEMENT(1200, "Failed to prepare statement"),
LIMBO_ETC(9999, "Unclassified error");
public final int code;
public final String message;
public final int code;
public final String message;
/**
* @param code Error code
* @param message Message for the error.
*/
LimboErrorCode(int code, String message) {
this.code = code;
this.message = message;
/**
* @param code Error code
* @param message Message for the error.
*/
LimboErrorCode(int code, String message) {
this.code = code;
this.message = message;
}
public static LimboErrorCode getErrorCode(int errorCode) {
for (LimboErrorCode limboErrorCode : LimboErrorCode.values()) {
if (errorCode == limboErrorCode.code) return limboErrorCode;
}
public static LimboErrorCode getErrorCode(int errorCode) {
for (LimboErrorCode limboErrorCode: LimboErrorCode.values()) {
if (errorCode == limboErrorCode.code) return limboErrorCode;
}
return UNKNOWN_ERROR;
}
return UNKNOWN_ERROR;
}
@Override
public String toString() {
return "LimboErrorCode{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
@Override
public String toString() {
return "LimboErrorCode{" + "code=" + code + ", message='" + message + '\'' + '}';
}
}

View File

@@ -1,17 +1,16 @@
package org.github.tursodatabase.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to mark methods that are called by native functions.
* For example, throwing exceptions or creating java objects.
* Annotation to mark methods that are called by native functions. For example, throwing exceptions
* or creating java objects.
*/
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.METHOD, ElementType.CONSTRUCTOR})
public @interface NativeInvocation {
String invokedFrom() default "";
String invokedFrom() default "";
}

View File

@@ -1,6 +1,5 @@
package org.github.tursodatabase.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
@@ -8,11 +7,10 @@ import java.lang.annotation.Target;
/**
* Annotation to mark nullable types.
* <p>
* This annotation is used to indicate that a method, field, or parameter can be null.
* It helps in identifying potential nullability issues and improving code quality.
*
* <p>This annotation is used to indicate that a method, field, or parameter can be null. It helps
* in identifying potential nullability issues and improving code quality.
*/
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.METHOD, ElementType.FIELD, ElementType.PARAMETER})
public @interface Nullable {
}
public @interface Nullable {}

View File

@@ -1,6 +1,5 @@
package org.github.tursodatabase.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
@@ -8,11 +7,11 @@ import java.lang.annotation.Target;
/**
* Marker annotation to skip nullable checks.
* <p>
* This annotation is used to mark methods, fields, or parameters that should be excluded from nullable checks.
* It is typically applied to code that is still under development or requires special handling.
*
* <p>This annotation is used to mark methods, fields, or parameters that should be excluded from
* nullable checks. It is typically applied to code that is still under development or requires
* special handling.
*/
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.METHOD, ElementType.FIELD, ElementType.PARAMETER})
public @interface SkipNullableCheck {
}
public @interface SkipNullableCheck {}

View File

@@ -5,10 +5,7 @@ import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to mark methods that use larger visibility for testing purposes.
*/
/** Annotation to mark methods that use larger visibility for testing purposes. */
@Retention(RetentionPolicy.SOURCE)
@Target(ElementType.METHOD)
public @interface VisibleForTesting {
}
public @interface VisibleForTesting {}

View File

@@ -5,74 +5,71 @@ import java.sql.SQLFeatureNotSupportedException;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Interface to Limbo. It provides some helper functions
* used by other parts of the driver. The goal of the helper functions here
* are not only to provide functionality, but to handle contractual
* Interface to Limbo. It provides some helper functions used by other parts of the driver. The goal
* of the helper functions here are not only to provide functionality, but to handle contractual
* differences between the JDBC specification and the Limbo API.
*/
public abstract class AbstractDB {
protected final String url;
protected final String filePath;
private final AtomicBoolean closed = new AtomicBoolean(true);
protected final String url;
protected final String filePath;
private final AtomicBoolean closed = new AtomicBoolean(true);
public AbstractDB(String url, String filePath) {
this.url = url;
this.filePath = filePath;
}
public AbstractDB(String url, String filePath) {
this.url = url;
this.filePath = filePath;
}
public boolean isClosed() {
return closed.get();
}
public boolean isClosed() {
return closed.get();
}
/**
* Aborts any pending operation and returns at its earliest opportunity.
*/
public abstract void interrupt() throws SQLException;
/** Aborts any pending operation and returns at its earliest opportunity. */
public abstract void interrupt() throws SQLException;
/**
* Creates an SQLite interface to a database for the given connection.
*
* @param openFlags Flags for opening the database.
* @throws SQLException if a database access error occurs.
*/
public final synchronized void open(int openFlags) throws SQLException {
open0(filePath, openFlags);
}
/**
* Creates an SQLite interface to a database for the given connection.
*
* @param openFlags Flags for opening the database.
* @throws SQLException if a database access error occurs.
*/
public final synchronized void open(int openFlags) throws SQLException {
open0(filePath, openFlags);
}
protected abstract void open0(String fileName, int openFlags) throws SQLException;
protected abstract void open0(String fileName, int openFlags) throws SQLException;
/**
* Closes a database connection and finalizes any remaining statements before the closing
* operation.
*
* @throws SQLException if a database access error occurs.
*/
public final synchronized void close() throws SQLException {
// TODO: add implementation
throw new SQLFeatureNotSupportedException();
}
/**
* Closes a database connection and finalizes any remaining statements before the closing
* operation.
*
* @throws SQLException if a database access error occurs.
*/
public final synchronized void close() throws SQLException {
// TODO: add implementation
throw new SQLFeatureNotSupportedException();
}
/**
* Connects to a database.
*
* @return Pointer to the connection.
*/
public abstract long connect() throws SQLException;
/**
* Connects to a database.
*
* @return Pointer to the connection.
*/
public abstract long connect() throws SQLException;
/**
* Creates an SQLite interface to a database with the provided open flags.
*
* @param fileName The database to open.
* @param openFlags Flags for opening the database.
* @return pointer to database instance
* @throws SQLException if a database access error occurs.
*/
protected abstract long openUtf8(byte[] fileName, int openFlags) throws SQLException;
/**
* Creates an SQLite interface to a database with the provided open flags.
*
* @param fileName The database to open.
* @param openFlags Flags for opening the database.
* @return pointer to database instance
* @throws SQLException if a database access error occurs.
*/
protected abstract long openUtf8(byte[] fileName, int openFlags) throws SQLException;
/**
* Closes the SQLite interface to a database.
*
* @throws SQLException if a database access error occurs.
*/
protected abstract void close0() throws SQLException;
/**
* Closes the SQLite interface to a database.
*
* @throws SQLException if a database access error occurs.
*/
protected abstract void close0() throws SQLException;
}

View File

@@ -1,140 +1,139 @@
package org.github.tursodatabase.core;
import org.github.tursodatabase.annotations.NativeInvocation;
import org.github.tursodatabase.utils.LimboExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArray;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Properties;
import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArray;
import org.github.tursodatabase.annotations.NativeInvocation;
import org.github.tursodatabase.utils.LimboExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class LimboConnection implements Connection {
private static final Logger logger = LoggerFactory.getLogger(LimboConnection.class);
private static final Logger logger = LoggerFactory.getLogger(LimboConnection.class);
private final long connectionPtr;
private final AbstractDB database;
private final long connectionPtr;
private final AbstractDB database;
public LimboConnection(String url, String filePath) throws SQLException {
this(url, filePath, new Properties());
}
public LimboConnection(String url, String filePath) throws SQLException {
this(url, filePath, new Properties());
}
/**
* Creates a connection to limbo database
*
* @param url e.g. "jdbc:sqlite:fileName"
* @param filePath path to file
*/
public LimboConnection(String url, String filePath, Properties properties) throws SQLException {
AbstractDB db = null;
/**
* Creates a connection to limbo database
*
* @param url e.g. "jdbc:sqlite:fileName"
* @param filePath path to file
*/
public LimboConnection(String url, String filePath, Properties properties) throws SQLException {
AbstractDB db = null;
try {
db = open(url, filePath, properties);
} catch (Throwable t) {
try {
if (db != null) {
db.close();
}
} catch (Throwable t2) {
t.addSuppressed(t2);
}
throw t;
try {
db = open(url, filePath, properties);
} catch (Throwable t) {
try {
if (db != null) {
db.close();
}
} catch (Throwable t2) {
t.addSuppressed(t2);
}
this.database = db;
this.connectionPtr = db.connect();
throw t;
}
private static AbstractDB open(String url, String filePath, Properties properties) throws SQLException {
return LimboDBFactory.open(url, filePath, properties);
this.database = db;
this.connectionPtr = db.connect();
}
private static AbstractDB open(String url, String filePath, Properties properties)
throws SQLException {
return LimboDBFactory.open(url, filePath, properties);
}
protected void checkOpen() throws SQLException {
if (isClosed()) throw new SQLException("database connection closed");
}
@Override
public void close() throws SQLException {
if (isClosed()) return;
database.close();
}
@Override
public boolean isClosed() throws SQLException {
return database.isClosed();
}
public AbstractDB getDatabase() {
return database;
}
/**
* Compiles an SQL statement.
*
* @param sql An SQL statement.
* @return Pointer to statement.
* @throws SQLException if a database access error occurs.
*/
public LimboStatement prepare(String sql) throws SQLException {
logger.trace("DriverManager [{}] [SQLite EXEC] {}", Thread.currentThread().getName(), sql);
byte[] sqlBytes = stringToUtf8ByteArray(sql);
if (sqlBytes == null) {
throw new SQLException("Failed to convert " + sql + " into bytes");
}
return new LimboStatement(sql, prepareUtf8(connectionPtr, sqlBytes));
}
protected void checkOpen() throws SQLException {
if (isClosed()) throw new SQLException("database connection closed");
}
private native long prepareUtf8(long connectionPtr, byte[] sqlUtf8) throws SQLException;
@Override
public void close() throws SQLException {
if (isClosed()) return;
database.close();
}
/** @return busy timeout in milliseconds. */
public int getBusyTimeout() {
// TODO: add support for busyTimeout
return 0;
}
@Override
public boolean isClosed() throws SQLException {
return database.isClosed();
}
// TODO: check whether this is still valid for limbo
public AbstractDB getDatabase() {
return database;
}
/**
* Checks whether the type, concurrency, and holdability settings for a {@link ResultSet} are
* supported by the SQLite interface. Supported settings are:
*
* <ul>
* <li>type: {@link ResultSet#TYPE_FORWARD_ONLY}
* <li>concurrency: {@link ResultSet#CONCUR_READ_ONLY})
* <li>holdability: {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}
* </ul>
*
* @param resultSetType the type setting.
* @param resultSetConcurrency the concurrency setting.
* @param resultSetHoldability the holdability setting.
*/
protected void checkCursor(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY)
throw new SQLException("SQLite only supports TYPE_FORWARD_ONLY cursors");
if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
throw new SQLException("SQLite only supports CONCUR_READ_ONLY cursors");
if (resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT)
throw new SQLException("SQLite only supports closing cursors at commit");
}
/**
* Compiles an SQL statement.
*
* @param sql An SQL statement.
* @return Pointer to statement.
* @throws SQLException if a database access error occurs.
*/
public LimboStatement prepare(String sql) throws SQLException {
logger.trace("DriverManager [{}] [SQLite EXEC] {}", Thread.currentThread().getName(), sql);
byte[] sqlBytes = stringToUtf8ByteArray(sql);
if (sqlBytes == null) {
throw new SQLException("Failed to convert " + sql + " into bytes");
}
return new LimboStatement(sql, prepareUtf8(connectionPtr, sqlBytes));
}
public void setBusyTimeout(int busyTimeout) {
// TODO: add support for busy timeout
}
private native long prepareUtf8(long connectionPtr, byte[] sqlUtf8) throws SQLException;
/**
* @return busy timeout in milliseconds.
*/
public int getBusyTimeout() {
// TODO: add support for busyTimeout
return 0;
}
// TODO: check whether this is still valid for limbo
/**
* Checks whether the type, concurrency, and holdability settings for a {@link ResultSet} are
* supported by the SQLite interface. Supported settings are:
*
* <ul>
* <li>type: {@link ResultSet#TYPE_FORWARD_ONLY}
* <li>concurrency: {@link ResultSet#CONCUR_READ_ONLY})
* <li>holdability: {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}
* </ul>
*
* @param resultSetType the type setting.
* @param resultSetConcurrency the concurrency setting.
* @param resultSetHoldability the holdability setting.
*/
protected void checkCursor(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY)
throw new SQLException("SQLite only supports TYPE_FORWARD_ONLY cursors");
if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
throw new SQLException("SQLite only supports CONCUR_READ_ONLY cursors");
if (resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT)
throw new SQLException("SQLite only supports closing cursors at commit");
}
public void setBusyTimeout(int busyTimeout) {
// TODO: add support for busy timeout
}
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
@NativeInvocation(invokedFrom = "limbo_connection.rs")
private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException {
LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes);
}
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
@NativeInvocation(invokedFrom = "limbo_connection.rs")
private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException {
LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes);
}
}

View File

@@ -1,6 +1,9 @@
package org.github.tursodatabase.core;
import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArray;
import java.sql.SQLException;
import java.util.concurrent.locks.ReentrantLock;
import org.github.tursodatabase.LimboErrorCode;
import org.github.tursodatabase.annotations.NativeInvocation;
import org.github.tursodatabase.annotations.VisibleForTesting;
@@ -8,113 +11,104 @@ import org.github.tursodatabase.utils.LimboExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.concurrent.locks.ReentrantLock;
import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArray;
/**
* This class provides a thin JNI layer over the SQLite3 C API.
*/
/** This class provides a thin JNI layer over the SQLite3 C API. */
public final class LimboDB extends AbstractDB {
private static final Logger logger = LoggerFactory.getLogger(LimboDB.class);
// Pointer to database instance
private long dbPointer;
private boolean isOpen;
private static final Logger logger = LoggerFactory.getLogger(LimboDB.class);
// Pointer to database instance
private long dbPointer;
private boolean isOpen;
private static boolean isLoaded;
private ReentrantLock dbLock = new ReentrantLock();
private static boolean isLoaded;
private ReentrantLock dbLock = new ReentrantLock();
static {
if ("The Android Project".equals(System.getProperty("java.vm.vendor"))) {
// TODO
} else {
// continue with non Android execution path
isLoaded = false;
}
static {
if ("The Android Project".equals(System.getProperty("java.vm.vendor"))) {
// TODO
} else {
// continue with non Android execution path
isLoaded = false;
}
}
/** Loads the SQLite interface backend. */
public static void load() {
if (isLoaded) {
return;
}
/**
* Loads the SQLite interface backend.
*/
public static void load() {
if (isLoaded) return;
try {
System.loadLibrary("_limbo_java");
} finally {
isLoaded = true;
}
}
try {
System.loadLibrary("_limbo_java");
} finally {
isLoaded = true;
}
/**
* @param url e.g. "jdbc:sqlite:fileName
* @param filePath e.g. path to file
*/
public static LimboDB create(String url, String filePath) throws SQLException {
return new LimboDB(url, filePath);
}
// TODO: receive config as argument
private LimboDB(String url, String filePath) {
super(url, filePath);
}
// WRAPPER FUNCTIONS ////////////////////////////////////////////
// TODO: add support for JNI
@Override
protected native long openUtf8(byte[] file, int openFlags) throws SQLException;
// TODO: add support for JNI
@Override
protected native void close0() throws SQLException;
// TODO: add support for JNI
native int execUtf8(byte[] sqlUtf8) throws SQLException;
// TODO: add support for JNI
@Override
public native void interrupt();
@Override
protected void open0(String filePath, int openFlags) throws SQLException {
if (isOpen) {
throw LimboExceptionUtils.buildLimboException(
LimboErrorCode.LIMBO_ETC.code, "Already opened");
}
/**
* @param url e.g. "jdbc:sqlite:fileName
* @param filePath e.g. path to file
*/
public static LimboDB create(String url, String filePath) throws SQLException {
return new LimboDB(url, filePath);
byte[] filePathBytes = stringToUtf8ByteArray(filePath);
if (filePathBytes == null) {
throw LimboExceptionUtils.buildLimboException(
LimboErrorCode.LIMBO_ETC.code,
"File path cannot be converted to byteArray. File name: " + filePath);
}
// TODO: receive config as argument
private LimboDB(String url, String filePath) {
super(url, filePath);
}
dbPointer = openUtf8(filePathBytes, openFlags);
isOpen = true;
}
// WRAPPER FUNCTIONS ////////////////////////////////////////////
@Override
public long connect() throws SQLException {
return connect0(dbPointer);
}
// TODO: add support for JNI
@Override
protected native long openUtf8(byte[] file, int openFlags) throws SQLException;
private native long connect0(long databasePtr) throws SQLException;
// TODO: add support for JNI
@Override
protected native void close0() throws SQLException;
@VisibleForTesting
native void throwJavaException(int errorCode) throws SQLException;
// TODO: add support for JNI
native int execUtf8(byte[] sqlUtf8) throws SQLException;
// TODO: add support for JNI
@Override
public native void interrupt();
@Override
protected void open0(String filePath, int openFlags) throws SQLException {
if (isOpen) {
throw LimboExceptionUtils.buildLimboException(LimboErrorCode.LIMBO_ETC.code, "Already opened");
}
byte[] filePathBytes = stringToUtf8ByteArray(filePath);
if (filePathBytes == null) {
throw LimboExceptionUtils.buildLimboException(LimboErrorCode.LIMBO_ETC.code, "File path cannot be converted to byteArray. File name: " + filePath);
}
dbPointer = openUtf8(filePathBytes, openFlags);
isOpen = true;
}
@Override
public long connect() throws SQLException {
byte[] filePathBytes = stringToUtf8ByteArray(filePath);
if (filePathBytes == null) {
throw LimboExceptionUtils.buildLimboException(LimboErrorCode.LIMBO_ETC.code, "File path cannot be converted to byteArray. File name: " + filePath);
}
return connect0(filePathBytes, dbPointer);
}
private native long connect0(byte[] path, long databasePtr) throws SQLException;
@VisibleForTesting
native void throwJavaException(int errorCode) throws SQLException;
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
@NativeInvocation(invokedFrom = "limbo_db.rs")
private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException {
LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes);
}
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
@NativeInvocation(invokedFrom = "limbo_db.rs")
private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException {
LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes);
}
}

View File

@@ -5,43 +5,45 @@ import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
/**
* Factory class for managing and creating instances of {@link LimboDB}.
* This class ensures that multiple instances of {@link LimboDB} with the same URL are not created.
* Factory class for managing and creating instances of {@link LimboDB}. This class ensures that
* multiple instances of {@link LimboDB} with the same URL are not created.
*/
public class LimboDBFactory {
private static final ConcurrentHashMap<String, LimboDB> databaseHolder = new ConcurrentHashMap<>();
private static final ConcurrentHashMap<String, LimboDB> databaseHolder =
new ConcurrentHashMap<>();
/**
* If a database with the same URL already exists, it returns the existing instance.
* Otherwise, it creates a new instance and stores it in the database holder.
*
* @param url the URL of the database
* @param filePath the path to the database file
* @param properties additional properties for the database connection
* @return an instance of {@link LimboDB}
* @throws SQLException if there is an error opening the connection
* @throws IllegalArgumentException if the fileName is empty
*/
public static LimboDB open(String url, String filePath, Properties properties) throws SQLException {
if (databaseHolder.containsKey(url)) {
return databaseHolder.get(url);
}
if (filePath.isEmpty()) {
throw new IllegalArgumentException("filePath should not be empty");
}
final LimboDB database;
try {
LimboDB.load();
database = LimboDB.create(url, filePath);
} catch (Exception e) {
throw new SQLException("Error opening connection", e);
}
database.open(0);
databaseHolder.put(url, database);
return database;
/**
* If a database with the same URL already exists, it returns the existing instance. Otherwise, it
* creates a new instance and stores it in the database holder.
*
* @param url the URL of the database
* @param filePath the path to the database file
* @param properties additional properties for the database connection
* @return an instance of {@link LimboDB}
* @throws SQLException if there is an error opening the connection
* @throws IllegalArgumentException if the fileName is empty
*/
public static LimboDB open(String url, String filePath, Properties properties)
throws SQLException {
if (databaseHolder.containsKey(url)) {
return databaseHolder.get(url);
}
if (filePath.isEmpty()) {
throw new IllegalArgumentException("filePath should not be empty");
}
final LimboDB database;
try {
LimboDB.load();
database = LimboDB.create(url, filePath);
} catch (Exception e) {
throw new SQLException("Error opening connection", e);
}
database.open(0);
databaseHolder.put(url, database);
return database;
}
}

View File

@@ -1,111 +1,119 @@
package org.github.tursodatabase.core;
import java.sql.SQLException;
import org.github.tursodatabase.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A table of data representing limbo database result set, which is generated by executing a statement that queries the
* database.
* <p>
* A {@link LimboResultSet} object is automatically closed when the {@link LimboStatement} object that generated it is
* closed or re-executed.
* A table of data representing limbo database result set, which is generated by executing a
* statement that queries the database.
*
* <p>A {@link LimboResultSet} object is automatically closed when the {@link LimboStatement} object
* that generated it is closed or re-executed.
*/
public class LimboResultSet {
private static final Logger log = LoggerFactory.getLogger(LimboResultSet.class);
private static final Logger log = LoggerFactory.getLogger(LimboResultSet.class);
private final LimboStatement statement;
private final LimboStatement statement;
// Whether the result set does not have any rows.
private boolean isEmptyResultSet = false;
// If the result set is open. Doesn't mean it has results.
private boolean open;
// Maximum number of rows as set by the statement
private long maxRows;
// number of current row, starts at 1 (0 is used to represent loading data)
private int row = 0;
private boolean pastLastRow = false;
// Whether the result set does not have any rows.
private boolean isEmptyResultSet = false;
// If the result set is open. Doesn't mean it has results.
private boolean open;
// Maximum number of rows as set by the statement
private long maxRows;
// number of current row, starts at 1 (0 is used to represent loading data)
private int row = 0;
private boolean pastLastRow = false;
@Nullable
private LimboStepResult lastStepResult;
@Nullable private LimboStepResult lastStepResult;
public static LimboResultSet of(LimboStatement statement) {
return new LimboResultSet(statement);
public static LimboResultSet of(LimboStatement statement) {
return new LimboResultSet(statement);
}
private LimboResultSet(LimboStatement statement) {
this.open = true;
this.statement = statement;
}
/**
* Moves the cursor forward one row from its current position. A {@link LimboResultSet} cursor is
* initially positioned before the first fow; the first call to the method <code>next</code> makes
* the first row the current row; the second call makes the second row the current row, and so on.
* When a call to the <code>next</code> method returns <code>false</code>, the cursor is
* positioned after the last row.
*
* <p>Note that limbo only supports <code>ResultSet.TYPE_FORWARD_ONLY</code>, which means that the
* cursor can only move forward.
*/
public boolean next() throws SQLException {
if (!open || isEmptyResultSet || pastLastRow) {
return false; // completed ResultSet
}
private LimboResultSet(LimboStatement statement) {
this.open = true;
this.statement = statement;
if (maxRows != 0 && row == maxRows) {
return false;
}
/**
* Moves the cursor forward one row from its current position. A {@link LimboResultSet} cursor is initially positioned
* before the first fow; the first call to the method <code>next</code> makes the first row the current row; the second call
* makes the second row the current row, and so on.
* When a call to the <code>next</code> method returns <code>false</code>, the cursor is positioned after the last row.
* <p>
* Note that limbo only supports <code>ResultSet.TYPE_FORWARD_ONLY</code>, which means that the cursor can only move forward.
*/
public boolean next() throws SQLException {
if (!open || isEmptyResultSet || pastLastRow) {
return false; // completed ResultSet
}
if (maxRows != 0 && row == maxRows) {
return false;
}
lastStepResult = this.statement.step();
log.debug("lastStepResult: {}", lastStepResult);
if (lastStepResult.isRow()) {
row++;
}
pastLastRow = lastStepResult.isDone();
if (pastLastRow) {
open = false;
}
return !pastLastRow;
lastStepResult = this.statement.step();
log.debug("lastStepResult: {}", lastStepResult);
if (lastStepResult.isRow()) {
row++;
}
/**
* Checks whether the last step result has returned row result.
*/
public boolean hasLastStepReturnedRow() {
return lastStepResult != null && lastStepResult.isRow();
if (lastStepResult.isInInvalidState()) {
open = false;
throw new SQLException("step() returned invalid result: " + lastStepResult);
}
/**
* Checks the status of the result set.
*
* @return true if it's ready to iterate over the result set; false otherwise.
*/
public boolean isOpen() {
return open;
pastLastRow = lastStepResult.isDone();
if (pastLastRow) {
open = false;
}
return !pastLastRow;
}
/**
* @throws SQLException if not {@link #open}
*/
public void checkOpen() throws SQLException {
if (!open) {
throw new SQLException("ResultSet closed");
}
}
/** Checks whether the last step result has returned row result. */
public boolean hasLastStepReturnedRow() {
return lastStepResult != null && lastStepResult.isRow();
}
@Override
public String toString() {
return "LimboResultSet{" +
"statement=" + statement +
", isEmptyResultSet=" + isEmptyResultSet +
", open=" + open +
", maxRows=" + maxRows +
", row=" + row +
", pastLastRow=" + pastLastRow +
", lastResult=" + lastStepResult +
'}';
/**
* Checks the status of the result set.
*
* @return true if it's ready to iterate over the result set; false otherwise.
*/
public boolean isOpen() {
return open;
}
/** @throws SQLException if not {@link #open} */
public void checkOpen() throws SQLException {
if (!open) {
throw new SQLException("ResultSet closed");
}
}
@Override
public String toString() {
return "LimboResultSet{"
+ "statement="
+ statement
+ ", isEmptyResultSet="
+ isEmptyResultSet
+ ", open="
+ open
+ ", maxRows="
+ maxRows
+ ", row="
+ row
+ ", pastLastRow="
+ pastLastRow
+ ", lastResult="
+ lastStepResult
+ '}';
}
}

View File

@@ -1,7 +1,6 @@
package org.github.tursodatabase.core;
import java.sql.SQLException;
import org.github.tursodatabase.annotations.NativeInvocation;
import org.github.tursodatabase.annotations.Nullable;
import org.github.tursodatabase.utils.LimboExceptionUtils;
@@ -9,68 +8,73 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* By default, only one <code>resultSet</code> object per <code>LimboStatement</code> can be open at the same time.
* Therefore, if the reading of one <code>resultSet</code> object is interleaved with the reading of another, each must
* have been generated by different <code>LimboStatement</code> objects. All execution method in the <code>LimboStatement</code>
* implicitly close the current <code>resultSet</code> object of the statement if an open one exists.
* By default, only one <code>resultSet</code> object per <code>LimboStatement</code> can be open at
* the same time. Therefore, if the reading of one <code>resultSet</code> object is interleaved with
* the reading of another, each must have been generated by different <code>LimboStatement</code>
* objects. All execution method in the <code>LimboStatement</code> implicitly close the current
* <code>resultSet</code> object of the statement if an open one exists.
*/
public class LimboStatement {
private static final Logger log = LoggerFactory.getLogger(LimboStatement.class);
private static final Logger log = LoggerFactory.getLogger(LimboStatement.class);
private final String sql;
private final long statementPointer;
private final LimboResultSet resultSet;
private final String sql;
private final long statementPointer;
private final LimboResultSet resultSet;
// TODO: what if the statement we ran was DDL, update queries and etc. Should we still create a resultSet?
public LimboStatement(String sql, long statementPointer) {
this.sql = sql;
this.statementPointer = statementPointer;
this.resultSet = LimboResultSet.of(this);
log.debug("Creating statement with sql: {}", this.sql);
// TODO: what if the statement we ran was DDL, update queries and etc. Should we still create a
// resultSet?
public LimboStatement(String sql, long statementPointer) {
this.sql = sql;
this.statementPointer = statementPointer;
this.resultSet = LimboResultSet.of(this);
log.debug("Creating statement with sql: {}", this.sql);
}
public LimboResultSet getResultSet() {
return resultSet;
}
/**
* Expects a clean statement created right after prepare method is called.
*
* @return true if the ResultSet has at least one row; false otherwise.
*/
public boolean execute() throws SQLException {
resultSet.next();
return resultSet.hasLastStepReturnedRow();
}
LimboStepResult step() throws SQLException {
final LimboStepResult result = step(this.statementPointer);
if (result == null) {
throw new SQLException("step() returned null, which is only returned when an error occurs");
}
public LimboResultSet getResultSet() {
return resultSet;
}
return result;
}
/**
* Expects a clean statement created right after prepare method is called.
*
* @return true if the ResultSet has at least one row; false otherwise.
*/
public boolean execute() throws SQLException {
resultSet.next();
return resultSet.hasLastStepReturnedRow();
}
@Nullable
private native LimboStepResult step(long stmtPointer) throws SQLException;
LimboStepResult step() throws SQLException {
final LimboStepResult result = step(this.statementPointer);
if (result == null) {
throw new SQLException("step() returned null, which is only returned when an error occurs");
}
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
@NativeInvocation(invokedFrom = "limbo_statement.rs")
private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException {
LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes);
}
return result;
}
@Nullable
private native LimboStepResult step(long stmtPointer) throws SQLException;
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
@NativeInvocation(invokedFrom = "limbo_statement.rs")
private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException {
LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes);
}
@Override
public String toString() {
return "LimboStatement{" +
"statementPointer=" + statementPointer +
", sql='" + sql + '\'' +
'}';
}
@Override
public String toString() {
return "LimboStatement{"
+ "statementPointer="
+ statementPointer
+ ", sql='"
+ sql
+ '\''
+ '}';
}
}

View File

@@ -1,70 +1,78 @@
package org.github.tursodatabase.core;
import java.util.Arrays;
import org.github.tursodatabase.annotations.NativeInvocation;
import org.github.tursodatabase.annotations.Nullable;
/**
* Represents the step result of limbo's statement's step function.
*/
/** Represents the step result of limbo's statement's step function. */
public class LimboStepResult {
private static final int STEP_RESULT_ID_ROW = 10;
private static final int STEP_RESULT_ID_IO = 20;
private static final int STEP_RESULT_ID_DONE = 30;
private static final int STEP_RESULT_ID_INTERRUPT = 40;
private static final int STEP_RESULT_ID_BUSY = 50;
private static final int STEP_RESULT_ID_ERROR = 60;
private static final int STEP_RESULT_ID_ROW = 10;
private static final int STEP_RESULT_ID_IO = 20;
private static final int STEP_RESULT_ID_DONE = 30;
private static final int STEP_RESULT_ID_INTERRUPT = 40;
// Indicates that the database file could not be written because of concurrent activity by some
// other connection
private static final int STEP_RESULT_ID_BUSY = 50;
private static final int STEP_RESULT_ID_ERROR = 60;
// Identifier for limbo's StepResult
private final int stepResultId;
@Nullable
private final Object[] result;
// Identifier for limbo's StepResult
private final int stepResultId;
@Nullable private final Object[] result;
@NativeInvocation(invokedFrom = "limbo_statement.rs")
public LimboStepResult(int stepResultId) {
this.stepResultId = stepResultId;
this.result = null;
}
@NativeInvocation(invokedFrom = "limbo_statement.rs")
public LimboStepResult(int stepResultId, Object[] result) {
this.stepResultId = stepResultId;
this.result = result;
}
public boolean isRow() {
return stepResultId == STEP_RESULT_ID_ROW;
}
public boolean isDone() {
return stepResultId == STEP_RESULT_ID_DONE;
}
@Override
public String toString() {
return "LimboStepResult{" +
"stepResultName=" + getStepResultName() +
", result=" + Arrays.toString(result) +
'}';
}
private String getStepResultName() {
switch (stepResultId) {
case STEP_RESULT_ID_ROW:
return "ROW";
case STEP_RESULT_ID_IO:
return "IO";
case STEP_RESULT_ID_DONE:
return "DONE";
case STEP_RESULT_ID_INTERRUPT:
return "INTERRUPT";
case STEP_RESULT_ID_BUSY:
return "BUSY";
case STEP_RESULT_ID_ERROR:
return "ERROR";
default:
return "UNKNOWN";
}
@NativeInvocation(invokedFrom = "limbo_statement.rs")
public LimboStepResult(int stepResultId) {
this.stepResultId = stepResultId;
this.result = null;
}
@NativeInvocation(invokedFrom = "limbo_statement.rs")
public LimboStepResult(int stepResultId, Object[] result) {
this.stepResultId = stepResultId;
this.result = result;
}
public boolean isRow() {
return stepResultId == STEP_RESULT_ID_ROW;
}
public boolean isDone() {
return stepResultId == STEP_RESULT_ID_DONE;
}
public boolean isInInvalidState() {
// current implementation doesn't allow STEP_RESULT_ID_IO to be returned
return stepResultId == STEP_RESULT_ID_IO
|| stepResultId == STEP_RESULT_ID_INTERRUPT
|| stepResultId == STEP_RESULT_ID_BUSY
|| stepResultId == STEP_RESULT_ID_ERROR;
}
@Override
public String toString() {
return "LimboStepResult{"
+ "stepResultName="
+ getStepResultName()
+ ", result="
+ Arrays.toString(result)
+ '}';
}
private String getStepResultName() {
switch (stepResultId) {
case STEP_RESULT_ID_ROW:
return "ROW";
case STEP_RESULT_ID_IO:
return "IO";
case STEP_RESULT_ID_DONE:
return "DONE";
case STEP_RESULT_ID_INTERRUPT:
return "INTERRUPT";
case STEP_RESULT_ID_BUSY:
return "BUSY";
case STEP_RESULT_ID_ERROR:
return "ERROR";
default:
return "UNKNOWN";
}
}
}

View File

@@ -15,93 +15,91 @@
*/
package org.github.tursodatabase.core;
/**
* Sqlite error codes.
*/
/** Sqlite error codes. */
public class SqliteCode {
/** Successful result */
public static final int SQLITE_OK = 0;
/** Successful result */
public static final int SQLITE_OK = 0;
/** SQL error or missing database */
public static final int SQLITE_ERROR = 1;
/** SQL error or missing database */
public static final int SQLITE_ERROR = 1;
/** An internal logic error in SQLite */
public static final int SQLITE_INTERNAL = 2;
/** An internal logic error in SQLite */
public static final int SQLITE_INTERNAL = 2;
/** Access permission denied */
public static final int SQLITE_PERM = 3;
/** Access permission denied */
public static final int SQLITE_PERM = 3;
/** Callback routine requested an abort */
public static final int SQLITE_ABORT = 4;
/** Callback routine requested an abort */
public static final int SQLITE_ABORT = 4;
/** The database file is locked */
public static final int SQLITE_BUSY = 5;
/** The database file is locked */
public static final int SQLITE_BUSY = 5;
/** A table in the database is locked */
public static final int SQLITE_LOCKED = 6;
/** A table in the database is locked */
public static final int SQLITE_LOCKED = 6;
/** A malloc() failed */
public static final int SQLITE_NOMEM = 7;
/** A malloc() failed */
public static final int SQLITE_NOMEM = 7;
/** Attempt to write a readonly database */
public static final int SQLITE_READONLY = 8;
/** Attempt to write a readonly database */
public static final int SQLITE_READONLY = 8;
/** Operation terminated by sqlite_interrupt() */
public static final int SQLITE_INTERRUPT = 9;
/** Operation terminated by sqlite_interrupt() */
public static final int SQLITE_INTERRUPT = 9;
/** Some kind of disk I/O error occurred */
public static final int SQLITE_IOERR = 10;
/** Some kind of disk I/O error occurred */
public static final int SQLITE_IOERR = 10;
/** The database disk image is malformed */
public static final int SQLITE_CORRUPT = 11;
/** The database disk image is malformed */
public static final int SQLITE_CORRUPT = 11;
/** (Internal Only) Table or record not found */
public static final int SQLITE_NOTFOUND = 12;
/** (Internal Only) Table or record not found */
public static final int SQLITE_NOTFOUND = 12;
/** Insertion failed because database is full */
public static final int SQLITE_FULL = 13;
/** Insertion failed because database is full */
public static final int SQLITE_FULL = 13;
/** Unable to open the database file */
public static final int SQLITE_CANTOPEN = 14;
/** Unable to open the database file */
public static final int SQLITE_CANTOPEN = 14;
/** Database lock protocol error */
public static final int SQLITE_PROTOCOL = 15;
/** Database lock protocol error */
public static final int SQLITE_PROTOCOL = 15;
/** (Internal Only) Database table is empty */
public static final int SQLITE_EMPTY = 16;
/** (Internal Only) Database table is empty */
public static final int SQLITE_EMPTY = 16;
/** The database schema changed */
public static final int SQLITE_SCHEMA = 17;
/** The database schema changed */
public static final int SQLITE_SCHEMA = 17;
/** Too much data for one row of a table */
public static final int SQLITE_TOOBIG = 18;
/** Too much data for one row of a table */
public static final int SQLITE_TOOBIG = 18;
/** Abort due to constraint violation */
public static final int SQLITE_CONSTRAINT = 19;
/** Abort due to constraint violation */
public static final int SQLITE_CONSTRAINT = 19;
/** Data type mismatch */
public static final int SQLITE_MISMATCH = 20;
/** Data type mismatch */
public static final int SQLITE_MISMATCH = 20;
/** Library used incorrectly */
public static final int SQLITE_MISUSE = 21;
/** Library used incorrectly */
public static final int SQLITE_MISUSE = 21;
/** Uses OS features not supported on host */
public static final int SQLITE_NOLFS = 22;
/** Uses OS features not supported on host */
public static final int SQLITE_NOLFS = 22;
/** Authorization denied */
public static final int SQLITE_AUTH = 23;
/** Authorization denied */
public static final int SQLITE_AUTH = 23;
/** sqlite_step() has another row ready */
public static final int SQLITE_ROW = 100;
/** sqlite_step() has another row ready */
public static final int SQLITE_ROW = 100;
/** sqlite_step() has finished executing */
public static final int SQLITE_DONE = 101;
/** sqlite_step() has finished executing */
public static final int SQLITE_DONE = 101;
// types returned by sqlite3_column_type()
// types returned by sqlite3_column_type()
public static final int SQLITE_INTEGER = 1;
public static final int SQLITE_FLOAT = 2;
public static final int SQLITE_TEXT = 3;
public static final int SQLITE_BLOB = 4;
public static final int SQLITE_NULL = 5;
public static final int SQLITE_INTEGER = 1;
public static final int SQLITE_FLOAT = 2;
public static final int SQLITE_TEXT = 3;
public static final int SQLITE_BLOB = 4;
public static final int SQLITE_NULL = 5;
}

View File

@@ -1,18 +1,17 @@
package org.github.tursodatabase.exceptions;
import java.sql.SQLException;
import org.github.tursodatabase.LimboErrorCode;
import java.sql.SQLException;
public class LimboException extends SQLException {
private final LimboErrorCode resultCode;
private final LimboErrorCode resultCode;
public LimboException(String message, LimboErrorCode resultCode) {
super(message, null, resultCode.code & 0xff);
this.resultCode = resultCode;
}
public LimboException(String message, LimboErrorCode resultCode) {
super(message, null, resultCode.code & 0xff);
this.resultCode = resultCode;
}
public LimboErrorCode getResultCode() {
return resultCode;
}
public LimboErrorCode getResultCode() {
return resultCode;
}
}

View File

@@ -1,353 +1,357 @@
package org.github.tursodatabase.jdbc4;
import org.github.tursodatabase.core.LimboConnection;
import org.github.tursodatabase.annotations.SkipNullableCheck;
import java.sql.*;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
import org.github.tursodatabase.annotations.SkipNullableCheck;
import org.github.tursodatabase.core.LimboConnection;
public class JDBC4Connection extends LimboConnection {
public JDBC4Connection(String url, String filePath) throws SQLException {
super(url, filePath);
}
public JDBC4Connection(String url, String filePath) throws SQLException {
super(url, filePath);
}
public JDBC4Connection(String url, String filePath, Properties properties) throws SQLException {
super(url, filePath, properties);
}
public JDBC4Connection(String url, String filePath, Properties properties) throws SQLException {
super(url, filePath, properties);
}
@Override
public Statement createStatement() throws SQLException {
return createStatement(
ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY,
ResultSet.CLOSE_CURSORS_AT_COMMIT
);
}
@Override
public Statement createStatement() throws SQLException {
return createStatement(
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
return createStatement(resultSetType, resultSetConcurrency, ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency)
throws SQLException {
return createStatement(resultSetType, resultSetConcurrency, ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
checkOpen();
checkCursor(resultSetType, resultSetConcurrency, resultSetHoldability);
@Override
public Statement createStatement(
int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
checkOpen();
checkCursor(resultSetType, resultSetConcurrency, resultSetHoldability);
return new JDBC4Statement(this);
}
return new JDBC4Statement(this);
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public CallableStatement prepareCall(String sql) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public CallableStatement prepareCall(String sql) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public String nativeSQL(String sql) throws SQLException {
// TODO
return "";
}
@Override
@SkipNullableCheck
public String nativeSQL(String sql) throws SQLException {
// TODO
return "";
}
@Override
public void setAutoCommit(boolean autoCommit) throws SQLException {
// TODO
}
@Override
public void setAutoCommit(boolean autoCommit) throws SQLException {
// TODO
}
@Override
public boolean getAutoCommit() throws SQLException {
// TODO
return false;
}
@Override
public boolean getAutoCommit() throws SQLException {
// TODO
return false;
}
@Override
public void commit() throws SQLException {
// TODO
}
@Override
public void commit() throws SQLException {
// TODO
}
@Override
public void rollback() throws SQLException {
// TODO
}
@Override
public void rollback() throws SQLException {
// TODO
}
@Override
public void close() throws SQLException {
// TODO
}
@Override
public void close() throws SQLException {
// TODO
}
@Override
public boolean isClosed() throws SQLException {
// TODO
return false;
}
@Override
public boolean isClosed() throws SQLException {
// TODO
return false;
}
@Override
@SkipNullableCheck
public DatabaseMetaData getMetaData() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public DatabaseMetaData getMetaData() throws SQLException {
// TODO
return null;
}
@Override
public void setReadOnly(boolean readOnly) throws SQLException {
// TODO
}
@Override
public void setReadOnly(boolean readOnly) throws SQLException {
// TODO
}
@Override
public boolean isReadOnly() throws SQLException {
// TODO
return false;
}
@Override
public boolean isReadOnly() throws SQLException {
// TODO
return false;
}
@Override
public void setCatalog(String catalog) throws SQLException {
// TODO
}
@Override
public void setCatalog(String catalog) throws SQLException {
// TODO
}
@Override
public String getCatalog() throws SQLException {
// TODO
return "";
}
@Override
public String getCatalog() throws SQLException {
// TODO
return "";
}
@Override
public void setTransactionIsolation(int level) throws SQLException {
// TODO
}
@Override
public void setTransactionIsolation(int level) throws SQLException {
// TODO
}
@Override
public int getTransactionIsolation() throws SQLException {
// TODO
return 0;
}
@Override
public int getTransactionIsolation() throws SQLException {
// TODO
return 0;
}
@Override
@SkipNullableCheck
public SQLWarning getWarnings() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public SQLWarning getWarnings() throws SQLException {
// TODO
return null;
}
@Override
public void clearWarnings() throws SQLException {
// TODO
}
@Override
public void clearWarnings() throws SQLException {
// TODO
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency)
throws SQLException {
// TODO
return null;
}
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
// TODO
return new HashMap<>();
}
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
// TODO
return new HashMap<>();
}
@Override
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
// TODO
}
@Override
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
// TODO
}
@Override
public void setHoldability(int holdability) throws SQLException {
// TODO
}
@Override
public void setHoldability(int holdability) throws SQLException {
// TODO
}
@Override
public int getHoldability() throws SQLException {
return 0;
}
@Override
public int getHoldability() throws SQLException {
return 0;
}
@Override
@SkipNullableCheck
public Savepoint setSavepoint() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Savepoint setSavepoint() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Savepoint setSavepoint(String name) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Savepoint setSavepoint(String name) throws SQLException {
// TODO
return null;
}
@Override
public void rollback(Savepoint savepoint) throws SQLException {
// TODO
}
@Override
public void rollback(Savepoint savepoint) throws SQLException {
// TODO
}
@Override
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
// TODO
}
@Override
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
// TODO
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(
String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public CallableStatement prepareCall(
String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Clob createClob() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Clob createClob() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Blob createBlob() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Blob createBlob() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public NClob createNClob() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public NClob createNClob() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public SQLXML createSQLXML() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public SQLXML createSQLXML() throws SQLException {
// TODO
return null;
}
@Override
public boolean isValid(int timeout) throws SQLException {
// TODO
return false;
}
@Override
public boolean isValid(int timeout) throws SQLException {
// TODO
return false;
}
@Override
public void setClientInfo(String name, String value) throws SQLClientInfoException {
// TODO
}
@Override
public void setClientInfo(String name, String value) throws SQLClientInfoException {
// TODO
}
@Override
public void setClientInfo(Properties properties) throws SQLClientInfoException {
// TODO
}
@Override
public void setClientInfo(Properties properties) throws SQLClientInfoException {
// TODO
}
@Override
public String getClientInfo(String name) throws SQLException {
// TODO
return "";
}
@Override
public String getClientInfo(String name) throws SQLException {
// TODO
return "";
}
@Override
@SkipNullableCheck
public Properties getClientInfo() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Properties getClientInfo() throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
// TODO
return null;
}
@Override
@SkipNullableCheck
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
// TODO
return null;
}
@Override
public void setSchema(String schema) throws SQLException {
// TODO
}
@Override
public void setSchema(String schema) throws SQLException {
// TODO
}
@Override
@SkipNullableCheck
public String getSchema() throws SQLException {
// TODO
return "";
}
@Override
@SkipNullableCheck
public String getSchema() throws SQLException {
// TODO
return "";
}
@Override
public void abort(Executor executor) throws SQLException {
// TODO
}
@Override
public void abort(Executor executor) throws SQLException {
// TODO
}
@Override
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
// TODO
}
@Override
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
// TODO
}
@Override
public int getNetworkTimeout() throws SQLException {
// TODO
return 0;
}
@Override
public int getNetworkTimeout() throws SQLException {
// TODO
return 0;
}
@Override
@SkipNullableCheck
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
@SkipNullableCheck
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
// TODO
return false;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
// TODO
return false;
}
}

View File

@@ -8,7 +8,6 @@ import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.Statement;
import java.util.concurrent.locks.ReentrantLock;
import org.github.tursodatabase.annotations.Nullable;
import org.github.tursodatabase.annotations.SkipNullableCheck;
import org.github.tursodatabase.core.LimboConnection;
@@ -17,359 +16,366 @@ import org.github.tursodatabase.core.LimboStatement;
public class JDBC4Statement implements Statement {
private final LimboConnection connection;
@Nullable
private LimboStatement statement = null;
private final LimboConnection connection;
@Nullable private LimboStatement statement = null;
private boolean closed;
private boolean closeOnCompletion;
private boolean closed;
private boolean closeOnCompletion;
private final int resultSetType;
private final int resultSetConcurrency;
private final int resultSetHoldability;
private final int resultSetType;
private final int resultSetConcurrency;
private final int resultSetHoldability;
private int queryTimeoutSeconds;
private long updateCount;
private boolean exhaustedResults = false;
private int queryTimeoutSeconds;
private long updateCount;
private boolean exhaustedResults = false;
private ReentrantLock connectionLock = new ReentrantLock();
private ReentrantLock connectionLock = new ReentrantLock();
public JDBC4Statement(LimboConnection connection) {
this(connection, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY,
ResultSet.CLOSE_CURSORS_AT_COMMIT);
public JDBC4Statement(LimboConnection connection) {
this(
connection,
ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY,
ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
public JDBC4Statement(
LimboConnection connection,
int resultSetType,
int resultSetConcurrency,
int resultSetHoldability) {
this.connection = connection;
this.resultSetType = resultSetType;
this.resultSetConcurrency = resultSetConcurrency;
this.resultSetHoldability = resultSetHoldability;
}
@Override
public ResultSet executeQuery(String sql) throws SQLException {
execute(sql);
requireNonNull(statement, "statement should not be null after running execute method");
return new JDBC4ResultSet(statement.getResultSet());
}
@Override
public int executeUpdate(String sql) throws SQLException {
execute(sql);
requireNonNull(statement, "statement should not be null after running execute method");
final LimboResultSet resultSet = statement.getResultSet();
while (resultSet.isOpen()) {
resultSet.next();
}
public JDBC4Statement(LimboConnection connection, int resultSetType, int resultSetConcurrency,
int resultSetHoldability) {
this.connection = connection;
this.resultSetType = resultSetType;
this.resultSetConcurrency = resultSetConcurrency;
this.resultSetHoldability = resultSetHoldability;
// TODO: return update count;
return 0;
}
@Override
public void close() throws SQLException {
clearGeneratedKeys();
internalClose();
closed = true;
}
@Override
public int getMaxFieldSize() throws SQLException {
// TODO
return 0;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
// TODO
}
@Override
public int getMaxRows() throws SQLException {
// TODO
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
// TODO
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
// TODO
}
@Override
public int getQueryTimeout() throws SQLException {
// TODO
return 0;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
if (seconds < 0) {
throw new SQLException("Query timeout must be greater than 0");
}
this.queryTimeoutSeconds = seconds;
}
@Override
public void cancel() throws SQLException {
// TODO
}
@Override
@SkipNullableCheck
public SQLWarning getWarnings() throws SQLException {
// TODO
return null;
}
@Override
public void clearWarnings() throws SQLException {
// TODO
}
@Override
public void setCursorName(String name) throws SQLException {
// TODO
}
/**
* The <code>execute</code> method executes an SQL statement and indicates the form of the first
* result. You must then use the methods <code>getResultSet</code> or <code>getUpdateCount</code>
* to retrieve the result, and <code>getMoreResults</code> to move to any subsequent result(s).
*/
@Override
public boolean execute(String sql) throws SQLException {
internalClose();
return this.withConnectionTimeout(
() -> {
try {
// TODO: if sql is a readOnly query, do we still need the locks?
connectionLock.lock();
statement = connection.prepare(sql);
final boolean result = statement.execute();
updateGeneratedKeys();
exhaustedResults = false;
return result;
} finally {
connectionLock.unlock();
}
});
}
@Override
public ResultSet getResultSet() throws SQLException {
requireNonNull(statement, "statement is null");
return new JDBC4ResultSet(statement.getResultSet());
}
@Override
public int getUpdateCount() throws SQLException {
// TODO
return 0;
}
@Override
public boolean getMoreResults() throws SQLException {
// TODO
return false;
}
@Override
public void setFetchDirection(int direction) throws SQLException {
// TODO
}
@Override
public int getFetchDirection() throws SQLException {
// TODO
return 0;
}
@Override
public void setFetchSize(int rows) throws SQLException {
// TODO
}
@Override
public int getFetchSize() throws SQLException {
// TODO
return 0;
}
@Override
public int getResultSetConcurrency() {
return resultSetConcurrency;
}
@Override
public int getResultSetType() {
return resultSetType;
}
@Override
public void addBatch(String sql) throws SQLException {
// TODO
}
@Override
public void clearBatch() throws SQLException {
// TODO
}
@Override
public int[] executeBatch() throws SQLException {
// TODO
return new int[0];
}
@Override
@SkipNullableCheck
public Connection getConnection() throws SQLException {
// TODO
return null;
}
@Override
public boolean getMoreResults(int current) throws SQLException {
// TODO
return false;
}
@Override
@SkipNullableCheck
public ResultSet getGeneratedKeys() throws SQLException {
// TODO
return null;
}
@Override
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
// TODO
return 0;
}
@Override
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
// TODO
return 0;
}
@Override
public int executeUpdate(String sql, String[] columnNames) throws SQLException {
// TODO
return 0;
}
@Override
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
// TODO
return false;
}
@Override
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
// TODO
return false;
}
@Override
public boolean execute(String sql, String[] columnNames) throws SQLException {
// TODO
return false;
}
@Override
public int getResultSetHoldability() {
return resultSetHoldability;
}
@Override
public boolean isClosed() throws SQLException {
// TODO
return false;
}
@Override
public void setPoolable(boolean poolable) throws SQLException {
// TODO
}
@Override
public boolean isPoolable() throws SQLException {
// TODO
return false;
}
@Override
public void closeOnCompletion() throws SQLException {
if (closed) {
throw new SQLException("statement is closed");
}
closeOnCompletion = true;
}
/**
* Indicates whether the statement should be closed automatically when all its dependent result
* sets are closed.
*/
@Override
public boolean isCloseOnCompletion() throws SQLException {
if (closed) {
throw new SQLException("statement is closed");
}
return closeOnCompletion;
}
@Override
@SkipNullableCheck
public <T> T unwrap(Class<T> iface) throws SQLException {
// TODO
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
// TODO
return false;
}
protected void internalClose() throws SQLException {
// TODO
}
protected void clearGeneratedKeys() throws SQLException {
// TODO
}
protected void updateGeneratedKeys() throws SQLException {
// TODO
}
private <T> T withConnectionTimeout(SQLCallable<T> callable) throws SQLException {
final int originalBusyTimeoutMillis = connection.getBusyTimeout();
if (queryTimeoutSeconds > 0) {
// TODO: set busy timeout
connection.setBusyTimeout(1000 * queryTimeoutSeconds);
}
@Override
public ResultSet executeQuery(String sql) throws SQLException {
execute(sql);
requireNonNull(statement, "statement should not be null after running execute method");
return new JDBC4ResultSet(statement.getResultSet());
try {
return callable.call();
} finally {
if (queryTimeoutSeconds > 0) {
connection.setBusyTimeout(originalBusyTimeoutMillis);
}
}
}
@Override
public int executeUpdate(String sql) throws SQLException {
execute(sql);
requireNonNull(statement, "statement should not be null after running execute method");
final LimboResultSet resultSet = statement.getResultSet();
while (resultSet.isOpen()) {
resultSet.next();
}
// TODO: return update count;
return 0;
}
@Override
public void close() throws SQLException {
clearGeneratedKeys();
internalClose();
closed = true;
}
@Override
public int getMaxFieldSize() throws SQLException {
// TODO
return 0;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
// TODO
}
@Override
public int getMaxRows() throws SQLException {
// TODO
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
// TODO
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
// TODO
}
@Override
public int getQueryTimeout() throws SQLException {
// TODO
return 0;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
if (seconds < 0) {
throw new SQLException("Query timeout must be greater than 0");
}
this.queryTimeoutSeconds = seconds;
}
@Override
public void cancel() throws SQLException {
// TODO
}
@Override
@SkipNullableCheck
public SQLWarning getWarnings() throws SQLException {
// TODO
return null;
}
@Override
public void clearWarnings() throws SQLException {
// TODO
}
@Override
public void setCursorName(String name) throws SQLException {
// TODO
}
/**
* The <code>execute</code> method executes an SQL statement and indicates the
* form of the first result. You must then use the methods
* <code>getResultSet</code> or <code>getUpdateCount</code>
* to retrieve the result, and <code>getMoreResults</code> to
* move to any subsequent result(s).
*/
@Override
public boolean execute(String sql) throws SQLException {
internalClose();
return this.withConnectionTimeout(
() -> {
try {
// TODO: if sql is a readOnly query, do we still need the locks?
connectionLock.lock();
statement = connection.prepare(sql);
final boolean result = statement.execute();
updateGeneratedKeys();
exhaustedResults = false;
return result;
} finally {
connectionLock.unlock();
}
}
);
}
@Override
public ResultSet getResultSet() throws SQLException {
requireNonNull(statement, "statement is null");
return new JDBC4ResultSet(statement.getResultSet());
}
@Override
public int getUpdateCount() throws SQLException {
// TODO
return 0;
}
@Override
public boolean getMoreResults() throws SQLException {
// TODO
return false;
}
@Override
public void setFetchDirection(int direction) throws SQLException {
// TODO
}
@Override
public int getFetchDirection() throws SQLException {
// TODO
return 0;
}
@Override
public void setFetchSize(int rows) throws SQLException {
// TODO
}
@Override
public int getFetchSize() throws SQLException {
// TODO
return 0;
}
@Override
public int getResultSetConcurrency() {
return resultSetConcurrency;
}
@Override
public int getResultSetType() {
return resultSetType;
}
@Override
public void addBatch(String sql) throws SQLException {
// TODO
}
@Override
public void clearBatch() throws SQLException {
// TODO
}
@Override
public int[] executeBatch() throws SQLException {
// TODO
return new int[0];
}
@Override
@SkipNullableCheck
public Connection getConnection() throws SQLException {
// TODO
return null;
}
@Override
public boolean getMoreResults(int current) throws SQLException {
// TODO
return false;
}
@Override
@SkipNullableCheck
public ResultSet getGeneratedKeys() throws SQLException {
// TODO
return null;
}
@Override
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
// TODO
return 0;
}
@Override
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
// TODO
return 0;
}
@Override
public int executeUpdate(String sql, String[] columnNames) throws SQLException {
// TODO
return 0;
}
@Override
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
// TODO
return false;
}
@Override
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
// TODO
return false;
}
@Override
public boolean execute(String sql, String[] columnNames) throws SQLException {
// TODO
return false;
}
@Override
public int getResultSetHoldability() {
return resultSetHoldability;
}
@Override
public boolean isClosed() throws SQLException {
// TODO
return false;
}
@Override
public void setPoolable(boolean poolable) throws SQLException {
// TODO
}
@Override
public boolean isPoolable() throws SQLException {
// TODO
return false;
}
@Override
public void closeOnCompletion() throws SQLException {
if (closed) {throw new SQLException("statement is closed");}
closeOnCompletion = true;
}
/**
* Indicates whether the statement should be closed automatically when all its dependent result sets are closed.
*/
@Override
public boolean isCloseOnCompletion() throws SQLException {
if (closed) {throw new SQLException("statement is closed");}
return closeOnCompletion;
}
@Override
@SkipNullableCheck
public <T> T unwrap(Class<T> iface) throws SQLException {
// TODO
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
// TODO
return false;
}
protected void internalClose() throws SQLException {
// TODO
}
protected void clearGeneratedKeys() throws SQLException {
// TODO
}
protected void updateGeneratedKeys() throws SQLException {
// TODO
}
private <T> T withConnectionTimeout(SQLCallable<T> callable) throws SQLException {
final int originalBusyTimeoutMillis = connection.getBusyTimeout();
if (queryTimeoutSeconds > 0) {
// TODO: set busy timeout
connection.setBusyTimeout(1000 * queryTimeoutSeconds);
}
try {
return callable.call();
} finally {
if (queryTimeoutSeconds > 0) {
connection.setBusyTimeout(originalBusyTimeoutMillis);
}
}
}
@FunctionalInterface
protected interface SQLCallable<T> {
T call() throws SQLException;
}
@FunctionalInterface
protected interface SQLCallable<T> {
T call() throws SQLException;
}
}

View File

@@ -1,24 +1,23 @@
package org.github.tursodatabase.utils;
import java.nio.charset.StandardCharsets;
import org.github.tursodatabase.annotations.Nullable;
import java.nio.charset.StandardCharsets;
public class ByteArrayUtils {
@Nullable
public static String utf8ByteBufferToString(@Nullable byte[] buffer) {
if (buffer == null) {
return null;
}
return new String(buffer, StandardCharsets.UTF_8);
@Nullable
public static String utf8ByteBufferToString(@Nullable byte[] buffer) {
if (buffer == null) {
return null;
}
@Nullable
public static byte[] stringToUtf8ByteArray(@Nullable String str) {
if (str == null) {
return null;
}
return str.getBytes(StandardCharsets.UTF_8);
return new String(buffer, StandardCharsets.UTF_8);
}
@Nullable
public static byte[] stringToUtf8ByteArray(@Nullable String str) {
if (str == null) {
return null;
}
return str.getBytes(StandardCharsets.UTF_8);
}
}

View File

@@ -3,39 +3,39 @@ package org.github.tursodatabase.utils;
import static org.github.tursodatabase.utils.ByteArrayUtils.utf8ByteBufferToString;
import java.sql.SQLException;
import org.github.tursodatabase.LimboErrorCode;
import org.github.tursodatabase.annotations.Nullable;
import org.github.tursodatabase.exceptions.LimboException;
public class LimboExceptionUtils {
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
public static void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException {
String errorMessage = utf8ByteBufferToString(errorMessageBytes);
throw buildLimboException(errorCode, errorMessage);
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
public static void throwLimboException(int errorCode, byte[] errorMessageBytes)
throws SQLException {
String errorMessage = utf8ByteBufferToString(errorMessageBytes);
throw buildLimboException(errorCode, errorMessage);
}
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessage Error message.
*/
public static LimboException buildLimboException(int errorCode, @Nullable String errorMessage)
throws SQLException {
LimboErrorCode code = LimboErrorCode.getErrorCode(errorCode);
String msg;
if (code == LimboErrorCode.UNKNOWN_ERROR) {
msg = String.format("%s:%s (%s)", code, errorCode, errorMessage);
} else {
msg = String.format("%s (%s)", code, errorMessage);
}
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessage Error message.
*/
public static LimboException buildLimboException(int errorCode, @Nullable String errorMessage)
throws SQLException {
LimboErrorCode code = LimboErrorCode.getErrorCode(errorCode);
String msg;
if (code == LimboErrorCode.UNKNOWN_ERROR) {
msg = String.format("%s:%s (%s)", code, errorCode, errorMessage);
} else {
msg = String.format("%s (%s)", code, errorMessage);
}
return new LimboException(msg, code);
}
return new LimboException(msg, code);
}
}

View File

@@ -1,37 +1,36 @@
package org.github.tursodatabase;
import org.github.tursodatabase.jdbc4.JDBC4Connection;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import org.github.tursodatabase.jdbc4.JDBC4Connection;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class IntegrationTest {
private JDBC4Connection connection;
private JDBC4Connection connection;
@BeforeEach
void setUp() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
connection = new JDBC4Connection(url, filePath, new Properties());
}
@BeforeEach
void setUp() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
connection = new JDBC4Connection(url, filePath, new Properties());
}
@Test
void create_table_multi_inserts_select() throws Exception {
Statement stmt = createDefaultStatement();
stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.execute("INSERT INTO users VALUES (1, 'seonwoo');");
stmt.execute("INSERT INTO users VALUES (2, 'seonwoo');");
stmt.execute("INSERT INTO users VALUES (3, 'seonwoo');");
stmt.execute("SELECT * FROM users");
}
@Test
void create_table_multi_inserts_select() throws Exception {
Statement stmt = createDefaultStatement();
stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.execute("INSERT INTO users VALUES (1, 'seonwoo');");
stmt.execute("INSERT INTO users VALUES (2, 'seonwoo');");
stmt.execute("INSERT INTO users VALUES (3, 'seonwoo');");
stmt.execute("SELECT * FROM users");
}
private Statement createDefaultStatement() throws SQLException {
return connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
private Statement createDefaultStatement() throws SQLException {
return connection.createStatement(
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
}

View File

@@ -1,34 +1,33 @@
package org.github.tursodatabase;
import org.github.tursodatabase.core.LimboConnection;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
import org.github.tursodatabase.core.LimboConnection;
import org.junit.jupiter.api.Test;
class JDBCTest {
@Test
void null_is_returned_when_invalid_url_is_passed() throws Exception {
LimboConnection connection = JDBC.createConnection("jdbc:invalid:xxx", new Properties());
assertThat(connection).isNull();
}
@Test
void null_is_returned_when_invalid_url_is_passed() throws Exception {
LimboConnection connection = JDBC.createConnection("jdbc:invalid:xxx", new Properties());
assertThat(connection).isNull();
}
@Test
void non_null_connection_is_returned_when_valid_url_is_passed() throws Exception {
String fileUrl = TestUtils.createTempFile();
LimboConnection connection = JDBC.createConnection("jdbc:sqlite:" + fileUrl, new Properties());
assertThat(connection).isNotNull();
}
@Test
void non_null_connection_is_returned_when_valid_url_is_passed() throws Exception {
String fileUrl = TestUtils.createTempFile();
LimboConnection connection = JDBC.createConnection("jdbc:sqlite:" + fileUrl, new Properties());
assertThat(connection).isNotNull();
}
@Test
void connection_can_be_retrieved_from_DriverManager() throws SQLException {
try (Connection connection = DriverManager.getConnection("jdbc:sqlite:sample.db")) {
assertThat(connection).isNotNull();
}
@Test
void connection_can_be_retrieved_from_DriverManager() throws SQLException {
try (Connection connection = DriverManager.getConnection("jdbc:sqlite:sample.db")) {
assertThat(connection).isNotNull();
}
}
}

View File

@@ -4,10 +4,8 @@ import java.io.IOException;
import java.nio.file.Files;
public class TestUtils {
/**
* Create temporary file and returns the path.
*/
public static String createTempFile() throws IOException {
return Files.createTempFile("limbo_test_db", null).toAbsolutePath().toString();
}
/** Create temporary file and returns the path. */
public static String createTempFile() throws IOException {
return Files.createTempFile("limbo_test_db", null).toAbsolutePath().toString();
}
}

View File

@@ -1,32 +1,31 @@
package org.github.tursodatabase.core;
import org.github.tursodatabase.TestUtils;
import org.junit.jupiter.api.Test;
import java.util.Properties;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import java.util.Properties;
import org.github.tursodatabase.TestUtils;
import org.junit.jupiter.api.Test;
class LimboDBFactoryTest {
@Test
void single_database_should_be_created_when_urls_are_same() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
LimboDB db1 = LimboDBFactory.open(url, filePath, new Properties());
LimboDB db2 = LimboDBFactory.open(url, filePath, new Properties());
assertEquals(db1, db2);
}
@Test
void single_database_should_be_created_when_urls_are_same() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
LimboDB db1 = LimboDBFactory.open(url, filePath, new Properties());
LimboDB db2 = LimboDBFactory.open(url, filePath, new Properties());
assertEquals(db1, db2);
}
@Test
void multiple_databases_should_be_created_when_urls_differ() throws Exception {
String filePath1 = TestUtils.createTempFile();
String filePath2 = TestUtils.createTempFile();
String url1 = "jdbc:sqlite:" + filePath1;
String url2 = "jdbc:sqlite:" + filePath2;
LimboDB db1 = LimboDBFactory.open(url1, filePath1, new Properties());
LimboDB db2 = LimboDBFactory.open(url2, filePath2, new Properties());
assertNotEquals(db1, db2);
}
@Test
void multiple_databases_should_be_created_when_urls_differ() throws Exception {
String filePath1 = TestUtils.createTempFile();
String filePath2 = TestUtils.createTempFile();
String url1 = "jdbc:sqlite:" + filePath1;
String url2 = "jdbc:sqlite:" + filePath2;
LimboDB db1 = LimboDBFactory.open(url1, filePath1, new Properties());
LimboDB db2 = LimboDBFactory.open(url2, filePath2, new Properties());
assertNotEquals(db1, db2);
}
}

View File

@@ -1,48 +1,47 @@
package org.github.tursodatabase.core;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import java.sql.SQLException;
import org.github.tursodatabase.LimboErrorCode;
import org.github.tursodatabase.TestUtils;
import org.github.tursodatabase.exceptions.LimboException;
import org.junit.jupiter.api.Test;
import java.sql.SQLException;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
public class LimboDBTest {
@Test
void db_should_open_normally() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB.load();
LimboDB db = LimboDB.create("jdbc:sqlite" + dbPath, dbPath);
db.open(0);
}
@Test
void should_throw_exception_when_opened_twice() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB.load();
LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath);
db.open(0);
assertThatThrownBy(() -> db.open(0)).isInstanceOf(SQLException.class);
}
@Test
void throwJavaException_should_throw_appropriate_java_exception() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB.load();
LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath);
final int limboExceptionCode = LimboErrorCode.LIMBO_ETC.code;
try {
db.throwJavaException(limboExceptionCode);
} catch (Exception e) {
assertThat(e).isInstanceOf(LimboException.class);
LimboException limboException = (LimboException) e;
assertThat(limboException.getResultCode().code).isEqualTo(limboExceptionCode);
}
@Test
void db_should_open_normally() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB.load();
LimboDB db = LimboDB.create("jdbc:sqlite" + dbPath, dbPath);
db.open(0);
}
@Test
void should_throw_exception_when_opened_twice() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB.load();
LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath);
db.open(0);
assertThatThrownBy(() -> db.open(0)).isInstanceOf(SQLException.class);
}
@Test
void throwJavaException_should_throw_appropriate_java_exception() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB.load();
LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath);
final int limboExceptionCode = LimboErrorCode.LIMBO_ETC.code;
try {
db.throwJavaException(limboExceptionCode);
} catch (Exception e) {
assertThat(e).isInstanceOf(LimboException.class);
LimboException limboException = (LimboException) e;
assertThat(limboException.getResultCode().code).isEqualTo(limboExceptionCode);
}
}
}

View File

@@ -1,63 +1,68 @@
package org.github.tursodatabase.jdbc4;
import org.github.tursodatabase.TestUtils;
import org.github.tursodatabase.core.LimboConnection;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import static org.junit.jupiter.api.Assertions.*;
import org.github.tursodatabase.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
class JDBC4ConnectionTest {
private JDBC4Connection connection;
private JDBC4Connection connection;
@BeforeEach
void setUp() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
connection = new JDBC4Connection(url, filePath, new Properties());
}
@BeforeEach
void setUp() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
connection = new JDBC4Connection(url, filePath, new Properties());
}
@Test
void test_create_statement_valid() throws SQLException {
Statement stmt = connection.createStatement();
assertNotNull(stmt);
assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType());
assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency());
assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, stmt.getResultSetHoldability());
}
@Test
void test_create_statement_valid() throws SQLException {
Statement stmt = connection.createStatement();
assertNotNull(stmt);
assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType());
assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency());
assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, stmt.getResultSetHoldability());
}
@Test
void test_create_statement_with_type_and_concurrency_valid() throws SQLException {
Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
assertNotNull(stmt);
assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType());
assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency());
}
@Test
void test_create_statement_with_type_and_concurrency_valid() throws SQLException {
Statement stmt =
connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
assertNotNull(stmt);
assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType());
assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency());
}
@Test
void test_create_statement_with_all_params_valid() throws SQLException {
Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT);
assertNotNull(stmt);
assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType());
assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency());
assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, stmt.getResultSetHoldability());
}
@Test
void test_create_statement_with_all_params_valid() throws SQLException {
Statement stmt =
connection.createStatement(
ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY,
ResultSet.CLOSE_CURSORS_AT_COMMIT);
assertNotNull(stmt);
assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType());
assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency());
assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, stmt.getResultSetHoldability());
}
@Test
void test_create_statement_invalid() {
assertThrows(SQLException.class, () -> {
connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, -1);
@Test
void test_create_statement_invalid() {
assertThrows(
SQLException.class,
() -> {
connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, -1);
});
}
}
@Test
void prepare_simple_create_table() throws Exception {
connection.prepare("CREATE TABLE users (id INT PRIMARY KEY, username TEXT)");
}
@Test
void prepare_simple_create_table() throws Exception {
connection.prepare("CREATE TABLE users (id INT PRIMARY KEY, username TEXT)");
}
}

View File

@@ -6,56 +6,55 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;
import org.github.tursodatabase.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
class JDBC4ResultSetTest {
private Statement stmt;
private Statement stmt;
@BeforeEach
void setUp() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
final JDBC4Connection connection = new JDBC4Connection(url, filePath, new Properties());
stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY,
ResultSet.CLOSE_CURSORS_AT_COMMIT);
@BeforeEach
void setUp() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
final JDBC4Connection connection = new JDBC4Connection(url, filePath, new Properties());
stmt =
connection.createStatement(
ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY,
ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
@Test
void invoking_next_before_the_last_row_should_return_true() throws Exception {
stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');");
stmt.executeUpdate("INSERT INTO users VALUES (2, 'seonwoo');");
// first call to next occur internally
stmt.executeQuery("SELECT * FROM users");
ResultSet resultSet = stmt.getResultSet();
assertTrue(resultSet.next());
}
@Test
void invoking_next_after_the_last_row_should_return_false() throws Exception {
stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');");
stmt.executeUpdate("INSERT INTO users VALUES (2, 'seonwoo');");
// first call to next occur internally
stmt.executeQuery("SELECT * FROM users");
ResultSet resultSet = stmt.getResultSet();
while (resultSet.next()) {
// run until next() returns false
}
@Test
@Disabled("https://github.com/tursodatabase/limbo/pull/743#issuecomment-2600746904")
void invoking_next_before_the_last_row_should_return_true() throws Exception {
stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');");
stmt.executeUpdate("INSERT INTO users VALUES (2, 'seonwoo');");
// first call to next occur internally
stmt.executeQuery("SELECT * FROM users");
ResultSet resultSet = stmt.getResultSet();
assertTrue(resultSet.next());
}
@Test
@Disabled("https://github.com/tursodatabase/limbo/pull/743#issuecomment-2600746904")
void invoking_next_after_the_last_row_should_return_false() throws Exception {
stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');");
stmt.executeUpdate("INSERT INTO users VALUES (2, 'seonwoo');");
// first call to next occur internally
stmt.executeQuery("SELECT * FROM users");
ResultSet resultSet = stmt.getResultSet();
while (resultSet.next()) {
// run until next() returns false
}
// if the previous call to next() returned false, consecutive call to next() should return false as well
assertFalse(resultSet.next());
}
// if the previous call to next() returned false, consecutive call to next() should return false
// as well
assertFalse(resultSet.next());
}
}

View File

@@ -5,7 +5,6 @@ import static org.junit.jupiter.api.Assertions.*;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;
import org.github.tursodatabase.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
@@ -13,41 +12,43 @@ import org.junit.jupiter.api.Test;
class JDBC4StatementTest {
private Statement stmt;
private Statement stmt;
@BeforeEach
void setUp() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
final JDBC4Connection connection = new JDBC4Connection(url, filePath, new Properties());
stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY,
ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
@BeforeEach
void setUp() throws Exception {
String filePath = TestUtils.createTempFile();
String url = "jdbc:sqlite:" + filePath;
final JDBC4Connection connection = new JDBC4Connection(url, filePath, new Properties());
stmt =
connection.createStatement(
ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY,
ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
@Test
void execute_ddl_should_return_false() throws Exception{
assertFalse(stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"));
}
@Test
void execute_ddl_should_return_false() throws Exception {
assertFalse(stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"));
}
@Test
void execute_insert_should_return_false() throws Exception {
stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
assertFalse(stmt.execute("INSERT INTO users VALUES (1, 'limbo');"));
}
@Test
void execute_insert_should_return_false() throws Exception {
stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
assertFalse(stmt.execute("INSERT INTO users VALUES (1, 'limbo');"));
}
@Test
@Disabled("UPDATE not supported yet")
void execute_update_should_return_false() throws Exception {
stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.execute("INSERT INTO users VALUES (1, 'limbo');");
assertFalse(stmt.execute("UPDATE users SET username = 'seonwoo' WHERE id = 1;"));
}
@Test
@Disabled("UPDATE not supported yet")
void execute_update_should_return_false() throws Exception {
stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.execute("INSERT INTO users VALUES (1, 'limbo');");
assertFalse(stmt.execute("UPDATE users SET username = 'seonwoo' WHERE id = 1;"));
}
@Test
void execute_select_should_return_true() throws Exception {
stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.execute("INSERT INTO users VALUES (1, 'limbo');");
assertTrue(stmt.execute("SELECT * FROM users;"));
}
@Test
void execute_select_should_return_true() throws Exception {
stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);");
stmt.execute("INSERT INTO users VALUES (1, 'limbo');");
assertTrue(stmt.execute("SELECT * FROM users;"));
}
}

View File

@@ -110,7 +110,7 @@ pub enum Params {
pub struct Transaction {}
pub struct Rows {
_inner: Rc<limbo_core::Rows>,
_inner: Rc<limbo_core::Statement>,
}
impl Rows {

View File

@@ -3,7 +3,7 @@ use crate::{
opcodes_dictionary::OPCODE_DESCRIPTIONS,
};
use cli_table::{Cell, Table};
use limbo_core::{Database, LimboError, Rows, StepResult, Value};
use limbo_core::{Database, LimboError, Statement, StepResult, Value};
use clap::{Parser, ValueEnum};
use std::{
@@ -614,7 +614,7 @@ impl Limbo {
fn print_query_result(
&mut self,
sql: &str,
mut output: Result<Option<Rows>, LimboError>,
mut output: Result<Option<Statement>, LimboError>,
) -> anyhow::Result<()> {
match output {
Ok(Some(ref mut rows)) => match self.opts.output_mode {
@@ -624,7 +624,7 @@ impl Limbo {
return Ok(());
}
match rows.next_row() {
match rows.step() {
Ok(StepResult::Row(row)) => {
for (i, value) in row.values.iter().enumerate() {
if i > 0 {
@@ -669,7 +669,7 @@ impl Limbo {
}
let mut table_rows: Vec<Vec<_>> = vec![];
loop {
match rows.next_row() {
match rows.step() {
Ok(StepResult::Row(row)) => {
table_rows.push(
row.values
@@ -739,7 +739,7 @@ impl Limbo {
Ok(Some(ref mut rows)) => {
let mut found = false;
loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
if let Some(Value::Text(schema)) = row.values.first() {
let _ = self.write_fmt(format_args!("{};", schema));
@@ -796,7 +796,7 @@ impl Limbo {
Ok(Some(ref mut rows)) => {
let mut tables = String::new();
loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
if let Some(Value::Text(table)) = row.values.first() {
tables.push_str(table);

View File

@@ -95,7 +95,7 @@ impl<'a> ImportFile<'a> {
match self.conn.query(insert_string) {
Ok(rows) => {
if let Some(mut rows) = rows {
while let Ok(x) = rows.next_row() {
while let Ok(x) = rows.step() {
match x {
limbo_core::StepResult::IO => {
self.io.run_once().unwrap();

View File

@@ -46,7 +46,7 @@ fn limbo_bench(criterion: &mut Criterion) {
let io = io.clone();
b.iter(|| {
let mut rows = stmt.query().unwrap();
match rows.next_row().unwrap() {
match rows.step().unwrap() {
limbo_core::StepResult::Row(row) => {
assert_eq!(row.get::<i64>(0).unwrap(), 1);
}
@@ -74,7 +74,7 @@ fn limbo_bench(criterion: &mut Criterion) {
let io = io.clone();
b.iter(|| {
let mut rows = stmt.query().unwrap();
match rows.next_row().unwrap() {
match rows.step().unwrap() {
limbo_core::StepResult::Row(row) => {
assert_eq!(row.get::<i64>(0).unwrap(), 1);
}
@@ -103,7 +103,7 @@ fn limbo_bench(criterion: &mut Criterion) {
let io = io.clone();
b.iter(|| {
let mut rows = stmt.query().unwrap();
match rows.next_row().unwrap() {
match rows.step().unwrap() {
limbo_core::StepResult::Row(row) => {
assert_eq!(row.get::<i64>(0).unwrap(), 1);
}

View File

@@ -79,6 +79,7 @@ pub enum JsonFunc {
JsonObject,
JsonType,
JsonErrorPosition,
JsonValid,
}
#[cfg(feature = "json")]
@@ -97,6 +98,7 @@ impl Display for JsonFunc {
Self::JsonObject => "json_object".to_string(),
Self::JsonType => "json_type".to_string(),
Self::JsonErrorPosition => "json_error_position".to_string(),
Self::JsonValid => "json_valid".to_string(),
}
)
}
@@ -213,6 +215,7 @@ pub enum ScalarFunc {
Replace,
#[cfg(not(target_family = "wasm"))]
LoadExtension,
StrfTime,
}
impl Display for ScalarFunc {
@@ -264,6 +267,7 @@ impl Display for ScalarFunc {
Self::DateTime => "datetime".to_string(),
#[cfg(not(target_family = "wasm"))]
Self::LoadExtension => "load_extension".to_string(),
Self::StrfTime => "strftime".to_string(),
};
write!(f, "{}", str)
}
@@ -517,6 +521,8 @@ impl Func {
"json_type" => Ok(Func::Json(JsonFunc::JsonType)),
#[cfg(feature = "json")]
"json_error_position" => Ok(Self::Json(JsonFunc::JsonErrorPosition)),
#[cfg(feature = "json")]
"json_valid" => Ok(Self::Json(JsonFunc::JsonValid)),
"unixepoch" => Ok(Self::Scalar(ScalarFunc::UnixEpoch)),
"julianday" => Ok(Self::Scalar(ScalarFunc::JulianDay)),
"hex" => Ok(Self::Scalar(ScalarFunc::Hex)),
@@ -554,6 +560,7 @@ impl Func {
"trunc" => Ok(Self::Math(MathFunc::Trunc)),
#[cfg(not(target_family = "wasm"))]
"load_extension" => Ok(Self::Scalar(ScalarFunc::LoadExtension)),
"strftime" => Ok(Self::Scalar(ScalarFunc::StrfTime)),
_ => crate::bail_parse_error!("no such function: {}", name),
}
}

View File

@@ -984,3 +984,17 @@ mod tests {
}
}
}
pub fn is_json_valid(json_value: &OwnedValue) -> crate::Result<OwnedValue> {
match json_value {
OwnedValue::Text(ref t) => match from_str::<Val>(&t.value) {
Ok(_) => Ok(OwnedValue::Integer(1)),
Err(_) => Ok(OwnedValue::Integer(0)),
},
OwnedValue::Blob(b) => match jsonb::from_slice(b) {
Ok(_) => Ok(OwnedValue::Integer(1)),
Err(_) => Ok(OwnedValue::Integer(0)),
},
OwnedValue::Null => Ok(OwnedValue::Null),
_ => Ok(OwnedValue::Integer(1)),
}
}

View File

@@ -283,7 +283,7 @@ impl Connection {
}
}
pub fn query(self: &Rc<Connection>, sql: impl Into<String>) -> Result<Option<Rows>> {
pub fn query(self: &Rc<Connection>, sql: impl Into<String>) -> Result<Option<Statement>> {
let sql = sql.into();
trace!("Querying: {}", sql);
let mut parser = Parser::new(sql.as_bytes());
@@ -294,10 +294,9 @@ impl Connection {
}
}
pub(crate) fn run_cmd(self: &Rc<Connection>, cmd: Cmd) -> Result<Option<Rows>> {
pub(crate) fn run_cmd(self: &Rc<Connection>, cmd: Cmd) -> Result<Option<Statement>> {
let db = self.db.clone();
let syms: &SymbolTable = &db.syms.borrow();
match cmd {
Cmd::Stmt(stmt) => {
let program = Rc::new(translate::translate(
@@ -309,7 +308,7 @@ impl Connection {
syms,
)?);
let stmt = Statement::new(program, self.pager.clone());
Ok(Some(Rows { stmt }))
Ok(Some(stmt))
}
Cmd::Explain(stmt) => {
let program = translate::translate(
@@ -375,7 +374,8 @@ impl Connection {
syms,
)?;
let mut state = vdbe::ProgramState::new(program.max_registers);
let mut state =
vdbe::ProgramState::new(program.max_registers, program.cursor_ref.len());
program.step(&mut state, self.pager.clone())?;
}
}
@@ -430,6 +430,10 @@ impl Connection {
let prev_total_changes = self.total_changes.get();
self.total_changes.set(prev_total_changes + nchange);
}
pub fn total_changes(&self) -> i64 {
self.total_changes.get()
}
}
pub struct Statement {
@@ -440,7 +444,7 @@ pub struct Statement {
impl Statement {
pub fn new(program: Rc<vdbe::Program>, pager: Rc<Pager>) -> Self {
let state = vdbe::ProgramState::new(program.max_registers);
let state = vdbe::ProgramState::new(program.max_registers, program.cursor_ref.len());
Self {
program,
state,
@@ -463,22 +467,29 @@ impl Statement {
}
}
pub fn query(&mut self) -> Result<Rows> {
pub fn query(&mut self) -> Result<Statement> {
let stmt = Statement::new(self.program.clone(), self.pager.clone());
Ok(Rows::new(stmt))
Ok(stmt)
}
pub fn columns(&self) -> &[String] {
&self.program.columns
}
pub fn parameters(&self) -> &parameters::Parameters {
&self.program.parameters
}
pub fn parameters_count(&self) -> usize {
self.program.parameters.count()
}
pub fn bind_at(&mut self, index: NonZero<usize>, value: Value) {
self.state.bind_at(index, value.into());
}
pub fn reset(&mut self) {
let state = vdbe::ProgramState::new(self.program.max_registers);
self.state = state
self.state.reset();
}
}
@@ -503,20 +514,6 @@ impl<'a> Row<'a> {
}
}
pub struct Rows {
stmt: Statement,
}
impl Rows {
pub fn new(stmt: Statement) -> Self {
Self { stmt }
}
pub fn next_row(&mut self) -> Result<StepResult<'_>> {
self.stmt.step()
}
}
pub(crate) struct SymbolTable {
pub functions: HashMap<String, Rc<function::ExternalFunc>>,
#[cfg(not(target_family = "wasm"))]
@@ -591,7 +588,7 @@ impl<'a> QueryRunner<'a> {
}
impl Iterator for QueryRunner<'_> {
type Item = Result<Option<Rows>>;
type Item = Result<Option<Statement>>;
fn next(&mut self) -> Option<Self::Item> {
match self.parser.next() {

View File

@@ -105,7 +105,6 @@ pub struct BTreeCursor {
rowid: RefCell<Option<u64>>,
record: RefCell<Option<OwnedRecord>>,
null_flag: bool,
database_header: Rc<RefCell<DatabaseHeader>>,
/// Index internal pages are consumed on the way up, so we store going upwards flag in case
/// we just moved to a parent page and the parent page is an internal index page which requires
/// to be consumed.
@@ -137,18 +136,13 @@ struct PageStack {
}
impl BTreeCursor {
pub fn new(
pager: Rc<Pager>,
root_page: usize,
database_header: Rc<RefCell<DatabaseHeader>>,
) -> Self {
pub fn new(pager: Rc<Pager>, root_page: usize) -> Self {
Self {
pager,
root_page,
rowid: RefCell::new(None),
record: RefCell::new(None),
null_flag: false,
database_header,
going_upwards: false,
write_info: WriteInfo {
state: WriteState::Start,
@@ -750,7 +744,7 @@ impl BTreeCursor {
/// and the overflow cell count is used to determine if the page overflows,
/// i.e. whether we need to balance the btree after the insert.
fn insert_into_cell(&self, page: &mut PageContent, payload: &[u8], cell_idx: usize) {
let free = self.compute_free_space(page, RefCell::borrow(&self.database_header));
let free = self.compute_free_space(page, RefCell::borrow(&self.pager.db_header));
const CELL_POINTER_SIZE_BYTES: usize = 2;
let enough_space = payload.len() + CELL_POINTER_SIZE_BYTES <= free as usize;
if !enough_space {
@@ -832,7 +826,7 @@ impl BTreeCursor {
// then we need to do some more calculation to figure out where to insert the freeblock
// in the freeblock linked list.
let maxpc = {
let db_header = self.database_header.borrow();
let db_header = self.pager.db_header.borrow();
let usable_space = (db_header.page_size - db_header.reserved_space as u16) as usize;
usable_space as u16
};
@@ -1063,7 +1057,7 @@ impl BTreeCursor {
contents.write_u16(PAGE_HEADER_OFFSET_FIRST_FREEBLOCK, 0);
contents.write_u16(PAGE_HEADER_OFFSET_CELL_COUNT, 0);
let db_header = RefCell::borrow(&self.database_header);
let db_header = RefCell::borrow(&self.pager.db_header);
let cell_content_area_start =
db_header.page_size - db_header.reserved_space as u16;
contents.write_u16(
@@ -1294,7 +1288,7 @@ impl BTreeCursor {
/// This marks the page as dirty and writes the page header.
fn allocate_page(&self, page_type: PageType, offset: usize) -> PageRef {
let page = self.pager.allocate_page().unwrap();
btree_init_page(&page, page_type, &self.database_header.borrow(), offset);
btree_init_page(&page, page_type, &self.pager.db_header.borrow(), offset);
page
}
@@ -1322,7 +1316,7 @@ impl BTreeCursor {
// there are free blocks and enough space
if page_ref.first_freeblock() != 0 && gap + 2 <= top {
// find slot
let db_header = RefCell::borrow(&self.database_header);
let db_header = RefCell::borrow(&self.pager.db_header);
let pc = find_free_cell(page_ref, db_header, amount);
if pc != 0 {
return pc as u16;
@@ -1332,11 +1326,11 @@ impl BTreeCursor {
if gap + 2 + amount > top {
// defragment
self.defragment_page(page_ref, RefCell::borrow(&self.database_header));
self.defragment_page(page_ref, RefCell::borrow(&self.pager.db_header));
top = page_ref.read_u16(PAGE_HEADER_OFFSET_CELL_CONTENT_AREA) as usize;
}
let db_header = RefCell::borrow(&self.database_header);
let db_header = RefCell::borrow(&self.pager.db_header);
top -= amount;
page_ref.write_u16(PAGE_HEADER_OFFSET_CELL_CONTENT_AREA, top as u16);
@@ -1656,7 +1650,7 @@ impl BTreeCursor {
/// The usable size of a page might be an odd number. However, the usable size is not allowed to be less than 480.
/// In other words, if the page size is 512, then the reserved space size cannot exceed 32.
fn usable_space(&self) -> usize {
let db_header = RefCell::borrow(&self.database_header);
let db_header = self.pager.db_header.borrow();
(db_header.page_size - db_header.reserved_space as u16) as usize
}

View File

@@ -6,7 +6,7 @@ use super::pager::PageRef;
// In limbo, page cache is shared by default, meaning that multiple frames from WAL can reside in
// the cache, meaning, we need a way to differentiate between pages cached in different
// connections. For this we include the max_frame that will read a connection from so that if two
// connections. For this we include the max_frame that a connection will read from so that if two
// connections have different max_frames, they might or not have different frame read from WAL.
//
// WAL was introduced after Shared cache in SQLite, so this is why these two features don't work

View File

@@ -157,7 +157,7 @@ pub struct Pager {
/// I/O interface for input/output operations.
pub io: Arc<dyn crate::io::IO>,
dirty_pages: Rc<RefCell<HashSet<usize>>>,
db_header: Rc<RefCell<DatabaseHeader>>,
pub db_header: Rc<RefCell<DatabaseHeader>>,
flush_info: RefCell<FlushInfo>,
checkpoint_state: RefCell<CheckpointState>,

View File

@@ -187,7 +187,7 @@ pub enum CheckpointStatus {
// min_frame and max_frame is the range of frames that can be safely transferred from WAL to db
// file.
// current_page is a helper to iterate through all the pages that might have a frame in the safe
// range. This is inneficient for now.
// range. This is inefficient for now.
struct OngoingCheckpoint {
page: PageRef,
state: CheckpointState,
@@ -228,13 +228,13 @@ pub struct WalFileShared {
max_frame: u64,
nbackfills: u64,
// Frame cache maps a Page to all the frames it has stored in WAL in ascending order.
// This is do to easily find the frame it must checkpoint each connection if a checkpoint is
// This is to easily find the frame it must checkpoint each connection if a checkpoint is
// necessary.
// One difference between SQLite and limbo is that we will never support multi process, meaning
// we don't need WAL's index file. So we can do stuff like this without shared memory.
// TODO: this will need refactoring because this is incredible memory inneficient.
// TODO: this will need refactoring because this is incredible memory inefficient.
frame_cache: HashMap<u64, Vec<u64>>,
// Another memory inneficient array made to just keep track of pages that are in frame_cache.
// Another memory inefficient array made to just keep track of pages that are in frame_cache.
pages_in_frames: Vec<u64>,
last_checksum: (u32, u32), // Check of last frame in WAL, this is a cumulative checksum over all frames in the WAL
file: Rc<dyn File>,

View File

@@ -175,7 +175,11 @@ fn emit_program_for_select(
// Finalize program
epilogue(program, init_label, start_offset)?;
program.columns = plan
.result_columns
.iter()
.map(|rc| rc.name.clone())
.collect::<Vec<_>>();
Ok(())
}
@@ -286,7 +290,11 @@ fn emit_program_for_delete(
// Finalize program
epilogue(program, init_label, start_offset)?;
program.columns = plan
.result_columns
.iter()
.map(|rc| rc.name.clone())
.collect::<Vec<_>>();
Ok(())
}

View File

@@ -605,6 +605,20 @@ pub fn translate_expr(
dest: target_register,
});
}
ast::Operator::And => {
program.emit_insn(Insn::And {
lhs: e1_reg,
rhs: e2_reg,
dest: target_register,
});
}
ast::Operator::Or => {
program.emit_insn(Insn::Or {
lhs: e1_reg,
rhs: e2_reg,
dest: target_register,
});
}
ast::Operator::BitwiseAnd => {
program.emit_insn(Insn::BitAnd {
lhs: e1_reg,
@@ -915,6 +929,14 @@ pub fn translate_expr(
func_ctx,
)
}
JsonFunc::JsonValid => translate_function(
program,
args.as_deref().unwrap_or_default(),
referenced_tables,
resolver,
target_register,
func_ctx,
),
},
Func::Scalar(srf) => {
match srf {
@@ -1517,6 +1539,26 @@ pub fn translate_expr(
});
Ok(target_register)
}
ScalarFunc::StrfTime => {
if let Some(args) = args {
for arg in args.iter() {
// register containing result of each argument expression
let _ = translate_and_mark(
program,
referenced_tables,
arg,
resolver,
)?;
}
}
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: target_register + 1,
dest: target_register,
func: func_ctx,
});
Ok(target_register)
}
}
}
Func::Math(math_func) => match math_func.arity() {

View File

@@ -2,7 +2,10 @@ use limbo_ext::{AggCtx, FinalizeFunction, StepFunction};
use crate::error::LimboError;
use crate::ext::{ExtValue, ExtValueType};
use crate::pseudo::PseudoCursor;
use crate::storage::btree::BTreeCursor;
use crate::storage::sqlite3_ondisk::write_varint;
use crate::vdbe::sorter::Sorter;
use crate::Result;
use std::fmt::Display;
use std::rc::Rc;
@@ -604,7 +607,59 @@ impl OwnedRecord {
}
}
#[derive(PartialEq, Debug)]
pub enum Cursor {
Table(BTreeCursor),
Index(BTreeCursor),
Pseudo(PseudoCursor),
Sorter(Sorter),
}
impl Cursor {
pub fn new_table(cursor: BTreeCursor) -> Self {
Self::Table(cursor)
}
pub fn new_index(cursor: BTreeCursor) -> Self {
Self::Index(cursor)
}
pub fn new_pseudo(cursor: PseudoCursor) -> Self {
Self::Pseudo(cursor)
}
pub fn new_sorter(cursor: Sorter) -> Self {
Self::Sorter(cursor)
}
pub fn as_table_mut(&mut self) -> &mut BTreeCursor {
match self {
Self::Table(cursor) => cursor,
_ => panic!("Cursor is not a table"),
}
}
pub fn as_index_mut(&mut self) -> &mut BTreeCursor {
match self {
Self::Index(cursor) => cursor,
_ => panic!("Cursor is not an index"),
}
}
pub fn as_pseudo_mut(&mut self) -> &mut PseudoCursor {
match self {
Self::Pseudo(cursor) => cursor,
_ => panic!("Cursor is not a pseudo cursor"),
}
}
pub fn as_sorter_mut(&mut self) -> &mut Sorter {
match self {
Self::Sorter(cursor) => cursor,
_ => panic!("Cursor is not a sorter cursor"),
}
}
}
pub enum CursorResult<T> {
Ok(T),
IO,

View File

@@ -4,7 +4,7 @@ use sqlite3_parser::ast::{Expr, FunctionTail, Literal};
use crate::{
schema::{self, Schema},
Result, Rows, StepResult, IO,
Result, Statement, StepResult, IO,
};
// https://sqlite.org/lang_keywords.html
@@ -25,11 +25,15 @@ pub fn normalize_ident(identifier: &str) -> String {
pub const PRIMARY_KEY_AUTOMATIC_INDEX_NAME_PREFIX: &str = "sqlite_autoindex_";
pub fn parse_schema_rows(rows: Option<Rows>, schema: &mut Schema, io: Arc<dyn IO>) -> Result<()> {
pub fn parse_schema_rows(
rows: Option<Statement>,
schema: &mut Schema,
io: Arc<dyn IO>,
) -> Result<()> {
if let Some(mut rows) = rows {
let mut automatic_indexes = Vec::new();
loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
let ty = row.get::<&str>(0)?;
if ty != "table" && ty != "index" {

View File

@@ -30,6 +30,7 @@ pub struct ProgramBuilder {
// map of instruction index to manual comment (used in EXPLAIN)
comments: HashMap<InsnReference, &'static str>,
pub parameters: Parameters,
pub columns: Vec<String>,
}
#[derive(Debug, Clone)]
@@ -60,6 +61,7 @@ impl ProgramBuilder {
seekrowid_emitted_bitmask: 0,
comments: HashMap::new(),
parameters: Parameters::new(),
columns: Vec::new(),
}
}
@@ -352,6 +354,7 @@ impl ProgramBuilder {
parameters: self.parameters,
n_change: Cell::new(0),
change_cnt_on,
columns: self.columns,
}
}
}

View File

@@ -22,24 +22,44 @@ pub fn exec_datetime_full(values: &[OwnedValue]) -> OwnedValue {
exec_datetime(values, DateTimeOutput::DateTime)
}
#[inline(always)]
pub fn exec_strftime(values: &[OwnedValue]) -> OwnedValue {
if values.is_empty() {
return OwnedValue::Null;
}
let format_str = match &values[0] {
OwnedValue::Text(text) => text.value.to_string(),
OwnedValue::Integer(num) => num.to_string(),
OwnedValue::Float(num) => format!("{:.14}", num),
_ => return OwnedValue::Null,
};
exec_datetime(&values[1..], DateTimeOutput::StrfTime(format_str))
}
enum DateTimeOutput {
Date,
Time,
DateTime,
// Holds the format string
StrfTime(String),
}
fn exec_datetime(values: &[OwnedValue], output_type: DateTimeOutput) -> OwnedValue {
if values.is_empty() {
return OwnedValue::build_text(Rc::new(
parse_naive_date_time(&OwnedValue::build_text(Rc::new("now".to_string())))
.unwrap()
.format(match output_type {
DateTimeOutput::DateTime => "%Y-%m-%d %H:%M:%S",
DateTimeOutput::Time => "%H:%M:%S",
DateTimeOutput::Date => "%Y-%m-%d",
})
.to_string(),
));
let now =
parse_naive_date_time(&OwnedValue::build_text(Rc::new("now".to_string()))).unwrap();
let formatted_str = match output_type {
DateTimeOutput::DateTime => now.format("%Y-%m-%d %H:%M:%S").to_string(),
DateTimeOutput::Time => now.format("%H:%M:%S").to_string(),
DateTimeOutput::Date => now.format("%Y-%m-%d").to_string(),
DateTimeOutput::StrfTime(ref format_str) => strftime_format(&now, format_str),
};
// Parse here
return OwnedValue::build_text(Rc::new(formatted_str));
}
if let Some(mut dt) = parse_naive_date_time(&values[0]) {
// if successful, treat subsequent entries as modifiers
@@ -95,6 +115,31 @@ fn format_dt(dt: NaiveDateTime, output_type: DateTimeOutput, subsec: bool) -> St
dt.format("%Y-%m-%d %H:%M:%S").to_string()
}
}
DateTimeOutput::StrfTime(format_str) => strftime_format(&dt, &format_str),
}
}
// Not as fast as if the formatting was native to chrono, but a good enough
// for now, just to have the feature implemented
fn strftime_format(dt: &NaiveDateTime, format_str: &str) -> String {
use std::fmt::Write;
// Necessary to remove %f and %J that are exclusive formatters to sqlite
// Chrono does not support them, so it is necessary to replace the modifiers manually
// Sqlite uses 9 decimal places for julianday in strftime
let copy_format = format_str
.to_string()
.replace("%J", &format!("{:.9}", to_julian_day_exact(dt)));
// Just change the formatting here to have fractional seconds using chrono builtin modifier
let copy_format = copy_format.replace("%f", "%S.%3f");
// The write! macro is used here as chrono's format can panic if the formatting string contains
// unknown specifiers. By using a writer, we can catch the panic and handle the error
let mut formatted = String::new();
match write!(formatted, "{}", dt.format(&copy_format)) {
Ok(_) => formatted,
// On sqlite when the formatting fails nothing is printed
Err(_) => "".to_string(),
}
}
@@ -1729,4 +1774,7 @@ mod tests {
.naive_utc();
assert!(is_leap_second(&dt));
}
#[test]
fn test_strftime() {}
}

View File

@@ -1120,6 +1120,24 @@ pub fn insn_to_str(
0,
format!("r[{}]=r[{}] + r[{}]", dest, lhs, rhs),
),
Insn::And { lhs, rhs, dest } => (
"And",
*rhs as i32,
*lhs as i32,
*dest as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("r[{}]=(r[{}] && r[{}])", dest, lhs, rhs),
),
Insn::Or { lhs, rhs, dest } => (
"Or",
*rhs as i32,
*lhs as i32,
*dest as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("r[{}]=(r[{}] || r[{}])", dest, lhs, rhs),
),
};
format!(
"{:<4} {:<17} {:<4} {:<4} {:<4} {:<13} {:<2} {}",

View File

@@ -551,6 +551,18 @@ pub enum Insn {
rhs: usize,
dest: usize,
},
/// Take the logical AND of the values in registers P1 and P2 and write the result into register P3.
And {
lhs: usize,
rhs: usize,
dest: usize,
},
/// Take the logical OR of the values in register P1 and P2 and store the answer in register P3.
Or {
lhs: usize,
rhs: usize,
dest: usize,
},
}
fn cast_text_to_numerical(value: &str) -> OwnedValue {
@@ -955,3 +967,167 @@ pub fn exec_concat(lhs: &OwnedValue, rhs: &OwnedValue) -> OwnedValue {
(OwnedValue::Record(_), _) | (_, OwnedValue::Record(_)) => unreachable!(),
}
}
pub fn exec_and(mut lhs: &OwnedValue, mut rhs: &OwnedValue) -> OwnedValue {
if let OwnedValue::Agg(agg) = lhs {
lhs = agg.final_value();
}
if let OwnedValue::Agg(agg) = rhs {
rhs = agg.final_value();
}
match (lhs, rhs) {
(_, OwnedValue::Integer(0))
| (OwnedValue::Integer(0), _)
| (_, OwnedValue::Float(0.0))
| (OwnedValue::Float(0.0), _) => OwnedValue::Integer(0),
(OwnedValue::Null, _) | (_, OwnedValue::Null) => OwnedValue::Null,
(OwnedValue::Text(lhs), OwnedValue::Text(rhs)) => exec_and(
&cast_text_to_numerical(&lhs.value),
&cast_text_to_numerical(&rhs.value),
),
(OwnedValue::Text(text), other) | (other, OwnedValue::Text(text)) => {
exec_and(&cast_text_to_numerical(&text.value), other)
}
_ => OwnedValue::Integer(1),
}
}
pub fn exec_or(mut lhs: &OwnedValue, mut rhs: &OwnedValue) -> OwnedValue {
if let OwnedValue::Agg(agg) = lhs {
lhs = agg.final_value();
}
if let OwnedValue::Agg(agg) = rhs {
rhs = agg.final_value();
}
match (lhs, rhs) {
(OwnedValue::Null, OwnedValue::Null)
| (OwnedValue::Null, OwnedValue::Float(0.0))
| (OwnedValue::Float(0.0), OwnedValue::Null)
| (OwnedValue::Null, OwnedValue::Integer(0))
| (OwnedValue::Integer(0), OwnedValue::Null) => OwnedValue::Null,
(OwnedValue::Float(0.0), OwnedValue::Integer(0))
| (OwnedValue::Integer(0), OwnedValue::Float(0.0))
| (OwnedValue::Float(0.0), OwnedValue::Float(0.0))
| (OwnedValue::Integer(0), OwnedValue::Integer(0)) => OwnedValue::Integer(0),
(OwnedValue::Text(lhs), OwnedValue::Text(rhs)) => exec_or(
&cast_text_to_numerical(&lhs.value),
&cast_text_to_numerical(&rhs.value),
),
(OwnedValue::Text(text), other) | (other, OwnedValue::Text(text)) => {
exec_or(&cast_text_to_numerical(&text.value), other)
}
_ => OwnedValue::Integer(1),
}
}
#[cfg(test)]
mod tests {
use std::rc::Rc;
use crate::{
types::{LimboText, OwnedValue},
vdbe::insn::exec_or,
};
use super::exec_and;
#[test]
fn test_exec_and() {
let inputs = vec![
(OwnedValue::Integer(0), OwnedValue::Null),
(OwnedValue::Null, OwnedValue::Integer(1)),
(OwnedValue::Null, OwnedValue::Null),
(OwnedValue::Float(0.0), OwnedValue::Null),
(OwnedValue::Integer(1), OwnedValue::Float(2.2)),
(
OwnedValue::Integer(0),
OwnedValue::Text(LimboText::new(Rc::new("string".to_string()))),
),
(
OwnedValue::Integer(0),
OwnedValue::Text(LimboText::new(Rc::new("1".to_string()))),
),
(
OwnedValue::Integer(1),
OwnedValue::Text(LimboText::new(Rc::new("1".to_string()))),
),
];
let outpus = [
OwnedValue::Integer(0),
OwnedValue::Null,
OwnedValue::Null,
OwnedValue::Integer(0),
OwnedValue::Integer(1),
OwnedValue::Integer(0),
OwnedValue::Integer(0),
OwnedValue::Integer(1),
];
assert_eq!(
inputs.len(),
outpus.len(),
"Inputs and Outputs should have same size"
);
for (i, (lhs, rhs)) in inputs.iter().enumerate() {
assert_eq!(
exec_and(lhs, rhs),
outpus[i],
"Wrong AND for lhs: {}, rhs: {}",
lhs,
rhs
);
}
}
#[test]
fn test_exec_or() {
let inputs = vec![
(OwnedValue::Integer(0), OwnedValue::Null),
(OwnedValue::Null, OwnedValue::Integer(1)),
(OwnedValue::Null, OwnedValue::Null),
(OwnedValue::Float(0.0), OwnedValue::Null),
(OwnedValue::Integer(1), OwnedValue::Float(2.2)),
(OwnedValue::Float(0.0), OwnedValue::Integer(0)),
(
OwnedValue::Integer(0),
OwnedValue::Text(LimboText::new(Rc::new("string".to_string()))),
),
(
OwnedValue::Integer(0),
OwnedValue::Text(LimboText::new(Rc::new("1".to_string()))),
),
(
OwnedValue::Integer(0),
OwnedValue::Text(LimboText::new(Rc::new("".to_string()))),
),
];
let outpus = [
OwnedValue::Null,
OwnedValue::Integer(1),
OwnedValue::Null,
OwnedValue::Null,
OwnedValue::Integer(1),
OwnedValue::Integer(0),
OwnedValue::Integer(0),
OwnedValue::Integer(1),
OwnedValue::Integer(0),
];
assert_eq!(
inputs.len(),
outpus.len(),
"Inputs and Outputs should have same size"
);
for (i, (lhs, rhs)) in inputs.iter().enumerate() {
assert_eq!(
exec_or(lhs, rhs),
outpus[i],
"Wrong OR for lhs: {}, rhs: {}",
lhs,
rhs
);
}
}
}

View File

@@ -31,22 +31,27 @@ use crate::pseudo::PseudoCursor;
use crate::result::LimboResult;
use crate::storage::sqlite3_ondisk::DatabaseHeader;
use crate::storage::{btree::BTreeCursor, pager::Pager};
use crate::types::ExternalAggState;
use crate::types::{AggContext, CursorResult, OwnedRecord, OwnedValue, Record, SeekKey, SeekOp};
use crate::types::{
AggContext, Cursor, CursorResult, ExternalAggState, OwnedRecord, OwnedValue, Record, SeekKey,
SeekOp,
};
use crate::util::parse_schema_rows;
use crate::vdbe::builder::CursorType;
use crate::vdbe::insn::Insn;
#[cfg(feature = "json")]
use crate::{
function::JsonFunc, json::get_json, json::json_array, json::json_array_length,
json::json_arrow_extract, json::json_arrow_shift_extract, json::json_error_position,
json::json_extract, json::json_object, json::json_type,
function::JsonFunc, json::get_json, json::is_json_valid, json::json_array,
json::json_array_length, json::json_arrow_extract, json::json_arrow_shift_extract,
json::json_error_position, json::json_extract, json::json_object, json::json_type,
};
use crate::{resolve_ext_path, Connection, Result, TransactionState, DATABASE_VERSION};
use datetime::{
exec_date, exec_datetime_full, exec_julianday, exec_strftime, exec_time, exec_unixepoch,
};
use crate::{resolve_ext_path, Connection, Result, Rows, TransactionState, DATABASE_VERSION};
use datetime::{exec_date, exec_datetime_full, exec_julianday, exec_time, exec_unixepoch};
use insn::{
exec_add, exec_bit_and, exec_bit_not, exec_bit_or, exec_boolean_not, exec_concat, exec_divide,
exec_multiply, exec_remainder, exec_shift_left, exec_shift_right, exec_subtract,
exec_add, exec_and, exec_bit_and, exec_bit_not, exec_bit_or, exec_boolean_not, exec_concat,
exec_divide, exec_multiply, exec_or, exec_remainder, exec_shift_left, exec_shift_right,
exec_subtract,
};
use likeop::{construct_like_escape_arg, exec_glob, exec_like_with_escape};
use rand::distributions::{Distribution, Uniform};
@@ -54,8 +59,8 @@ use rand::{thread_rng, Rng};
use regex::{Regex, RegexBuilder};
use sorter::Sorter;
use std::borrow::BorrowMut;
use std::cell::{Cell, RefCell};
use std::collections::{BTreeMap, HashMap};
use std::cell::{Cell, RefCell, RefMut};
use std::collections::HashMap;
use std::num::NonZero;
use std::rc::{Rc, Weak};
@@ -189,40 +194,106 @@ impl RegexCache {
}
}
fn get_cursor_as_table_mut<'long, 'short>(
cursors: &'short mut RefMut<'long, Vec<Option<Cursor>>>,
cursor_id: CursorID,
) -> &'short mut BTreeCursor {
let cursor = cursors
.get_mut(cursor_id)
.expect("cursor id out of bounds")
.as_mut()
.expect("cursor not allocated")
.as_table_mut();
cursor
}
fn get_cursor_as_index_mut<'long, 'short>(
cursors: &'short mut RefMut<'long, Vec<Option<Cursor>>>,
cursor_id: CursorID,
) -> &'short mut BTreeCursor {
let cursor = cursors
.get_mut(cursor_id)
.expect("cursor id out of bounds")
.as_mut()
.expect("cursor not allocated")
.as_index_mut();
cursor
}
fn get_cursor_as_pseudo_mut<'long, 'short>(
cursors: &'short mut RefMut<'long, Vec<Option<Cursor>>>,
cursor_id: CursorID,
) -> &'short mut PseudoCursor {
let cursor = cursors
.get_mut(cursor_id)
.expect("cursor id out of bounds")
.as_mut()
.expect("cursor not allocated")
.as_pseudo_mut();
cursor
}
fn get_cursor_as_sorter_mut<'long, 'short>(
cursors: &'short mut RefMut<'long, Vec<Option<Cursor>>>,
cursor_id: CursorID,
) -> &'short mut Sorter {
let cursor = cursors
.get_mut(cursor_id)
.expect("cursor id out of bounds")
.as_mut()
.expect("cursor not allocated")
.as_sorter_mut();
cursor
}
struct Bitfield<const N: usize>([u64; N]);
impl<const N: usize> Bitfield<N> {
fn new() -> Self {
Self([0; N])
}
fn set(&mut self, bit: usize) {
assert!(bit < N * 64, "bit out of bounds");
self.0[bit / 64] |= 1 << (bit % 64);
}
fn unset(&mut self, bit: usize) {
assert!(bit < N * 64, "bit out of bounds");
self.0[bit / 64] &= !(1 << (bit % 64));
}
fn get(&self, bit: usize) -> bool {
assert!(bit < N * 64, "bit out of bounds");
(self.0[bit / 64] & (1 << (bit % 64))) != 0
}
}
/// The program state describes the environment in which the program executes.
pub struct ProgramState {
pub pc: InsnReference,
btree_table_cursors: RefCell<BTreeMap<CursorID, BTreeCursor>>,
btree_index_cursors: RefCell<BTreeMap<CursorID, BTreeCursor>>,
pseudo_cursors: RefCell<BTreeMap<CursorID, PseudoCursor>>,
sorter_cursors: RefCell<BTreeMap<CursorID, Sorter>>,
cursors: RefCell<Vec<Option<Cursor>>>,
registers: Vec<OwnedValue>,
last_compare: Option<std::cmp::Ordering>,
deferred_seek: Option<(CursorID, CursorID)>,
ended_coroutine: HashMap<usize, bool>, // flag to indicate that a coroutine has ended (key is the yield register)
ended_coroutine: Bitfield<4>, // flag to indicate that a coroutine has ended (key is the yield register. currently we assume that the yield register is always between 0-255, YOLO)
regex_cache: RegexCache,
interrupted: bool,
parameters: HashMap<NonZero<usize>, OwnedValue>,
}
impl ProgramState {
pub fn new(max_registers: usize) -> Self {
let btree_table_cursors = RefCell::new(BTreeMap::new());
let btree_index_cursors = RefCell::new(BTreeMap::new());
let pseudo_cursors = RefCell::new(BTreeMap::new());
let sorter_cursors = RefCell::new(BTreeMap::new());
let mut registers = Vec::with_capacity(max_registers);
registers.resize(max_registers, OwnedValue::Null);
pub fn new(max_registers: usize, max_cursors: usize) -> Self {
let cursors: RefCell<Vec<Option<Cursor>>> =
RefCell::new((0..max_cursors).map(|_| None).collect());
let registers = vec![OwnedValue::Null; max_registers];
Self {
pc: 0,
btree_table_cursors,
btree_index_cursors,
pseudo_cursors,
sorter_cursors,
cursors,
registers,
last_compare: None,
deferred_seek: None,
ended_coroutine: HashMap::new(),
ended_coroutine: Bitfield::new(),
regex_cache: RegexCache::new(),
interrupted: false,
parameters: HashMap::new(),
@@ -254,16 +325,26 @@ impl ProgramState {
}
pub fn reset(&mut self) {
self.pc = 0;
self.cursors.borrow_mut().iter_mut().for_each(|c| *c = None);
self.registers
.iter_mut()
.for_each(|r| *r = OwnedValue::Null);
self.last_compare = None;
self.deferred_seek = None;
self.ended_coroutine.0 = [0; 4];
self.regex_cache.like.clear();
self.interrupted = false;
self.parameters.clear();
}
}
macro_rules! must_be_btree_cursor {
($cursor_id:expr, $cursor_ref:expr, $btree_table_cursors:expr, $btree_index_cursors:expr, $insn_name:expr) => {{
($cursor_id:expr, $cursor_ref:expr, $cursors:expr, $insn_name:expr) => {{
let (_, cursor_type) = $cursor_ref.get($cursor_id).unwrap();
let cursor = match cursor_type {
CursorType::BTreeTable(_) => $btree_table_cursors.get_mut(&$cursor_id).unwrap(),
CursorType::BTreeIndex(_) => $btree_index_cursors.get_mut(&$cursor_id).unwrap(),
CursorType::BTreeTable(_) => get_cursor_as_table_mut(&mut $cursors, $cursor_id),
CursorType::BTreeIndex(_) => get_cursor_as_index_mut(&mut $cursors, $cursor_id),
CursorType::Pseudo(_) => panic!("{} on pseudo cursor", $insn_name),
CursorType::Sorter => panic!("{} on sorter cursor", $insn_name),
};
@@ -283,6 +364,7 @@ pub struct Program {
pub auto_commit: bool,
pub n_change: Cell<i64>,
pub change_cnt_on: bool,
pub columns: Vec<String>,
}
impl Program {
@@ -315,10 +397,7 @@ impl Program {
}
let insn = &self.insns[state.pc as usize];
trace_insn(self, state.pc as InsnReference, insn);
let mut btree_table_cursors = state.btree_table_cursors.borrow_mut();
let mut btree_index_cursors = state.btree_index_cursors.borrow_mut();
let mut pseudo_cursors = state.pseudo_cursors.borrow_mut();
let mut sorter_cursors = state.sorter_cursors.borrow_mut();
let mut cursors = state.cursors.borrow_mut();
match insn {
Insn::Init { target_pc } => {
assert!(target_pc.is_offset());
@@ -396,13 +475,8 @@ impl Program {
state.pc += 1;
}
Insn::NullRow { cursor_id } => {
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"NullRow"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "NullRow");
cursor.set_null_flag(true);
state.pc += 1;
}
@@ -698,14 +772,19 @@ impl Program {
root_page,
} => {
let (_, cursor_type) = self.cursor_ref.get(*cursor_id).unwrap();
let cursor =
BTreeCursor::new(pager.clone(), *root_page, self.database_header.clone());
let cursor = BTreeCursor::new(pager.clone(), *root_page);
match cursor_type {
CursorType::BTreeTable(_) => {
btree_table_cursors.insert(*cursor_id, cursor);
cursors
.get_mut(*cursor_id)
.unwrap()
.replace(Cursor::new_table(cursor));
}
CursorType::BTreeIndex(_) => {
btree_index_cursors.insert(*cursor_id, cursor);
cursors
.get_mut(*cursor_id)
.unwrap()
.replace(Cursor::new_index(cursor));
}
CursorType::Pseudo(_) => {
panic!("OpenReadAsync on pseudo cursor");
@@ -725,28 +804,21 @@ impl Program {
num_fields: _,
} => {
let cursor = PseudoCursor::new();
pseudo_cursors.insert(*cursor_id, cursor);
cursors
.get_mut(*cursor_id)
.unwrap()
.replace(Cursor::new_pseudo(cursor));
state.pc += 1;
}
Insn::RewindAsync { cursor_id } => {
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"RewindAsync"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "RewindAsync");
return_if_io!(cursor.rewind());
state.pc += 1;
}
Insn::LastAsync { cursor_id } => {
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"LastAsync"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "LastAsync");
return_if_io!(cursor.last());
state.pc += 1;
}
@@ -755,13 +827,8 @@ impl Program {
pc_if_empty,
} => {
assert!(pc_if_empty.is_offset());
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"LastAwait"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "LastAwait");
cursor.wait_for_completion()?;
if cursor.is_empty() {
state.pc = pc_if_empty.to_offset_int();
@@ -774,13 +841,8 @@ impl Program {
pc_if_empty,
} => {
assert!(pc_if_empty.is_offset());
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"RewindAwait"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "RewindAwait");
cursor.wait_for_completion()?;
if cursor.is_empty() {
state.pc = pc_if_empty.to_offset_int();
@@ -794,9 +856,9 @@ impl Program {
dest,
} => {
if let Some((index_cursor_id, table_cursor_id)) = state.deferred_seek.take() {
let index_cursor = btree_index_cursors.get_mut(&index_cursor_id).unwrap();
let index_cursor = get_cursor_as_index_mut(&mut cursors, index_cursor_id);
let rowid = index_cursor.rowid()?;
let table_cursor = btree_table_cursors.get_mut(&table_cursor_id).unwrap();
let table_cursor = get_cursor_as_table_mut(&mut cursors, table_cursor_id);
match table_cursor.seek(SeekKey::TableRowId(rowid.unwrap()), SeekOp::EQ)? {
CursorResult::Ok(_) => {}
CursorResult::IO => {
@@ -811,8 +873,7 @@ impl Program {
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
cursors,
"Column"
);
let record = cursor.record()?;
@@ -827,7 +888,7 @@ impl Program {
}
}
CursorType::Sorter => {
let cursor = sorter_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id);
if let Some(record) = cursor.record() {
state.registers[*dest] = record.values[*column].clone();
} else {
@@ -835,7 +896,7 @@ impl Program {
}
}
CursorType::Pseudo(_) => {
let cursor = pseudo_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_pseudo_mut(&mut cursors, *cursor_id);
if let Some(record) = cursor.record() {
state.registers[*dest] = record.values[*column].clone();
} else {
@@ -861,25 +922,15 @@ impl Program {
return Ok(StepResult::Row(record));
}
Insn::NextAsync { cursor_id } => {
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"NextAsync"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "NextAsync");
cursor.set_null_flag(false);
return_if_io!(cursor.next());
state.pc += 1;
}
Insn::PrevAsync { cursor_id } => {
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"PrevAsync"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "PrevAsync");
cursor.set_null_flag(false);
return_if_io!(cursor.prev());
state.pc += 1;
@@ -889,13 +940,8 @@ impl Program {
pc_if_next,
} => {
assert!(pc_if_next.is_offset());
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"PrevAwait"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "PrevAwait");
cursor.wait_for_completion()?;
if !cursor.is_empty() {
state.pc = pc_if_next.to_offset_int();
@@ -908,13 +954,8 @@ impl Program {
pc_if_next,
} => {
assert!(pc_if_next.is_offset());
let cursor = must_be_btree_cursor!(
*cursor_id,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"NextAwait"
);
let cursor =
must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "NextAwait");
cursor.wait_for_completion()?;
if !cursor.is_empty() {
state.pc = pc_if_next.to_offset_int();
@@ -1044,9 +1085,9 @@ impl Program {
}
Insn::RowId { cursor_id, dest } => {
if let Some((index_cursor_id, table_cursor_id)) = state.deferred_seek.take() {
let index_cursor = btree_index_cursors.get_mut(&index_cursor_id).unwrap();
let index_cursor = get_cursor_as_index_mut(&mut cursors, index_cursor_id);
let rowid = index_cursor.rowid()?;
let table_cursor = btree_table_cursors.get_mut(&table_cursor_id).unwrap();
let table_cursor = get_cursor_as_table_mut(&mut cursors, table_cursor_id);
match table_cursor.seek(SeekKey::TableRowId(rowid.unwrap()), SeekOp::EQ)? {
CursorResult::Ok(_) => {}
CursorResult::IO => {
@@ -1056,7 +1097,7 @@ impl Program {
}
}
let cursor = btree_table_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id);
if let Some(ref rowid) = cursor.rowid()? {
state.registers[*dest] = OwnedValue::Integer(*rowid as i64);
} else {
@@ -1070,7 +1111,7 @@ impl Program {
target_pc,
} => {
assert!(target_pc.is_offset());
let cursor = btree_table_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id);
let rowid = match &state.registers[*src_reg] {
OwnedValue::Integer(rowid) => *rowid as u64,
OwnedValue::Null => {
@@ -1106,7 +1147,7 @@ impl Program {
} => {
assert!(target_pc.is_offset());
if *is_index {
let cursor = btree_index_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_index_mut(&mut cursors, *cursor_id);
let record_from_regs: OwnedRecord =
make_owned_record(&state.registers, start_reg, num_regs);
let found = return_if_io!(
@@ -1118,7 +1159,7 @@ impl Program {
state.pc += 1;
}
} else {
let cursor = btree_table_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id);
let rowid = match &state.registers[*start_reg] {
OwnedValue::Null => {
// All integer values are greater than null so we just rewind the cursor
@@ -1151,7 +1192,7 @@ impl Program {
} => {
assert!(target_pc.is_offset());
if *is_index {
let cursor = btree_index_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_index_mut(&mut cursors, *cursor_id);
let record_from_regs: OwnedRecord =
make_owned_record(&state.registers, start_reg, num_regs);
let found = return_if_io!(
@@ -1163,7 +1204,7 @@ impl Program {
state.pc += 1;
}
} else {
let cursor = btree_table_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id);
let rowid = match &state.registers[*start_reg] {
OwnedValue::Null => {
// All integer values are greater than null so we just rewind the cursor
@@ -1194,7 +1235,7 @@ impl Program {
target_pc,
} => {
assert!(target_pc.is_offset());
let cursor = btree_index_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_index_mut(&mut cursors, *cursor_id);
let record_from_regs: OwnedRecord =
make_owned_record(&state.registers, start_reg, num_regs);
if let Some(ref idx_record) = *cursor.record()? {
@@ -1217,7 +1258,7 @@ impl Program {
target_pc,
} => {
assert!(target_pc.is_offset());
let cursor = btree_index_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_index_mut(&mut cursors, *cursor_id);
let record_from_regs: OwnedRecord =
make_owned_record(&state.registers, start_reg, num_regs);
if let Some(ref idx_record) = *cursor.record()? {
@@ -1547,7 +1588,10 @@ impl Program {
})
.collect();
let cursor = Sorter::new(order);
sorter_cursors.insert(*cursor_id, cursor);
cursors
.get_mut(*cursor_id)
.unwrap()
.replace(Cursor::new_sorter(cursor));
state.pc += 1;
}
Insn::SorterData {
@@ -1555,7 +1599,7 @@ impl Program {
dest_reg,
pseudo_cursor,
} => {
let sorter_cursor = sorter_cursors.get_mut(cursor_id).unwrap();
let sorter_cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id);
let record = match sorter_cursor.record() {
Some(record) => record.clone(),
None => {
@@ -1564,7 +1608,7 @@ impl Program {
}
};
state.registers[*dest_reg] = OwnedValue::Record(record.clone());
let pseudo_cursor = pseudo_cursors.get_mut(pseudo_cursor).unwrap();
let pseudo_cursor = get_cursor_as_pseudo_mut(&mut cursors, *pseudo_cursor);
pseudo_cursor.insert(record);
state.pc += 1;
}
@@ -1572,7 +1616,7 @@ impl Program {
cursor_id,
record_reg,
} => {
let cursor = sorter_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id);
let record = match &state.registers[*record_reg] {
OwnedValue::Record(record) => record,
_ => unreachable!("SorterInsert on non-record register"),
@@ -1584,11 +1628,12 @@ impl Program {
cursor_id,
pc_if_empty,
} => {
if let Some(cursor) = sorter_cursors.get_mut(cursor_id) {
let cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id);
if cursor.is_empty() {
state.pc = pc_if_empty.to_offset_int();
} else {
cursor.sort();
state.pc += 1;
} else {
state.pc = pc_if_empty.to_offset_int();
}
}
Insn::SorterNext {
@@ -1596,9 +1641,9 @@ impl Program {
pc_if_next,
} => {
assert!(pc_if_next.is_offset());
let cursor = sorter_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id);
cursor.next();
if !cursor.is_empty() {
if cursor.has_more() {
state.pc = pc_if_next.to_offset_int();
} else {
state.pc += 1;
@@ -1697,6 +1742,10 @@ impl Program {
Err(e) => return Err(e),
}
}
JsonFunc::JsonValid => {
let json_value = &state.registers[*start_reg];
state.registers[*dest] = is_json_valid(json_value)?;
}
},
crate::function::Func::Scalar(scalar_func) => match scalar_func {
ScalarFunc::Cast => {
@@ -2004,6 +2053,12 @@ impl Program {
conn.load_extension(ext)?;
}
}
ScalarFunc::StrfTime => {
let result = exec_strftime(
&state.registers[*start_reg..*start_reg + arg_count],
);
state.registers[*dest] = result;
}
},
crate::function::Func::External(f) => match f.func {
ExtFunc::Scalar(f) => {
@@ -2077,7 +2132,7 @@ impl Program {
assert!(jump_on_definition.is_offset());
let start_offset = start_offset.to_offset_int();
state.registers[*yield_reg] = OwnedValue::Integer(start_offset as i64);
state.ended_coroutine.insert(*yield_reg, false);
state.ended_coroutine.unset(*yield_reg);
let jump_on_definition = jump_on_definition.to_offset_int();
state.pc = if jump_on_definition == 0 {
state.pc + 1
@@ -2087,7 +2142,7 @@ impl Program {
}
Insn::EndCoroutine { yield_reg } => {
if let OwnedValue::Integer(pc) = state.registers[*yield_reg] {
state.ended_coroutine.insert(*yield_reg, true);
state.ended_coroutine.set(*yield_reg);
let pc: u32 = pc
.try_into()
.unwrap_or_else(|_| panic!("EndCoroutine: pc overflow: {}", pc));
@@ -2101,11 +2156,7 @@ impl Program {
end_offset,
} => {
if let OwnedValue::Integer(pc) = state.registers[*yield_reg] {
if *state
.ended_coroutine
.get(yield_reg)
.expect("coroutine not initialized")
{
if state.ended_coroutine.get(*yield_reg) {
state.pc = end_offset.to_offset_int();
} else {
let pc: u32 = pc
@@ -2129,7 +2180,7 @@ impl Program {
record_reg,
flag: _,
} => {
let cursor = btree_table_cursors.get_mut(cursor).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor);
let record = match &state.registers[*record_reg] {
OwnedValue::Record(r) => r,
_ => unreachable!("Not a record! Cannot insert a non record value."),
@@ -2139,7 +2190,7 @@ impl Program {
state.pc += 1;
}
Insn::InsertAwait { cursor_id } => {
let cursor = btree_table_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id);
cursor.wait_for_completion()?;
// Only update last_insert_rowid for regular table inserts, not schema modifications
if cursor.root_page() != 1 {
@@ -2154,12 +2205,12 @@ impl Program {
state.pc += 1;
}
Insn::DeleteAsync { cursor_id } => {
let cursor = btree_table_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id);
return_if_io!(cursor.delete());
state.pc += 1;
}
Insn::DeleteAwait { cursor_id } => {
let cursor = btree_table_cursors.get_mut(cursor_id).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id);
cursor.wait_for_completion()?;
let prev_changes = self.n_change.get();
self.n_change.set(prev_changes + 1);
@@ -2168,7 +2219,7 @@ impl Program {
Insn::NewRowid {
cursor, rowid_reg, ..
} => {
let cursor = btree_table_cursors.get_mut(cursor).unwrap();
let cursor = get_cursor_as_table_mut(&mut cursors, *cursor);
// TODO: make io handle rng
let rowid = return_if_io!(get_new_rowid(cursor, thread_rng()));
state.registers[*rowid_reg] = OwnedValue::Integer(rowid);
@@ -2208,13 +2259,8 @@ impl Program {
rowid_reg,
target_pc,
} => {
let cursor = must_be_btree_cursor!(
*cursor,
self.cursor_ref,
btree_table_cursors,
btree_index_cursors,
"NotExists"
);
let cursor =
must_be_btree_cursor!(*cursor, self.cursor_ref, cursors, "NotExists");
let exists = return_if_io!(cursor.exists(&state.registers[*rowid_reg]));
if exists {
state.pc += 1;
@@ -2231,12 +2277,17 @@ impl Program {
} => {
let (_, cursor_type) = self.cursor_ref.get(*cursor_id).unwrap();
let is_index = cursor_type.is_index();
let cursor =
BTreeCursor::new(pager.clone(), *root_page, self.database_header.clone());
let cursor = BTreeCursor::new(pager.clone(), *root_page);
if is_index {
btree_index_cursors.insert(*cursor_id, cursor);
cursors
.get_mut(*cursor_id)
.unwrap()
.replace(Cursor::new_index(cursor));
} else {
btree_table_cursors.insert(*cursor_id, cursor);
cursors
.get_mut(*cursor_id)
.unwrap()
.replace(Cursor::new_table(cursor));
}
state.pc += 1;
}
@@ -2258,32 +2309,14 @@ impl Program {
// TODO: implement temp databases
todo!("temp databases not implemented yet");
}
let mut cursor = Box::new(BTreeCursor::new(
pager.clone(),
0,
self.database_header.clone(),
));
let mut cursor = Box::new(BTreeCursor::new(pager.clone(), 0));
let root_page = cursor.btree_create(*flags);
state.registers[*root] = OwnedValue::Integer(root_page as i64);
state.pc += 1;
}
Insn::Close { cursor_id } => {
let (_, cursor_type) = self.cursor_ref.get(*cursor_id).unwrap();
match cursor_type {
CursorType::BTreeTable(_) => {
let _ = btree_table_cursors.remove(cursor_id);
}
CursorType::BTreeIndex(_) => {
let _ = btree_index_cursors.remove(cursor_id);
}
CursorType::Pseudo(_) => {
let _ = pseudo_cursors.remove(cursor_id);
}
CursorType::Sorter => {
let _ = sorter_cursors.remove(cursor_id);
}
}
cursors.get_mut(*cursor_id).unwrap().take();
state.pc += 1;
}
Insn::IsNull { src, target_pc } => {
@@ -2303,10 +2336,9 @@ impl Program {
"SELECT * FROM sqlite_schema WHERE {}",
where_clause
))?;
let rows = Rows { stmt };
let mut schema = RefCell::borrow_mut(&conn.schema);
// TODO: This function below is synchronous, make it not async
parse_schema_rows(Some(rows), &mut schema, conn.pager.io.clone())?;
parse_schema_rows(Some(stmt), &mut schema, conn.pager.io.clone())?;
state.pc += 1;
}
Insn::ShiftRight { lhs, rhs, dest } => {
@@ -2345,6 +2377,16 @@ impl Program {
exec_concat(&state.registers[*lhs], &state.registers[*rhs]);
state.pc += 1;
}
Insn::And { lhs, rhs, dest } => {
state.registers[*dest] =
exec_and(&state.registers[*lhs], &state.registers[*rhs]);
state.pc += 1;
}
Insn::Or { lhs, rhs, dest } => {
state.registers[*dest] =
exec_or(&state.registers[*lhs], &state.registers[*rhs]);
state.pc += 1;
}
}
}
}
@@ -3403,7 +3445,7 @@ mod tests {
exec_ltrim, exec_max, exec_min, exec_nullif, exec_quote, exec_random, exec_randomblob,
exec_round, exec_rtrim, exec_sign, exec_soundex, exec_substring, exec_trim, exec_typeof,
exec_unhex, exec_unicode, exec_upper, exec_zeroblob, execute_sqlite_version, AggContext,
OwnedValue,
Bitfield, OwnedValue,
};
use std::{collections::HashMap, rc::Rc};
@@ -4292,4 +4334,23 @@ mod tests {
expected_str
);
}
#[test]
fn test_bitfield() {
    let mut bits = Bitfield::<4>::new();
    // Setting bits one by one from the bottom: after setting bit `i`,
    // exactly the bits 0..=i must read as set and everything above as clear.
    for i in 0..256 {
        bits.set(i);
        for j in 0..256 {
            assert_eq!(bits.get(j), j <= i);
        }
    }
    // Clearing each bit must immediately read back as unset.
    for i in 0..256 {
        bits.unset(i);
        assert!(!bits.get(i));
    }
}
}

View File

@@ -16,8 +16,13 @@ impl Sorter {
}
}
pub fn is_empty(&self) -> bool {
self.current.is_none()
self.records.is_empty()
}
/// Returns `true` while the sorter still has a current record to yield.
pub fn has_more(&self) -> bool {
    matches!(self.current, Some(_))
}
// We do the sorting here since this is what is called by the SorterSort instruction
pub fn sort(&mut self) {
self.records.sort_by(|a, b| {

View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "{}" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -36,7 +36,7 @@ fn main() {
let mut rows = stmt.query().unwrap();
let mut count = 0;
loop {
let row = rows.next_row().unwrap();
let row = rows.step().unwrap();
match row {
limbo_core::StepResult::Row(_) => {
count += 1;

View File

@@ -23,3 +23,6 @@ tempfile = "3.0.7"
env_logger = "0.10.1"
anarchist-readable-name-generator-lib = "0.1.2"
clap = { version = "4.5", features = ["derive"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0" }
notify = "8.0.0"

View File

@@ -1,13 +1,15 @@
use std::{fmt::Display, rc::Rc, vec};
use std::{fmt::Display, path::Path, rc::Rc, vec};
use limbo_core::{Connection, Result, StepResult};
use serde::{Deserialize, Serialize};
use crate::{
model::{
query::{Create, Insert, Query, Select},
table::Value,
},
SimConnection, SimulatorEnv,
runner::env::SimConnection,
SimulatorEnv,
};
use crate::generation::{frequency, Arbitrary, ArbitraryFrom};
@@ -19,18 +21,81 @@ use super::{
pub(crate) type ResultSet = Result<Vec<Vec<Value>>>;
#[derive(Clone)]
#[derive(Clone, Serialize, Deserialize)]
pub(crate) struct InteractionPlan {
pub(crate) plan: Vec<Interactions>,
}
impl InteractionPlan {
    /// Computes a plan from a given `.plan` file without the need to parse
    /// SQL. This is possible because there are two versions of the plan file: one that is
    /// human readable and one that is serialized as JSON. Under watch mode, users are able
    /// to delete interactions from the human readable file, and this function uses the JSON
    /// file as a baseline to detect which interactions were deleted, constructing the plan
    /// from the remaining interactions.
    pub(crate) fn compute_via_diff(plan_path: &Path) -> Vec<Vec<Interaction>> {
        // The (possibly hand-edited) human-readable plan, one line per entry.
        let interactions = std::fs::read_to_string(plan_path).unwrap();
        let interactions = interactions.lines().collect::<Vec<_>>();
        // The serialized JSON twin of the plan serves as the unedited baseline.
        let plan: InteractionPlan = serde_json::from_str(
            std::fs::read_to_string(plan_path.with_extension("plan.json"))
                .unwrap()
                .as_str(),
        )
        .unwrap();
        let mut plan = plan
            .plan
            .into_iter()
            .map(|i| i.interactions())
            .collect::<Vec<_>>();
        // Two-pointer walk: `i` scans the text lines while (`j1`, `j2`) index the
        // baseline plan as (group, interaction-within-group).
        let (mut i, mut j1, mut j2) = (0, 0, 0);
        while i < interactions.len() && j1 < plan.len() {
            // Skip structural markers and blank lines in the human-readable file.
            if interactions[i].starts_with("-- begin")
                || interactions[i].starts_with("-- end")
                || interactions[i].is_empty()
            {
                i += 1;
                continue;
            }
            if interactions[i].contains(plan[j1][j2].to_string().as_str()) {
                // Line matches the current baseline interaction: advance both cursors.
                i += 1;
                if j2 + 1 < plan[j1].len() {
                    j2 += 1;
                } else {
                    j1 += 1;
                    j2 = 0;
                }
            } else {
                // Baseline interaction is absent from the text: the user deleted it.
                plan[j1].remove(j2);
                if plan[j1].is_empty() {
                    plan.remove(j1);
                    j2 = 0;
                }
            }
        }
        // Any baseline entries past the last matched text line were deleted as well.
        if j1 < plan.len() {
            if j2 < plan[j1].len() {
                let _ = plan[j1].split_off(j2);
            }
            let _ = plan.split_off(j1);
        }
        plan
    }
}
pub(crate) struct InteractionPlanState {
pub(crate) stack: Vec<ResultSet>,
pub(crate) interaction_pointer: usize,
pub(crate) secondary_pointer: usize,
}
#[derive(Clone)]
#[derive(Clone, Serialize, Deserialize)]
pub(crate) enum Interactions {
Property(Property),
Query(Query),
@@ -108,12 +173,12 @@ impl Display for InteractionPlan {
match interaction {
Interaction::Query(query) => writeln!(f, "{};", query)?,
Interaction::Assumption(assumption) => {
writeln!(f, "-- ASSUME: {};", assumption.message)?
writeln!(f, "-- ASSUME {};", assumption.message)?
}
Interaction::Assertion(assertion) => {
writeln!(f, "-- ASSERT: {};", assertion.message)?
writeln!(f, "-- ASSERT {};", assertion.message)?
}
Interaction::Fault(fault) => writeln!(f, "-- FAULT: {};", fault)?,
Interaction::Fault(fault) => writeln!(f, "-- FAULT '{}';", fault)?,
}
}
writeln!(f, "-- end testing '{}'", name)?;
@@ -160,9 +225,9 @@ impl Display for Interaction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Query(query) => write!(f, "{}", query),
Self::Assumption(assumption) => write!(f, "ASSUME: {}", assumption.message),
Self::Assertion(assertion) => write!(f, "ASSERT: {}", assertion.message),
Self::Fault(fault) => write!(f, "FAULT: {}", fault),
Self::Assumption(assumption) => write!(f, "ASSUME {}", assumption.message),
Self::Assertion(assertion) => write!(f, "ASSERT {}", assertion.message),
Self::Fault(fault) => write!(f, "FAULT '{}'", fault),
}
}
}
@@ -178,7 +243,7 @@ pub(crate) struct Assertion {
pub(crate) message: String,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum Fault {
Disconnect,
}
@@ -195,6 +260,29 @@ impl Interactions {
pub(crate) fn shadow(&self, env: &mut SimulatorEnv) {
match self {
Interactions::Property(property) => {
match property {
Property::InsertSelect {
insert,
row_index: _,
queries,
select,
} => {
insert.shadow(env);
for query in queries {
query.shadow(env);
}
select.shadow(env);
}
Property::DoubleCreateFailure { create, queries } => {
if env.tables.iter().any(|t| t.name == create.table.name) {
return;
}
create.shadow(env);
for query in queries {
query.shadow(env);
}
}
}
for interaction in property.interactions() {
match interaction {
Interaction::Query(query) => match query {
@@ -220,23 +308,7 @@ impl Interactions {
}
}
}
Interactions::Query(query) => match query {
Query::Create(create) => {
if !env.tables.iter().any(|t| t.name == create.table.name) {
env.tables.push(create.table.clone());
}
}
Query::Insert(insert) => {
let table = env
.tables
.iter_mut()
.find(|t| t.name == insert.table)
.unwrap();
table.rows.extend(insert.values.clone());
}
Query::Delete(_) => todo!(),
Query::Select(_) => {}
},
Interactions::Query(query) => query.shadow(env),
Interactions::Fault(_) => {}
}
}
@@ -317,6 +389,14 @@ impl ArbitraryFrom<&mut SimulatorEnv> for InteractionPlan {
}
impl Interaction {
pub(crate) fn shadow(&self, env: &mut SimulatorEnv) {
match self {
Self::Query(query) => query.shadow(env),
Self::Assumption(_) => {}
Self::Assertion(_) => {}
Self::Fault(_) => {}
}
}
pub(crate) fn execute_query(&self, conn: &mut Rc<Connection>) -> ResultSet {
if let Self::Query(query) = self {
let query_str = query.to_string();
@@ -334,7 +414,7 @@ impl Interaction {
assert!(rows.is_some());
let mut rows = rows.unwrap();
let mut out = Vec::new();
while let Ok(row) = rows.next_row() {
while let Ok(row) = rows.step() {
match row {
StepResult::Row(row) => {
let mut r = Vec::new();

View File

@@ -1,4 +1,5 @@
use limbo_core::LimboError;
use serde::{Deserialize, Serialize};
use crate::{
model::{
@@ -16,7 +17,7 @@ use super::{
/// Properties are representations of executable specifications
/// about the database behavior.
#[derive(Clone)]
#[derive(Clone, Serialize, Deserialize)]
pub(crate) enum Property {
/// Insert-Select is a property in which the inserted row
/// must be in the resulting rows of a select query that has a
@@ -103,7 +104,6 @@ impl Property {
let assertion = Interaction::Assertion(Assertion {
message: format!(
// todo: add the part inserting ({} = {})",
"row [{:?}] not found in table {}",
row.iter().map(|v| v.to_string()).collect::<Vec<String>>(),
insert.table,

View File

@@ -1,20 +1,20 @@
#![allow(clippy::arc_with_non_send_sync, dead_code)]
use clap::Parser;
use core::panic;
use generation::plan::{InteractionPlan, InteractionPlanState};
use generation::plan::{Interaction, InteractionPlan, InteractionPlanState};
use generation::ArbitraryFrom;
use limbo_core::Database;
use notify::event::{DataChange, ModifyKind};
use notify::{EventKind, RecursiveMode, Watcher};
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
use runner::cli::SimulatorCLI;
use runner::env::{SimConnection, SimulatorEnv, SimulatorOpts};
use runner::env::SimulatorEnv;
use runner::execution::{execute_plans, Execution, ExecutionHistory, ExecutionResult};
use runner::io::SimulatorIO;
use runner::watch;
use std::any::Any;
use std::backtrace::Backtrace;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::sync::{mpsc, Arc, Mutex};
use tempfile::TempDir;
mod generation;
@@ -49,6 +49,10 @@ impl Paths {
log::info!("shrunk database path: {:?}", paths.shrunk_db);
}
log::info!("simulator plan path: {:?}", paths.plan);
log::info!(
"simulator plan serialized path: {:?}",
paths.plan.with_extension("plan.json")
);
if shrink {
log::info!("shrunk plan path: {:?}", paths.shrunk_plan);
}
@@ -77,7 +81,85 @@ fn main() -> Result<(), String> {
log::info!("seed: {}", seed);
let last_execution = Arc::new(Mutex::new(Execution::new(0, 0, 0)));
let (env, plans) = setup_simulation(seed, &cli_opts, &paths.db, &paths.plan);
if cli_opts.watch {
watch_mode(seed, &cli_opts, &paths, last_execution.clone()).unwrap();
} else {
run_simulator(seed, &cli_opts, &paths, env, plans, last_execution.clone());
}
Ok(())
}
/// Runs the simulator in watch mode: blocks on filesystem events for the plan
/// file and re-executes the (diffed) plan every time its contents change.
/// Returns only if the watcher setup fails or the event channel closes.
fn watch_mode(
    seed: u64,
    cli_opts: &SimulatorCLI,
    paths: &Paths,
    last_execution: Arc<Mutex<Execution>>,
) -> notify::Result<()> {
    let (tx, rx) = mpsc::channel::<notify::Result<notify::Event>>();

    println!("watching {:?}", paths.plan);

    // Use recommended_watcher() to automatically select the best implementation
    // for your platform. The `EventHandler` passed to this constructor can be a
    // closure, a `std::sync::mpsc::Sender`, a `crossbeam_channel::Sender`, or
    // another type the trait is implemented for.
    let mut watcher = notify::recommended_watcher(tx)?;

    // Add a path to be watched. All files and directories at that path and
    // below will be monitored for changes.
    watcher.watch(&paths.plan, RecursiveMode::NonRecursive)?;

    // Block forever, printing out events as they come in
    for res in rx {
        match res {
            Ok(event) => {
                // Only content changes matter; metadata/rename events are ignored.
                if let EventKind::Modify(ModifyKind::Data(DataChange::Content)) = event.kind {
                    log::info!("plan file modified, rerunning simulation");
                    // Sandbox the run so a panic in the simulation is reported
                    // instead of tearing down the watcher loop.
                    let result = SandboxedResult::from(
                        std::panic::catch_unwind(|| {
                            // Rebuild the plan from the edited file, then replay
                            // it against a fresh environment so shadow state
                            // matches the remaining interactions.
                            let plan: Vec<Vec<Interaction>> =
                                InteractionPlan::compute_via_diff(&paths.plan);
                            let mut env = SimulatorEnv::new(seed, cli_opts, &paths.db);
                            plan.iter().for_each(|is| {
                                is.iter().for_each(|i| {
                                    i.shadow(&mut env);
                                });
                            });
                            let env = Arc::new(Mutex::new(env.clone()));
                            watch::run_simulation(env, &mut [plan], last_execution.clone())
                        }),
                        last_execution.clone(),
                    );

                    match result {
                        SandboxedResult::Correct => {
                            log::info!("simulation succeeded");
                            println!("simulation succeeded");
                        }
                        SandboxedResult::Panicked { error, .. }
                        | SandboxedResult::FoundBug { error, .. } => {
                            log::error!("simulation failed: '{}'", error);
                            println!("simulation failed: '{}'", error);
                        }
                    }
                }
            }
            Err(e) => println!("watch error: {:?}", e),
        }
    }

    Ok(())
}
fn run_simulator(
seed: u64,
cli_opts: &SimulatorCLI,
paths: &Paths,
env: SimulatorEnv,
plans: Vec<InteractionPlan>,
last_execution: Arc<Mutex<Execution>>,
) {
std::panic::set_hook(Box::new(move |info| {
log::error!("panic occurred");
@@ -94,81 +176,22 @@ fn main() -> Result<(), String> {
log::error!("captured backtrace:\n{}", bt);
}));
let env = Arc::new(Mutex::new(env));
let result = SandboxedResult::from(
std::panic::catch_unwind(|| {
run_simulation(
seed,
&cli_opts,
&paths.db,
&paths.plan,
last_execution.clone(),
None,
)
run_simulation(env.clone(), &mut plans.clone(), last_execution.clone())
}),
last_execution.clone(),
);
if cli_opts.doublecheck {
// Run the simulation again
let result2 = SandboxedResult::from(
std::panic::catch_unwind(|| {
run_simulation(
seed,
&cli_opts,
&paths.doublecheck_db,
&paths.plan,
last_execution.clone(),
None,
)
}),
last_execution.clone(),
);
match (result, result2) {
(SandboxedResult::Correct, SandboxedResult::Panicked { .. }) => {
log::error!("doublecheck failed! first run succeeded, but second run panicked.");
}
(SandboxedResult::FoundBug { .. }, SandboxedResult::Panicked { .. }) => {
log::error!(
"doublecheck failed! first run failed an assertion, but second run panicked."
);
}
(SandboxedResult::Panicked { .. }, SandboxedResult::Correct) => {
log::error!("doublecheck failed! first run panicked, but second run succeeded.");
}
(SandboxedResult::Panicked { .. }, SandboxedResult::FoundBug { .. }) => {
log::error!(
"doublecheck failed! first run panicked, but second run failed an assertion."
);
}
(SandboxedResult::Correct, SandboxedResult::FoundBug { .. }) => {
log::error!(
"doublecheck failed! first run succeeded, but second run failed an assertion."
);
}
(SandboxedResult::FoundBug { .. }, SandboxedResult::Correct) => {
log::error!(
"doublecheck failed! first run failed an assertion, but second run succeeded."
);
}
(SandboxedResult::Correct, SandboxedResult::Correct)
| (SandboxedResult::FoundBug { .. }, SandboxedResult::FoundBug { .. })
| (SandboxedResult::Panicked { .. }, SandboxedResult::Panicked { .. }) => {
// Compare the two database files byte by byte
let db_bytes = std::fs::read(&paths.db).unwrap();
let doublecheck_db_bytes = std::fs::read(&paths.doublecheck_db).unwrap();
if db_bytes != doublecheck_db_bytes {
log::error!("doublecheck failed! database files are different.");
} else {
log::info!("doublecheck succeeded! database files are the same.");
}
}
}
doublecheck(env.clone(), paths, &plans, last_execution.clone(), result);
} else {
// No doublecheck, run shrinking if panicking or found a bug.
match &result {
SandboxedResult::Correct => {
log::info!("simulation succeeded");
println!("simulation succeeded");
}
SandboxedResult::Panicked {
error,
@@ -196,21 +219,28 @@ fn main() -> Result<(), String> {
}
log::error!("simulation failed: '{}'", error);
println!("simulation failed: '{}'", error);
if cli_opts.shrink {
log::info!("Starting to shrink");
let shrink = Some(last_execution);
let shrunk_plans = plans
.iter()
.map(|plan| {
let shrunk = plan.shrink_interaction_plan(last_execution);
log::info!("{}", shrunk.stats());
shrunk
})
.collect::<Vec<_>>();
let last_execution = Arc::new(Mutex::new(*last_execution));
let shrunk = SandboxedResult::from(
std::panic::catch_unwind(|| {
run_simulation(
seed,
&cli_opts,
&paths.shrunk_db,
&paths.shrunk_plan,
env.clone(),
&mut shrunk_plans.clone(),
last_execution.clone(),
shrink,
)
}),
last_execution,
@@ -258,35 +288,78 @@ fn main() -> Result<(), String> {
println!("shrunk database path: {:?}", paths.shrunk_db);
}
println!("simulator plan path: {:?}", paths.plan);
println!(
"simulator plan serialized path: {:?}",
paths.plan.with_extension("plan.json")
);
if cli_opts.shrink {
println!("shrunk plan path: {:?}", paths.shrunk_plan);
}
println!("simulator history path: {:?}", paths.history);
println!("seed: {}", seed);
Ok(())
}
fn move_db_and_plan_files(output_dir: &Path) {
let old_db_path = output_dir.join("simulator.db");
let old_plan_path = output_dir.join("simulator.plan");
fn doublecheck(
env: Arc<Mutex<SimulatorEnv>>,
paths: &Paths,
plans: &[InteractionPlan],
last_execution: Arc<Mutex<Execution>>,
result: SandboxedResult,
) {
{
let mut env_ = env.lock().unwrap();
env_.db =
Database::open_file(env_.io.clone(), paths.doublecheck_db.to_str().unwrap()).unwrap();
}
let new_db_path = output_dir.join("simulator_double.db");
let new_plan_path = output_dir.join("simulator_double.plan");
// Run the simulation again
let result2 = SandboxedResult::from(
std::panic::catch_unwind(|| {
run_simulation(env.clone(), &mut plans.to_owned(), last_execution.clone())
}),
last_execution.clone(),
);
std::fs::rename(&old_db_path, &new_db_path).unwrap();
std::fs::rename(&old_plan_path, &new_plan_path).unwrap();
}
fn revert_db_and_plan_files(output_dir: &Path) {
let old_db_path = output_dir.join("simulator.db");
let old_plan_path = output_dir.join("simulator.plan");
let new_db_path = output_dir.join("simulator_double.db");
let new_plan_path = output_dir.join("simulator_double.plan");
std::fs::rename(&new_db_path, &old_db_path).unwrap();
std::fs::rename(&new_plan_path, &old_plan_path).unwrap();
match (result, result2) {
(SandboxedResult::Correct, SandboxedResult::Panicked { .. }) => {
log::error!("doublecheck failed! first run succeeded, but second run panicked.");
}
(SandboxedResult::FoundBug { .. }, SandboxedResult::Panicked { .. }) => {
log::error!(
"doublecheck failed! first run failed an assertion, but second run panicked."
);
}
(SandboxedResult::Panicked { .. }, SandboxedResult::Correct) => {
log::error!("doublecheck failed! first run panicked, but second run succeeded.");
}
(SandboxedResult::Panicked { .. }, SandboxedResult::FoundBug { .. }) => {
log::error!(
"doublecheck failed! first run panicked, but second run failed an assertion."
);
}
(SandboxedResult::Correct, SandboxedResult::FoundBug { .. }) => {
log::error!(
"doublecheck failed! first run succeeded, but second run failed an assertion."
);
}
(SandboxedResult::FoundBug { .. }, SandboxedResult::Correct) => {
log::error!(
"doublecheck failed! first run failed an assertion, but second run succeeded."
);
}
(SandboxedResult::Correct, SandboxedResult::Correct)
| (SandboxedResult::FoundBug { .. }, SandboxedResult::FoundBug { .. })
| (SandboxedResult::Panicked { .. }, SandboxedResult::Panicked { .. }) => {
// Compare the two database files byte by byte
let db_bytes = std::fs::read(&paths.db).unwrap();
let doublecheck_db_bytes = std::fs::read(&paths.doublecheck_db).unwrap();
if db_bytes != doublecheck_db_bytes {
log::error!("doublecheck failed! database files are different.");
} else {
log::info!("doublecheck succeeded! database files are the same.");
}
}
}
}
#[derive(Debug)]
@@ -342,67 +415,64 @@ impl SandboxedResult {
}
}
fn run_simulation(
seed: u64,
/// Prepare a simulator run: resolve the effective seed, build the simulator
/// environment, and obtain the interaction plans — either loaded from a saved
/// JSON plan (`--load`) or freshly generated — then persist the human-readable
/// plan, its JSON serialization, and the seed next to `plan_path`.
///
/// Returns the environment together with one plan per connection.
/// Panics on any file I/O or (de)serialization failure.
fn setup_simulation(
    mut seed: u64,
    cli_opts: &SimulatorCLI,
    db_path: &Path,
    plan_path: &Path,
) -> (SimulatorEnv, Vec<InteractionPlan>) {
    // When loading a saved plan, the seed recorded alongside it (in the
    // matching `.seed` file) overrides the seed passed in, so the environment
    // is rebuilt exactly as it was for the recorded run.
    if let Some(load) = &cli_opts.load {
        let seed_path = PathBuf::from(load).with_extension("seed");
        let seed_str = std::fs::read_to_string(&seed_path).unwrap();
        seed = seed_str.parse().unwrap();
    }
    let mut env = SimulatorEnv::new(seed, cli_opts, db_path);
    // todo: the loading works correctly because of a hacky decision
    // Right now, the plan generation is the only point we use the rng, so the environment doesn't
    // even need it. In the future, especially with multi-connections and multi-threading, we might
    // use the RNG for more things such as scheduling, so this assumption will fail. When that happens,
    // we'll need to rearchitect this logic by saving and loading RNG state.
    let plans = if let Some(load) = &cli_opts.load {
        log::info!("Loading database interaction plan...");
        let plan = std::fs::read_to_string(load).unwrap();
        let plan: InteractionPlan = serde_json::from_str(&plan).unwrap();
        vec![plan]
    } else {
        log::info!("Generating database interaction plan...");
        (1..=env.opts.max_connections)
            .map(|_| InteractionPlan::arbitrary_from(&mut env.rng.clone(), &mut env))
            .collect::<Vec<_>>()
    };
    // todo: for now, we only use 1 connection, so it's safe to use the first plan.
    let plan = plans[0].clone();
    // Human-readable rendering of the plan.
    let mut f = std::fs::File::create(plan_path).unwrap();
    // todo: create a detailed plan file with all the plans. for now, we only use 1 connection, so it's safe to use the first plan.
    f.write_all(plan.to_string().as_bytes()).unwrap();
    // Machine-readable JSON copy of the plan (`<plan>.plan.json`).
    let serialized_plan_path = plan_path.with_extension("plan.json");
    let mut f = std::fs::File::create(&serialized_plan_path).unwrap();
    f.write_all(serde_json::to_string(&plan).unwrap().as_bytes())
        .unwrap();
    // Seed file so a later `--load` can reconstruct the same environment.
    let seed_path = plan_path.with_extension("seed");
    let mut f = std::fs::File::create(&seed_path).unwrap();
    f.write_all(seed.to_string().as_bytes()).unwrap();
    log::info!("{}", plan.stats());
    (env, plans)
}
fn run_simulation(
env: Arc<Mutex<SimulatorEnv>>,
plans: &mut [InteractionPlan],
last_execution: Arc<Mutex<Execution>>,
shrink: Option<&Execution>,
) -> ExecutionResult {
let mut rng = ChaCha8Rng::seed_from_u64(seed);
log::info!("Executing database interaction plan...");
let (create_percent, read_percent, write_percent, delete_percent) = {
let mut remaining = 100.0;
let read_percent = rng.gen_range(0.0..=remaining);
remaining -= read_percent;
let write_percent = rng.gen_range(0.0..=remaining);
remaining -= write_percent;
let delete_percent = remaining;
let create_percent = write_percent / 10.0;
let write_percent = write_percent - create_percent;
(create_percent, read_percent, write_percent, delete_percent)
};
let opts = SimulatorOpts {
ticks: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size),
max_connections: 1, // TODO: for now let's use one connection as we didn't implement
// correct transactions procesing
max_tables: rng.gen_range(0..128),
create_percent,
read_percent,
write_percent,
delete_percent,
page_size: 4096, // TODO: randomize this too
max_interactions: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size),
max_time_simulation: cli_opts.maximum_time,
};
let io = Arc::new(SimulatorIO::new(seed, opts.page_size).unwrap());
let db = match Database::open_file(io.clone(), db_path.to_str().unwrap()) {
Ok(db) => db,
Err(e) => {
panic!("error opening simulator test file {:?}: {:?}", db_path, e);
}
};
let connections = vec![SimConnection::Disconnected; opts.max_connections];
let mut env = SimulatorEnv {
opts,
tables: Vec::new(),
connections,
rng,
io,
db,
};
log::info!("Generating database interaction plan...");
let mut plans = (1..=env.opts.max_connections)
.map(|_| InteractionPlan::arbitrary_from(&mut env.rng.clone(), &mut env))
.collect::<Vec<_>>();
let mut states = plans
.iter()
.map(|_| InteractionPlanState {
@@ -411,27 +481,9 @@ fn run_simulation(
secondary_pointer: 0,
})
.collect::<Vec<_>>();
let result = execute_plans(env.clone(), plans, &mut states, last_execution);
let plan = if let Some(failing_execution) = shrink {
// todo: for now, we only use 1 connection, so it's safe to use the first plan.
println!("Interactions Before: {}", plans[0].plan.len());
let shrunk = plans[0].shrink_interaction_plan(failing_execution);
println!("Interactions After: {}", shrunk.plan.len());
shrunk
} else {
plans[0].clone()
};
let mut f = std::fs::File::create(plan_path).unwrap();
// todo: create a detailed plan file with all the plans. for now, we only use 1 connection, so it's safe to use the first plan.
f.write_all(plan.to_string().as_bytes()).unwrap();
log::info!("{}", plan.stats());
log::info!("Executing database interaction plan...");
let result = execute_plans(&mut env, &mut plans, &mut states, last_execution);
let env = env.lock().unwrap();
env.io.print_stats();
log::info!("Simulation completed");

View File

@@ -1,8 +1,13 @@
use std::fmt::Display;
use crate::model::table::{Table, Value};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq)]
use crate::{
model::table::{Table, Value},
runner::env::SimulatorEnv,
};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub(crate) enum Predicate {
And(Vec<Predicate>), // p1 AND p2 AND p3... AND pn
Or(Vec<Predicate>), // p1 OR p2 OR p3... OR pn
@@ -83,7 +88,7 @@ impl Display for Predicate {
}
// This type represents the potential queries on the database.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum Query {
Create(Create),
Select(Select),
@@ -108,30 +113,65 @@ impl Query {
| Query::Delete(Delete { table, .. }) => vec![table.clone()],
}
}
pub(crate) fn shadow(&self, env: &mut SimulatorEnv) {
match self {
Query::Create(create) => create.shadow(env),
Query::Insert(insert) => insert.shadow(env),
Query::Delete(delete) => delete.shadow(env),
Query::Select(select) => select.shadow(env),
}
}
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
/// `CREATE TABLE` query: carries the full definition of the table to create.
pub(crate) struct Create {
    pub(crate) table: Table,
}
#[derive(Clone, Debug, PartialEq)]
impl Create {
    /// Mirror this `CREATE TABLE` in the in-memory shadow model: register the
    /// table unless one with the same name is already tracked.
    pub(crate) fn shadow(&self, env: &mut SimulatorEnv) {
        let is_new = env
            .tables
            .iter()
            .all(|existing| existing.name != self.table.name);
        if is_new {
            env.tables.push(self.table.clone());
        }
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
/// `SELECT` query: rows of `table` filtered by `predicate`.
pub(crate) struct Select {
    pub(crate) table: String,
    pub(crate) predicate: Predicate,
}
#[derive(Clone, Debug, PartialEq)]
impl Select {
    /// `SELECT` is read-only, so it leaves the shadow model untouched.
    pub(crate) fn shadow(&self, _env: &mut SimulatorEnv) {}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
/// `INSERT` query: each inner `Vec<Value>` is one row to insert into `table`.
pub(crate) struct Insert {
    pub(crate) table: String,
    pub(crate) values: Vec<Vec<Value>>,
}
#[derive(Clone, Debug, PartialEq)]
impl Insert {
    /// Mirror this `INSERT` in the in-memory shadow model by appending the new
    /// rows to the matching shadow table. Inserting into an unknown table is a
    /// no-op.
    pub(crate) fn shadow(&self, env: &mut SimulatorEnv) {
        for table in env.tables.iter_mut() {
            if table.name == self.table {
                table.rows.extend(self.values.iter().cloned());
                break;
            }
        }
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
/// `DELETE` query: removes rows of `table` matching `predicate`.
pub(crate) struct Delete {
    pub(crate) table: String,
    pub(crate) predicate: Predicate,
}
impl Delete {
    /// Shadowing `DELETE` in the in-memory model is not implemented yet; any
    /// plan that reaches this panics via `todo!()`.
    pub(crate) fn shadow(&self, _env: &mut SimulatorEnv) {
        todo!()
    }
}
impl Display for Query {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {

View File

@@ -1,5 +1,7 @@
use std::{fmt::Display, ops::Deref};
use serde::{Deserialize, Serialize};
pub(crate) struct Name(pub(crate) String);
impl Deref for Name {
@@ -10,14 +12,14 @@ impl Deref for Name {
}
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
/// In-memory shadow of a SQL table: its name, column definitions, and the
/// rows currently expected to be in it.
pub(crate) struct Table {
    pub(crate) rows: Vec<Vec<Value>>,
    pub(crate) name: String,
    pub(crate) columns: Vec<Column>,
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct Column {
pub(crate) name: String,
pub(crate) column_type: ColumnType,
@@ -25,7 +27,7 @@ pub(crate) struct Column {
pub(crate) unique: bool,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum ColumnType {
Integer,
Float,
@@ -44,10 +46,30 @@ impl Display for ColumnType {
}
}
#[derive(Clone, Debug, PartialEq)]
/// Serialize an `f64` as its decimal string form (see the matching
/// `string_to_float` deserializer).
fn float_to_string<S>(float: &f64, serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    // `collect_str` emits the value's `Display` output as a string — the same
    // result as `serialize_str(&format!("{}", float))`, without building the
    // intermediate `String`.
    serializer.collect_str(float)
}
fn string_to_float<'de, D>(deserializer: D) -> Result<f64, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
s.parse().map_err(serde::de::Error::custom)
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub(crate) enum Value {
Null,
Integer(i64),
// we use custom serialization to preserve float precision
#[serde(
serialize_with = "float_to_string",
deserialize_with = "string_to_float"
)]
Float(f64),
Text(String),
Blob(Vec<u8>),

View File

@@ -41,6 +41,14 @@ pub struct SimulatorCLI {
help = "minimize(shrink) the failing counterexample"
)]
pub shrink: bool,
#[clap(short = 'l', long, help = "load plan from a file")]
pub load: Option<String>,
#[clap(
short = 'w',
long,
help = "enable watch mode that reruns the simulation on file changes"
)]
pub watch: bool,
}
impl SimulatorCLI {
@@ -51,9 +59,21 @@ impl SimulatorCLI {
if self.maximum_size < 1 {
return Err("maximum size must be at least 1".to_string());
}
// todo: fix an issue here where if minimum size is not defined, it prevents setting low maximum sizes.
if self.minimum_size > self.maximum_size {
return Err("Minimum size cannot be greater than maximum size".to_string());
}
// Make sure uncompatible options are not set
if self.shrink && self.doublecheck {
return Err("Cannot use shrink and doublecheck at the same time".to_string());
}
if let Some(plan_path) = &self.load {
std::fs::File::open(plan_path)
.map_err(|_| format!("Plan file '{}' could not be opened", plan_path))?;
}
Ok(())
}
}

View File

@@ -1,13 +1,18 @@
use std::path::Path;
use std::rc::Rc;
use std::sync::Arc;
use limbo_core::{Connection, Database};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use crate::model::table::Table;
use crate::runner::io::SimulatorIO;
use super::cli::SimulatorCLI;
#[derive(Clone)]
pub(crate) struct SimulatorEnv {
pub(crate) opts: SimulatorOpts,
pub(crate) tables: Vec<Table>,
@@ -17,6 +22,65 @@ pub(crate) struct SimulatorEnv {
pub(crate) rng: ChaCha8Rng,
}
impl SimulatorEnv {
    /// Build a fresh simulator environment from a deterministic seed.
    ///
    /// The workload mix (create/read/write/delete percentages), tick budget,
    /// table budget and interaction budget are all drawn from a ChaCha8 RNG
    /// seeded with `seed`, so the same seed reproduces the same configuration.
    /// Any existing database file at `db_path` is deleted first.
    ///
    /// Panics if the simulated IO cannot be created, or if the database file
    /// cannot be removed or opened.
    pub(crate) fn new(seed: u64, cli_opts: &SimulatorCLI, db_path: &Path) -> Self {
        let mut rng = ChaCha8Rng::seed_from_u64(seed);
        // Split 100% into read/write/delete shares, then carve the create
        // share out of the write share (one tenth of it).
        let (create_percent, read_percent, write_percent, delete_percent) = {
            let mut remaining = 100.0;
            let read_percent = rng.gen_range(0.0..=remaining);
            remaining -= read_percent;
            let write_percent = rng.gen_range(0.0..=remaining);
            remaining -= write_percent;
            let delete_percent = remaining;
            let create_percent = write_percent / 10.0;
            let write_percent = write_percent - create_percent;
            (create_percent, read_percent, write_percent, delete_percent)
        };
        let opts = SimulatorOpts {
            ticks: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size),
            max_connections: 1, // TODO: for now let's use one connection as we didn't implement
            // correct transactions procesing
            max_tables: rng.gen_range(0..128),
            create_percent,
            read_percent,
            write_percent,
            delete_percent,
            page_size: 4096, // TODO: randomize this too
            max_interactions: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size),
            max_time_simulation: cli_opts.maximum_time,
        };
        let io = Arc::new(SimulatorIO::new(seed, opts.page_size).unwrap());
        // Remove existing database file if it exists
        if db_path.exists() {
            std::fs::remove_file(db_path).unwrap();
        }
        let db = match Database::open_file(io.clone(), db_path.to_str().unwrap()) {
            Ok(db) => db,
            Err(e) => {
                panic!("error opening simulator test file {:?}: {:?}", db_path, e);
            }
        };
        // Every connection starts in the Disconnected state.
        let connections = vec![SimConnection::Disconnected; opts.max_connections];
        SimulatorEnv {
            opts,
            tables: Vec::new(),
            connections,
            rng,
            io,
            db,
        }
    }
}
#[derive(Clone)]
pub(crate) enum SimConnection {
Connected(Rc<Connection>),

View File

@@ -36,7 +36,7 @@ pub(crate) struct ExecutionHistory {
}
impl ExecutionHistory {
fn new() -> Self {
pub(crate) fn new() -> Self {
Self {
history: Vec::new(),
}
@@ -49,19 +49,20 @@ pub(crate) struct ExecutionResult {
}
impl ExecutionResult {
fn new(history: ExecutionHistory, error: Option<LimboError>) -> Self {
pub(crate) fn new(history: ExecutionHistory, error: Option<LimboError>) -> Self {
Self { history, error }
}
}
pub(crate) fn execute_plans(
env: &mut SimulatorEnv,
env: Arc<Mutex<SimulatorEnv>>,
plans: &mut [InteractionPlan],
states: &mut [InteractionPlanState],
last_execution: Arc<Mutex<Execution>>,
) -> ExecutionResult {
let mut history = ExecutionHistory::new();
let now = std::time::Instant::now();
let mut env = env.lock().unwrap();
for _tick in 0..env.opts.ticks {
// Pick the connection to interact with
let connection_index = pick_index(env.connections.len(), &mut env.rng);
@@ -77,7 +78,7 @@ pub(crate) fn execute_plans(
last_execution.interaction_index = state.interaction_pointer;
last_execution.secondary_index = state.secondary_pointer;
// Execute the interaction for the selected connection
match execute_plan(env, connection_index, plans, states) {
match execute_plan(&mut env, connection_index, plans, states) {
Ok(_) => {}
Err(err) => {
return ExecutionResult::new(history, Some(err));
@@ -155,14 +156,14 @@ fn execute_plan(
/// `execute_interaction` uses this type in conjunction with a result, where
/// the `Err` case indicates a full-stop due to a bug, and the `Ok` case
/// indicates the next step in the plan.
enum ExecutionContinuation {
pub(crate) enum ExecutionContinuation {
/// Default continuation, execute the next interaction.
NextInteraction,
/// Typically used in the case of preconditions failures, skip to the next property.
NextProperty,
}
fn execute_interaction(
pub(crate) fn execute_interaction(
env: &mut SimulatorEnv,
connection_index: usize,
interaction: &Interaction,

View File

@@ -4,3 +4,4 @@ pub mod execution;
#[allow(dead_code)]
pub mod file;
pub mod io;
pub mod watch;

133
simulator/runner/watch.rs Normal file
View File

@@ -0,0 +1,133 @@
use std::sync::{Arc, Mutex};
use crate::{
generation::{
pick_index,
plan::{Interaction, InteractionPlanState},
},
runner::execution::ExecutionContinuation,
};
use super::{
env::{SimConnection, SimulatorEnv},
execution::{execute_interaction, Execution, ExecutionHistory, ExecutionResult},
};
pub(crate) fn run_simulation(
env: Arc<Mutex<SimulatorEnv>>,
plans: &mut [Vec<Vec<Interaction>>],
last_execution: Arc<Mutex<Execution>>,
) -> ExecutionResult {
let mut states = plans
.iter()
.map(|_| InteractionPlanState {
stack: vec![],
interaction_pointer: 0,
secondary_pointer: 0,
})
.collect::<Vec<_>>();
let result = execute_plans(env.clone(), plans, &mut states, last_execution);
let env = env.lock().unwrap();
env.io.print_stats();
log::info!("Simulation completed");
result
}
/// Drive the simulation loop: each tick picks a random connection and executes
/// its next interaction, until all ticks are spent, an interaction fails, or
/// the wall-clock budget runs out.
pub(crate) fn execute_plans(
    env: Arc<Mutex<SimulatorEnv>>,
    plans: &mut [Vec<Vec<Interaction>>],
    states: &mut [InteractionPlanState],
    last_execution: Arc<Mutex<Execution>>,
) -> ExecutionResult {
    let mut history = ExecutionHistory::new();
    let now = std::time::Instant::now();
    // The environment guard is held for the whole run; `last_execution` is
    // locked per tick (the guard drops at the end of each iteration).
    let mut env = env.lock().unwrap();
    for _tick in 0..env.opts.ticks {
        // Pick the connection to interact with
        let connection_index = pick_index(env.connections.len(), &mut env.rng);
        let state = &mut states[connection_index];
        history.history.push(Execution::new(
            connection_index,
            state.interaction_pointer,
            state.secondary_pointer,
        ));
        // Record where we are before executing, so a crash can be mapped back
        // to the exact interaction that was in flight.
        let mut last_execution = last_execution.lock().unwrap();
        last_execution.connection_index = connection_index;
        last_execution.interaction_index = state.interaction_pointer;
        last_execution.secondary_index = state.secondary_pointer;
        // Execute the interaction for the selected connection
        match execute_plan(&mut env, connection_index, plans, states) {
            Ok(_) => {}
            Err(err) => {
                // Stop at the first failing interaction; the history gathered
                // so far is returned with the error.
                return ExecutionResult::new(history, Some(err));
            }
        }
        // Check if the maximum time for the simulation has been reached
        if now.elapsed().as_secs() >= env.opts.max_time_simulation as u64 {
            return ExecutionResult::new(
                history,
                Some(limbo_core::LimboError::InternalError(
                    "maximum time for simulation reached".into(),
                )),
            );
        }
    }
    ExecutionResult::new(history, None)
}
/// Execute the next pending interaction for one connection, advancing that
/// connection's interaction/secondary pointers on success.
///
/// `interaction_pointer` indexes the property (outer Vec); `secondary_pointer`
/// indexes the interaction within that property (inner Vec).
fn execute_plan(
    env: &mut SimulatorEnv,
    connection_index: usize,
    plans: &mut [Vec<Vec<Interaction>>],
    states: &mut [InteractionPlanState],
) -> limbo_core::Result<()> {
    let connection = &env.connections[connection_index];
    let plan = &mut plans[connection_index];
    let state = &mut states[connection_index];
    // Plan fully consumed for this connection: nothing to do this tick.
    if state.interaction_pointer >= plan.len() {
        return Ok(());
    }
    let interaction = &plan[state.interaction_pointer][state.secondary_pointer];
    if let SimConnection::Disconnected = connection {
        // This tick is spent connecting; the pointers are not advanced, so the
        // pending interaction runs on a later tick.
        log::info!("connecting {}", connection_index);
        env.connections[connection_index] = SimConnection::Connected(env.db.connect());
    } else {
        match execute_interaction(env, connection_index, interaction, &mut state.stack) {
            Ok(next_execution) => {
                log::debug!("connection {} processed", connection_index);
                // Move to the next interaction or property
                match next_execution {
                    ExecutionContinuation::NextInteraction => {
                        if state.secondary_pointer + 1 >= plan[state.interaction_pointer].len() {
                            // If we have reached the end of the interactions for this property, move to the next property
                            state.interaction_pointer += 1;
                            state.secondary_pointer = 0;
                        } else {
                            // Otherwise, move to the next interaction
                            state.secondary_pointer += 1;
                        }
                    }
                    ExecutionContinuation::NextProperty => {
                        // Skip to the next property
                        state.interaction_pointer += 1;
                        state.secondary_pointer = 0;
                    }
                }
            }
            Err(err) => {
                log::error!("error {}", err);
                return Err(err);
            }
        }
    }
    Ok(())
}

View File

@@ -544,3 +544,35 @@ do_execsql_test json_from_json_object {
#do_execsql_test json_object_duplicated_keys {
# SELECT json_object('key', 'value', 'key', 'value2');
#} {{{"key":"value2"}}}
#
do_execsql_test json_valid_1 {
    SELECT json_valid('{"a":55,"b":72}');
} {1}

do_execsql_test json_valid_2 {
    SELECT json_valid('["a",55,"b",72]');
} {1}

do_execsql_test json_valid_3 {
    SELECT json_valid( CAST('{"a":1}' AS BLOB) );
} {1}

do_execsql_test json_valid_4 {
    SELECT json_valid(123);
} {1}

do_execsql_test json_valid_5 {
    SELECT json_valid(12.3);
} {1}

do_execsql_test json_valid_6 {
    SELECT json_valid('not a valid json');
} {0}

do_execsql_test json_valid_7 {
    SELECT json_valid('{"a":"55,"b":72}');
} {0}

do_execsql_test json_valid_8 {
    SELECT json_valid('{"a":55 "b":72}');
} {0}

# Renamed from a duplicate "json_valid_3": a second test with the same name
# would shadow the first in the harness output.
do_execsql_test json_valid_10 {
    SELECT json_valid( CAST('{"a":"1}' AS BLOB) );
} {0}

do_execsql_test json_valid_9 {
    SELECT json_valid(NULL);
} {}

View File

@@ -443,3 +443,140 @@ do_execsql_test julianday-time-only {
# SELECT julianday('2023-05-18');
#} {2460082.5}
# Strftime tests
set FMT {%d,%e,%f,%F,%G,%g,%H,%I,%j,%J,%k,%l,%i,%m,%M,%p,%P,%R,%s,%S,%T,%U,%u,%V,%w,%W,%Y,%%}

do_execsql_test strftime-day {
    SELECT strftime('%d', '2025-01-23T13:10:30.567');
} {23}

do_execsql_test strftime-day-without-leading-zero-1 {
    SELECT strftime('%e', '2025-01-23T13:10:30.567');
} {23}

do_execsql_test strftime-day-without-leading-zero-2 {
    SELECT strftime('%e', '2025-01-02T13:10:30.567');
} {" 2"}
# Note: the leading space above is intentional — SQLite pads %e with a space
# instead of a leading zero.

do_execsql_test strftime-fractional-seconds {
    SELECT strftime('%f', '2025-01-02T13:10:30.567');
} {30.567}

do_execsql_test strftime-iso-8601-date {
    SELECT strftime('%F', '2025-01-23T13:10:30.567');
} {2025-01-23}

do_execsql_test strftime-iso-8601-year {
    SELECT strftime('%G', '2025-01-23T13:10:30.567');
} {2025}

do_execsql_test strftime-iso-8601-year-2_digit {
    SELECT strftime('%g', '2025-01-23T13:10:30.567');
} {25}

do_execsql_test strftime-hour {
    SELECT strftime('%H', '2025-01-23T13:10:30.567');
} {13}

do_execsql_test strftime-hour-12-hour-clock {
    SELECT strftime('%I', '2025-01-23T13:10:30.567');
} {01}

do_execsql_test strftime-day-of-year {
    SELECT strftime('%j', '2025-01-23T13:10:30.567');
} {023}

do_execsql_test strftime-julianday {
    SELECT strftime('%J', '2025-01-23T13:10:30.567');
} {2460699.048964896}

do_execsql_test strftime-hour-without-leading-zero-1 {
    SELECT strftime('%k', '2025-01-23T13:10:30.567');
} {13}

do_execsql_test strftime-hour-without-leading-zero-2 {
    SELECT strftime('%k', '2025-01-23T02:10:30.567');
} {" 2"}

do_execsql_test strftime-hour-12-hour-clock-without-leading-zero-2 {
    SELECT strftime('%l', '2025-01-23T13:10:30.567');
} {" 1"}

do_execsql_test strftime-month {
    SELECT strftime('%m', '2025-01-23T13:10:30.567');
} {01}

do_execsql_test strftime-minute {
    SELECT strftime('%M', '2025-01-23T13:14:30.567');
} {14}

# Renamed from "strftime-am-pm=1" ('=' typo) for consistency with
# strftime-am-pm-2 below.
do_execsql_test strftime-am-pm-1 {
    SELECT strftime('%p', '2025-01-23T11:14:30.567');
} {AM}

do_execsql_test strftime-am-pm-2 {
    SELECT strftime('%p', '2025-01-23T13:14:30.567');
} {PM}

do_execsql_test strftime-am-pm-lower-1 {
    SELECT strftime('%P', '2025-01-23T11:14:30.567');
} {am}

do_execsql_test strftime-am-pm-lower-2 {
    SELECT strftime('%P', '2025-01-23T13:14:30.567');
} {pm}

do_execsql_test strftime-iso8601-time {
    SELECT strftime('%R', '2025-01-23T13:14:30.567');
} {13:14}

do_execsql_test strftime-seconds-since-epoch {
    SELECT strftime('%s', '2025-01-23T13:14:30.567');
} {1737638070}

do_execsql_test strftime-seconds {
    SELECT strftime('%S', '2025-01-23T13:14:30.567');
} {30}

do_execsql_test strftime-iso8601-with-seconds {
    SELECT strftime('%T', '2025-01-23T13:14:30.567');
} {13:14:30}

do_execsql_test strftime-week-year-start-sunday {
    SELECT strftime('%U', '2025-01-23T13:14:30.567');
} {03}

do_execsql_test strftime-day-week-start-monday {
    SELECT strftime('%u', '2025-01-23T13:14:30.567');
} {4}

do_execsql_test strftime-iso8601-week-year {
    SELECT strftime('%V', '2025-01-23T13:14:30.567');
} {04}

do_execsql_test strftime-day-week-start-sunday {
    SELECT strftime('%w', '2025-01-23T13:14:30.567');
} {4}

# Renamed from a duplicate "strftime-week-year-start-sunday": %W counts weeks
# starting on Monday, unlike %U above.
do_execsql_test strftime-week-year-start-monday {
    SELECT strftime('%W', '2025-01-23T13:14:30.567');
} {03}

do_execsql_test strftime-year {
    SELECT strftime('%Y', '2025-01-23T13:14:30.567');
} {2025}

do_execsql_test strftime-percent {
    SELECT strftime('%%', '2025-01-23T13:14:30.567');
} {%}

View File

@@ -67,3 +67,45 @@ pub(crate) fn compare_string(a: &String, b: &String) {
}
}
}
#[cfg(test)]
mod tests {
    use super::TempDatabase;

    /// Prepared statements must expose result-column names for SELECTs and an
    /// empty column list for statements that produce no rows.
    #[test]
    fn test_statement_columns() -> anyhow::Result<()> {
        let _ = env_logger::try_init();
        let tmp_db =
            TempDatabase::new("create table test (foo integer, bar integer, baz integer);");
        let conn = tmp_db.connect_limbo();

        // (sql, expected column names)
        let cases: &[(&str, &[&str])] = &[
            ("select * from test;", &["foo", "bar", "baz"]),
            ("select foo, bar from test;", &["foo", "bar"]),
            ("delete from test;", &[]),
            ("insert into test (foo, bar, baz) values (1, 2, 3);", &[]),
            ("delete from test where foo = 1", &[]),
        ];

        for (sql, expected) in cases {
            let stmt = conn.prepare(*sql)?;
            let columns = stmt.columns();
            assert_eq!(columns.len(), expected.len(), "column count for {}", sql);
            for (got, want) in columns.iter().zip(expected.iter()) {
                assert_eq!(got, want, "column name for {}", sql);
            }
        }
        Ok(())
    }
}

View File

@@ -11,7 +11,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
let mut insert_query = conn.query("INSERT INTO test_rowid (id, val) VALUES (NULL, 'test1')")?;
if let Some(ref mut rows) = insert_query {
loop {
match rows.next_row()? {
match rows.step()? {
StepResult::IO => {
tmp_db.io.run_once()?;
}
@@ -25,7 +25,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
let mut select_query = conn.query("SELECT last_insert_rowid()")?;
if let Some(ref mut rows) = select_query {
loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
if let Value::Integer(id) = row.values[0] {
assert_eq!(id, 1, "First insert should have rowid 1");
@@ -44,7 +44,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
// Test explicit rowid
match conn.query("INSERT INTO test_rowid (id, val) VALUES (5, 'test2')") {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::IO => {
tmp_db.io.run_once()?;
}
@@ -60,7 +60,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
let mut last_id = 0;
match conn.query("SELECT last_insert_rowid()") {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
if let Value::Integer(id) = row.values[0] {
last_id = id;

View File

@@ -20,7 +20,7 @@ fn test_simple_overflow_page() -> anyhow::Result<()> {
match conn.query(insert_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::IO => {
tmp_db.io.run_once()?;
}
@@ -39,7 +39,7 @@ fn test_simple_overflow_page() -> anyhow::Result<()> {
match conn.query(list_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
let first_value = &row.values[0];
let text = &row.values[1];
@@ -93,7 +93,7 @@ fn test_sequential_overflow_page() -> anyhow::Result<()> {
let insert_query = format!("INSERT INTO test VALUES ({}, '{}')", i, huge_text.as_str());
match conn.query(insert_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::IO => {
tmp_db.io.run_once()?;
}
@@ -112,7 +112,7 @@ fn test_sequential_overflow_page() -> anyhow::Result<()> {
let mut current_index = 0;
match conn.query(list_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
let first_value = &row.values[0];
let text = &row.values[1];
@@ -166,7 +166,7 @@ fn test_sequential_write() -> anyhow::Result<()> {
let insert_query = format!("INSERT INTO test VALUES ({})", i);
match conn.query(insert_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::IO => {
tmp_db.io.run_once()?;
}
@@ -183,7 +183,7 @@ fn test_sequential_write() -> anyhow::Result<()> {
let mut current_read_index = 0;
match conn.query(list_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
let first_value = row.values.first().expect("missing id");
let id = match first_value {
@@ -227,7 +227,7 @@ fn test_regression_multi_row_insert() -> anyhow::Result<()> {
match conn.query(insert_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::IO => {
tmp_db.io.run_once()?;
}
@@ -248,7 +248,7 @@ fn test_regression_multi_row_insert() -> anyhow::Result<()> {
let mut actual_ids = Vec::new();
match conn.query(list_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
let first_value = row.values.first().expect("missing id");
let id = match first_value {
@@ -334,7 +334,7 @@ fn test_wal_checkpoint() -> anyhow::Result<()> {
conn.checkpoint()?;
match conn.query(insert_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::IO => {
tmp_db.io.run_once()?;
}
@@ -355,7 +355,7 @@ fn test_wal_checkpoint() -> anyhow::Result<()> {
let mut current_index = 0;
match conn.query(list_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
let first_value = &row.values[0];
let id = match first_value {
@@ -394,7 +394,7 @@ fn test_wal_restart() -> anyhow::Result<()> {
let insert_query = format!("INSERT INTO test VALUES ({})", i);
match conn.query(insert_query) {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
match rows.step()? {
StepResult::IO => {
tmp_db.io.run_once()?;
}
@@ -418,7 +418,7 @@ fn test_wal_restart() -> anyhow::Result<()> {
loop {
if let Some(ref mut rows) = conn.query(list_query)? {
loop {
match rows.next_row()? {
match rows.step()? {
StepResult::Row(row) => {
let first_value = &row.values[0];
let count = match first_value {