Merge branch 'tursodatabase:main' into main

Alperen Keleş
2025-01-11 09:46:45 +03:00
committed by GitHub
68 changed files with 3971 additions and 1456 deletions

.github/workflows/java.yml (vendored, new file, 38 lines)

@@ -0,0 +1,38 @@
name: Java Tests
on:
push:
branches:
- main
tags:
- v*
pull_request:
branches:
- main
env:
working-directory: bindings/java
jobs:
test:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ env.working-directory }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust(stable)
uses: dtolnay/rust-toolchain@stable
- name: Set up JDK
uses: actions/setup-java@v3
with:
distribution: 'temurin'
java-version: '11'
- name: Run Java tests
run: make test


@@ -281,8 +281,8 @@ Feature support of [sqlite expr syntax](https://www.sqlite.org/lang_expr.html).
| jsonb_replace(json,path,value,...) | | |
| json_set(json,path,value,...) | | |
| jsonb_set(json,path,value,...) | | |
| json_type(json) | | |
| json_type(json,path) | | |
| json_type(json) | Yes | |
| json_type(json,path) | Yes | |
| json_valid(json) | | |
| json_valid(json,flags) | | |
| json_quote(value) | | |

Cargo.lock (generated, 15 lines)

@@ -1192,6 +1192,8 @@ dependencies = [
"js-sys",
"limbo_core",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]
[[package]]
@@ -2495,6 +2497,19 @@ dependencies = [
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-futures"
version = "0.4.49"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2"
dependencies = [
"cfg-if",
"js-sys",
"once_cell",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.99"


@@ -1,7 +1,7 @@
java_run: lib
export LIMBO_SYSTEM_PATH=../../target/debug && ./gradlew run
.PHONY: test build_test
.PHONY: lib
test: build_test
./gradlew test
lib:
cargo build
build_test:
CARGO_TARGET_DIR=src/test/resources/limbo cargo build


@@ -13,6 +13,7 @@ repositories {
dependencies {
testImplementation(platform("org.junit:junit-bom:5.10.0"))
testImplementation("org.junit.jupiter:junit-jupiter")
testImplementation("org.assertj:assertj-core:3.27.0")
}
application {
@@ -28,4 +29,6 @@ application {
tasks.test {
useJUnitPlatform()
// In order to find the Rust-built library under resources, we need to add its directory to the JVM library path
systemProperty("java.library.path", "${System.getProperty("java.library.path")}:$projectDir/src/test/resources/limbo/debug")
}
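For context, the test only works because that `systemProperty` line puts the Cargo output directory on `java.library.path`; a small illustrative probe of that lookup (the library name `_limbo_java` comes from the `System.loadLibrary` call in `LimboDB` below; the class here is hypothetical):
```
// Hypothetical probe: System.loadLibrary scans each java.library.path entry for the
// platform artifact (e.g. lib_limbo_java.so on Linux, lib_limbo_java.dylib on macOS).
public class NativePathProbe {
    public static void main(String[] args) {
        System.out.println("java.library.path = " + System.getProperty("java.library.path"));
        try {
            System.loadLibrary("_limbo_java");
            System.out.println("native library found");
        } catch (UnsatisfiedLinkError e) {
            System.out.println("not on the library path: " + e.getMessage());
        }
    }
}
```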


@@ -6,7 +6,7 @@ pub struct CustomError {
}
/// This struct defines error codes that correspond to the constants defined in the
/// Java package `org.github.tursodatabase.exceptions.ErrorCode`.
/// Java package `org.github.tursodatabase.LimboErrorCode`.
///
/// These error codes are used to handle and represent specific error conditions
/// that may occur within the Rust code and need to be communicated to the Java side.
@@ -14,8 +14,7 @@ pub struct CustomError {
pub struct ErrorCode;
impl ErrorCode {
pub const CONNECTION_FAILURE: i32 = -1;
// TODO: change CONNECTION_FAILURE_STATEMENT_IS_DML to appropriate error code number
pub const STATEMENT_IS_DML: i32 = -1;
}


@@ -1,66 +1,6 @@
mod connection;
mod cursor;
mod errors;
mod limbo_db;
mod macros;
mod utils;
use crate::connection::Connection;
use crate::errors::ErrorCode;
use jni::errors::JniError;
use jni::objects::{JClass, JString};
use jni::sys::jlong;
use jni::JNIEnv;
use std::sync::{Arc, Mutex};
/// Establishes a connection to the database specified by the given path.
///
/// This function is called from the Java side to create a connection to the database.
/// It returns a pointer to the `Connection` object, which can be used in subsequent
/// native function calls.
///
/// # Arguments
///
/// * `env` - The JNI environment pointer.
/// * `_class` - The Java class calling this function.
/// * `path` - A `JString` representing the path to the database file.
///
/// # Returns
///
/// A `jlong` representing the pointer to the newly created `Connection` object,
/// or [ErrorCode::CONNECTION_FAILURE] if the connection could not be established.
#[no_mangle]
pub extern "system" fn Java_org_github_tursodatabase_limbo_Limbo_connect<'local>(
mut env: JNIEnv<'local>,
_class: JClass<'local>,
path: JString<'local>,
) -> jlong {
connect_internal(&mut env, path).unwrap_or_else(|_| ErrorCode::CONNECTION_FAILURE as jlong)
}
#[allow(improper_ctypes_definitions, clippy::arc_with_non_send_sync)] // TODO: remove
fn connect_internal<'local>(
env: &mut JNIEnv<'local>,
path: JString<'local>,
) -> Result<jlong, JniError> {
let io = Arc::new(limbo_core::PlatformIO::new().map_err(|e| {
println!("IO initialization failed: {:?}", e);
JniError::Unknown
})?);
let path: String = env
.get_string(&path)
.expect("Failed to convert JString to Rust String")
.into();
let db = limbo_core::Database::open_file(io.clone(), &path).map_err(|e| {
println!("Failed to open database: {:?}", e);
JniError::Unknown
})?;
let conn = db.connect().clone();
let connection = Connection {
conn: Arc::new(Mutex::new(conn)),
io,
};
Ok(Box::into_raw(Box::new(connection)) as jlong)
}


@@ -0,0 +1,89 @@
use jni::objects::{JByteArray, JObject};
use jni::sys::{jint, jlong};
use jni::JNIEnv;
use limbo_core::Database;
use std::sync::Arc;
const ERROR_CODE_ETC: i32 = 9999;
#[no_mangle]
#[allow(clippy::arc_with_non_send_sync)]
pub extern "system" fn Java_org_github_tursodatabase_core_LimboDB__1open_1utf8<'local>(
mut env: JNIEnv<'local>,
obj: JObject<'local>,
file_name_byte_arr: JByteArray<'local>,
_open_flags: jint,
) -> jlong {
let io = match limbo_core::PlatformIO::new() {
Ok(io) => Arc::new(io),
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, ERROR_CODE_ETC, e.to_string());
return -1;
}
};
let path = match env
.convert_byte_array(file_name_byte_arr)
.map_err(|e| e.to_string())
{
Ok(bytes) => match String::from_utf8(bytes) {
Ok(s) => s,
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, ERROR_CODE_ETC, e.to_string());
return -1;
}
},
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, ERROR_CODE_ETC, e.to_string());
return -1;
}
};
let db = match Database::open_file(io.clone(), &path) {
Ok(db) => db,
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, ERROR_CODE_ETC, e.to_string());
return -1;
}
};
Box::into_raw(Box::new(db)) as jlong
}
#[no_mangle]
pub extern "system" fn Java_org_github_tursodatabase_core_LimboDB_throwJavaException<'local>(
mut env: JNIEnv<'local>,
obj: JObject<'local>,
error_code: jint,
) {
set_err_msg_and_throw_exception(
&mut env,
obj,
error_code,
"throw java exception".to_string(),
);
}
fn set_err_msg_and_throw_exception<'local>(
env: &mut JNIEnv<'local>,
obj: JObject<'local>,
err_code: i32,
err_msg: String,
) {
let error_message_bytes = env
.byte_array_from_slice(err_msg.as_bytes())
.expect("Failed to convert to byte array");
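// JNI descriptor "(I[B)V": an int error code plus a byte[] message, returning void.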
match env.call_method(
obj,
"throwLimboException",
"(I[B)V",
&[err_code.into(), (&error_message_bytes).into()],
) {
Ok(_) => {
// do nothing because the above method always returns Err
}
Err(_e) => {
// do nothing because our Java app will handle the Err
}
}
}


@@ -0,0 +1,34 @@
package org.github.tursodatabase;
public enum LimboErrorCode {
UNKNOWN_ERROR(-1, "Unknown error"),
ETC(9999, "Unclassified error");
public final int code;
public final String message;
/**
* @param code Error code
* @param message Message for the error.
*/
LimboErrorCode(int code, String message) {
this.code = code;
this.message = message;
}
public static LimboErrorCode getErrorCode(int errorCode) {
for (LimboErrorCode limboErrorCode: LimboErrorCode.values()) {
if (errorCode == limboErrorCode.code) return limboErrorCode;
}
return UNKNOWN_ERROR;
}
@Override
public String toString() {
return "LimboErrorCode{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
}
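A quick usage sketch of the lookup above (the codes are the ones defined in this enum; the variable names are illustrative):
```
LimboErrorCode known = LimboErrorCode.getErrorCode(9999);    // -> ETC
LimboErrorCode fallback = LimboErrorCode.getErrorCode(1234); // unmapped -> UNKNOWN_ERROR
```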


@@ -1,19 +0,0 @@
package org.github.tursodatabase;
import org.github.tursodatabase.limbo.Connection;
import org.github.tursodatabase.limbo.Cursor;
import org.github.tursodatabase.limbo.Limbo;
/**
* TODO: Remove Main class. We can use test code to verify behaviors.
*/
public class Main {
public static void main(String[] args) throws Exception {
Limbo limbo = Limbo.create();
Connection connection = limbo.getConnection("database.db");
Cursor cursor = connection.cursor();
cursor.execute("SELECT * FROM example_table;");
System.out.println("result: " + cursor.fetchOne());
}
}


@@ -0,0 +1,15 @@
package org.github.tursodatabase;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to mark methods that are called by native functions.
*/
@Retention(RetentionPolicy.SOURCE)
@Target(ElementType.METHOD)
public @interface NativeInvocation {
}


@@ -0,0 +1,14 @@
package org.github.tursodatabase;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to mark methods that use larger visibility for testing purposes.
*/
@Retention(RetentionPolicy.SOURCE)
@Target(ElementType.METHOD)
public @interface VisibleForTesting {
}


@@ -1,5 +1,9 @@
package org.github.tursodatabase.core;
import org.github.tursodatabase.LimboErrorCode;
import org.github.tursodatabase.NativeInvocation;
import org.github.tursodatabase.exceptions.LimboException;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -10,18 +14,14 @@ import java.util.concurrent.atomic.AtomicBoolean;
* are not only to provide functionality, but to handle contractual
* differences between the JDBC specification and the Limbo API.
*/
public abstract class DB {
public abstract class AbstractDB {
private final String url;
private final String fileName;
private final AtomicBoolean closed = new AtomicBoolean(true);
public DB(String url, String fileName) throws SQLException {
public AbstractDB(String url, String fileName) throws SQLException {
this.url = url;
this.fileName = fileName;
}
public String getUrl() {
return url;
this.fileName = fileName;
}
public boolean isClosed() {
@@ -36,7 +36,7 @@ public abstract class DB {
/**
* Executes an SQL statement.
*
* @param sql SQL statement to be executed.
* @param autoCommit Whether to auto-commit the transaction.
* @throws SQLException if a database access error occurs.
*/
@@ -47,17 +47,16 @@ public abstract class DB {
/**
* Creates an SQLite interface to a database for the given connection.
* @see <a href="https://www.sqlite.org/c3ref/c_open_autoproxy.html">SQLite Open Flags</a>
*
* @param fileName The database.
* @param openFlags Flags for opening the database.
* @throws SQLException if a database access error occurs.
*/
public final synchronized void open(String fileName, int openFlags) throws SQLException {
// TODO: add implementation
throw new SQLFeatureNotSupportedException();
public final synchronized void open(int openFlags) throws SQLException {
_open(fileName, openFlags);
}
protected abstract void _open(String fileName, int openFlags) throws SQLException;
/**
* Closes a database connection and finalizes any remaining statements before the closing
* operation.
@@ -95,13 +94,13 @@ public abstract class DB {
/**
* Creates an SQLite interface to a database with the provided open flags.
* @see <a href="https://www.sqlite.org/c3ref/c_open_autoproxy.html">SQLite Open Flags</a>
*
* @param filename The database to open.
* @param fileName The database to open.
* @param openFlags Flags for opening the database.
* @return pointer to database instance
* @throws SQLException if a database access error occurs.
*/
protected abstract void _open(String filename, int openFlags) throws SQLException;
protected abstract long _open_utf8(byte[] fileName, int openFlags) throws SQLException;
/**
* Closes the SQLite interface to a database.


@@ -1,65 +1,69 @@
package org.github.tursodatabase.core;
import org.github.tursodatabase.LimboErrorCode;
import org.github.tursodatabase.NativeInvocation;
import org.github.tursodatabase.VisibleForTesting;
import org.github.tursodatabase.exceptions.LimboException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
/**
* This class provides a thin JNI layer over the SQLite3 C API.
*/
public final class LimboDB extends DB {
/**
* SQLite connection handle.
*/
private long pointer = 0;
public final class LimboDB extends AbstractDB {
// Pointer to database instance
private long dbPtr;
private boolean isOpen;
private static boolean isLoaded;
private static boolean loadSucceeded;
static {
if ("The Android Project".equals(System.getProperty("java.vm.vendor"))) {
System.loadLibrary("sqlitejdbc");
isLoaded = true;
loadSucceeded = true;
// TODO
} else {
// continue with non Android execution path
isLoaded = false;
loadSucceeded = false;
}
}
// url example: "jdbc:sqlite:{fileName}
/**
* @param url e.g. "jdbc:sqlite:fileName"
* @param fileName e.g. path to file
*/
public static LimboDB create(String url, String fileName) throws SQLException {
return new LimboDB(url, fileName);
}
// TODO: receive config as argument
public LimboDB(String url, String fileName) throws SQLException {
private LimboDB(String url, String fileName) throws SQLException {
super(url, fileName);
}
/**
* Loads the SQLite interface backend.
*
* @return True if the SQLite JDBC driver is successfully loaded; false otherwise.
*/
public static boolean load() throws Exception {
if (isLoaded) return loadSucceeded;
public void load() {
if (isLoaded) return;
try {
System.loadLibrary("_limbo_java");
loadSucceeded = true;
} finally {
isLoaded = true;
}
return loadSucceeded;
}
// WRAPPER FUNCTIONS ////////////////////////////////////////////
@Override
protected synchronized void _open(String file, int openFlags) throws SQLException {
// TODO: add implementation
throw new SQLFeatureNotSupportedException();
}
// TODO: add support for JNI
synchronized native void _open_utf8(byte[] fileUtf8, int openFlags) throws SQLException;
@Override
protected synchronized native long _open_utf8(byte[] file, int openFlags) throws SQLException;
// TODO: add support for JNI
@Override
@@ -78,6 +82,15 @@ public final class LimboDB extends DB {
@Override
public native void interrupt();
@Override
protected void _open(String fileName, int openFlags) throws SQLException {
if (isOpen) {
throwLimboException(LimboErrorCode.UNKNOWN_ERROR.code, "Already opened");
}
dbPtr = _open_utf8(stringToUtf8ByteArray(fileName), openFlags);
isOpen = true;
}
@Override
protected synchronized SafeStmtPtr prepare(String sql) throws SQLException {
// TODO: add implementation
@@ -91,4 +104,52 @@ public final class LimboDB extends DB {
// TODO: add support for JNI
@Override
public synchronized native int step(long stmt);
@VisibleForTesting
native void throwJavaException(int errorCode) throws SQLException;
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessageBytes Error message.
*/
@NativeInvocation
private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException {
String errorMessage = utf8ByteBufferToString(errorMessageBytes);
throwLimboException(errorCode, errorMessage);
}
/**
* Throws formatted SQLException with error code and message.
*
* @param errorCode Error code.
* @param errorMessage Error message.
*/
public void throwLimboException(int errorCode, String errorMessage) throws SQLException {
LimboErrorCode code = LimboErrorCode.getErrorCode(errorCode);
String msg;
if (code == LimboErrorCode.UNKNOWN_ERROR) {
msg = String.format("%s:%s (%s)", code, errorCode, errorMessage);
} else {
msg = String.format("%s (%s)", code, errorMessage);
}
throw new LimboException(msg, code);
}
private static String utf8ByteBufferToString(byte[] buffer) {
if (buffer == null) {
return null;
}
return new String(buffer, StandardCharsets.UTF_8);
}
private static byte[] stringToUtf8ByteArray(String str) {
if (str == null) {
return null;
}
return str.getBytes(StandardCharsets.UTF_8);
}
}


@@ -0,0 +1,18 @@
package org.github.tursodatabase.exceptions;
import org.github.tursodatabase.LimboErrorCode;
import java.sql.SQLException;
public class LimboException extends SQLException {
private final LimboErrorCode resultCode;
public LimboException(String message, LimboErrorCode resultCode) {
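// The low byte of the Limbo error code is reported as the SQLException vendor code; SQLSTATE is left null.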
super(message, null, resultCode.code & 0xff);
this.resultCode = resultCode;
}
public LimboErrorCode getResultCode() {
return resultCode;
}
}


@@ -1,67 +0,0 @@
package org.github.tursodatabase.limbo;
import java.lang.Exception;
/**
* Represents a connection to the database.
* TODO: Deprecate classes under limbo package. We leave this source code for reference.
*/
public class Connection {
// Pointer to the connection object
private final long connectionPtr;
public Connection(long connectionPtr) {
this.connectionPtr = connectionPtr;
}
/**
* Creates a new cursor object using this connection.
*
* @return A new Cursor object.
* @throws Exception If the cursor cannot be created.
*/
public Cursor cursor() throws Exception {
long cursorId = cursor(connectionPtr);
return new Cursor(cursorId);
}
private native long cursor(long connectionPtr);
/**
* Closes the connection to the database.
*
* @throws Exception If there is an error closing the connection.
*/
public void close() throws Exception {
close(connectionPtr);
}
private native void close(long connectionPtr);
/**
* Commits the current transaction.
*
* @throws Exception If there is an error during commit.
*/
public void commit() throws Exception {
try {
commit(connectionPtr);
} catch (Exception e) {
System.out.println("caught exception: " + e);
}
}
private native void commit(long connectionPtr) throws Exception;
/**
* Rolls back the current transaction.
*
* @throws Exception If there is an error during rollback.
*/
public void rollback() throws Exception {
rollback(connectionPtr);
}
private native void rollback(long connectionPtr) throws Exception;
}


@@ -1,86 +0,0 @@
package org.github.tursodatabase.limbo;
/**
* Represents a database cursor.
* TODO: Deprecate classes under limbo package. We leave this source code for reference.
*/
public class Cursor {
private long cursorPtr;
public Cursor(long cursorPtr) {
this.cursorPtr = cursorPtr;
}
// TODO: support parameters
public Cursor execute(String sql) {
var result = execute(cursorPtr, sql);
System.out.println("resut: " + result);
return this;
}
private static native int execute(long cursorPtr, String sql);
public Object fetchOne() throws Exception {
Object result = fetchOne(cursorPtr);
return processSingleResult(result);
}
private static native Object fetchOne(long cursorPtr);
public Object fetchAll() throws Exception {
Object result = fetchAll(cursorPtr);
return processArrayResult(result);
}
private static native Object fetchAll(long cursorPtr);
private Object processSingleResult(Object result) throws Exception {
if (result instanceof Object[]) {
System.out.println("The result is of type: Object[]");
for (Object element : (Object[]) result) {
printElementType(element);
}
return result;
} else {
printElementType(result);
return result;
}
}
private Object processArrayResult(Object result) throws Exception {
if (result instanceof Object[][]) {
System.out.println("The result is of type: Object[][]");
Object[][] array = (Object[][]) result;
for (Object[] row : array) {
for (Object element : row) {
printElementType(element);
}
}
return array;
} else {
throw new Exception("result should be of type Object[][]. Maybe internal logic has error.");
}
}
private void printElementType(Object element) {
if (element instanceof String) {
System.out.println("String: " + element);
} else if (element instanceof Integer) {
System.out.println("Integer: " + element);
} else if (element instanceof Double) {
System.out.println("Double: " + element);
} else if (element instanceof Boolean) {
System.out.println("Boolean: " + element);
} else if (element instanceof Long) {
System.out.println("Long: " + element);
} else if (element instanceof byte[]) {
System.out.print("byte[]: ");
for (byte b : (byte[]) element) {
System.out.print(b + " ");
}
System.out.println();
} else {
System.out.println("Unknown type: " + element);
}
}
}


@@ -1,34 +0,0 @@
package org.github.tursodatabase.limbo;
import org.github.tursodatabase.exceptions.ErrorCode;
import java.lang.Exception;
/**
* TODO: Deprecate classes under limbo package. We leave this source code for reference.
*/
public class Limbo {
private static volatile boolean initialized;
private Limbo() {
if (!initialized) {
System.loadLibrary("_limbo_java");
initialized = true;
}
}
public static Limbo create() {
return new Limbo();
}
public Connection getConnection(String path) throws Exception {
long connectionId = connect(path);
if (connectionId == ErrorCode.CONNECTION_FAILURE) {
throw new Exception("Failed to initialize connection");
}
return new Connection(connectionId);
}
private static native long connect(String path);
}


@@ -0,0 +1,13 @@
package org.github.tursodatabase;
import java.io.IOException;
import java.nio.file.Files;
public class TestUtils {
/**
* Create temporary file and returns the path.
*/
public static String createTempFile() throws IOException {
return Files.createTempFile("limbo_test_db", null).toAbsolutePath().toString();
}
}


@@ -0,0 +1,48 @@
package org.github.tursodatabase.core;
import org.github.tursodatabase.LimboErrorCode;
import org.github.tursodatabase.TestUtils;
import org.github.tursodatabase.exceptions.LimboException;
import org.junit.jupiter.api.Test;
import java.sql.SQLException;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
public class LimboDBTest {
@Test
void db_should_open_normally() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB db = LimboDB.create("jdbc:sqlite" + dbPath, dbPath);
db.load();
db.open(0);
}
@Test
void should_throw_exception_when_opened_twice() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath);
db.load();
db.open(0);
assertThatThrownBy(() -> db.open(0)).isInstanceOf(SQLException.class);
}
@Test
void throwJavaException_should_throw_appropriate_java_exception() throws Exception {
String dbPath = TestUtils.createTempFile();
LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath);
db.load();
final int limboExceptionCode = LimboErrorCode.ETC.code;
try {
db.throwJavaException(limboExceptionCode);
} catch (Exception e) {
assertThat(e).isInstanceOf(LimboException.class);
LimboException limboException = (LimboException) e;
assertThat(limboException.getResultCode().code).isEqualTo(limboExceptionCode);
}
}
}

bindings/wasm/.gitignore (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
node_modules/
*.wasm


@@ -15,3 +15,10 @@ console_error_panic_hook = "0.1.7"
js-sys = "0.3.72"
limbo_core = { path = "../../core", default-features = false }
wasm-bindgen = "0.2"
wasm-bindgen-futures = "0.4"
web-sys = "0.3"
[features]
web = []
nodejs = []
default = ["nodejs"]


@@ -4,6 +4,39 @@ This source tree contains Limbo Wasm bindings.
## Building
For nodejs
```
./scripts/build
```
For web
```
./scripts/build web
```
## Browser Support
This adds experimental support for Limbo in the browser by supporting OPFS (the Origin Private File System) as a VFS.
To see a basic example, run `npm run dev`, navigate to `http://localhost:5173/limbo-opfs-test.html`, and open the console.
## Design
This design mirrors SQLite's approach to OPFS support. It has a sync API in `opfs.js` which communicates with `opfs-sync-proxy.js` via `SharedArrayBuffer` and `Atomics.wait`. This allows us to leave the VFS API in `lib.rs` unchanged.
You can see `limbo-opfs-test.html` for basic usage.
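Condensed, the round trip looks like this (names mirror `opfs.js` and `opfs-sync-proxy.js` below; the helper function is only illustrative, not an extra API):
```
// Minimal sketch of the sync-over-async pattern: `worker` is the sync-proxy Worker,
// `statusBuffer` the shared 8-byte SharedArrayBuffer (flag at byte 0, result at byte 4).
function blockingCall(worker, statusBuffer, message) {
  const status = new Int32Array(statusBuffer);
  Atomics.store(status, 0, 0);          // mark "no result yet"
  worker.postMessage(message);          // the proxy does the async OPFS work...
  Atomics.wait(status, 0, 0);           // ...while this worker thread sleeps
  return new DataView(statusBuffer).getInt32(4, true); // fd or byte count
}

// Proxy side, once its async work resolves:
//   statusView.setInt32(4, result, true);
//   Atomics.store(statusArray, 0, 1);
//   Atomics.notify(statusArray, 0);
```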
## UTs
There are OPFS-specific unit tests and some basic Limbo unit tests. They are run via `npm test` or `npx vitest`.
For more info and log output you can run `npm run test:ui`, but it may execute test cases in parallel, which causes issues.
## TODO
- [ ] Add a wrapper JS module that provides a clean interface to `limbo-worker.js` (one possible shape is sketched below)
- [ ] Add more tests for `opfs.js` operations
- [ ] Add error return handling
- [ ] Make sure POSIX flags for `open` are handled instead of just being ignored (this requires creating a mapping of behaviors from POSIX to OPFS, as far as that makes sense)
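For the first item, one possible shape for such a wrapper, built only on the message protocol `limbo-worker.js` already speaks (ops `createDb`/`exec`/`prepare`, replies `ready`/`success`/`result`/`error`); the class name and method split are hypothetical:
```
// Hypothetical thin wrapper over limbo-worker.js.
export class LimboClient {
  constructor(workerUrl) {
    this.worker = new Worker(workerUrl, { type: "module" });
    this.ready = this.#wait("ready");
  }
  #wait(type, op) {
    return new Promise((resolve, reject) => {
      const handler = (e) => {
        if (e.data.type === type && (!op || e.data.op === op)) {
          this.worker.removeEventListener("message", handler);
          resolve(e.data);
        } else if (e.data.type === "error") {
          this.worker.removeEventListener("message", handler);
          reject(new Error(e.data.error));
        }
      };
      this.worker.addEventListener("message", handler);
    });
  }
  async open(path) {
    await this.ready;
    this.worker.postMessage({ op: "createDb", path });
    await this.#wait("success", "createDb");
  }
  async exec(sql) {
    this.worker.postMessage({ op: "exec", sql });
    await this.#wait("success", "exec");
  }
  async query(sql) {
    this.worker.postMessage({ op: "prepare", sql });
    return (await this.#wait("result")).result;
  }
}
```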


@@ -0,0 +1,9 @@
<!DOCTYPE html>
<html>
<body>
<script type="module">
import { VFSInterface } from './src/opfs-interface.js';
window.VFSInterface = VFSInterface;
</script>
</body>
</html>


@@ -0,0 +1,83 @@
<!DOCTYPE html>
<html>
<head>
<title>Limbo Test</title>
</head>
<body>
<script type="module">
function waitForMessage(worker, type, op) {
return new Promise((resolve, reject) => {
const handler = (e) => {
if (e.data.type === type && (!op || e.data.op === op)) {
worker.removeEventListener('message', handler);
resolve(e.data);
} else if (e.data.type === 'error') {
worker.removeEventListener('message', handler);
reject(e.data.error);
}
};
worker.addEventListener('message', handler);
});
}
async function runTests() {
const worker = new Worker('./src/limbo-worker.js', { type: 'module' });
// Wait for ready then send createDb
await waitForMessage(worker, 'ready');
worker.postMessage({
op: 'createDb',
path: 'test.db'
});
// Wait for createDb success then send exec
await waitForMessage(worker, 'success', 'createDb');
worker.postMessage({
op: 'exec',
sql: `
CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT);
`
});
console.log("made it here");
// Wait for exec success then send prepare
await waitForMessage(worker, 'success', 'exec');
worker.postMessage({
op: 'exec',
sql: `
INSERT INTO users VALUES (1, 'Alice', 'alice@example.org');
`
});
await waitForMessage(worker, 'success', 'exec');
worker.postMessage({
op: 'exec',
sql: `
INSERT INTO users VALUES (2, 'Bob', 'bob@example.org');
`
});
await waitForMessage(worker, 'success', 'exec');
worker.postMessage({
op: 'exec',
sql: `
INSERT INTO users VALUES (3, 'bill', 'bill@example.com');
`
});
// Wait for exec success then send prepare
await waitForMessage(worker, 'success', 'exec');
worker.postMessage({
op: 'prepare',
sql: 'SELECT * FROM users;'
});
const results = await waitForMessage(worker, 'result');
console.log('Query results:', results);
}
runTests().catch(console.error);
</script>
</body>
</html>


@@ -0,0 +1,11 @@
<!DOCTYPE html>
<html>
<head>
<title>Limbo Test</title>
</head>
<body>
<script type="module">
window.Worker = Worker;
</script>
</body>
</html>


@@ -47,7 +47,9 @@ impl Database {
}
#[wasm_bindgen]
pub fn exec(&self, _sql: &str) {}
pub fn exec(&self, _sql: &str) {
let _res = self.conn.execute(_sql).unwrap();
}
#[wasm_bindgen]
pub fn prepare(&self, _sql: &str) -> Statement {
@@ -352,10 +354,39 @@ impl limbo_core::DatabaseStorage for DatabaseStorage {
}
}
#[cfg(all(feature = "web", feature = "nodejs"))]
compile_error!("Features 'web' and 'nodejs' cannot be enabled at the same time");
#[cfg(feature = "web")]
#[wasm_bindgen(module = "/src/web-vfs.js")]
extern "C" {
type VFS;
#[wasm_bindgen(constructor)]
fn new() -> VFS;
#[wasm_bindgen(method)]
fn open(this: &VFS, path: &str, flags: &str) -> i32;
#[wasm_bindgen(method)]
fn close(this: &VFS, fd: i32) -> bool;
#[wasm_bindgen(method)]
fn pwrite(this: &VFS, fd: i32, buffer: &[u8], offset: usize) -> i32;
#[wasm_bindgen(method)]
fn pread(this: &VFS, fd: i32, buffer: &mut [u8], offset: usize) -> i32;
#[wasm_bindgen(method)]
fn size(this: &VFS, fd: i32) -> u64;
#[wasm_bindgen(method)]
fn sync(this: &VFS, fd: i32);
}
#[cfg(feature = "nodejs")]
#[wasm_bindgen(module = "/vfs.js")]
extern "C" {
type VFS;
#[wasm_bindgen(constructor)]
fn new() -> VFS;

bindings/wasm/package-lock.json (generated, new file, 1457 lines)

File diff suppressed because it is too large.


@@ -16,5 +16,21 @@
"limbo_wasm.d.ts"
],
"main": "limbo_wasm.js",
"types": "limbo_wasm.d.ts"
"types": "limbo_wasm.d.ts",
"type": "module",
"scripts": {
"dev": "vite",
"test": "vitest --sequence.shuffle=false",
"test:ui": "vitest --ui"
},
"devDependencies": {
"@playwright/test": "^1.49.1",
"@vitest/ui": "^2.1.8",
"happy-dom": "^16.3.0",
"playwright": "^1.49.1",
"vite": "^6.0.7",
"vite-plugin-wasm": "^3.4.1",
"vitest": "^2.1.8",
"wasm-pack": "^0.13.1"
}
}


@@ -0,0 +1,13 @@
// Using Playwright (recommended)
import { expect, test } from "@playwright/test";
// playwright.config.js
export default {
use: {
headless: true,
// Required for SharedArrayBuffer
launchOptions: {
args: ["--cross-origin-isolated"],
},
},
};


@@ -1,4 +1,12 @@
#!/bin/bash
wasm-pack build --no-pack --target nodejs
# get target as argument from cli, defaults to nodejs if no argument is supplied
TARGET=${1:-nodejs}
FEATURE="nodejs"
if [ "$TARGET" = "web" ]; then
FEATURE="web"
fi
npx wasm-pack build --no-pack --target $TARGET --no-default-features --features $FEATURE
cp package.json pkg/package.json


@@ -0,0 +1,74 @@
import { VFS } from "./opfs.js";
import init, { Database } from "./../pkg/limbo_wasm.js";
let db = null;
let currentStmt = null;
async function initVFS() {
const vfs = new VFS();
await vfs.ready;
self.vfs = vfs;
return vfs;
}
async function initAll() {
await initVFS();
await init();
}
initAll().then(() => {
self.postMessage({ type: "ready" });
self.onmessage = (e) => {
try {
switch (e.data.op) {
case "createDb": {
db = new Database(e.data.path);
self.postMessage({ type: "success", op: "createDb" });
break;
}
case "exec": {
log(e.data.sql);
db.exec(e.data.sql);
self.postMessage({ type: "success", op: "exec" });
break;
}
case "prepare": {
currentStmt = db.prepare(e.data.sql);
const results = currentStmt.raw().all();
self.postMessage({ type: "result", result: results });
break;
}
case "get": {
const row = currentStmt?.raw().get();
self.postMessage({ type: "result", result: row });
break;
}
}
} catch (err) {
self.postMessage({ type: "error", error: err.toString() });
}
};
}).catch((error) => {
self.postMessage({ type: "error", error: error.toString() });
});
// logLevel:
//
// 0 = no logging output
// 1 = only errors
// 2 = warnings and errors
// 3 = debug, warnings, and errors
const logLevel = 1;
const loggers = {
0: console.error.bind(console),
1: console.warn.bind(console),
2: console.log.bind(console),
};
const logImpl = (level, ...args) => {
if (logLevel > level) loggers[level]("OPFS asyncer:", ...args);
};
const log = (...args) => logImpl(2, ...args);
const warn = (...args) => logImpl(1, ...args);
const error = (...args) => logImpl(0, ...args);


@@ -0,0 +1,67 @@
export class VFSInterface {
constructor(workerUrl) {
this.worker = new Worker(workerUrl, { type: "module" });
this.nextMessageId = 1;
this.pendingRequests = new Map();
this.worker.onmessage = (event) => {
console.log("interface onmessage: ", event.data);
let { id, result, error } = event.data;
const resolver = this.pendingRequests.get(id);
if (event.data?.buffer && event.data?.size) {
result = { size: event.data.size, buffer: event.data.buffer };
}
if (resolver) {
this.pendingRequests.delete(id);
if (error) {
resolver.reject(new Error(error));
} else {
resolver.resolve(result);
}
}
};
}
_sendMessage(method, args) {
const id = this.nextMessageId++;
return new Promise((resolve, reject) => {
this.pendingRequests.set(id, { resolve, reject });
this.worker.postMessage({ id, method, args });
});
}
async open(path, flags) {
return await this._sendMessage("open", { path, flags });
}
async close(fd) {
return await this._sendMessage("close", { fd });
}
async pwrite(fd, buffer, offset) {
return await this._sendMessage("pwrite", { fd, buffer, offset }, [
buffer.buffer,
]);
}
async pread(fd, buffer, offset) {
console.log("interface in buffer: ", [...buffer]);
const result = await this._sendMessage("pread", {
fd,
buffer: buffer,
offset,
}, []);
console.log("interface out buffer: ", [...buffer]);
buffer.set(new Uint8Array(result.buffer));
return buffer.length;
}
async size(fd) {
return await this._sendMessage("size", { fd });
}
async sync(fd) {
return await this._sendMessage("sync", { fd });
}
}


@@ -0,0 +1,136 @@
// opfs-sync-proxy.js
let transferBuffer, statusBuffer, statusArray, statusView;
let transferArray;
let rootDir = null;
const handles = new Map();
let nextFd = 1;
self.postMessage("ready");
onmessage = async (e) => {
log("handle message: ", e.data);
if (e.data.cmd === "init") {
log("init");
transferBuffer = e.data.transferBuffer;
statusBuffer = e.data.statusBuffer;
transferArray = new Uint8Array(transferBuffer);
statusArray = new Int32Array(statusBuffer);
statusView = new DataView(statusBuffer);
self.postMessage("done");
return;
}
const result = await handleCommand(e.data);
sendResult(result);
};
self.onerror = (error) => {
console.error("opfssync error: ", error);
// Don't close, keep running
return true; // Prevents default error handling
};
function handleCommand(msg) {
log(`handle message: ${msg.cmd}`);
switch (msg.cmd) {
case "open":
return handleOpen(msg.path);
case "close":
return handleClose(msg.fd);
case "read":
return handleRead(msg.fd, msg.offset, msg.size);
case "write":
return handleWrite(msg.fd, msg.buffer, msg.offset);
case "size":
return handleSize(msg.fd);
case "sync":
return handleSync(msg.fd);
}
}
async function handleOpen(path) {
if (!rootDir) {
rootDir = await navigator.storage.getDirectory();
}
const fd = nextFd++;
const handle = await rootDir.getFileHandle(path, { create: true });
const syncHandle = await handle.createSyncAccessHandle();
handles.set(fd, syncHandle);
return { fd };
}
function handleClose(fd) {
const handle = handles.get(fd);
handle.close();
handles.delete(fd);
return { success: true };
}
function handleRead(fd, offset, size) {
const handle = handles.get(fd);
const readBuffer = new ArrayBuffer(size);
const readSize = handle.read(readBuffer, { at: offset });
log("opfssync read: size: ", readBuffer.byteLength);
const tmp = new Uint8Array(readBuffer);
log("opfssync read buffer: ", [...tmp]);
transferArray.set(tmp);
return { success: true, length: readSize };
}
function handleWrite(fd, buffer, offset) {
log("opfssync buffer size:", buffer.byteLength);
log("opfssync write buffer: ", [...buffer]);
const handle = handles.get(fd);
const size = handle.write(buffer, { at: offset });
return { success: true, length: size };
}
function handleSize(fd) {
const handle = handles.get(fd);
return { success: true, length: handle.getSize() };
}
function handleSync(fd) {
const handle = handles.get(fd);
handle.flush();
return { success: true };
}
function sendResult(result) {
if (result?.fd) {
statusView.setInt32(4, result.fd, true);
} else {
log("opfs-sync-proxy: result.length: ", result.length);
statusView.setInt32(4, result?.length || 0, true);
}
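// Publish the result, then wake the caller blocked in Atomics.wait over the shared status word (see opfs.js).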
Atomics.store(statusArray, 0, 1);
Atomics.notify(statusArray, 0);
}
// logLevel:
//
// 0 = no logging output
// 1 = only errors
// 2 = warnings and errors
// 3 = debug, warnings, and errors
const logLevel = 1;
const loggers = {
0: console.error.bind(console),
1: console.warn.bind(console),
2: console.log.bind(console),
};
const logImpl = (level, ...args) => {
if (logLevel > level) loggers[level]("OPFS asyncer:", ...args);
};
const log = (...args) => logImpl(2, ...args);
const warn = (...args) => logImpl(1, ...args);
const error = (...args) => logImpl(0, ...args);


@@ -0,0 +1,57 @@
import { VFS } from "./opfs.js";
const vfs = new VFS();
onmessage = async function (e) {
if (!vfs.isReady) {
console.log("opfs ready: ", vfs.isReady);
await vfs.ready;
console.log("opfs ready: ", vfs.isReady);
}
const { id, method, args } = e.data;
console.log(`interface onmessage method: ${method}`);
try {
let result;
switch (method) {
case "open":
result = vfs.open(args.path, args.flags);
break;
case "close":
result = vfs.close(args.fd);
break;
case "pread": {
const buffer = new Uint8Array(args.buffer);
result = vfs.pread(args.fd, buffer, args.offset);
self.postMessage(
{ id, size: result, error: null, buffer },
);
console.log("read size: ", result);
console.log("read buffer: ", [...buffer]);
return;
}
case "pwrite": {
result = vfs.pwrite(args.fd, args.buffer, args.offset);
console.log("write size: ", result);
break;
}
case "size":
result = vfs.size(args.fd);
break;
case "sync":
result = vfs.sync(args.fd);
break;
default:
throw new Error(`Unknown method: ${method}`);
}
self.postMessage(
{ id, result, error: null },
);
} catch (error) {
self.postMessage({ id, result: null, error: error.message });
}
};
console.log("opfs-worker.js");

bindings/wasm/src/opfs.js (new file, 154 lines)

@@ -0,0 +1,154 @@
// First file: VFS class
class VFS {
constructor() {
this.transferBuffer = new SharedArrayBuffer(1024 * 1024); // 1mb
this.statusBuffer = new SharedArrayBuffer(8); // Room for status + size
this.statusArray = new Int32Array(this.statusBuffer);
this.statusView = new DataView(this.statusBuffer);
this.worker = new Worker(
new URL("./opfs-sync-proxy.js", import.meta.url),
{ type: "module" },
);
this.isReady = false;
this.ready = new Promise((resolve, reject) => {
this.worker.addEventListener("message", async (e) => {
if (e.data === "ready") {
await this.initWorker();
this.isReady = true;
resolve();
}
}, { once: true });
this.worker.addEventListener("error", reject, { once: true });
});
this.worker.onerror = (e) => {
console.error("Sync proxy worker error:", e.message);
};
}
initWorker() {
return new Promise((resolve) => {
this.worker.addEventListener("message", (e) => {
log("eventListener: ", e.data);
resolve();
}, { once: true });
this.worker.postMessage({
cmd: "init",
transferBuffer: this.transferBuffer,
statusBuffer: this.statusBuffer,
});
});
}
open(path) {
Atomics.store(this.statusArray, 0, 0);
this.worker.postMessage({ cmd: "open", path });
Atomics.wait(this.statusArray, 0, 0);
const result = this.statusView.getInt32(4, true);
log("opfs.js open result: ", result);
log("opfs.js open result type: ", typeof result);
return result;
}
close(fd) {
Atomics.store(this.statusArray, 0, 0);
this.worker.postMessage({ cmd: "close", fd });
Atomics.wait(this.statusArray, 0, 0);
return true;
}
pread(fd, buffer, offset) {
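// Read in transferBuffer-sized chunks: each round trip copies one chunk out of the shared buffer before requesting the next.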
let bytesRead = 0;
while (bytesRead < buffer.byteLength) {
const chunkSize = Math.min(
this.transferBuffer.byteLength,
buffer.byteLength - bytesRead,
);
Atomics.store(this.statusArray, 0, 0);
this.worker.postMessage({
cmd: "read",
fd,
offset: offset + bytesRead,
size: chunkSize,
});
Atomics.wait(this.statusArray, 0, 0);
const readSize = this.statusView.getInt32(4, true);
buffer.set(
new Uint8Array(this.transferBuffer, 0, readSize),
bytesRead,
);
log("opfs pread buffer: ", [...buffer]);
bytesRead += readSize;
if (readSize < chunkSize) break;
}
return bytesRead;
}
pwrite(fd, buffer, offset) {
log("write buffer size: ", buffer.byteLength);
Atomics.store(this.statusArray, 0, 0);
this.worker.postMessage({
cmd: "write",
fd,
buffer: buffer,
offset: offset,
});
Atomics.wait(this.statusArray, 0, 0);
log(
"opfs pwrite length statusview: ",
this.statusView.getInt32(4, true),
);
return this.statusView.getInt32(4, true);
}
size(fd) {
Atomics.store(this.statusArray, 0, 0);
this.worker.postMessage({ cmd: "size", fd });
Atomics.wait(this.statusArray, 0, 0);
const result = this.statusView.getInt32(4, true);
log("opfs.js size result: ", result);
log("opfs.js size result type: ", typeof result);
return BigInt(result);
}
sync(fd) {
Atomics.store(this.statusArray, 0, 0);
this.worker.postMessage({ cmd: "sync", fd });
Atomics.wait(this.statusArray, 0, 0);
}
}
// logLevel:
//
// 0 = no logging output
// 1 = only errors
// 2 = warnings and errors
// 3 = debug, warnings, and errors
const logLevel = 1;
const loggers = {
0: console.error.bind(console),
1: console.warn.bind(console),
2: console.log.bind(console),
};
const logImpl = (level, ...args) => {
if (logLevel > level) loggers[level]("OPFS asyncer:", ...args);
};
const log = (...args) => logImpl(2, ...args);
const warn = (...args) => logImpl(1, ...args);
const error = (...args) => logImpl(0, ...args);
export { VFS };


@@ -0,0 +1,29 @@
export class VFS {
constructor() {
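// limbo-worker.js creates the real VFS and stores it on self.vfs before wasm init, so this web build just forwards to that instance.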
return self.vfs;
}
open(path, flags) {
return self.vfs.open(path);
}
close(fd) {
return self.vfs.close(fd);
}
pread(fd, buffer, offset) {
return self.vfs.pread(fd, buffer, offset);
}
pwrite(fd, buffer, offset) {
return self.vfs.pwrite(fd, buffer, offset);
}
size(fd) {
return self.vfs.size(fd);
}
sync(fd) {
return self.vfs.sync(fd);
}
}


@@ -0,0 +1,23 @@
import { createServer } from "vite";
import { chromium } from "playwright";
export async function setupTestEnvironment(port) {
const server = await createServer({
configFile: "./vite.config.js",
root: ".",
server: { port },
});
await server.listen();
const browser = await chromium.launch();
const context = await browser.newContext();
const page = await context.newPage();
globalThis.__page__ = page;
return { server, browser, context, page };
}
export async function teardownTestEnvironment({ server, browser, context }) {
await context.close();
await browser.close();
await server.close();
}


@@ -0,0 +1,72 @@
import { afterAll, beforeAll, beforeEach, expect, test } from "vitest";
import { setupTestEnvironment, teardownTestEnvironment } from "./helpers.js";
let testEnv;
beforeAll(async () => {
testEnv = await setupTestEnvironment(5174);
});
beforeEach(async () => {
const { page } = testEnv;
await page.goto("http://localhost:5174/limbo-test.html");
});
afterAll(async () => {
await teardownTestEnvironment(testEnv);
});
test("basic database operations", async () => {
const { page } = testEnv;
const result = await page.evaluate(async () => {
const worker = new Worker("./src/limbo-worker.js", { type: "module" });
const waitForMessage = (type, op) =>
new Promise((resolve, reject) => {
const handler = (e) => {
if (e.data.type === type && (!op || e.data.op === op)) {
worker.removeEventListener("message", handler);
resolve(e.data);
} else if (e.data.type === "error") {
worker.removeEventListener("message", handler);
reject(e.data.error);
}
};
worker.addEventListener("message", handler);
});
try {
await waitForMessage("ready");
worker.postMessage({ op: "createDb", path: "test.db" });
await waitForMessage("success", "createDb");
worker.postMessage({
op: "exec",
sql:
"CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT);",
});
await waitForMessage("success", "exec");
worker.postMessage({
op: "exec",
sql: "INSERT INTO users VALUES (1, 'Alice', 'alice@example.org');",
});
await waitForMessage("success", "exec");
worker.postMessage({
op: "prepare",
sql: "SELECT * FROM users;",
});
const results = await waitForMessage("result");
return results;
} catch (error) {
return { error: error.message };
}
});
if (result.error) throw new Error(`Test failed: ${result.error}`);
expect(result.result).toHaveLength(1);
expect(result.result[0]).toEqual([1, "Alice", "alice@example.org"]);
});


@@ -0,0 +1,140 @@
// test/opfs.test.js
import { afterAll, beforeAll, beforeEach, expect, test } from "vitest";
import { setupTestEnvironment, teardownTestEnvironment } from "./helpers";
let testEnv;
beforeAll(async () => {
testEnv = await setupTestEnvironment(5173);
});
beforeEach(async () => {
const { page } = testEnv;
await page.goto("http://localhost:5173/index.html");
await page.waitForFunction(() => window.VFSInterface !== undefined);
});
afterAll(async () => {
await teardownTestEnvironment(testEnv);
});
test("basic read/write functionality", async () => {
const { page } = testEnv;
const result = await page.evaluate(async () => {
const vfs = new window.VFSInterface("/src/opfs-worker.js");
let fd;
try {
fd = await vfs.open("test.txt", {});
const writeData = new Uint8Array([1, 2, 3, 4]);
const bytesWritten = await vfs.pwrite(fd, writeData, 0);
const readData = new Uint8Array(4);
const bytesRead = await vfs.pread(fd, readData, 0);
await vfs.close(fd);
return { fd, bytesWritten, bytesRead, readData: Array.from(readData) };
} catch (error) {
if (fd !== undefined) await vfs.close(fd);
return { error: error.message };
}
});
if (result.error) throw new Error(`Test failed: ${result.error}`);
expect(result.fd).toBe(1);
expect(result.bytesWritten).toBe(4);
expect(result.bytesRead).toBe(4);
expect(result.readData).toEqual([1, 2, 3, 4]);
});
test("larger data read/write", async () => {
const { page } = testEnv;
const result = await page.evaluate(async () => {
const vfs = new window.VFSInterface("/src/opfs-worker.js");
let fd;
try {
fd = await vfs.open("large.txt", {});
const writeData = new Uint8Array(1024).map((_, i) => i % 256);
const bytesWritten = await vfs.pwrite(fd, writeData, 0);
const readData = new Uint8Array(1024);
const bytesRead = await vfs.pread(fd, readData, 0);
await vfs.close(fd);
return { bytesWritten, bytesRead, readData: Array.from(readData) };
} catch (error) {
if (fd !== undefined) await vfs.close(fd);
return { error: error.message };
}
});
if (result.error) throw new Error(`Test failed: ${result.error}`);
expect(result.bytesWritten).toBe(1024);
expect(result.bytesRead).toBe(1024);
expect(result.readData).toEqual(
Array.from({ length: 1024 }, (_, i) => i % 256),
);
});
test("partial reads and writes", async () => {
const { page } = testEnv;
const result = await page.evaluate(async () => {
const vfs = new window.VFSInterface("/src/opfs-worker.js");
let fd;
try {
fd = await vfs.open("partial.txt", {});
const writeData1 = new Uint8Array([1, 2, 3, 4]);
const writeData2 = new Uint8Array([5, 6, 7, 8]);
await vfs.pwrite(fd, writeData1, 0);
await vfs.pwrite(fd, writeData2, 4);
const readData1 = new Uint8Array(2);
const readData2 = new Uint8Array(4);
const readData3 = new Uint8Array(2);
await vfs.pread(fd, readData1, 0);
await vfs.pread(fd, readData2, 2);
await vfs.pread(fd, readData3, 6);
await vfs.close(fd);
return {
readData1: Array.from(readData1),
readData2: Array.from(readData2),
readData3: Array.from(readData3),
};
} catch (error) {
if (fd !== undefined) await vfs.close(fd);
return { error: error.message };
}
});
if (result.error) throw new Error(`Test failed: ${result.error}`);
expect(result.readData1).toEqual([1, 2]);
expect(result.readData2).toEqual([3, 4, 5, 6]);
expect(result.readData3).toEqual([7, 8]);
});
test("file size operations", async () => {
const { page } = testEnv;
const result = await page.evaluate(async () => {
const vfs = new window.VFSInterface("/src/opfs-worker.js");
let fd;
try {
fd = await vfs.open("size.txt", {});
const writeData1 = new Uint8Array([1, 2, 3, 4]);
await vfs.pwrite(fd, writeData1, 0);
const size1 = await vfs.size(fd);
const writeData2 = new Uint8Array([5, 6, 7, 8]);
await vfs.pwrite(fd, writeData2, 4);
const size2 = await vfs.size(fd);
await vfs.close(fd);
return { size1, size2 };
} catch (error) {
if (fd !== undefined) await vfs.close(fd);
return { error: error.message };
}
});
if (result.error) throw new Error(`Test failed: ${result.error}`);
expect(Number(result.size1)).toBe(4);
expect(Number(result.size2)).toBe(8);
});


@@ -0,0 +1,29 @@
import { defineConfig } from "vite";
import wasm from "vite-plugin-wasm";
export default defineConfig({
publicDir: "./html",
root: ".",
plugins: [wasm()],
test: {
globals: true,
environment: "happy-dom",
setupFiles: ["./test/setup.js"],
include: ["test/*.test.js"],
},
server: {
headers: {
"Cross-Origin-Embedder-Policy": "require-corp",
"Cross-Origin-Opener-Policy": "same-origin",
"Cross-Origin-Resource-Policy": "cross-origin",
},
},
worker: {
format: "es",
rollupOptions: {
output: {
format: "es",
},
},
},
});
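Those COOP/COEP headers make the page cross-origin isolated, which `SharedArrayBuffer` (and therefore the OPFS sync proxy) requires; a quick runtime check using only standard web APIs:
```
// SharedArrayBuffer, and therefore the OPFS sync proxy, is only available when
// the page is cross-origin isolated.
if (!globalThis.crossOriginIsolated) {
  console.error("Not cross-origin isolated; SharedArrayBuffer is unavailable.");
}
```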


@@ -14,7 +14,7 @@ name = "limbo_core"
path = "lib.rs"
[features]
default = ["fs", "json", "uuid"]
default = ["fs", "json", "uuid", "io_uring"]
fs = []
json = [
"dep:jsonb",
@@ -22,11 +22,12 @@ json = [
"dep:pest_derive",
]
uuid = ["dep:uuid"]
io_uring = ["dep:io-uring"]
[target.'cfg(target_os = "linux")'.dependencies]
io-uring = "0.6.1"
io-uring = { version = "0.6.1", optional = true }
[target.'cfg(target_os = "macos")'.dependencies]
[target.'cfg(target_family = "unix")'.dependencies]
polling = "3.7.2"
rustix = "0.38.34"


@@ -19,12 +19,12 @@ pub enum LimboError {
EnvVarError(#[from] std::env::VarError),
#[error("I/O error: {0}")]
IOError(#[from] std::io::Error),
#[cfg(target_os = "linux")]
#[cfg(all(target_os = "linux", feature = "io_uring"))]
#[error("I/O error: {0}")]
LinuxIOError(String),
UringIOError(String),
#[error("Locking error: {0}")]
LockingError(String),
#[cfg(target_os = "macos")]
#[cfg(target_family = "unix")]
#[error("I/O error: {0}")]
RustixIOError(#[from] rustix::io::Errno),
#[error("Parse error: {0}")]


@@ -27,6 +27,7 @@ pub enum JsonFunc {
JsonArray,
JsonExtract,
JsonArrayLength,
JsonType,
}
#[cfg(feature = "json")]
@@ -40,6 +41,7 @@ impl Display for JsonFunc {
Self::JsonArray => "json_array".to_string(),
Self::JsonExtract => "json_extract".to_string(),
Self::JsonArrayLength => "json_array_length".to_string(),
Self::JsonType => "json_type".to_string(),
}
)
}
@@ -371,6 +373,8 @@ impl Func {
"json_array" => Ok(Self::Json(JsonFunc::JsonArray)),
#[cfg(feature = "json")]
"json_extract" => Ok(Func::Json(JsonFunc::JsonExtract)),
#[cfg(feature = "json")]
"json_type" => Ok(Func::Json(JsonFunc::JsonType)),
"unixepoch" => Ok(Self::Scalar(ScalarFunc::UnixEpoch)),
"julianday" => Ok(Self::Scalar(ScalarFunc::JulianDay)),
"hex" => Ok(Self::Scalar(ScalarFunc::Hex)),


@@ -14,15 +14,15 @@ const MAX_IOVECS: usize = 128;
const SQPOLL_IDLE: u32 = 1000;
#[derive(Debug, Error)]
enum LinuxIOError {
enum UringIOError {
IOUringCQError(i32),
}
// Implement the Display trait to customize error messages
impl fmt::Display for LinuxIOError {
impl fmt::Display for UringIOError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
LinuxIOError::IOUringCQError(code) => write!(
UringIOError::IOUringCQError(code) => write!(
f,
"IOUring completion queue error occurred with code {}",
code
@@ -31,8 +31,8 @@ impl fmt::Display for LinuxIOError {
}
}
pub struct LinuxIO {
inner: Rc<RefCell<InnerLinuxIO>>,
pub struct UringIO {
inner: Rc<RefCell<InnerUringIO>>,
}
struct WrappedIOUring {
@@ -42,13 +42,13 @@ struct WrappedIOUring {
key: u64,
}
struct InnerLinuxIO {
struct InnerUringIO {
ring: WrappedIOUring,
iovecs: [iovec; MAX_IOVECS],
next_iovec: usize,
}
impl LinuxIO {
impl UringIO {
pub fn new() -> Result<Self> {
let ring = match io_uring::IoUring::builder()
.setup_sqpoll(SQPOLL_IDLE)
@@ -57,7 +57,7 @@ impl LinuxIO {
Ok(ring) => ring,
Err(_) => io_uring::IoUring::new(MAX_IOVECS as u32)?,
};
let inner = InnerLinuxIO {
let inner = InnerUringIO {
ring: WrappedIOUring {
ring,
pending_ops: 0,
@@ -76,7 +76,7 @@ impl LinuxIO {
}
}
impl InnerLinuxIO {
impl InnerUringIO {
pub fn get_iovec(&mut self, buf: *const u8, len: usize) -> &iovec {
let iovec = &mut self.iovecs[self.next_iovec];
iovec.iov_base = buf as *mut std::ffi::c_void;
@@ -125,7 +125,7 @@ impl WrappedIOUring {
}
}
impl IO for LinuxIO {
impl IO for UringIO {
fn open_file(&self, path: &str, flags: OpenFlags, direct: bool) -> Result<Rc<dyn File>> {
trace!("open_file(path = {})", path);
let file = std::fs::File::options()
@@ -142,14 +142,14 @@ impl IO for LinuxIO {
Err(error) => debug!("Error {error:?} returned when setting O_DIRECT flag to read file. The performance of the system may be affected"),
};
}
let linux_file = Rc::new(LinuxFile {
let uring_file = Rc::new(UringFile {
io: self.inner.clone(),
file,
});
if std::env::var(common::ENV_DISABLE_FILE_LOCK).is_err() {
linux_file.lock_file(true)?;
uring_file.lock_file(true)?;
}
Ok(linux_file)
Ok(uring_file)
}
fn run_once(&self) -> Result<()> {
@@ -165,9 +165,9 @@ impl IO for LinuxIO {
while let Some(cqe) = ring.get_completion() {
let result = cqe.result();
if result < 0 {
return Err(LimboError::LinuxIOError(format!(
return Err(LimboError::UringIOError(format!(
"{} cqe: {:?}",
LinuxIOError::IOUringCQError(result),
UringIOError::IOUringCQError(result),
cqe
)));
}
@@ -191,12 +191,12 @@ impl IO for LinuxIO {
}
}
pub struct LinuxFile {
io: Rc<RefCell<InnerLinuxIO>>,
pub struct UringFile {
io: Rc<RefCell<InnerUringIO>>,
file: std::fs::File,
}
impl File for LinuxFile {
impl File for UringFile {
fn lock_file(&self, exclusive: bool) -> Result<()> {
let fd = self.file.as_raw_fd();
let flock = flock {
@@ -306,7 +306,7 @@ impl File for LinuxFile {
}
}
impl Drop for LinuxFile {
impl Drop for UringFile {
fn drop(&mut self) {
self.unlock_file().expect("Failed to unlock file");
}
@@ -319,6 +319,6 @@ mod tests {
#[test]
fn test_multiple_processes_cannot_open_file() {
common::tests::test_multiple_processes_cannot_open_file(LinuxIO::new);
common::tests::test_multiple_processes_cannot_open_file(UringIO::new);
}
}


@@ -164,14 +164,14 @@ impl Buffer {
}
cfg_block! {
#[cfg(target_os = "linux")] {
mod linux;
pub use linux::LinuxIO as PlatformIO;
#[cfg(all(target_os = "linux", feature = "io_uring"))] {
mod io_uring;
pub use io_uring::UringIO as PlatformIO;
}
#[cfg(target_os = "macos")] {
mod darwin;
pub use darwin::DarwinIO as PlatformIO;
#[cfg(any(all(target_os = "linux",not(feature = "io_uring")), target_os = "macos"))] {
mod unix;
pub use unix::UnixIO as PlatformIO;
}
#[cfg(target_os = "windows")] {
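For downstream crates, whether the uring backend is compiled in is a Cargo feature decision; a hedged example of opting out, mirroring how `bindings/wasm/Cargo.toml` above already disables default features (the path is illustrative):
```
# Illustrative dependency entry: dropping the io_uring default feature means Linux
# builds fall back to the polling-based UnixIO backend selected in io/mod.rs above.
limbo_core = { path = "../core", default-features = false, features = ["fs", "json", "uuid"] }
```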


@@ -14,13 +14,13 @@ use std::collections::HashMap;
use std::io::{Read, Seek, Write};
use std::rc::Rc;
pub struct DarwinIO {
pub struct UnixIO {
poller: Rc<RefCell<Poller>>,
events: Rc<RefCell<Events>>,
callbacks: Rc<RefCell<HashMap<usize, CompletionCallback>>>,
}
impl DarwinIO {
impl UnixIO {
pub fn new() -> Result<Self> {
Ok(Self {
poller: Rc::new(RefCell::new(Poller::new()?)),
@@ -30,7 +30,7 @@ impl DarwinIO {
}
}
impl IO for DarwinIO {
impl IO for UnixIO {
fn open_file(&self, path: &str, flags: OpenFlags, _direct: bool) -> Result<Rc<dyn File>> {
trace!("open_file(path = {})", path);
let file = std::fs::File::options()
@@ -40,15 +40,15 @@ impl IO for DarwinIO {
.create(matches!(flags, OpenFlags::Create))
.open(path)?;
let darwin_file = Rc::new(DarwinFile {
let unix_file = Rc::new(UnixFile {
file: Rc::new(RefCell::new(file)),
poller: self.poller.clone(),
callbacks: self.callbacks.clone(),
});
if std::env::var(common::ENV_DISABLE_FILE_LOCK).is_err() {
darwin_file.lock_file(true)?;
unix_file.lock_file(true)?;
}
Ok(darwin_file)
Ok(unix_file)
}
fn run_once(&self) -> Result<()> {
@@ -127,13 +127,13 @@ enum CompletionCallback {
),
}
pub struct DarwinFile {
pub struct UnixFile {
file: Rc<RefCell<std::fs::File>>,
poller: Rc<RefCell<polling::Poller>>,
callbacks: Rc<RefCell<HashMap<usize, CompletionCallback>>>,
}
impl File for DarwinFile {
impl File for UnixFile {
fn lock_file(&self, exclusive: bool) -> Result<()> {
let fd = self.file.borrow().as_raw_fd();
let flock = flock {
@@ -279,7 +279,7 @@ impl File for DarwinFile {
}
}
impl Drop for DarwinFile {
impl Drop for UnixFile {
fn drop(&mut self) {
self.unlock_file().expect("Failed to unlock file");
}
@@ -291,6 +291,6 @@ mod tests {
#[test]
fn test_multiple_processes_cannot_open_file() {
common::tests::test_multiple_processes_cannot_open_file(DarwinIO::new);
common::tests::test_multiple_processes_cannot_open_file(UnixIO::new);
}
}


@@ -121,17 +121,13 @@ pub fn json_array_length(
json_value: &OwnedValue,
json_path: Option<&OwnedValue>,
) -> crate::Result<OwnedValue> {
let path = match json_path {
Some(OwnedValue::Text(t)) => Some(t.value.to_string()),
Some(OwnedValue::Integer(i)) => Some(i.to_string()),
Some(OwnedValue::Float(f)) => Some(f.to_string()),
_ => None::<String>,
};
let json = get_json_value(json_value)?;
let arr_val = if let Some(path) = path {
&json_extract_single(&json, path.as_str())?
let arr_val = if let Some(path) = json_path {
match json_extract_single(&json, path)? {
Some(val) => val,
None => return Ok(OwnedValue::Null),
}
} else {
&json
};
@@ -161,10 +157,13 @@ pub fn json_extract(value: &OwnedValue, paths: &[OwnedValue]) -> crate::Result<O
for path in paths {
match path {
OwnedValue::Text(p) => {
let extracted = json_extract_single(&json, p.value.as_str())?;
OwnedValue::Null => {
return Ok(OwnedValue::Null);
}
_ => {
let extracted = json_extract_single(&json, path)?.unwrap_or_else(|| &Val::Null);
if paths.len() == 1 && extracted == Val::Null {
if paths.len() == 1 && extracted == &Val::Null {
return Ok(OwnedValue::Null);
}
@@ -173,8 +172,6 @@ pub fn json_extract(value: &OwnedValue, paths: &[OwnedValue]) -> crate::Result<O
result.push(',');
}
}
OwnedValue::Null => return Ok(OwnedValue::Null),
_ => crate::bail_constraint_error!("JSON path error near: {:?}", path.to_string()),
}
}
@@ -186,8 +183,49 @@ pub fn json_extract(value: &OwnedValue, paths: &[OwnedValue]) -> crate::Result<O
Ok(OwnedValue::Text(LimboText::json(Rc::new(result))))
}
fn json_extract_single(json: &Val, path: &str) -> crate::Result<Val> {
let json_path = json_path(path)?;
pub fn json_type(value: &OwnedValue, path: Option<&OwnedValue>) -> crate::Result<OwnedValue> {
if let OwnedValue::Null = value {
return Ok(OwnedValue::Null);
}
let json = get_json_value(value)?;
let json = if let Some(path) = path {
match json_extract_single(&json, path)? {
Some(val) => val,
None => return Ok(OwnedValue::Null),
}
} else {
&json
};
let val = match json {
Val::Null => "null",
Val::Bool(v) => {
if *v {
"true"
} else {
"false"
}
}
Val::Integer(_) => "integer",
Val::Float(_) => "real",
Val::String(_) => "text",
Val::Array(_) => "array",
Val::Object(_) => "object",
};
Ok(OwnedValue::Text(LimboText::json(Rc::new(val.to_string()))))
}
/// Returns the value at the given JSON path. If the path does not exist, it returns None.
/// If the path is an invalid path, returns an error.
fn json_extract_single<'a>(json: &'a Val, path: &OwnedValue) -> crate::Result<Option<&'a Val>> {
let json_path = match path {
OwnedValue::Text(t) => json_path(t.value.as_str())?,
OwnedValue::Null => return Ok(None),
_ => crate::bail_constraint_error!("JSON path error near: {:?}", path.to_string()),
};
let mut current_element = &Val::Null;
@@ -204,12 +242,10 @@ fn json_extract_single(json: &Val, path: &str) -> crate::Result<Val> {
if let Some(value) = map.get(key) {
current_element = value;
} else {
return Ok(Val::Null);
return Ok(None);
}
}
_ => {
return Ok(Val::Null);
}
_ => return Ok(None),
}
}
PathElement::ArrayLocator(idx) => match current_element {
@@ -223,16 +259,15 @@ fn json_extract_single(json: &Val, path: &str) -> crate::Result<Val> {
if idx < array.len() as i32 {
current_element = &array[idx as usize];
} else {
return Ok(Val::Null);
return Ok(None);
}
}
_ => {
return Ok(Val::Null);
}
_ => return Ok(None),
},
}
}
Ok(current_element.clone())
Ok(Some(current_element))
}
#[cfg(test)]
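The new json_extract_single signature is the key change here: a path that matches nothing yields Ok(None), which the callers above surface as SQL NULL, while a malformed path yields an error. Below is a minimal, self-contained sketch of that Result<Option<&Val>> contract, using a toy Val type and a deliberately tiny path grammar, not Limbo's parser.

// Toy model of the Result<Option<&Val>> pattern above: a missing path yields
// Ok(None) (surfaced as SQL NULL), a malformed path yields Err(..).
#[derive(Debug, PartialEq)]
enum Val {
    Null,
    Integer(i64),
    Object(Vec<(String, Val)>),
}

fn extract<'a>(json: &'a Val, path: &str) -> Result<Option<&'a Val>, String> {
    // Only "$" and "$.key" are handled in this sketch; anything else is a path error.
    if path == "$" {
        return Ok(Some(json));
    }
    let key = path
        .strip_prefix("$.")
        .ok_or_else(|| format!("JSON path error near: {path}"))?;
    match json {
        Val::Object(fields) => Ok(fields.iter().find(|(k, _)| k == key).map(|(_, v)| v)),
        _ => Ok(None), // wrong container type: treated as "not found", not an error
    }
}

fn main() {
    let doc = Val::Object(vec![("a".to_string(), Val::Integer(1))]);
    assert_eq!(extract(&doc, "$.a"), Ok(Some(&Val::Integer(1))));
    assert_eq!(extract(&doc, "$.missing"), Ok(None)); // -> SQL NULL
    assert!(extract(&doc, "not-a-path").is_err());    // -> error
    let _ = Val::Null; // Null exists only to mirror the real enum in this sketch
}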

View File

@@ -78,6 +78,7 @@ macro_rules! return_if_locked {
enum WriteState {
Start,
BalanceStart,
BalanceNonRoot,
BalanceGetParentPage,
BalanceMoveUp,
Finish,
@@ -730,9 +731,10 @@ impl BTreeCursor {
}
}
WriteState::BalanceStart
| WriteState::BalanceNonRoot
| WriteState::BalanceMoveUp
| WriteState::BalanceGetParentPage => {
return_if_io!(self.balance_leaf());
return_if_io!(self.balance());
}
WriteState::Finish => {
self.write_info.state = WriteState::Start;
@@ -882,7 +884,7 @@ impl BTreeCursor {
/// This is a naive algorithm that doesn't try to distribute cells evenly by content.
/// It will try to split the page in half by keys, not by content.
/// SQLite tries to keep a page at least 40% full.
fn balance_leaf(&mut self) -> Result<CursorResult<()>> {
fn balance(&mut self) -> Result<CursorResult<()>> {
let state = &self.write_info.state;
match state {
WriteState::BalanceStart => {
@@ -906,7 +908,31 @@ impl BTreeCursor {
self.balance_root();
return Ok(CursorResult::Ok(()));
}
debug!("Balancing leaf. leaf={}", current_page.get().id);
self.write_info.state = WriteState::BalanceNonRoot;
self.balance_non_root()
}
WriteState::BalanceNonRoot
| WriteState::BalanceGetParentPage
| WriteState::BalanceMoveUp => self.balance_non_root(),
_ => unreachable!("invalid balance leaf state {:?}", state),
}
}
fn balance_non_root(&mut self) -> Result<CursorResult<()>> {
let state = &self.write_info.state;
match state {
WriteState::Start => todo!(),
WriteState::BalanceStart => todo!(),
WriteState::BalanceNonRoot => {
// drop divider cells and find right pointer
// NOTE: since we are doing a simple split, we only find the pointer we want to update (the right pointer).
// The right pointer is the cell that points to the last page; we don't actually want to drop it. It
// can be either a "rightmost pointer" or a "cell".
// We always assume there is a parent.
let current_page = self.stack.top();
debug!("balance_non_root(page={})", current_page.get().id);
// Copy of page used to reference cell bytes.
// This needs to be saved somewhere safe so that references still point here,
@@ -1186,8 +1212,7 @@ impl BTreeCursor {
let _ = self.write_info.page_copy.take();
Ok(CursorResult::Ok(()))
}
_ => unreachable!("invalid balance leaf state {:?}", state),
WriteState::Finish => todo!(),
}
}
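The WriteState additions make balance()/balance_non_root() a resumable state machine: each call advances one step, and if a page is not yet in memory the function returns an I/O result and is re-entered later with the state preserved on the cursor rather than on the call stack. A minimal sketch of that pattern with invented names (not the b-tree code itself):

// Illustrative only: a write operation that can be suspended on I/O and resumed
// later, mirroring the WriteState dispatch in the diff above.
#[derive(Debug, Clone, Copy, PartialEq)]
enum WriteState {
    Start,
    BalanceStart,
    BalanceNonRoot,
    Finish,
}

enum StepResult {
    Io,   // caller must run the I/O loop and call `step` again
    Done,
}

struct Cursor {
    state: WriteState,
    io_pending: bool, // stand-in for "page not yet loaded"
}

impl Cursor {
    fn step(&mut self) -> StepResult {
        loop {
            match self.state {
                WriteState::Start => self.state = WriteState::BalanceStart,
                WriteState::BalanceStart => {
                    if self.io_pending {
                        // Suspend: keep the state so the next call resumes here.
                        self.io_pending = false;
                        return StepResult::Io;
                    }
                    self.state = WriteState::BalanceNonRoot;
                }
                WriteState::BalanceNonRoot => self.state = WriteState::Finish,
                WriteState::Finish => {
                    self.state = WriteState::Start;
                    return StepResult::Done;
                }
            }
        }
    }
}

fn main() {
    let mut cursor = Cursor { state: WriteState::Start, io_pending: true };
    // First call hits pending I/O and suspends; second call runs to completion.
    assert!(matches!(cursor.step(), StepResult::Io));
    assert!(matches!(cursor.step(), StepResult::Done));
}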

View File

@@ -104,12 +104,9 @@ fn prologue<'a>(
let mut program = ProgramBuilder::new();
let init_label = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::Init {
target_pc: init_label,
},
init_label,
);
program.emit_insn(Insn::Init {
target_pc: init_label,
});
let start_offset = program.offset();
@@ -151,8 +148,6 @@ fn epilogue(
target_pc: start_offset,
});
program.resolve_deferred_labels();
Ok(())
}
@@ -218,12 +213,9 @@ pub fn emit_query<'a>(
let after_main_loop_label = program.allocate_label();
t_ctx.label_main_loop_end = Some(after_main_loop_label);
if plan.contains_constant_false_condition {
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: after_main_loop_label,
},
after_main_loop_label,
);
program.emit_insn(Insn::Goto {
target_pc: after_main_loop_label,
});
}
// Allocate registers for result columns
@@ -281,12 +273,9 @@ fn emit_program_for_delete(
// No rows will be read from source table loops if there is a constant false condition eg. WHERE 0
let after_main_loop_label = program.allocate_label();
if plan.contains_constant_false_condition {
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: after_main_loop_label,
},
after_main_loop_label,
);
program.emit_insn(Insn::Goto {
target_pc: after_main_loop_label,
});
}
// Initialize cursors and other resources needed for query execution
@@ -356,13 +345,10 @@ fn emit_delete_insns<'a>(
dest: limit_reg,
});
program.mark_last_insn_constant();
program.emit_insn_with_label_dependency(
Insn::DecrJumpZero {
reg: limit_reg,
target_pc: t_ctx.label_main_loop_end.unwrap(),
},
t_ctx.label_main_loop_end.unwrap(),
)
program.emit_insn(Insn::DecrJumpZero {
reg: limit_reg,
target_pc: t_ctx.label_main_loop_end.unwrap(),
})
}
Ok(())
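The pattern in this and the following hunks is the same everywhere: emit_insn_with_label_dependency(insn, label) collapses to a plain emit_insn(insn), which implies the builder can now recognize unresolved branch targets inside the instruction itself and patch them when the program is built. One plausible, simplified shape for that mechanism is sketched below; the names mirror the diff, but the implementation is guessed, not Limbo's actual ProgramBuilder.

// One plausible shape for the change (not Limbo's actual code): branch targets carry
// either a resolved offset or a label, so the builder no longer needs the caller to
// declare label dependencies explicitly when emitting an instruction.
#[derive(Debug, Clone, Copy, PartialEq)]
enum BranchOffset {
    Label(u32),
    Offset(u32),
}

#[derive(Debug)]
enum Insn {
    Goto { target_pc: BranchOffset },
    Halt,
}

#[derive(Default)]
struct ProgramBuilder {
    insns: Vec<Insn>,
    next_label: u32,
    resolved: std::collections::HashMap<u32, u32>, // label -> instruction offset
}

impl ProgramBuilder {
    fn allocate_label(&mut self) -> BranchOffset {
        self.next_label += 1;
        BranchOffset::Label(self.next_label)
    }
    fn offset(&self) -> u32 {
        self.insns.len() as u32
    }
    fn emit_insn(&mut self, insn: Insn) {
        self.insns.push(insn);
    }
    fn resolve_label(&mut self, label: BranchOffset, to: u32) {
        if let BranchOffset::Label(id) = label {
            self.resolved.insert(id, to);
        }
    }
    /// Patch every remaining label into a concrete offset at build time.
    fn build(mut self) -> Vec<Insn> {
        for insn in self.insns.iter_mut() {
            if let Insn::Goto { target_pc: BranchOffset::Label(id) } = insn {
                let to = *self.resolved.get(id).expect("unresolved label");
                *insn = Insn::Goto { target_pc: BranchOffset::Offset(to) };
            }
        }
        self.insns
    }
}

fn main() {
    let mut b = ProgramBuilder::default();
    let end = b.allocate_label();
    b.emit_insn(Insn::Goto { target_pc: end });
    b.emit_insn(Insn::Halt);
    // The Halt just emitted is the label's target.
    b.resolve_label(end, b.offset() - 1);
    println!("{:?}", b.build());
}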

View File

@@ -13,11 +13,52 @@ use crate::Result;
use super::emitter::Resolver;
use super::plan::{TableReference, TableReferenceType};
#[derive(Default, Debug, Clone, Copy)]
#[derive(Debug, Clone, Copy)]
pub struct ConditionMetadata {
pub jump_if_condition_is_true: bool,
pub jump_target_when_true: BranchOffset,
pub jump_target_when_false: BranchOffset,
pub parent_op: Option<ast::Operator>,
}
fn emit_cond_jump(program: &mut ProgramBuilder, cond_meta: ConditionMetadata, reg: usize) {
if cond_meta.jump_if_condition_is_true {
program.emit_insn(Insn::If {
reg,
target_pc: cond_meta.jump_target_when_true,
null_reg: reg,
});
} else {
program.emit_insn(Insn::IfNot {
reg,
target_pc: cond_meta.jump_target_when_false,
null_reg: reg,
});
}
}
macro_rules! emit_cmp_insn {
(
$program:expr,
$cond:expr,
$op_true:ident,
$op_false:ident,
$lhs:expr,
$rhs:expr
) => {{
if $cond.jump_if_condition_is_true {
$program.emit_insn(Insn::$op_true {
lhs: $lhs,
rhs: $rhs,
target_pc: $cond.jump_target_when_true,
});
} else {
$program.emit_insn(Insn::$op_false {
lhs: $lhs,
rhs: $rhs,
target_pc: $cond.jump_target_when_false,
});
}
}};
}
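Both emit_cond_jump and emit_cmp_insn! encode the same trick: when the caller wants to jump on a false condition, emit the inverted comparison aimed at jump_target_when_false instead of negating the result. A tiny sketch of the inversion table that the macro's ($op_true, $op_false) pairs rely on (illustrative enum, not the VM's instruction set):

// Sketch of the inversion the macro relies on: jumping to the "false" target on the
// opposite comparison is equivalent to jumping to the "true" target on the original one.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Cmp {
    Gt,
    Ge,
    Lt,
    Le,
    Eq,
    Ne,
}

fn inverse(op: Cmp) -> Cmp {
    match op {
        Cmp::Gt => Cmp::Le,
        Cmp::Ge => Cmp::Lt,
        Cmp::Lt => Cmp::Ge,
        Cmp::Le => Cmp::Gt,
        Cmp::Eq => Cmp::Ne,
        Cmp::Ne => Cmp::Eq,
    }
}

fn main() {
    // `WHERE a > b` with jump_if_condition_is_true == false emits the inverted
    // comparison (Le) targeting jump_target_when_false, exactly the pairing that
    // emit_cmp_insn!(program, cond, Gt, Le, lhs, rhs) expresses.
    assert_eq!(inverse(Cmp::Gt), Cmp::Le);
    assert_eq!(inverse(Cmp::Eq), Cmp::Ne);
}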
pub fn translate_condition_expr(
@@ -38,6 +79,8 @@ pub fn translate_condition_expr(
lhs,
ConditionMetadata {
jump_if_condition_is_true: false,
// Mark that the parent op for sub-expressions is AND
parent_op: Some(ast::Operator::And),
..condition_metadata
},
resolver,
@@ -46,170 +89,91 @@ pub fn translate_condition_expr(
program,
referenced_tables,
rhs,
condition_metadata,
ConditionMetadata {
parent_op: Some(ast::Operator::And),
..condition_metadata
},
resolver,
);
}
ast::Expr::Binary(lhs, ast::Operator::Or, rhs) => {
let jump_target_when_false = program.allocate_label();
let _ = translate_condition_expr(
program,
referenced_tables,
lhs,
ConditionMetadata {
// If the first condition is true, we don't need to evaluate the second condition.
if matches!(condition_metadata.parent_op, Some(ast::Operator::And)) {
// we are inside a bigger AND expression, so we do NOT jump to parent's 'true' if LHS or RHS is true.
// we only short-circuit the parent's false label if LHS and RHS are both false.
let local_true_label = program.allocate_label();
let local_false_label = program.allocate_label();
// evaluate LHS in normal OR fashion, short-circuit local if true
let lhs_metadata = ConditionMetadata {
jump_if_condition_is_true: true,
jump_target_when_true: local_true_label,
jump_target_when_false: local_false_label,
parent_op: Some(ast::Operator::Or),
};
translate_condition_expr(program, referenced_tables, lhs, lhs_metadata, resolver)?;
// if lhs was false, we land here:
program.resolve_label(local_false_label, program.offset());
// evaluate rhs with normal OR: short-circuit if true, go to local_true
let rhs_metadata = ConditionMetadata {
jump_if_condition_is_true: true,
jump_target_when_true: local_true_label,
jump_target_when_false: condition_metadata.jump_target_when_false,
// if rhs is also false => parent's false
parent_op: Some(ast::Operator::Or),
};
translate_condition_expr(program, referenced_tables, rhs, rhs_metadata, resolver)?;
// if we get here, both lhs+rhs are false: explicit jump to parent's false
program.emit_insn(Insn::Goto {
target_pc: condition_metadata.jump_target_when_false,
});
// local_true: we do not jump to parent's "true" label because the parent is AND,
// so we want to keep evaluating the rest
program.resolve_label(local_true_label, program.offset());
} else {
let jump_target_when_false = program.allocate_label();
let lhs_metadata = ConditionMetadata {
jump_if_condition_is_true: true,
jump_target_when_false,
parent_op: Some(ast::Operator::Or),
..condition_metadata
},
resolver,
);
program.resolve_label(jump_target_when_false, program.offset());
let _ = translate_condition_expr(
program,
referenced_tables,
rhs,
condition_metadata,
resolver,
);
};
translate_condition_expr(program, referenced_tables, lhs, lhs_metadata, resolver)?;
// if LHS was false, we land here:
program.resolve_label(jump_target_when_false, program.offset());
let rhs_metadata = ConditionMetadata {
parent_op: Some(ast::Operator::Or),
..condition_metadata
};
translate_condition_expr(program, referenced_tables, rhs, rhs_metadata, resolver)?;
}
}
ast::Expr::Binary(lhs, op, rhs) => {
let lhs_reg = program.alloc_register();
let _ = translate_expr(program, Some(referenced_tables), lhs, lhs_reg, resolver);
if let ast::Expr::Literal(_) = lhs.as_ref() {
program.mark_last_insn_constant()
}
let rhs_reg = program.alloc_register();
let _ = translate_expr(program, Some(referenced_tables), rhs, rhs_reg, resolver);
if let ast::Expr::Literal(_) = rhs.as_ref() {
program.mark_last_insn_constant()
}
let lhs_reg = translate_and_mark(program, Some(referenced_tables), lhs, resolver)?;
let rhs_reg = translate_and_mark(program, Some(referenced_tables), rhs, resolver)?;
match op {
ast::Operator::Greater => {
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Gt {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
)
} else {
program.emit_insn_with_label_dependency(
Insn::Le {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
)
}
emit_cmp_insn!(program, condition_metadata, Gt, Le, lhs_reg, rhs_reg)
}
ast::Operator::GreaterEquals => {
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Ge {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
)
} else {
program.emit_insn_with_label_dependency(
Insn::Lt {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
)
}
emit_cmp_insn!(program, condition_metadata, Ge, Lt, lhs_reg, rhs_reg)
}
ast::Operator::Less => {
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Lt {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
)
} else {
program.emit_insn_with_label_dependency(
Insn::Ge {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
)
}
emit_cmp_insn!(program, condition_metadata, Lt, Ge, lhs_reg, rhs_reg)
}
ast::Operator::LessEquals => {
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Le {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
)
} else {
program.emit_insn_with_label_dependency(
Insn::Gt {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
)
}
emit_cmp_insn!(program, condition_metadata, Le, Gt, lhs_reg, rhs_reg)
}
ast::Operator::Equals => {
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Eq {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
)
} else {
program.emit_insn_with_label_dependency(
Insn::Ne {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
)
}
emit_cmp_insn!(program, condition_metadata, Eq, Ne, lhs_reg, rhs_reg)
}
ast::Operator::NotEquals => {
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Ne {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
)
} else {
program.emit_insn_with_label_dependency(
Insn::Eq {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
)
}
emit_cmp_insn!(program, condition_metadata, Ne, Eq, lhs_reg, rhs_reg)
}
ast::Operator::Is => todo!(),
ast::Operator::IsNot => todo!(),
@@ -227,25 +191,7 @@ pub fn translate_condition_expr(
value: int_value,
dest: reg,
});
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::If {
reg,
target_pc: condition_metadata.jump_target_when_true,
null_reg: reg,
},
condition_metadata.jump_target_when_true,
)
} else {
program.emit_insn_with_label_dependency(
Insn::IfNot {
reg,
target_pc: condition_metadata.jump_target_when_false,
null_reg: reg,
},
condition_metadata.jump_target_when_false,
)
}
emit_cond_jump(program, condition_metadata, reg);
} else {
crate::bail_parse_error!("unsupported literal type in condition");
}
@@ -256,25 +202,7 @@ pub fn translate_condition_expr(
value: string.clone(),
dest: reg,
});
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::If {
reg,
target_pc: condition_metadata.jump_target_when_true,
null_reg: reg,
},
condition_metadata.jump_target_when_true,
)
} else {
program.emit_insn_with_label_dependency(
Insn::IfNot {
reg,
target_pc: condition_metadata.jump_target_when_false,
null_reg: reg,
},
condition_metadata.jump_target_when_false,
)
}
emit_cond_jump(program, condition_metadata, reg);
}
unimpl => todo!("literal {:?} not implemented", unimpl),
},
@@ -302,20 +230,14 @@ pub fn translate_condition_expr(
// Note that we are already breaking up our WHERE clauses into a series of terms at "AND" boundaries, so right now we won't be running into cases where jumping on true would be incorrect,
// but once we have e.g. parenthesization and more complex conditions, not having this 'if' here would introduce a bug.
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
);
program.emit_insn(Insn::Goto {
target_pc: condition_metadata.jump_target_when_true,
});
}
} else {
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
);
program.emit_insn(Insn::Goto {
target_pc: condition_metadata.jump_target_when_false,
});
}
return Ok(());
}
@@ -349,35 +271,26 @@ pub fn translate_condition_expr(
translate_expr(program, Some(referenced_tables), expr, rhs_reg, resolver)?;
// If this is not the last condition, we need to jump to the 'jump_target_when_true' label if the condition is true.
if !last_condition {
program.emit_insn_with_label_dependency(
Insn::Eq {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: jump_target_when_true,
},
jump_target_when_true,
);
program.emit_insn(Insn::Eq {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: jump_target_when_true,
});
} else {
// If this is the last condition, we need to jump to the 'jump_target_when_false' label if there is no match.
program.emit_insn_with_label_dependency(
Insn::Ne {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
);
program.emit_insn(Insn::Ne {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
});
}
}
// If we got here, then the last condition was a match, so we jump to the 'jump_target_when_true' label if 'jump_if_condition_is_true'.
// If not, we can just fall through without emitting an unnecessary instruction.
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
);
program.emit_insn(Insn::Goto {
target_pc: condition_metadata.jump_target_when_true,
});
}
} else {
// If it's a NOT IN expression, we need to jump to the 'jump_target_when_false' label if any of the conditions are true.
@@ -385,24 +298,18 @@ pub fn translate_condition_expr(
let rhs_reg = program.alloc_register();
let _ =
translate_expr(program, Some(referenced_tables), expr, rhs_reg, resolver)?;
program.emit_insn_with_label_dependency(
Insn::Eq {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
},
condition_metadata.jump_target_when_false,
);
program.emit_insn(Insn::Eq {
lhs: lhs_reg,
rhs: rhs_reg,
target_pc: condition_metadata.jump_target_when_false,
});
}
// If we got here, then none of the conditions were a match, so we jump to the 'jump_target_when_true' label if 'jump_if_condition_is_true'.
// If not, we can just fall through without emitting an unnecessary instruction.
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: condition_metadata.jump_target_when_true,
},
condition_metadata.jump_target_when_true,
);
program.emit_insn(Insn::Goto {
target_pc: condition_metadata.jump_target_when_true,
});
}
}
@@ -421,18 +328,8 @@ pub fn translate_condition_expr(
match op {
ast::LikeOperator::Like | ast::LikeOperator::Glob => {
let pattern_reg = program.alloc_register();
let column_reg = program.alloc_register();
let mut constant_mask = 0;
let _ = translate_expr(
program,
Some(referenced_tables),
lhs,
column_reg,
resolver,
)?;
if let ast::Expr::Literal(_) = lhs.as_ref() {
program.mark_last_insn_constant();
}
let _ = translate_and_mark(program, Some(referenced_tables), lhs, resolver);
let _ = translate_expr(
program,
Some(referenced_tables),
@@ -440,7 +337,7 @@ pub fn translate_condition_expr(
pattern_reg,
resolver,
)?;
if let ast::Expr::Literal(_) = rhs.as_ref() {
if matches!(rhs.as_ref(), ast::Expr::Literal(_)) {
program.mark_last_insn_constant();
constant_mask = 1;
}
@@ -463,56 +360,34 @@ pub fn translate_condition_expr(
ast::LikeOperator::Regexp => todo!(),
}
if !*not {
if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::If {
reg: cur_reg,
target_pc: condition_metadata.jump_target_when_true,
null_reg: cur_reg,
},
condition_metadata.jump_target_when_true,
);
} else {
program.emit_insn_with_label_dependency(
Insn::IfNot {
reg: cur_reg,
target_pc: condition_metadata.jump_target_when_false,
null_reg: cur_reg,
},
condition_metadata.jump_target_when_false,
);
}
emit_cond_jump(program, condition_metadata, cur_reg);
} else if condition_metadata.jump_if_condition_is_true {
program.emit_insn_with_label_dependency(
Insn::IfNot {
reg: cur_reg,
target_pc: condition_metadata.jump_target_when_true,
null_reg: cur_reg,
},
condition_metadata.jump_target_when_true,
);
program.emit_insn(Insn::IfNot {
reg: cur_reg,
target_pc: condition_metadata.jump_target_when_true,
null_reg: cur_reg,
});
} else {
program.emit_insn_with_label_dependency(
Insn::If {
reg: cur_reg,
target_pc: condition_metadata.jump_target_when_false,
null_reg: cur_reg,
},
condition_metadata.jump_target_when_false,
);
program.emit_insn(Insn::If {
reg: cur_reg,
target_pc: condition_metadata.jump_target_when_false,
null_reg: cur_reg,
});
}
}
ast::Expr::Parenthesized(exprs) => {
// TODO: this is probably not correct; multiple expressions in a parenthesized expression
// are reserved for special cases like `(a, b) IN ((1, 2), (3, 4))`.
for expr in exprs {
if exprs.len() == 1 {
let _ = translate_condition_expr(
program,
referenced_tables,
expr,
&exprs[0],
condition_metadata,
resolver,
);
} else {
crate::bail_parse_error!(
"parenthesized condtional should have exactly one expression"
);
}
}
_ => todo!("op {:?} not implemented", expr),
@@ -707,23 +582,17 @@ pub fn translate_expr(
translate_expr(program, referenced_tables, when_expr, expr_reg, resolver)?;
match base_reg {
// CASE 1 WHEN 0 THEN 0 ELSE 1 becomes 1==0, Ne branch to next clause
Some(base_reg) => program.emit_insn_with_label_dependency(
Insn::Ne {
lhs: base_reg,
rhs: expr_reg,
target_pc: next_case_label,
},
next_case_label,
),
Some(base_reg) => program.emit_insn(Insn::Ne {
lhs: base_reg,
rhs: expr_reg,
target_pc: next_case_label,
}),
// CASE WHEN 0 THEN 0 ELSE 1 becomes ifnot 0 branch to next clause
None => program.emit_insn_with_label_dependency(
Insn::IfNot {
reg: expr_reg,
target_pc: next_case_label,
null_reg: 1,
},
next_case_label,
),
None => program.emit_insn(Insn::IfNot {
reg: expr_reg,
target_pc: next_case_label,
null_reg: 1,
}),
};
// THEN...
translate_expr(
@@ -733,12 +602,9 @@ pub fn translate_expr(
target_register,
resolver,
)?;
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: return_label,
},
return_label,
);
program.emit_insn(Insn::Goto {
target_pc: return_label,
});
// This becomes either the next WHEN, or in the last WHEN/THEN, we're
// assured to have at least one instruction corresponding to the ELSE immediately follow.
program.preassign_label_to_next_insn(next_case_label);
@@ -879,7 +745,7 @@ pub fn translate_expr(
});
Ok(target_register)
}
JsonFunc::JsonArrayLength => {
JsonFunc::JsonArrayLength | JsonFunc::JsonType => {
let args = if let Some(args) = args {
if args.len() > 2 {
crate::bail_parse_error!(
@@ -925,9 +791,9 @@ pub fn translate_expr(
unreachable!("this is always ast::Expr::Cast")
}
ScalarFunc::Changes => {
if let Some(_) = args {
if args.is_some() {
crate::bail_parse_error!(
"{} fucntion with more than 0 arguments",
"{} function with more than 0 arguments",
srf
);
}
@@ -984,13 +850,10 @@ pub fn translate_expr(
resolver,
)?;
if index < args.len() - 1 {
program.emit_insn_with_label_dependency(
Insn::NotNull {
reg,
target_pc: label_coalesce_end,
},
label_coalesce_end,
);
program.emit_insn(Insn::NotNull {
reg,
target_pc: label_coalesce_end,
});
}
}
program.preassign_label_to_next_insn(label_coalesce_end);
@@ -1085,7 +948,7 @@ pub fn translate_expr(
)?;
program.emit_insn(Insn::NotNull {
reg: temp_reg,
target_pc: program.offset() + 2,
target_pc: program.offset().add(2u32),
});
translate_expr(
@@ -1120,14 +983,11 @@ pub fn translate_expr(
resolver,
)?;
let jump_target_when_false = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::IfNot {
reg: temp_reg,
target_pc: jump_target_when_false,
null_reg: 1,
},
jump_target_when_false,
);
program.emit_insn(Insn::IfNot {
reg: temp_reg,
target_pc: jump_target_when_false,
null_reg: 1,
});
translate_expr(
program,
referenced_tables,
@@ -1136,12 +996,9 @@ pub fn translate_expr(
resolver,
)?;
let jump_target_result = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: jump_target_result,
},
jump_target_result,
);
program.emit_insn(Insn::Goto {
target_pc: jump_target_result,
});
program.resolve_label(jump_target_when_false, program.offset());
translate_expr(
program,
@@ -1169,12 +1026,8 @@ pub fn translate_expr(
);
};
for arg in args {
let reg = program.alloc_register();
let _ =
translate_expr(program, referenced_tables, arg, reg, resolver)?;
if let ast::Expr::Literal(_) = arg {
program.mark_last_insn_constant()
}
translate_and_mark(program, referenced_tables, arg, resolver);
}
program.emit_insn(Insn::Function {
// Only constant patterns for LIKE are supported currently, so this
@@ -1212,12 +1065,11 @@ pub fn translate_expr(
srf.to_string()
);
};
let regs = program.alloc_register();
translate_expr(program, referenced_tables, &args[0], regs, resolver)?;
let reg =
translate_and_mark(program, referenced_tables, &args[0], resolver)?;
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: regs,
start_reg: reg,
dest: target_register,
func: func_ctx,
});
@@ -1243,12 +1095,10 @@ pub fn translate_expr(
if let Some(args) = args {
for arg in args.iter() {
// register containing result of each argument expression
let target_reg = program.alloc_register();
_ = translate_expr(
let _ = translate_and_mark(
program,
referenced_tables,
arg,
target_reg,
resolver,
)?;
}
@@ -1280,15 +1130,14 @@ pub fn translate_expr(
let str_reg = program.alloc_register();
let start_reg = program.alloc_register();
let length_reg = program.alloc_register();
translate_expr(
let str_reg = translate_expr(
program,
referenced_tables,
&args[0],
str_reg,
resolver,
)?;
translate_expr(
let _ = translate_expr(
program,
referenced_tables,
&args[1],
@@ -1304,7 +1153,6 @@ pub fn translate_expr(
resolver,
)?;
}
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: str_reg,
@@ -1324,8 +1172,8 @@ pub fn translate_expr(
} else {
crate::bail_parse_error!("hex function with no arguments",);
};
let regs = program.alloc_register();
translate_expr(program, referenced_tables, &args[0], regs, resolver)?;
let regs =
translate_and_mark(program, referenced_tables, &args[0], resolver)?;
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: regs,
@@ -1365,12 +1213,10 @@ pub fn translate_expr(
if let Some(args) = args {
for arg in args.iter() {
// register containing result of each argument expression
let target_reg = program.alloc_register();
_ = translate_expr(
let _ = translate_and_mark(
program,
referenced_tables,
arg,
target_reg,
resolver,
)?;
}
@@ -1384,7 +1230,7 @@ pub fn translate_expr(
Ok(target_register)
}
ScalarFunc::TotalChanges => {
if let Some(_) = args {
if args.is_some() {
crate::bail_parse_error!(
"{} fucntion with more than 0 arguments",
srf.to_string()
@@ -1420,11 +1266,7 @@ pub fn translate_expr(
};
for arg in args.iter() {
let reg = program.alloc_register();
translate_expr(program, referenced_tables, arg, reg, resolver)?;
if let ast::Expr::Literal(_) = arg {
program.mark_last_insn_constant();
}
translate_and_mark(program, referenced_tables, arg, resolver)?;
}
program.emit_insn(Insn::Function {
constant_mask: 0,
@@ -1446,12 +1288,7 @@ pub fn translate_expr(
crate::bail_parse_error!("min function with no arguments");
};
for arg in args {
let reg = program.alloc_register();
let _ =
translate_expr(program, referenced_tables, arg, reg, resolver)?;
if let ast::Expr::Literal(_) = arg {
program.mark_last_insn_constant()
}
translate_and_mark(program, referenced_tables, arg, resolver)?;
}
program.emit_insn(Insn::Function {
@@ -1474,12 +1311,7 @@ pub fn translate_expr(
crate::bail_parse_error!("max function with no arguments");
};
for arg in args {
let reg = program.alloc_register();
let _ =
translate_expr(program, referenced_tables, arg, reg, resolver)?;
if let ast::Expr::Literal(_) = arg {
program.mark_last_insn_constant()
}
translate_and_mark(program, referenced_tables, arg, resolver)?;
}
program.emit_insn(Insn::Function {
@@ -1515,7 +1347,7 @@ pub fn translate_expr(
resolver,
)?;
let second_reg = program.alloc_register();
translate_expr(
let _ = translate_expr(
program,
referenced_tables,
&args[1],
@@ -1566,34 +1398,30 @@ pub fn translate_expr(
srf.to_string()
);
};
let str_reg = program.alloc_register();
let pattern_reg = program.alloc_register();
let replacement_reg = program.alloc_register();
translate_expr(
let _ = translate_expr(
program,
referenced_tables,
&args[0],
str_reg,
resolver,
)?;
translate_expr(
let _ = translate_expr(
program,
referenced_tables,
&args[1],
pattern_reg,
resolver,
)?;
translate_expr(
let _ = translate_expr(
program,
referenced_tables,
&args[2],
replacement_reg,
resolver,
)?;
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: str_reg,
@@ -1660,12 +1488,12 @@ pub fn translate_expr(
};
let mut start_reg = None;
if let Some(arg) = args.first() {
let reg = program.alloc_register();
start_reg = Some(reg);
translate_expr(program, referenced_tables, arg, reg, resolver)?;
if let ast::Expr::Literal(_) = arg {
program.mark_last_insn_constant()
}
start_reg = Some(translate_and_mark(
program,
referenced_tables,
arg,
resolver,
)?);
}
program.emit_insn(Insn::Function {
constant_mask: 0,
@@ -1707,10 +1535,8 @@ pub fn translate_expr(
crate::bail_parse_error!("{} function with no arguments", math_func);
};
let reg = program.alloc_register();
translate_expr(program, referenced_tables, &args[0], reg, resolver)?;
let reg =
translate_and_mark(program, referenced_tables, &args[0], resolver)?;
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: reg,
@@ -1732,20 +1558,12 @@ pub fn translate_expr(
} else {
crate::bail_parse_error!("{} function with no arguments", math_func);
};
let reg1 = program.alloc_register();
let _ =
translate_expr(program, referenced_tables, &args[0], reg1, resolver)?;
let reg2 = program.alloc_register();
translate_expr(program, referenced_tables, &args[0], reg1, resolver)?;
if let ast::Expr::Literal(_) = &args[0] {
program.mark_last_insn_constant();
}
translate_expr(program, referenced_tables, &args[1], reg2, resolver)?;
if let ast::Expr::Literal(_) = &args[1] {
program.mark_last_insn_constant();
}
let _ =
translate_expr(program, referenced_tables, &args[1], reg2, resolver)?;
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: target_register + 1,
@@ -1769,7 +1587,6 @@ pub fn translate_expr(
};
let regs = program.alloc_registers(args.len());
for (i, arg) in args.iter().enumerate() {
translate_expr(program, referenced_tables, arg, regs + i, resolver)?;
}
@@ -2016,7 +1833,6 @@ fn translate_variable_sized_function_parameter_list(
for arg in args.iter() {
translate_expr(program, referenced_tables, arg, current_reg, resolver)?;
current_reg += 1;
}
@@ -2033,7 +1849,7 @@ fn wrap_eval_jump_expr(
value: 1, // emit True by default
dest: target_register,
});
program.emit_insn_with_label_dependency(insn, if_true_label);
program.emit_insn(insn);
program.emit_insn(Insn::Integer {
value: 0, // emit False if we reach this point (no jump)
dest: target_register,
@@ -2049,6 +1865,20 @@ pub fn maybe_apply_affinity(col_type: Type, target_register: usize, program: &mu
}
}
pub fn translate_and_mark(
program: &mut ProgramBuilder,
referenced_tables: Option<&[TableReference]>,
expr: &ast::Expr,
resolver: &Resolver,
) -> Result<usize> {
let target_register = program.alloc_register();
translate_expr(program, referenced_tables, expr, target_register, resolver)?;
if matches!(expr, ast::Expr::Literal(_)) {
program.mark_last_insn_constant();
}
Ok(target_register)
}
/// Get an appropriate name for an expression.
/// If the query provides an alias (e.g. `SELECT a AS b FROM t`), use that (e.g. `b`).
/// If the expression is a column from a table, use the column name (e.g. `a`).

View File

@@ -93,13 +93,10 @@ pub fn init_group_by(
program.add_comment(program.offset(), "go to clear accumulator subroutine");
let reg_subrtn_acc_clear_return_offset = program.alloc_register();
program.emit_insn_with_label_dependency(
Insn::Gosub {
target_pc: label_subrtn_acc_clear,
return_reg: reg_subrtn_acc_clear_return_offset,
},
label_subrtn_acc_clear,
);
program.emit_insn(Insn::Gosub {
target_pc: label_subrtn_acc_clear,
return_reg: reg_subrtn_acc_clear_return_offset,
});
t_ctx.reg_agg_start = Some(reg_agg_exprs_start);
@@ -187,15 +184,12 @@ pub fn emit_group_by<'a>(
});
// Sort the sorter based on the group by columns
program.emit_insn_with_label_dependency(
Insn::SorterSort {
cursor_id: sort_cursor,
pc_if_empty: label_grouping_loop_end,
},
label_grouping_loop_end,
);
program.emit_insn(Insn::SorterSort {
cursor_id: sort_cursor,
pc_if_empty: label_grouping_loop_end,
});
program.defer_label_resolution(label_grouping_loop_start, program.offset() as usize);
program.resolve_label(label_grouping_loop_start, program.offset());
// Read a row from the sorted data in the sorter into the pseudo cursor
program.emit_insn(Insn::SorterData {
cursor_id: sort_cursor,
@@ -229,14 +223,11 @@ pub fn emit_group_by<'a>(
"start new group if comparison is not equal",
);
// If we are at a new group, continue. If we are at the same group, jump to the aggregation step (i.e. accumulate more values into the aggregations)
program.emit_insn_with_label_dependency(
Insn::Jump {
target_pc_lt: program.offset() + 1,
target_pc_eq: agg_step_label,
target_pc_gt: program.offset() + 1,
},
agg_step_label,
);
program.emit_insn(Insn::Jump {
target_pc_lt: program.offset().add(1u32),
target_pc_eq: agg_step_label,
target_pc_gt: program.offset().add(1u32),
});
// New group, move current group by columns into the comparison register
program.emit_insn(Insn::Move {
@@ -249,32 +240,23 @@ pub fn emit_group_by<'a>(
program.offset(),
"check if ended group had data, and output if so",
);
program.emit_insn_with_label_dependency(
Insn::Gosub {
target_pc: label_subrtn_acc_output,
return_reg: reg_subrtn_acc_output_return_offset,
},
label_subrtn_acc_output,
);
program.emit_insn(Insn::Gosub {
target_pc: label_subrtn_acc_output,
return_reg: reg_subrtn_acc_output_return_offset,
});
program.add_comment(program.offset(), "check abort flag");
program.emit_insn_with_label_dependency(
Insn::IfPos {
reg: reg_abort_flag,
target_pc: label_group_by_end,
decrement_by: 0,
},
label_group_by_end,
);
program.emit_insn(Insn::IfPos {
reg: reg_abort_flag,
target_pc: label_group_by_end,
decrement_by: 0,
});
program.add_comment(program.offset(), "goto clear accumulator subroutine");
program.emit_insn_with_label_dependency(
Insn::Gosub {
target_pc: label_subrtn_acc_clear,
return_reg: reg_subrtn_acc_clear_return_offset,
},
label_subrtn_acc_clear,
);
program.emit_insn(Insn::Gosub {
target_pc: label_subrtn_acc_clear,
return_reg: reg_subrtn_acc_clear_return_offset,
});
// Accumulate the values into the aggregations
program.resolve_label(agg_step_label, program.offset());
@@ -299,14 +281,11 @@ pub fn emit_group_by<'a>(
program.offset(),
"don't emit group columns if continuing existing group",
);
program.emit_insn_with_label_dependency(
Insn::If {
target_pc: label_acc_indicator_set_flag_true,
reg: reg_data_in_acc_flag,
null_reg: 0, // unused in this case
},
label_acc_indicator_set_flag_true,
);
program.emit_insn(Insn::If {
target_pc: label_acc_indicator_set_flag_true,
reg: reg_data_in_acc_flag,
null_reg: 0, // unused in this case
});
// Read the group by columns for a finished group
for i in 0..group_by.exprs.len() {
@@ -326,32 +305,23 @@ pub fn emit_group_by<'a>(
dest: reg_data_in_acc_flag,
});
program.emit_insn_with_label_dependency(
Insn::SorterNext {
cursor_id: sort_cursor,
pc_if_next: label_grouping_loop_start,
},
label_grouping_loop_start,
);
program.emit_insn(Insn::SorterNext {
cursor_id: sort_cursor,
pc_if_next: label_grouping_loop_start,
});
program.resolve_label(label_grouping_loop_end, program.offset());
program.add_comment(program.offset(), "emit row for final group");
program.emit_insn_with_label_dependency(
Insn::Gosub {
target_pc: label_subrtn_acc_output,
return_reg: reg_subrtn_acc_output_return_offset,
},
label_subrtn_acc_output,
);
program.emit_insn(Insn::Gosub {
target_pc: label_subrtn_acc_output,
return_reg: reg_subrtn_acc_output_return_offset,
});
program.add_comment(program.offset(), "group by finished");
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: label_group_by_end,
},
label_group_by_end,
);
program.emit_insn(Insn::Goto {
target_pc: label_group_by_end,
});
program.emit_insn(Insn::Integer {
value: 1,
dest: reg_abort_flag,
@@ -363,19 +333,13 @@ pub fn emit_group_by<'a>(
program.resolve_label(label_subrtn_acc_output, program.offset());
program.add_comment(program.offset(), "output group by row subroutine start");
program.emit_insn_with_label_dependency(
Insn::IfPos {
reg: reg_data_in_acc_flag,
target_pc: label_agg_final,
decrement_by: 0,
},
label_agg_final,
);
program.emit_insn(Insn::IfPos {
reg: reg_data_in_acc_flag,
target_pc: label_agg_final,
decrement_by: 0,
});
let group_by_end_without_emitting_row_label = program.allocate_label();
program.defer_label_resolution(
group_by_end_without_emitting_row_label,
program.offset() as usize,
);
program.resolve_label(group_by_end_without_emitting_row_label, program.offset());
program.emit_insn(Insn::Return {
return_reg: reg_subrtn_acc_output_return_offset,
});
@@ -417,7 +381,8 @@ pub fn emit_group_by<'a>(
ConditionMetadata {
jump_if_condition_is_true: false,
jump_target_when_false: group_by_end_without_emitting_row_label,
jump_target_when_true: i64::MAX, // unused
jump_target_when_true: BranchOffset::Placeholder, // not used. FIXME: this is a bug. HAVING can have e.g. HAVING a OR b.
parent_op: None,
},
&t_ctx.resolver,
)?;
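The accumulator output and clear blocks above are driven by Gosub/Return, a minimal subroutine convention: Gosub stores the address of the instruction after it in return_reg and jumps, and Return jumps back to that stored address. A toy interpreter loop showing just that convention (invented instruction set, not Limbo's VM):

// Minimal sketch of the Gosub/Return convention used above. Names are illustrative.
#[derive(Debug)]
enum Insn {
    Gosub { target_pc: usize, return_reg: usize },
    Return { return_reg: usize },
    Print(&'static str),
    Halt,
}

fn run(program: &[Insn]) {
    let mut regs = [0usize; 8];
    let mut pc = 0;
    loop {
        match &program[pc] {
            Insn::Gosub { target_pc, return_reg } => {
                regs[*return_reg] = pc + 1; // remember where to come back to
                pc = *target_pc;
            }
            Insn::Return { return_reg } => pc = regs[*return_reg],
            Insn::Print(msg) => {
                println!("{msg}");
                pc += 1;
            }
            Insn::Halt => break,
        }
    }
}

fn main() {
    // 0: call subroutine at 3, 1: print, 2: halt, 3..: subroutine body
    run(&[
        Insn::Gosub { target_pc: 3, return_reg: 0 },
        Insn::Print("back from subroutine"),
        Insn::Halt,
        Insn::Print("output group row"),
        Insn::Return { return_reg: 0 },
    ]);
}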

View File

@@ -7,6 +7,7 @@ use sqlite3_parser::ast::{
use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY;
use crate::util::normalize_ident;
use crate::vdbe::BranchOffset;
use crate::{
schema::{Column, Schema, Table},
storage::sqlite3_ondisk::DatabaseHeader,
@@ -40,12 +41,9 @@ pub fn translate_insert(
let mut program = ProgramBuilder::new();
let resolver = Resolver::new(syms);
let init_label = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::Init {
target_pc: init_label,
},
init_label,
);
program.emit_insn(Insn::Init {
target_pc: init_label,
});
let start_offset = program.offset();
// open table
@@ -104,7 +102,7 @@ pub fn translate_insert(
let record_register = program.alloc_register();
let halt_label = program.allocate_label();
let mut loop_start_offset = 0;
let mut loop_start_offset = BranchOffset::Offset(0);
let inserting_multiple_rows = values.len() > 1;
@@ -112,14 +110,11 @@ pub fn translate_insert(
if inserting_multiple_rows {
let yield_reg = program.alloc_register();
let jump_on_definition_label = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::InitCoroutine {
yield_reg,
jump_on_definition: jump_on_definition_label,
start_offset: program.offset() + 1,
},
jump_on_definition_label,
);
program.emit_insn(Insn::InitCoroutine {
yield_reg,
jump_on_definition: jump_on_definition_label,
start_offset: program.offset().add(1u32),
});
for value in values {
populate_column_registers(
@@ -133,7 +128,7 @@ pub fn translate_insert(
)?;
program.emit_insn(Insn::Yield {
yield_reg,
end_offset: 0,
end_offset: halt_label,
});
}
program.emit_insn(Insn::EndCoroutine { yield_reg });
@@ -149,13 +144,10 @@ pub fn translate_insert(
// FIXME: rollback is not implemented. E.g. if you insert 2 rows and one fails due to a unique constraint violation,
// the other row will still be inserted.
loop_start_offset = program.offset();
program.emit_insn_with_label_dependency(
Insn::Yield {
yield_reg,
end_offset: halt_label,
},
halt_label,
);
program.emit_insn(Insn::Yield {
yield_reg,
end_offset: halt_label,
});
} else {
// Single row - populate registers directly
program.emit_insn(Insn::OpenWriteAsync {
@@ -194,13 +186,10 @@ pub fn translate_insert(
program.emit_insn(Insn::SoftNull { reg });
}
// the user provided rowid value might itself be NULL. If it is, we create a new rowid on the next instruction.
program.emit_insn_with_label_dependency(
Insn::NotNull {
reg: rowid_reg,
target_pc: check_rowid_is_integer_label.unwrap(),
},
check_rowid_is_integer_label.unwrap(),
);
program.emit_insn(Insn::NotNull {
reg: rowid_reg,
target_pc: check_rowid_is_integer_label.unwrap(),
});
}
// Create new rowid if a) not provided by user or b) provided by user but is NULL
@@ -220,14 +209,11 @@ pub fn translate_insert(
// When the DB allocates it there is no need for separate uniqueness checks.
if has_user_provided_rowid {
let make_record_label = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::NotExists {
cursor: cursor_id,
rowid_reg,
target_pc: make_record_label,
},
make_record_label,
);
program.emit_insn(Insn::NotExists {
cursor: cursor_id,
rowid_reg,
target_pc: make_record_label,
});
let rowid_column_name = if let Some(index) = rowid_alias_index {
table.column_index_to_name(index).unwrap()
} else {
@@ -276,7 +262,6 @@ pub fn translate_insert(
program.emit_insn(Insn::Goto {
target_pc: start_offset,
});
program.resolve_deferred_labels();
Ok(program.build(database_header, connection))
}
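The multi-row branch builds a small coroutine: InitCoroutine records where the row-producing code begins, each Yield hands one populated row to the insertion loop, and the producer ends at EndCoroutine, with the final Yield's end_offset pointing at the halt label. The sketch below is a rough plain-Rust analogy for that control flow, with hypothetical rows; it is an analogy, not the bytecode.

// Rough analogy for the multi-row INSERT shape above (not the actual VM): a
// row-producing "coroutine" yields one row at a time, and the insertion loop pulls
// rows until the producer is exhausted.
fn main() {
    // Hypothetical rows for `INSERT INTO t VALUES (1, 'alice'), (2, 'bob')`.
    let rows = vec![(1, "alice"), (2, "bob")];

    // The "coroutine": evaluates one row's value expressions per resumption.
    let mut producer = rows.into_iter().map(|(id, name)| {
        // populate_column_registers(...) would run here in the real program
        (id, name.to_uppercase())
    });

    // The insertion loop: Yield -> got a row -> MakeRecord + InsertAsync, repeat.
    while let Some((id, name)) = producer.next() {
        println!("insert rowid={id} record={name}");
    }
    // Producer exhausted: control reaches the halt label in the real program.
}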

View File

@@ -194,7 +194,7 @@ pub fn open_loop(
// In case the subquery is an inner loop, it needs to be reinitialized on each iteration of the outer loop.
program.emit_insn(Insn::InitCoroutine {
yield_reg,
jump_on_definition: 0,
jump_on_definition: BranchOffset::Offset(0),
start_offset: coroutine_implementation_start,
});
let LoopLabels {
@@ -205,18 +205,15 @@ pub fn open_loop(
.labels_main_loop
.get(id)
.expect("subquery has no loop labels");
program.defer_label_resolution(loop_start, program.offset() as usize);
program.resolve_label(loop_start, program.offset());
// A subquery within the main loop of a parent query has no cursor, so instead of advancing the cursor,
// it emits a Yield which jumps back to the main loop of the subquery itself to retrieve the next row.
// When the subquery coroutine completes, this instruction jumps to the label at the top of the termination_label_stack,
// which in this case is the end of the Yield-Goto loop in the parent query.
program.emit_insn_with_label_dependency(
Insn::Yield {
yield_reg,
end_offset: loop_end,
},
loop_end,
);
program.emit_insn(Insn::Yield {
yield_reg,
end_offset: loop_end,
});
// These are predicates evaluated outside of the subquery,
// so they are translated here.
@@ -228,6 +225,7 @@ pub fn open_loop(
jump_if_condition_is_true: false,
jump_target_when_true,
jump_target_when_false: next,
parent_op: None,
};
translate_condition_expr(
program,
@@ -276,6 +274,7 @@ pub fn open_loop(
jump_if_condition_is_true: false,
jump_target_when_true,
jump_target_when_false,
parent_op: None,
};
for predicate in predicates.iter() {
translate_condition_expr(
@@ -291,10 +290,7 @@ pub fn open_loop(
if *outer {
let lj_meta = t_ctx.meta_left_joins.get(id).unwrap();
program.defer_label_resolution(
lj_meta.label_match_flag_set_true,
program.offset() as usize,
);
program.resolve_label(lj_meta.label_match_flag_set_true, program.offset());
program.emit_insn(Insn::Integer {
value: 1,
dest: lj_meta.reg_match_flag,
@@ -326,7 +322,7 @@ pub fn open_loop(
.labels_main_loop
.get(id)
.expect("scan has no loop labels");
program.emit_insn_with_label_dependency(
program.emit_insn(
if iter_dir
.as_ref()
.is_some_and(|dir| *dir == IterationDirection::Backwards)
@@ -341,9 +337,8 @@ pub fn open_loop(
pc_if_empty: loop_end,
}
},
loop_end,
);
program.defer_label_resolution(loop_start, program.offset() as usize);
program.resolve_label(loop_start, program.offset());
if let Some(preds) = predicates {
for expr in preds {
@@ -352,6 +347,7 @@ pub fn open_loop(
jump_if_condition_is_true: false,
jump_target_when_true,
jump_target_when_false: next,
parent_op: None,
};
translate_condition_expr(
program,
@@ -420,28 +416,25 @@ pub fn open_loop(
_ => unreachable!(),
}
// If we try to seek to a key that is not present in the table/index, we exit the loop entirely.
program.emit_insn_with_label_dependency(
match cmp_op {
ast::Operator::Equals | ast::Operator::GreaterEquals => Insn::SeekGE {
is_index: index_cursor_id.is_some(),
cursor_id: index_cursor_id.unwrap_or(table_cursor_id),
start_reg: cmp_reg,
num_regs: 1,
target_pc: loop_end,
},
ast::Operator::Greater
| ast::Operator::Less
| ast::Operator::LessEquals => Insn::SeekGT {
is_index: index_cursor_id.is_some(),
cursor_id: index_cursor_id.unwrap_or(table_cursor_id),
start_reg: cmp_reg,
num_regs: 1,
target_pc: loop_end,
},
_ => unreachable!(),
program.emit_insn(match cmp_op {
ast::Operator::Equals | ast::Operator::GreaterEquals => Insn::SeekGE {
is_index: index_cursor_id.is_some(),
cursor_id: index_cursor_id.unwrap_or(table_cursor_id),
start_reg: cmp_reg,
num_regs: 1,
target_pc: loop_end,
},
loop_end,
);
ast::Operator::Greater | ast::Operator::Less | ast::Operator::LessEquals => {
Insn::SeekGT {
is_index: index_cursor_id.is_some(),
cursor_id: index_cursor_id.unwrap_or(table_cursor_id),
start_reg: cmp_reg,
num_regs: 1,
target_pc: loop_end,
}
}
_ => unreachable!(),
});
if *cmp_op == ast::Operator::Less || *cmp_op == ast::Operator::LessEquals {
translate_expr(
program,
@@ -452,7 +445,7 @@ pub fn open_loop(
)?;
}
program.defer_label_resolution(loop_start, program.offset() as usize);
program.resolve_label(loop_start, program.offset());
// TODO: We are currently only handling ascending indexes.
// For conditions like index_key > 10, we have already seeked to the first key greater than 10, and can just scan forward.
// For conditions like index_key < 10, we are at the beginning of the index, and will scan forward and emit IdxGE(10) with a conditional jump to the end.
@@ -466,56 +459,44 @@ pub fn open_loop(
match cmp_op {
ast::Operator::Equals | ast::Operator::LessEquals => {
if let Some(index_cursor_id) = index_cursor_id {
program.emit_insn_with_label_dependency(
Insn::IdxGT {
cursor_id: index_cursor_id,
start_reg: cmp_reg,
num_regs: 1,
target_pc: loop_end,
},
loop_end,
);
program.emit_insn(Insn::IdxGT {
cursor_id: index_cursor_id,
start_reg: cmp_reg,
num_regs: 1,
target_pc: loop_end,
});
} else {
let rowid_reg = program.alloc_register();
program.emit_insn(Insn::RowId {
cursor_id: table_cursor_id,
dest: rowid_reg,
});
program.emit_insn_with_label_dependency(
Insn::Gt {
lhs: rowid_reg,
rhs: cmp_reg,
target_pc: loop_end,
},
loop_end,
);
program.emit_insn(Insn::Gt {
lhs: rowid_reg,
rhs: cmp_reg,
target_pc: loop_end,
});
}
}
ast::Operator::Less => {
if let Some(index_cursor_id) = index_cursor_id {
program.emit_insn_with_label_dependency(
Insn::IdxGE {
cursor_id: index_cursor_id,
start_reg: cmp_reg,
num_regs: 1,
target_pc: loop_end,
},
loop_end,
);
program.emit_insn(Insn::IdxGE {
cursor_id: index_cursor_id,
start_reg: cmp_reg,
num_regs: 1,
target_pc: loop_end,
});
} else {
let rowid_reg = program.alloc_register();
program.emit_insn(Insn::RowId {
cursor_id: table_cursor_id,
dest: rowid_reg,
});
program.emit_insn_with_label_dependency(
Insn::Ge {
lhs: rowid_reg,
rhs: cmp_reg,
target_pc: loop_end,
},
loop_end,
);
program.emit_insn(Insn::Ge {
lhs: rowid_reg,
rhs: cmp_reg,
target_pc: loop_end,
});
}
}
_ => {}
@@ -538,14 +519,11 @@ pub fn open_loop(
src_reg,
&t_ctx.resolver,
)?;
program.emit_insn_with_label_dependency(
Insn::SeekRowid {
cursor_id: table_cursor_id,
src_reg,
target_pc: next,
},
next,
);
program.emit_insn(Insn::SeekRowid {
cursor_id: table_cursor_id,
src_reg,
target_pc: next,
});
}
if let Some(predicates) = predicates {
for predicate in predicates.iter() {
@@ -554,6 +532,7 @@ pub fn open_loop(
jump_if_condition_is_true: false,
jump_target_when_true,
jump_target_when_false: next,
parent_op: None,
};
translate_condition_expr(
program,
@@ -748,12 +727,9 @@ pub fn close_loop(
// A subquery has no cursor to call NextAsync on, so it just emits a Goto
// to the Yield instruction, which in turn jumps back to the main loop of the subquery,
// so that the next row from the subquery can be read.
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: loop_labels.loop_start,
},
loop_labels.loop_start,
);
program.emit_insn(Insn::Goto {
target_pc: loop_labels.loop_start,
});
}
SourceOperator::Join {
id,
@@ -771,7 +747,7 @@ pub fn close_loop(
// If the left join match flag has been set to 1, we jump to the next row on the outer table,
// i.e. continue to the next row of t1 in our example.
program.resolve_label(lj_meta.label_match_flag_check_value, program.offset());
let jump_offset = program.offset() + 3;
let jump_offset = program.offset().add(3u32);
program.emit_insn(Insn::IfPos {
reg: lj_meta.reg_match_flag,
target_pc: jump_offset,
@@ -799,12 +775,9 @@ pub fn close_loop(
// and we will end up back in the IfPos instruction above, which will then
// check the match flag again, and since it is now 1, we will jump to the
// next row in the left table.
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: lj_meta.label_match_flag_set_true,
},
lj_meta.label_match_flag_set_true,
);
program.emit_insn(Insn::Goto {
target_pc: lj_meta.label_match_flag_set_true,
});
assert!(program.offset() == jump_offset);
}
@@ -830,21 +803,15 @@ pub fn close_loop(
.as_ref()
.is_some_and(|dir| *dir == IterationDirection::Backwards)
{
program.emit_insn_with_label_dependency(
Insn::PrevAwait {
cursor_id,
pc_if_next: loop_labels.loop_start,
},
loop_labels.loop_start,
);
program.emit_insn(Insn::PrevAwait {
cursor_id,
pc_if_next: loop_labels.loop_start,
});
} else {
program.emit_insn_with_label_dependency(
Insn::NextAwait {
cursor_id,
pc_if_next: loop_labels.loop_start,
},
loop_labels.loop_start,
);
program.emit_insn(Insn::NextAwait {
cursor_id,
pc_if_next: loop_labels.loop_start,
});
}
}
SourceOperator::Search {
@@ -866,13 +833,10 @@ pub fn close_loop(
};
program.emit_insn(Insn::NextAsync { cursor_id });
program.emit_insn_with_label_dependency(
Insn::NextAwait {
cursor_id,
pc_if_next: loop_labels.loop_start,
},
loop_labels.loop_start,
);
program.emit_insn(Insn::NextAwait {
cursor_id,
pc_if_next: loop_labels.loop_start,
});
}
SourceOperator::Nothing { .. } => {}
};
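The LEFT JOIN bookkeeping above boils down to a match flag per outer row: it is set whenever the inner loop finds a match, and if the inner loop finishes with the flag still clear, a NULL-padded row is emitted before moving on to the next outer row. The same logic in ordinary Rust, purely as an illustration:

// Plain-Rust sketch of the left-join match-flag logic in close_loop (illustrative only).
fn main() {
    let t1 = vec![1, 2, 3];
    let t2 = vec![2, 3, 4];

    for left in &t1 {
        let mut match_flag = false; // reg_match_flag = 0
        for right in &t2 {
            if left == right {
                match_flag = true; // label_match_flag_set_true
                println!("row: {left} {right}");
            }
        }
        // After the inner loop: if no match was found, emit the NULL-padded row once,
        // then take the same "next outer row" path that the flag check jumps to.
        if !match_flag {
            println!("row: {left} NULL");
        }
    }
}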

View File

@@ -388,12 +388,9 @@ fn translate_create_table(
if schema.get_table(tbl_name.name.0.as_str()).is_some() {
if if_not_exists {
let init_label = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::Init {
target_pc: init_label,
},
init_label,
);
program.emit_insn(Insn::Init {
target_pc: init_label,
});
let start_offset = program.offset();
program.emit_insn(Insn::Halt {
err_code: 0,
@@ -414,12 +411,9 @@ fn translate_create_table(
let parse_schema_label = program.allocate_label();
let init_label = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::Init {
target_pc: init_label,
},
init_label,
);
program.emit_insn(Insn::Init {
target_pc: init_label,
});
let start_offset = program.offset();
// TODO: ReadCookie
// TODO: If
@@ -544,12 +538,9 @@ fn translate_pragma(
) -> Result<Program> {
let mut program = ProgramBuilder::new();
let init_label = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::Init {
target_pc: init_label,
},
init_label,
);
program.emit_insn(Insn::Init {
target_pc: init_label,
});
let start_offset = program.offset();
let mut write = false;
match body {
@@ -581,7 +572,6 @@ fn translate_pragma(
program.emit_insn(Insn::Goto {
target_pc: start_offset,
});
program.resolve_deferred_labels();
Ok(program.build(database_header, connection))
}

View File

@@ -24,7 +24,7 @@ pub fn optimize_plan(plan: &mut Plan) -> Result<()> {
*/
fn optimize_select_plan(plan: &mut SelectPlan) -> Result<()> {
optimize_subqueries(&mut plan.source)?;
eliminate_between(&mut plan.source, &mut plan.where_clause)?;
rewrite_exprs_select(plan)?;
if let ConstantConditionEliminationResult::ImpossibleCondition =
eliminate_constants(&mut plan.source, &mut plan.where_clause)?
{
@@ -55,7 +55,7 @@ fn optimize_select_plan(plan: &mut SelectPlan) -> Result<()> {
}
fn optimize_delete_plan(plan: &mut DeletePlan) -> Result<()> {
eliminate_between(&mut plan.source, &mut plan.where_clause)?;
rewrite_exprs_delete(plan)?;
if let ConstantConditionEliminationResult::ImpossibleCondition =
eliminate_constants(&mut plan.source, &mut plan.where_clause)?
{
@@ -603,14 +603,45 @@ fn push_scan_direction(operator: &mut SourceOperator, direction: &Direction) {
}
}
fn eliminate_between(
operator: &mut SourceOperator,
where_clauses: &mut Option<Vec<ast::Expr>>,
) -> Result<()> {
if let Some(predicates) = where_clauses {
*predicates = predicates.drain(..).map(convert_between_expr).collect();
fn rewrite_exprs_select(plan: &mut SelectPlan) -> Result<()> {
rewrite_source_operator_exprs(&mut plan.source)?;
for rc in plan.result_columns.iter_mut() {
rewrite_expr(&mut rc.expr)?;
}
for agg in plan.aggregates.iter_mut() {
rewrite_expr(&mut agg.original_expr)?;
}
if let Some(predicates) = &mut plan.where_clause {
for expr in predicates {
rewrite_expr(expr)?;
}
}
if let Some(group_by) = &mut plan.group_by {
for expr in group_by.exprs.iter_mut() {
rewrite_expr(expr)?;
}
}
if let Some(order_by) = &mut plan.order_by {
for (expr, _) in order_by.iter_mut() {
rewrite_expr(expr)?;
}
}
Ok(())
}
fn rewrite_exprs_delete(plan: &mut DeletePlan) -> Result<()> {
rewrite_source_operator_exprs(&mut plan.source)?;
if let Some(predicates) = &mut plan.where_clause {
for expr in predicates {
rewrite_expr(expr)?;
}
}
Ok(())
}
fn rewrite_source_operator_exprs(operator: &mut SourceOperator) -> Result<()> {
match operator {
SourceOperator::Join {
left,
@@ -618,29 +649,37 @@ fn eliminate_between(
predicates,
..
} => {
eliminate_between(left, where_clauses)?;
eliminate_between(right, where_clauses)?;
rewrite_source_operator_exprs(left)?;
rewrite_source_operator_exprs(right)?;
if let Some(predicates) = predicates {
*predicates = predicates.drain(..).map(convert_between_expr).collect();
for expr in predicates.iter_mut() {
rewrite_expr(expr)?;
}
}
}
SourceOperator::Scan {
predicates: Some(preds),
..
} => {
*preds = preds.drain(..).map(convert_between_expr).collect();
}
SourceOperator::Search {
predicates: Some(preds),
..
} => {
*preds = preds.drain(..).map(convert_between_expr).collect();
}
_ => (),
}
Ok(())
Ok(())
}
SourceOperator::Scan { predicates, .. } | SourceOperator::Search { predicates, .. } => {
if let Some(predicates) = predicates {
for expr in predicates.iter_mut() {
rewrite_expr(expr)?;
}
}
Ok(())
}
SourceOperator::Subquery { predicates, .. } => {
if let Some(predicates) = predicates {
for expr in predicates.iter_mut() {
rewrite_expr(expr)?;
}
}
Ok(())
}
SourceOperator::Nothing { .. } => Ok(()),
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -735,9 +774,17 @@ impl Optimizable for ast::Expr {
rhs.check_index_scan(table_index, referenced_tables, available_indexes)?;
if rhs_index.is_some() {
// swap lhs and rhs
let swapped_operator = match *op {
ast::Operator::Equals => ast::Operator::Equals,
ast::Operator::Greater => ast::Operator::Less,
ast::Operator::GreaterEquals => ast::Operator::LessEquals,
ast::Operator::Less => ast::Operator::Greater,
ast::Operator::LessEquals => ast::Operator::GreaterEquals,
_ => unreachable!(),
};
let lhs_new = rhs.take_ownership();
let rhs_new = lhs.take_ownership();
*self = Self::Binary(Box::new(lhs_new), *op, Box::new(rhs_new));
*self = Self::Binary(Box::new(lhs_new), swapped_operator, Box::new(rhs_new));
return Ok(rhs_index);
}
Ok(None)
@@ -747,16 +794,6 @@ impl Optimizable for ast::Expr {
}
fn check_constant(&self) -> Result<Option<ConstantPredicate>> {
match self {
Self::Id(id) => {
// true and false are special constants that are effectively aliases for 1 and 0
if id.0.eq_ignore_ascii_case("true") {
return Ok(Some(ConstantPredicate::AlwaysTrue));
}
if id.0.eq_ignore_ascii_case("false") {
return Ok(Some(ConstantPredicate::AlwaysFalse));
}
Ok(None)
}
Self::Literal(lit) => match lit {
ast::Literal::Null => Ok(Some(ConstantPredicate::AlwaysFalse)),
ast::Literal::Numeric(b) => {
@@ -967,8 +1004,20 @@ pub fn try_extract_index_search_expression(
}
}
fn convert_between_expr(expr: ast::Expr) -> ast::Expr {
fn rewrite_expr(expr: &mut ast::Expr) -> Result<()> {
match expr {
ast::Expr::Id(id) => {
// Convert "true" and "false" to 1 and 0
if id.0.eq_ignore_ascii_case("true") {
*expr = ast::Expr::Literal(ast::Literal::Numeric(1.to_string()));
return Ok(());
}
if id.0.eq_ignore_ascii_case("false") {
*expr = ast::Expr::Literal(ast::Literal::Numeric(0.to_string()));
return Ok(());
}
Ok(())
}
ast::Expr::Between {
lhs,
not,
@@ -976,53 +1025,62 @@ fn convert_between_expr(expr: ast::Expr) -> ast::Expr {
end,
} => {
// Convert `y NOT BETWEEN x AND z` to `x > y OR y > z`
let (lower_op, upper_op) = if not {
let (lower_op, upper_op) = if *not {
(ast::Operator::Greater, ast::Operator::Greater)
} else {
// Convert `y BETWEEN x AND z` to `x <= y AND y <= z`
(ast::Operator::LessEquals, ast::Operator::LessEquals)
};
let lower_bound = ast::Expr::Binary(start, lower_op, lhs.clone());
let upper_bound = ast::Expr::Binary(lhs, upper_op, end);
rewrite_expr(start)?;
rewrite_expr(lhs)?;
rewrite_expr(end)?;
if not {
ast::Expr::Binary(
let start = start.take_ownership();
let lhs = lhs.take_ownership();
let end = end.take_ownership();
let lower_bound = ast::Expr::Binary(Box::new(start), lower_op, Box::new(lhs.clone()));
let upper_bound = ast::Expr::Binary(Box::new(lhs), upper_op, Box::new(end));
if *not {
*expr = ast::Expr::Binary(
Box::new(lower_bound),
ast::Operator::Or,
Box::new(upper_bound),
)
);
} else {
ast::Expr::Binary(
*expr = ast::Expr::Binary(
Box::new(lower_bound),
ast::Operator::And,
Box::new(upper_bound),
)
);
}
Ok(())
}
ast::Expr::Parenthesized(mut exprs) => {
ast::Expr::Parenthesized(exprs.drain(..).map(convert_between_expr).collect())
ast::Expr::Parenthesized(ref mut exprs) => {
for subexpr in exprs.iter_mut() {
rewrite_expr(subexpr)?;
}
let exprs = std::mem::take(exprs);
*expr = ast::Expr::Parenthesized(exprs);
Ok(())
}
// Process other expressions recursively
ast::Expr::Binary(lhs, op, rhs) => ast::Expr::Binary(
Box::new(convert_between_expr(*lhs)),
op,
Box::new(convert_between_expr(*rhs)),
),
ast::Expr::FunctionCall {
name,
distinctness,
args,
order_by,
filter_over,
} => ast::Expr::FunctionCall {
name,
distinctness,
args: args.map(|args| args.into_iter().map(convert_between_expr).collect()),
order_by,
filter_over,
},
_ => expr,
ast::Expr::Binary(lhs, _, rhs) => {
rewrite_expr(lhs)?;
rewrite_expr(rhs)?;
Ok(())
}
ast::Expr::FunctionCall { args, .. } => {
if let Some(args) = args {
for arg in args.iter_mut() {
rewrite_expr(arg)?;
}
}
Ok(())
}
_ => Ok(()),
}
}
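A small self-contained sketch of the two rewrites rewrite_expr performs above, using plain integers instead of AST nodes (between is a hypothetical helper, not part of the codebase): the identifiers true/false act as the literals 1/0, and BETWEEN / NOT BETWEEN expand into the AND / OR forms spelled out in the comments.
// Illustration only: the desugaring applied to concrete values.
fn between(y: i64, x: i64, z: i64, not: bool) -> bool {
    if not {
        // `y NOT BETWEEN x AND z`  =>  `x > y OR y > z`
        x > y || y > z
    } else {
        // `y BETWEEN x AND z`  =>  `x <= y AND y <= z`
        x <= y && y <= z
    }
}
fn main() {
    // true behaves as 1, so `id BETWEEN true AND 2` means `id BETWEEN 1 AND 2`.
    let true_lit = 1i64;
    assert!(between(1, true_lit, 2, false));
    assert!(between(2, true_lit, 2, false));
    assert!(!between(3, true_lit, 2, false));
    // NOT BETWEEN is the complement of BETWEEN over the same bounds.
    for y in -5i64..10 {
        assert_eq!(between(y, 1, 5, true), !between(y, 1, 5, false));
    }
}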

View File

@@ -110,15 +110,12 @@ pub fn emit_order_by(
num_fields: num_columns_in_sorter,
});
program.emit_insn_with_label_dependency(
Insn::SorterSort {
cursor_id: sort_cursor,
pc_if_empty: sort_loop_end_label,
},
sort_loop_end_label,
);
program.emit_insn(Insn::SorterSort {
cursor_id: sort_cursor,
pc_if_empty: sort_loop_end_label,
});
program.defer_label_resolution(sort_loop_start_label, program.offset() as usize);
program.resolve_label(sort_loop_start_label, program.offset());
program.emit_insn(Insn::SorterData {
cursor_id: sort_cursor,
dest_reg: reg_sorter_data,
@@ -140,13 +137,10 @@ pub fn emit_order_by(
emit_result_row_and_limit(program, t_ctx, plan, start_reg, Some(sort_loop_end_label))?;
program.emit_insn_with_label_dependency(
Insn::SorterNext {
cursor_id: sort_cursor,
pc_if_next: sort_loop_start_label,
},
sort_loop_start_label,
);
program.emit_insn(Insn::SorterNext {
cursor_id: sort_cursor,
pc_if_next: sort_loop_start_label,
});
program.resolve_label(sort_loop_end_label, program.offset());

View File

@@ -113,18 +113,18 @@ pub fn bind_column_references(
crate::bail_parse_error!("Column {} is ambiguous", id.0);
}
let col = table.columns().get(col_idx.unwrap()).unwrap();
match_result = Some((tbl_idx, col_idx.unwrap(), col.primary_key));
match_result = Some((tbl_idx, col_idx.unwrap(), col.is_rowid_alias));
}
}
if match_result.is_none() {
crate::bail_parse_error!("Column {} not found", id.0);
}
let (tbl_idx, col_idx, is_primary_key) = match_result.unwrap();
let (tbl_idx, col_idx, is_rowid_alias) = match_result.unwrap();
*expr = ast::Expr::Column {
database: None, // TODO: support different databases
table: tbl_idx,
column: col_idx,
is_rowid_alias: is_primary_key,
is_rowid_alias,
};
Ok(())
}
@@ -294,7 +294,7 @@ fn parse_from_clause_table(
};
subplan.query_type = SelectQueryType::Subquery {
yield_reg: usize::MAX, // will be set later in bytecode emission
coroutine_implementation_start: BranchOffset::MAX, // will be set later in bytecode emission
coroutine_implementation_start: BranchOffset::Placeholder, // will be set later in bytecode emission
};
let identifier = maybe_alias
.map(|a| match a {
@@ -544,6 +544,14 @@ fn parse_join(
pub fn parse_limit(limit: Limit) -> Option<usize> {
if let Expr::Literal(ast::Literal::Numeric(n)) = limit.expr {
n.parse().ok()
} else if let Expr::Id(id) = limit.expr {
if id.0.eq_ignore_ascii_case("true") {
Some(1)
} else if id.0.eq_ignore_ascii_case("false") {
Some(0)
} else {
None
}
} else {
None
}
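A rough sketch of the rule added above (hypothetical standalone function, assuming the SQLite-compatible behaviour exercised by the select-limit-true / select-limit-false tests further down): a boolean identifier in a LIMIT clause is treated as the integer 1 or 0.
fn parse_limit(expr: &str) -> Option<usize> {
    if let Ok(n) = expr.parse::<usize>() {
        Some(n)
    } else if expr.eq_ignore_ascii_case("true") {
        Some(1) // LIMIT true behaves like LIMIT 1
    } else if expr.eq_ignore_ascii_case("false") {
        Some(0) // LIMIT false behaves like LIMIT 0
    } else {
        None
    }
}
fn main() {
    assert_eq!(parse_limit("10"), Some(10));
    assert_eq!(parse_limit("TRUE"), Some(1));
    assert_eq!(parse_limit("false"), Some(0));
    assert_eq!(parse_limit("foo"), None);
}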

View File

@@ -54,7 +54,7 @@ pub fn emit_result_row_and_limit(
SelectQueryType::Subquery { yield_reg, .. } => {
program.emit_insn(Insn::Yield {
yield_reg: *yield_reg,
end_offset: 0,
end_offset: BranchOffset::Offset(0),
});
}
}
@@ -71,13 +71,10 @@ pub fn emit_result_row_and_limit(
dest: t_ctx.reg_limit.unwrap(),
});
program.mark_last_insn_constant();
program.emit_insn_with_label_dependency(
Insn::DecrJumpZero {
reg: t_ctx.reg_limit.unwrap(),
target_pc: label_on_limit_reached.unwrap(),
},
label_on_limit_reached.unwrap(),
);
program.emit_insn(Insn::DecrJumpZero {
reg: t_ctx.reg_limit.unwrap(),
target_pc: label_on_limit_reached.unwrap(),
});
}
Ok(())
}

View File

@@ -72,7 +72,7 @@ pub fn emit_subquery<'a>(
t_ctx: &mut TranslateCtx<'a>,
) -> Result<usize> {
let yield_reg = program.alloc_register();
let coroutine_implementation_start_offset = program.offset() + 1;
let coroutine_implementation_start_offset = program.offset().add(1u32);
match &mut plan.query_type {
SelectQueryType::Subquery {
yield_reg: y,
@@ -100,14 +100,11 @@ pub fn emit_subquery<'a>(
resolver: Resolver::new(t_ctx.resolver.symbol_table),
};
let subquery_body_end_label = program.allocate_label();
program.emit_insn_with_label_dependency(
Insn::InitCoroutine {
yield_reg,
jump_on_definition: subquery_body_end_label,
start_offset: coroutine_implementation_start_offset,
},
subquery_body_end_label,
);
program.emit_insn(Insn::InitCoroutine {
yield_reg,
jump_on_definition: subquery_body_end_label,
start_offset: coroutine_implementation_start_offset,
});
// Normally we mark each LIMIT value as a constant insn that is emitted only once, but in the case of a subquery,
// we need to initialize it every time the subquery is run; otherwise subsequent runs of the subquery will already
// have the LIMIT counter at 0, and will never return rows.
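A toy model of the problem the comment above describes (run_subquery and its counter are illustrative, not the real bytecode): if the LIMIT counter were emitted as a run-once constant instead of being re-initialized on every run of the subquery, the second run would start with the counter already at 0 and yield nothing.
fn run_subquery(limit_counter: &mut i64, rows: &[i64]) -> Vec<i64> {
    let mut out = Vec::new();
    for &row in rows {
        if *limit_counter == 0 {
            break; // LIMIT exhausted
        }
        out.push(row);
        *limit_counter -= 1;
    }
    out
}
fn main() {
    // Counter re-initialized per run: both runs return rows.
    let mut counter = 2;
    assert_eq!(run_subquery(&mut counter, &[1, 2, 3]), vec![1, 2]);
    counter = 2;
    assert_eq!(run_subquery(&mut counter, &[1, 2, 3]), vec![1, 2]);
    // Counter initialized once and shared across runs: the second run returns no rows.
    let mut stale = 2;
    assert_eq!(run_subquery(&mut stale, &[1, 2, 3]), vec![1, 2]);
    assert_eq!(run_subquery(&mut stale, &[1, 2, 3]), Vec::<i64>::new());
}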

View File

@@ -11,23 +11,20 @@ use super::{BranchOffset, CursorID, Insn, InsnReference, Program, Table};
#[allow(dead_code)]
pub struct ProgramBuilder {
next_free_register: usize,
next_free_label: BranchOffset,
next_free_label: i32,
next_free_cursor_id: usize,
insns: Vec<Insn>,
// for temporarily storing instructions that will be put after Transaction opcode
constant_insns: Vec<Insn>,
// Each label has a list of InsnReferences that must
// be resolved. Lists are indexed by: label.abs() - 1
unresolved_labels: Vec<Vec<InsnReference>>,
next_insn_label: Option<BranchOffset>,
// Cursors that are referenced by the program. Indexed by CursorID.
pub cursor_ref: Vec<(Option<String>, Option<Table>)>,
// List of deferred label resolutions. Each entry is a pair of (label, insn_reference).
deferred_label_resolutions: Vec<(BranchOffset, InsnReference)>,
// Hashmap of label to insn reference. Resolved in build().
label_to_resolved_offset: HashMap<i32, u32>,
// Bitmask of cursors that have emitted a SeekRowid instruction.
seekrowid_emitted_bitmask: u64,
// map of instruction index to manual comment (used in EXPLAIN)
comments: HashMap<BranchOffset, &'static str>,
comments: HashMap<InsnReference, &'static str>,
}
impl ProgramBuilder {
@@ -37,11 +34,10 @@ impl ProgramBuilder {
next_free_label: 0,
next_free_cursor_id: 0,
insns: Vec::new(),
unresolved_labels: Vec::new(),
next_insn_label: None,
cursor_ref: Vec::new(),
constant_insns: Vec::new(),
deferred_label_resolutions: Vec::new(),
label_to_resolved_offset: HashMap::new(),
seekrowid_emitted_bitmask: 0,
comments: HashMap::new(),
}
@@ -71,20 +67,17 @@ impl ProgramBuilder {
cursor
}
fn _emit_insn(&mut self, insn: Insn) {
pub fn emit_insn(&mut self, insn: Insn) {
if let Some(label) = self.next_insn_label {
self.label_to_resolved_offset
.insert(label.to_label_value(), self.insns.len() as InsnReference);
self.next_insn_label = None;
}
self.insns.push(insn);
}
pub fn emit_insn(&mut self, insn: Insn) {
self._emit_insn(insn);
if let Some(label) = self.next_insn_label {
self.next_insn_label = None;
self.resolve_label(label, (self.insns.len() - 1) as BranchOffset);
}
}
pub fn add_comment(&mut self, insn_index: BranchOffset, comment: &'static str) {
self.comments.insert(insn_index, comment);
self.comments.insert(insn_index.to_offset_int(), comment);
}
// Emit an instruction that will be put at the end of the program (after Transaction statement).
@@ -99,19 +92,13 @@ impl ProgramBuilder {
self.insns.append(&mut self.constant_insns);
}
pub fn emit_insn_with_label_dependency(&mut self, insn: Insn, label: BranchOffset) {
self._emit_insn(insn);
self.add_label_dependency(label, (self.insns.len() - 1) as BranchOffset);
}
pub fn offset(&self) -> BranchOffset {
self.insns.len() as BranchOffset
BranchOffset::Offset(self.insns.len() as InsnReference)
}
pub fn allocate_label(&mut self) -> BranchOffset {
self.next_free_label -= 1;
self.unresolved_labels.push(Vec::new());
self.next_free_label
BranchOffset::Label(self.next_free_label)
}
// Effectively a GOTO <next insn> without the need to emit an explicit GOTO instruction.
@@ -121,232 +108,187 @@ impl ProgramBuilder {
self.next_insn_label = Some(label);
}
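A compact sketch of the next_insn_label mechanism used by emit_insn above (Builder, Insn and preview_label are simplified stand-ins): a previewed label binds to whichever instruction happens to be emitted next, which is the "GOTO <next insn> without an explicit GOTO" described in the comment.
use std::collections::HashMap;
enum Insn {
    Goto { target_label: i32 },
    Noop,
}
#[derive(Default)]
struct Builder {
    insns: Vec<Insn>,
    next_insn_label: Option<i32>,
    label_to_resolved_offset: HashMap<i32, u32>,
}
impl Builder {
    fn emit_insn(&mut self, insn: Insn) {
        // A previewed label resolves to the instruction being emitted right now.
        if let Some(label) = self.next_insn_label.take() {
            self.label_to_resolved_offset
                .insert(label, self.insns.len() as u32);
        }
        self.insns.push(insn);
    }
    fn preview_label(&mut self, label: i32) {
        self.next_insn_label = Some(label);
    }
}
fn main() {
    let mut b = Builder::default();
    b.emit_insn(Insn::Goto { target_label: -1 }); // offset 0: jumps "somewhere later"
    b.emit_insn(Insn::Noop);                      // offset 1
    b.preview_label(-1);                          // the next emitted insn is the jump target
    b.emit_insn(Insn::Noop);                      // offset 2: label -1 resolves here
    assert_eq!(b.label_to_resolved_offset[&-1], 2);
}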
fn label_to_index(&self, label: BranchOffset) -> usize {
(label.abs() - 1) as usize
}
pub fn add_label_dependency(&mut self, label: BranchOffset, insn_reference: BranchOffset) {
assert!(insn_reference >= 0);
assert!(label < 0);
let label_index = self.label_to_index(label);
assert!(label_index < self.unresolved_labels.len());
let insn_reference = insn_reference as InsnReference;
let label_references = &mut self.unresolved_labels[label_index];
label_references.push(insn_reference);
}
pub fn defer_label_resolution(&mut self, label: BranchOffset, insn_reference: InsnReference) {
self.deferred_label_resolutions
.push((label, insn_reference));
pub fn resolve_label(&mut self, label: BranchOffset, to_offset: BranchOffset) {
assert!(matches!(label, BranchOffset::Label(_)));
assert!(matches!(to_offset, BranchOffset::Offset(_)));
self.label_to_resolved_offset
.insert(label.to_label_value(), to_offset.to_offset_int());
}
/// Resolve unresolved labels to a specific offset in the instruction list.
///
/// This function updates all instructions that reference the given label
/// to point to the specified offset. It ensures that the label and offset
/// are valid and updates the target program counter (PC) of each instruction
/// that references the label.
///
/// # Arguments
///
/// * `label` - The label to resolve.
/// * `to_offset` - The offset to which the labeled instructions should be resolved.
pub fn resolve_label(&mut self, label: BranchOffset, to_offset: BranchOffset) {
assert!(label < 0);
assert!(to_offset >= 0);
let label_index = self.label_to_index(label);
assert!(
label_index < self.unresolved_labels.len(),
"Forbidden resolve of an unexistent label!"
);
let label_references = &mut self.unresolved_labels[label_index];
for insn_reference in label_references.iter() {
let insn = &mut self.insns[*insn_reference];
/// This function scans all instructions and resolves any labels to their corresponding offsets.
/// It ensures that all labels are resolved correctly and updates the target program counter (PC)
/// of each instruction that references a label.
pub fn resolve_labels(&mut self) {
let resolve = |pc: &mut BranchOffset, insn_name: &str| {
if let BranchOffset::Label(label) = pc {
let to_offset = *self.label_to_resolved_offset.get(label).unwrap_or_else(|| {
panic!("Reference to undefined label in {}: {}", insn_name, label)
});
*pc = BranchOffset::Offset(to_offset);
}
};
for insn in self.insns.iter_mut() {
match insn {
Insn::Init { target_pc } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Init");
}
Insn::Eq {
lhs: _lhs,
rhs: _rhs,
target_pc,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Eq");
}
Insn::Ne {
lhs: _lhs,
rhs: _rhs,
target_pc,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Ne");
}
Insn::Lt {
lhs: _lhs,
rhs: _rhs,
target_pc,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Lt");
}
Insn::Le {
lhs: _lhs,
rhs: _rhs,
target_pc,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Le");
}
Insn::Gt {
lhs: _lhs,
rhs: _rhs,
target_pc,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Gt");
}
Insn::Ge {
lhs: _lhs,
rhs: _rhs,
target_pc,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Ge");
}
Insn::If {
reg: _reg,
target_pc,
null_reg: _,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "If");
}
Insn::IfNot {
reg: _reg,
target_pc,
null_reg: _,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "IfNot");
}
Insn::RewindAwait {
cursor_id: _cursor_id,
pc_if_empty,
} => {
assert!(*pc_if_empty < 0);
*pc_if_empty = to_offset;
resolve(pc_if_empty, "RewindAwait");
}
Insn::LastAwait {
cursor_id: _cursor_id,
pc_if_empty,
} => {
assert!(*pc_if_empty < 0);
*pc_if_empty = to_offset;
resolve(pc_if_empty, "LastAwait");
}
Insn::Goto { target_pc } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Goto");
}
Insn::DecrJumpZero {
reg: _reg,
target_pc,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "DecrJumpZero");
}
Insn::SorterNext {
cursor_id: _cursor_id,
pc_if_next,
} => {
assert!(*pc_if_next < 0);
*pc_if_next = to_offset;
resolve(pc_if_next, "SorterNext");
}
Insn::SorterSort { pc_if_empty, .. } => {
assert!(*pc_if_empty < 0);
*pc_if_empty = to_offset;
resolve(pc_if_empty, "SorterSort");
}
Insn::NotNull {
reg: _reg,
target_pc,
} => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "NotNull");
}
Insn::IfPos { target_pc, .. } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "IfPos");
}
Insn::NextAwait { pc_if_next, .. } => {
assert!(*pc_if_next < 0);
*pc_if_next = to_offset;
resolve(pc_if_next, "NextAwait");
}
Insn::PrevAwait { pc_if_next, .. } => {
assert!(*pc_if_next < 0);
*pc_if_next = to_offset;
resolve(pc_if_next, "PrevAwait");
}
Insn::InitCoroutine {
yield_reg: _,
jump_on_definition,
start_offset: _,
} => {
*jump_on_definition = to_offset;
resolve(jump_on_definition, "InitCoroutine");
}
Insn::NotExists {
cursor: _,
rowid_reg: _,
target_pc,
} => {
*target_pc = to_offset;
resolve(target_pc, "NotExists");
}
Insn::Yield {
yield_reg: _,
end_offset,
} => {
*end_offset = to_offset;
resolve(end_offset, "Yield");
}
Insn::SeekRowid { target_pc, .. } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "SeekRowid");
}
Insn::Gosub { target_pc, .. } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "Gosub");
}
Insn::Jump { target_pc_eq, .. } => {
// FIXME: the current implementation doesn't scale for insns that
// have potentially multiple label dependencies.
assert!(*target_pc_eq < 0);
*target_pc_eq = to_offset;
Insn::Jump {
target_pc_eq,
target_pc_lt,
target_pc_gt,
} => {
resolve(target_pc_eq, "Jump");
resolve(target_pc_lt, "Jump");
resolve(target_pc_gt, "Jump");
}
Insn::SeekGE { target_pc, .. } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "SeekGE");
}
Insn::SeekGT { target_pc, .. } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "SeekGT");
}
Insn::IdxGE { target_pc, .. } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "IdxGE");
}
Insn::IdxGT { target_pc, .. } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
resolve(target_pc, "IdxGT");
}
Insn::IsNull { src: _, target_pc } => {
assert!(*target_pc < 0);
*target_pc = to_offset;
}
_ => {
todo!("missing resolve_label for {:?}", insn);
resolve(target_pc, "IsNull");
}
_ => continue,
}
}
label_references.clear();
self.label_to_resolved_offset.clear();
}
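And a matching sketch of the final pass performed by resolve_labels above (BranchOffset and Insn are trimmed stand-ins for the real types): every jump target that is still a Label is looked up in the label table and rewritten to a concrete Offset, and a reference to an undefined label is a hard error.
use std::collections::HashMap;
#[derive(Debug, Clone, Copy, PartialEq)]
enum BranchOffset {
    Label(i32),
    Offset(u32),
}
enum Insn {
    Goto { target_pc: BranchOffset },
    Halt,
}
// One pass over the program: rewrite every Label target into its resolved Offset.
fn resolve_labels(insns: &mut [Insn], label_to_resolved_offset: &HashMap<i32, u32>) {
    for insn in insns.iter_mut() {
        if let Insn::Goto { target_pc } = insn {
            if let BranchOffset::Label(label) = target_pc {
                let offset = label_to_resolved_offset
                    .get(label)
                    .unwrap_or_else(|| panic!("Reference to undefined label in Goto: {}", label));
                *target_pc = BranchOffset::Offset(*offset);
            }
        }
    }
}
fn main() {
    let mut insns = vec![
        Insn::Goto { target_pc: BranchOffset::Label(-1) },
        Insn::Halt,
    ];
    let labels = HashMap::from([(-1, 1u32)]); // label -1 was resolved to offset 1
    resolve_labels(&mut insns, &labels);
    match &insns[0] {
        Insn::Goto { target_pc } => assert_eq!(*target_pc, BranchOffset::Offset(1)),
        Insn::Halt => unreachable!(),
    }
}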
// translate table to cursor id
@@ -361,23 +303,12 @@ impl ProgramBuilder {
.unwrap()
}
pub fn resolve_deferred_labels(&mut self) {
for i in 0..self.deferred_label_resolutions.len() {
let (label, insn_reference) = self.deferred_label_resolutions[i];
self.resolve_label(label, insn_reference as BranchOffset);
}
self.deferred_label_resolutions.clear();
}
pub fn build(
self,
mut self,
database_header: Rc<RefCell<DatabaseHeader>>,
connection: Weak<Connection>,
) -> Program {
assert!(
self.deferred_label_resolutions.is_empty(),
"deferred_label_resolutions is not empty when build() is called, did you forget to call resolve_deferred_labels()?"
);
self.resolve_labels();
assert!(
self.constant_insns.is_empty(),
"constant_insns is not empty when build() is called, did you forget to call emit_constant_insns()?"

View File

@@ -1432,10 +1432,17 @@ mod tests {
#[test]
fn test_subsec_modifier() {
let now = Utc::now().naive_utc();
let expected = now.format("%H:%M:%S%.3f").to_string();
let now = Utc::now().naive_utc().time();
let result = exec_datetime(&[text("now"), text("subsec")], DateTimeOutput::Time);
assert_eq!(result, text(&expected));
let tolerance = TimeDelta::milliseconds(1);
let result =
chrono::NaiveTime::parse_from_str(&result.to_string(), "%H:%M:%S%.3f").unwrap();
assert!(
(now - result).num_milliseconds().abs() <= tolerance.num_milliseconds(),
"Expected: {}, Actual: {}",
now,
result
);
}
#[test]
@@ -1505,11 +1512,10 @@ mod tests {
#[test]
fn test_combined_modifiers() {
let now = Utc::now().naive_utc();
let dt = now - TimeDelta::days(1)
let expected = now - TimeDelta::days(1)
+ TimeDelta::hours(5)
+ TimeDelta::minutes(30)
+ TimeDelta::seconds(15);
let expected = dt.format("%Y-%m-%d %H:%M:%S%.3f").to_string();
let result = exec_datetime(
&[
text("now"),
@@ -1521,7 +1527,16 @@ mod tests {
],
DateTimeOutput::DateTime,
);
assert_eq!(result, text(&expected));
let tolerance = TimeDelta::milliseconds(1);
let result =
chrono::NaiveDateTime::parse_from_str(&result.to_string(), "%Y-%m-%d %H:%M:%S%.3f")
.unwrap();
assert!(
(result - expected).num_milliseconds().abs() <= tolerance.num_milliseconds(),
"Expected: {}, Actual: {}",
expected,
result
);
}
#[test]

View File

@@ -13,11 +13,11 @@ pub fn insn_to_str(
Insn::Init { target_pc } => (
"Init",
0,
*target_pc as i32,
target_pc.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("Start at {}", target_pc),
format!("Start at {}", target_pc.to_debug_int()),
),
Insn::Add { lhs, rhs, dest } => (
"Add",
@@ -114,11 +114,11 @@ pub fn insn_to_str(
Insn::NotNull { reg, target_pc } => (
"NotNull",
*reg as i32,
*target_pc as i32,
target_pc.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("r[{}]!=NULL -> goto {}", reg, target_pc),
format!("r[{}]!=NULL -> goto {}", reg, target_pc.to_debug_int()),
),
Insn::Compare {
start_reg_a,
@@ -145,9 +145,9 @@ pub fn insn_to_str(
target_pc_gt,
} => (
"Jump",
*target_pc_lt as i32,
*target_pc_eq as i32,
*target_pc_gt as i32,
target_pc_lt.to_debug_int(),
target_pc_eq.to_debug_int(),
target_pc_gt.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
"".to_string(),
@@ -178,13 +178,16 @@ pub fn insn_to_str(
} => (
"IfPos",
*reg as i32,
*target_pc as i32,
target_pc.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!(
"r[{}]>0 -> r[{}]-={}, goto {}",
reg, reg, decrement_by, target_pc
reg,
reg,
decrement_by,
target_pc.to_debug_int()
),
),
Insn::Eq {
@@ -195,10 +198,15 @@ pub fn insn_to_str(
"Eq",
*lhs as i32,
*rhs as i32,
*target_pc as i32,
target_pc.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if r[{}]==r[{}] goto {}", lhs, rhs, target_pc),
format!(
"if r[{}]==r[{}] goto {}",
lhs,
rhs,
target_pc.to_debug_int()
),
),
Insn::Ne {
lhs,
@@ -208,10 +216,15 @@ pub fn insn_to_str(
"Ne",
*lhs as i32,
*rhs as i32,
*target_pc as i32,
target_pc.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if r[{}]!=r[{}] goto {}", lhs, rhs, target_pc),
format!(
"if r[{}]!=r[{}] goto {}",
lhs,
rhs,
target_pc.to_debug_int()
),
),
Insn::Lt {
lhs,
@@ -221,10 +234,10 @@ pub fn insn_to_str(
"Lt",
*lhs as i32,
*rhs as i32,
*target_pc as i32,
target_pc.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if r[{}]<r[{}] goto {}", lhs, rhs, target_pc),
format!("if r[{}]<r[{}] goto {}", lhs, rhs, target_pc.to_debug_int()),
),
Insn::Le {
lhs,
@@ -234,10 +247,15 @@ pub fn insn_to_str(
"Le",
*lhs as i32,
*rhs as i32,
*target_pc as i32,
target_pc.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if r[{}]<=r[{}] goto {}", lhs, rhs, target_pc),
format!(
"if r[{}]<=r[{}] goto {}",
lhs,
rhs,
target_pc.to_debug_int()
),
),
Insn::Gt {
lhs,
@@ -247,10 +265,10 @@ pub fn insn_to_str(
"Gt",
*lhs as i32,
*rhs as i32,
*target_pc as i32,
target_pc.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if r[{}]>r[{}] goto {}", lhs, rhs, target_pc),
format!("if r[{}]>r[{}] goto {}", lhs, rhs, target_pc.to_debug_int()),
),
Insn::Ge {
lhs,
@@ -260,10 +278,15 @@ pub fn insn_to_str(
"Ge",
*lhs as i32,
*rhs as i32,
*target_pc as i32,
target_pc.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if r[{}]>=r[{}] goto {}", lhs, rhs, target_pc),
format!(
"if r[{}]>=r[{}] goto {}",
lhs,
rhs,
target_pc.to_debug_int()
),
),
Insn::If {
reg,
@@ -272,11 +295,11 @@ pub fn insn_to_str(
} => (
"If",
*reg as i32,
*target_pc as i32,
target_pc.to_debug_int(),
*null_reg as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if r[{}] goto {}", reg, target_pc),
format!("if r[{}] goto {}", reg, target_pc.to_debug_int()),
),
Insn::IfNot {
reg,
@@ -285,11 +308,11 @@ pub fn insn_to_str(
} => (
"IfNot",
*reg as i32,
*target_pc as i32,
target_pc.to_debug_int(),
*null_reg as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if !r[{}] goto {}", reg, target_pc),
format!("if !r[{}] goto {}", reg, target_pc.to_debug_int()),
),
Insn::OpenReadAsync {
cursor_id,
@@ -347,7 +370,7 @@ pub fn insn_to_str(
} => (
"RewindAwait",
*cursor_id as i32,
*pc_if_empty as i32,
pc_if_empty.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -431,7 +454,7 @@ pub fn insn_to_str(
} => (
"NextAwait",
*cursor_id as i32,
*pc_if_next as i32,
pc_if_next.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -461,7 +484,7 @@ pub fn insn_to_str(
Insn::Goto { target_pc } => (
"Goto",
0,
*target_pc as i32,
target_pc.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -473,7 +496,7 @@ pub fn insn_to_str(
} => (
"Gosub",
*return_reg as i32,
*target_pc as i32,
target_pc.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -562,7 +585,7 @@ pub fn insn_to_str(
"SeekRowid",
*cursor_id as i32,
*src_reg as i32,
*target_pc as i32,
target_pc.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!(
@@ -572,7 +595,7 @@ pub fn insn_to_str(
.0
.as_ref()
.unwrap_or(&format!("cursor {}", cursor_id)),
target_pc
target_pc.to_debug_int()
),
),
Insn::DeferredSeek {
@@ -596,7 +619,7 @@ pub fn insn_to_str(
} => (
"SeekGT",
*cursor_id as i32,
*target_pc as i32,
target_pc.to_debug_int(),
*start_reg as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -611,7 +634,7 @@ pub fn insn_to_str(
} => (
"SeekGE",
*cursor_id as i32,
*target_pc as i32,
target_pc.to_debug_int(),
*start_reg as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -625,7 +648,7 @@ pub fn insn_to_str(
} => (
"IdxGT",
*cursor_id as i32,
*target_pc as i32,
target_pc.to_debug_int(),
*start_reg as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -639,7 +662,7 @@ pub fn insn_to_str(
} => (
"IdxGE",
*cursor_id as i32,
*target_pc as i32,
target_pc.to_debug_int(),
*start_reg as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -648,11 +671,11 @@ pub fn insn_to_str(
Insn::DecrJumpZero { reg, target_pc } => (
"DecrJumpZero",
*reg as i32,
*target_pc as i32,
target_pc.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if (--r[{}]==0) goto {}", reg, target_pc),
format!("if (--r[{}]==0) goto {}", reg, target_pc.to_debug_int()),
),
Insn::AggStep {
func,
@@ -742,7 +765,7 @@ pub fn insn_to_str(
} => (
"SorterSort",
*cursor_id as i32,
*pc_if_empty as i32,
pc_if_empty.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -754,7 +777,7 @@ pub fn insn_to_str(
} => (
"SorterNext",
*cursor_id as i32,
*pc_if_next as i32,
pc_if_next.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -792,8 +815,8 @@ pub fn insn_to_str(
} => (
"InitCoroutine",
*yield_reg as i32,
*jump_on_definition as i32,
*start_offset as i32,
jump_on_definition.to_debug_int(),
start_offset.to_debug_int(),
OwnedValue::build_text(Rc::new("".to_string())),
0,
"".to_string(),
@@ -813,7 +836,7 @@ pub fn insn_to_str(
} => (
"Yield",
*yield_reg as i32,
*end_offset as i32,
end_offset.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -898,7 +921,7 @@ pub fn insn_to_str(
} => (
"NotExists",
*cursor as i32,
*target_pc as i32,
target_pc.to_debug_int(),
*rowid_reg as i32,
OwnedValue::build_text(Rc::new("".to_string())),
0,
@@ -968,11 +991,11 @@ pub fn insn_to_str(
Insn::IsNull { src, target_pc } => (
"IsNull",
*src as i32,
*target_pc as i32,
target_pc.to_debug_int(),
0,
OwnedValue::build_text(Rc::new("".to_string())),
0,
format!("if (r[{}]==NULL) goto {}", src, target_pc),
format!("if (r[{}]==NULL) goto {}", src, target_pc.to_debug_int()),
),
Insn::ParseSchema { db, where_clause } => (
"ParseSchema",

View File

@@ -41,7 +41,7 @@ use crate::vdbe::insn::Insn;
#[cfg(feature = "json")]
use crate::{
function::JsonFunc, json::get_json, json::json_array, json::json_array_length,
json::json_extract,
json::json_extract, json::json_type,
};
use crate::{Connection, Result, Rows, TransactionState, DATABASE_VERSION};
use datetime::{exec_date, exec_datetime_full, exec_julianday, exec_time, exec_unixepoch};
@@ -58,13 +58,75 @@ use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::rc::{Rc, Weak};
pub type BranchOffset = i64;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
/// Represents a target for a jump instruction.
/// Stores 32-bit ints to keep the enum word-sized.
pub enum BranchOffset {
/// A label is a named location in the program.
/// If there are references to it, it must always be resolved to an Offset
/// via program.resolve_label().
Label(i32),
/// An offset is a direct index into the instruction list.
Offset(InsnReference),
/// A placeholder is a temporary value to satisfy the compiler.
/// It must be set later.
Placeholder,
}
impl BranchOffset {
/// Returns true if the branch offset is a label.
pub fn is_label(&self) -> bool {
matches!(self, BranchOffset::Label(_))
}
/// Returns true if the branch offset is an offset.
pub fn is_offset(&self) -> bool {
matches!(self, BranchOffset::Offset(_))
}
/// Returns the offset value. Panics if the branch offset is a label or placeholder.
pub fn to_offset_int(&self) -> InsnReference {
match self {
BranchOffset::Label(v) => unreachable!("Unresolved label: {}", v),
BranchOffset::Offset(v) => *v,
BranchOffset::Placeholder => unreachable!("Unresolved placeholder"),
}
}
/// Returns the label value. Panics if the branch offset is an offset or placeholder.
pub fn to_label_value(&self) -> i32 {
match self {
BranchOffset::Label(v) => *v,
BranchOffset::Offset(_) => unreachable!("Offset cannot be converted to label value"),
BranchOffset::Placeholder => unreachable!("Unresolved placeholder"),
}
}
/// Returns the branch offset as a signed integer.
/// Used in explain output, where we don't want to panic in case we have an unresolved
/// label or placeholder.
pub fn to_debug_int(&self) -> i32 {
match self {
BranchOffset::Label(v) => *v,
BranchOffset::Offset(v) => *v as i32,
BranchOffset::Placeholder => i32::MAX,
}
}
/// Adds an integer value to the branch offset.
/// Returns a new branch offset.
/// Panics if the branch offset is a label or placeholder.
pub fn add<N: Into<u32>>(self, n: N) -> BranchOffset {
BranchOffset::Offset(self.to_offset_int() + n.into())
}
}
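A brief usage sketch of the BranchOffset API above (the type and two of its methods are re-declared locally in trimmed form purely so the snippet is self-contained): resolved offsets can be read back and advanced with add(), as in program.offset().add(1u32) in emit_subquery, while reading an unresolved Label as an offset is a programming error and panics.
type InsnReference = u32;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum BranchOffset {
    Label(i32),
    Offset(InsnReference),
}
impl BranchOffset {
    fn to_offset_int(&self) -> InsnReference {
        match self {
            BranchOffset::Label(v) => unreachable!("Unresolved label: {}", v),
            BranchOffset::Offset(v) => *v,
        }
    }
    fn add<N: Into<u32>>(self, n: N) -> BranchOffset {
        BranchOffset::Offset(self.to_offset_int() + n.into())
    }
}
fn main() {
    // "The instruction right after the current one", without arithmetic on labels.
    let current = BranchOffset::Offset(7);
    assert_eq!(current.add(1u32), BranchOffset::Offset(8));
    assert_eq!(current.to_offset_int(), 7);
    // A label is only a name until it is resolved; treating it as an offset panics.
    std::panic::set_hook(Box::new(|_| {})); // silence the expected panic's default output
    let pending = BranchOffset::Label(-3);
    assert!(std::panic::catch_unwind(|| pending.to_offset_int()).is_err());
}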
pub type CursorID = usize;
pub type PageIdx = usize;
// Index of insn in list of insns
type InsnReference = usize;
type InsnReference = u32;
pub enum StepResult<'a> {
Done,
@@ -101,7 +163,7 @@ impl RegexCache {
/// The program state describes the environment in which the program executes.
pub struct ProgramState {
pub pc: BranchOffset,
pub pc: InsnReference,
cursors: RefCell<BTreeMap<CursorID, Box<dyn Cursor>>>,
registers: Vec<OwnedValue>,
last_compare: Option<std::cmp::Ordering>,
@@ -151,7 +213,7 @@ pub struct Program {
pub insns: Vec<Insn>,
pub cursor_ref: Vec<(Option<String>, Option<Table>)>,
pub database_header: Rc<RefCell<DatabaseHeader>>,
pub comments: HashMap<BranchOffset, &'static str>,
pub comments: HashMap<InsnReference, &'static str>,
pub connection: Weak<Connection>,
pub auto_commit: bool,
}
@@ -189,8 +251,8 @@ impl Program {
let mut cursors = state.cursors.borrow_mut();
match insn {
Insn::Init { target_pc } => {
assert!(*target_pc >= 0);
state.pc = *target_pc;
assert!(target_pc.is_offset());
state.pc = target_pc.to_offset_int();
}
Insn::Add { lhs, rhs, dest } => {
state.registers[*dest] =
@@ -278,6 +340,9 @@ impl Program {
target_pc_eq,
target_pc_gt,
} => {
assert!(target_pc_lt.is_offset());
assert!(target_pc_eq.is_offset());
assert!(target_pc_gt.is_offset());
let cmp = state.last_compare.take();
if cmp.is_none() {
return Err(LimboError::InternalError(
@@ -289,8 +354,7 @@ impl Program {
std::cmp::Ordering::Equal => *target_pc_eq,
std::cmp::Ordering::Greater => *target_pc_gt,
};
assert!(target_pc >= 0);
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
}
Insn::Move {
source_reg,
@@ -313,12 +377,12 @@ impl Program {
target_pc,
decrement_by,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let reg = *reg;
let target_pc = *target_pc;
match &state.registers[reg] {
OwnedValue::Integer(n) if *n > 0 => {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
state.registers[reg] = OwnedValue::Integer(*n - *decrement_by as i64);
}
OwnedValue::Integer(_) => {
@@ -332,7 +396,7 @@ impl Program {
}
}
Insn::NotNull { reg, target_pc } => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let reg = *reg;
let target_pc = *target_pc;
match &state.registers[reg] {
@@ -340,7 +404,7 @@ impl Program {
state.pc += 1;
}
_ => {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
}
}
}
@@ -350,17 +414,17 @@ impl Program {
rhs,
target_pc,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let lhs = *lhs;
let rhs = *rhs;
let target_pc = *target_pc;
match (&state.registers[lhs], &state.registers[rhs]) {
(_, OwnedValue::Null) | (OwnedValue::Null, _) => {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
}
_ => {
if state.registers[lhs] == state.registers[rhs] {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -372,17 +436,17 @@ impl Program {
rhs,
target_pc,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let lhs = *lhs;
let rhs = *rhs;
let target_pc = *target_pc;
match (&state.registers[lhs], &state.registers[rhs]) {
(_, OwnedValue::Null) | (OwnedValue::Null, _) => {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
}
_ => {
if state.registers[lhs] != state.registers[rhs] {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -394,17 +458,17 @@ impl Program {
rhs,
target_pc,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let lhs = *lhs;
let rhs = *rhs;
let target_pc = *target_pc;
match (&state.registers[lhs], &state.registers[rhs]) {
(_, OwnedValue::Null) | (OwnedValue::Null, _) => {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
}
_ => {
if state.registers[lhs] < state.registers[rhs] {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -416,17 +480,17 @@ impl Program {
rhs,
target_pc,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let lhs = *lhs;
let rhs = *rhs;
let target_pc = *target_pc;
match (&state.registers[lhs], &state.registers[rhs]) {
(_, OwnedValue::Null) | (OwnedValue::Null, _) => {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
}
_ => {
if state.registers[lhs] <= state.registers[rhs] {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -438,17 +502,17 @@ impl Program {
rhs,
target_pc,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let lhs = *lhs;
let rhs = *rhs;
let target_pc = *target_pc;
match (&state.registers[lhs], &state.registers[rhs]) {
(_, OwnedValue::Null) | (OwnedValue::Null, _) => {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
}
_ => {
if state.registers[lhs] > state.registers[rhs] {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -460,17 +524,17 @@ impl Program {
rhs,
target_pc,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let lhs = *lhs;
let rhs = *rhs;
let target_pc = *target_pc;
match (&state.registers[lhs], &state.registers[rhs]) {
(_, OwnedValue::Null) | (OwnedValue::Null, _) => {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
}
_ => {
if state.registers[lhs] >= state.registers[rhs] {
state.pc = target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -482,9 +546,9 @@ impl Program {
target_pc,
null_reg,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
if exec_if(&state.registers[*reg], &state.registers[*null_reg], false) {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -494,9 +558,9 @@ impl Program {
target_pc,
null_reg,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
if exec_if(&state.registers[*reg], &state.registers[*null_reg], true) {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -539,10 +603,11 @@ impl Program {
cursor_id,
pc_if_empty,
} => {
assert!(pc_if_empty.is_offset());
let cursor = cursors.get_mut(cursor_id).unwrap();
cursor.wait_for_completion()?;
if cursor.is_empty() {
state.pc = *pc_if_empty;
state.pc = pc_if_empty.to_offset_int();
} else {
state.pc += 1;
}
@@ -551,10 +616,11 @@ impl Program {
cursor_id,
pc_if_empty,
} => {
assert!(pc_if_empty.is_offset());
let cursor = cursors.get_mut(cursor_id).unwrap();
cursor.wait_for_completion()?;
if cursor.is_empty() {
state.pc = *pc_if_empty;
state.pc = pc_if_empty.to_offset_int();
} else {
state.pc += 1;
}
@@ -620,11 +686,11 @@ impl Program {
cursor_id,
pc_if_next,
} => {
assert!(*pc_if_next >= 0);
assert!(pc_if_next.is_offset());
let cursor = cursors.get_mut(cursor_id).unwrap();
cursor.wait_for_completion()?;
if !cursor.is_empty() {
state.pc = *pc_if_next;
state.pc = pc_if_next.to_offset_int();
} else {
state.pc += 1;
}
@@ -633,11 +699,11 @@ impl Program {
cursor_id,
pc_if_next,
} => {
assert!(*pc_if_next >= 0);
assert!(pc_if_next.is_offset());
let cursor = cursors.get_mut(cursor_id).unwrap();
cursor.wait_for_completion()?;
if !cursor.is_empty() {
state.pc = *pc_if_next;
state.pc = pc_if_next.to_offset_int();
} else {
state.pc += 1;
}
@@ -705,24 +771,22 @@ impl Program {
state.pc += 1;
}
Insn::Goto { target_pc } => {
assert!(*target_pc >= 0);
state.pc = *target_pc;
assert!(target_pc.is_offset());
state.pc = target_pc.to_offset_int();
}
Insn::Gosub {
target_pc,
return_reg,
} => {
assert!(*target_pc >= 0);
state.registers[*return_reg] = OwnedValue::Integer(state.pc + 1);
state.pc = *target_pc;
assert!(target_pc.is_offset());
state.registers[*return_reg] = OwnedValue::Integer((state.pc + 1) as i64);
state.pc = target_pc.to_offset_int();
}
Insn::Return { return_reg } => {
if let OwnedValue::Integer(pc) = state.registers[*return_reg] {
if pc < 0 {
return Err(LimboError::InternalError(
"Return register is negative".to_string(),
));
}
let pc: u32 = pc
.try_into()
.unwrap_or_else(|_| panic!("Return register is negative: {}", pc));
state.pc = pc;
} else {
return Err(LimboError::InternalError(
@@ -779,11 +843,12 @@ impl Program {
src_reg,
target_pc,
} => {
assert!(target_pc.is_offset());
let cursor = cursors.get_mut(cursor_id).unwrap();
let rowid = match &state.registers[*src_reg] {
OwnedValue::Integer(rowid) => *rowid as u64,
OwnedValue::Null => {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
continue;
}
other => {
@@ -794,7 +859,7 @@ impl Program {
};
let found = return_if_io!(cursor.seek(SeekKey::TableRowId(rowid), SeekOp::EQ));
if !found {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -813,6 +878,7 @@ impl Program {
target_pc,
is_index,
} => {
assert!(target_pc.is_offset());
if *is_index {
let cursor = cursors.get_mut(cursor_id).unwrap();
let record_from_regs: OwnedRecord =
@@ -821,7 +887,7 @@ impl Program {
cursor.seek(SeekKey::IndexKey(&record_from_regs), SeekOp::GE)
);
if !found {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -844,7 +910,7 @@ impl Program {
let found =
return_if_io!(cursor.seek(SeekKey::TableRowId(rowid), SeekOp::GE));
if !found {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -857,6 +923,7 @@ impl Program {
target_pc,
is_index,
} => {
assert!(target_pc.is_offset());
if *is_index {
let cursor = cursors.get_mut(cursor_id).unwrap();
let record_from_regs: OwnedRecord =
@@ -865,7 +932,7 @@ impl Program {
cursor.seek(SeekKey::IndexKey(&record_from_regs), SeekOp::GT)
);
if !found {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -888,7 +955,7 @@ impl Program {
let found =
return_if_io!(cursor.seek(SeekKey::TableRowId(rowid), SeekOp::GT));
if !found {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -900,7 +967,7 @@ impl Program {
num_regs,
target_pc,
} => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
let cursor = cursors.get_mut(cursor_id).unwrap();
let record_from_regs: OwnedRecord =
make_owned_record(&state.registers, start_reg, num_regs);
@@ -909,12 +976,12 @@ impl Program {
if idx_record.values[..idx_record.values.len() - 1]
>= *record_from_regs.values
{
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
} else {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
}
}
Insn::IdxGT {
@@ -923,6 +990,7 @@ impl Program {
num_regs,
target_pc,
} => {
assert!(target_pc.is_offset());
let cursor = cursors.get_mut(cursor_id).unwrap();
let record_from_regs: OwnedRecord =
make_owned_record(&state.registers, start_reg, num_regs);
@@ -931,21 +999,21 @@ impl Program {
if idx_record.values[..idx_record.values.len() - 1]
> *record_from_regs.values
{
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
} else {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
}
}
Insn::DecrJumpZero { reg, target_pc } => {
assert!(*target_pc >= 0);
assert!(target_pc.is_offset());
match state.registers[*reg] {
OwnedValue::Integer(n) => {
let n = n - 1;
if n == 0 {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.registers[*reg] = OwnedValue::Integer(n);
state.pc += 1;
@@ -1250,18 +1318,18 @@ impl Program {
cursor.rewind()?;
state.pc += 1;
} else {
state.pc = *pc_if_empty;
state.pc = pc_if_empty.to_offset_int();
}
}
Insn::SorterNext {
cursor_id,
pc_if_next,
} => {
assert!(*pc_if_next >= 0);
assert!(pc_if_next.is_offset());
let cursor = cursors.get_mut(cursor_id).unwrap();
return_if_io!(cursor.next());
if !cursor.is_empty() {
state.pc = *pc_if_next;
state.pc = pc_if_next.to_offset_int();
} else {
state.pc += 1;
}
@@ -1313,17 +1381,25 @@ impl Program {
}
}
#[cfg(feature = "json")]
crate::function::Func::Json(JsonFunc::JsonArrayLength) => {
crate::function::Func::Json(
func @ (JsonFunc::JsonArrayLength | JsonFunc::JsonType),
) => {
let json_value = &state.registers[*start_reg];
let path_value = if arg_count > 1 {
Some(&state.registers[*start_reg + 1])
} else {
None
};
let json_array_length = json_array_length(json_value, path_value);
let func_result = match func {
JsonFunc::JsonArrayLength => {
json_array_length(json_value, path_value)
}
JsonFunc::JsonType => json_type(json_value, path_value),
_ => unreachable!(),
};
match json_array_length {
Ok(length) => state.registers[*dest] = length,
match func_result {
Ok(result) => state.registers[*dest] = result,
Err(e) => return Err(e),
}
}
@@ -1726,18 +1802,23 @@ impl Program {
jump_on_definition,
start_offset,
} => {
assert!(*jump_on_definition >= 0);
state.registers[*yield_reg] = OwnedValue::Integer(*start_offset);
assert!(jump_on_definition.is_offset());
let start_offset = start_offset.to_offset_int();
state.registers[*yield_reg] = OwnedValue::Integer(start_offset as i64);
state.ended_coroutine.insert(*yield_reg, false);
state.pc = if *jump_on_definition == 0 {
let jump_on_definition = jump_on_definition.to_offset_int();
state.pc = if jump_on_definition == 0 {
state.pc + 1
} else {
*jump_on_definition
jump_on_definition
};
}
Insn::EndCoroutine { yield_reg } => {
if let OwnedValue::Integer(pc) = state.registers[*yield_reg] {
state.ended_coroutine.insert(*yield_reg, true);
let pc: u32 = pc
.try_into()
.unwrap_or_else(|_| panic!("EndCoroutine: pc overflow: {}", pc));
state.pc = pc - 1; // yield jump is always next to yield. Here we subtract 1 to go back to the yield instruction
} else {
unreachable!();
@@ -1753,12 +1834,15 @@ impl Program {
.get(yield_reg)
.expect("coroutine not initialized")
{
state.pc = *end_offset;
state.pc = end_offset.to_offset_int();
} else {
let pc: u32 = pc
.try_into()
.unwrap_or_else(|_| panic!("Yield: pc overflow: {}", pc));
// swap the program counter with the value in the yield register
// this is the mechanism that allows jumping back and forth between the coroutine and the caller
(state.pc, state.registers[*yield_reg]) =
(pc, OwnedValue::Integer(state.pc + 1));
(pc, OwnedValue::Integer((state.pc + 1) as i64));
}
} else {
unreachable!(
@@ -1842,7 +1926,7 @@ impl Program {
if exists {
state.pc += 1;
} else {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
}
}
// this cursor may be reused for next insert
@@ -1894,7 +1978,7 @@ impl Program {
}
Insn::IsNull { src, target_pc } => {
if matches!(state.registers[*src], OwnedValue::Null) {
state.pc = *target_pc;
state.pc = target_pc.to_offset_int();
} else {
state.pc += 1;
}
@@ -1976,7 +2060,7 @@ fn trace_insn(program: &Program, addr: InsnReference, insn: &Insn) {
addr,
insn,
String::new(),
program.comments.get(&(addr as BranchOffset)).copied()
program.comments.get(&(addr as u32)).copied()
)
);
}
@@ -1987,7 +2071,7 @@ fn print_insn(program: &Program, addr: InsnReference, insn: &Insn, indent: Strin
addr,
insn,
indent,
program.comments.get(&(addr as BranchOffset)).copied(),
program.comments.get(&(addr as u32)).copied(),
);
println!("{}", s);
}
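Finally, a tiny model of the Yield mechanism referred to in the comments above (plain u32 variables stand in for the real program counter and yield register): on every Yield the program counter and the yield register swap roles, which is what lets execution bounce between the subquery coroutine and its caller.
fn main() {
    // The caller reaches a Yield at pc = 10; the coroutine body resumes at offset 3.
    let mut pc: u32 = 10;
    let mut yield_reg: u32 = 3;
    // Yield: jump into the coroutine, remembering where the caller resumes (pc + 1).
    (pc, yield_reg) = (yield_reg, pc + 1);
    assert_eq!((pc, yield_reg), (3, 11));
    // The coroutine runs until its own Yield at pc = 5: control returns to the caller.
    pc = 5;
    (pc, yield_reg) = (yield_reg, pc + 1);
    assert_eq!((pc, yield_reg), (11, 6));
}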

View File

@@ -221,3 +221,51 @@ do_execsql_test json_array_length_via_bad_prop {
do_execsql_test json_array_length_nested {
SELECT json_array_length('{"one":[[1,2,3],2,3]}', '$.one[0]');
} {{3}}
do_execsql_test json_type_no_path {
select json_type('{"a":[2,3.5,true,false,null,"x"]}')
} {{object}}
do_execsql_test json_type_root_path {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$')
} {{object}}
do_execsql_test json_type_array {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a')
} {{array}}
do_execsql_test json_type_integer {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[0]')
} {{integer}}
do_execsql_test json_type_real {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[1]')
} {{real}}
do_execsql_test json_type_true {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[2]')
} {{true}}
do_execsql_test json_type_false {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[3]')
} {{false}}
do_execsql_test json_type_null {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[4]')
} {{null}}
do_execsql_test json_type_text {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[5]')
} {{text}}
do_execsql_test json_type_NULL {
select json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[6]')
} {{}}
do_execsql_test json_type_cast {
select json_type(1)
} {{integer}}
do_execsql_test json_type_null_arg {
select json_type(null)
} {{}}

View File

@@ -11,6 +11,14 @@ do_execsql_test select-const-2 {
SELECT 2
} {2}
do_execsql_test select-true {
SELECT true
} {1}
do_execsql_test select-false {
SELECT false
} {0}
do_execsql_test select-text-escape-1 {
SELECT '''a'
} {'a}
@@ -31,6 +39,15 @@ do_execsql_test select-limit-0 {
SELECT id FROM users LIMIT 0;
} {}
# ORDER BY id here because sqlite uses age_idx and we don't (yet), so force it to evaluate in ID order
do_execsql_test select-limit-true {
SELECT id FROM users ORDER BY id LIMIT true;
} {1}
do_execsql_test select-limit-false {
SELECT id FROM users ORDER BY id LIMIT false;
} {}
do_execsql_test realify {
select price from products limit 1;
} {79.0}

View File

@@ -338,3 +338,30 @@ do_execsql_test between-price-range-with-names {
AND (name = 'sweatshirt' OR name = 'sneakers');
} {5|sweatshirt|74.0
8|sneakers|82.0}
do_execsql_test where-between-true-and-2 {
select id from users where id between true and 2;
} {1
2}
do_execsql_test nested-parens-conditionals-or-and-or {
SELECT count(*) FROM users WHERE ((age > 25 OR age < 18) AND (city = 'Boston' OR state = 'MA'));
} {146}
do_execsql_test nested-parens-conditionals-and-or-and {
SELECT * FROM users WHERE (((age > 18 AND city = 'New Mario') OR age = 92) AND city = 'Lake Paul');
} {{9989|Timothy|Harrison|woodsmichael@example.net|+1-447-830-5123|782 Wright Harbors|Lake Paul|ID|52330|92}}
do_execsql_test nested-parens-conditionals-and-double-or {
SELECT * FROM users WHERE ((age > 30 OR age < 20) AND (state = 'NY' OR state = 'CA')) AND first_name glob 'An*' order by id;
} {{1738|Angelica|Pena|jacksonjonathan@example.net|(867)536-1578x039|663 Jacqueline Estate Apt. 652|Clairehaven|NY|64172|74
1811|Andrew|Mckee|jchen@example.net|359.939.9548|19809 Blair Junction Apt. 438|New Lawrencefort|NY|26240|42
3773|Andrew|Peterson|cscott@example.com|(405)410-4972x90408|90513 Munoz Radial Apt. 786|Travisfurt|CA|52951|43
3875|Anthony|Cordova|ocross@example.org|+1-356-999-4070x557|77081 Aguilar Turnpike|Michaelfurt|CA|73353|37
4909|Andrew|Carson|michelle31@example.net|823.423.1516|78514 Luke Springs|Lake Crystal|CA|49481|74
5498|Anna|Hall|elizabethheath@example.org|9778473725|5803 Taylor Tunnel|New Nicholaston|NY|21825|14
6340|Angela|Freeman|juankelly@example.net|501.372.4720|3912 Ricardo Mission|West Nancyville|NY|60823|34
8171|Andrea|Lee|dgarrison@example.com|001-594-430-0646|452 Anthony Stravenue|Sandraville|CA|28572|12
9110|Anthony|Barrett|steven05@example.net|(562)928-9177x8454|86166 Foster Inlet Apt. 284|North Jeffreyburgh|CA|80147|97
9279|Annette|Lynn|joanne37@example.com|(272)700-7181|2676 Laura Points Apt. 683|Tristanville|NY|48646|91}}