Merge branch 'main' into vtab_schema

This commit is contained in:
Preston Thorpe
2025-04-28 22:09:10 -04:00
committed by GitHub
218 changed files with 36922 additions and 13792 deletions

1
testing/README.md Normal file

@@ -0,0 +1 @@
# Limbo Testing


@@ -98,3 +98,32 @@ do_execsql_test select-agg-binary-unary-negative {
do_execsql_test select-agg-binary-unary-positive {
SELECT min(age) + +max(age) FROM users;
} {101}
do_execsql_test select-non-agg-cols-should-be-not-null {
SELECT id, first_name, sum(age) FROM users LIMIT 1;
} {1|Jamie|503960}
do_execsql_test select-with-group-by-and-agg-1 {
SELECT id, first_name, avg(age) FROM users group by last_name limit 1;
} {274|Debra|66.25}
do_execsql_test select-with-group-by-and-agg-2 {
select first_name, last_name from users where state = 'AL' group by last_name limit 10;
} {Jay|Acosta
Daniel|Adams
Aaron|Baker
Sharon|Becker
Kim|Berg
Donald|Bishop
Brian|Bradford
Jesus|Bradley
John|Brown
Hunter|Burke}
do_execsql_test select-agg-json-array {
SELECT json_group_array(name) FROM products;
} {["hat","cap","shirt","sweater","sweatshirt","shorts","jeans","sneakers","boots","coat","accessories"]}
do_execsql_test select-agg-json-array-object {
SELECT json_group_array(json_object('name', name)) FROM products;
} {[{"name":"hat"},{"name":"cap"},{"name":"shirt"},{"name":"sweater"},{"name":"sweatshirt"},{"name":"shorts"},{"name":"jeans"},{"name":"sneakers"},{"name":"boots"},{"name":"coat"},{"name":"accessories"}]}


@@ -28,3 +28,5 @@ source $testdir/scalar-functions-printf.test
source $testdir/transactions.test
source $testdir/update.test
source $testdir/drop_table.test
source $testdir/default_value.test
source $testdir/boolean.test

56
testing/boolean.test Executable file

@@ -0,0 +1,56 @@
#!/usr/bin/env tclsh
set testdir [file dirname $argv0]
source $testdir/tester.tcl
foreach {testname lhs ans} {
int-1 1 0
int-2 2 0
int-3 0 1
float-1 1.0 0
float-2 2.0 0
float-3 0.0 1
text 'a' 1
text-int-1 '0' 1
text-int-2 '1' 0
text-float-1 '1.0' 0
text-float-2 '0.0' 1
text-float-edge '12-23.0' 0
null NULL {}
empty-blob x'' 1
cast-blob "CAST ('af' AS BLOB)" 1
blob x'0000' 1
blob-2 x'0001' 1
} {
do_execsql_test boolean-not-$testname "SELECT not $lhs" $::ans
}
foreach {testname lhs rhs ans} {
blob-blob x'' x'' 0
1-blob 1 x'' 0
0-blob 0 x'' 0
0-1 0 1 0
1-1 1 1 1
int-int 20 1000 1
int-float 20 1.0 1
int-0.0 20 0.0 0
0.0-0.0 0.0 0.0 0
text 'a' 1 0
text-int-1 '0' 1 0
text-int-2 '1' 0 0
text-float-1 '1.0' 0 0
text-float-2 '0.0' 1 0
text-float-3 '1.0' 1 1
text-float-edge '12-23.0' 0 0
null-null NULL NULL ""
1-null 1 NULL ""
1.0-null 1.0 NULL ""
blob-null x'' NULL 0
blob2-null x'0001' NULL 0
0-null 0 NULL 0
0.0-null 0.0 NULL 0
'0.0'-null '0.0' NULL 0
} {
do_execsql_test boolean-and-$testname "SELECT $lhs AND $rhs" $::ans
}


@@ -1,8 +1,9 @@
#!/usr/bin/env python3
from test_limbo_cli import TestLimboShell
from cli_tests.test_limbo_cli import TestLimboShell
from pathlib import Path
import time
import os
from cli_tests import console
def test_basic_queries():
@@ -242,8 +243,66 @@ def test_table_patterns():
shell.quit()
if __name__ == "__main__":
print("Running all Limbo CLI tests...")
def test_update_with_limit():
limbo = TestLimboShell(
"CREATE TABLE t (a,b,c); insert into t values (1,2,3), (4,5,6), (7,8,9), (1,2,3),(4,5,6), (7,8,9);"
)
limbo.run_test("update-limit", "UPDATE t SET a = 10 LIMIT 1;", "")
limbo.run_test("update-limit-result", "SELECT COUNT(*) from t WHERE a = 10;", "1")
limbo.run_test("update-limit-zero", "UPDATE t SET a = 100 LIMIT 0;", "")
limbo.run_test(
"update-limit-zero-result", "SELECT COUNT(*) from t WHERE a = 100;", "0"
)
limbo.run_test("update-limit-all", "UPDATE t SET a = 100 LIMIT -1;", "")
# a negative LIMIT is treated as no limit in SQLite: the limit counter check only fires when the counter hits exactly zero, which a negative value never does (see the sketch after this function)
limbo.run_test("update-limit-result", "SELECT COUNT(*) from t WHERE a = 100;", "6")
limbo.run_test(
"update-limit-where", "UPDATE t SET a = 333 WHERE b = 5 LIMIT 1;", ""
)
limbo.run_test(
"update-limit-where-result", "SELECT COUNT(*) from t WHERE a = 333;", "1"
)
limbo.quit()
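The LIMIT expectations above (0 rows for LIMIT 0, one row for LIMIT 1, all six rows for LIMIT -1) follow from that counter behaviour; a minimal Python sketch of the observed semantics (an illustration only, not Limbo's or SQLite's actual implementation):

def apply_update_limit(rows, limit):
    # The loop stops only when the counter is exactly zero: LIMIT 0 touches no
    # rows, LIMIT 1 touches one row, and a negative LIMIT never reaches zero,
    # so it behaves like "no limit".
    touched = []
    for row in rows:
        if limit == 0:
            break
        touched.append(row)
        limit -= 1
    return touched

assert len(apply_update_limit(list(range(6)), 0)) == 0
assert len(apply_update_limit(list(range(6)), 1)) == 1
assert len(apply_update_limit(list(range(6)), -1)) == 6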
def test_update_with_limit_and_offset():
limbo = TestLimboShell(
"CREATE TABLE t (a,b,c); insert into t values (1,2,3), (4,5,6), (7,8,9), (1,2,3),(4,5,6), (7,8,9);"
)
limbo.run_test("update-limit-offset", "UPDATE t SET a = 10 LIMIT 1 OFFSET 3;", "")
limbo.run_test(
"update-limit-offset-result", "SELECT COUNT(*) from t WHERE a = 10;", "1"
)
limbo.run_test("update-limit-result", "SELECT a from t LIMIT 4;", "1\n4\n7\n10")
limbo.run_test(
"update-limit-offset-zero", "UPDATE t SET a = 100 LIMIT 0 OFFSET 0;", ""
)
limbo.run_test(
"update-limit-zero-result", "SELECT COUNT(*) from t WHERE a = 100;", "0"
)
limbo.run_test("update-limit-all", "UPDATE t SET a = 100 LIMIT -1 OFFSET 1;", "")
limbo.run_test("update-limit-result", "SELECT COUNT(*) from t WHERE a = 100;", "5")
limbo.run_test(
"update-limit-where", "UPDATE t SET a = 333 WHERE b = 5 LIMIT 1 OFFSET 2;", ""
)
limbo.run_test(
"update-limit-where-result", "SELECT COUNT(*) from t WHERE a = 333;", "0"
)
limbo.quit()
def test_insert_default_values():
limbo = TestLimboShell(
"CREATE TABLE t (a integer default(42),b integer default (43),c integer default(44));"
)
for _ in range(1, 10):
limbo.execute_dot("INSERT INTO t DEFAULT VALUES;")
limbo.run_test("insert-default-values", "SELECT * FROM t;", "42|43|44\n" * 9)
limbo.quit()
def main():
console.info("Running all Limbo CLI tests...")
test_basic_queries()
test_schema_operations()
test_file_operations()
@@ -259,4 +318,10 @@ if __name__ == "__main__":
test_import_csv_verbose()
test_import_csv_skip()
test_table_patterns()
print("All tests have passed")
test_update_with_limit()
test_update_with_limit_and_offset()
console.info("All tests have passed")
if __name__ == "__main__":
main()


@@ -0,0 +1,122 @@
from typing import Any, Optional, Union
from rich.console import Console, JustifyMethod
from rich.theme import Theme
from rich.style import Style
custom_theme = Theme(
{
"info": "bold blue",
"error": "bold red",
"debug": "bold blue",
"test": "bold green",
}
)
console = Console(theme=custom_theme, force_terminal=True)
def info(
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
justify: Optional[JustifyMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
log_locals: bool = False,
_stack_offset: int = 1,
):
console.log(
"[info]INFO[/info]",
*objects,
sep=sep,
end=end,
style=style,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
log_locals=log_locals,
_stack_offset=_stack_offset + 1,
)
def error(
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
justify: Optional[JustifyMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
log_locals: bool = False,
_stack_offset: int = 1,
):
console.log(
"[error]ERROR[/error]",
*objects,
sep=sep,
end=end,
style=style,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
log_locals=log_locals,
_stack_offset=_stack_offset + 1,
)
def debug(
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
justify: Optional[JustifyMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
log_locals: bool = False,
_stack_offset: int = 1,
):
console.log(
"[debug]DEBUG[/debug]",
*objects,
sep=sep,
end=end,
style=style,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
log_locals=log_locals,
_stack_offset=_stack_offset + 1,
)
def test(
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
justify: Optional[JustifyMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
log_locals: bool = False,
_stack_offset: int = 1,
):
console.log(
"[test]TEST[/test]",
*objects,
sep=sep,
end=end,
style=style,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
log_locals=log_locals,
_stack_offset=_stack_offset + 1,
)
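The four helpers above are thin wrappers over rich's Console.log that prepend a severity tag; a minimal usage sketch, assuming the cli_tests.console import path used by the other test modules:

from cli_tests import console

console.info("opening database", "testing/testing.db")  # logged with an INFO tag
console.test("Running test: select-const-1")            # logged with a TEST tag
console.debug("OUTPUT:", repr("1|Alice"))                # logged with a DEBUG tag
console.error("Test FAILED: unexpected output")          # logged with an ERROR tag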


@@ -0,0 +1,371 @@
#!/usr/bin/env python3
# Eventually extract these tests to be in the fuzzing integration tests
import os
from faker import Faker
from faker.providers.lorem.en_US import Provider as P
from cli_tests.test_limbo_cli import TestLimboShell
from pydantic import BaseModel
from cli_tests import console
from enum import Enum
import random
import sqlite3
sqlite_flags = os.getenv("SQLITE_FLAGS", "-q").split(" ")
keywords = [
"ABORT",
"ACTION",
"ADD",
"AFTER",
"ALL",
"ALTER",
"ALWAYS",
"ANALYZE",
"AND",
"AS",
"ASC",
"ATTACH",
"AUTOINCREMENT",
"BEFORE",
"BEGIN",
"BETWEEN",
"BY",
"CASCADE",
"CASE",
"CAST",
"CHECK",
"COLLATE",
"COLUMN",
"COMMIT",
"CONFLICT",
"CONSTRAINT",
"CREATE",
"CROSS",
"CURRENT",
"CURRENT_DATE",
"CURRENT_TIME",
"CURRENT_TIMESTAMP",
"DATABASE",
"DEFAULT",
"DEFERRABLE",
"DEFERRED",
"DELETE",
"DESC",
"DETACH",
"DISTINCT",
"DO",
"DROP",
"EACH",
"ELSE",
"END",
"ESCAPE",
"EXCEPT",
"EXCLUDE",
"EXCLUSIVE",
"EXISTS",
"EXPLAIN",
"FAIL",
"FILTER",
"FIRST",
"FOLLOWING",
"FOR",
"FOREIGN",
"FROM",
"FULL",
"GENERATED",
"GLOB",
"GROUP",
"GROUPS",
"HAVING",
"IF",
"IGNORE",
"IMMEDIATE",
"IN",
"INDEX",
"INDEXED",
"INITIALLY",
"INNER",
"INSERT",
"INSTEAD",
"INTERSECT",
"INTO",
"IS",
"ISNULL",
"JOIN",
"KEY",
"LAST",
"LEFT",
"LIKE",
"LIMIT",
"MATCH",
"MATERIALIZED",
"NATURAL",
"NO",
"NOT",
"NOTHING",
"NOTNULL",
"NULL",
"NULLS",
"OF",
"OFFSET",
"ON",
"OR",
"ORDER",
"OTHERS",
"OUTER",
"OVER",
"PARTITION",
"PLAN",
"PRAGMA",
"PRECEDING",
"PRIMARY",
"QUERY",
"RAISE",
"RANGE",
"RECURSIVE",
"REFERENCES",
"REGEXP",
"REINDEX",
"RELEASE",
"RENAME",
"REPLACE",
"RESTRICT",
"RETURNING",
"RIGHT",
"ROLLBACK",
"ROW",
"ROWS",
"SAVEPOINT",
"SELECT",
"SET",
"TABLE",
"TEMP",
"TEMPORARY",
"THEN",
"TIES",
"TO",
"TRANSACTION",
"TRIGGER",
"UNBOUNDED",
"UNION",
"UNIQUE",
"UPDATE",
"USING",
"VACUUM",
"VALUES",
"VIEW",
"VIRTUAL",
"WHEN",
"WHERE",
"WINDOW",
"WITH",
"WITHOUT",
]
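# Drop SQL keywords from faker's lorem word list so randomly generated table and
# column names never collide with reserved words.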
P.word_list = tuple(word for word in P.word_list if word.upper() not in keywords)
del P
fake: Faker = Faker(locale="en_US").unique
Faker.seed(0)
class ColumnType(Enum):
blob = "blob"
integer = "integer"
real = "real"
text = "text"
def generate(self, faker: Faker) -> str:
match self.value:
case "blob":
blob = sqlite3.Binary(faker.binary(length=4)).hex()
return f"x'{blob}'"
case "integer":
return str(faker.pyint())
case "real":
return str(faker.pyfloat())
case "text":
return f"'{faker.text(max_nb_chars=20)}'"
def __str__(self) -> str:
return self.value.upper()
class Column(BaseModel):
name: str
col_type: ColumnType
primary_key: bool
def generate(faker: Faker) -> "Column":
name = faker.word().replace(" ", "_")
return Column(
name=name,
col_type=Faker().enum(ColumnType),
primary_key=False,
)
def __str__(self) -> str:
return f"{self.name} {str(self.col_type)}"
class Table(BaseModel):
columns: list[Column]
name: str
def create_table(self) -> str:
accum = f"CREATE TABLE {self.name} "
col_strings = [str(col) for col in self.columns]
pk_columns = [col.name for col in self.columns if col.primary_key]
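# Every generated table has at least one primary-key column (generate_test picks
# between 1 and col_amount), so the PRIMARY KEY (...) clause is never empty.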
primary_key_stmt = "PRIMARY KEY (" + ", ".join(pk_columns) + ")"
col_strings.append(primary_key_stmt)
accum = accum + "(" + ", ".join(col_strings) + ");"
return accum
def generate_insert(self) -> str:
vals = [col.col_type.generate(fake) for col in self.columns]
vals = ", ".join(vals)
return f"INSERT INTO {self.name} VALUES ({vals});"
class ConstraintTest(BaseModel):
table: Table
db_path: str = "testing/constraint.db"
insert_stmts: list[str]
insert_errors: list[str]
def run(
self,
limbo: TestLimboShell,
):
big_stmt = [self.table.create_table()]
for insert_stmt in self.insert_stmts:
big_stmt.append(insert_stmt)
limbo.run_test("Inserting values into table", "\n".join(big_stmt), "")
for insert_stmt in self.insert_errors:
limbo.run_test_fn(
insert_stmt,
lambda val: "Runtime error: UNIQUE constraint failed" in val,
)
limbo.run_test(
"Nothing was inserted after error",
f"SELECT count(*) from {self.table.name};",
str(len(self.insert_stmts)),
)
def validate_with_expected(result: str, expected: str):
return expected in result  # plain bool so a mismatch actually fails the assertion
def generate_test(col_amount: int, primary_keys: int) -> ConstraintTest:
assert col_amount >= primary_keys, "Cannot have more primary keys than columns"
cols: list[Column] = []
for _ in range(col_amount):
cols.append(Column.generate(fake))
pk_cols = random.sample(
population=cols,
k=primary_keys,
)
for col in pk_cols:
for c in cols:
if col.name == c.name:
c.primary_key = True
table = Table(columns=cols, name=fake.word())
insert_stmts = [table.generate_insert() for _ in range(col_amount)]
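# Re-running the exact same INSERT statements is expected to violate the primary
# key, so they double as the expected-failure list.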
return ConstraintTest(
table=table, insert_stmts=insert_stmts, insert_errors=insert_stmts
)
def custom_test_1() -> ConstraintTest:
cols = [
Column(name="id", col_type="integer", primary_key=True),
Column(name="username", col_type="text", primary_key=True),
]
table = Table(columns=cols, name="users")
insert_stmts = [
"INSERT INTO users VALUES (1, 'alice');",
"INSERT INTO users VALUES (2, 'bob');",
]
return ConstraintTest(
table=table, insert_stmts=insert_stmts, insert_errors=insert_stmts
)
def custom_test_2(limbo: TestLimboShell):
create = "CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"
first_insert = "INSERT INTO users VALUES (1, 'alice');"
limbo.run_test("Create unique INT index", create + first_insert, "")
fail_insert = "INSERT INTO users VALUES (1, 'bob');"
limbo.run_test_fn(
fail_insert,
lambda val: "Runtime error: UNIQUE constraint failed" in val,
)
def all_tests() -> list[ConstraintTest]:
tests: list[ConstraintTest] = []
max_cols = 10
curr_fake = Faker()
for _ in range(25):
num_cols = curr_fake.pyint(1, max_cols)
test = generate_test(num_cols, curr_fake.pyint(1, num_cols))
tests.append(test)
tests.append(custom_test_1())
return tests
def cleanup(db_fullpath: str):
wal_path = f"{db_fullpath}-wal"
shm_path = f"{db_fullpath}-shm"
paths = [db_fullpath, wal_path, shm_path]
for path in paths:
if os.path.exists(path):
os.remove(path)
def main():
tests = all_tests()
for test in tests:
console.info(test.table)
db_path = test.db_path
try:
# Use with syntax to automatically close shell on error
with TestLimboShell("") as limbo:
limbo.execute_dot(f".open {db_path}")
test.run(limbo)
except Exception as e:
console.error(f"Test FAILED: {e}")
console.debug(test.table.create_table(), test.insert_stmts)
cleanup(db_path)
exit(1)
# delete the db after every compat test so we have a fresh db for the next test
cleanup(db_path)
db_path = "testing/constraint.db"
try:
with TestLimboShell("") as limbo:
limbo.execute_dot(f".open {db_path}")
custom_test_2(limbo)
except Exception as e:
console.error(f"Test FAILED: {e}")
cleanup(db_path)
exit(1)
cleanup(db_path)
console.info("All tests passed successfully.")
if __name__ == "__main__":
main()


@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import os
from test_limbo_cli import TestLimboShell
from cli_tests.test_limbo_cli import TestLimboShell
from cli_tests import console
sqlite_exec = "./scripts/limbo-sqlite3"
sqlite_flags = os.getenv("SQLITE_FLAGS", "-q").split(" ")
@@ -81,7 +82,7 @@ def test_regexp():
lambda res: "Parse error: no such function" in res,
)
limbo.run_test_fn(f".load {extension_path}", null)
print(f"Extension {extension_path} loaded successfully.")
console.info(f"Extension {extension_path} loaded successfully.")
limbo.run_test_fn("SELECT regexp('a.c', 'abc');", true)
limbo.run_test_fn("SELECT regexp('a.c', 'ac');", false)
limbo.run_test_fn("SELECT regexp('[0-9]+', 'the year is 2021');", true)
@@ -339,16 +340,18 @@ def test_series():
def test_kv():
ext_path = "target/debug/liblimbo_ext_tests"
limbo = TestLimboShell()
# first, create a normal table to ensure no issues
limbo.execute_dot("CREATE TABLE other (a,b,c);")
limbo.execute_dot("INSERT INTO other values (23,32,23);")
limbo.run_test_fn(
"create virtual table t using kv_store;",
lambda res: "Parse error: no such module: kv_store" in res,
)
limbo.execute_dot(f".load {ext_path}")
limbo.run_test_fn(
limbo.execute_dot(
"create virtual table t using kv_store;",
null,
"can create kv_store vtable",
)
limbo.run_test_fn(".schema", lambda res: "CREATE VIRTUAL TABLE t" in res)
limbo.run_test_fn(
"insert into t values ('hello', 'world');",
null,
@@ -395,10 +398,35 @@ def test_kv():
limbo.run_test_fn(
"select count(*) from t;", lambda res: "100" == res, "can insert 100 rows"
)
limbo.run_test_fn("update t set value = 'updated' where key = 'key33';", null)
limbo.run_test_fn(
"select * from t where key = 'key33';",
lambda res: res == "key33|updated",
"can update single row",
)
limbo.run_test_fn(
"select COUNT(*) from t where value = 'updated';",
lambda res: res == "1",
"only updated a single row",
)
limbo.run_test_fn("update t set value = 'updated2';", null)
limbo.run_test_fn(
"select COUNT(*) from t where value = 'updated2';",
lambda res: res == "100",
"can update all rows",
)
limbo.run_test_fn("delete from t limit 96;", null, "can delete 96 rows")
limbo.run_test_fn(
"select count(*) from t;", lambda res: "4" == res, "four rows remain"
)
limbo.run_test_fn(
"update t set key = '100' where 1;", null, "where clause evaluates properly"
)
limbo.run_test_fn(
"select * from t where key = '100';",
lambda res: res == "100|updated2",
"there is only 1 key remaining after setting all keys to same value",
)
limbo.quit()
@@ -494,11 +522,33 @@ def test_vfs():
lambda res: res == "50",
"Tested large write to testfs",
)
print("Tested large write to testfs")
# open regular db file to ensure we don't segfault when vfs file is dropped
limbo.execute_dot(".open testing/vfs.db")
limbo.execute_dot("create table test (id integer primary key, value float);")
limbo.execute_dot("insert into test (value) values (1.0);")
console.info("Tested large write to testfs")
limbo.quit()
def test_drop_virtual_table():
ext_path = "target/debug/liblimbo_ext_tests"
limbo = TestLimboShell()
limbo.execute_dot(f".load {ext_path}")
limbo.debug_print(
"create virtual table t using kv_store;",
)
limbo.run_test_fn(".schema", lambda res: "CREATE VIRTUAL TABLE t" in res)
limbo.run_test_fn(
"insert into t values ('hello', 'world');",
null,
"can insert into kv_store vtable",
)
limbo.run_test_fn(
"DROP TABLE t;",
lambda res: "VDestroy called" in res,
"can drop kv_store vtable",
)
limbo.run_test_fn(
"DROP TABLE t;",
lambda res: "× Parse error: No such table: t" == res,
"should error when drop kv_store vtable",
)
limbo.quit()
@@ -538,20 +588,25 @@ def cleanup():
os.remove("testing/vfs.db-wal")
if __name__ == "__main__":
def main():
try:
test_regexp()
test_uuid()
test_aggregates()
test_crypto()
test_series()
test_kv()
test_ipaddr()
test_vfs()
test_sqlite_vfs_compat()
test_kv()
test_drop_virtual_table()
except Exception as e:
print(f"Test FAILED: {e}")
console.error(f"Test FAILED: {e}")
cleanup()
exit(1)
cleanup()
print("All tests passed successfully.")
console.info("All tests passed successfully.")
if __name__ == "__main__":
main()

110
testing/cli_tests/memory.py Executable file

@@ -0,0 +1,110 @@
#!/usr/bin/env python3
import os
from cli_tests.test_limbo_cli import TestLimboShell
from cli_tests import console
sqlite_flags = os.getenv("SQLITE_FLAGS", "-q").split(" ")
def validate_with_expected(result: str, expected: str):
return expected in result  # plain bool so a mismatch actually fails the assertion
def stub_memory_test(
limbo: TestLimboShell,
name: str,
blob_size: int = 1024**2,
vals: int = 100,
blobs: bool = True,
):
# zero_blob_size = 1024 **2
zero_blob = "0" * blob_size * 2
# vals = 100
big_stmt = ["CREATE TABLE temp (t1 BLOB, t2 INTEGER);"]
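# When blobs are enabled, even iterations insert a zeroblob into t1 and odd
# iterations insert the integer i into t2, interleaving large and small rows.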
big_stmt = big_stmt + [
f"INSERT INTO temp (t1) VALUES (zeroblob({blob_size}));"
if i % 2 == 0 and blobs
else f"INSERT INTO temp (t2) VALUES ({i});"
for i in range(vals * 2)
]
expected = []
for i in range(vals * 2):
if i % 2 == 0 and blobs:
big_stmt.append(f"SELECT hex(t1) FROM temp LIMIT 1 OFFSET {i};")
expected.append(zero_blob)
else:
big_stmt.append(f"SELECT t2 FROM temp LIMIT 1 OFFSET {i};")
expected.append(f"{i}")
big_stmt.append("SELECT count(*) FROM temp;")
expected.append(str(vals * 2))
big_stmt = "".join(big_stmt)
expected = "\n".join(expected)
limbo.run_test_fn(big_stmt, lambda res: validate_with_expected(res, expected), name)
# TODO: no delete tests for now because Limbo outputs some debug information on delete
def memory_tests() -> list[dict]:
tests = []
for vals in range(0, 1000, 100):
tests.append(
{
"name": f"small-insert-integer-vals-{vals}",
"vals": vals,
"blobs": False,
}
)
tests.append(
{
"name": f"small-insert-blob-interleaved-blob-size-{1024}",
"vals": 10,
"blob_size": 1024,
}
)
tests.append(
{
"name": f"big-insert-blob-interleaved-blob-size-{1024}",
"vals": 100,
"blob_size": 1024,
}
)
for blob_size in range(0, (1024 * 1024) + 1, 1024 * 4**4):
if blob_size == 0:
continue
tests.append(
{
"name": f"small-insert-blob-interleaved-blob-size-{blob_size}",
"vals": 10,
"blob_size": blob_size,
}
)
tests.append(
{
"name": f"big-insert-blob-interleaved-blob-size-{blob_size}",
"vals": 100,
"blob_size": blob_size,
}
)
return tests
def main():
tests = memory_tests()
# TODO see how to parallelize this loop with different subprocesses
for test in tests:
try:
with TestLimboShell("") as limbo:
stub_memory_test(limbo, **test)
except Exception as e:
console.error(f"Test FAILED: {e}")
exit(1)
console.info("All tests passed successfully.")
if __name__ == "__main__":
main()


@@ -5,6 +5,7 @@ from time import sleep
import subprocess
from pathlib import Path
from typing import Callable, List, Optional
from cli_tests import console
PIPE_BUF = 4096
@@ -50,7 +51,8 @@ class LimboShell:
return ""
self._write_to_pipe(f"SELECT '{end_marker}';")
output = ""
while True:
done = False
while not done:
ready, _, errors = select.select(
[self.pipe.stdout, self.pipe.stderr],
[],
@@ -58,7 +60,7 @@ class LimboShell:
)
ready_or_errors = set(ready + errors)
if self.pipe.stderr in ready_or_errors:
self._handle_error()
done = self._handle_error()
if self.pipe.stdout in ready_or_errors:
fragment = self.pipe.stdout.read(PIPE_BUF).decode()
output += fragment
@@ -71,7 +73,7 @@ class LimboShell:
self.pipe.stdin.write((command + "\n").encode())
self.pipe.stdin.flush()
def _handle_error(self) -> None:
def _handle_error(self) -> bool:
while True:
ready, _, errors = select.select(
[self.pipe.stderr], [], [self.pipe.stderr], 0
@@ -79,7 +81,7 @@ class LimboShell:
if not (ready + errors):
break
error_output = self.pipe.stderr.read(PIPE_BUF).decode()
print(error_output, end="")
console.error(error_output, end="", _stack_offset=2)
raise RuntimeError("Error encountered in Limbo shell.")
@staticmethod
@@ -111,7 +113,6 @@ class TestLimboShell:
if init_commands is None:
# Default initialization
init_commands = """
.open :memory:
CREATE TABLE users (id INTEGER PRIMARY KEY, first_name TEXT, last_name TEXT, age INTEGER);
CREATE TABLE products (id INTEGER PRIMARY KEY, name TEXT, price INTEGER);
INSERT INTO users VALUES (1, 'Alice', 'Smith', 30), (2, 'Bob', 'Johnson', 25),
@@ -131,7 +132,7 @@ INSERT INTO t VALUES (zeroblob(1024 - 1), zeroblob(1024 - 2), zeroblob(1024 - 3)
self.shell.quit()
def run_test(self, name: str, sql: str, expected: str) -> None:
print(f"Running test: {name}")
console.test(f"Running test: {name}", _stack_offset=2)
actual = self.shell.execute(sql)
assert actual == expected, (
f"Test failed: {name}\n"
@@ -141,17 +142,26 @@ INSERT INTO t VALUES (zeroblob(1024 - 1), zeroblob(1024 - 2), zeroblob(1024 - 3)
)
def debug_print(self, sql: str):
print(f"debugging: {sql}")
console.debug(f"debugging: {sql}", _stack_offset=2)
actual = self.shell.execute(sql)
print(f"OUTPUT:\n{repr(actual)}")
console.debug(f"OUTPUT:\n{repr(actual)}", _stack_offset=2)
def run_test_fn(
self, sql: str, validate: Callable[[str], bool], desc: str = ""
) -> None:
actual = self.shell.execute(sql)
# Print the test description before executing the SQL command;
# printing it afterwards makes it unclear which test actually failed.
if desc:
print(f"Testing: {desc}")
console.test(f"Testing: {desc}", _stack_offset=2)
actual = self.shell.execute(sql)
assert validate(actual), f"Test failed\nSQL: {sql}\nActual:\n{repr(actual)}"
def execute_dot(self, dot_command: str) -> None:
self.shell._write_to_pipe(dot_command)
# Enables the use of `with` syntax
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, exception_traceback):
self.quit()

138
testing/cli_tests/update.py Normal file

@@ -0,0 +1,138 @@
#!/usr/bin/env python3
import os
from cli_tests.test_limbo_cli import TestLimboShell
from pydantic import BaseModel
from cli_tests import console
sqlite_flags = os.getenv("SQLITE_FLAGS", "-q").split(" ")
class UpdateTest(BaseModel):
name: str
db_schema: str = "CREATE TABLE test (key INTEGER, t1 BLOB, t2 INTEGER, t3 TEXT);"
blob_size: int = 1024
vals: int = 1000
updates: int = 1
db_path: str = "testing/update.db"
def init_db(self):
with TestLimboShell(
init_commands="",
exec_name="sqlite3",
flags=f"{self.db_path}",
) as sqlite:
sqlite.execute_dot(f".open {self.db_path}")
zero_blob = "0" * self.blob_size * 2
t2_val = "1"
t3_val = "2"
stmt = [self.db_schema]
stmt = stmt + [
f"INSERT INTO test (key, t1, t2, t3) VALUES ({i} ,zeroblob({self.blob_size}), {t2_val}, {t3_val});"
for i in range(self.vals)
]
stmt.append("SELECT count(*) FROM test;")
sqlite.run_test(
"Init Update Db in Sqlite",
"".join(stmt),
f"{self.vals}",
)
stmt = [
f"SELECT hex(t1), t2, t3 FROM test LIMIT 1 OFFSET {i};"
for i in range(self.vals)
]
expected = [f"{zero_blob}|{t2_val}|{t3_val}" for _ in range(self.vals)]
sqlite.run_test(
"Check Values correctly inserted in Sqlite",
"".join(stmt),
"\n".join(expected),
)
def run(self, limbo: TestLimboShell):
limbo.execute_dot(f".open {self.db_path}")
# TODO blobs are hard. Forget about blob updates for now
# one_blob = ("0" * ((self.blob_size * 2) - 1)) + "1"
# TODO: for now, update just one row. To expand these tests in the future,
# use self.updates and perform more than one update.
t2_update_val = "123"
stmt = f"UPDATE test SET t2 = {t2_update_val} WHERE key = {0};"
limbo.run_test(self.name, stmt, "")
def test_compat(self):
console.info("Testing in SQLite\n")
with TestLimboShell(
init_commands="",
exec_name="sqlite3",
flags=f"{self.db_path}",
) as sqlite:
sqlite.execute_dot(f".open {self.db_path}")
zero_blob = "0" * self.blob_size * 2
t2_val = "1"
t2_update_val = "123"
t3_val = "2"
stmt = []
stmt.append("SELECT count(*) FROM test;")
sqlite.run_test(
"Check all rows present in Sqlite",
"".join(stmt),
f"{self.vals}",
)
stmt = [
f"SELECT hex(t1), t2, t3 FROM test LIMIT 1 OFFSET {i};"
for i in range(self.vals)
]
expected = [
f"{zero_blob}|{t2_val}|{t3_val}"
if i != 0
else f"{zero_blob}|{t2_update_val}|{t3_val}"
for i in range(self.vals)
]
sqlite.run_test(
"Check Values correctly updated in Sqlite",
"".join(stmt),
"\n".join(expected),
)
console.info()
def cleanup(db_fullpath: str):
wal_path = f"{db_fullpath}-wal"
shm_path = f"{db_fullpath}-shm"
paths = [db_fullpath, wal_path, shm_path]
for path in paths:
if os.path.exists(path):
os.remove(path)
def main():
test = UpdateTest(name="Update 1 column", vals=1)
console.info(test)
db_path = test.db_path
try:
test.init_db()
# Use with syntax to automatically close shell on error
with TestLimboShell("") as limbo:
test.run(limbo)
test.test_compat()
except Exception as e:
console.error(f"Test FAILED: {e}")
cleanup(db_path)
exit(1)
# delete the db after every compat test so we have a fresh db for the next test
cleanup(db_path)
console.info("All tests passed successfully.")
if __name__ == "__main__":
main()


@@ -0,0 +1,116 @@
#!/usr/bin/env python3
# vfs benchmarking/comparison
import os
from pathlib import Path
import subprocess
import statistics
import argparse
from time import perf_counter, sleep
from typing import Dict
from cli_tests.test_limbo_cli import TestLimboShell
from cli_tests.console import info, error, test
LIMBO_BIN = Path("./target/release/limbo")
DB_FILE = Path("testing/temp.db")
vfs_list = ["syscall", "io_uring"]
def append_time(times, start, perf_counter):
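# Record the elapsed time and return True so this helper can be chained inside
# the validate lambda passed to run_test_fn, which only checks truthiness.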
times.append(perf_counter() - start)
return True
def bench_one(vfs: str, sql: str, iterations: int) -> list[float]:
"""
Launch a single Limbo process with the requested VFS, run `sql`
`iterations` times, return a list of elapsed wallclock times.
"""
shell = TestLimboShell(
exec_name=str(LIMBO_BIN),
flags=f"-q -m list --vfs {vfs} {DB_FILE}",
init_commands="",
)
times: list[float] = []
for i in range(1, iterations + 1):
start = perf_counter()
_ = shell.run_test_fn(
sql, lambda x: x is not None and append_time(times, start, perf_counter)
)
test(f" {vfs} | run {i:>3}: {times[-1]:.6f}s")
shell.quit()
return times
def setup_temp_db() -> None:
cmd = ["sqlite3", "testing/testing.db", ".clone testing/temp.db"]
proc = subprocess.run(cmd, check=True)
proc.check_returncode()
sleep(0.3) # make sure it's finished
def cleanup_temp_db() -> None:
if DB_FILE.exists():
DB_FILE.unlink()
os.remove("testing/temp.db-wal")
def main() -> None:
parser = argparse.ArgumentParser(
description="Benchmark a SQL statement against all Limbo VFS backends."
)
parser.add_argument("sql", help="SQL statement to execute (quote it)")
parser.add_argument("iterations", type=int, help="number of repetitions")
args = parser.parse_args()
setup_temp_db()
sql, iterations = args.sql, args.iterations
if iterations <= 0:
error("iterations must be a positive integer")
parser.error("Invalid Arguments")
info(f"SQL : {sql}")
info(f"Iterations : {iterations}")
info(f"Database : {DB_FILE.resolve()}")
info("-" * 60)
averages: Dict[str, float] = {}
for vfs in vfs_list:
test(f"\n### VFS: {vfs} ###")
times = bench_one(vfs, sql, iterations)
info(f"All times ({vfs}):", " ".join(f"{t:.6f}" for t in times))
avg = statistics.mean(times)
averages[vfs] = avg
info("\n" + "-" * 60)
info("Average runtime per VFS")
info("-" * 60)
for vfs in vfs_list:
info(f"vfs: {vfs} : {averages[vfs]:.6f} s")
info("-" * 60)
baseline = "syscall"
baseline_avg = averages[baseline]
name_pad = max(len(v) for v in vfs_list)
for vfs in vfs_list:
avg = averages[vfs]
if vfs == baseline:
info(f"{vfs:<{name_pad}} : {avg:.6f} (baseline)")
else:
pct = (avg - baseline_avg) / baseline_avg * 100.0
faster_slower = "slower" if pct > 0 else "faster"
info(
f"{vfs:<{name_pad}} : {avg:.6f} ({abs(pct):.1f}% {faster_slower} than {baseline})"
)
info("-" * 60)
cleanup_temp_db()
if __name__ == "__main__":
main()

157
testing/cli_tests/write.py Executable file

@@ -0,0 +1,157 @@
#!/usr/bin/env python3
import os
from cli_tests.test_limbo_cli import TestLimboShell
from pydantic import BaseModel
from cli_tests import console
sqlite_flags = os.getenv("SQLITE_FLAGS", "-q").split(" ")
class InsertTest(BaseModel):
name: str
db_schema: str = "CREATE TABLE test (t1 BLOB, t2 INTEGER);"
blob_size: int = 1024**2
vals: int = 100
has_blob: bool = True
db_path: str = "testing/writes.db"
def run(self, limbo: TestLimboShell):
zero_blob = "0" * self.blob_size * 2
big_stmt = [self.db_schema]
big_stmt = big_stmt + [
f"INSERT INTO test (t1) VALUES (zeroblob({self.blob_size}));"
if i % 2 == 0 and self.has_blob
else f"INSERT INTO test (t2) VALUES ({i});"
for i in range(self.vals * 2)
]
expected = []
for i in range(self.vals * 2):
if i % 2 == 0 and self.has_blob:
big_stmt.append(f"SELECT hex(t1) FROM test LIMIT 1 OFFSET {i};")
expected.append(zero_blob)
else:
big_stmt.append(f"SELECT t2 FROM test LIMIT 1 OFFSET {i};")
expected.append(f"{i}")
big_stmt.append("SELECT count(*) FROM test;")
expected.append(str(self.vals * 2))
big_stmt = "".join(big_stmt)
expected = "\n".join(expected)
limbo.run_test_fn(
big_stmt, lambda res: validate_with_expected(res, expected), self.name
)
def test_compat(self):
console.info("Testing in SQLite\n")
with TestLimboShell(
init_commands="",
exec_name="sqlite3",
flags=f"{self.db_path}",
) as sqlite:
sqlite.run_test_fn(
".show",
lambda res: f"filename: {self.db_path}" in res,
"Opened db file created with Limbo in sqlite3",
)
sqlite.run_test_fn(
".schema",
lambda res: self.db_schema in res,
"Tables created by previous Limbo test exist in db file",
)
sqlite.run_test_fn(
"SELECT count(*) FROM test;",
lambda res: res == str(self.vals * 2),
"Counting total rows inserted",
)
console.info()
def validate_with_expected(result: str, expected: str):
return expected in result  # plain bool so a mismatch actually fails the assertion
# TODO no delete tests for now
def blob_tests() -> list[InsertTest]:
tests: list[InsertTest] = []
for vals in range(0, 1000, 100):
tests.append(
InsertTest(
name=f"small-insert-integer-vals-{vals}",
vals=vals,
has_blob=False,
)
)
tests.append(
InsertTest(
name=f"small-insert-blob-interleaved-blob-size-{1024}",
vals=10,
blob_size=1024,
)
)
tests.append(
InsertTest(
name=f"big-insert-blob-interleaved-blob-size-{1024}",
vals=100,
blob_size=1024,
)
)
for blob_size in range(0, (1024 * 1024) + 1, 1024 * 4**4):
if blob_size == 0:
continue
tests.append(
InsertTest(
name=f"small-insert-blob-interleaved-blob-size-{blob_size}",
vals=10,
blob_size=blob_size,
)
)
tests.append(
InsertTest(
name=f"big-insert-blob-interleaved-blob-size-{blob_size}",
vals=100,
blob_size=blob_size,
)
)
return tests
def cleanup(db_fullpath: str):
wal_path = f"{db_fullpath}-wal"
shm_path = f"{db_fullpath}-shm"
paths = [db_fullpath, wal_path, shm_path]
for path in paths:
if os.path.exists(path):
os.remove(path)
def main():
tests = blob_tests()
for test in tests:
console.info(test)
db_path = test.db_path
try:
# Use with syntax to automatically close shell on error
with TestLimboShell("") as limbo:
limbo.execute_dot(f".open {db_path}")
test.run(limbo)
test.test_compat()
except Exception as e:
console.error(f"Test FAILED: {e}")
cleanup(db_path)
exit(1)
# delete the db after every compat test so we have a fresh db for the next test
cleanup(db_path)
console.info("All tests passed successfully.")
if __name__ == "__main__":
main()


@@ -0,0 +1,43 @@
#!/usr/bin/env tclsh
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_execsql_test_on_specific_db {:memory:} default-value-text {
CREATE TABLE t1(x INTEGER PRIMARY KEY, y TEXT DEFAULT 'default_value');
INSERT INTO t1 (x) VALUES (1);
SELECT y FROM t1 WHERE x = 1;
} {default_value}
do_execsql_test_on_specific_db {:memory:} default-value-integer {
CREATE TABLE t2(x INTEGER PRIMARY KEY, y INTEGER DEFAULT 42);
INSERT INTO t2 (x) VALUES (1);
SELECT y FROM t2 WHERE x = 1;
} {42}
do_execsql_test_on_specific_db {:memory:} default-value-real {
CREATE TABLE t3(x INTEGER PRIMARY KEY, y REAL DEFAULT 3.14);
INSERT INTO t3 (x) VALUES (1);
SELECT y FROM t3 WHERE x = 1;
} {3.14}
do_execsql_test_on_specific_db {:memory:} default-value-null {
CREATE TABLE t5(x INTEGER PRIMARY KEY, y TEXT DEFAULT NULL);
INSERT INTO t5 (x) VALUES (1);
SELECT y FROM t5 WHERE x = 1;
} {}
do_execsql_test_on_specific_db {:memory:} default-value-boolean {
CREATE TABLE t6(x INTEGER PRIMARY KEY, y BOOLEAN DEFAULT 1);
INSERT INTO t6 (x) VALUES (1);
SELECT y FROM t6 WHERE x = 1;
} {1}
do_execsql_test_on_specific_db {:memory:} default-value-function {
CREATE TABLE t7(x INTEGER PRIMARY KEY, y INTEGER DEFAULT (ABS(-5)));
INSERT INTO t7 (x) VALUES (1);
SELECT y FROM t7 WHERE x = 1;
} {5}


@@ -15,4 +15,149 @@ do_execsql_test_on_specific_db {:memory:} must-be-int-insert {
} {1
2
3
4}
do_execsql_test_on_specific_db {:memory:} strict-basic-creation {
CREATE TABLE test1(id INTEGER, name TEXT, price REAL) STRICT;
INSERT INTO test1 VALUES(1, 'item1', 10.5);
SELECT * FROM test1;
} {1|item1|10.5}
do_execsql_test_in_memory_any_error strict-require-datatype {
CREATE TABLE test2(id INTEGER, name) STRICT;
}
do_execsql_test_in_memory_any_error strict-valid-datatypes {
CREATE TABLE test2(id INTEGER, value DATETIME) STRICT;
}
do_execsql_test_in_memory_any_error strict-type-enforcement {
CREATE TABLE test3(id INTEGER, name TEXT, price REAL) STRICT;
INSERT INTO test3 VALUES(1, 'item1', 'not-a-number');
}
do_execsql_test_on_specific_db {:memory:} strict-type-coercion {
CREATE TABLE test4(id INTEGER, name TEXT, price REAL) STRICT;
INSERT INTO test4 VALUES(1, 'item1', '10.5');
SELECT typeof(price), price FROM test4;
} {real|10.5}
do_execsql_test_on_specific_db {:memory:} strict-any-flexibility {
CREATE TABLE test5(id INTEGER, data ANY) STRICT;
INSERT INTO test5 VALUES(1, 100);
INSERT INTO test5 VALUES(2, 'text');
INSERT INTO test5 VALUES(3, 3.14);
SELECT id, typeof(data) FROM test5 ORDER BY id;
} {1|integer
2|text
3|real}
do_execsql_test_on_specific_db {:memory:} strict-any-preservation {
CREATE TABLE test6(id INTEGER, code ANY) STRICT;
INSERT INTO test6 VALUES(1, '000123');
SELECT typeof(code), code FROM test6;
} {text|000123}
do_execsql_test_in_memory_any_error strict-int-vs-integer-pk {
CREATE TABLE test8(id INT PRIMARY KEY, name TEXT) STRICT;
INSERT INTO test8 VALUES(NULL, 'test');
}
do_execsql_test_on_specific_db {:memory:} strict-integer-pk-behavior {
CREATE TABLE test9(id INTEGER PRIMARY KEY, name TEXT) STRICT;
INSERT INTO test9 VALUES(NULL, 'test');
SELECT id, name FROM test9;
} {1|test}
do_execsql_test_on_specific_db {:memory:} strict-mixed-inserts {
CREATE TABLE test11(
id INTEGER PRIMARY KEY,
name TEXT,
price REAL,
quantity INT,
tags ANY
) STRICT;
INSERT INTO test11 VALUES(1, 'item1', 10.5, 5, 'tag1');
INSERT INTO test11 VALUES(2, 'item2', 20.75, 10, 42);
SELECT id, name, price, quantity, typeof(tags) FROM test11 ORDER BY id;
} {1|item1|10.5|5|text
2|item2|20.75|10|integer}
do_execsql_test_on_specific_db {:memory:} strict-update-basic {
CREATE TABLE test1(id INTEGER, name TEXT, price REAL) STRICT;
INSERT INTO test1 VALUES(1, 'item1', 10.5);
UPDATE test1 SET price = 15.75 WHERE id = 1;
SELECT * FROM test1;
} {1|item1|15.75}
do_execsql_test_in_memory_any_error strict-update-type-enforcement {
CREATE TABLE test2(id INTEGER, name TEXT, price REAL) STRICT;
INSERT INTO test2 VALUES(1, 'item1', 10.5);
UPDATE test2 SET price = 'not-a-number' WHERE id = 1;
}
do_execsql_test_on_specific_db {:memory:} strict-update-type-coercion {
CREATE TABLE test3(id INTEGER, name TEXT, price REAL) STRICT;
INSERT INTO test3 VALUES(1, 'item1', 10.5);
UPDATE test3 SET price = '15.75' WHERE id = 1;
SELECT id, typeof(price), price FROM test3;
} {1|real|15.75}
do_execsql_test_on_specific_db {:memory:} strict-update-any-flexibility {
CREATE TABLE test4(id INTEGER, data ANY) STRICT;
INSERT INTO test4 VALUES(1, 100);
UPDATE test4 SET data = 'text' WHERE id = 1;
INSERT INTO test4 VALUES(2, 'original');
UPDATE test4 SET data = 3.14 WHERE id = 2;
SELECT id, typeof(data), data FROM test4 ORDER BY id;
} {1|text|text
2|real|3.14}
do_execsql_test_on_specific_db {:memory:} strict-update-any-preservation {
CREATE TABLE test5(id INTEGER, code ANY) STRICT;
INSERT INTO test5 VALUES(1, 'text');
UPDATE test5 SET code = '000123' WHERE id = 1;
SELECT typeof(code), code FROM test5;
} {text|000123}
do_execsql_test_in_memory_any_error strict-update-not-null-constraint {
CREATE TABLE test7(id INTEGER, name TEXT NOT NULL) STRICT;
INSERT INTO test7 VALUES(1, 'name');
UPDATE test7 SET name = NULL WHERE id = 1;
}
# Uncomment following test case when unique constraint is added
#do_execsql_test_any_error strict-update-pk-constraint {
# CREATE TABLE test8(id INTEGER PRIMARY KEY, name TEXT) STRICT;
# INSERT INTO test8 VALUES(1, 'name1');
# INSERT INTO test8 VALUES(2, 'name2');
# UPDATE test8 SET id = 2 WHERE id = 1;
#}
do_execsql_test_on_specific_db {:memory:} strict-update-multiple-columns {
CREATE TABLE test9(id INTEGER, name TEXT, price REAL, quantity INT) STRICT;
INSERT INTO test9 VALUES(1, 'item1', 10.5, 5);
UPDATE test9 SET name = 'updated', price = 20.75, quantity = 10 WHERE id = 1;
SELECT * FROM test9;
} {1|updated|20.75|10}
do_execsql_test_on_specific_db {:memory:} strict-update-where-clause {
CREATE TABLE test10(id INTEGER, category TEXT, price REAL) STRICT;
INSERT INTO test10 VALUES(1, 'A', 10);
INSERT INTO test10 VALUES(2, 'A', 20);
INSERT INTO test10 VALUES(3, 'B', 30);
UPDATE test10 SET price = price * 2 WHERE category = 'A';
SELECT id, price FROM test10 ORDER BY id;
} {1|20.0
2|40.0
3|30.0}
do_execsql_test_on_specific_db {:memory:} strict-update-expression {
CREATE TABLE test11(id INTEGER, name TEXT, price REAL, discount REAL) STRICT;
INSERT INTO test11 VALUES(1, 'item1', 100, 0.1);
UPDATE test11 SET price = price - (price * discount);
SELECT id, price FROM test11;
} {1|90.0}


@@ -272,4 +272,14 @@ do_execsql_test natural-join-and-using-join {
select u.id, u2.id, p.id from users u natural join products p join users u2 using (first_name) limit 3;
} {"1|1|1
1|1204|1
1|1261|1"}
# regression test for a backwards iteration left join case,
# where the null flag of the right table was not cleared after a previous unmatched row.
do_execsql_test left-join-backwards-iteration {
select users.id, users.first_name as user_name, products.name as product_name
from users left join products on users.id = products.id
where users.id < 13 order by users.id desc limit 3;
} {12|Alan|
11|Travis|accessories
10|Daniel|coat}


@@ -298,7 +298,7 @@ do_execsql_test json_extract_with_escaping {
} {{1}}
do_execsql_test json_extract_with_escaping_2 {
SELECT json_extract('{"\x61": 1}', '$."\x61"')
SELECT json_extract('{"a": 1}', '$."\x61"')
} {{1}}
do_execsql_test json_extract_null_path {
@@ -458,7 +458,7 @@ do_execsql_test json_arrow_chained {
select '{"a":2,"c":[4,5,{"f":7}]}' -> 'c' -> 2 ->> 'f'
} {{7}}
# TODO: fix me - this passes on SQLite and needs to be fixed in Limbo.
do_execsql_test json_extract_multiple_null_paths {
SELECT json_extract(1, null, null, null)
} {{}}
@@ -669,10 +669,10 @@ do_execsql_test json_from_json_object {
# FIXME: this behaviour differs from SQLite, although the SQLite docs state
# that it could change in a "future enhancement" (https://www.sqlite.org/json1.html#jobj)
#do_execsql_test json_object_duplicated_keys {
# SELECT json_object('key', 'value', 'key', 'value2');
#} {{{"key":"value2"}}}
#
do_execsql_test json_object_duplicated_keys {
SELECT json_object('key', 'value', 'key', 'value2');
} {{{"key":"value","key":"value2"}}}
do_execsql_test json_valid_1 {
SELECT json_valid('{"a":55,"b":72}');

File diff suppressed because it is too large


@@ -141,3 +141,62 @@ Collin|15}
do_execsql_test case-insensitive-alias {
select u.first_name as fF, count(1) > 0 as cC from users u where fF = 'Jamie' group by fF order by cC;
} {Jamie|1}
do_execsql_test age_idx_order_desc {
select first_name from users order by age desc limit 3;
} {Robert
Sydney
Matthew}
do_execsql_test rowid_or_integer_pk_desc {
select first_name from users order by id desc limit 3;
} {Nicole
Gina
Dorothy}
# The following two tests may look redundant, but they verify that an index scan over age_idx doesn't drop any rows due to BTree bugs
do_execsql_test orderby_asc_verify_rows {
select count(1) from (select * from users order by age asc)
} {10000}
do_execsql_test orderby_desc_verify_rows {
select count(1) from (select * from users order by age desc)
} {10000}
do_execsql_test orderby_desc_with_offset {
select first_name, age from users order by age desc limit 3 offset 666;
} {Francis|94
Matthew|94
Theresa|94}
do_execsql_test orderby_desc_with_filter {
select first_name, age from users where age <= 50 order by age desc limit 5;
} {Gerald|50
Nicole|50
Tammy|50
Marissa|50
Daniel|50}
do_execsql_test orderby_asc_with_filter_range {
select first_name, age from users where age <= 50 and age >= 49 order by age asc limit 5;
} {William|49
Jennifer|49
Robert|49
David|49
Stephanie|49}
do_execsql_test orderby_desc_with_filter_id_lt {
select id from users where id < 6666 order by id desc limit 5;
} {6665
6664
6663
6662
6661}
do_execsql_test orderby_desc_with_filter_id_le {
select id from users where id <= 6666 order by id desc limit 5;
} {6666
6665
6664
6663
6662}

32
testing/pyproject.toml Normal file

@@ -0,0 +1,32 @@
[project]
description = "Limbo Python Testing Project"
name = "limbo_test"
readme = "README.md"
requires-python = ">=3.13"
version = "0.1.0"
dependencies = [
"faker>=37.1.0",
"pydantic>=2.11.1",
]
[project.scripts]
test-write = "cli_tests.write:main"
test-shell = "cli_tests.cli_test_cases:main"
test-extensions = "cli_tests.extensions:main"
test-update = "cli_tests.update:main"
test-memory = "cli_tests.memory:main"
bench-vfs = "cli_tests.vfs_bench:main"
test-constraint = "cli_tests.constraint:main"
[tool.uv]
package = true
[build-system]
build-backend = "hatchling.build"
requires = ["hatchling", "hatch-vcs"]
[tool.hatch.build.targets.wheel]
packages = ["cli_tests"]
[tool.hatch.metadata]
allow-direct-references = true
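Because package = true is set under [tool.uv] and the project builds with hatchling, each [project.scripts] entry above is expected to be exposed as a console script, so the suites can be invoked as, for example, uv run test-shell or uv run test-constraint (assuming a synced uv environment).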


@@ -423,26 +423,33 @@ do_execsql_test julianday-time-only {
SELECT julianday('15:30:45');
} {2451545.14635417}
#
# TODO: fix precision issue
#
#do_execsql_test julianday-midnight {
# SELECT julianday('2023-05-18 00:00:00');
#} {2460082.5}
do_execsql_test julianday-midnight {
SELECT julianday('2023-05-18 00:00:00');
} {2460082.5}
#do_execsql_test julianday-noon {
# SELECT julianday('2023-05-18 12:00:00');
#} {2460083.0}
do_execsql_test julianday-noon {
SELECT julianday('2023-05-18 12:00:00');
} {2460083.0}
#do_execsql_test julianday-fractional-zero {
# SELECT julianday('2023-05-18 00:00:00.000');
#} {2460082.5}
do_execsql_test julianday-fractional-zero {
SELECT julianday('2023-05-18 00:00:00.000');
} {2460082.5}
# same issue as above, we return .5000000 because we are using fmt precision
#do_execsql_test julianday-date-only {
# SELECT julianday('2023-05-18');
#} {2460082.5}
do_execsql_test julianday-date-only {
SELECT julianday('2023-05-18');
} {2460082.5}
do_execsql_test julianday-with-modifier-day {
SELECT julianday(2454832.5,'+1 day');
} {2454833.5}
do_execsql_test julianday-with-modifier-hour {
SELECT julianday(2454832.5,'-3 hours');
} {2454832.375}
do_execsql_test julianday-max-day {
SELECT julianday('9999-12-31 23:59:59');
} {5373484.49998843}
@@ -589,3 +596,74 @@ set FMT [list %S.%3f %C %y %b %B %h %a %A %D %x %v %.f %.3f %.6f %.9f %3f %6f %9
foreach i $FMT {
do_execsql_test strftime-invalid-$i "SELECT strftime('$i','2025-01-23T13:14:30.567');" {}
}
do_execsql_test strftime-julianday {
SELECT strftime('%Y-%m-%d %H:%M:%fZ', 2459717.08070103);
} {"2022-05-17 13:56:12.569Z"}
# Tests for the TIMEDIFF function
do_execsql_test timediff-basic-positive {
SELECT timediff('14:30:45', '12:00:00');
} {"+0000-00-00 02:30:45.000"}
do_execsql_test timediff-basic-negative {
SELECT timediff('12:00:00', '14:30:45');
} {"-0000-00-00 02:30:45.000"}
do_execsql_test timediff-with-milliseconds-positive {
SELECT timediff('12:00:01.300', '12:00:00.500');
} {"+0000-00-00 00:00:00.800"}
do_execsql_test timediff-same-time {
SELECT timediff('12:00:00', '12:00:00');
} {"+0000-00-00 00:00:00.000"}
do_execsql_test timediff-across-dates {
SELECT timediff('2023-05-11 01:15:00', '2023-05-10 23:30:00');
} {"+0000-00-00 01:45:00.000"}
do_execsql_test timediff-across-dates-negative {
SELECT timediff('2023-05-10 23:30:00', '2023-05-11 01:15:00');
} {"-0000-00-00 01:45:00.000"}
do_execsql_test timediff-different-formats {
SELECT timediff('2023-05-10T23:30:00', '2023-05-10 14:15:00');
} {"+0000-00-00 09:15:00.000"}
do_execsql_test timediff-with-timezone {
SELECT timediff('2023-05-10 23:30:00+02:00', '2023-05-10 18:30:00Z');
} {"+0000-00-00 03:00:00.000"}
do_execsql_test timediff-large-difference {
SELECT timediff('2023-05-12 10:00:00', '2023-05-10 08:00:00');
} {"+0000-00-02 02:00:00.000"}
do_execsql_test timediff-with-seconds-precision {
SELECT timediff('12:30:45.123', '12:30:44.987');
} {"+0000-00-00 00:00:00.136"}
do_execsql_test timediff-null-first-arg {
SELECT timediff(NULL, '12:00:00');
} {{}}
do_execsql_test timediff-null-second-arg {
SELECT timediff('12:00:00', NULL);
} {{}}
do_execsql_test timediff-invalid-first-arg {
SELECT timediff('not-a-time', '12:00:00');
} {{}}
do_execsql_test timediff-invalid-second-arg {
SELECT timediff('12:00:00', 'not-a-time');
} {{}}
do_execsql_test timediff-julian-day {
SELECT timediff(2460000, 2460000.5);
} {"-0000-00-00 12:00:00.000"}
do_execsql_test timediff-different-time-formats {
SELECT timediff('23:59:59', '00:00:00');
} {"+0000-00-00 23:59:59.000"}


@@ -195,6 +195,54 @@ do_execsql_test hex-null {
select hex(null)
} {}
do_execsql_test likely {
select likely('limbo')
} {limbo}
do_execsql_test likely-int {
select likely(100)
} {100}
do_execsql_test likely-decimal {
select likely(12.34)
} {12.34}
do_execsql_test likely-null {
select likely(NULL)
} {}
do_execsql_test likelihood-string {
SELECT likelihood('limbo', 0.5);
} {limbo}
do_execsql_test likelihood-string-high-probability {
SELECT likelihood('database', 0.9375);
} {database}
do_execsql_test likelihood-integer {
SELECT likelihood(100, 0.0625);
} {100}
do_execsql_test likelihood-integer-probability-1 {
SELECT likelihood(42, 1.0);
} {42}
do_execsql_test likelihood-decimal {
SELECT likelihood(12.34, 0.5);
} {12.34}
do_execsql_test likelihood-null {
SELECT likelihood(NULL, 0.5);
} {}
do_execsql_test likelihood-blob {
SELECT hex(likelihood(x'01020304', 0.5));
} {01020304}
do_execsql_test likelihood-zero-probability {
SELECT likelihood(999, 0.0);
} {999}
do_execsql_test unhex-str-ab {
SELECT unhex('6162');
} {ab}


@@ -11,6 +11,14 @@ do_execsql_test select-const-2 {
SELECT 2
} {2}
do_execsql_test select-const-3 {
SELECT 0xDEAF
} {57007}
do_execsql_test select-const-4 {
SELECT -0xA
} {-10}
do_execsql_test select-true {
SELECT true
} {1}
@@ -165,3 +173,70 @@ do_execsql_test select-not-like-expression {
do_execsql_test select-like-expression {
select 2 % 0.5
} {}
do_execsql_test select_positive_infinite_float {
SELECT 1.7976931348623157E+308 + 1e308; -- f64::MAX + 1e308
} {Inf}
do_execsql_test select_negative_infinite_float {
SELECT -1.7976931348623157E+308 - 1e308 -- f64::MIN - 1e308
} {-Inf}
do_execsql_test select_shl_large_negative_float {
SELECT 1 << -1e19;
SELECT 1 << -9223372036854775808; -- i64::MIN
SELECT 1 << 9223372036854775807; -- i64::MAX
} {0 0 0}
do_execsql_test select_shl_basic {
SELECT 1 << 0, 1 << 1, 1 << 2, 1 << 3;
SELECT 2 << 0, 2 << 1, 2 << 2, 2 << 3;
} {1|2|4|8
2|4|8|16}
do_execsql_test select_shl_negative_numbers {
SELECT -1 << 0, -1 << 1, -1 << 2, -1 << 3;
SELECT -2 << 0, -2 << 1, -2 << 2, -2 << 3;
} {-1|-2|-4|-8
-2|-4|-8|-16}
do_execsql_test select_shl_negative_shifts {
SELECT 8 << -1, 8 << -2, 8 << -3, 8 << -4;
SELECT -8 << -1, -8 << -2, -8 << -3, -8 << -4;
} {4|2|1|0
-4|-2|-1|-1}
do_execsql_test select_shl_large_shifts {
SELECT 1 << 62, 1 << 63, 1 << 64;
SELECT -1 << 62, -1 << 63, -1 << 64;
} {4611686018427387904|-9223372036854775808|0
-4611686018427387904|-9223372036854775808|0}
do_execsql_test select_shl_text_conversion {
SELECT '1' << '2';
SELECT '8' << '-2';
SELECT '-4' << '2';
} {4 2 -16}
do_execsql_test select_shl_chained {
SELECT (1 << 2) << 3;
SELECT (2 << 1) << (1 << 1);
} {32 16}
do_execsql_test select_shl_numeric_types {
SELECT CAST(1 AS INTEGER) << 2;
SELECT 1.0 << 2;
SELECT 1.5 << 2;
} {4 4 4}
do_execsql_test select_fuzz_failure_case {
SELECT (-9 << ((-6) << (9)) >> ((5)) % -10 - + - (-9));
} {-16}
# regression test for https://github.com/tursodatabase/limbo/issues/1157
do_execsql_test select-invalid-numeric-text {
select -'e';
} {0}
do_execsql_test select-invalid-numeric-text-2 {
select -'E';
} {0}


@@ -2,6 +2,14 @@ set sqlite_exec [expr {[info exists env(SQLITE_EXEC)] ? $env(SQLITE_EXEC) : "sql
set test_dbs [list "testing/testing.db" "testing/testing_norowidalias.db"]
set test_small_dbs [list "testing/testing_small.db" ]
proc error_put {sql} {
puts [format "\033\[1;31mTest FAILED:\033\[0m %s" $sql ]
}
proc test_put {msg db test_name} {
puts [format "\033\[1;34m(%s)\033\[0m %s $msg: \033\[1;32m%s\033\[0m" $db [string repeat " " [expr {40 - [string length $db]}]] $test_name]
}
proc evaluate_sql {sqlite_exec db_name sql} {
set command [list $sqlite_exec $db_name $sql]
set output [exec {*}$command]
@@ -11,7 +19,7 @@ proc evaluate_sql {sqlite_exec db_name sql} {
proc run_test {sqlite_exec db_name sql expected_output} {
set actual_output [evaluate_sql $sqlite_exec $db_name $sql]
if {$actual_output ne $expected_output} {
puts "Test FAILED: '$sql'"
error_put $sql
puts "returned '$actual_output'"
puts "expected '$expected_output'"
exit 1
@@ -20,7 +28,7 @@ proc run_test {sqlite_exec db_name sql expected_output} {
proc do_execsql_test {test_name sql_statements expected_outputs} {
foreach db $::test_dbs {
puts [format "(%s) %s Running test: %s" $db [string repeat " " [expr {40 - [string length $db]}]] $test_name]
test_put "Running test" $db $test_name
set combined_sql [string trim $sql_statements]
set combined_expected_output [join $expected_outputs "\n"]
run_test $::sqlite_exec $db $combined_sql $combined_expected_output
@@ -29,7 +37,7 @@ proc do_execsql_test {test_name sql_statements expected_outputs} {
proc do_execsql_test_small {test_name sql_statements expected_outputs} {
foreach db $::test_small_dbs {
puts [format "(%s) %s Running test: %s" $db [string repeat " " [expr {40 - [string length $db]}]] $test_name]
test_put "Running test" $db $test_name
set combined_sql [string trim $sql_statements]
set combined_expected_output [join $expected_outputs "\n"]
run_test $::sqlite_exec $db $combined_sql $combined_expected_output
@@ -39,13 +47,13 @@ proc do_execsql_test_small {test_name sql_statements expected_outputs} {
proc do_execsql_test_regex {test_name sql_statements expected_regex} {
foreach db $::test_dbs {
puts [format "(%s) %s Running test: %s" $db [string repeat " " [expr {40 - [string length $db]}]] $test_name]
test_put "Running test" $db $test_name
set combined_sql [string trim $sql_statements]
set actual_output [evaluate_sql $::sqlite_exec $db $combined_sql]
# Validate the actual output against the regular expression
if {![regexp $expected_regex $actual_output]} {
puts "Test FAILED: '$sql_statements'"
error_put $sql_statements
puts "returned '$actual_output'"
puts "expected to match regex '$expected_regex'"
exit 1
@@ -55,7 +63,7 @@ proc do_execsql_test_regex {test_name sql_statements expected_regex} {
proc do_execsql_test_on_specific_db {db_name test_name sql_statements expected_outputs} {
puts [format "(%s) %s Running test: %s" $db_name [string repeat " " [expr {40 - [string length $db_name]}]] $test_name]
test_put "Running test" $db_name $test_name
set combined_sql [string trim $sql_statements]
set combined_expected_output [join $expected_outputs "\n"]
run_test $::sqlite_exec $db_name $combined_sql $combined_expected_output
@@ -69,14 +77,14 @@ proc within_tolerance {actual expected tolerance} {
# FIXME: When Limbo's floating point presentation matches to SQLite, this could/should be removed
proc do_execsql_test_tolerance {test_name sql_statements expected_outputs tolerance} {
foreach db $::test_dbs {
puts [format "(%s) %s Running test: %s" $db [string repeat " " [expr {40 - [string length $db]}]] $test_name]
test_put "Running test" $db $test_name
set combined_sql [string trim $sql_statements]
set actual_output [evaluate_sql $::sqlite_exec $db $combined_sql]
set actual_values [split $actual_output "\n"]
set expected_values [split $expected_outputs "\n"]
if {[llength $actual_values] != [llength $expected_values]} {
puts "Test FAILED: '$sql_statements'"
error_put $sql_statements
puts "returned '$actual_output'"
puts "expected '$expected_outputs'"
exit 1
@@ -89,7 +97,7 @@ proc do_execsql_test_tolerance {test_name sql_statements expected_outputs tolera
if {![within_tolerance $actual $expected $tolerance]} {
set lower_bound [expr {$expected - $tolerance}]
set upper_bound [expr {$expected + $tolerance}]
puts "Test FAILED: '$sql_statements'"
error_put $sql_statements
puts "returned '$actual'"
puts "expected a value within the range \[$lower_bound, $upper_bound\]"
exit 1
@@ -97,3 +105,124 @@ proc do_execsql_test_tolerance {test_name sql_statements expected_outputs tolera
}
}
}
# This procedure passes the test if the output contains error messages
proc run_test_expecting_any_error {sqlite_exec db_name sql} {
# Execute the SQL command and capture output
set command [list $sqlite_exec $db_name $sql]
# Use catch to handle both successful and error cases
catch {exec {*}$command} result options
# Check if the output contains error indicators (×, error, syntax error, etc.)
if {[regexp {(error|ERROR|Error|×|syntax error|failed)} $result]} {
# Error found in output - test passed
puts "\033\[1;32mTest PASSED:\033\[0m Got expected error"
return 1
}
# No error indicators in output
error_put $sql
puts "Expected an error but command output didn't indicate any error: '$result'"
exit 1
}
# This procedure passes if error matches a specific pattern
proc run_test_expecting_error {sqlite_exec db_name sql expected_error_pattern} {
# Execute the SQL command and capture output
set command [list $sqlite_exec $db_name $sql]
# Capture output whether command succeeds or fails
catch {exec {*}$command} result options
# Check if the output contains error indicators first
if {![regexp {(error|ERROR|Error|×|syntax error|failed)} $result]} {
error_put $sql
puts "Expected an error matching '$expected_error_pattern'"
puts "But command output didn't indicate any error: '$result'"
exit 1
}
# Now check if the error message matches the expected pattern
if {![regexp $expected_error_pattern $result]} {
error_put $sql
puts "Error occurred but didn't match expected pattern."
puts "Output was: '$result'"
puts "Expected pattern: '$expected_error_pattern'"
exit 1
}
# If we get here, the test passed - got expected error matching pattern
return 1
}
# This version accepts exact error text, ignoring formatting
proc run_test_expecting_error_content {sqlite_exec db_name sql expected_error_text} {
# Execute the SQL command and capture output
set command [list $sqlite_exec $db_name $sql]
# Capture output whether command succeeds or fails
catch {exec {*}$command} result options
# Check if the output contains error indicators first
if {![regexp {(error|ERROR|Error|×|syntax error|failed)} $result]} {
error_put $sql
puts "Expected an error with text: '$expected_error_text'"
puts "But command output didn't indicate any error: '$result'"
exit 1
}
# Normalize both the actual and expected error messages
# Remove all whitespace, newlines, and special characters for comparison
set normalized_actual [regsub -all {[[:space:]]|[[:punct:]]} $result ""]
set normalized_expected [regsub -all {[[:space:]]|[[:punct:]]} $expected_error_text ""]
# Convert to lowercase for case-insensitive comparison
set normalized_actual [string tolower $normalized_actual]
set normalized_expected [string tolower $normalized_expected]
# Check if the normalized strings contain the same text
if {[string first $normalized_expected $normalized_actual] == -1} {
error_put $sql
puts "Error occurred but content didn't match."
puts "Output was: '$result'"
puts "Expected text: '$expected_error_text'"
exit 1
}
# If we get here, the test passed - got error with expected content
return 1
}
proc do_execsql_test_error {test_name sql_statements expected_error_pattern} {
foreach db $::test_dbs {
test_put "Running error test" $db $test_name
set combined_sql [string trim $sql_statements]
run_test_expecting_error $::sqlite_exec $db $combined_sql $expected_error_pattern
}
}
proc do_execsql_test_error_content {test_name sql_statements expected_error_text} {
foreach db $::test_dbs {
test_put "Running error content test" $db $test_name
set combined_sql [string trim $sql_statements]
run_test_expecting_error_content $::sqlite_exec $db $combined_sql $expected_error_text
}
}
proc do_execsql_test_any_error {test_name sql_statements} {
foreach db $::test_dbs {
test_put "Running any-error test" $db $test_name
set combined_sql [string trim $sql_statements]
run_test_expecting_any_error $::sqlite_exec $db $combined_sql
}
}
proc do_execsql_test_in_memory_any_error {test_name sql_statements} {
test_put "Running any-error test" in-memory $test_name
# Use ":memory:" special filename for in-memory database
set db_name ":memory:"
set combined_sql [string trim $sql_statements]
run_test_expecting_any_error $::sqlite_exec $db_name $combined_sql
}


@@ -572,3 +572,13 @@ do_execsql_test where-constant-condition-no-tables {
do_execsql_test where-constant-condition-no-tables-2 {
select 1 where 1 IS NOT NULL;
} {1}
# We had a bug where NULL was incorrectly used as a seek key, returning all rows (because NULL < everything in index keys)
do_execsql_test where-null-comparison-index-seek-regression-test {
select age from users where age > NULL;
} {}
# We had a bug where Limbo tried to use an index when there was a WHERE term like 't.x = t.x'
do_execsql_test where-self-referential-regression {
select count(1) from users where id = id;
} {10000}