Cleaner and less error-prone Write Tests

pedrocarlo
2025-04-03 01:48:33 -03:00
parent 58e091cb23
commit 0c137d6dff
3 changed files with 110 additions and 154 deletions


@@ -1,146 +1,147 @@
 #!/usr/bin/env python3
 import os
 from cli_tests.test_limbo_cli import TestLimboShell
+from pydantic import BaseModel
 sqlite_flags = os.getenv("SQLITE_FLAGS", "-q").split(" ")
+class InsertTest(BaseModel):
+    name: str
+    db_schema: str = "CREATE TABLE test (t1 BLOB, t2 INTEGER);"
+    blob_size: int = 1024**2
+    vals: int = 100
+    has_blob: bool = True
+    db_path: str = "testing/writes.db"
+    def run(self, limbo: TestLimboShell):
+        zero_blob = "0" * self.blob_size * 2
+        big_stmt = [self.db_schema]
+        big_stmt = big_stmt + [
+            f"INSERT INTO test (t1) VALUES (zeroblob({self.blob_size}));"
+            if i % 2 == 0 and self.has_blob
+            else f"INSERT INTO test (t2) VALUES ({i});"
+            for i in range(self.vals * 2)
+        ]
+        expected = []
+        for i in range(self.vals * 2):
+            if i % 2 == 0 and self.has_blob:
+                big_stmt.append(f"SELECT hex(t1) FROM test LIMIT 1 OFFSET {i};")
+                expected.append(zero_blob)
+            else:
+                big_stmt.append(f"SELECT t2 FROM test LIMIT 1 OFFSET {i};")
+                expected.append(f"{i}")
+        big_stmt.append("SELECT count(*) FROM test;")
+        expected.append(str(self.vals * 2))
+        big_stmt = "".join(big_stmt)
+        expected = "\n".join(expected)
+        limbo.run_test_fn(
+            big_stmt, lambda res: validate_with_expected(res, expected), self.name
+        )
+    def test_compat(self):
+        print("Testing in SQLite\n")
+        with TestLimboShell(
+            init_commands="",
+            exec_name="sqlite3",
+            flags=f"{self.db_path}",
+        ) as sqlite:
+            sqlite.run_test_fn(
+                ".show",
+                lambda res: f"filename: {self.db_path}" in res,
+                "Opened db file created with Limbo in sqlite3",
+            )
+            sqlite.run_test_fn(
+                ".schema",
+                lambda res: self.db_schema in res,
+                "Tables created by previous Limbo test exist in db file",
+            )
+        # TODO Have some pydantic object be passed to this function with common fields
+        # To extract the information necessary to query the db in sqlite
+        # The object should contain Schema information and queries that should be run to
+        # test in sqlite for compatibility sakes
+        print()
+        pass
 def validate_with_expected(result: str, expected: str):
     return (expected in result, expected)
-def stub_write_blob_test(
-    limbo: TestLimboShell,
-    name: str,
-    blob_size: int = 1024**2,
-    vals: int = 100,
-    blobs: bool = True,
-    schema: str = "CREATE TABLE test (t1 BLOB, t2 INTEGER);",
-):
-    zero_blob = "0" * blob_size * 2
-    big_stmt = [schema]
-    big_stmt = big_stmt + [
-        f"INSERT INTO test (t1) VALUES (zeroblob({blob_size}));"
-        if i % 2 == 0 and blobs
-        else f"INSERT INTO test (t2) VALUES ({i});"
-        for i in range(vals * 2)
-    ]
-    expected = []
-    for i in range(vals * 2):
-        if i % 2 == 0 and blobs:
-            big_stmt.append(f"SELECT hex(t1) FROM test LIMIT 1 OFFSET {i};")
-            expected.append(zero_blob)
-        else:
-            big_stmt.append(f"SELECT t2 FROM test LIMIT 1 OFFSET {i};")
-            expected.append(f"{i}")
-    big_stmt.append("SELECT count(*) FROM test;")
-    expected.append(str(vals * 2))
-    big_stmt = "".join(big_stmt)
-    expected = "\n".join(expected)
-    limbo.run_test_fn(big_stmt, lambda res: validate_with_expected(res, expected), name)
 # TODO no delete tests for now
-def blob_tests() -> list[dict]:
+def blob_tests() -> list[InsertTest]:
     tests: list[dict] = []
     for vals in range(0, 1000, 100):
         tests.append(
-            {
-                "name": f"small-insert-integer-vals-{vals}",
-                "vals": vals,
-                "blobs": False,
-            }
+            InsertTest(
+                name=f"small-insert-integer-vals-{vals}",
+                vals=vals,
+                has_blob=False,
+            )
         )
     tests.append(
-        {
-            "name": f"small-insert-blob-interleaved-blob-size-{1024}",
-            "vals": 10,
-            "blob_size": 1024,
-        }
+        InsertTest(
+            name=f"small-insert-blob-interleaved-blob-size-{1024}",
+            vals=10,
+            blob_size=1024,
+        )
    )
    tests.append(
-        {
-            "name": f"big-insert-blob-interleaved-blob-size-{1024}",
-            "vals": 100,
-            "blob_size": 1024,
-        }
+        InsertTest(
+            name=f"big-insert-blob-interleaved-blob-size-{1024}",
+            vals=100,
+            blob_size=1024,
+        )
    )
     for blob_size in range(0, (1024 * 1024) + 1, 1024 * 4**4):
         if blob_size == 0:
             continue
         tests.append(
-            {
-                "name": f"small-insert-blob-interleaved-blob-size-{blob_size}",
-                "vals": 10,
-                "blob_size": blob_size,
-            }
+            InsertTest(
+                name=f"small-insert-blob-interleaved-blob-size-{blob_size}",
+                vals=10,
+                blob_size=blob_size,
+            )
         )
         tests.append(
-            {
-                "name": f"big-insert-blob-interleaved-blob-size-{blob_size}",
-                "vals": 100,
-                "blob_size": blob_size,
-            }
+            InsertTest(
+                name=f"big-insert-blob-interleaved-blob-size-{blob_size}",
+                vals=100,
+                blob_size=blob_size,
+            )
         )
     return tests
 def test_sqlite_compat(db_fullpath: str, schema: str):
-    sqlite = TestLimboShell(
+    with TestLimboShell(
         init_commands="",
         exec_name="sqlite3",
         flags=f"{db_fullpath}",
-    )
-    sqlite.run_test_fn(
-        ".show",
-        lambda res: f"filename: {db_fullpath}" in res,
-        "Opened db file created with Limbo in sqlite3",
-    )
-    sqlite.run_test_fn(
-        ".schema",
-        lambda res: schema in res,
-        "Tables created by previous Limbo test exist in db file",
-    )
-    # TODO when we can import external dependencies
-    # Have some pydantic object be passed to this function with common fields
+    ) as sqlite:
+        sqlite.run_test_fn(
+            ".show",
+            lambda res: f"filename: {db_fullpath}" in res,
+            "Opened db file created with Limbo in sqlite3",
+        )
+        sqlite.run_test_fn(
+            ".schema",
+            lambda res: schema in res,
+            "Tables created by previous Limbo test exist in db file",
+        )
+    # TODO Have some pydantic object be passed to this function with common fields
+    # To extract the information necessary to query the db in sqlite
+    # The object should contain Schema information and queries that should be run to
+    # test in sqlite for compatibility sakes
-    # sqlite.run_test_fn(
-    #     "SELECT count(*) FROM test;",
-    #     lambda res: res == "50",
-    #     "Tested large write to testfs",
-    # )
-    # sqlite.run_test_fn(
-    #     "SELECT count(*) FROM vfs;",
-    #     lambda res: res == "50",
-    #     "Tested large write to testfs",
-    # )
-    sqlite.quit()
 def touch_db_file(db_fullpath: str):
     os.O_RDWR
     descriptor = os.open(
         path=db_fullpath,
         flags=(
             os.O_RDWR  # access mode: read and write
             | os.O_CREAT  # create if not exists
             | os.O_TRUNC  # truncate the file to zero
         ),
         mode=0o777,
     )
     f = open(descriptor)
     f.close()
 def cleanup(db_fullpath: str):
     wal_path = f"{db_fullpath}-wal"
@@ -153,18 +154,15 @@ def cleanup(db_fullpath: str):
 def main():
     tests = blob_tests()
-    db_path = "testing/writes.db"
-    schema = "CREATE TABLE test (t1 BLOB, t2 INTEGER);"
     # TODO see how to parallelize this loop with different subprocesses
     for test in tests:
+        db_path = test.db_path
         try:
             # Use with syntax to automatically close shell on error
             with TestLimboShell() as limbo:
                 limbo.execute_dot(f".open {db_path}")
-                stub_write_blob_test(limbo, **test)
-                print("Testing in SQLite\n")
-                test_sqlite_compat(db_path, schema)
-                print()
+                test.run(limbo)
+                test.test_compat()
         except Exception as e:
             print(f"Test FAILED: {e}")


@@ -4,7 +4,10 @@ name = "limbo_test"
 readme = "README.md"
 requires-python = ">=3.13"
 version = "0.1.0"
-dependencies = ["faker>=37.1.0", "pydantic>=2.11.1", "rich>=14.0.0"]
+dependencies = [
+    "faker>=37.1.0",
+    "pydantic>=2.11.1",
+]
 [project.scripts]
 test-writes = "cli_tests.writes:main"

uv.lock generated

@@ -41,35 +41,12 @@ source = { editable = "testing" }
 dependencies = [
     { name = "faker" },
     { name = "pydantic" },
-    { name = "rich" },
 ]
 [package.metadata]
 requires-dist = [
     { name = "faker", specifier = ">=37.1.0" },
     { name = "pydantic", specifier = ">=2.11.1" },
-    { name = "rich", specifier = ">=14.0.0" },
 ]
-[[package]]
-name = "markdown-it-py"
-version = "3.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "mdurl" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 },
-]
-[[package]]
-name = "mdurl"
-version = "0.1.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 },
-]
 [[package]]
@@ -115,28 +92,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/8e/4f/3fb47d6cbc08c7e00f92300e64ba655428c05c56b8ab6723bd290bae6458/pydantic_core-2.33.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8a1d581e8cdbb857b0e0e81df98603376c1a5c34dc5e54039dcc00f043df81e7", size = 1931234 },
 ]
-[[package]]
-name = "pygments"
-version = "2.19.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 },
-]
-[[package]]
-name = "rich"
-version = "14.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "markdown-it-py" },
-    { name = "pygments" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a1/53/830aa4c3066a8ab0ae9a9955976fb770fe9c6102117c8ec4ab3ea62d89e8/rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725", size = 224078 }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229 },
-]
 [[package]]
 name = "typing-extensions"
 version = "4.13.0"