Closes#3892
Closes#3888
Adds errors for expressions that return the wrong number of values for the
position they appear in (e.g. a multi-column subquery used where a single
value is expected). Stuff like:
```sql
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select case (select y, z from t2) when 1 then 'one' else 'other' end from t1;
× Parse error: base expression in CASE must return 1 value
turso> create table t(x, y);
insert into t values (1, 2);
select (select x, y from t) as result;
× Parse error: result column must return 1 value, got 2
turso> create table t1(x,y);
create table t2(y);
insert into t1 values (1,1);
insert into t2 values (1);
select * from t2 where y = (select x,y from t1);
× Parse error: all arguments to binary operator = must return the same number of
│ values. Got: (1) = (2)
turso> create table orders(customer_id, amount);
create table thresholds(min_amount, max_amount);
insert into orders values (100, 50), (100, 150);
insert into thresholds values (100, 200);
select customer_id, sum(amount) as total
from orders
group by customer_id
having total > (select min_amount, max_amount from thresholds);
× Parse error: all arguments to binary operator > must return the same number of
│ values. Got: (1) > (2)
turso> create table items(id);
create table config(max_results, other_col);
insert into items values (1), (2), (3);
insert into config values (2, 3);
select * from items limit (select max_results, other_col from config);
× Parse error: limit expression must return 1 value, got 2
turso> create table items(id);
create table config(skip_count, other_col);
insert into items values (1), (2), (3);
insert into config values (1, 2);
select * from items limit 1 offset (select skip_count, other_col from config);
× Parse error: offset expression must return 1 value, got 2
turso> create table items(id, name);
create table sort_order(priority, other_col);
insert into items values (1, 'a'), (2, 'b');
insert into sort_order values (1, 2);
select * from items order by (select priority, other_col from sort_order);
× Parse error: order by expression must return 1 value, got 2
turso> create table sales(product_id, amount);
create table grouping(category, other_col);
insert into sales values (1, 100), (2, 200);
insert into grouping values (1, 2);
select sum(amount) from sales group by (select category, other_col from grouping);
× Parse error: group by expression must return 1 value, got 2
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select case when (select y, z from t2) then 'yes' else 'no' end from t1;
× Parse error: when expression in CASE must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select case when x = 1 then (select y, z from t2) else 0 end from t1;
× Parse error: then expression in CASE must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select case when x = 2 then 0 else (select y, z from t2) end from t1;
× Parse error: else expression in CASE must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select max((select y, z from t2)) from t1;
× Parse error: argument 0 to function call max must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select x + (select y, z from t2) from t1;
× Parse error: all arguments to binary operator + must return the same number of
│ values. Got: (1) + (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (5);
insert into t2 values (1, 2);
select * from t1 where x between (select y, z from t2) and 10;
× Parse error: all arguments to binary operator <= must return the same number of
│ values. Got: (2) <= (1)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select cast((select y, z from t2) as integer) from t1;
× Parse error: argument to CAST must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values ('a', 'b');
select (select y, z from t2) collate nocase from t1;
× Parse error: argument to COLLATE must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select * from t1 where (select y, z from t2) is null;
× Parse error: all arguments to binary operator IS must return the same number of
│ values. Got: (2) IS (1)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select * from t1 where (select y, z from t2) not null;
× Parse error: argument to NOT NULL must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values ('a', 'b');
select * from t1 where (select y, z from t2) like 'a%';
× Parse error: left operand of LIKE must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select -(select y, z from t2) from t1;
× Parse error: argument to unary operator - must return 1 value. Got: (2)
turso> create table t1(x);
create table t2(y, z);
insert into t1 values (1);
insert into t2 values (1, 2);
select abs((select y, z from t2)) from t1;
× Parse error: argument 0 to function call abs must return 1 value. Got: (2)
```
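All of these boil down to the same check: compute how many values an expression yields and reject mismatches for the position it appears in. A minimal standalone sketch of that idea, using a hypothetical `Expr` type rather than Turso's actual AST:
```rust
// Hypothetical, simplified expression type; Turso's real AST and error
// handling are different. This only illustrates the arity check itself.
enum Expr {
    Literal(i64),
    Subquery { result_columns: usize },
}

/// Number of values an expression produces when evaluated.
fn arity(e: &Expr) -> usize {
    match e {
        Expr::Literal(_) => 1,
        // a subquery used as a value yields one value per result column
        Expr::Subquery { result_columns } => *result_columns,
    }
}

/// Reject binary operators whose operands produce different numbers of values.
fn check_binary(lhs: &Expr, op: &str, rhs: &Expr) -> Result<(), String> {
    let (l, r) = (arity(lhs), arity(rhs));
    if l != r {
        return Err(format!(
            "all arguments to binary operator {op} must return the same number of values. Got: ({l}) {op} ({r})"
        ));
    }
    Ok(())
}

fn main() {
    // Mirrors `select * from t2 where y = (select x, y from t1)` above.
    let lhs = Expr::Literal(1);
    let rhs = Expr::Subquery { result_columns: 2 };
    assert!(check_binary(&lhs, "=", &rhs).is_err());
}
```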
Closes#3906
Closes https://github.com/tursodatabase/turso/issues/3773
## Before
```rust
#[derive(Debug, Clone)]
pub struct Column {
pub name: Option<String>,
pub ty: Type,
// many sqlite operations like table_info retain the original string
pub ty_str: String,
pub primary_key: bool,
pub is_rowid_alias: bool,
pub notnull: bool,
pub default: Option<Box<Expr>>,
pub unique: bool,
pub collation: Option<CollationSeq>,
pub hidden: bool,
}
```
Obviously not ideal, so let's pack `ty`, `hidden`, `primary_key`,
`is_rowid_alias`, `notnull` and `collation` into a `u16`.
## After:
```rust
#[derive(Debug, Clone)]
pub struct Column {
pub name: Option<String>,
pub ty_str: String,
pub default: Option<Box<Expr>>,
raw: u16,
}
```
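For illustration, packing and unpacking fields out of a single `u16` looks roughly like this; the bit layout, the `ColType` variants and the accessor names are all illustrative, not the ones the PR actually uses:
```rust
// Standalone sketch of the packing idea with a hypothetical bit layout.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ColType {
    Null = 0,
    Integer = 1,
    Real = 2,
    Text = 3,
    Blob = 4,
}

const TYPE_MASK: u16 = 0b111;          // bits 0..=2: column type
const FLAG_PRIMARY_KEY: u16 = 1 << 3;  // remaining bits: boolean flags
const FLAG_ROWID_ALIAS: u16 = 1 << 4;
const FLAG_NOTNULL: u16 = 1 << 5;
const FLAG_HIDDEN: u16 = 1 << 6;

#[derive(Debug, Clone, Copy, Default)]
struct PackedColumn {
    raw: u16,
}

impl PackedColumn {
    fn ty(self) -> ColType {
        match self.raw & TYPE_MASK {
            1 => ColType::Integer,
            2 => ColType::Real,
            3 => ColType::Text,
            4 => ColType::Blob,
            _ => ColType::Null,
        }
    }
    fn set_ty(&mut self, ty: ColType) {
        self.raw = (self.raw & !TYPE_MASK) | ty as u16;
    }
    fn primary_key(self) -> bool {
        self.raw & FLAG_PRIMARY_KEY != 0
    }
    fn set_primary_key(&mut self, on: bool) {
        if on {
            self.raw |= FLAG_PRIMARY_KEY;
        } else {
            self.raw &= !FLAG_PRIMARY_KEY;
        }
    }
}

fn main() {
    let mut c = PackedColumn::default();
    c.set_ty(ColType::Integer);
    c.set_primary_key(true);
    assert_eq!(c.ty(), ColType::Integer);
    assert!(c.primary_key());
}
```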
Also saw a place to replace a `Mutex<Enum>` with an `AtomicEnum`, so I
snuck that in here too.
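The `Mutex<Enum>` to atomic swap follows the usual pattern of backing a small `#[repr(u8)]` enum with an `AtomicU8`; a generic sketch (the `State` enum here is made up, not the one that was actually changed):
```rust
use std::sync::atomic::{AtomicU8, Ordering};

// Generic sketch of an "AtomicEnum": store the discriminant in an AtomicU8.
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u8)]
enum State {
    Idle = 0,
    Busy = 1,
    Closed = 2,
}

struct AtomicState(AtomicU8);

impl AtomicState {
    fn new(s: State) -> Self {
        Self(AtomicU8::new(s as u8))
    }
    fn load(&self) -> State {
        match self.0.load(Ordering::Acquire) {
            0 => State::Idle,
            1 => State::Busy,
            _ => State::Closed,
        }
    }
    fn store(&self, s: State) {
        self.0.store(s as u8, Ordering::Release);
    }
}

fn main() {
    let st = AtomicState::new(State::Idle);
    st.store(State::Busy);
    assert_eq!(st.load(), State::Busy); // no lock taken on either path
}
```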
Reviewed-by: Jussi Saurio <jussi.saurio@gmail.com>
Closes#3905
This PR fixes a sync engine bug that could cause the state of the database
to go back in time.
The mistake was in the pull operation, which previously fetched
information about the last committed changes from the remote separately.
This creates a problem because pull already works with a fixed WAL update
received earlier from the remote, and that WAL update can be inconsistent
with a fresher value of last_change_id fetched from the remote.
The fix is to use only the WAL update and "extract" the necessary
information from it. To do that, sync now reads the meta sync table while
the pull operation is in progress (at the moment when local changes have
been rolled back and remote changes have already been applied) and does
not use any external source for that information.
Also, this PR fixes a bug in the JS tursodatabase client by resetting the
statement in the finally block, as opposed to the previous approach of
resetting the statement at the beginning. The problem with the previous
approach showed up when the client did not fully consume the statement
(e.g. aborted iteration after taking only one row): the statement would be
kept active and could prevent another write transaction from starting, or
simply keep occupying a slot as a read transaction.
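The guarantee the finally block gives is "the statement is always reset, even if the caller bails out early". Sketched in Rust with a hypothetical `Statement` type (the real client is JavaScript and its API differs), the equivalent pattern is an RAII guard that resets on drop:
```rust
// Hypothetical Statement with a reset() method; this only illustrates the
// "always reset, even on early exit" guarantee.
struct Statement {
    active: bool,
}

impl Statement {
    fn step(&mut self) -> Option<i64> {
        self.active = true; // statement now holds a transaction open
        Some(1)
    }
    fn reset(&mut self) {
        self.active = false;
    }
}

// Guard that resets the statement when it goes out of scope, even if the
// caller stops iterating after the first row or returns early.
struct ResetOnDrop<'a>(&'a mut Statement);

impl Drop for ResetOnDrop<'_> {
    fn drop(&mut self) {
        self.0.reset();
    }
}

fn take_one_row(stmt: &mut Statement) -> Option<i64> {
    let guard = ResetOnDrop(stmt);
    guard.0.step() // early exit: the guard still resets the statement
}

fn main() {
    let mut stmt = Statement { active: false };
    let _ = take_one_row(&mut stmt);
    assert!(!stmt.active); // no longer occupies a transaction slot
}
```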
Closes#3860
This PR resets the cursor state in the `stmt.reset()` method, because
under the hood the statement caches some B-tree state which may no longer
be valid by the time the statement is run again.
Reviewed-by: Preston Thorpe <preston@turso.tech>
Closes#3859
Follow-up to #3847.
Adds support for subqueries in all other positions of a SELECT (the
result list, GROUP BY, ORDER BY, HAVING, LIMIT, OFFSET).
Turns out I am a SQL noob and didn't realize that correlated subqueries
are supported in basically all positions except LIMIT/OFFSET, so support
for those was added too, plus accompanying TCL tests.
Thankfully the abstractions introduced in #3847 carry over to this very
well so the code change is relatively small (over half of the diff is
tests and a lot of the remaining diff is just moving logic around).
Reviewed-by: Preston Thorpe <preston@turso.tech>
Closes#3852
Closes#3853
Currently LIMIT 0 jumps to "after the main loop", and this happens before
the ORDER BY and GROUP BY cursors have had a chance to be initialized,
which causes a panic.
The simplest fix for now is to delay the LIMIT initialization.
Reviewed-by: Preston Thorpe <preston@turso.tech>
Closes#3854
I discovered a flaw in our current translation that makes queries of the
form HAVING foo IN (SELECT ...) not work properly - in these cases we need
to defer translation of the subquery until later.
I will fix this in a future PR because I suspect it's not trivial.
This PR fully integrates custom indices into the query planner.
To do that, a new `Cursor::IndexMethod` is introduced, along with a few
related changes in the VM implementation:
1. Added dedicated `IndexMethod{Create,Destroy,Query}` opcodes to handle
index method creation, deletion and querying
2. Updated the `Next`, `IdxRowid`, `IdxInsert` and `IdxDelete` opcodes to
properly handle the new cursor case (see the sketch below)
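Roughly, the new cursor variant means every cursor-dispatching opcode gains one more match arm. A heavily simplified sketch; the real `Cursor` type and opcode handlers carry much more state, and `IndexMethodCursor` here is a hypothetical trait:
```rust
// Hypothetical trait standing in for a custom index method implementation.
trait IndexMethodCursor {
    fn next(&mut self) -> bool;
}

enum Cursor {
    // existing cursor kinds, reduced to a placeholder root-page number
    BTree(u32),
    // new: a cursor backed by a custom index method
    IndexMethod(Box<dyn IndexMethodCursor>),
}

// What the `Next` opcode's dispatch gains: one more arm per opcode that
// touches cursors.
fn op_next(cursor: &mut Cursor) -> bool {
    match cursor {
        Cursor::BTree(_root_page) => {
            // existing b-tree advance logic goes here
            false
        }
        Cursor::IndexMethod(c) => c.next(),
    }
}
```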
Reviewed-by: Jussi Saurio <jussi.saurio@gmail.com>
Closes#3827
Turso incorrectly creates the first table of an autovacuumed database on
page 2.
(Note: this is in collaboration with @LeMikaelF.)
SQLite does not allow enabling or disabling auto-vacuum after the first
table has been created
(https://sqlite.org/pragma.html#pragma_auto_vacuum). This is because the
sequence of pages in the database is different when auto-vacuum is
enabled: the first b-tree page must be page 3 instead of 2, to make room
for the first [Pointer Map
page](https://sqlite.org/fileformat.html#pointer_map_or_ptrmap_pages).
But Turso doesn't currently consider this, which can lead to data loss.
The simplest way to reproduce this is to create an autovacuumed database
with `pragma auto_vacuum=full`, so that autovacuum runs on each commit,
and then create a table with some data. Turso will incorrectly create the
new table on page 2. After this, every time a new page is created, either
through a page split or because a new table is created, Turso will write a
5-byte pointer into page 2, starting from the top of the page, thereby
overwriting existing data.
For example, let's start with a clean database and look at the first bytes
of page 2. It starts with `0d`, the flag byte for a table b-tree leaf page
([source](https://www.sqlite.org/fileformat.html#b_tree_pages)). The
next interesting number is the number of cells contained in this page
(`01`) at offset 5.
```
$ cargo run -- /tmp/a.db
turso> create table t(a);
turso> insert into t values ('myvalue');
$ dbtotxt /tmp/a.db
| size 8192 pagesize 4096 filename a.db
| page 1 offset 0
# ...snip...
| page 2 offset 4096
| 0: 0d 00 00 00 01 0f f5 00 0f f5 00 00 00 00 00 00 ................
| 4080: 00 00 00 00 00 09 01 02 1b 6d 79 76 61 6c 75 65 .........myvalue
| end a.db
```
Pointer map pages are located every N pages, starting from page 2, and
contain a list of 5-byte entries that record the parent page of each page
they cover. So whenever Turso or SQLite needs to add a page, it writes a
5-byte entry into what it thinks is the pointer map page - here page 2,
which already holds table data. This means that for data loss to occur, it
is sufficient to add a single page to the database, for example by
creating a table. Offset 5 will then be zeroed out:
```
$ cargo run -- /tmp/a.db
turso> create table t(a);
turso> insert into t values ('myvalue');
turso> pragma auto_vacuum=full;
turso> create table tt(a);
$ dbtotxt /tmp/a.db
| size 12288 pagesize 4096 filename a.db
| page 1 offset 0
# ...snip...
| page 2 offset 4096
| 0: 01 00 00 00 00 0f f5 00 0f f5 00 00 00 00 00 00 ................
| 4080: 00 00 00 00 00 09 01 02 1b 6d 79 76 61 6c 75 65 .........myvalue
```
Creating more tables, or adding more B-tree pages, will keep overwriting
the rest of the page, until the cells themselves are also overwritten.
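For reference, the "every N pages, starting from page 2" placement can be computed directly from the page size; here is a sketch of that arithmetic, simplified from SQLite's calculation (it ignores the pending-byte page special case):
```rust
// Which page holds the pointer-map entry for `pgno`: one ptrmap page,
// then `usable_size / 5` mapped pages (5 bytes per entry), repeating.
fn ptrmap_page_for(pgno: u32, usable_size: u32) -> u32 {
    assert!(pgno >= 2, "page 1 is the database header/schema page");
    let pages_per_group = usable_size / 5 + 1; // 1 ptrmap page + the pages it covers
    let group = (pgno - 2) / pages_per_group;
    group * pages_per_group + 2
}

fn main() {
    // With 4096-byte pages, the ptrmap page at page 2 covers pages 3..=821,
    // which is exactly why the first b-tree root has to move to page 3.
    assert_eq!(ptrmap_page_for(3, 4096), 2);
    assert_eq!(ptrmap_page_for(821, 4096), 2);
    assert_eq!(ptrmap_page_for(822, 4096), 822); // start of the next group
}
```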
## Reproducing the issue in the simulator
We have been unable to reproduce this exact corruption mode in the
simulator, but patching it exposes many failure modes, none of which occur
with the unpatched simulator. The following seeds show the issue when the
patched simulator is run against `main`:
- `11522841279124073062`, with "Assertion 'table inquisitive_graham_159
should contain all of its expected values' failed: table
inquisitive_graham_159 does not contain the expected values, the
simulator model has more rows than the database"
- `7057400018220918989`, `16028085350691325843`, `7721542713659053944`,
and `203017821863546118`, with "Failed to read ptrmap key=XXX"
- `12533694709304969540`, `18357088553315413457`, `3108945730906932377`,
with "Integrity Check Failed: Cell N in page 2 is out of range."
- `4757352625344646473`, with "dirty pages should be empty for read
txn"
- `7083498604824302257`, with "header_size: 6272, header_len_bytes: 2,
payload.len(): 13"
- `17881876827470741581`, with "ParseError("no such table:
focused_historians_416")"
- `2092231500503735693`, with "range end index 4789 out of range for
slice of length 4096"
- `7555257419378470845`, with "malformed database schema
(imaginative_ontivero\u{1})"
- `12905270229511147245`, with "index out of bounds: the len is 4096 but
the index is 4096"
## Fixing the issue
- When the DB is opened, read the persisted `auto_vacuum` state instead of
assuming `auto_vacuum=none` (see the sketch after this list).
- Don't allow auto_vacuum to be flipped on non-empty databases; if we
allowed this, the ptrmap pages could overlap with, and overwrite, existing
data.
- Modify the integrity check to avoid reporting that page 2 is orphaned in
auto-vacuumed databases.
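On the first bullet: in the SQLite file format the auto-vacuum setting is persisted in the 100-byte database header (a non-zero 4-byte big-endian value at offset 52, with offset 64 selecting incremental rather than full mode), so reading the state at open time is a small header check. A sketch with hypothetical names, not the actual code from the PR:
```rust
// Decode the auto-vacuum mode from the 100-byte database header.
#[derive(Debug, PartialEq)]
enum AutoVacuumMode {
    None,
    Full,
    Incremental,
}

fn auto_vacuum_mode(header: &[u8; 100]) -> AutoVacuumMode {
    let be_u32 = |off: usize| u32::from_be_bytes(header[off..off + 4].try_into().unwrap());
    if be_u32(52) == 0 {
        // offset 52: largest root b-tree page, zero when auto-vacuum is off
        AutoVacuumMode::None
    } else if be_u32(64) != 0 {
        // offset 64: non-zero selects incremental-vacuum mode
        AutoVacuumMode::Incremental
    } else {
        AutoVacuumMode::Full
    }
}

fn main() {
    let header = [0u8; 100]; // fresh header with both fields zeroed
    assert_eq!(auto_vacuum_mode(&header), AutoVacuumMode::None);
}
```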
Fixes#3752
Closes#3830