Introduce cdk-sql-common

The primary purpose of this new crate is to provide a common, shared codebase
for all SQL storage backends. It pushes us to write standard SQL, following best
practices that work across all supported databases.
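
To give a feel for what that shared codebase looks like downstream, here is a minimal sketch (not part of this commit) assuming the crate is imported as cdk_sql_common and that `conn` is any type implementing its DatabaseExecutor trait: queries are written once with named placeholders and run unchanged on every backend.

use cdk_sql_common::database::DatabaseExecutor;
use cdk_sql_common::stmt::query;

// Hypothetical helper: marks a proof as spent on whatever backend `conn` belongs to.
async fn mark_proof_spent<C: DatabaseExecutor>(
    conn: &C,
    y: Vec<u8>,
) -> Result<usize, cdk_common::database::Error> {
    query("UPDATE proof SET state = :state WHERE y = :y")?
        .bind("state", "SPENT")
        .bind("y", y)
        .execute(conn)
        .await
}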

This crate has been extracted from #878
Cesar Rodas
2025-07-15 12:17:25 +02:00
parent 0b79121de4
commit 349c773406
78 changed files with 4507 additions and 3870 deletions

View File

@@ -0,0 +1,46 @@
use crate::database::DatabaseExecutor;
use crate::stmt::query;
/// Applies the migrations generated by `build.rs`
#[inline(always)]
pub async fn migrate<C: DatabaseExecutor>(
conn: &C,
db_prefix: &str,
migrations: &[(&str, &str)],
) -> Result<(), cdk_common::database::Error> {
query(
r#"
CREATE TABLE IF NOT EXISTS migrations (
name TEXT PRIMARY KEY,
applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
"#,
)?
.execute(conn)
.await?;
// Apply each migration if it hasn't been applied yet
for (name, sql) in migrations {
if let Some((prefix, _)) = name.split_once(['/', '\\']) {
if prefix != db_prefix {
continue;
}
}
let is_missing = query("SELECT name FROM migrations WHERE name = :name")?
.bind("name", name)
.pluck(conn)
.await?
.is_none();
if is_missing {
query(sql)?.batch(conn).await?;
query(r#"INSERT INTO migrations (name) VALUES (:name)"#)?
.bind("name", name)
.execute(conn)
.await?;
}
}
Ok(())
}
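
For context, a concrete backend is expected to call this with its driver name and the MIGRATIONS slice generated by its build.rs (the statics shown later in this commit), much like the mint auth store below does. A minimal sketch, assuming `conn` implements DatabaseExecutor:

// Migration names are prefixed with the driver name ("sqlite/...sql"), so only entries
// matching `db_prefix` are applied; each applied entry is then recorded in `migrations`.
async fn apply_all<C: DatabaseExecutor>(conn: &C) -> Result<(), cdk_common::database::Error> {
    migrate(conn, "sqlite", MIGRATIONS).await
}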

View File

@@ -0,0 +1,53 @@
//! Database traits definition
use std::fmt::Debug;
use cdk_common::database::Error;
use crate::stmt::{Column, Statement};
/// Database Executor
///
/// This trait defines the expected behavior of a database executor
#[async_trait::async_trait]
pub trait DatabaseExecutor: Debug + Sync + Send {
/// Database driver name
fn name() -> &'static str;
/// Executes a query and returns the affected rows
async fn execute(&self, statement: Statement) -> Result<usize, Error>;
/// Runs the query and returns the first row or None
async fn fetch_one(&self, statement: Statement) -> Result<Option<Vec<Column>>, Error>;
/// Runs the query and returns all matching rows
async fn fetch_all(&self, statement: Statement) -> Result<Vec<Vec<Column>>, Error>;
/// Fetches the first row and column from a query
async fn pluck(&self, statement: Statement) -> Result<Option<Column>, Error>;
/// Batch execution
async fn batch(&self, statement: Statement) -> Result<(), Error>;
}
/// Database transaction trait
#[async_trait::async_trait]
pub trait DatabaseTransaction<'a>: Debug + DatabaseExecutor + Send + Sync {
/// Consumes the current transaction committing the changes
async fn commit(self) -> Result<(), Error>;
/// Consumes the transaction rolling back all changes
async fn rollback(self) -> Result<(), Error>;
}
/// Database connector
#[async_trait::async_trait]
pub trait DatabaseConnector: Debug + DatabaseExecutor + Send + Sync {
/// Transaction type for this database connection
type Transaction<'a>: DatabaseTransaction<'a>
where
Self: 'a;
/// Begin a new transaction
async fn begin(&self) -> Result<Self::Transaction<'_>, Error>;
}
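
To illustrate why execution is kept separate from connection and transaction handling, here is a hedged sketch (count_keysets is a hypothetical helper, not part of this commit): storage logic written against DatabaseExecutor runs unchanged on a plain connection or on a transaction returned by begin().

use crate::stmt::query;

/// Hypothetical helper: accepts a connection or an open transaction alike.
async fn count_keysets<C: DatabaseExecutor>(conn: &C) -> Result<usize, cdk_common::database::Error> {
    Ok(query("SELECT id FROM keyset")?.fetch_all(conn).await?.len())
}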

View File

@@ -0,0 +1,23 @@
//! Common SQL storage backend for cdk
#![warn(missing_docs)]
#![warn(rustdoc::bare_urls)]
mod common;
pub mod database;
mod macros;
pub mod pool;
pub mod stmt;
pub mod value;
pub use cdk_common::database::ConversionError;
#[cfg(feature = "mint")]
pub mod mint;
#[cfg(feature = "wallet")]
pub mod wallet;
#[cfg(feature = "mint")]
pub use mint::SQLMintDatabase;
#[cfg(feature = "wallet")]
pub use wallet::SQLWalletDatabase;

View File

@@ -0,0 +1,189 @@
//! Collection of macros to generate code that digests data from a generic SQL database
/// Unpacks and consumes a vector of Column, parsing it into individual variables after checking
/// that the vector is long enough.
#[macro_export]
macro_rules! unpack_into {
(let ($($var:ident),+) = $array:expr) => {
let ($($var),+) = {
let mut vec = $array.to_vec();
vec.reverse();
let required = 0 $(+ {let _ = stringify!($var); 1})+;
if vec.len() < required {
Err($crate::ConversionError::MissingColumn(required, vec.len()))?;
}
Ok::<_, cdk_common::database::Error>((
$(
vec.pop().expect(&format!("Checked length already for {}", stringify!($var)))
),+
))?
};
};
}
/// Parses a SQL column as a string or NULL
#[macro_export]
macro_rules! column_as_nullable_string {
($col:expr, $callback_str:expr, $callback_bytes:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => Ok(Some(text).and_then($callback_str)),
$crate::stmt::Column::Blob(bytes) => Ok(Some(bytes).and_then($callback_bytes)),
$crate::stmt::Column::Null => Ok(None),
_ => Err($crate::ConversionError::InvalidType(
"String".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
($col:expr, $callback_str:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => Ok(Some(text).and_then($callback_str)),
$crate::stmt::Column::Blob(bytes) => {
Ok(Some(String::from_utf8_lossy(&bytes)).and_then($callback_str))
}
$crate::stmt::Column::Null => Ok(None),
_ => Err($crate::ConversionError::InvalidType(
"String".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
($col:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => Ok(Some(text.to_owned())),
$crate::stmt::Column::Blob(bytes) => {
Ok(Some(String::from_utf8_lossy(&bytes).to_string()))
}
$crate::stmt::Column::Null => Ok(None),
_ => Err($crate::ConversionError::InvalidType(
"String".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
}
/// Parses a column as a number or NULL
#[macro_export]
macro_rules! column_as_nullable_number {
($col:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => Ok(Some(text.parse().map_err(|_| {
$crate::ConversionError::InvalidConversion(
stringify!($col).to_owned(),
"Number".to_owned(),
)
})?)),
$crate::stmt::Column::Integer(n) => Ok(Some(n.try_into().map_err(|_| {
$crate::ConversionError::InvalidConversion(
stringify!($col).to_owned(),
"Number".to_owned(),
)
})?)),
$crate::stmt::Column::Null => Ok(None),
_ => Err($crate::ConversionError::InvalidType(
"Number".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
}
/// Parses a column as a number
#[macro_export]
macro_rules! column_as_number {
($col:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => text.parse().map_err(|_| {
$crate::ConversionError::InvalidConversion(
stringify!($col).to_owned(),
"Number".to_owned(),
)
}),
$crate::stmt::Column::Integer(n) => n.try_into().map_err(|_| {
$crate::ConversionError::InvalidConversion(
stringify!($col).to_owned(),
"Number".to_owned(),
)
}),
_ => Err($crate::ConversionError::InvalidType(
"Number".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
}
/// Parses a column as a NULL or Binary
#[macro_export]
macro_rules! column_as_nullable_binary {
($col:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => Ok(Some(text.as_bytes().to_vec())),
$crate::stmt::Column::Blob(bytes) => Ok(Some(bytes.to_owned())),
$crate::stmt::Column::Null => Ok(None),
_ => Err($crate::ConversionError::InvalidType(
"String".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
}
/// Parses a SQL column as a binary
#[macro_export]
macro_rules! column_as_binary {
($col:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => Ok(text.as_bytes().to_vec()),
$crate::stmt::Column::Blob(bytes) => Ok(bytes.to_owned()),
_ => Err($crate::ConversionError::InvalidType(
"String".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
}
/// Parses a SQL column as a string
#[macro_export]
macro_rules! column_as_string {
($col:expr, $callback_str:expr, $callback_bytes:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => {
$callback_str(&text).map_err($crate::ConversionError::from)
}
$crate::stmt::Column::Blob(bytes) => {
$callback_bytes(&bytes).map_err($crate::ConversionError::from)
}
_ => Err($crate::ConversionError::InvalidType(
"String".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
($col:expr, $callback:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => {
$callback(&text).map_err($crate::ConversionError::from)
}
$crate::stmt::Column::Blob(bytes) => {
$callback(&String::from_utf8_lossy(&bytes)).map_err($crate::ConversionError::from)
}
_ => Err($crate::ConversionError::InvalidType(
"String".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
($col:expr) => {
(match $col {
$crate::stmt::Column::Text(text) => Ok(text.to_owned()),
$crate::stmt::Column::Blob(bytes) => Ok(String::from_utf8_lossy(&bytes).to_string()),
_ => Err($crate::ConversionError::InvalidType(
"String".to_owned(),
stringify!($col).to_owned(),
)),
})?
};
}
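
A short usage sketch of these macros (hypothetical downstream code mirroring how the mint store later in this commit decodes rows; it assumes the crate is importable as cdk_sql_common). Because the macros report ConversionError, and unpack_into! also a database::Error, through `?`, the enclosing function's error type must convert from both:

use cdk_common::database::{ConversionError, Error as DbError};
use cdk_sql_common::stmt::Column;
use cdk_sql_common::{column_as_number, column_as_string, unpack_into};

/// Local error type for the sketch so the `?` inside the macros has something to convert into.
#[derive(Debug, thiserror::Error)]
enum RowError {
    #[error("{0:?}")]
    Conversion(#[from] ConversionError),
    #[error("{0:?}")]
    Database(#[from] DbError),
}

/// Hypothetical decoder for a `SELECT id, unit, active FROM keyset` row.
fn decode_keyset_row(row: Vec<Column>) -> Result<(String, String, i64), RowError> {
    unpack_into!(let (id, unit, active) = row);
    let id = column_as_string!(id); // Text or Blob -> String
    let unit = column_as_string!(unit);
    let active: i64 = column_as_number!(active); // Text or Integer -> number
    Ok((id, unit, active))
}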

View File

@@ -0,0 +1,5 @@
/// @generated
/// Auto-generated by build.rs
pub static MIGRATIONS: &[(&str, &str)] = &[
("sqlite/20250109143347_init.sql", include_str!(r#"./migrations/sqlite/20250109143347_init.sql"#)),
];

View File

@@ -0,0 +1,44 @@
CREATE TABLE IF NOT EXISTS proof (
y BLOB PRIMARY KEY,
keyset_id TEXT NOT NULL,
secret TEXT NOT NULL,
c BLOB NOT NULL,
state TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS state_index ON proof(state);
CREATE INDEX IF NOT EXISTS secret_index ON proof(secret);
-- Keysets Table
CREATE TABLE IF NOT EXISTS keyset (
id TEXT PRIMARY KEY,
unit TEXT NOT NULL,
active BOOL NOT NULL,
valid_from INTEGER NOT NULL,
valid_to INTEGER,
derivation_path TEXT NOT NULL,
max_order INTEGER NOT NULL,
derivation_path_index INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS unit_index ON keyset(unit);
CREATE INDEX IF NOT EXISTS active_index ON keyset(active);
CREATE TABLE IF NOT EXISTS blind_signature (
y BLOB PRIMARY KEY,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL,
c BLOB NOT NULL
);
CREATE INDEX IF NOT EXISTS keyset_id_index ON blind_signature(keyset_id);
CREATE TABLE IF NOT EXISTS protected_endpoints (
endpoint TEXT PRIMARY KEY,
auth TEXT NOT NULL
);

View File

@@ -0,0 +1,411 @@
//! SQL Mint Auth
use std::collections::HashMap;
use std::marker::PhantomData;
use std::str::FromStr;
use async_trait::async_trait;
use cdk_common::database::{self, MintAuthDatabase, MintAuthTransaction};
use cdk_common::mint::MintKeySetInfo;
use cdk_common::nuts::{AuthProof, BlindSignature, Id, PublicKey, State};
use cdk_common::{AuthRequired, ProtectedEndpoint};
use migrations::MIGRATIONS;
use tracing::instrument;
use super::{sql_row_to_blind_signature, sql_row_to_keyset_info, SQLTransaction};
use crate::column_as_string;
use crate::common::migrate;
use crate::database::{DatabaseConnector, DatabaseTransaction};
use crate::mint::Error;
use crate::stmt::query;
/// Mint SQL Database
#[derive(Debug, Clone)]
pub struct SQLMintAuthDatabase<DB>
where
DB: DatabaseConnector,
{
db: DB,
}
impl<DB> SQLMintAuthDatabase<DB>
where
DB: DatabaseConnector,
{
/// Creates a new instance
pub async fn new<X>(db: X) -> Result<Self, Error>
where
X: Into<DB>,
{
let db = db.into();
Self::migrate(&db).await?;
Ok(Self { db })
}
/// Migrate
async fn migrate(conn: &DB) -> Result<(), Error> {
let tx = conn.begin().await?;
migrate(&tx, DB::name(), MIGRATIONS).await?;
tx.commit().await?;
Ok(())
}
}
#[rustfmt::skip]
mod migrations;
#[async_trait]
impl<'a, T> MintAuthTransaction<database::Error> for SQLTransaction<'a, T>
where
T: DatabaseTransaction<'a>,
{
#[instrument(skip(self))]
async fn set_active_keyset(&mut self, id: Id) -> Result<(), database::Error> {
tracing::info!("Setting auth keyset {id} active");
query(
r#"
UPDATE keyset
SET active = CASE
WHEN id = :id THEN TRUE
ELSE FALSE
END;
"#,
)?
.bind("id", id.to_string())
.execute(&self.inner)
.await?;
Ok(())
}
async fn add_keyset_info(&mut self, keyset: MintKeySetInfo) -> Result<(), database::Error> {
query(
r#"
INSERT INTO
keyset (
id, unit, active, valid_from, valid_to, derivation_path,
max_order, derivation_path_index
)
VALUES (
:id, :unit, :active, :valid_from, :valid_to, :derivation_path,
:max_order, :derivation_path_index
)
ON CONFLICT(id) DO UPDATE SET
unit = excluded.unit,
active = excluded.active,
valid_from = excluded.valid_from,
valid_to = excluded.valid_to,
derivation_path = excluded.derivation_path,
max_order = excluded.max_order,
derivation_path_index = excluded.derivation_path_index
"#,
)?
.bind("id", keyset.id.to_string())
.bind("unit", keyset.unit.to_string())
.bind("active", keyset.active)
.bind("valid_from", keyset.valid_from as i64)
.bind("valid_to", keyset.final_expiry.map(|v| v as i64))
.bind("derivation_path", keyset.derivation_path.to_string())
.bind("max_order", keyset.max_order)
.bind("derivation_path_index", keyset.derivation_path_index)
.execute(&self.inner)
.await?;
Ok(())
}
async fn add_proof(&mut self, proof: AuthProof) -> Result<(), database::Error> {
if let Err(err) = query(
r#"
INSERT INTO proof
(y, keyset_id, secret, c, state)
VALUES
(:y, :keyset_id, :secret, :c, :state)
"#,
)?
.bind("y", proof.y()?.to_bytes().to_vec())
.bind("keyset_id", proof.keyset_id.to_string())
.bind("secret", proof.secret.to_string())
.bind("c", proof.c.to_bytes().to_vec())
.bind("state", "UNSPENT".to_string())
.execute(&self.inner)
.await
{
tracing::debug!("Attempting to add known proof. Skipping.... {:?}", err);
}
Ok(())
}
async fn update_proof_state(
&mut self,
y: &PublicKey,
proofs_state: State,
) -> Result<Option<State>, Self::Err> {
let current_state = query(r#"SELECT state FROM proof WHERE y = :y"#)?
.bind("y", y.to_bytes().to_vec())
.pluck(&self.inner)
.await?
.map(|state| Ok::<_, Error>(column_as_string!(state, State::from_str)))
.transpose()?;
query(r#"UPDATE proof SET state = :new_state WHERE state = :state AND y = :y"#)?
.bind("y", y.to_bytes().to_vec())
.bind(
"state",
current_state.as_ref().map(|state| state.to_string()),
)
.bind("new_state", proofs_state.to_string())
.execute(&self.inner)
.await?;
Ok(current_state)
}
async fn add_blind_signatures(
&mut self,
blinded_messages: &[PublicKey],
blind_signatures: &[BlindSignature],
) -> Result<(), database::Error> {
for (message, signature) in blinded_messages.iter().zip(blind_signatures) {
query(
r#"
INSERT
INTO blind_signature
(y, amount, keyset_id, c)
VALUES
(:y, :amount, :keyset_id, :c)
"#,
)?
.bind("y", message.to_bytes().to_vec())
.bind("amount", u64::from(signature.amount) as i64)
.bind("keyset_id", signature.keyset_id.to_string())
.bind("c", signature.c.to_bytes().to_vec())
.execute(&self.inner)
.await?;
}
Ok(())
}
async fn add_protected_endpoints(
&mut self,
protected_endpoints: HashMap<ProtectedEndpoint, AuthRequired>,
) -> Result<(), database::Error> {
for (endpoint, auth) in protected_endpoints.iter() {
if let Err(err) = query(
r#"
INSERT OR REPLACE INTO protected_endpoints
(endpoint, auth)
VALUES (:endpoint, :auth);
"#,
)?
.bind("endpoint", serde_json::to_string(endpoint)?)
.bind("auth", serde_json::to_string(auth)?)
.execute(&self.inner)
.await
{
tracing::debug!(
"Attempting to add protected endpoint. Skipping.... {:?}",
err
);
}
}
Ok(())
}
async fn remove_protected_endpoints(
&mut self,
protected_endpoints: Vec<ProtectedEndpoint>,
) -> Result<(), database::Error> {
query(r#"DELETE FROM protected_endpoints WHERE endpoint IN (:endpoints)"#)?
.bind_vec(
"endpoints",
protected_endpoints
.iter()
.map(serde_json::to_string)
.collect::<Result<_, _>>()?,
)
.execute(&self.inner)
.await?;
Ok(())
}
}
#[async_trait]
impl<DB> MintAuthDatabase for SQLMintAuthDatabase<DB>
where
DB: DatabaseConnector,
{
type Err = database::Error;
async fn begin_transaction<'a>(
&'a self,
) -> Result<Box<dyn MintAuthTransaction<database::Error> + Send + Sync + 'a>, database::Error>
{
Ok(Box::new(SQLTransaction {
inner: self.db.begin().await?,
_phantom: PhantomData,
}))
}
async fn get_active_keyset_id(&self) -> Result<Option<Id>, Self::Err> {
Ok(query(
r#"
SELECT
id
FROM
keyset
WHERE
active = 1;
"#,
)?
.pluck(&self.db)
.await?
.map(|id| Ok::<_, Error>(column_as_string!(id, Id::from_str, Id::from_bytes)))
.transpose()?)
}
async fn get_keyset_info(&self, id: &Id) -> Result<Option<MintKeySetInfo>, Self::Err> {
Ok(query(
r#"SELECT
id,
unit,
active,
valid_from,
valid_to,
derivation_path,
derivation_path_index,
max_order,
input_fee_ppk
FROM
keyset
WHERE id=:id"#,
)?
.bind("id", id.to_string())
.fetch_one(&self.db)
.await?
.map(sql_row_to_keyset_info)
.transpose()?)
}
async fn get_keyset_infos(&self) -> Result<Vec<MintKeySetInfo>, Self::Err> {
Ok(query(
r#"SELECT
id,
unit,
active,
valid_from,
valid_to,
derivation_path,
derivation_path_index,
max_order,
input_fee_ppk
FROM
keyset"#,
)?
.fetch_all(&self.db)
.await?
.into_iter()
.map(sql_row_to_keyset_info)
.collect::<Result<Vec<_>, _>>()?)
}
async fn get_proofs_states(&self, ys: &[PublicKey]) -> Result<Vec<Option<State>>, Self::Err> {
let mut current_states = query(r#"SELECT y, state FROM proof WHERE y IN (:ys)"#)?
.bind_vec("ys", ys.iter().map(|y| y.to_bytes().to_vec()).collect())
.fetch_all(&self.db)
.await?
.into_iter()
.map(|row| {
Ok((
column_as_string!(&row[0], PublicKey::from_hex, PublicKey::from_slice),
column_as_string!(&row[1], State::from_str),
))
})
.collect::<Result<HashMap<_, _>, Error>>()?;
Ok(ys.iter().map(|y| current_states.remove(y)).collect())
}
async fn get_blind_signatures(
&self,
blinded_messages: &[PublicKey],
) -> Result<Vec<Option<BlindSignature>>, Self::Err> {
let mut blinded_signatures = query(
r#"SELECT
keyset_id,
amount,
c,
dleq_e,
dleq_s,
y
FROM
blind_signature
WHERE y IN (:y)
"#,
)?
.bind_vec(
"y",
blinded_messages
.iter()
.map(|y| y.to_bytes().to_vec())
.collect(),
)
.fetch_all(&self.db)
.await?
.into_iter()
.map(|mut row| {
Ok((
column_as_string!(
&row.pop().ok_or(Error::InvalidDbResponse)?,
PublicKey::from_hex,
PublicKey::from_slice
),
sql_row_to_blind_signature(row)?,
))
})
.collect::<Result<HashMap<_, _>, Error>>()?;
Ok(blinded_messages
.iter()
.map(|y| blinded_signatures.remove(y))
.collect())
}
async fn get_auth_for_endpoint(
&self,
protected_endpoint: ProtectedEndpoint,
) -> Result<Option<AuthRequired>, Self::Err> {
Ok(
query(r#"SELECT auth FROM protected_endpoints WHERE endpoint = :endpoint"#)?
.bind("endpoint", serde_json::to_string(&protected_endpoint)?)
.pluck(&self.db)
.await?
.map(|auth| {
Ok::<_, Error>(column_as_string!(
auth,
serde_json::from_str,
serde_json::from_slice
))
})
.transpose()?,
)
}
async fn get_auth_for_endpoints(
&self,
) -> Result<HashMap<ProtectedEndpoint, Option<AuthRequired>>, Self::Err> {
Ok(query(r#"SELECT endpoint, auth FROM protected_endpoints"#)?
.fetch_all(&self.db)
.await?
.into_iter()
.map(|row| {
let endpoint =
column_as_string!(&row[0], serde_json::from_str, serde_json::from_slice);
let auth = column_as_string!(&row[1], serde_json::from_str, serde_json::from_slice);
Ok((endpoint, Some(auth)))
})
.collect::<Result<HashMap<_, _>, Error>>()?)
}
}

View File

@@ -0,0 +1,26 @@
/// @generated
/// Auto-generated by build.rs
pub static MIGRATIONS: &[(&str, &str)] = &[
("sqlite/1_fix_sqlx_migration.sql", include_str!(r#"./migrations/sqlite/1_fix_sqlx_migration.sql"#)),
("sqlite/20240612124932_init.sql", include_str!(r#"./migrations/sqlite/20240612124932_init.sql"#)),
("sqlite/20240618195700_quote_state.sql", include_str!(r#"./migrations/sqlite/20240618195700_quote_state.sql"#)),
("sqlite/20240626092101_nut04_state.sql", include_str!(r#"./migrations/sqlite/20240626092101_nut04_state.sql"#)),
("sqlite/20240703122347_request_lookup_id.sql", include_str!(r#"./migrations/sqlite/20240703122347_request_lookup_id.sql"#)),
("sqlite/20240710145043_input_fee.sql", include_str!(r#"./migrations/sqlite/20240710145043_input_fee.sql"#)),
("sqlite/20240711183109_derivation_path_index.sql", include_str!(r#"./migrations/sqlite/20240711183109_derivation_path_index.sql"#)),
("sqlite/20240718203721_allow_unspent.sql", include_str!(r#"./migrations/sqlite/20240718203721_allow_unspent.sql"#)),
("sqlite/20240811031111_update_mint_url.sql", include_str!(r#"./migrations/sqlite/20240811031111_update_mint_url.sql"#)),
("sqlite/20240919103407_proofs_quote_id.sql", include_str!(r#"./migrations/sqlite/20240919103407_proofs_quote_id.sql"#)),
("sqlite/20240923153640_melt_requests.sql", include_str!(r#"./migrations/sqlite/20240923153640_melt_requests.sql"#)),
("sqlite/20240930101140_dleq_for_sigs.sql", include_str!(r#"./migrations/sqlite/20240930101140_dleq_for_sigs.sql"#)),
("sqlite/20241108093102_mint_mint_quote_pubkey.sql", include_str!(r#"./migrations/sqlite/20241108093102_mint_mint_quote_pubkey.sql"#)),
("sqlite/20250103201327_amount_to_pay_msats.sql", include_str!(r#"./migrations/sqlite/20250103201327_amount_to_pay_msats.sql"#)),
("sqlite/20250129200912_remove_mint_url.sql", include_str!(r#"./migrations/sqlite/20250129200912_remove_mint_url.sql"#)),
("sqlite/20250129230326_add_config_table.sql", include_str!(r#"./migrations/sqlite/20250129230326_add_config_table.sql"#)),
("sqlite/20250307213652_keyset_id_as_foreign_key.sql", include_str!(r#"./migrations/sqlite/20250307213652_keyset_id_as_foreign_key.sql"#)),
("sqlite/20250406091754_mint_time_of_quotes.sql", include_str!(r#"./migrations/sqlite/20250406091754_mint_time_of_quotes.sql"#)),
("sqlite/20250406093755_mint_created_time_signature.sql", include_str!(r#"./migrations/sqlite/20250406093755_mint_created_time_signature.sql"#)),
("sqlite/20250415093121_drop_keystore_foreign.sql", include_str!(r#"./migrations/sqlite/20250415093121_drop_keystore_foreign.sql"#)),
("sqlite/20250626120251_rename_blind_message_y_to_b.sql", include_str!(r#"./migrations/sqlite/20250626120251_rename_blind_message_y_to_b.sql"#)),
("sqlite/20250706101057_bolt12.sql", include_str!(r#"./migrations/sqlite/20250706101057_bolt12.sql"#)),
];

View File

@@ -0,0 +1,20 @@
-- Migrate `_sqlx_migrations` to our new migration system
-- Create an empty shell `_sqlx_migrations` table if it does not already exist, so the
-- statements below succeed even on databases that never used sqlx
CREATE TABLE IF NOT EXISTS _sqlx_migrations AS
SELECT
'' AS version,
'' AS description,
0 AS execution_time
WHERE 0;
INSERT INTO migrations
SELECT
version || '_' || REPLACE(description, ' ', '_') || '.sql',
execution_time
FROM _sqlx_migrations
WHERE EXISTS (
SELECT 1
FROM sqlite_master
WHERE type = 'table' AND name = '_sqlx_migrations'
);
DROP TABLE _sqlx_migrations;

View File

@@ -0,0 +1,68 @@
-- Proof Table
CREATE TABLE IF NOT EXISTS proof (
y BLOB PRIMARY KEY,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL,
secret TEXT NOT NULL,
c BLOB NOT NULL,
witness TEXT,
state TEXT CHECK ( state IN ('SPENT', 'PENDING' ) ) NOT NULL
);
CREATE INDEX IF NOT EXISTS state_index ON proof(state);
CREATE INDEX IF NOT EXISTS secret_index ON proof(secret);
-- Keysets Table
CREATE TABLE IF NOT EXISTS keyset (
id TEXT PRIMARY KEY,
unit TEXT NOT NULL,
active BOOL NOT NULL,
valid_from INTEGER NOT NULL,
valid_to INTEGER,
derivation_path TEXT NOT NULL,
max_order INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS unit_index ON keyset(unit);
CREATE INDEX IF NOT EXISTS active_index ON keyset(active);
CREATE TABLE IF NOT EXISTS mint_quote (
id TEXT PRIMARY KEY,
mint_url TEXT NOT NULL,
amount INTEGER NOT NULL,
unit TEXT NOT NULL,
request TEXT NOT NULL,
paid BOOL NOT NULL DEFAULT FALSE,
expiry INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS paid_index ON mint_quote(paid);
CREATE INDEX IF NOT EXISTS request_index ON mint_quote(request);
CREATE INDEX IF NOT EXISTS expiry_index ON mint_quote(expiry);
CREATE TABLE IF NOT EXISTS melt_quote (
id TEXT PRIMARY KEY,
unit TEXT NOT NULL,
amount INTEGER NOT NULL,
request TEXT NOT NULL,
fee_reserve INTEGER NOT NULL,
paid BOOL NOT NULL DEFAULT FALSE,
expiry INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS paid_index ON melt_quote(paid);
CREATE INDEX IF NOT EXISTS request_index ON melt_quote(request);
CREATE INDEX IF NOT EXISTS expiry_index ON melt_quote(expiry);
CREATE TABLE IF NOT EXISTS blind_signature (
y BLOB PRIMARY KEY,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL,
c BLOB NOT NULL
);
CREATE INDEX IF NOT EXISTS keyset_id_index ON blind_signature(keyset_id);

View File

@@ -0,0 +1,5 @@
ALTER TABLE melt_quote ADD state TEXT CHECK ( state IN ('UNPAID', 'PENDING', 'PAID' ) ) NOT NULL DEFAULT 'UNPAID';
ALTER TABLE melt_quote ADD payment_preimage TEXT;
ALTER TABLE melt_quote DROP COLUMN paid;
CREATE INDEX IF NOT EXISTS melt_quote_state_index ON melt_quote(state);
DROP INDEX IF EXISTS paid_index;

View File

@@ -0,0 +1,3 @@
ALTER TABLE mint_quote ADD state TEXT CHECK ( state IN ('UNPAID', 'PENDING', 'PAID', 'ISSUED' ) ) NOT NULL DEFAULT 'UNPAID';
ALTER TABLE mint_quote DROP COLUMN paid;
CREATE INDEX IF NOT EXISTS mint_quote_state_index ON mint_quote(state);

View File

@@ -0,0 +1,5 @@
ALTER TABLE mint_quote ADD COLUMN request_lookup_id TEXT;
ALTER TABLE melt_quote ADD COLUMN request_lookup_id TEXT;
CREATE UNIQUE INDEX unique_request_lookup_id_mint ON mint_quote(request_lookup_id);
CREATE UNIQUE INDEX unique_request_lookup_id_melt ON melt_quote(request_lookup_id);

View File

@@ -0,0 +1 @@
ALTER TABLE keyset ADD input_fee_ppk INTEGER;

View File

@@ -0,0 +1 @@
ALTER TABLE keyset ADD derivation_path_index INTEGER;

View File

@@ -0,0 +1,21 @@
-- Create a new table with the updated CHECK constraint
CREATE TABLE proof_new (
y BLOB PRIMARY KEY,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL,
secret TEXT NOT NULL,
c BLOB NOT NULL,
witness TEXT,
state TEXT CHECK (state IN ('SPENT', 'PENDING', 'UNSPENT')) NOT NULL
);
-- Copy the data from the old table to the new table
INSERT INTO proof_new (y, amount, keyset_id, secret, c, witness, state)
SELECT y, amount, keyset_id, secret, c, witness, state
FROM proof;
-- Drop the old table
DROP TABLE proof;
-- Rename the new table to the original table name
ALTER TABLE proof_new RENAME TO proof;

View File

@@ -0,0 +1 @@
UPDATE `mint_quote` SET `mint_url` = RTRIM(`mint_url`, '/');

View File

@@ -0,0 +1,2 @@
ALTER TABLE proof ADD COLUMN quote_id TEXT;
ALTER TABLE blind_signature ADD COLUMN quote_id TEXT;

View File

@@ -0,0 +1,8 @@
-- Melt Request Table
CREATE TABLE IF NOT EXISTS melt_request (
id TEXT PRIMARY KEY,
inputs TEXT NOT NULL,
outputs TEXT,
method TEXT NOT NULL,
unit TEXT NOT NULL
);

View File

@@ -0,0 +1,2 @@
ALTER TABLE blind_signature ADD COLUMN dleq_e TEXT;
ALTER TABLE blind_signature ADD COLUMN dleq_s TEXT;

View File

@@ -0,0 +1 @@
ALTER TABLE mint_quote ADD pubkey TEXT;

View File

@@ -0,0 +1 @@
ALTER TABLE melt_quote ADD COLUMN msat_to_pay INTEGER;

View File

@@ -0,0 +1 @@
ALTER TABLE mint_quote DROP COLUMN mint_url;

View File

@@ -0,0 +1,4 @@
CREATE TABLE IF NOT EXISTS config (
id TEXT PRIMARY KEY,
value TEXT NOT NULL
);

View File

@@ -0,0 +1,51 @@
-- Add foreign key constraints for keyset_id in SQLite
-- SQLite requires recreating tables to add foreign keys
-- First, ensure we have the right schema information
PRAGMA foreign_keys = OFF;
-- Create new proof table with foreign key constraint
CREATE TABLE proof_new (
y BLOB PRIMARY KEY,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL REFERENCES keyset(id),
secret TEXT NOT NULL,
c BLOB NOT NULL,
witness TEXT,
state TEXT CHECK (state IN ('SPENT', 'PENDING', 'UNSPENT', 'RESERVED', 'UNKNOWN')) NOT NULL,
quote_id TEXT
);
-- Copy data from old proof table to new one
INSERT INTO proof_new SELECT * FROM proof;
-- Create new blind_signature table with foreign key constraint
CREATE TABLE blind_signature_new (
y BLOB PRIMARY KEY,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL REFERENCES keyset(id),
c BLOB NOT NULL,
dleq_e TEXT,
dleq_s TEXT,
quote_id TEXT
);
-- Copy data from old blind_signature table to new one
INSERT INTO blind_signature_new SELECT * FROM blind_signature;
-- Drop old tables
DROP TABLE IF EXISTS proof;
DROP TABLE IF EXISTS blind_signature;
-- Rename new tables to original names
ALTER TABLE proof_new RENAME TO proof;
ALTER TABLE blind_signature_new RENAME TO blind_signature;
-- Recreate all indexes
CREATE INDEX IF NOT EXISTS proof_keyset_id_index ON proof(keyset_id);
CREATE INDEX IF NOT EXISTS state_index ON proof(state);
CREATE INDEX IF NOT EXISTS secret_index ON proof(secret);
CREATE INDEX IF NOT EXISTS blind_signature_keyset_id_index ON blind_signature(keyset_id);
-- Re-enable foreign keys
PRAGMA foreign_keys = ON;

View File

@@ -0,0 +1,8 @@
-- Add timestamp columns to mint_quote table
ALTER TABLE mint_quote ADD COLUMN created_time INTEGER NOT NULL DEFAULT 0;
ALTER TABLE mint_quote ADD COLUMN paid_time INTEGER;
ALTER TABLE mint_quote ADD COLUMN issued_time INTEGER;
-- Add timestamp columns to melt_quote table
ALTER TABLE melt_quote ADD COLUMN created_time INTEGER NOT NULL DEFAULT 0;
ALTER TABLE melt_quote ADD COLUMN paid_time INTEGER;

View File

@@ -0,0 +1,4 @@
-- Add created_time column to blind_signature table
ALTER TABLE blind_signature ADD COLUMN created_time INTEGER NOT NULL DEFAULT 0;
-- Add created_time column to proof table
ALTER TABLE proof ADD COLUMN created_time INTEGER NOT NULL DEFAULT 0;

View File

@@ -0,0 +1,31 @@
CREATE TABLE proof_new (
y BLOB PRIMARY KEY,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL, -- no FK constraint here
secret TEXT NOT NULL,
c BLOB NOT NULL,
witness TEXT,
state TEXT CHECK (state IN ('SPENT', 'PENDING', 'UNSPENT', 'RESERVED', 'UNKNOWN')) NOT NULL,
quote_id TEXT,
created_time INTEGER NOT NULL DEFAULT 0
);
INSERT INTO proof_new SELECT * FROM proof;
DROP TABLE proof;
ALTER TABLE proof_new RENAME TO proof;
CREATE TABLE blind_signature_new (
y BLOB PRIMARY KEY,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL, -- FK removed
c BLOB NOT NULL,
dleq_e TEXT,
dleq_s TEXT,
quote_id TEXT,
created_time INTEGER NOT NULL DEFAULT 0
);
INSERT INTO blind_signature_new SELECT * FROM blind_signature;
DROP TABLE blind_signature;
ALTER TABLE blind_signature_new RENAME TO blind_signature;

View File

@@ -0,0 +1,2 @@
-- Rename blind_signature column y to blinded_message
ALTER TABLE blind_signature RENAME COLUMN y TO blinded_message;

View File

@@ -0,0 +1,81 @@
-- Add new columns to mint_quote table
ALTER TABLE mint_quote ADD COLUMN amount_paid INTEGER NOT NULL DEFAULT 0;
ALTER TABLE mint_quote ADD COLUMN amount_issued INTEGER NOT NULL DEFAULT 0;
ALTER TABLE mint_quote ADD COLUMN payment_method TEXT NOT NULL DEFAULT 'BOLT11';
ALTER TABLE mint_quote DROP COLUMN issued_time;
ALTER TABLE mint_quote DROP COLUMN paid_time;
-- Set amount_paid equal to amount for quotes with PAID or ISSUED state
UPDATE mint_quote SET amount_paid = amount WHERE state = 'PAID' OR state = 'ISSUED';
-- Set amount_issued equal to amount for quotes with ISSUED state
UPDATE mint_quote SET amount_issued = amount WHERE state = 'ISSUED';
DROP INDEX IF EXISTS mint_quote_state_index;
-- Remove the state column from mint_quote table
ALTER TABLE mint_quote DROP COLUMN state;
-- Remove NOT NULL constraint from amount column
CREATE TABLE mint_quote_temp (
id TEXT PRIMARY KEY,
amount INTEGER,
unit TEXT NOT NULL,
request TEXT NOT NULL,
expiry INTEGER NOT NULL,
request_lookup_id TEXT UNIQUE,
pubkey TEXT,
created_time INTEGER NOT NULL DEFAULT 0,
amount_paid INTEGER NOT NULL DEFAULT 0,
amount_issued INTEGER NOT NULL DEFAULT 0,
payment_method TEXT NOT NULL DEFAULT 'BOLT11'
);
INSERT INTO mint_quote_temp (id, amount, unit, request, expiry, request_lookup_id, pubkey, created_time, amount_paid, amount_issued, payment_method)
SELECT id, amount, unit, request, expiry, request_lookup_id, pubkey, created_time, amount_paid, amount_issued, payment_method
FROM mint_quote;
DROP TABLE mint_quote;
ALTER TABLE mint_quote_temp RENAME TO mint_quote;
ALTER TABLE mint_quote ADD COLUMN request_lookup_id_kind TEXT NOT NULL DEFAULT 'payment_hash';
CREATE INDEX IF NOT EXISTS idx_mint_quote_created_time ON mint_quote(created_time);
CREATE INDEX IF NOT EXISTS idx_mint_quote_expiry ON mint_quote(expiry);
CREATE INDEX IF NOT EXISTS idx_mint_quote_request_lookup_id ON mint_quote(request_lookup_id);
CREATE INDEX IF NOT EXISTS idx_mint_quote_request_lookup_id_and_kind ON mint_quote(request_lookup_id, request_lookup_id_kind);
-- Create mint_quote_payments table
CREATE TABLE mint_quote_payments (
id INTEGER PRIMARY KEY AUTOINCREMENT,
quote_id TEXT NOT NULL,
payment_id TEXT NOT NULL UNIQUE,
timestamp INTEGER NOT NULL,
amount INTEGER NOT NULL,
FOREIGN KEY (quote_id) REFERENCES mint_quote(id)
);
-- Create index on payment_id for faster lookups
CREATE INDEX idx_mint_quote_payments_payment_id ON mint_quote_payments(payment_id);
CREATE INDEX idx_mint_quote_payments_quote_id ON mint_quote_payments(quote_id);
-- Create mint_quote_issued table
CREATE TABLE mint_quote_issued (
id INTEGER PRIMARY KEY AUTOINCREMENT,
quote_id TEXT NOT NULL,
amount INTEGER NOT NULL,
timestamp INTEGER NOT NULL,
FOREIGN KEY (quote_id) REFERENCES mint_quote(id)
);
-- Create index on quote_id for faster lookups
CREATE INDEX idx_mint_quote_issued_quote_id ON mint_quote_issued(quote_id);
-- Add new columns to melt_quote table
ALTER TABLE melt_quote ADD COLUMN payment_method TEXT NOT NULL DEFAULT 'bolt11';
ALTER TABLE melt_quote ADD COLUMN options TEXT;
ALTER TABLE melt_quote ADD COLUMN request_lookup_id_kind TEXT NOT NULL DEFAULT 'payment_hash';
CREATE INDEX IF NOT EXISTS idx_melt_quote_request_lookup_id_and_kind ON melt_quote(request_lookup_id, request_lookup_id_kind);
ALTER TABLE melt_quote DROP COLUMN msat_to_pay;

File diff suppressed because it is too large

View File

@@ -0,0 +1,206 @@
//! Very simple connection pool, to avoid an external dependency on r2d2 and other crates. If this
//! ends up working well it can be reused in other parts of the project and may be promoted to its
//! own generic crate
use std::fmt::Debug;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;
/// Pool error
#[derive(thiserror::Error, Debug)]
pub enum Error<E> {
/// Mutex Poison Error
#[error("Internal: PoisonError")]
Poison,
/// Timeout error
#[error("Timed out waiting for a resource")]
Timeout,
/// Internal database error
#[error(transparent)]
Resource(#[from] E),
}
/// Trait to manage resources
pub trait ResourceManager: Debug {
/// The resource to be pooled
type Resource: Debug;
/// The configuration that is needed in order to create the resource
type Config: Clone + Debug;
/// The error the resource may return when creating a new instance
type Error: Debug;
/// Creates a new resource with a given config
fn new_resource(
config: &Self::Config,
still_valid: Arc<AtomicBool>,
timeout: Duration,
) -> Result<Self::Resource, Error<Self::Error>>;
/// Hook called when a resource is dropped by the pool
fn drop(_resource: Self::Resource) {}
}
/// Generic connection pool of resources R
#[derive(Debug)]
pub struct Pool<RM>
where
RM: ResourceManager,
{
config: RM::Config,
queue: Mutex<Vec<(Arc<AtomicBool>, RM::Resource)>>,
in_use: AtomicUsize,
max_size: usize,
default_timeout: Duration,
waiter: Condvar,
}
/// The pooled resource
pub struct PooledResource<RM>
where
RM: ResourceManager,
{
resource: Option<(Arc<AtomicBool>, RM::Resource)>,
pool: Arc<Pool<RM>>,
}
impl<RM> Drop for PooledResource<RM>
where
RM: ResourceManager,
{
fn drop(&mut self) {
if let Some(resource) = self.resource.take() {
let mut active_resource = self.pool.queue.lock().expect("active_resource");
active_resource.push(resource);
self.pool.in_use.fetch_sub(1, Ordering::AcqRel);
// Notify a waiting thread
self.pool.waiter.notify_one();
}
}
}
impl<RM> Deref for PooledResource<RM>
where
RM: ResourceManager,
{
type Target = RM::Resource;
fn deref(&self) -> &Self::Target {
&self.resource.as_ref().expect("resource already dropped").1
}
}
impl<RM> DerefMut for PooledResource<RM>
where
RM: ResourceManager,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.resource.as_mut().expect("resource already dropped").1
}
}
impl<RM> Pool<RM>
where
RM: ResourceManager,
{
/// Creates a new pool
pub fn new(config: RM::Config, max_size: usize, default_timeout: Duration) -> Arc<Self> {
Arc::new(Self {
config,
queue: Default::default(),
in_use: Default::default(),
waiter: Default::default(),
default_timeout,
max_size,
})
}
/// Similar to get_timeout but uses the default timeout value.
#[inline(always)]
pub fn get(self: &Arc<Self>) -> Result<PooledResource<RM>, Error<RM::Error>> {
self.get_timeout(self.default_timeout)
}
/// Get a new resource or fail after timeout is reached.
///
/// This function will return a free resource or create a new one if there is still room for it;
/// otherwise, it will wait for a resource to be released for reuse.
#[inline(always)]
pub fn get_timeout(
self: &Arc<Self>,
timeout: Duration,
) -> Result<PooledResource<RM>, Error<RM::Error>> {
let mut resources = self.queue.lock().map_err(|_| Error::Poison)?;
loop {
if let Some(resource) = resources.pop() {
if resource.0.load(Ordering::SeqCst) {
drop(resources);
self.in_use.fetch_add(1, Ordering::AcqRel);
return Ok(PooledResource {
resource: Some(resource),
pool: self.clone(),
});
}
}
if self.in_use.load(Ordering::Relaxed) < self.max_size {
drop(resources);
self.in_use.fetch_add(1, Ordering::AcqRel);
let still_valid: Arc<AtomicBool> = Arc::new(true.into());
return Ok(PooledResource {
resource: Some((
still_valid.clone(),
RM::new_resource(&self.config, still_valid, timeout)?,
)),
pool: self.clone(),
});
}
resources = self
.waiter
.wait_timeout(resources, timeout)
.map_err(|_| Error::Poison)
.and_then(|(lock, timeout_result)| {
if timeout_result.timed_out() {
Err(Error::Timeout)
} else {
Ok(lock)
}
})?;
}
}
}
impl<RM> Drop for Pool<RM>
where
RM: ResourceManager,
{
fn drop(&mut self) {
if let Ok(mut resources) = self.queue.lock() {
loop {
while let Some(resource) = resources.pop() {
RM::drop(resource.1);
}
if self.in_use.load(Ordering::Relaxed) == 0 {
break;
}
resources = if let Ok(resources) = self.waiter.wait(resources) {
resources
} else {
break;
};
}
}
}
}
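
A usage sketch of the pool (a toy ResourceManager for illustration only; real backends would manage an actual connection type):

use std::convert::Infallible;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::time::Duration;

use cdk_sql_common::pool::{Error as PoolError, Pool, ResourceManager};

/// Toy manager: "connections" are just Strings built from the config.
#[derive(Debug)]
struct DummyManager;

impl ResourceManager for DummyManager {
    type Resource = String;
    type Config = String;
    type Error = Infallible;

    fn new_resource(
        config: &Self::Config,
        _still_valid: Arc<AtomicBool>,
        _timeout: Duration,
    ) -> Result<Self::Resource, PoolError<Self::Error>> {
        Ok(format!("connection to {config}"))
    }
}

fn demo() -> Result<(), PoolError<Infallible>> {
    // At most 5 live resources; waiters give up after one second.
    let pool: Arc<Pool<DummyManager>> = Pool::new("db.sqlite".to_string(), 5, Duration::from_secs(1));
    let conn = pool.get()?; // PooledResource derefs to the underlying resource
    println!("{}", *conn);
    Ok(()) // dropping `conn` pushes it back into the pool and wakes one waiter
}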

View File

@@ -0,0 +1,292 @@
//! Statements module
use std::sync::Arc;
use cdk_common::database::Error;
use crate::database::DatabaseExecutor;
use crate::value::Value;
/// The Column type
pub type Column = Value;
/// Expected response type for a given SQL statement
#[derive(Debug, Clone, Copy, Default)]
pub enum ExpectedSqlResponse {
/// A single row
SingleRow,
/// All the rows that matches a query
#[default]
ManyRows,
/// How many rows were affected by the query
AffectedRows,
/// Return the first column of the first row
Pluck,
/// Batch
Batch,
}
/// Placeholder value
#[derive(Debug, Clone)]
pub enum PlaceholderValue {
/// Value
Value(Value),
/// A set of values, expanded into a comma-separated list of placeholders
Set(Vec<Value>),
}
impl From<Value> for PlaceholderValue {
fn from(value: Value) -> Self {
PlaceholderValue::Value(value)
}
}
impl From<Vec<Value>> for PlaceholderValue {
fn from(value: Vec<Value>) -> Self {
PlaceholderValue::Set(value)
}
}
/// SQL Part
#[derive(Debug, Clone)]
pub enum SqlPart {
/// Raw SQL statement
Raw(Arc<str>),
/// Placeholder
Placeholder(Arc<str>, Option<PlaceholderValue>),
}
/// SQL parser error
#[derive(Debug, PartialEq, thiserror::Error)]
pub enum SqlParseError {
/// Unterminated string literal
#[error("Unterminated String literal")]
UnterminatedStringLiteral,
/// Invalid placeholder name
#[error("Invalid placeholder name")]
InvalidPlaceholder,
}
/// Rudimentary SQL parser.
///
/// This function does not validate the SQL statement; it only extracts the named placeholders so
/// that the statement itself stays database agnostic.
pub fn split_sql_parts(input: &str) -> Result<Vec<SqlPart>, SqlParseError> {
let mut parts = Vec::new();
let mut current = String::new();
let mut chars = input.chars().peekable();
while let Some(&c) = chars.peek() {
match c {
'\'' | '"' => {
// Start of string literal
let quote = c;
current.push(chars.next().unwrap());
let mut closed = false;
while let Some(&next) = chars.peek() {
current.push(chars.next().unwrap());
if next == quote {
if chars.peek() == Some(&quote) {
// Escaped quote (e.g. '' inside strings)
current.push(chars.next().unwrap());
} else {
closed = true;
break;
}
}
}
if !closed {
return Err(SqlParseError::UnterminatedStringLiteral);
}
}
':' => {
// Flush current raw SQL
if !current.is_empty() {
parts.push(SqlPart::Raw(current.clone().into()));
current.clear();
}
chars.next(); // consume ':'
let mut name = String::new();
while let Some(&next) = chars.peek() {
if next.is_alphanumeric() || next == '_' {
name.push(chars.next().unwrap());
} else {
break;
}
}
if name.is_empty() {
return Err(SqlParseError::InvalidPlaceholder);
}
parts.push(SqlPart::Placeholder(name.into(), None));
}
_ => {
current.push(chars.next().unwrap());
}
}
}
if !current.is_empty() {
parts.push(SqlPart::Raw(current.into()));
}
Ok(parts)
}
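
As a concrete illustration of the split (whitespace inside the raw parts shown here is approximate), string literals are passed through untouched while `:name` placeholders become their own parts:

// split_sql_parts("SELECT y FROM proof WHERE state = :state AND note = 'a:b'")
// yields roughly:
//   [Raw("SELECT y FROM proof WHERE state = "),
//    Placeholder("state", None),
//    Raw(" AND note = 'a:b'")]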
/// SQL statement with its bound placeholder values
#[derive(Debug, Default)]
pub struct Statement {
/// The SQL statement
pub parts: Vec<SqlPart>,
/// The expected response type
pub expected_response: ExpectedSqlResponse,
}
impl Statement {
/// Creates a new statement
pub fn new(sql: &str) -> Result<Self, SqlParseError> {
Ok(Self {
parts: split_sql_parts(sql)?,
..Default::default()
})
}
/// Converts the Statement into a SQL string and the list of placeholder values
///
/// By default it rewrites named placeholders into the $1..$n form, which seems to be the most
/// widely supported, although this can be reimplemented with other formats since `parts` is
/// public
pub fn to_sql(self) -> Result<(String, Vec<Value>), Error> {
let mut placeholder_values = Vec::new();
let sql = self
.parts
.into_iter()
.map(|x| match x {
SqlPart::Placeholder(name, value) => {
match value.ok_or(Error::MissingPlaceholder(name.to_string()))? {
PlaceholderValue::Value(value) => {
placeholder_values.push(value);
Ok::<_, Error>(format!("${}", placeholder_values.len()))
}
PlaceholderValue::Set(mut values) => {
let start_size = placeholder_values.len();
placeholder_values.append(&mut values);
let placeholders = (start_size + 1..=placeholder_values.len())
.map(|i| format!("${i}"))
.collect::<Vec<_>>()
.join(", ");
Ok(placeholders)
}
}
}
SqlPart::Raw(raw) => Ok(raw.trim().to_string()),
})
.collect::<Result<Vec<String>, _>>()?
.join(" ");
Ok((sql, placeholder_values))
}
/// Binds a given placeholder to a value.
#[inline]
pub fn bind<C, V>(mut self, name: C, value: V) -> Self
where
C: ToString,
V: Into<Value>,
{
let name = name.to_string();
let value = value.into();
let value: PlaceholderValue = value.into();
for part in self.parts.iter_mut() {
if let SqlPart::Placeholder(part_name, part_value) = part {
if **part_name == *name.as_str() {
*part_value = Some(value.clone());
}
}
}
self
}
/// Binds a single placeholder to a vector of values.
///
/// When the statement is rendered, a placeholder such as `:foo` (bound to, say, vec![1, 2, 3]) is
/// expanded into a comma-separated list of placeholders, one per element, and each value from the
/// vector is bound accordingly.
#[inline]
pub fn bind_vec<C, V>(mut self, name: C, value: Vec<V>) -> Self
where
C: ToString,
V: Into<Value>,
{
let name = name.to_string();
let value: PlaceholderValue = value
.into_iter()
.map(|x| x.into())
.collect::<Vec<Value>>()
.into();
for part in self.parts.iter_mut() {
if let SqlPart::Placeholder(part_name, part_value) = part {
if **part_name == *name.as_str() {
*part_value = Some(value.clone());
}
}
}
self
}
/// Runs the query and returns the first column of the first row, or None
pub async fn pluck<C>(self, conn: &C) -> Result<Option<Value>, Error>
where
C: DatabaseExecutor,
{
conn.pluck(self).await
}
/// Executes the statement as a batch
pub async fn batch<C>(self, conn: &C) -> Result<(), Error>
where
C: DatabaseExecutor,
{
conn.batch(self).await
}
/// Executes a query and returns the affected rows
pub async fn execute<C>(self, conn: &C) -> Result<usize, Error>
where
C: DatabaseExecutor,
{
conn.execute(self).await
}
/// Runs the query and returns the first row or None
pub async fn fetch_one<C>(self, conn: &C) -> Result<Option<Vec<Column>>, Error>
where
C: DatabaseExecutor,
{
conn.fetch_one(self).await
}
/// Runs the query and returns all matching rows
pub async fn fetch_all<C>(self, conn: &C) -> Result<Vec<Vec<Column>>, Error>
where
C: DatabaseExecutor,
{
conn.fetch_all(self).await
}
}
/// Creates a new query statement
#[inline(always)]
pub fn query(sql: &str) -> Result<Statement, Error> {
Statement::new(sql).map_err(|e| Error::Database(Box::new(e)))
}
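
For reference, a small sketch of what the placeholder rewriting produces (hypothetical downstream code, assuming the crate is importable as cdk_sql_common): named placeholders become $1..$n and vector bindings are expanded in place.

use cdk_sql_common::stmt::query;

fn show_rewrite() -> Result<(), cdk_common::database::Error> {
    let (sql, values) = query("SELECT y, state FROM proof WHERE keyset_id = :id AND y IN (:ys)")?
        .bind("id", "00ffd1ce903a28f9")
        .bind_vec("ys", vec![vec![0u8, 1], vec![2, 3]])
        .to_sql()?;
    // `sql` now uses positional placeholders, roughly:
    //   SELECT y, state FROM proof WHERE keyset_id = $1 AND y IN ( $2, $3 )
    // and `values` holds Text("00ffd1ce903a28f9"), Blob([0, 1]), Blob([2, 3]) in that order.
    let _ = (sql, values);
    Ok(())
}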

View File

@@ -0,0 +1,82 @@
//! Generic Rust value representation for data from the database
/// Generic Value representation of data from any database
#[derive(Clone, Debug, PartialEq)]
pub enum Value {
/// The value is a `NULL` value.
Null,
/// The value is a signed integer.
Integer(i64),
/// The value is a floating point number.
Real(f64),
/// The value is a text string.
Text(String),
/// The value is a blob of data
Blob(Vec<u8>),
}
impl From<String> for Value {
fn from(value: String) -> Self {
Self::Text(value)
}
}
impl From<&str> for Value {
fn from(value: &str) -> Self {
Self::Text(value.to_owned())
}
}
impl From<&&str> for Value {
fn from(value: &&str) -> Self {
Self::Text(value.to_string())
}
}
impl From<Vec<u8>> for Value {
fn from(value: Vec<u8>) -> Self {
Self::Blob(value)
}
}
impl From<&[u8]> for Value {
fn from(value: &[u8]) -> Self {
Self::Blob(value.to_owned())
}
}
impl From<u8> for Value {
fn from(value: u8) -> Self {
Self::Integer(value.into())
}
}
impl From<i64> for Value {
fn from(value: i64) -> Self {
Self::Integer(value)
}
}
impl From<u32> for Value {
fn from(value: u32) -> Self {
Self::Integer(value.into())
}
}
impl From<bool> for Value {
fn from(value: bool) -> Self {
Self::Integer(if value { 1 } else { 0 })
}
}
impl<T> From<Option<T>> for Value
where
T: Into<Value>,
{
fn from(value: Option<T>) -> Self {
match value {
Some(v) => v.into(),
None => Value::Null,
}
}
}

View File

@@ -0,0 +1,75 @@
//! SQLite Wallet Error
use thiserror::Error;
/// SQL Wallet Error
#[derive(Debug, Error)]
pub enum Error {
/// SQLite Error
#[error(transparent)]
Sqlite(#[from] rusqlite::Error),
/// Pool error
#[error(transparent)]
Pool(#[from] crate::pool::Error<rusqlite::Error>),
/// Missing columns
#[error("Not enough elements: expected {0}, got {1}")]
MissingColumn(usize, usize),
/// Invalid db type
#[error("Invalid type from db, expected {0} got {1}")]
InvalidType(String, String),
/// Invalid data conversion in column
#[error("Error converting {0} to {1}")]
InvalidConversion(String, String),
/// Serde Error
#[error(transparent)]
Serde(#[from] serde_json::Error),
/// CDK Error
#[error(transparent)]
CDK(#[from] cdk_common::Error),
/// NUT00 Error
#[error(transparent)]
CDKNUT00(#[from] cdk_common::nuts::nut00::Error),
/// NUT01 Error
#[error(transparent)]
CDKNUT01(#[from] cdk_common::nuts::nut01::Error),
/// NUT02 Error
#[error(transparent)]
CDKNUT02(#[from] cdk_common::nuts::nut02::Error),
/// NUT04 Error
#[error(transparent)]
CDKNUT04(#[from] cdk_common::nuts::nut04::Error),
/// NUT05 Error
#[error(transparent)]
CDKNUT05(#[from] cdk_common::nuts::nut05::Error),
/// NUT07 Error
#[error(transparent)]
CDKNUT07(#[from] cdk_common::nuts::nut07::Error),
/// NUT23 Error
#[error(transparent)]
CDKNUT23(#[from] cdk_common::nuts::nut23::Error),
/// Secret Error
#[error(transparent)]
CDKSECRET(#[from] cdk_common::secret::Error),
/// Mint Url
#[error(transparent)]
MintUrl(#[from] cdk_common::mint_url::Error),
/// BIP32 Error
#[error(transparent)]
BIP32(#[from] bitcoin::bip32::Error),
/// Could Not Initialize Database
#[error("Could not initialize database")]
CouldNotInitialize,
/// Invalid Database Path
#[error("Invalid database path")]
InvalidDbPath,
}
impl From<Error> for cdk_common::database::Error {
fn from(e: Error) -> Self {
Self::Database(Box::new(e))
}
}

View File

@@ -0,0 +1,21 @@
/// @generated
/// Auto-generated by build.rs
pub static MIGRATIONS: &[(&str, &str)] = &[
("sqlite/20240612132920_init.sql", include_str!(r#"./migrations/sqlite/20240612132920_init.sql"#)),
("sqlite/20240618200350_quote_state.sql", include_str!(r#"./migrations/sqlite/20240618200350_quote_state.sql"#)),
("sqlite/20240626091921_nut04_state.sql", include_str!(r#"./migrations/sqlite/20240626091921_nut04_state.sql"#)),
("sqlite/20240710144711_input_fee.sql", include_str!(r#"./migrations/sqlite/20240710144711_input_fee.sql"#)),
("sqlite/20240810214105_mint_icon_url.sql", include_str!(r#"./migrations/sqlite/20240810214105_mint_icon_url.sql"#)),
("sqlite/20240810233905_update_mint_url.sql", include_str!(r#"./migrations/sqlite/20240810233905_update_mint_url.sql"#)),
("sqlite/20240902151515_icon_url.sql", include_str!(r#"./migrations/sqlite/20240902151515_icon_url.sql"#)),
("sqlite/20240902210905_mint_time.sql", include_str!(r#"./migrations/sqlite/20240902210905_mint_time.sql"#)),
("sqlite/20241011125207_mint_urls.sql", include_str!(r#"./migrations/sqlite/20241011125207_mint_urls.sql"#)),
("sqlite/20241108092756_wallet_mint_quote_secretkey.sql", include_str!(r#"./migrations/sqlite/20241108092756_wallet_mint_quote_secretkey.sql"#)),
("sqlite/20250214135017_mint_tos.sql", include_str!(r#"./migrations/sqlite/20250214135017_mint_tos.sql"#)),
("sqlite/20250310111513_drop_nostr_last_checked.sql", include_str!(r#"./migrations/sqlite/20250310111513_drop_nostr_last_checked.sql"#)),
("sqlite/20250314082116_allow_pending_spent.sql", include_str!(r#"./migrations/sqlite/20250314082116_allow_pending_spent.sql"#)),
("sqlite/20250323152040_wallet_dleq_proofs.sql", include_str!(r#"./migrations/sqlite/20250323152040_wallet_dleq_proofs.sql"#)),
("sqlite/20250401120000_add_transactions_table.sql", include_str!(r#"./migrations/sqlite/20250401120000_add_transactions_table.sql"#)),
("sqlite/20250616144830_add_keyset_expiry.sql", include_str!(r#"./migrations/sqlite/20250616144830_add_keyset_expiry.sql"#)),
("sqlite/20250707093445_bolt12.sql", include_str!(r#"./migrations/sqlite/20250707093445_bolt12.sql"#)),
];

View File

@@ -0,0 +1,81 @@
-- Mints
CREATE TABLE IF NOT EXISTS mint (
mint_url TEXT PRIMARY KEY,
name TEXT,
pubkey BLOB,
version TEXT,
description TEXT,
description_long TEXT,
contact TEXT,
nuts TEXT,
motd TEXT
);
CREATE TABLE IF NOT EXISTS keyset (
id TEXT PRIMARY KEY,
mint_url TEXT NOT NULL,
unit TEXT NOT NULL,
active BOOL NOT NULL,
counter INTEGER NOT NULL DEFAULT 0,
FOREIGN KEY(mint_url) REFERENCES mint(mint_url) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS mint_quote (
id TEXT PRIMARY KEY,
mint_url TEXT NOT NULL,
amount INTEGER NOT NULL,
unit TEXT NOT NULL,
request TEXT NOT NULL,
paid BOOL NOT NULL DEFAULT FALSE,
expiry INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS paid_index ON mint_quote(paid);
CREATE INDEX IF NOT EXISTS request_index ON mint_quote(request);
CREATE TABLE IF NOT EXISTS melt_quote (
id TEXT PRIMARY KEY,
unit TEXT NOT NULL,
amount INTEGER NOT NULL,
request TEXT NOT NULL,
fee_reserve INTEGER NOT NULL,
paid BOOL NOT NULL DEFAULT FALSE,
expiry INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS paid_index ON melt_quote(paid);
CREATE INDEX IF NOT EXISTS request_index ON melt_quote(request);
CREATE TABLE IF NOT EXISTS key (
id TEXT PRIMARY KEY,
keys TEXT NOT NULL
);
-- Proof Table
CREATE TABLE IF NOT EXISTS proof (
y BLOB PRIMARY KEY,
mint_url TEXT NOT NULL,
state TEXT CHECK ( state IN ('SPENT', 'UNSPENT', 'PENDING', 'RESERVED' ) ) NOT NULL,
spending_condition TEXT,
unit TEXT NOT NULL,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL,
secret TEXT NOT NULL,
c BLOB NOT NULL,
witness TEXT
);
CREATE INDEX IF NOT EXISTS secret_index ON proof(secret);
CREATE INDEX IF NOT EXISTS state_index ON proof(state);
CREATE INDEX IF NOT EXISTS spending_condition_index ON proof(spending_condition);
CREATE INDEX IF NOT EXISTS unit_index ON proof(unit);
CREATE INDEX IF NOT EXISTS amount_index ON proof(amount);
CREATE INDEX IF NOT EXISTS mint_url_index ON proof(mint_url);
CREATE TABLE IF NOT EXISTS nostr_last_checked (
key BLOB PRIMARY KEY,
last_check INTEGER NOT NULL
);

View File

@@ -0,0 +1,5 @@
ALTER TABLE melt_quote ADD state TEXT CHECK ( state IN ('UNPAID', 'PENDING', 'PAID' ) ) NOT NULL DEFAULT 'UNPAID';
ALTER TABLE melt_quote ADD payment_preimage TEXT;
ALTER TABLE melt_quote DROP COLUMN paid;
CREATE INDEX IF NOT EXISTS melt_quote_state_index ON melt_quote(state);
DROP INDEX IF EXISTS paid_index;

View File

@@ -0,0 +1,3 @@
ALTER TABLE mint_quote ADD state TEXT CHECK ( state IN ('UNPAID', 'PENDING', 'PAID', 'ISSUED' ) ) NOT NULL DEFAULT 'UNPAID';
ALTER TABLE mint_quote DROP COLUMN paid;
CREATE INDEX IF NOT EXISTS mint_quote_state_index ON mint_quote(state);

View File

@@ -0,0 +1 @@
ALTER TABLE keyset ADD input_fee_ppk INTEGER;

View File

@@ -0,0 +1 @@
ALTER TABLE mint ADD mint_icon_url TEXT;

View File

@@ -0,0 +1,21 @@
-- Delete duplicates from `mint`
DELETE FROM `mint`
WHERE `mint_url` IN (
SELECT `mint_url`
FROM (
SELECT RTRIM(`mint_url`, '/') AS trimmed_url, MIN(rowid) AS keep_id
FROM `mint`
GROUP BY trimmed_url
HAVING COUNT(*) > 1
)
)
AND rowid NOT IN (
SELECT MIN(rowid)
FROM `mint`
GROUP BY RTRIM(`mint_url`, '/')
);
UPDATE `mint` SET `mint_url` = RTRIM(`mint_url`, '/');
UPDATE `keyset` SET `mint_url` = RTRIM(`mint_url`, '/');
UPDATE `mint_quote` SET `mint_url` = RTRIM(`mint_url`, '/');
UPDATE `proof` SET `mint_url` = RTRIM(`mint_url`, '/');

View File

@@ -0,0 +1 @@
ALTER TABLE mint RENAME COLUMN mint_icon_url TO icon_url;

View File

@@ -0,0 +1 @@
ALTER TABLE mint ADD mint_time INTEGER;

View File

@@ -0,0 +1 @@
ALTER TABLE mint ADD urls TEXT;

View File

@@ -0,0 +1 @@
ALTER TABLE mint_quote ADD secret_key TEXT;

View File

@@ -0,0 +1 @@
ALTER TABLE mint ADD tos_url TEXT;

View File

@@ -0,0 +1 @@
DROP TABLE IF EXISTS nostr_last_checked;

View File

@@ -0,0 +1,31 @@
-- Create a new table with the updated CHECK constraint
CREATE TABLE IF NOT EXISTS proof_new (
y BLOB PRIMARY KEY,
mint_url TEXT NOT NULL,
state TEXT CHECK ( state IN ('SPENT', 'UNSPENT', 'PENDING', 'RESERVED', 'PENDING_SPENT' ) ) NOT NULL,
spending_condition TEXT,
unit TEXT NOT NULL,
amount INTEGER NOT NULL,
keyset_id TEXT NOT NULL,
secret TEXT NOT NULL,
c BLOB NOT NULL,
witness TEXT
);
CREATE INDEX IF NOT EXISTS secret_index ON proof_new(secret);
CREATE INDEX IF NOT EXISTS state_index ON proof_new(state);
CREATE INDEX IF NOT EXISTS spending_condition_index ON proof_new(spending_condition);
CREATE INDEX IF NOT EXISTS unit_index ON proof_new(unit);
CREATE INDEX IF NOT EXISTS amount_index ON proof_new(amount);
CREATE INDEX IF NOT EXISTS mint_url_index ON proof_new(mint_url);
-- Copy data from old proof table to new proof table
INSERT INTO proof_new (y, mint_url, state, spending_condition, unit, amount, keyset_id, secret, c, witness)
SELECT y, mint_url, state, spending_condition, unit, amount, keyset_id, secret, c, witness
FROM proof;
-- Drop the old proof table
DROP TABLE proof;
-- Rename the new proof table to proof
ALTER TABLE proof_new RENAME TO proof;

View File

@@ -0,0 +1,4 @@
-- Migration to add DLEQ proof storage to the proof table
ALTER TABLE proof ADD COLUMN dleq_e BLOB;
ALTER TABLE proof ADD COLUMN dleq_s BLOB;
ALTER TABLE proof ADD COLUMN dleq_r BLOB;

View File

@@ -0,0 +1,18 @@
-- Migration to add transactions table
CREATE TABLE IF NOT EXISTS transactions (
id BLOB PRIMARY KEY,
mint_url TEXT NOT NULL,
direction TEXT CHECK (direction IN ('Incoming', 'Outgoing')) NOT NULL,
amount INTEGER NOT NULL,
fee INTEGER NOT NULL,
unit TEXT NOT NULL,
ys BLOB NOT NULL,
timestamp INTEGER NOT NULL,
memo TEXT,
metadata TEXT
);
CREATE INDEX IF NOT EXISTS mint_url_index ON transactions(mint_url);
CREATE INDEX IF NOT EXISTS direction_index ON transactions(direction);
CREATE INDEX IF NOT EXISTS unit_index ON transactions(unit);
CREATE INDEX IF NOT EXISTS timestamp_index ON transactions(timestamp);

View File

@@ -0,0 +1 @@
ALTER TABLE keyset ADD COLUMN final_expiry INTEGER DEFAULT NULL;

View File

@@ -0,0 +1,58 @@
ALTER TABLE mint_quote ADD COLUMN amount_paid INTEGER NOT NULL DEFAULT 0;
ALTER TABLE mint_quote ADD COLUMN amount_minted INTEGER NOT NULL DEFAULT 0;
ALTER TABLE mint_quote ADD COLUMN payment_method TEXT NOT NULL DEFAULT 'BOLT11';
-- Remove NOT NULL constraint from amount column
PRAGMA foreign_keys=off;
CREATE TABLE mint_quote_new (
id TEXT PRIMARY KEY,
mint_url TEXT NOT NULL,
payment_method TEXT NOT NULL DEFAULT 'bolt11',
amount INTEGER,
unit TEXT NOT NULL,
request TEXT NOT NULL,
state TEXT NOT NULL,
expiry INTEGER NOT NULL,
amount_paid INTEGER NOT NULL DEFAULT 0,
amount_issued INTEGER NOT NULL DEFAULT 0,
secret_key TEXT
);
-- Explicitly specify columns for proper mapping
INSERT INTO mint_quote_new (
id,
mint_url,
payment_method,
amount,
unit,
request,
state,
expiry,
amount_paid,
amount_issued,
secret_key
)
SELECT
id,
mint_url,
'bolt11', -- Default value for the new payment_method column
amount,
unit,
request,
state,
expiry,
0, -- Default value for amount_paid
0, -- Default value for amount_issued
secret_key
FROM mint_quote;
DROP TABLE mint_quote;
ALTER TABLE mint_quote_new RENAME TO mint_quote;
PRAGMA foreign_keys=on;
-- Set amount_paid equal to amount for quotes with PAID or ISSUED state
UPDATE mint_quote SET amount_paid = amount WHERE state = 'PAID' OR state = 'ISSUED';
-- Set amount_issued equal to amount for quotes with ISSUED state
UPDATE mint_quote SET amount_issued = amount WHERE state = 'ISSUED';

File diff suppressed because it is too large