mirror of
https://github.com/aljazceru/cdk.git
synced 2025-12-20 14:14:49 +01:00
Add PostgreSQL support for mint and wallet (#878)
* Add PostgreSQL support for mint and wallet
* Fixed bug to avoid empty calls to `get_proofs_states`
* Fixed SQL bug
* Avoid redundant clone()
* Add more tests for the storage layer
* Minor enhancements
* Add a generic function to execute db operations; it logs slow operations and errors
* Provision a postgres db for tests
* Update deps for msrv
* Add postgres to pipeline
* feat: add psgl to example and docker
* feat: db url fmt

Co-authored-by: thesimplekid <tsk@thesimplekid.com>
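The commit message mentions a generic helper for running database operations that logs slow statements and errors. The sketch below is a hedged illustration of that idea only; the function name, 250 ms threshold, and `tracing` calls are assumptions, not the actual `cdk-sql-common` implementation (whose call sites appear later in `crates/cdk-postgres/src/db.rs`).

```rust
use std::future::Future;
use std::time::{Duration, Instant};

/// Hedged sketch: time a DB operation, warn when it is slow, log failures,
/// then map the error into the caller's error type.
pub async fn run_db_operation_sketch<T, InErr, OutErr, F>(
    sql: &str,
    operation: F,
    map_err: impl FnOnce(InErr) -> OutErr,
) -> Result<T, OutErr>
where
    F: Future<Output = Result<T, InErr>>,
    InErr: std::fmt::Debug,
{
    // Assumed threshold for "slow"; purely illustrative.
    const SLOW: Duration = Duration::from_millis(250);

    let started = Instant::now();
    let result = operation.await;
    let elapsed = started.elapsed();

    if elapsed > SLOW {
        tracing::warn!("slow db operation ({elapsed:?}): {sql}");
    }

    match result {
        Ok(value) => Ok(value),
        Err(err) => {
            tracing::error!("db operation failed: {err:?} ({sql})");
            Err(map_err(err))
        }
    }
}
```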
This commit is contained in:
.github/workflows/ci.yml (vendored): 3 lines changed
@@ -160,6 +160,7 @@ jobs:
 database:
 [
 SQLITE,
+POSTGRES
 ]
 steps:
 - name: checkout
@@ -262,6 +263,8 @@ jobs:
 shared-key: "stable"
 - name: Test fake mint
 run: nix develop -i -L .#stable --command just test-pure ${{ matrix.database }}
+- name: Install Postgres
+  run: bash -x crates/cdk-postgres/start_db_for_test.sh
 - name: Test mint
 run: nix develop -i -L .#stable --command just test
@@ -56,6 +56,7 @@ cdk-mint-rpc = { path = "./crates/cdk-mint-rpc", version = "=0.11.0" }
 cdk-redb = { path = "./crates/cdk-redb", default-features = true, version = "=0.11.0" }
 cdk-sql-common = { path = "./crates/cdk-sql-common", default-features = true, version = "=0.11.0" }
 cdk-sqlite = { path = "./crates/cdk-sqlite", default-features = true, version = "=0.11.0" }
+cdk-postgres = { path = "./crates/cdk-postgres", default-features = true, version = "=0.11.0" }
 cdk-signatory = { path = "./crates/cdk-signatory", version = "=0.11.0", default-features = false }
 cdk-mintd = { path = "./crates/cdk-mintd", version = "=0.11.0", default-features = false }
 clap = { version = "4.5.31", features = ["derive"] }
@@ -17,6 +17,7 @@ The project is split up into several crates in the `crates/` directory:
 * Libraries:
   * [**cdk**](./crates/cdk/): Rust implementation of Cashu protocol.
   * [**cdk-sqlite**](./crates/cdk-sqlite/): SQLite Storage backend.
+  * [**cdk-postgres**](./crates/cdk-postgres/): PostgreSQL Storage backend.
   * [**cdk-redb**](./crates/cdk-redb/): Redb Storage backend.
   * [**cdk-axum**](./crates/cdk-axum/): Axum webserver for mint.
   * [**cdk-cln**](./crates/cdk-cln/): CLN Lightning backend for mint.
@@ -78,11 +78,56 @@ where
     tx.commit().await.unwrap();
 }
+
+/// Test the basic storing and retrieving proofs from the database. Probably the database would use
+/// binary/`Vec<u8>` to store data, that's why this test would quickly identify issues before running
+/// other tests
+pub async fn add_and_find_proofs<DB>(db: DB)
+where
+    DB: Database<database::Error> + KeysDatabase<Err = database::Error>,
+{
+    let keyset_id = setup_keyset(&db).await;
+
+    let quote_id = Uuid::max();
+
+    let proofs = vec![
+        Proof {
+            amount: Amount::from(100),
+            keyset_id,
+            secret: Secret::generate(),
+            c: SecretKey::generate().public_key(),
+            witness: None,
+            dleq: None,
+        },
+        Proof {
+            amount: Amount::from(200),
+            keyset_id,
+            secret: Secret::generate(),
+            c: SecretKey::generate().public_key(),
+            witness: None,
+            dleq: None,
+        },
+    ];
+
+    // Add proofs to database
+    let mut tx = Database::begin_transaction(&db).await.unwrap();
+    tx.add_proofs(proofs.clone(), Some(quote_id)).await.unwrap();
+    assert!(tx.commit().await.is_ok());
+
+    let proofs_from_db = db.get_proofs_by_ys(&[proofs[0].c, proofs[1].c]).await;
+    assert!(proofs_from_db.is_ok());
+    assert_eq!(proofs_from_db.unwrap().len(), 2);
+
+    let proofs_from_db = db.get_proof_ys_by_quote_id(&quote_id).await;
+    assert!(proofs_from_db.is_ok());
+    assert_eq!(proofs_from_db.unwrap().len(), 2);
+}
+
 /// Unit test that is expected to be passed for a correct database implementation
 #[macro_export]
 macro_rules! mint_db_test {
     ($make_db_fn:ident) => {
         mint_db_test!(state_transition, $make_db_fn);
+        mint_db_test!(add_and_find_proofs, $make_db_fn);
     };
     ($name:ident, $make_db_fn:ident) => {
         #[tokio::test]
@@ -45,6 +45,7 @@ struct Args {
 /// Start a fake mint with authentication using the library
 async fn start_fake_auth_mint(
     temp_dir: &Path,
+    database: &str,
     port: u16,
     openid_discovery: String,
     shutdown: Arc<Notify>,
@@ -62,6 +63,7 @@ async fn start_fake_auth_mint(
 
     let mut settings = shared::create_fake_wallet_settings(
         port,
+        database,
         Some(Mnemonic::generate(12)?.to_string()),
         None,
         Some(fake_wallet_config),
@@ -123,6 +125,7 @@ async fn main() -> Result<()> {
 
     let handle = start_fake_auth_mint(
         &temp_dir,
+        &args.database_type,
         args.port,
         args.openid_discovery.clone(),
         shutdown_clone,
@@ -46,6 +46,7 @@ struct Args {
 async fn start_fake_mint(
     temp_dir: &Path,
     port: u16,
+    database: &str,
     shutdown: Arc<Notify>,
     external_signatory: bool,
 ) -> Result<tokio::task::JoinHandle<()>> {
@@ -77,8 +78,13 @@ async fn start_fake_mint(
     });
 
     // Create settings struct for fake mint using shared function
-    let settings =
-        shared::create_fake_wallet_settings(port, mnemonic, signatory_config, fake_wallet_config);
+    let settings = shared::create_fake_wallet_settings(
+        port,
+        database,
+        mnemonic,
+        signatory_config,
+        fake_wallet_config,
+    );
 
     println!("Starting fake mintd on port {port}");
 
@@ -129,6 +135,7 @@ async fn main() -> Result<()> {
     let handle = start_fake_mint(
         &temp_dir,
         args.port,
+        &args.database_type,
         shutdown_clone,
         args.external_signatory,
     )
@@ -11,6 +11,7 @@ use std::time::Duration;
 
 use anyhow::Result;
 use cdk_axum::cache;
+use cdk_mintd::config::{Database, DatabaseEngine};
 use tokio::signal;
 use tokio::sync::Notify;
 
@@ -150,6 +151,7 @@ pub fn display_mint_info(port: u16, temp_dir: &Path, database_type: &str) {
 /// Create settings for a fake wallet mint
 pub fn create_fake_wallet_settings(
     port: u16,
+    database: &str,
     mnemonic: Option<String>,
     signatory_config: Option<(String, String)>, // (url, certs_dir)
     fake_wallet_config: Option<cdk_mintd::config::FakeWallet>,
@@ -182,7 +184,10 @@ pub fn create_fake_wallet_settings(
         lnd: None,
         fake_wallet: fake_wallet_config,
         grpc_processor: None,
-        database: cdk_mintd::config::Database::default(),
+        database: Database {
+            engine: DatabaseEngine::from_str(database).expect("valid database"),
+            postgres: None,
+        },
         mint_management_rpc: None,
         auth: None,
     }
@@ -35,6 +35,7 @@ cdk = { workspace = true, features = [
 cdk-sqlite = { workspace = true, features = [
     "mint",
 ] }
+cdk-postgres = { workspace = true, features = ["mint"]}
 cdk-cln = { workspace = true, optional = true }
 cdk-lnbits = { workspace = true, optional = true }
 cdk-lnd = { workspace = true, optional = true }
@@ -6,7 +6,15 @@
 
 **ALPHA** This library is in early development, the API will change and should be used with caution.
 
-Cashu mint daemon implementation for the Cashu Development Kit (CDK). This binary provides a complete Cashu mint server implementation.
+Cashu mint daemon implementation for the Cashu Development Kit (CDK). This binary provides a complete Cashu mint server implementation with support for multiple database backends and Lightning Network integrations.
+
+## Features
+
+- **Multiple Database Backends**: SQLite and PostgreSQL
+- **Lightning Network Integration**: Support for CLN, LND, LNbits, and test backends
+- **Authentication**: Optional user authentication with OpenID Connect
+- **Management RPC**: gRPC interface for mint management
+- **Docker Support**: Ready-to-use Docker configurations
+
 ## Installation
 
@@ -20,9 +28,64 @@ From source:
 cargo install --path .
 ```
+
+## Quick Start
+
+### Using SQLite (Default)
+```bash
+# Start with SQLite (no additional setup required)
+cdk-mintd
+```
+
+### Using PostgreSQL
+```bash
+# Set environment variables
+export CDK_MINTD_DATABASE=postgres
+export CDK_MINTD_DATABASE_URL="postgresql://postgres:password@localhost:5432/cdk_mint"
+
+# Start the mint
+cdk-mintd
+```
+
+### Using Docker
+```bash
+# SQLite
+docker-compose up
+
+# PostgreSQL
+docker-compose -f docker-compose.postgres.yaml up
+```
+
 ## Configuration
 
-The mint can be configured through environment variables or a configuration file. See the documentation for available options.
+The mint can be configured through environment variables or a configuration file. See `example.config.toml` for all available options.
+
+### Database Configuration
+
+#### SQLite (Default)
+```toml
+[database]
+engine = "sqlite"
+```
+
+#### PostgreSQL
+```toml
+[database]
+engine = "postgres"
+```
+Set `CDK_MINTD_DATABASE_URL` environment variable for connection string.
+
+#### ReDB
+```toml
+[database]
+engine = "redb"
+```
+
+### Lightning Backend Configuration
+
+```toml
+[ln]
+ln_backend = "fakewallet" # Options: cln, lnd, lnbits, fakewallet
+```
+
 ## Usage
 
@@ -33,10 +96,29 @@ cdk-mintd
 # Start with custom config file
 cdk-mintd --config /path/to/config.toml
+
+# Start with specific work directory
+cdk-mintd --work-dir /path/to/work/dir
+
 # Show help
 cdk-mintd --help
 ```
+
+## Environment Variables
+
+Key environment variables:
+
+- `CDK_MINTD_DATABASE`: Database engine (sqlite/postgres/redb)
+- `CDK_MINTD_DATABASE_URL`: PostgreSQL connection string
+- `CDK_MINTD_LN_BACKEND`: Lightning backend type
+- `CDK_MINTD_LISTEN_HOST`: Host to bind to
+- `CDK_MINTD_LISTEN_PORT`: Port to bind to
+
+## Documentation
+
+- [Configuration Examples](./example.config.toml)
+- [PostgreSQL Setup Guide](../../POSTGRES.md)
+- [Development Guide](../../DEVELOPMENT.md)
+
 ## License
 
 This project is licensed under the [MIT License](../../LICENSE).
@@ -38,8 +38,21 @@ tti = 60
 
 
 [database]
-# Database engine (sqlite/redb) defaults to sqlite
-# engine = "sqlite"
+# Database engine (sqlite/postgres) defaults to sqlite
+engine = "sqlite"
+
+# PostgreSQL configuration (when engine = "postgres")
+[database.postgres]
+# PostgreSQL connection URL
+# Can also be set via CDK_MINTD_POSTGRES_URL or CDK_MINTD_DATABASE_URL environment variables
+# Environment variables take precedence over config file settings
+url = "postgresql://user:password@localhost:5432/cdk_mint"
+# TLS mode: "disable", "prefer", "require" (optional, defaults to "disable")
+tls_mode = "disable"
+# Maximum number of connections in the pool (optional, defaults to 20)
+max_connections = 20
+# Connection timeout in seconds (optional, defaults to 10)
+connection_timeout_seconds = 10
 
 [ln]
 # Required ln backend `cln`, `lnd`, `fakewallet`, 'lnbits'
@@ -190,6 +190,7 @@ pub struct GrpcProcessor {
 pub enum DatabaseEngine {
     #[default]
     Sqlite,
+    Postgres,
 }
 
 impl std::str::FromStr for DatabaseEngine {
@@ -198,6 +199,7 @@ impl std::str::FromStr for DatabaseEngine {
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s.to_lowercase().as_str() {
             "sqlite" => Ok(DatabaseEngine::Sqlite),
+            "postgres" => Ok(DatabaseEngine::Postgres),
             _ => Err(format!("Unknown database engine: {s}")),
         }
     }
@@ -206,6 +208,26 @@ impl std::str::FromStr for DatabaseEngine {
 #[derive(Debug, Clone, Serialize, Deserialize, Default)]
 pub struct Database {
     pub engine: DatabaseEngine,
+    pub postgres: Option<PostgresConfig>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PostgresConfig {
+    pub url: String,
+    pub tls_mode: Option<String>,
+    pub max_connections: Option<usize>,
+    pub connection_timeout_seconds: Option<u64>,
+}
+
+impl Default for PostgresConfig {
+    fn default() -> Self {
+        Self {
+            url: String::new(),
+            tls_mode: Some("disable".to_string()),
+            max_connections: Some(20),
+            connection_timeout_seconds: Some(10),
+        }
+    }
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
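For orientation, a short sketch of how the config types added above compose with the `[database.postgres]` table from `example.config.toml`. This is illustrative only: the `toml` crate is assumed here purely for the example, and the URL is a placeholder.

```rust
use std::str::FromStr;

// Hypothetical helper: build a Database config the way cdk-mintd's
// settings could end up looking after parsing.
fn example_database_config() -> Database {
    // Engine selection, e.g. from the CDK_MINTD_DATABASE env var.
    let engine = DatabaseEngine::from_str("postgres").expect("known engine");

    // The [database.postgres] table maps onto the serde-derived
    // PostgresConfig (deserialized here with the `toml` crate as an assumption).
    let postgres: PostgresConfig = toml::from_str(
        r#"
        url = "postgresql://user:password@localhost:5432/cdk_mint"
        tls_mode = "disable"
        max_connections = 20
        connection_timeout_seconds = 10
        "#,
    )
    .expect("valid postgres config");

    Database {
        engine,
        postgres: Some(postgres),
    }
}
```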
@@ -2,6 +2,7 @@
 
 pub const ENV_WORK_DIR: &str = "CDK_MINTD_WORK_DIR";
 pub const DATABASE_ENV_VAR: &str = "CDK_MINTD_DATABASE";
+pub const DATABASE_URL_ENV_VAR: &str = "CDK_MINTD_DATABASE_URL"; // Legacy, maintained for backward compatibility
 pub const ENV_URL: &str = "CDK_MINTD_URL";
 pub const ENV_LISTEN_HOST: &str = "CDK_MINTD_LISTEN_HOST";
 pub const ENV_LISTEN_PORT: &str = "CDK_MINTD_LISTEN_PORT";
crates/cdk-mintd/src/env_vars/database.rs (Normal file, 40 lines)
@@ -0,0 +1,40 @@
//! Database environment variables

use std::env;

use crate::config::PostgresConfig;

pub const ENV_POSTGRES_URL: &str = "CDK_MINTD_POSTGRES_URL";
pub const ENV_POSTGRES_TLS_MODE: &str = "CDK_MINTD_POSTGRES_TLS_MODE";
pub const ENV_POSTGRES_MAX_CONNECTIONS: &str = "CDK_MINTD_POSTGRES_MAX_CONNECTIONS";
pub const ENV_POSTGRES_CONNECTION_TIMEOUT: &str = "CDK_MINTD_POSTGRES_CONNECTION_TIMEOUT_SECONDS";

impl PostgresConfig {
    pub fn from_env(mut self) -> Self {
        // Check for new PostgreSQL URL env var first, then fallback to legacy DATABASE_URL
        if let Ok(url) = env::var(ENV_POSTGRES_URL) {
            self.url = url;
        } else if let Ok(url) = env::var(super::DATABASE_URL_ENV_VAR) {
            // Backward compatibility with the existing DATABASE_URL env var
            self.url = url;
        }

        if let Ok(tls_mode) = env::var(ENV_POSTGRES_TLS_MODE) {
            self.tls_mode = Some(tls_mode);
        }

        if let Ok(max_connections) = env::var(ENV_POSTGRES_MAX_CONNECTIONS) {
            if let Ok(parsed) = max_connections.parse::<usize>() {
                self.max_connections = Some(parsed);
            }
        }

        if let Ok(timeout) = env::var(ENV_POSTGRES_CONNECTION_TIMEOUT) {
            if let Ok(parsed) = timeout.parse::<u64>() {
                self.connection_timeout_seconds = Some(parsed);
            }
        }

        self
    }
}
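A short sketch of the override order this `from_env` implements. Illustrative only: the variable values are placeholders and using `std::env::set_var` inline is for demonstration, not how the daemon sets configuration.

```rust
use std::env;

// Demonstrates the precedence implemented in PostgresConfig::from_env().
fn postgres_env_precedence_example() {
    // The legacy variable is honoured...
    env::set_var("CDK_MINTD_DATABASE_URL", "postgresql://legacy@localhost/cdk_mint");
    // ...but the dedicated CDK_MINTD_POSTGRES_URL wins when both are set.
    env::set_var(ENV_POSTGRES_URL, "postgresql://new@localhost/cdk_mint");
    // Unparseable numbers are ignored, so the default (Some(20)) is kept.
    env::set_var(ENV_POSTGRES_MAX_CONNECTIONS, "not-a-number");

    let cfg = PostgresConfig::default().from_env();
    assert_eq!(cfg.url, "postgresql://new@localhost/cdk_mint");
    assert_eq!(cfg.max_connections, Some(20));
}
```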
@@ -4,6 +4,7 @@
 //! organized by component.
 
 mod common;
+mod database;
 mod info;
 mod ln;
 mod mint_info;
@@ -32,6 +33,7 @@ pub use auth::*;
 #[cfg(feature = "cln")]
 pub use cln::*;
 pub use common::*;
+pub use database::*;
 #[cfg(feature = "fakewallet")]
 pub use fake_wallet::*;
 #[cfg(feature = "grpc-processor")]
@@ -45,13 +47,24 @@ pub use lnd::*;
 pub use management_rpc::*;
 pub use mint_info::*;
 
-use crate::config::{Database, DatabaseEngine, LnBackend, Settings};
+use crate::config::{DatabaseEngine, LnBackend, Settings};
 
 impl Settings {
     pub fn from_env(&mut self) -> Result<Self> {
         if let Ok(database) = env::var(DATABASE_ENV_VAR) {
             let engine = DatabaseEngine::from_str(&database).map_err(|err| anyhow!(err))?;
-            self.database = Database { engine };
+            self.database.engine = engine;
+        }
+
+        // Parse PostgreSQL-specific configuration from environment variables
+        if self.database.engine == DatabaseEngine::Postgres {
+            self.database.postgres = Some(
+                self.database
+                    .postgres
+                    .clone()
+                    .unwrap_or_default()
+                    .from_env(),
+            );
         }
 
         self.info = self.info.clone().from_env();
@@ -3,7 +3,7 @@
 // std
 #[cfg(feature = "auth")]
 use std::collections::HashMap;
-use std::env;
+use std::env::{self};
 use std::net::SocketAddr;
 use std::path::{Path, PathBuf};
 use std::str::FromStr;
@@ -39,6 +39,7 @@ use cdk::nuts::{AuthRequired, Method, ProtectedEndpoint, RoutePath};
 use cdk::nuts::{ContactInfo, MintVersion, PaymentMethod};
 use cdk::types::QuoteTTL;
 use cdk_axum::cache::HttpCache;
+use cdk_postgres::{MintPgAuthDatabase, MintPgDatabase};
 #[cfg(feature = "auth")]
 use cdk_sqlite::mint::MintSqliteAuthDatabase;
 use cdk_sqlite::MintSqliteDatabase;
@@ -156,6 +157,23 @@ async fn setup_database(
             let keystore: Arc<dyn MintKeysDatabase<Err = cdk_database::Error> + Send + Sync> = db;
             Ok((localstore, keystore))
         }
+        DatabaseEngine::Postgres => {
+            // Get the PostgreSQL configuration, ensuring it exists
+            let pg_config = settings.database.postgres.as_ref().ok_or_else(|| {
+                anyhow!("PostgreSQL configuration is required when using PostgreSQL engine")
+            })?;
+
+            if pg_config.url.is_empty() {
+                bail!("PostgreSQL URL is required. Set it in config file [database.postgres] section or via CDK_MINTD_POSTGRES_URL/CDK_MINTD_DATABASE_URL environment variable");
+            }
+
+            let pg_db = Arc::new(MintPgDatabase::new(pg_config.url.as_str()).await?);
+            let localstore: Arc<dyn MintDatabase<cdk_database::Error> + Send + Sync> =
+                pg_db.clone();
+            let keystore: Arc<dyn MintKeysDatabase<Err = cdk_database::Error> + Send + Sync> =
+                pg_db;
+            Ok((localstore, keystore))
+        }
     }
 }
 
@@ -454,6 +472,18 @@ async fn setup_authentication(
 
             Arc::new(sqlite_db)
         }
+        DatabaseEngine::Postgres => {
+            // Get the PostgreSQL configuration, ensuring it exists
+            let pg_config = settings.database.postgres.as_ref().ok_or_else(|| {
+                anyhow!("PostgreSQL configuration is required when using PostgreSQL engine")
+            })?;
+
+            if pg_config.url.is_empty() {
+                bail!("PostgreSQL URL is required for auth database. Set it in config file [database.postgres] section or via CDK_MINTD_POSTGRES_URL/CDK_MINTD_DATABASE_URL environment variable");
+            }
+
+            Arc::new(MintPgAuthDatabase::new(pg_config.url.as_str()).await?)
+        }
     };
 
     let mut protected_endpoints = HashMap::new();
crates/cdk-postgres/Cargo.toml (Normal file, 35 lines)
@@ -0,0 +1,35 @@
[package]
name = "cdk-postgres"
version.workspace = true
edition.workspace = true
authors = ["CDK Developers"]
description = "PostgreSQL storage backend for CDK"
license.workspace = true
homepage = "https://github.com/cashubtc/cdk"
repository = "https://github.com/cashubtc/cdk.git"
rust-version.workspace = true # MSRV
readme = "README.md"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["mint", "wallet", "auth"]
mint = ["cdk-common/mint", "cdk-sql-common/mint"]
wallet = ["cdk-common/wallet", "cdk-sql-common/wallet"]
auth = ["cdk-common/auth", "cdk-sql-common/auth"]

[dependencies]
async-trait.workspace = true
cdk-common = { workspace = true, features = ["test"] }
bitcoin.workspace = true
cdk-sql-common = { workspace = true }
thiserror.workspace = true
tokio = { workspace = true, features = ["rt-multi-thread"] }
tracing.workspace = true
serde.workspace = true
serde_json.workspace = true
lightning-invoice.workspace = true
uuid.workspace = true
tokio-postgres = "0.7.13"
futures-util = "0.3.31"
postgres-native-tls = "0.5.1"
once_cell.workspace = true
crates/cdk-postgres/src/db.rs (Normal file, 155 lines)
@@ -0,0 +1,155 @@
use cdk_common::database::Error;
use cdk_sql_common::run_db_operation;
use cdk_sql_common::stmt::{Column, Statement};
use futures_util::{pin_mut, TryStreamExt};
use tokio_postgres::error::SqlState;
use tokio_postgres::{Client, Error as PgError};

use crate::value::PgValue;

#[inline(always)]
fn to_pgsql_error(err: PgError) -> Error {
    if let Some(err) = err.as_db_error() {
        let code = err.code().to_owned();
        if code == SqlState::INTEGRITY_CONSTRAINT_VIOLATION || code == SqlState::UNIQUE_VIOLATION {
            return Error::Duplicate;
        }
    }

    Error::Database(Box::new(err))
}

#[inline(always)]
pub async fn pg_batch(conn: &Client, statement: Statement) -> Result<(), Error> {
    let (sql, _placeholder_values) = statement.to_sql()?;

    run_db_operation(&sql, conn.batch_execute(&sql), to_pgsql_error).await
}

#[inline(always)]
pub async fn pg_execute(conn: &Client, statement: Statement) -> Result<usize, Error> {
    let (sql, placeholder_values) = statement.to_sql()?;
    let prepared_statement = conn.prepare(&sql).await.map_err(to_pgsql_error)?;

    run_db_operation(
        &sql,
        async {
            conn.execute_raw(
                &prepared_statement,
                placeholder_values
                    .iter()
                    .map(|x| x.into())
                    .collect::<Vec<PgValue>>(),
            )
            .await
            .map(|x| x as usize)
        },
        to_pgsql_error,
    )
    .await
}

#[inline(always)]
pub async fn pg_fetch_one(
    conn: &Client,
    statement: Statement,
) -> Result<Option<Vec<Column>>, Error> {
    let (sql, placeholder_values) = statement.to_sql()?;
    let prepared_statement = conn.prepare(&sql).await.map_err(to_pgsql_error)?;

    run_db_operation(
        &sql,
        async {
            let stream = conn
                .query_raw(
                    &prepared_statement,
                    placeholder_values
                        .iter()
                        .map(|x| x.into())
                        .collect::<Vec<PgValue>>(),
                )
                .await?;

            pin_mut!(stream);

            stream
                .try_next()
                .await?
                .map(|row| {
                    (0..row.len())
                        .map(|i| row.try_get::<_, PgValue>(i).map(|value| value.into()))
                        .collect::<Result<Vec<_>, _>>()
                })
                .transpose()
        },
        to_pgsql_error,
    )
    .await
}

#[inline(always)]
pub async fn pg_fetch_all(conn: &Client, statement: Statement) -> Result<Vec<Vec<Column>>, Error> {
    let (sql, placeholder_values) = statement.to_sql()?;
    let prepared_statement = conn.prepare(&sql).await.map_err(to_pgsql_error)?;

    run_db_operation(
        &sql,
        async {
            let stream = conn
                .query_raw(
                    &prepared_statement,
                    placeholder_values
                        .iter()
                        .map(|x| x.into())
                        .collect::<Vec<PgValue>>(),
                )
                .await?;

            pin_mut!(stream);

            let mut rows = vec![];
            while let Some(row) = stream.try_next().await? {
                rows.push(
                    (0..row.len())
                        .map(|i| row.try_get::<_, PgValue>(i).map(|value| value.into()))
                        .collect::<Result<Vec<_>, _>>()?,
                );
            }

            Ok(rows)
        },
        to_pgsql_error,
    )
    .await
}

#[inline(always)]
pub async fn pg_pluck(conn: &Client, statement: Statement) -> Result<Option<Column>, Error> {
    let (sql, placeholder_values) = statement.to_sql()?;
    let prepared_statement = conn.prepare(&sql).await.map_err(to_pgsql_error)?;

    run_db_operation(
        &sql,
        async {
            let stream = conn
                .query_raw(
                    &prepared_statement,
                    placeholder_values
                        .iter()
                        .map(|x| x.into())
                        .collect::<Vec<PgValue>>(),
                )
                .await?;

            pin_mut!(stream);

            stream
                .try_next()
                .await?
                .map(|row| row.try_get::<_, PgValue>(0).map(|value| value.into()))
                .transpose()
        },
        to_pgsql_error,
    )
    .await
}
crates/cdk-postgres/src/lib.rs (Normal file, 257 lines)
@@ -0,0 +1,257 @@
use std::fmt::Debug;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, OnceLock};
use std::time::Duration;

use cdk_common::database::Error;
use cdk_sql_common::database::{DatabaseConnector, DatabaseExecutor, GenericTransactionHandler};
use cdk_sql_common::mint::SQLMintAuthDatabase;
use cdk_sql_common::pool::{DatabaseConfig, DatabasePool};
use cdk_sql_common::stmt::{Column, Statement};
use cdk_sql_common::{SQLMintDatabase, SQLWalletDatabase};
use db::{pg_batch, pg_execute, pg_fetch_all, pg_fetch_one, pg_pluck};
use tokio::sync::{Mutex, Notify};
use tokio::time::timeout;
use tokio_postgres::{connect, Client, Error as PgError, NoTls};

mod db;
mod value;

#[derive(Debug)]
pub struct PgConnectionPool;

#[derive(Clone)]
pub enum SslMode {
    NoTls(NoTls),
    NativeTls(postgres_native_tls::MakeTlsConnector),
}

impl Default for SslMode {
    fn default() -> Self {
        SslMode::NoTls(NoTls {})
    }
}

impl Debug for SslMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let debug_text = match self {
            Self::NoTls(_) => "NoTls",
            Self::NativeTls(_) => "NativeTls",
        };

        write!(f, "SslMode::{debug_text}")
    }
}

/// Postgres configuration
#[derive(Clone, Debug)]
pub struct PgConfig {
    url: String,
    tls: SslMode,
}

impl DatabaseConfig for PgConfig {
    fn default_timeout(&self) -> Duration {
        Duration::from_secs(10)
    }

    fn max_size(&self) -> usize {
        20
    }
}

impl From<&str> for PgConfig {
    fn from(value: &str) -> Self {
        PgConfig {
            url: value.to_owned(),
            tls: Default::default(),
        }
    }
}

impl DatabasePool for PgConnectionPool {
    type Config = PgConfig;

    type Connection = PostgresConnection;

    type Error = PgError;

    fn new_resource(
        config: &Self::Config,
        still_valid: Arc<AtomicBool>,
        timeout: Duration,
    ) -> Result<Self::Connection, cdk_sql_common::pool::Error<Self::Error>> {
        Ok(PostgresConnection::new(
            config.to_owned(),
            timeout,
            still_valid,
        ))
    }
}

/// A postgres connection
#[derive(Debug)]
pub struct PostgresConnection {
    timeout: Duration,
    error: Arc<Mutex<Option<cdk_common::database::Error>>>,
    result: Arc<OnceLock<Client>>,
    notify: Arc<Notify>,
}

impl PostgresConnection {
    /// Creates a new instance
    pub fn new(config: PgConfig, timeout: Duration, still_valid: Arc<AtomicBool>) -> Self {
        let failed = Arc::new(Mutex::new(None));
        let result = Arc::new(OnceLock::new());
        let notify = Arc::new(Notify::new());
        let error_clone = failed.clone();
        let result_clone = result.clone();
        let notify_clone = notify.clone();

        tokio::spawn(async move {
            match config.tls {
                SslMode::NoTls(tls) => {
                    let (client, connection) = match connect(&config.url, tls).await {
                        Ok((client, connection)) => (client, connection),
                        Err(err) => {
                            *error_clone.lock().await =
                                Some(cdk_common::database::Error::Database(Box::new(err)));
                            still_valid.store(false, std::sync::atomic::Ordering::Release);
                            notify_clone.notify_waiters();
                            return;
                        }
                    };

                    tokio::spawn(async move {
                        let _ = connection.await;
                        still_valid.store(false, std::sync::atomic::Ordering::Release);
                    });

                    let _ = result_clone.set(client);
                    notify_clone.notify_waiters();
                }
                SslMode::NativeTls(tls) => {
                    let (client, connection) = match connect(&config.url, tls).await {
                        Ok((client, connection)) => (client, connection),
                        Err(err) => {
                            *error_clone.lock().await =
                                Some(cdk_common::database::Error::Database(Box::new(err)));
                            still_valid.store(false, std::sync::atomic::Ordering::Release);
                            notify_clone.notify_waiters();
                            return;
                        }
                    };

                    tokio::spawn(async move {
                        let _ = connection.await;
                        still_valid.store(false, std::sync::atomic::Ordering::Release);
                    });

                    let _ = result_clone.set(client);
                    notify_clone.notify_waiters();
                }
            }
        });

        Self {
            error: failed,
            timeout,
            result,
            notify,
        }
    }

    /// Gets the wrapped instance or the connection error. The connection is returned as reference,
    /// and the actual error is returned once, next times a generic error would be returned
    async fn inner(&self) -> Result<&Client, cdk_common::database::Error> {
        if let Some(client) = self.result.get() {
            return Ok(client);
        }

        if let Some(error) = self.error.lock().await.take() {
            return Err(error);
        }

        if timeout(self.timeout, self.notify.notified()).await.is_err() {
            return Err(cdk_common::database::Error::Internal("Timeout".to_owned()));
        }

        // Check result again
        if let Some(client) = self.result.get() {
            Ok(client)
        } else if let Some(error) = self.error.lock().await.take() {
            Err(error)
        } else {
            Err(cdk_common::database::Error::Internal(
                "Failed connection".to_owned(),
            ))
        }
    }
}

#[async_trait::async_trait]
impl DatabaseConnector for PostgresConnection {
    type Transaction = GenericTransactionHandler<Self>;
}

#[async_trait::async_trait]
impl DatabaseExecutor for PostgresConnection {
    fn name() -> &'static str {
        "postgres"
    }

    async fn execute(&self, statement: Statement) -> Result<usize, Error> {
        pg_execute(self.inner().await?, statement).await
    }

    async fn fetch_one(&self, statement: Statement) -> Result<Option<Vec<Column>>, Error> {
        pg_fetch_one(self.inner().await?, statement).await
    }

    async fn fetch_all(&self, statement: Statement) -> Result<Vec<Vec<Column>>, Error> {
        pg_fetch_all(self.inner().await?, statement).await
    }

    async fn pluck(&self, statement: Statement) -> Result<Option<Column>, Error> {
        pg_pluck(self.inner().await?, statement).await
    }

    async fn batch(&self, statement: Statement) -> Result<(), Error> {
        pg_batch(self.inner().await?, statement).await
    }
}

/// Mint DB implementation with PostgreSQL
pub type MintPgDatabase = SQLMintDatabase<PgConnectionPool>;

/// Mint Auth database with Postgres
#[cfg(feature = "auth")]
pub type MintPgAuthDatabase = SQLMintAuthDatabase<PgConnectionPool>;

/// Wallet DB implementation with PostgreSQL
pub type WalletPgDatabase = SQLWalletDatabase<PgConnectionPool>;

#[cfg(test)]
mod test {
    use cdk_common::mint_db_test;
    use once_cell::sync::Lazy;
    use tokio::sync::Mutex;

    use super::*;

    static MIGRATION_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));

    async fn provide_db() -> MintPgDatabase {
        let m = MIGRATION_LOCK.lock().await;
        let db_url = std::env::var("CDK_MINTD_DATABASE_URL")
            .or_else(|_| std::env::var("PG_DB_URL")) // Fallback for compatibility
            .unwrap_or("host=localhost user=test password=test dbname=testdb port=5433".to_owned());
        let db = MintPgDatabase::new(db_url.as_str())
            .await
            .expect("database");
        drop(m);
        db
    }

    mint_db_test!(provide_db);
}
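A short usage sketch of the exported type, mirroring the test helper above. The connection string and error handling are placeholders, not a recommended production setup; `WalletPgDatabase` is exported as well but is not constructed here.

```rust
// Illustrative only: open the mint store against a local Postgres instance
// using a tokio-postgres style connection string, as the test module does.
async fn open_mint_store() -> MintPgDatabase {
    let url = "host=localhost user=test password=test dbname=testdb port=5433";
    MintPgDatabase::new(url)
        .await
        .expect("postgres mint database")
}
```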
crates/cdk-postgres/src/value.rs (Normal file, 130 lines)
@@ -0,0 +1,130 @@
use std::fmt::Debug;

use cdk_sql_common::value::Value;
use tokio_postgres::types::{self, FromSql, ToSql};

#[derive(Debug)]
pub enum PgValue<'a> {
    Null,
    Integer(i64),
    Real(f64),
    Text(&'a str),
    Blob(&'a [u8]),
}

impl<'a> From<&'a Value> for PgValue<'a> {
    fn from(value: &'a Value) -> Self {
        match value {
            Value::Blob(b) => PgValue::Blob(b),
            Value::Text(text) => PgValue::Text(text.as_str()),
            Value::Null => PgValue::Null,
            Value::Integer(i) => PgValue::Integer(*i),
            Value::Real(r) => PgValue::Real(*r),
        }
    }
}

impl<'a> From<PgValue<'a>> for Value {
    fn from(val: PgValue<'a>) -> Self {
        match val {
            PgValue::Blob(value) => Value::Blob(value.to_owned()),
            PgValue::Text(value) => Value::Text(value.to_owned()),
            PgValue::Null => Value::Null,
            PgValue::Integer(n) => Value::Integer(n),
            PgValue::Real(r) => Value::Real(r),
        }
    }
}

impl<'a> FromSql<'a> for PgValue<'a> {
    fn accepts(_ty: &types::Type) -> bool {
        true
    }

    fn from_sql(
        ty: &types::Type,
        raw: &'a [u8],
    ) -> Result<Self, Box<dyn std::error::Error + Sync + Send>> {
        Ok(match *ty {
            types::Type::VARCHAR | types::Type::TEXT | types::Type::BPCHAR | types::Type::NAME => {
                PgValue::Text(<&str as FromSql>::from_sql(ty, raw)?)
            }
            types::Type::BOOL => PgValue::Integer(if <bool as FromSql>::from_sql(ty, raw)? {
                1
            } else {
                0
            }),
            types::Type::INT2 => PgValue::Integer(<i8 as FromSql>::from_sql(ty, raw)? as i64),
            types::Type::INT4 => PgValue::Integer(<i32 as FromSql>::from_sql(ty, raw)? as i64),
            types::Type::INT8 => PgValue::Integer(<i64 as FromSql>::from_sql(ty, raw)?),
            types::Type::BIT_ARRAY | types::Type::BYTEA | types::Type::UNKNOWN => {
                PgValue::Blob(<&[u8] as FromSql>::from_sql(ty, raw)?)
            }
            _ => panic!("Unsupported type {ty:?}"),
        })
    }

    fn from_sql_null(_ty: &types::Type) -> Result<Self, Box<dyn std::error::Error + Sync + Send>> {
        Ok(PgValue::Null)
    }
}

impl ToSql for PgValue<'_> {
    fn to_sql(
        &self,
        ty: &types::Type,
        out: &mut types::private::BytesMut,
    ) -> Result<types::IsNull, Box<dyn std::error::Error + Sync + Send>>
    where
        Self: Sized,
    {
        match self {
            PgValue::Blob(blob) => (*blob).to_sql(ty, out),
            PgValue::Text(text) => (*text).to_sql(ty, out),
            PgValue::Null => Ok(types::IsNull::Yes),
            PgValue::Real(r) => r.to_sql(ty, out),
            PgValue::Integer(i) => match *ty {
                types::Type::BOOL => (*i != 0).to_sql(ty, out),
                types::Type::INT2 => (*i as i16).to_sql(ty, out),
                types::Type::INT4 => (*i as i32).to_sql(ty, out),
                _ => i.to_sql_checked(ty, out),
            },
        }
    }

    fn accepts(_ty: &types::Type) -> bool
    where
        Self: Sized,
    {
        true
    }

    fn encode_format(&self, ty: &types::Type) -> types::Format {
        match self {
            PgValue::Blob(blob) => blob.encode_format(ty),
            PgValue::Text(text) => text.encode_format(ty),
            PgValue::Null => types::Format::Text,
            PgValue::Real(r) => r.encode_format(ty),
            PgValue::Integer(i) => i.encode_format(ty),
        }
    }

    fn to_sql_checked(
        &self,
        ty: &types::Type,
        out: &mut types::private::BytesMut,
    ) -> Result<types::IsNull, Box<dyn std::error::Error + Sync + Send>> {
        match self {
            PgValue::Blob(blob) => blob.to_sql_checked(ty, out),
            PgValue::Text(text) => text.to_sql_checked(ty, out),
            PgValue::Null => Ok(types::IsNull::Yes),
            PgValue::Real(r) => r.to_sql_checked(ty, out),
            PgValue::Integer(i) => match *ty {
                types::Type::BOOL => (*i != 0).to_sql_checked(ty, out),
                types::Type::INT2 => (*i as i16).to_sql_checked(ty, out),
                types::Type::INT4 => (*i as i32).to_sql_checked(ty, out),
                _ => i.to_sql_checked(ty, out),
            },
        }
    }
}
```
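A small sketch of the borrow-in / own-out conversion pattern the two `From` impls above provide. Purely illustrative; the string value is a placeholder.

```rust
use cdk_sql_common::value::Value;

// Round-trip between the backend-agnostic Value and the Postgres-facing PgValue.
fn value_round_trip() {
    let owned = Value::Text("keyset-id".to_owned());

    // Borrow the generic value as a Postgres parameter...
    let param: PgValue<'_> = (&owned).into();

    // ...and convert a Postgres-side value back into the generic form.
    let back: Value = param.into();
    assert!(matches!(back, Value::Text(ref t) if t == "keyset-id"));
}
```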
crates/cdk-postgres/start_db_for_test.sh (Normal file, 30 lines)
@@ -0,0 +1,30 @@
#!/usr/bin/env bash
set -euo pipefail

CONTAINER_NAME="rust-test-pg"
DB_USER="test"
DB_PASS="test"
DB_NAME="testdb"
DB_PORT="5433"

echo "Starting fresh PostgreSQL container..."
docker run -d --rm \
  --name "${CONTAINER_NAME}" \
  -e POSTGRES_USER="${DB_USER}" \
  -e POSTGRES_PASSWORD="${DB_PASS}" \
  -e POSTGRES_DB="${DB_NAME}" \
  -p ${DB_PORT}:5432 \
  postgres:16

echo "Waiting for PostgreSQL to be ready and database '${DB_NAME}' to exist..."
until docker exec -e PGPASSWORD="${DB_PASS}" "${CONTAINER_NAME}" \
  psql -U "${DB_USER}" -d "${DB_NAME}" -c "SELECT 1;" >/dev/null 2>&1; do
  sleep 0.5
done

docker exec -e PGPASSWORD="${DB_PASS}" "${CONTAINER_NAME}" \
  psql -U "${DB_USER}" -d "${DB_NAME}" -c "CREATE DATABASE mintdb;"
docker exec -e PGPASSWORD="${DB_PASS}" "${CONTAINER_NAME}" \
  psql -U "${DB_USER}" -d "${DB_NAME}" -c "CREATE DATABASE mintdb_auth;"

export DATABASE_URL="host=localhost user=${DB_USER} password=${DB_PASS} dbname=${DB_NAME} port=${DB_PORT}"
@@ -182,7 +182,7 @@ where
 
     /// Begin a transaction
     async fn begin(conn: &mut W) -> Result<(), Error> {
-        query("BEGIN")?.execute(conn).await?;
+        query("START TRANSACTION")?.execute(conn).await?;
         Ok(())
     }
 
@@ -1,6 +1,7 @@
 /// @generated
 /// Auto-generated by build.rs
 pub static MIGRATIONS: &[(&str, &str, &str)] = &[
+    ("postgres", "1_init.sql", include_str!(r#"./migrations/postgres/1_init.sql"#)),
     ("sqlite", "1_fix_sqlx_migration.sql", include_str!(r#"./migrations/sqlite/1_fix_sqlx_migration.sql"#)),
     ("sqlite", "20250109143347_init.sql", include_str!(r#"./migrations/sqlite/20250109143347_init.sql"#)),
 ];
@@ -0,0 +1,43 @@
CREATE TABLE IF NOT EXISTS proof (
    y BYTEA PRIMARY KEY,
    keyset_id TEXT NOT NULL,
    secret TEXT NOT NULL,
    c BYTEA NOT NULL,
    state TEXT NOT NULL
);

CREATE INDEX IF NOT EXISTS state_index ON proof(state);
CREATE INDEX IF NOT EXISTS secret_index ON proof(secret);


-- Keysets Table

CREATE TABLE IF NOT EXISTS keyset (
    id TEXT PRIMARY KEY,
    unit TEXT NOT NULL,
    active BOOL NOT NULL,
    valid_from INTEGER NOT NULL,
    valid_to INTEGER,
    derivation_path TEXT NOT NULL,
    max_order INTEGER NOT NULL,
    derivation_path_index INTEGER NOT NULL
);

CREATE INDEX IF NOT EXISTS unit_index ON keyset(unit);
CREATE INDEX IF NOT EXISTS active_index ON keyset(active);


CREATE TABLE IF NOT EXISTS blind_signature (
    y BYTEA PRIMARY KEY,
    amount INTEGER NOT NULL,
    keyset_id TEXT NOT NULL,
    c BYTEA NOT NULL
);

CREATE INDEX IF NOT EXISTS keyset_id_index ON blind_signature(keyset_id);


CREATE TABLE IF NOT EXISTS protected_endpoints (
    endpoint TEXT PRIMARY KEY,
    auth TEXT NOT NULL
);
@@ -262,9 +262,10 @@ where
             FROM
                 keyset
             WHERE
-                active = 1;
+                active = :active;
             "#,
         )?
+        .bind("active", true)
         .pluck(&*conn)
         .await?
         .map(|id| Ok::<_, Error>(column_as_string!(id, Id::from_str, Id::from_bytes)))
@@ -1,6 +1,7 @@
 /// @generated
 /// Auto-generated by build.rs
 pub static MIGRATIONS: &[(&str, &str, &str)] = &[
+    ("postgres", "1_initial.sql", include_str!(r#"./migrations/postgres/1_initial.sql"#)),
     ("sqlite", "1_fix_sqlx_migration.sql", include_str!(r#"./migrations/sqlite/1_fix_sqlx_migration.sql"#)),
     ("sqlite", "20240612124932_init.sql", include_str!(r#"./migrations/sqlite/20240612124932_init.sql"#)),
     ("sqlite", "20240618195700_quote_state.sql", include_str!(r#"./migrations/sqlite/20240618195700_quote_state.sql"#)),
100
crates/cdk-sql-common/src/mint/migrations/postgres/1_initial.sql
Normal file
100
crates/cdk-sql-common/src/mint/migrations/postgres/1_initial.sql
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
CREATE TABLE keyset (
|
||||||
|
id TEXT PRIMARY KEY, unit TEXT NOT NULL,
|
||||||
|
active BOOL NOT NULL, valid_from INTEGER NOT NULL,
|
||||||
|
valid_to INTEGER, derivation_path TEXT NOT NULL,
|
||||||
|
max_order INTEGER NOT NULL, input_fee_ppk INTEGER,
|
||||||
|
derivation_path_index INTEGER
|
||||||
|
);
|
||||||
|
CREATE INDEX unit_index ON keyset(unit);
CREATE INDEX active_index ON keyset(active);

CREATE TABLE melt_quote (
    id TEXT PRIMARY KEY,
    unit TEXT NOT NULL,
    amount INTEGER NOT NULL,
    request TEXT NOT NULL,
    fee_reserve INTEGER NOT NULL,
    expiry INTEGER NOT NULL,
    state TEXT CHECK (
        state IN ('UNPAID', 'PENDING', 'PAID')
    ) NOT NULL DEFAULT 'UNPAID',
    payment_preimage TEXT,
    request_lookup_id TEXT,
    created_time INTEGER NOT NULL DEFAULT 0,
    paid_time INTEGER,
    payment_method TEXT NOT NULL DEFAULT 'bolt11',
    options TEXT,
    request_lookup_id_kind TEXT NOT NULL DEFAULT 'payment_hash'
);
CREATE INDEX melt_quote_state_index ON melt_quote(state);
CREATE UNIQUE INDEX unique_request_lookup_id_melt ON melt_quote(request_lookup_id);

CREATE TABLE melt_request (
    id TEXT PRIMARY KEY, inputs TEXT NOT NULL,
    outputs TEXT, method TEXT NOT NULL,
    unit TEXT NOT NULL
);

CREATE TABLE config (
    id TEXT PRIMARY KEY, value TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS "proof" (
    y BYTEA PRIMARY KEY,
    amount INTEGER NOT NULL,
    keyset_id TEXT NOT NULL,
    secret TEXT NOT NULL,
    c BYTEA NOT NULL,
    witness TEXT,
    state TEXT CHECK (
        state IN (
            'SPENT', 'PENDING', 'UNSPENT', 'RESERVED',
            'UNKNOWN'
        )
    ) NOT NULL,
    quote_id TEXT,
    created_time INTEGER NOT NULL DEFAULT 0
);

CREATE TABLE IF NOT EXISTS "blind_signature" (
    blinded_message BYTEA PRIMARY KEY,
    amount INTEGER NOT NULL,
    keyset_id TEXT NOT NULL,
    c BYTEA NOT NULL,
    dleq_e TEXT,
    dleq_s TEXT,
    quote_id TEXT,
    created_time INTEGER NOT NULL DEFAULT 0
);

CREATE TABLE IF NOT EXISTS "mint_quote" (
    id TEXT PRIMARY KEY, amount INTEGER,
    unit TEXT NOT NULL, request TEXT NOT NULL,
    expiry INTEGER NOT NULL, request_lookup_id TEXT UNIQUE,
    pubkey TEXT, created_time INTEGER NOT NULL DEFAULT 0,
    amount_paid INTEGER NOT NULL DEFAULT 0,
    amount_issued INTEGER NOT NULL DEFAULT 0,
    payment_method TEXT NOT NULL DEFAULT 'BOLT11',
    request_lookup_id_kind TEXT NOT NULL DEFAULT 'payment_hash'
);
CREATE INDEX idx_mint_quote_created_time ON mint_quote(created_time);
CREATE INDEX idx_mint_quote_expiry ON mint_quote(expiry);
CREATE INDEX idx_mint_quote_request_lookup_id ON mint_quote(request_lookup_id);
CREATE INDEX idx_mint_quote_request_lookup_id_and_kind ON mint_quote(
    request_lookup_id, request_lookup_id_kind
);

CREATE TABLE mint_quote_payments (
    id SERIAL PRIMARY KEY,
    quote_id TEXT NOT NULL,
    payment_id TEXT NOT NULL UNIQUE,
    timestamp INTEGER NOT NULL,
    amount INTEGER NOT NULL,
    FOREIGN KEY (quote_id) REFERENCES mint_quote(id)
);
CREATE INDEX idx_mint_quote_payments_payment_id ON mint_quote_payments(payment_id);
CREATE INDEX idx_mint_quote_payments_quote_id ON mint_quote_payments(quote_id);

CREATE TABLE mint_quote_issued (
    id SERIAL PRIMARY KEY,
    quote_id TEXT NOT NULL,
    amount INTEGER NOT NULL,
    timestamp INTEGER NOT NULL,
    FOREIGN KEY (quote_id) REFERENCES mint_quote(id)
);
CREATE INDEX idx_mint_quote_issued_quote_id ON mint_quote_issued(quote_id);

CREATE INDEX idx_melt_quote_request_lookup_id_and_kind ON mint_quote(
    request_lookup_id, request_lookup_id_kind
);
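For orientation only, a query of the following shape (not part of the migration; table and column names taken from the schema above) shows how the new mint_quote_payments ledger relates to the cached amount_paid column on mint_quote:

-- Illustrative only: compare the cached amount_paid with the per-quote sum
-- of payments recorded in mint_quote_payments.
SELECT q.id,
       q.amount_paid,
       COALESCE(SUM(p.amount), 0) AS payments_recorded
FROM mint_quote q
LEFT JOIN mint_quote_payments p ON p.quote_id = q.id
GROUP BY q.id, q.amount_paid;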
@@ -255,6 +255,9 @@ where
         ys: &[PublicKey],
         _quote_id: Option<Uuid>,
     ) -> Result<(), Self::Err> {
+        if ys.is_empty() {
+            return Ok(());
+        }
         let total_deleted = query(
             r#"
             DELETE FROM proof WHERE y IN (:ys) AND state NOT IN (:exclude_state)
@@ -314,9 +317,14 @@ where
         // Get payment IDs and timestamps from the mint_quote_payments table
         query(
             r#"
-            SELECT payment_id, timestamp, amount
-            FROM mint_quote_payments
-            WHERE quote_id=:quote_id;
+            SELECT
+                payment_id,
+                timestamp,
+                amount
+            FROM
+                mint_quote_payments
+            WHERE
+                quote_id=:quote_id
             "#,
         )?
         .bind("quote_id", quote_id.as_hyphenated().to_string())
@@ -407,12 +415,12 @@ where
     }

     async fn set_active_keyset(&mut self, unit: CurrencyUnit, id: Id) -> Result<(), Error> {
-        query(r#"UPDATE keyset SET active=FALSE WHERE unit IS :unit"#)?
+        query(r#"UPDATE keyset SET active=FALSE WHERE unit = :unit"#)?
             .bind("unit", unit.to_string())
             .execute(&self.inner)
             .await?;

-        query(r#"UPDATE keyset SET active=TRUE WHERE unit IS :unit AND id IS :id"#)?
+        query(r#"UPDATE keyset SET active=TRUE WHERE unit = :unit AND id = :id"#)?
             .bind("unit", unit.to_string())
             .bind("id", id.to_string())
             .execute(&self.inner)
@@ -443,7 +451,8 @@ where
     async fn get_active_keyset_id(&self, unit: &CurrencyUnit) -> Result<Option<Id>, Self::Err> {
         let conn = self.pool.get().map_err(|e| Error::Database(Box::new(e)))?;
         Ok(
-            query(r#" SELECT id FROM keyset WHERE active = 1 AND unit IS :unit"#)?
+            query(r#" SELECT id FROM keyset WHERE active = :active AND unit = :unit"#)?
+                .bind("active", true)
                 .bind("unit", unit.to_string())
                 .pluck(&*conn)
                 .await?
@@ -458,7 +467,9 @@ where

     async fn get_active_keysets(&self) -> Result<HashMap<CurrencyUnit, Id>, Self::Err> {
         let conn = self.pool.get().map_err(|e| Error::Database(Box::new(e)))?;
-        Ok(query(r#"SELECT id, unit FROM keyset WHERE active = 1"#)?
+        Ok(
+            query(r#"SELECT id, unit FROM keyset WHERE active = :active"#)?
+                .bind("active", true)
                 .fetch_all(&*conn)
                 .await?
                 .into_iter()
@@ -468,7 +479,8 @@ where
                         column_as_string!(&row[0], Id::from_str, Id::from_bytes),
                     ))
                 })
-                .collect::<Result<HashMap<_, _>, Error>>()?)
+                .collect::<Result<HashMap<_, _>, Error>>()?,
+        )
     }

     async fn get_keyset_info(&self, id: &Id) -> Result<Option<MintKeySetInfo>, Self::Err> {
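The switch from the literal `active = 1` to a bound parameter matters because PostgreSQL does not coerce integers to booleans and restricts `IS` to NULL and boolean tests. Assuming the keyset.active column is declared as a boolean on PostgreSQL (as it is in the wallet schema below), a minimal standalone illustration of the portable comparison (the crate binds a Rust bool instead of writing a literal):

-- Illustrative only: a boolean comparison accepted by both PostgreSQL and
-- SQLite, whereas "WHERE active = 1" fails on PostgreSQL with
-- "operator does not exist: boolean = integer".
SELECT id, unit
FROM keyset
WHERE active = TRUE;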
@@ -658,7 +670,6 @@ where
             UPDATE mint_quote
             SET amount_issued = :amount_issued
             WHERE id = :quote_id
-            FOR UPDATE
             "#,
         )?
         .bind("amount_issued", new_amount_issued.to_i64())
@@ -1,6 +1,7 @@
 /// @generated
 /// Auto-generated by build.rs
 pub static MIGRATIONS: &[(&str, &str, &str)] = &[
+    ("postgres", "1_initial.sql", include_str!(r#"./migrations/postgres/1_initial.sql"#)),
     ("sqlite", "1_fix_sqlx_migration.sql", include_str!(r#"./migrations/sqlite/1_fix_sqlx_migration.sql"#)),
     ("sqlite", "20240612132920_init.sql", include_str!(r#"./migrations/sqlite/20240612132920_init.sql"#)),
     ("sqlite", "20240618200350_quote_state.sql", include_str!(r#"./migrations/sqlite/20240618200350_quote_state.sql"#)),
@@ -0,0 +1,80 @@
CREATE TABLE mint (
    mint_url TEXT PRIMARY KEY, name TEXT,
    pubkey BYTEA, version TEXT, description TEXT,
    description_long TEXT, contact TEXT,
    nuts TEXT, motd TEXT, icon_url TEXT,
    mint_time INTEGER, urls TEXT, tos_url TEXT
);

CREATE TABLE keyset (
    id TEXT PRIMARY KEY,
    mint_url TEXT NOT NULL,
    unit TEXT NOT NULL,
    active BOOL NOT NULL,
    counter INTEGER NOT NULL DEFAULT 0,
    input_fee_ppk INTEGER,
    final_expiry INTEGER DEFAULT NULL,
    FOREIGN KEY(mint_url) REFERENCES mint(mint_url) ON UPDATE CASCADE ON DELETE CASCADE
);

CREATE TABLE melt_quote (
    id TEXT PRIMARY KEY,
    unit TEXT NOT NULL,
    amount INTEGER NOT NULL,
    request TEXT NOT NULL,
    fee_reserve INTEGER NOT NULL,
    expiry INTEGER NOT NULL,
    state TEXT CHECK (
        state IN ('UNPAID', 'PENDING', 'PAID')
    ) NOT NULL DEFAULT 'UNPAID',
    payment_preimage TEXT
);

CREATE TABLE key (
    id TEXT PRIMARY KEY, keys TEXT NOT NULL
);

CREATE INDEX melt_quote_state_index ON melt_quote(state);

CREATE TABLE IF NOT EXISTS "proof" (
    y BYTEA PRIMARY KEY,
    mint_url TEXT NOT NULL,
    state TEXT CHECK (
        state IN (
            'SPENT', 'UNSPENT', 'PENDING', 'RESERVED',
            'PENDING_SPENT'
        )
    ) NOT NULL,
    spending_condition TEXT,
    unit TEXT NOT NULL,
    amount INTEGER NOT NULL,
    keyset_id TEXT NOT NULL,
    secret TEXT NOT NULL,
    c BYTEA NOT NULL,
    witness TEXT,
    dleq_e BYTEA,
    dleq_s BYTEA,
    dleq_r BYTEA
);

CREATE TABLE transactions (
    id BYTEA PRIMARY KEY,
    mint_url TEXT NOT NULL,
    direction TEXT CHECK (
        direction IN ('Incoming', 'Outgoing')
    ) NOT NULL,
    amount INTEGER NOT NULL,
    fee INTEGER NOT NULL,
    unit TEXT NOT NULL,
    ys BYTEA NOT NULL,
    timestamp INTEGER NOT NULL,
    memo TEXT,
    metadata TEXT
);
CREATE INDEX mint_url_index ON transactions(mint_url);
CREATE INDEX direction_index ON transactions(direction);
CREATE INDEX unit_index ON transactions(unit);
CREATE INDEX timestamp_index ON transactions(timestamp);

CREATE TABLE IF NOT EXISTS "mint_quote" (
    id TEXT PRIMARY KEY, mint_url TEXT NOT NULL,
    payment_method TEXT NOT NULL DEFAULT 'bolt11',
    amount INTEGER, unit TEXT NOT NULL,
    request TEXT NOT NULL, state TEXT NOT NULL,
    expiry INTEGER NOT NULL, amount_paid INTEGER NOT NULL DEFAULT 0,
    amount_issued INTEGER NOT NULL DEFAULT 0,
    secret_key TEXT
);
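As a quick orientation on the wallet schema above, a query like the following (illustrative only, not part of the migration) returns the spendable balance per mint and unit from unspent proofs:

-- Illustrative only: per-mint spendable balance from the wallet's proof table.
SELECT mint_url, unit, SUM(amount) AS balance
FROM "proof"
WHERE state = 'UNSPENT'
GROUP BY mint_url, unit;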
@@ -193,7 +193,9 @@ async fn reset_proofs_to_original_state(
         tx.update_proofs_states(&ys, state).await?;
     }

+    if !unknown_proofs.is_empty() {
         tx.remove_proofs(&unknown_proofs, None).await?;
+    }

     Ok(())
 }
@@ -344,7 +344,7 @@ impl Wallet {
             .swap(
                 Some(exact_amount),
                 SplitTarget::None,
-                vec![proof.clone()],
+                vec![proof],
                 None,
                 false,
             )
52	docker-compose.postgres.yaml	Normal file
@@ -0,0 +1,52 @@
# Docker Compose configuration for CDK Mint with PostgreSQL
# Usage: docker-compose -f docker-compose.postgres.yaml up

services:
  # CDK Mint service with PostgreSQL
  mintd:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: mint
    ports:
      - "8085:8085"
    environment:
      - CDK_MINTD_URL=https://example.com
      - CDK_MINTD_LN_BACKEND=fakewallet
      - CDK_MINTD_LISTEN_HOST=0.0.0.0
      - CDK_MINTD_LISTEN_PORT=8085
      - CDK_MINTD_MNEMONIC=
      # PostgreSQL database configuration
      - CDK_MINTD_DATABASE=postgres
      - CDK_MINTD_DATABASE_URL=postgresql://cdk_user:cdk_password@postgres:5432/cdk_mint
      # Cache configuration
      - CDK_MINTD_CACHE_BACKEND=memory
    command: ["cdk-mintd"]
    depends_on:
      postgres:
        condition: service_healthy

  # PostgreSQL database service
  postgres:
    image: postgres:16-alpine
    container_name: mint_postgres
    restart: unless-stopped
    environment:
      - POSTGRES_USER=cdk_user
      - POSTGRES_PASSWORD=cdk_password
      - POSTGRES_DB=cdk_mint
      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U cdk_user -d cdk_mint"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

volumes:
  postgres_data:
    driver: local
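Once the stack above is running, the database is reachable from the host on the mapped port with the credentials from the compose file; a trivial check (illustrative only) can be run through psql against postgresql://cdk_user:cdk_password@localhost:5432/cdk_mint:

-- Illustrative only: confirm the provisioned database is reachable.
SELECT current_database(), version();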
@@ -1,4 +1,5 @@
 services:
+  # CDK Mint service
   mintd:
     build:
       context: .
@@ -12,22 +13,68 @@ services:
       - CDK_MINTD_LISTEN_HOST=0.0.0.0
       - CDK_MINTD_LISTEN_PORT=8085
       - CDK_MINTD_MNEMONIC=
-      - CDK_MINTD_DATABASE=redb
+      # Database configuration - choose one:
+      # Option 1: SQLite (embedded, no additional setup needed)
+      - CDK_MINTD_DATABASE=sqlite
+      # Option 2: ReDB (embedded, no additional setup needed)
+      # - CDK_MINTD_DATABASE=redb
+      # Option 3: PostgreSQL (requires postgres service, enable with: docker-compose --profile postgres up)
+      # - CDK_MINTD_DATABASE=postgres
+      # - CDK_MINTD_DATABASE_URL=postgresql://cdk_user:cdk_password@postgres:5432/cdk_mint
+      # Cache configuration
       - CDK_MINTD_CACHE_BACKEND=memory
+      # For Redis cache (requires redis service, enable with: docker-compose --profile redis up):
       # - CDK_MINTD_CACHE_REDIS_URL=redis://redis:6379
       # - CDK_MINTD_CACHE_REDIS_KEY_PREFIX=cdk-mintd
     command: ["cdk-mintd"]
+    # Uncomment when using PostgreSQL:
     # depends_on:
-    #   - redis
+    #   - postgres
+
+  # PostgreSQL database service
+  # Enable with: docker-compose --profile postgres up
+  # postgres:
+  #   image: postgres:16-alpine
+  #   container_name: mint_postgres
+  #   restart: unless-stopped
+  #   profiles:
+  #     - postgres
+  #   environment:
+  #     - POSTGRES_USER=cdk_user
+  #     - POSTGRES_PASSWORD=cdk_password
+  #     - POSTGRES_DB=cdk_mint
+  #     - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
+  #   ports:
+  #     - "5432:5432"
+  #   volumes:
+  #     - postgres_data:/var/lib/postgresql/data
+  #   healthcheck:
+  #     test: ["CMD-SHELL", "pg_isready -U cdk_user -d cdk_mint"]
+  #     interval: 10s
+  #     timeout: 5s
+  #     retries: 5
+
+  # Redis cache service (optional)
+  # Enable with: docker-compose --profile redis up
   # redis:
   #   image: redis:7-alpine
   #   container_name: mint_redis
+  #   restart: unless-stopped
+  #   profiles:
+  #     - redis
   #   ports:
   #     - "6379:6379"
   #   volumes:
   #     - redis_data:/data
   #   command: redis-server --save 60 1 --loglevel warning
+  #   healthcheck:
+  #     test: ["CMD", "redis-cli", "ping"]
+  #     interval: 10s
+  #     timeout: 3s
+  #     retries: 5
+
-# volumes:
+volumes:
+  postgres_data:
+    driver: local
   # redis_data:
+  #   driver: local
@@ -28,6 +28,10 @@ cleanup() {
         echo "Temp directory removed: $CDK_ITESTS_DIR"
     fi

+    if [ -n "$CONTAINER_NAME" ]; then
+        docker rm "${CONTAINER_NAME}" -f
+    fi
+
    # Unset all environment variables
    unset CDK_ITESTS_DIR
    unset CDK_TEST_MINT_URL
@@ -56,6 +60,31 @@ cargo build -p cdk-integration-tests

 # Start the fake mint binary with the new Rust-based approach
 echo "Starting fake mint using Rust binary..."

+if [ "${CDK_MINTD_DATABASE}" = "POSTGRES" ]; then
+    export CONTAINER_NAME="rust-fake-test-pg"
+    DB_USER="test"
+    DB_PASS="test"
+    DB_NAME="testdb"
+    DB_PORT="15433"
+
+    docker run -d --rm \
+        --name "${CONTAINER_NAME}" \
+        -e POSTGRES_USER="${DB_USER}" \
+        -e POSTGRES_PASSWORD="${DB_PASS}" \
+        -e POSTGRES_DB="${DB_NAME}" \
+        -p ${DB_PORT}:5432 \
+        postgres:16
+    export CDK_MINTD_DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}"
+
+    echo "Waiting for PostgreSQL to be ready and database '${DB_NAME}' to exist..."
+    until docker exec -e PGPASSWORD="${DB_PASS}" "${CONTAINER_NAME}" \
+        psql -U "${DB_USER}" -d "${DB_NAME}" -c "SELECT 1;" >/dev/null 2>&1; do
+        sleep 0.5
+    done
+    echo "PostgreSQL container is ready"
+fi
+
 if [ "$2" = "external_signatory" ]; then
     echo "Starting with external signatory support"