diff --git a/api_auth_docker/Dockerfile b/api_auth_docker/Dockerfile
index 1c2aa27..99f16b7 100644
--- a/api_auth_docker/Dockerfile
+++ b/api_auth_docker/Dockerfile
@@ -1,7 +1,6 @@
FROM nginx:1.18.0-alpine
RUN apk add --update --no-cache \
- bash \
git \
openssl \
fcgiwrap \
diff --git a/api_auth_docker/entrypoint.sh b/api_auth_docker/entrypoint.sh
index 56e93fb..ee53172 100644
--- a/api_auth_docker/entrypoint.sh
+++ b/api_auth_docker/entrypoint.sh
@@ -1,17 +1,16 @@
-#!/bin/bash
+#!/bin/sh
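+# Wait for the proxy healthcheck to create its readiness flag before configuring fcgiwrap and nginx.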
-user='nginx'
+while [ ! -f "/container_monitor/proxy_ready" ]; do echo "proxy not ready" ; sleep 10 ; done
-if [[ $1 ]]; then
- IFS=':' read -ra arr <<< "$1"
-
- if [[ ${arr[0]} ]]; then
- user=${arr[0]};
- fi
+echo "proxy ready"
+if [ -n "$1" ]; then
+ user=$(echo "$1" | cut -d ':' -f 1)
+else
+ user='nginx'
fi
spawn-fcgi -M 0660 -s /var/run/fcgiwrap.socket -u $user -g nginx -U $user -- `which fcgiwrap`
chmod -R g+rw /var/run/fcgiwrap.socket /etc/nginx/conf.d/*
chown -R :nginx /etc/nginx/conf.d/*
-nginx -g "daemon off;"
+exec nginx -g "daemon off;"
diff --git a/cyphernodeconf_docker/help.json b/cyphernodeconf_docker/help.json
index eeb89fe..9e41b6a 100644
--- a/cyphernodeconf_docker/help.json
+++ b/cyphernodeconf_docker/help.json
@@ -17,6 +17,9 @@
"gatekeeper_edit_apiproperties": "If you know what you are doing, it is possible to manually edit the API endpoints/groups authorization. (Not recommended)",
"gatekeeper_apiproperties": "You are about to edit the api.properties file. The format of the file is pretty simple: for each action, you will find what access group can access it. Admin group can do what Spender group can, and Spender group can do what Watcher group can. Internal group is for the endpoints accessible only within the Docker network, like the backoffice tasks used by the Cron container. The access groups for each API id/key are found in the keys.properties file.",
"gatekeeper_cns": "I use domain names and/or IP addresses to create valid TLS certificates. For example, if https://cyphernodehost/getbestblockhash and https://192.168.7.44/getbestblockhash will be used, enter cyphernodehost, 192.168.7.44 as a possible domains. 127.0.0.1, localhost, gatekeeper will be automatically added to your list. Make sure the provided domain names are in your DNS or client's hosts file and is reachable.",
+ "postgres_datapath": "The Cyphernode's Postgres files will be stored in a container's mounted directory. Please provide the local mounted path to that directory. If running on OSX, check mountable directories in Docker's File Sharing configs.",
+ "postgres_datapath_custom": "Provide the full path name where Postgres files will be saved.",
+  "postgres_password": "Password of the 'cyphernode' PostgreSQL user, used by Cyphernode when connecting to the database.",
"logs_datapath": "The Cyphernode's log files will be stored in a container's mounted directory. Please provide the local mounted path to that directory. If running on OSX, check mountable directories in Docker's File Sharing configs.",
"logs_datapath_custom": "Provide the full path name where Cyphernodes log files will be saved.",
"traefik_datapath": "The Traefik's files will be stored in a container's mounted directory. Please provide the local mounted path to that directory. If running on OSX, check mountable directories in Docker's File Sharing configs.",
diff --git a/cyphernodeconf_docker/lib/app.js b/cyphernodeconf_docker/lib/app.js
index 2e34f18..e9beae1 100644
--- a/cyphernodeconf_docker/lib/app.js
+++ b/cyphernodeconf_docker/lib/app.js
@@ -87,6 +87,7 @@ module.exports = class App {
proxy_version: process.env.PROXY_VERSION,
proxycron_version: process.env.PROXYCRON_VERSION,
pycoin_version: process.env.PYCOIN_VERSION,
+ postgres_version: process.env.POSTGRES_VERSION,
traefik_version: process.env.TRAEFIK_VERSION,
mosquitto_version: process.env.MOSQUITTO_VERSION,
otsclient_version: process.env.OTSCLIENT_VERSION,
@@ -148,6 +149,7 @@ module.exports = class App {
'cyphernode/proxy': this.sessionData.proxy_version,
'cyphernode/proxycron': this.sessionData.proxycron_version,
'cyphernode/pycoin': this.sessionData.pycoin_version,
+ 'cyphernode/postgres': this.sessionData.postgres_version,
'cyphernode/otsclient': this.sessionData.otsclient_version,
'traefik': this.sessionData.traefik_version,
'cyphernode/clightning': this.sessionData.lightning_version,
@@ -359,6 +361,7 @@ module.exports = class App {
const pathProps = [
'gatekeeper_datapath',
+ 'postgres_datapath',
'logs_datapath',
'traefik_datapath',
'tor_datapath',
@@ -483,6 +486,13 @@ module.exports = class App {
networks: ['cyphernodenet'],
docker: 'cyphernode/pycoin:'+this.config.docker_versions['cyphernode/pycoin']
},
+ {
+ name: 'Postgres',
+ label: 'postgres',
+ host: 'postgres',
+ networks: ['cyphernodenet'],
+ docker: 'postgres:'+this.config.docker_versions['cyphernode/postgres']
+ },
{
name: 'Notifier',
label: 'notifier',
diff --git a/cyphernodeconf_docker/lib/config.js b/cyphernodeconf_docker/lib/config.js
index 73cfd28..1cd4ff6 100644
--- a/cyphernodeconf_docker/lib/config.js
+++ b/cyphernodeconf_docker/lib/config.js
@@ -12,10 +12,11 @@ const schemas = {
'0.2.2': require('../schema/config-v0.2.2.json'),
'0.2.3': require('../schema/config-v0.2.3.json'),
'0.2.4': require('../schema/config-v0.2.4.json'),
- '0.2.5': require('../schema/config-v0.2.5.json')
+ '0.2.5': require('../schema/config-v0.2.5.json'),
+ '0.2.6': require('../schema/config-v0.2.6.json')
};
-const versionHistory = [ '0.1.0', '0.2.0', '0.2.2', '0.2.3', '0.2.4', '0.2.5' ];
+const versionHistory = [ '0.1.0', '0.2.0', '0.2.2', '0.2.3', '0.2.4', '0.2.5', '0.2.6' ];
const defaultSchemaVersion=versionHistory[0];
const latestSchemaVersion=versionHistory[versionHistory.length-1];
@@ -46,7 +47,8 @@ module.exports = class Config {
'0.2.0->0.2.2': this.migrate_0_2_0_to_0_2_2,
'0.2.2->0.2.3': this.migrate_0_2_2_to_0_2_3,
'0.2.3->0.2.4': this.migrate_0_2_3_to_0_2_4,
- '0.2.4->0.2.5': this.migrate_0_2_4_to_0_2_5
+ '0.2.4->0.2.5': this.migrate_0_2_4_to_0_2_5,
+ '0.2.5->0.2.6': this.migrate_0_2_5_to_0_2_6
};
this.setData( { schema_version: latestSchemaVersion } );
@@ -247,4 +249,12 @@ module.exports = class Config {
this.data.schema_version = '0.2.5';
}
+ async migrate_0_2_5_to_0_2_6() {
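+    // v0.2.6 only adds the Postgres settings, which are collected by the prompters,
+    // so the migration just bumps the version.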
+ const currentVersion = this.data.schema_version;
+ if( currentVersion != '0.2.5' ) {
+ return;
+ }
+ this.data.schema_version = '0.2.6';
+ }
+
};
diff --git a/cyphernodeconf_docker/prompters/200_postgres.js b/cyphernodeconf_docker/prompters/200_postgres.js
new file mode 100644
index 0000000..9b9540b
--- /dev/null
+++ b/cyphernodeconf_docker/prompters/200_postgres.js
@@ -0,0 +1,30 @@
+const chalk = require('chalk');
+
+const name = 'postgres';
+
+const capitalise = function( txt ) {
+ return txt.charAt(0).toUpperCase() + txt.substr(1);
+};
+
+const prefix = function() {
+ return chalk.green(capitalise(name)+': ');
+};
+
+module.exports = {
+ name: function() {
+ return name;
+ },
+ prompts: function( utils ) {
+ return [
+ {
+ type: 'password',
+ name: 'postgres_password',
+ default: utils.getDefault( 'postgres_password' ),
+      message: prefix()+'Password of the Postgres cyphernode user?'+utils.getHelp('postgres_password'),
+ filter: utils.trimFilter,
+ }];
+ },
+ templates: function( props ) {
+ return ['pgpass'];
+ }
+};
\ No newline at end of file
diff --git a/cyphernodeconf_docker/prompters/999_installer.js b/cyphernodeconf_docker/prompters/999_installer.js
index 67ba104..1301cff 100644
--- a/cyphernodeconf_docker/prompters/999_installer.js
+++ b/cyphernodeconf_docker/prompters/999_installer.js
@@ -30,6 +30,44 @@ module.exports = {
value: "docker"
}]
},
+ {
+ when: installerDocker,
+ type: 'list',
+ name: 'postgres_datapath',
+ default: utils.getDefault( 'postgres_datapath' ),
+ choices: [
+ {
+ name: utils.setupDir()+"/cyphernode/postgres",
+ value: utils.setupDir()+"/cyphernode/postgres"
+ },
+ {
+ name: utils.defaultDataDirBase()+"/cyphernode/postgres",
+ value: utils.defaultDataDirBase()+"/cyphernode/postgres"
+ },
+ {
+ name: utils.defaultDataDirBase()+"/.cyphernode/postgres",
+ value: utils.defaultDataDirBase()+"/.cyphernode/postgres"
+ },
+ {
+ name: utils.defaultDataDirBase()+"/postgres",
+ value: utils.defaultDataDirBase()+"/postgres"
+ },
+ {
+ name: "Custom path",
+ value: "_custom"
+ }
+ ],
+ message: prefix()+'Where do you want to store your Postgres files?'+utils.getHelp('postgres_datapath'),
+ },
+ {
+ when: (props)=>{ return installerDocker(props) && (props.postgres_datapath === '_custom') },
+ type: 'input',
+ name: 'postgres_datapath_custom',
+ default: utils.getDefault( 'postgres_datapath_custom' ),
+ filter: utils.trimFilter,
+ validate: utils.pathValidator,
+ message: prefix()+'Custom path for Postgres files?'+utils.getHelp('postgres_datapath_custom'),
+ },
{
when: installerDocker,
type: 'list',
diff --git a/cyphernodeconf_docker/schema/config-v0.2.6.json b/cyphernodeconf_docker/schema/config-v0.2.6.json
new file mode 100644
index 0000000..3fdb34c
--- /dev/null
+++ b/cyphernodeconf_docker/schema/config-v0.2.6.json
@@ -0,0 +1,726 @@
+{
+ "definitions": {},
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://cyphernode.io/config-v0.2.6.json",
+ "type": "object",
+ "title": "Cyphernode config file structure v0.2.6",
+ "additionalProperties": false,
+ "required": [
+ "schema_version",
+ "setup_version",
+ "features",
+ "net",
+ "use_xpub",
+ "installer_mode",
+ "run_as_different_user",
+ "docker_mode",
+ "docker_versions",
+ "adminhash",
+ "bitcoin_rpcuser",
+ "bitcoin_rpcpassword",
+ "bitcoin_prune",
+ "bitcoin_datapath",
+ "bitcoin_mode",
+ "bitcoin_expose",
+ "gatekeeper_expose",
+ "gatekeeper_keys",
+ "gatekeeper_sslcert",
+ "gatekeeper_sslkey",
+ "gatekeeper_cns",
+ "gatekeeper_clientkeyspassword",
+ "gatekeeper_datapath",
+ "gatekeeper_port",
+ "proxy_datapath",
+ "postgres_password",
+ "postgres_datapath",
+ "logs_datapath",
+ "traefik_datapath",
+ "traefik_http_port",
+ "traefik_https_port"
+ ],
+ "allOf": [
+ {
+ "if": {
+ "properties": {
+ "run_as_different_user": {
+ "enum": [
+ true
+ ]
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "username"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "use_xpub": {
+ "enum": [
+ true
+ ]
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "xpub",
+ "derivation_path"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "bitcoin_prune": {
+ "enum": [
+ true
+ ]
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "bitcoin_prune_size"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "features": {
+ "contains": {
+ "enum": [
+ "tor"
+ ]
+ }
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "tor_datapath",
+ "torifyables",
+ "clearnet"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "features": {
+ "contains": {
+ "enum": [
+ "lightning"
+ ]
+ }
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "lightning_announce",
+ "lightning_expose",
+ "lightning_implementation",
+ "lightning_datapath",
+ "lightning_nodename",
+ "lightning_nodecolor"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "features": {
+ "contains": {
+ "enum": [
+ "otsclient"
+ ]
+ }
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "otsclient_datapath"
+ ]
+ }
+ }
+ ],
+ "properties": {
+    "schema_version": {
+      "type": "string",
+      "enum": [
+        "0.2.6"
+      ],
+      "default": "0.2.6",
+      "examples": [
+        "0.2.6"
+      ]
+    },
+ "setup_version": {
+ "type": "string",
+ "examples": [
+ "v0.2.0"
+ ]
+ },
+ "docker_versions": {
+ "$id": "#/properties/dockerVersions",
+ "type": "object",
+ "title": "All versions of the docker containers",
+ "default": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "features": {
+ "$id": "#/properties/features",
+ "type": "array",
+ "title": "The optional features of this cyphernode",
+ "default": ["specter"],
+ "items": {
+ "$id": "#/properties/features/items",
+ "type": "string",
+ "enum": [
+ "tor",
+ "lightning",
+ "otsclient",
+ "batcher",
+ "specter"
+ ],
+ "title": "The feature",
+ "default": "",
+ "examples": [
+ "tor",
+ "lightning",
+ "otsclient",
+ "batcher",
+ "specter"
+ ]
+ }
+ },
+ "torifyables": {
+ "$id": "#/properties/torifyables",
+ "type": "array",
+ "title": "The Torified features of this cyphernode",
+ "default": [],
+ "items": {
+ "$id": "#/properties/torifyables/items",
+ "type": "string",
+ "enum": [
+ "tor_traefik",
+ "tor_bitcoin",
+ "tor_lightning",
+ "tor_otsoperations",
+ "tor_otswebhooks",
+ "tor_addrwatcheswebhooks",
+ "tor_txidwatcheswebhooks"
+ ],
+ "title": "The Torified feature",
+ "default": "",
+ "examples": [
+ "tor_traefik",
+ "tor_bitcoin",
+ "tor_lightning",
+ "tor_otsoperations",
+ "tor_otswebhooks",
+ "tor_addrwatcheswebhooks",
+ "tor_txidwatcheswebhooks"
+ ]
+ }
+ },
+ "clearnet": {
+ "$id": "#/properties/clearnet",
+ "type": "array",
+ "title": "The clearnet-allowed Torified features of this cyphernode",
+ "default": [],
+ "items": {
+ "$id": "#/properties/clearnet/items",
+ "type": "string",
+ "enum": [
+ "clearnet_bitcoin",
+ "clearnet_lightning"
+ ],
+ "title": "The clearnet-allowed Torified feature",
+ "default": "",
+ "examples": [
+ "clearnet_bitcoin",
+ "clearnet_lightning"
+ ]
+ }
+ },
+ "net": {
+ "$id": "#/properties/net",
+ "type": "string",
+ "enum": [
+ "testnet",
+ "mainnet",
+ "regtest"
+ ],
+ "title": "The net cyphernode is running on",
+ "default": "testnet",
+ "examples": [
+ "testnet"
+ ]
+ },
+ "use_xpub": {
+ "$id": "#/properties/use_xpub",
+ "type": "boolean",
+ "title": "Use xpub key?",
+ "default": false,
+ "examples": [
+ false
+ ]
+ },
+ "xpub": {
+ "$id": "#/properties/xpub",
+ "type": "string",
+ "title": "Default xpub to derive addresses from",
+ "pattern": "^(\\w+)$"
+ },
+ "derivation_path": {
+ "$id": "#/properties/derivation_path",
+ "type": "string",
+ "title": "Default derivation path",
+ "default": "0/n",
+ "examples": [
+ "0/n"
+ ]
+ },
+ "installer_mode": {
+ "$id": "#/properties/installer_mode",
+ "type": "string",
+ "enum": [
+ "docker"
+ ],
+ "title": "Install mode",
+ "default": "docker",
+ "examples": [
+ "docker"
+ ]
+ },
+ "run_as_different_user": {
+ "$id": "#/properties/run_as_different_user",
+ "type": "boolean",
+ "title": "Run as different user",
+ "default": true,
+ "examples": [
+ true
+ ]
+ },
+ "username": {
+ "$id": "#/properties/username",
+ "type": "string",
+ "title": "Username to run under",
+ "default": "cyphernode",
+ "examples": [
+ "cyphernode"
+ ]
+ },
+ "docker_mode": {
+ "$id": "#/properties/docker_mode",
+ "type": "string",
+ "enum": [
+ "swarm",
+ "compose"
+ ],
+ "title": "How to run the containers",
+ "default": "swarm",
+ "examples": [
+ "compose"
+ ]
+ },
+ "bitcoin_rpcuser": {
+ "$id": "#/properties/bitcoin_rpcuser",
+ "type": "string",
+ "title": "Bitcoin rpc user",
+ "default": "bitcoin",
+ "examples": [
+ "bitcoin"
+ ]
+ },
+ "bitcoin_rpcpassword": {
+ "$id": "#/properties/bitcoin_rpcpassword",
+ "type": "string",
+ "title": "Bitcoin rpc password",
+ "default": "CHANGEME",
+ "examples": [
+ "CHANGEME"
+ ]
+ },
+ "bitcoin_uacomment": {
+ "$id": "#/properties/bitcoin_uacomment",
+ "type": "string",
+ "title": "Bitcoin user agent comment",
+ "examples": [
+ "cyphernode"
+ ]
+ },
+ "bitcoin_prune": {
+ "$id": "#/properties/bitcoin_prune",
+ "type": "boolean",
+ "title": "Bitcoin prune",
+ "default": false,
+ "examples": [
+ "false"
+ ]
+ },
+ "bitcoin_prune_size": {
+ "$id": "#/properties/bitcoin_prune_size",
+ "type": "integer",
+ "title": "Bitcoin prune size",
+ "default": 550,
+ "examples": [
+ 550
+ ]
+ },
+ "bitcoin_datapath": {
+ "$id": "#/properties/bitcoin_datapath",
+ "type": "string",
+ "title": "Bitcoin datapath",
+ "examples": [
+ "/tmp/cyphernode/bitcoin"
+ ]
+ },
+ "bitcoin_datapath_custom": {
+ "$id": "#/properties/bitcoin_datapath_custom",
+ "type": "string",
+ "title": "Bitcoin custom datapath",
+ "examples": [
+ "/tmp/cyphernode/bitcoin"
+ ]
+ },
+ "lightning_datapath": {
+ "$id": "#/properties/lightning_datapath",
+ "type": "string",
+ "title": "Lightning datapath",
+ "examples": [
+ "/tmp/cyphernode/lightning"
+ ]
+ },
+ "lightning_datapath_custom": {
+ "$id": "#/properties/lightning_datapath_custom",
+ "type": "string",
+ "title": "Lightning custom datapath",
+ "examples": [
+ "/tmp/cyphernode/lightning"
+ ]
+ },
+ "proxy_datapath": {
+ "$id": "#/properties/proxy_datapath",
+ "type": "string",
+ "title": "Proxy datapath",
+ "examples": [
+ "/tmp/cyphernode/proxy"
+ ]
+ },
+ "proxy_datapath_custom": {
+ "$id": "#/properties/proxy_datapath_custom",
+ "type": "string",
+ "title": "Proxy custom datapath",
+ "examples": [
+ "/tmp/cyphernode/proxy"
+ ]
+ },
+ "otsclient_datapath": {
+ "$id": "#/properties/otsclient_datapath",
+ "type": "string",
+ "title": "OTS Client datapath",
+ "examples": [
+ "/tmp/cyphernode/otsclient"
+ ]
+ },
+ "otsclient_datapath_custom": {
+ "$id": "#/properties/otsclient_datapath_custom",
+ "type": "string",
+ "title": "OTS Client custom datapath",
+ "examples": [
+ "/tmp/cyphernode/otsclient"
+ ]
+ },
+ "traefik_http_port": {
+      "$id": "#/properties/traefik_http_port",
+ "type": "integer",
+ "title": "Traefik HTTP port",
+ "default": 80,
+ "examples": [
+ 80
+ ]
+ },
+ "traefik_https_port": {
+ "$id": "#/properties/traefik_https_port",
+ "type": "integer",
+ "title": "Traefik HTTPS port",
+ "default": 443,
+ "examples": [
+ 443
+ ]
+ },
+ "traefik_datapath": {
+ "$id": "#/properties/traefik_datapath",
+ "type": "string",
+ "title": "Traefik datapath",
+ "examples": [
+ "/tmp/cyphernode/traefik"
+ ]
+ },
+ "traefik_datapath_custom": {
+ "$id": "#/properties/traefik_datapath_custom",
+ "type": "string",
+ "title": "Traefik custom datapath",
+ "examples": [
+ "/tmp/cyphernode/traefik"
+ ]
+ },
+ "postgres_password": {
+ "$id": "#/properties/postgres_password",
+ "type": "string",
+ "title": "Postgres cyphernode's password",
+ "default": "CHANGEME",
+ "examples": [
+ "CHANGEME"
+ ]
+ },
+ "postgres_datapath": {
+ "$id": "#/properties/postgres_datapath",
+ "type": "string",
+ "title": "Postgres datapath",
+ "examples": [
+ "/tmp/cyphernode/postgres"
+ ]
+ },
+ "postgres_datapath_custom": {
+ "$id": "#/properties/postgres_datapath_custom",
+ "type": "string",
+ "title": "Postgres custom datapath",
+ "examples": [
+ "/tmp/cyphernode/postgres"
+ ]
+ },
+ "logs_datapath": {
+ "$id": "#/properties/logs_datapath",
+ "type": "string",
+ "title": "Logs datapath",
+ "examples": [
+ "/tmp/cyphernode/logs"
+ ]
+ },
+ "logs_datapath_custom": {
+ "$id": "#/properties/logs_datapath_custom",
+ "type": "string",
+ "title": "Logs custom datapath",
+ "examples": [
+ "/tmp/cyphernode/logs"
+ ]
+ },
+ "tor_datapath": {
+ "$id": "#/properties/tor_datapath",
+ "type": "string",
+ "title": "Tor datapath",
+ "examples": [
+ "/tmp/cyphernode/tor"
+ ]
+ },
+ "tor_datapath_custom": {
+ "$id": "#/properties/tor_datapath_custom",
+ "type": "string",
+ "title": "Tor custom datapath",
+ "examples": [
+ "/tmp/cyphernode/tor"
+ ]
+ },
+ "lightning_announce": {
+ "$id": "#/properties/lightning_announce",
+ "type": "boolean",
+ "title": "Announce lightning ip",
+ "default": false,
+ "examples": [
+ false
+ ]
+ },
+ "lightning_external_ip": {
+ "$id": "#/properties/lightning_external_ip",
+ "type": "string",
+ "format": "ipv4",
+ "title": "External lightning node ip",
+ "examples": [
+ "123.123.123.123"
+ ]
+ },
+ "bitcoin_mode": {
+ "$id": "#/properties/bitcoin_mode",
+ "type": "string",
+ "enum": [
+ "internal"
+ ],
+ "title": "Bitcoin mode",
+ "default": "internal",
+ "examples": [
+ "internal"
+ ]
+ },
+ "bitcoin_expose": {
+ "$id": "#/properties/bitcoin_expose",
+ "type": "boolean",
+ "title": "Expose bitcoin node",
+ "default": false,
+ "examples": [
+ true
+ ]
+ },
+ "lightning_expose": {
+ "$id": "#/properties/lightning_expose",
+ "type": "boolean",
+ "title": "Expose lightning node",
+ "default": true,
+ "examples": [
+ false
+ ]
+ },
+ "gatekeeper_expose": {
+ "$id": "#/properties/gatekeeper_expose",
+ "type": "boolean",
+ "title": "Expose gatekeeper port",
+ "default": false,
+ "examples": [
+ true
+ ]
+ },
+ "gatekeeper_datapath": {
+ "$id": "#/properties/gatekeeper_datapath",
+ "type": "string",
+ "title": "Gatekeeper datapath",
+ "examples": [
+ "/tmp/cyphernode/gatekeeper"
+ ]
+ },
+ "gatekeeper_datapath_custom": {
+ "$id": "#/properties/gatekeeper_datapath_custom",
+ "type": "string",
+ "title": "Gatekeeper custom datapath",
+ "examples": [
+ "/tmp/cyphernode/gatekeeper"
+ ]
+ },
+ "gatekeeper_port": {
+ "$id": "#/properties/gatekeeper_port",
+ "type": "integer",
+ "title": "Gatekeeper port",
+ "default": 2009,
+ "examples": [
+ 2009
+ ]
+ },
+ "gatekeeper_keys": {
+ "$id": "#/properties/gatekeeper_keys",
+ "type": "object",
+ "title": "Gatekeeper keys",
+ "default": {
+ "configEntries": [],
+ "clientInformation": []
+ },
+ "required": [
+ "configEntries",
+ "clientInformation"
+ ],
+ "properties": {
+ "configEntries": {
+ "$id": "#/properties/gatekeeper_keys/configEntries",
+ "type": "array",
+ "items": {
+ "$id": "#/properties/gatekeeper_keys/configEntries/entry",
+ "type": "string",
+ "pattern": "^kapi_id=\".+\";kapi_key=\".+\";kapi_groups=\".+\";.+$"
+ },
+ "examples": [
+ [
+ "kapi_id=\"000\";kapi_key=\"a27f9e73fdde6a5005879c259c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a\";kapi_groups=\"stats\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}",
+ "kapi_id=\"001\";kapi_key=\"a27f9e73fdde6a5005879c273c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a\";kapi_groups=\"stats,watcher\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}",
+ "kapi_id=\"002\";kapi_key=\"fe58ddbb66d7302a7087af3242a98b6326c51a257f5eab1c06bb8cc02e25890d\";kapi_groups=\"stats,watcher,spender\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}",
+ "kapi_id=\"003\";kapi_key=\"f0b8bb52f4c7007938757bcdfc73b452d6ce08cc0c660ce57c5464ae95f35417\";kapi_groups=\"stats,watcher,spender,admin\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}"
+ ]
+ ]
+ },
+ "clientInformation": {
+ "$id": "#/properties/gatekeeper_keys/clientInformation",
+ "type": "array",
+ "items": {
+ "$id": "#/properties/gatekeeper_keys/clientInformation/entry",
+ "type": "string",
+ "pattern": "^.+=.+$"
+ },
+ "examples": [
+ [
+ "000=a27f9e73fdde6a5005879c259c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a",
+ "001=a27f9e73fdde6a5005879c273c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a",
+ "002=fe58ddbb66d7302a7087af3242a98b6326c51a257f5eab1c06bb8cc02e25890d",
+ "003=f0b8bb52f4c7007938757bcdfc73b452d6ce08cc0c660ce57c5464ae95f35417"
+ ]
+ ]
+ }
+ }
+ },
+ "gatekeeper_sslcert": {
+ "$id": "#/properties/gatekeeper_sslcert",
+ "type": "string",
+ "title": "Gatekeeper SSL Cert"
+ },
+ "gatekeeper_sslkey": {
+ "$id": "#/properties/gatekeeper_sslkey",
+ "type": "string",
+ "title": "Gatekeeper SSL Key"
+ },
+ "gatekeeper_cns": {
+ "$id": "#/properties/gatekeeper_cns",
+ "type": "string",
+ "title": "Gatekeeper cns",
+ "examples": [
+ "myhost.mydomain.com,*.myotherdomain.com,123.123.123.123"
+ ]
+ },
+ "gatekeeper_clientkeyspassword": {
+ "$id": "#/properties/gatekeeper_clientkeyspassword",
+ "type": "string",
+ "title": "Password for the encrypted client keys archive"
+ },
+ "adminhash": {
+ "$id": "#/properties/adminhash",
+ "type": "string",
+ "title": "Bcrypted hash of admin password"
+ },
+ "lightning_implementation": {
+ "$id": "#/properties/lightning_implementation",
+ "type": "string",
+ "enum": [
+ "c-lightning"
+ ],
+ "title": "The lightning implementation",
+ "default": "c-lightning",
+ "examples": [
+ "c-lightning"
+ ]
+ },
+ "lightning_nodename": {
+ "$id": "#/properties/lightning_nodename",
+ "type": "string",
+ "title": "The lightning node name",
+ "examples": [
+ "🚀 Mighty Moose 🚀"
+ ]
+ },
+ "lightning_nodecolor": {
+ "$id": "#/properties/lightning_nodecolor",
+ "type": "string",
+ "pattern": "^[0-9A-Fa-f]{6}$",
+ "title": "The lightning node color",
+ "examples": [
+ "ff0000",
+ "00ff00",
+ "00ffff"
+ ]
+ }
+ }
+}
diff --git a/cyphernodeconf_docker/templates/installer/config.sh b/cyphernodeconf_docker/templates/installer/config.sh
index f54fcd5..0936b43 100644
--- a/cyphernodeconf_docker/templates/installer/config.sh
+++ b/cyphernodeconf_docker/templates/installer/config.sh
@@ -8,6 +8,7 @@ LIGHTNING_IMPLEMENTATION=<%= lightning_implementation %>
PROXY_DATAPATH=<%= proxy_datapath %>
GATEKEEPER_DATAPATH=<%= gatekeeper_datapath %>
GATEKEEPER_PORT=<%= gatekeeper_port %>
+POSTGRES_DATAPATH=<%= postgres_datapath %>
LOGS_DATAPATH=<%= logs_datapath %>
TRAEFIK_DATAPATH=<%= traefik_datapath %>
FEATURE_TOR=<%= (features.indexOf('tor') != -1)?'true':'false' %>
diff --git a/cyphernodeconf_docker/templates/installer/docker/docker-compose.yaml b/cyphernodeconf_docker/templates/installer/docker/docker-compose.yaml
index b53e149..6250e32 100644
--- a/cyphernodeconf_docker/templates/installer/docker/docker-compose.yaml
+++ b/cyphernodeconf_docker/templates/installer/docker/docker-compose.yaml
@@ -2,6 +2,46 @@ version: "3"
services:
+ ##########################
+ # POSTGRESQL #
+ ##########################
+
+ postgres:
+ image: postgres:<%= postgres_version %>
+ user: $USER
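+    # The image's docker-entrypoint.sh prepends "postgres" when its first argument
+    # starts with a dash, so the -c options below are passed to the server.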
+ entrypoint: sh -c 'rm -f /container_monitor/postgres_ready ; exec docker-entrypoint.sh -c logging_collector=true -c log_directory=/cnlogs/'
+ environment:
+ - "POSTGRES_USER=cyphernode"
+ - "POSTGRES_PASSWORD=<%= postgres_password %>"
+ - "POSTGRES_DB=cyphernode"
+ - "PGDATA=/var/lib/postgresql/data/pgdata"
+ volumes:
+ - "<%= postgres_datapath %>:/var/lib/postgresql/data"
+ - "<%= logs_datapath %>:/cnlogs"
+ - container_monitor:/container_monitor
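+    # The healthcheck creates or removes a flag file on the shared volume that
+    # other containers can poll to know when Postgres is ready.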
+ healthcheck:
+ test: sh -c 'psql -U cyphernode -c "select 1;" && touch /container_monitor/postgres_ready && chown $USER /container_monitor/postgres_ready || rm -f /container_monitor/postgres_ready'
+ interval: 30s
+ timeout: 10s
+ retries: 10
+ stop_grace_period: 90s
+ networks:
+ - cyphernodenet
+ <% if ( docker_mode === 'swarm' ) { %>
+ deploy:
+ replicas: 1
+ placement:
+ constraints:
+ - node.labels.io.cyphernode == true
+ restart_policy:
+ condition: "any"
+ delay: 1s
+ update_config:
+ parallelism: 1
+ <% } else { %>
+ restart: always
+ <% } %>
+
<% if ( features.indexOf('tor') !== -1 ) { %>
##########################
# TOR #
@@ -9,14 +49,14 @@ services:
tor:
image: cyphernode/tor:<%= tor_version %>
- # Sleeping 7 seconds to let lightning and traefik start
- command: $USER sh -c 'rm -f /container_monitor/tor_ready ; sleep 10 ; export HOME=/tor ; tor -f /tor/torrc'
+ # Sleeping 10 seconds to let lightning and traefik start
+ command: $USER sh -c 'rm -f /container_monitor/tor_ready ; sleep 10 ; export HOME=/tor ; exec tor -f /tor/torrc'
volumes:
- "<%= tor_datapath %>:/tor"
- container_monitor:/container_monitor
healthcheck:
- test: chown $USER /container_monitor && su-exec $USER sh -c 'tor-resolve torproject.org && touch /container_monitor/tor_ready && chown $USER /container_monitor/tor_ready || rm -f /container_monitor/tor_ready'
- interval: 20s
+ test: chown -R $USER /container_monitor && su-exec $USER sh -c 'tor-resolve torproject.org && touch /container_monitor/tor_ready || rm -f /container_monitor/tor_ready'
+ interval: 30s
timeout: 10s
retries: 10
networks:
@@ -55,8 +95,8 @@ services:
- "<%= bitcoin_datapath %>/bitcoin-client.conf:/.bitcoin/bitcoin.conf:ro"
- container_monitor:/container_monitor
healthcheck:
- test: chown $USER /container_monitor && su-exec $USER sh -c 'lightning-cli getinfo && touch /container_monitor/lightning_ready && chown $USER /container_monitor/lightning_ready || rm -f /container_monitor/lightning_ready'
- interval: 20s
+ test: chown -R $USER /container_monitor && su-exec $USER sh -c 'lightning-cli getinfo && touch /container_monitor/lightning_ready || rm -f /container_monitor/lightning_ready'
+ interval: 30s
timeout: 10s
retries: 10
stop_grace_period: 30s
@@ -89,7 +129,7 @@ services:
bitcoin:
image: cyphernode/bitcoin:<%= bitcoin_version %>
- command: $USER /.bitcoin/entrypoint.sh
+ command: $USER /.bitcoin/entrypoint.sh
<% if( bitcoin_expose ) { %>
ports:
- "<%= (net === 'regtest') ? '18444:18444' : ((net === 'testnet') ? '18333:18333' : '8333:8333') %>"
@@ -99,8 +139,8 @@ services:
- "<%= bitcoin_datapath %>/createWallets.sh:/.bitcoin/createWallets.sh:ro"
- container_monitor:/container_monitor
healthcheck:
- test: chown $USER /container_monitor && su-exec $USER sh -c 'bitcoin-cli echo && touch /container_monitor/bitcoin_ready || rm -f /container_monitor/bitcoin_ready'
- interval: 20s
+ test: chown -R $USER /container_monitor && su-exec $USER sh -c 'bitcoin-cli echo && touch /container_monitor/bitcoin_ready || rm -f /container_monitor/bitcoin_ready'
+ interval: 30s
timeout: 10s
retries: 10
stop_grace_period: 30s
@@ -156,6 +196,7 @@ services:
- "OTSCLIENT_CONTAINER=otsclient:6666"
- "OTS_FILES=/proxy/otsfiles"
- "XPUB_DERIVATION_GAP=100"
+ - "PGPASSFILE=/proxy/db/pgpass"
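+      # libpq reads the database password from the pgpass file above instead of an environment variable.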
<% if ( devmode ) { %>
ports:
- "8888:8888"
@@ -172,8 +213,17 @@ services:
<% if ( features.indexOf('tor') !== -1 ) { %>
- "<%= tor_datapath %>:/proxy/tor"
<% } %>
+ - container_monitor:/container_monitor
+ healthcheck:
+ test: chown -R $USER /container_monitor && su-exec $USER sh -c 'curl localhost:8888/helloworld && touch /container_monitor/proxy_ready || rm -f /container_monitor/proxy_ready'
+ interval: 30s
+ timeout: 10s
+ retries: 10
+ stop_grace_period: 30s
networks:
- cyphernodenet
+ depends_on:
+ - postgres
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
@@ -250,6 +300,7 @@ services:
command: $USER ./startnotifier.sh
<% if ( features.indexOf('tor') !== -1 ) { %>
environment:
+ - "TRACING=1"
- "TOR_HOST=tor"
- "TOR_PORT=9050"
<% } %>
@@ -373,6 +424,7 @@ services:
- "<%= gatekeeper_datapath %>/installation.json:/etc/nginx/conf.d/s/stats/installation.json"
- "<%= gatekeeper_datapath %>/client.7z:/etc/nginx/conf.d/s/stats/client.7z"
- "<%= gatekeeper_datapath %>/config.7z:/etc/nginx/conf.d/s/stats/config.7z"
+ - container_monitor:/container_monitor
networks:
- cyphernodenet
- cyphernodeappsnet
diff --git a/cyphernodeconf_docker/templates/installer/start.sh b/cyphernodeconf_docker/templates/installer/start.sh
index 0ec0163..647cab4 100644
--- a/cyphernodeconf_docker/templates/installer/start.sh
+++ b/cyphernodeconf_docker/templates/installer/start.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
. ./.cyphernodeconf/installer/config.sh
@@ -60,6 +60,9 @@ export USER=$(id -u <%= default_username %>):$(id -g <%= default_username %>)
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
+# Let's make sure the container readyness files are deleted before starting the stack
+docker run --rm -v cyphernode_container_monitor:/container_monitor alpine sh -c 'rm -f /container_monitor/*_ready'
+
<% if (docker_mode == 'swarm') { %>
docker stack deploy -c $current_path/docker-compose.yaml cyphernode
<% } else if(docker_mode == 'compose') { %>
@@ -68,11 +71,13 @@ docker-compose -f $current_path/docker-compose.yaml up -d --remove-orphans
start_apps
-export ARCH=$(uname -m)
-case "${ARCH}" in arm*)
- printf "\r\n\033[1;31mSince we're on a slow RPi, let's give Docker 60 more seconds before performing our tests...\033[0m\r\n\r\n"
+printf "\r\nDetermining the speed of your machine..."
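+# Iterating over a 500000-item brace expansion takes more than 2 wall-clock seconds
+# only on slow hardware (measurements in dist/setup.sh).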
+speedseconds=$(bash -c ' : {1..500000} ; echo $SECONDS')
+if [ "${speedseconds}" -gt "2" ]; then
+ printf "\r\n\033[1;31mSince we're on a slow computer, let's give Docker 60 more seconds before performing our tests...\033[0m\r\n\r\n"
sleep 60
-;;
-esac
+else
+ printf " It's pretty fast!\r\n"
+fi
. ./testdeployment.sh
diff --git a/cyphernodeconf_docker/templates/installer/stop.sh b/cyphernodeconf_docker/templates/installer/stop.sh
index 9ac13ab..6096e57 100644
--- a/cyphernodeconf_docker/templates/installer/stop.sh
+++ b/cyphernodeconf_docker/templates/installer/stop.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
diff --git a/cyphernodeconf_docker/templates/installer/testdeployment.sh b/cyphernodeconf_docker/templates/installer/testdeployment.sh
index cb3a963..2cfa327 100644
--- a/cyphernodeconf_docker/templates/installer/testdeployment.sh
+++ b/cyphernodeconf_docker/templates/installer/testdeployment.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
. ./.cyphernodeconf/installer/config.sh
@@ -65,14 +65,21 @@ if [ -f $current_path/exitStatus.sh ]; then
rm -f $current_path/exitStatus.sh
fi
-test_apps
+if [ "$EXIT_STATUS" -ne "0" ]; then
+ printf "\r\n\033[1;31mSkipping cypherapps deployment because of previous errors.\r\n\r\n\033[0m"
+else
+ test_apps
+fi
EXIT_STATUS=$(($? | ${EXIT_STATUS}))
printf "\r\n\e[1;32mTests finished.\e[0m\n"
if [ "$EXIT_STATUS" -ne "0" ]; then
- printf "\r\n\033[1;31mThere was an error during cyphernode installation. full logs: docker ps -q | xargs -L 1 docker logs , Containers logs: docker logs , list containers: docker ps .Please see Docker's logs for more information. Run ./testdeployment.sh to rerun the tests. Run ./stop.sh to stop cyphernode.\r\n\r\n\033[0m"
+ printf "\r\n\033[1;31mThere was an error during cyphernode installation.\r\n\033[0m"
+ printf "\r\n\033[1;31mCheck logs in your logs directory (${LOGS_DATAPATH}).\r\n\033[0m"
+ printf "\r\n\033[1;31mRun ./testdeployment.sh to rerun the tests.\033[0m"
+ printf "\r\n\033[1;31mRun ./stop.sh to stop cyphernode.\r\n\033[0m"
exit 1
fi
diff --git a/cyphernodeconf_docker/templates/installer/testfeatures.sh b/cyphernodeconf_docker/templates/installer/testfeatures.sh
index 3d113d7..6a2b045 100644
--- a/cyphernodeconf_docker/templates/installer/testfeatures.sh
+++ b/cyphernodeconf_docker/templates/installer/testfeatures.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-apk add --update --no-cache openssl curl jq coreutils > /dev/null
+apk add --update --no-cache openssl curl jq coreutils postgresql > /dev/null
. /gatekeeper/keys.properties
@@ -72,7 +72,7 @@ checkpycoin() {
echo -en "\r\n\e[1;36mTesting Pycoin... " > /dev/console
local rc
- rc=$(curl -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" -s -o /dev/null -w "%{http_code}" http://proxy:8888/derivepubpath)
+ rc=$(curl -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" -s -o /dev/null -w "%{http_code}" http://pycoin:7777/derive)
[ "${rc}" -ne "200" ] && return 100
echo -e "\e[1;36mPycoin rocks!" > /dev/console
@@ -80,6 +80,18 @@ checkpycoin() {
return 0
}
+checkpostgres() {
+ echo -en "\r\n\e[1;36mTesting Postgres... " > /dev/console
+ local rc
+
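+  # pg_isready only checks that the server is accepting connections; it does not authenticate.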
+  pg_isready -h postgres -U cyphernode
+  rc=$?
+  [ "${rc}" -ne "0" ] && return 105
+
+ echo -e "\e[1;36mPostgres rocks!" > /dev/console
+
+ return 0
+}
+
checkbroker() {
echo -en "\r\n\e[1;36mTesting Broker... " > /dev/console
local rc
@@ -97,7 +109,8 @@ checknotifier() {
local response
local returncode
- response=$(mosquitto_rr -h broker -W 15 -t notifier -e "response/$$" -m "{\"response-topic\":\"response/$$\",\"cmd\":\"web\",\"url\":\"http://proxy:8888/helloworld\",\"tor\":false}")
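+  # Start a one-shot HTTP listener that always answers 200 OK so the notifier has a local URL to call back.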
+ nc -vlp1111 -e sh -c 'echo -en "HTTP/1.1 200 OK\\r\\n\\r\\n" ; date >&2 ; timeout 1 tee /dev/tty | cat ; ' &
+ response=$(mosquitto_rr -h broker -W 15 -t notifier -e "response/$$" -m "{\"response-topic\":\"response/$$\",\"cmd\":\"web\",\"url\":\"http://$(hostname):1111/notifiertest\",\"tor\":false}")
returncode=$?
[ "${returncode}" -ne "0" ] && return 115
http_code=$(echo "${response}" | jq -r ".http_code")
@@ -112,7 +125,8 @@ checkots() {
echo -en "\r\n\e[1;36mTesting OTSclient... " > /dev/console
local rc
- rc=$(curl -s -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' http://proxy:8888/ots_stamp)
+ # rc=$(curl -s -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' http://proxy:8888/ots_stamp)
+ rc=$(curl -s otsclient:6666/stamp/123)
echo "${rc}" | grep "Invalid hash 123 for sha256" > /dev/null
[ "$?" -ne "0" ] && return 200
@@ -170,12 +184,12 @@ checkservice() {
while :
do
outcome=0
- for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
+ for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
echo -e " \e[0;32mVerifying \e[0;33m${container}\e[0;32m..." > /dev/console
(ping -c 10 ${container} 2> /dev/null | grep "0% packet loss" > /dev/null) &
eval ${container}=$!
done
- for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
+ for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
eval wait '$'${container} ; returncode=$? ; outcome=$((${outcome} + ${returncode}))
eval c_${container}=${returncode}
done
@@ -193,12 +207,13 @@ checkservice() {
# { "name": "proxy", "active":true },
# { "name": "proxycron", "active":true },
# { "name": "pycoin", "active":true },
+ # { "name": "postgres", "active":true },
# { "name": "otsclient", "active":true },
# { "name": "tor", "active":true },
# { "name": "bitcoin", "active":true },
# { "name": "lightning", "active":true },
# ]
- for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
+ for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
[ -n "${result}" ] && result="${result},"
result="${result}{\"name\":\"${container}\",\"active\":"
eval "returncode=\$c_${container}"
@@ -218,7 +233,7 @@ checkservice() {
timeout_feature() {
local interval=15
- local totaltime=120
+ local totaltime=${2:-120}
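+  # An optional second argument overrides the default 120-second timeout.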
local testwhat=${1}
local returncode
local endtime=$(($(date +%s) + ${totaltime}))
@@ -254,6 +269,7 @@ feature_status() {
# { "name": "proxy", "active":true },
# { "name": "proxycron", "active":true },
# { "name": "pycoin", "active":true },
+# { "name": "postgres", "active":true },
# { "name": "otsclient", "active":true },
# { "name": "tor", "active":true },
# { "name": "bitcoin", "active":true },
@@ -262,6 +278,7 @@ feature_status() {
# "features": [
# { "name": "gatekeeper", "working":true },
# { "name": "pycoin", "working":true },
+# { "name": "postgres", "working":true },
# { "name": "otsclient", "working":true },
# { "name": "tor", "working":true },
# { "name": "bitcoin", "working":true },
@@ -285,7 +302,7 @@ if [ "${returncode}" -ne "0" ]; then
echo -e "\e[1;31mCyphernode could not fully start properly within delay." > /dev/console
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"proxy\") | .active")
if [ "${status}" = "false" ]; then
- echo -e "\e[1;31mThe Proxy, the main Cyphernode's component, is not responding. We will only test the gatekeeper if its container is up, but you'll see errors for the other components. Please check the logs." > /dev/console
+  echo -e "\r\n\e[1;31mThe Proxy, Cyphernode's main component, is not responding. You'll see errors for the other components. Please check the logs." > /dev/console
workingproxy="false"
fi
else
@@ -296,19 +313,60 @@ fi
# "features": [
# { "name": "gatekeeper", "working":true },
# { "name": "pycoin", "working":true },
+# { "name": "postgres", "working":true },
# { "name": "otsclient", "working":true },
# { "name": "tor", "working":true },
# { "name": "bitcoin", "working":true },
# { "name": "lightning", "working":true },
# ]
+#############################
+# PROXY #
+#############################
+
+if [ ! -f /container_monitor/proxy_dbfailed ]; then
+ echo -e "\r\n\e[1;36mWaiting for Proxy to be ready... " > /dev/console
+ timeout_feature '[ -f "/container_monitor/proxy_ready" ]' 300
+ returncode=$?
+ if [ "${returncode}" -ne "0" ]; then
+    echo -e "\r\n\e[1;31mThe proxy is still not ready. It may be migrating a large quantity of data. Please check the logs for more details." > /dev/console
+ workingproxy="false"
+ fi
+fi
+if [ -f /container_monitor/proxy_dbfailed ]; then
+ echo -e "\r\n\e[1;31mThe proxy's database migration failed. Please check proxy.log for more details." > /dev/console
+ workingproxy="false"
+fi
+
+if [ "${workingproxy}" = "false" ]; then
+  echo -e "\r\n\e[1;31mThe Proxy, Cyphernode's main component, is not ready. Cyphernode can't run without it." > /dev/console
+  echo -e "\r\n\e[1;31mThe other components will fail next; this is normal." > /dev/console
+fi
+
+result="${containers},\"features\":[{\"coreFeature\":true,\"name\":\"proxy\",\"working\":${workingproxy}}"
+
+#############################
+# POSTGRES #
+#############################
+
+result="${result},{\"coreFeature\":true,\"name\":\"postgres\",\"working\":"
+status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"postgres\") | .active")
+if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
+ timeout_feature checkpostgres
+ returncode=$?
+else
+ returncode=1
+fi
+finalreturncode=$((${returncode} | ${finalreturncode}))
+result="${result}$(feature_status ${returncode} 'Postgres error!')}"
+
#############################
# GATEKEEPER #
#############################
-result="${containers},\"features\":[{\"coreFeature\":true, \"name\":\"proxy\",\"working\":${workingproxy}}, {\"coreFeature\":true, \"name\":\"gatekeeper\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"gatekeeper\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"gatekeeper\") | .active")
-if [ "${status}" = "true" ]; then
+if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
timeout_feature checkgatekeeper
returncode=$?
else
@@ -321,7 +379,7 @@ result="${result}$(feature_status ${returncode} 'Gatekeeper error!')}"
# BROKER #
#############################
-result="${result},{\"coreFeature\":true, \"name\":\"broker\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"broker\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"broker\") | .active")
if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
timeout_feature checkbroker
@@ -336,7 +394,7 @@ result="${result}$(feature_status ${returncode} 'Broker error!')}"
# NOTIFIER #
#############################
-result="${result},{\"coreFeature\":true, \"name\":\"notifier\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"notifier\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"notifier\") | .active")
if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
timeout_feature checknotifier
@@ -351,7 +409,7 @@ result="${result}$(feature_status ${returncode} 'Notifier error!')}"
# PYCOIN #
#############################
-result="${result},{\"coreFeature\":true, \"name\":\"pycoin\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"pycoin\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"pycoin\") | .active")
if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
timeout_feature checkpycoin
@@ -367,7 +425,7 @@ result="${result}$(feature_status ${returncode} 'Pycoin error!')}"
# OTSCLIENT #
#############################
-result="${result},{\"coreFeature\":false, \"name\":\"otsclient\",\"working\":"
+result="${result},{\"coreFeature\":false,\"name\":\"otsclient\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"otsclient\") | .active")
if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
timeout_feature checkots
@@ -384,12 +442,11 @@ result="${result}$(feature_status ${returncode} 'OTSclient error!')}"
# TOR #
#############################
-echo -e "\r\n\e[1;36mWaiting for Tor to be ready... " > /dev/console
-timeout_feature '[ -f "/container_monitor/tor_ready" ]'
-
-result="${result},{\"coreFeature\":false, \"name\":\"tor\",\"working\":"
+result="${result},{\"coreFeature\":false,\"name\":\"tor\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"tor\") | .active")
if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
+ echo -e "\r\n\e[1;36mWaiting for Tor to be ready... " > /dev/console
+ timeout_feature '[ -f "/container_monitor/tor_ready" ]'
timeout_feature checktor
returncode=$?
else
@@ -403,12 +460,11 @@ result="${result}$(feature_status ${returncode} 'Tor error!')}"
# BITCOIN #
#############################
-echo -e "\r\n\e[1;36mWaiting for Bitcoin Core to be ready... " > /dev/console
-timeout_feature '[ -f "/container_monitor/bitcoin_ready" ]'
-
-result="${result},{\"coreFeature\":true, \"name\":\"bitcoin\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"bitcoin\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"bitcoin\") | .active")
if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
+ echo -e "\r\n\e[1;36mWaiting for Bitcoin Core to be ready... " > /dev/console
+ timeout_feature '[ -f "/container_monitor/bitcoin_ready" ]'
timeout_feature checkbitcoinnode
returncode=$?
else
@@ -422,12 +478,11 @@ result="${result}$(feature_status ${returncode} 'Bitcoin error!')}"
# LIGHTNING #
#############################
-echo -e "\r\n\e[1;36mWaiting for C-Lightning to be ready... " > /dev/console
-timeout_feature '[ -f "/container_monitor/lightning_ready" ]'
-
-result="${result},{\"coreFeature\":false, \"name\":\"lightning\",\"working\":"
+result="${result},{\"coreFeature\":false,\"name\":\"lightning\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"lightning\") | .active")
if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
+ echo -e "\r\n\e[1;36mWaiting for C-Lightning to be ready... " > /dev/console
+ timeout_feature '[ -f "/container_monitor/lightning_ready" ]'
timeout_feature checklnnode
returncode=$?
else
@@ -438,6 +493,8 @@ result="${result}$(feature_status ${returncode} 'Lightning error!')}"
<% } %>
+#############################
+
result="{${result}]}"
echo "${result}" > /gatekeeper/installation.json
diff --git a/cyphernodeconf_docker/templates/postgres/pgpass b/cyphernodeconf_docker/templates/postgres/pgpass
new file mode 100644
index 0000000..9d20cfe
--- /dev/null
+++ b/cyphernodeconf_docker/templates/postgres/pgpass
@@ -0,0 +1 @@
+postgres:5432:cyphernode:cyphernode:<%= postgres_password %>
\ No newline at end of file
diff --git a/dist/setup.sh b/dist/setup.sh
index f1d336d..39b221c 100755
--- a/dist/setup.sh
+++ b/dist/setup.sh
@@ -1,17 +1,34 @@
#!/bin/bash
-### Execute this on a freshly install ubuntu luna node
-# curl -fsSL get.docker.com -o get-docker.sh
-# sh get-docker.sh
-# sudo usermod -aG docker $USER
-## logout and relogin
-# git clone --branch features/install --recursive https://github.com/schulterklopfer/cyphernode.git
-# sudo curl -L "https://github.com/docker/compose/releases/download/1.22.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
-# sudo chmod +x /usr/local/bin/docker-compose
-# cd cyphernode
-# ./setup.sh -ci
-# docker-compose -f docker-compose.yaml up [-d]
+# This is where everything is configured.
+# To determine the speed of the machine...
+#
+# bash -c ' : {1..500000} ; echo $SECONDS'
+#
+# MBP M1: 0
+# MBP Intel: 0
+# x86_64 avg machine: 0
+# RockPi Debian 64-bits: 1
+# RPi4 RaspiOS 64-bits: 1
+# RPi3 RaspiOS 32-bits: 4
+# RPi2 RaspiOS 32-bits: 7
+#
+# Let's say if timer > 2, we're on a slow machine.
+
+# At first we tried using uname -m to determine slow devices, but:
+#
+# uname -m result:
+# RPi2: armv7l
+# RPi3: armv7l
+# RPi4 on 32-bit OS: armv7l
+# RPi4 on 64-bit OS: aarch64
+# RockPi: aarch64
+# Apple M1: arm64
+# Intel 64: x86_64
+#
+# There are a ton of other possible values... and can't rely on them to detect
+# a slow device.
# FROM: https://stackoverflow.com/questions/5195607/checking-bash-exit-status-of-several-commands-efficiently
# Use step(), try(), and next() to perform a series of commands and print
@@ -110,7 +127,7 @@ sudo_if_required() {
}
modify_permissions() {
- local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
+ local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
for d in "${directories[@]}"
do
if [[ -e $d ]]; then
@@ -122,7 +139,7 @@ modify_permissions() {
}
modify_owner() {
- local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
+ local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER)
for d in "${directories[@]}"
do
@@ -142,9 +159,6 @@ configure() {
recreate=" recreate"
fi
-
-
- local arch=$(uname -m)
local pw_env=''
local interactive=''
local gen_options=''
@@ -157,11 +171,12 @@ configure() {
pw_env=" -e CFG_PASSWORD=$CFG_PASSWORD"
fi
-
- if [[ $arch =~ ^arm ]]; then
- clear && echo "Thinking. This may take a while, since I'm a Raspberry PI and my brain is so tiny. :("
+  echo -e "\nDetermining the speed of your machine..."
+  local speedseconds=$(bash -c ' : {1..500000} ; echo $SECONDS')
+  if [[ $speedseconds -gt 2 ]]; then
+ clear && echo "This may take a while, since it seems we're running on a slow machine."
else
- clear && echo "Thinking..."
+ clear && echo "Fast machine..."
fi
# before starting a new cyphernodeconf, kill all the others
@@ -193,6 +208,7 @@ configure() {
-e PROXYCRON_VERSION=$PROXYCRON_VERSION \
-e OTSCLIENT_VERSION=$OTSCLIENT_VERSION \
-e PYCOIN_VERSION=$PYCOIN_VERSION \
+ -e POSTGRES_VERSION=$POSTGRES_VERSION \
-e BITCOIN_VERSION=$BITCOIN_VERSION \
-e LIGHTNING_VERSION=$LIGHTNING_VERSION \
-e CONF_VERSION=$CONF_VERSION \
@@ -348,14 +364,6 @@ compare_bitcoinconf() {
}
install_docker() {
- local archpath=$(uname -m)
-
- # compat mode for SatoshiPortal repo
- # TODO: add more mappings?
- if [[ $archpath == 'armv7l' ]]; then
- archpath="rpi"
- fi
-
if [ ! -d $GATEKEEPER_DATAPATH ]; then
step " [32mcreate[0m $GATEKEEPER_DATAPATH"
sudo_if_required mkdir -p $GATEKEEPER_DATAPATH
@@ -385,6 +393,13 @@ install_docker() {
copy_file $cyphernodeconf_filepath/traefik/htpasswd $GATEKEEPER_DATAPATH/htpasswd 1 $SUDO_REQUIRED
+ if [ ! -d $POSTGRES_DATAPATH ]; then
+ step " [32mcreate[0m $POSTGRES_DATAPATH"
+ sudo_if_required mkdir -p $POSTGRES_DATAPATH/pgdata
+ next
+ fi
+
if [ ! -d $LOGS_DATAPATH ]; then
step " [32mcreate[0m $LOGS_DATAPATH"
sudo_if_required mkdir -p $LOGS_DATAPATH
@@ -461,6 +476,8 @@ install_docker() {
copy_file $cyphernodeconf_filepath/installer/config.sh $PROXY_DATAPATH/config.sh 1 $SUDO_REQUIRED
copy_file $cyphernodeconf_filepath/cyphernode/info.json $PROXY_DATAPATH/info.json 1 $SUDO_REQUIRED
+ copy_file $cyphernodeconf_filepath/postgres/pgpass $PROXY_DATAPATH/pgpass 1 $SUDO_REQUIRED
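+  # libpq ignores a password file whose permissions are looser than 0600.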
+ sudo_if_required chmod 0600 $PROXY_DATAPATH/pgpass
if [[ $BITCOIN_INTERNAL == true ]]; then
if [ ! -d $BITCOIN_DATAPATH ]; then
@@ -652,7 +669,7 @@ install_docker() {
check_directory_owner() {
# if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0
- local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
+ local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local status=0
for d in "${directories[@]}"
do
@@ -756,7 +773,7 @@ sanity_checks_pre_install() {
if [[ $sudo_reason == 'directories' ]]; then
echo " [31mor check your data volumes if they have the right owner.[0m"
echo " [31mThe owner of the following folders should be '$RUN_AS_USER':[0m"
- local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
+ local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local status=0
for d in "${directories[@]}"
do
@@ -855,10 +872,11 @@ PROXYCRON_VERSION="v0.7.0-dev"
OTSCLIENT_VERSION="v0.7.0-dev"
PYCOIN_VERSION="v0.7.0-dev"
CYPHERAPPS_VERSION="dev"
-BITCOIN_VERSION="v0.21.1"
-LIGHTNING_VERSION="v0.10.1"
+BITCOIN_VERSION="v22.0"
+LIGHTNING_VERSION="v0.10.2"
TRAEFIK_VERSION="v1.7.9-alpine"
MOSQUITTO_VERSION="1.6-openssl"
+POSTGRES_VERSION="14.0-bullseye"
SETUP_DIR=$(dirname $(realpath $0))
diff --git a/doc/openapi/v0/cyphernode-api.yaml b/doc/openapi/v0/cyphernode-api.yaml
index b794adb..5af8c10 100644
--- a/doc/openapi/v0/cyphernode-api.yaml
+++ b/doc/openapi/v0/cyphernode-api.yaml
@@ -224,8 +224,6 @@ paths:
- "pub32"
- "path"
- "nstart"
- - "unconfirmedCallbackURL"
- - "confirmedCallbackURL"
properties:
label:
description: "Label for that xpub. Can be used, instead for xpub, for future references in xpub-related endpoints."
diff --git a/notifier_docker/script/requesthandler.sh b/notifier_docker/script/requesthandler.sh
index f5888c8..5a57102 100644
--- a/notifier_docker/script/requesthandler.sh
+++ b/notifier_docker/script/requesthandler.sh
@@ -34,8 +34,7 @@ main() {
done
}
-export TRACING=1
-
main
+returncode=$?
trace "[requesthandler] exiting"
-exit $?
+exit ${returncode}
diff --git a/notifier_docker/script/startnotifier.sh b/notifier_docker/script/startnotifier.sh
index 64bc336..4df3e15 100644
--- a/notifier_docker/script/startnotifier.sh
+++ b/notifier_docker/script/startnotifier.sh
@@ -2,4 +2,6 @@
. ./trace.sh
-mosquitto_sub -h broker -t notifier | ./requesthandler.sh
+trace "Starting mosquitto and subscribing to the notifier topic..."
+
+exec sh -c 'mosquitto_sub -h broker -t notifier | ./requesthandler.sh'
diff --git a/otsclient_docker/script/requesthandler.sh b/otsclient_docker/script/requesthandler.sh
index 6e9cb5c..1821e1a 100644
--- a/otsclient_docker/script/requesthandler.sh
+++ b/otsclient_docker/script/requesthandler.sh
@@ -64,41 +64,43 @@ main()
# GET http://192.168.111.152:8080/stamp/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
response=$(stamp $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
upgrade)
# GET http://192.168.111.152:8080/upgrade/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
response=$(upgrade $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
verify)
# POST http://192.168.111.152:8080/verify
# BODY {"hash":"1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7","base64otsfile":"AE9wZW5UaW1lc3RhbXBzAABQcm9vZ...gABYiWDXPXGQEDxNch"}
response=$(verify "${line}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
info)
# POST http://192.168.111.152:8080/info
# BODY {"base64otsfile":"AE9wZW5UaW1lc3RhbXBzAABQcm9vZ...gABYiWDXPXGQEDxNch"}
response=$(info "${line}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
+ ;;
+ *)
+ response='{"error": {"code": -32601, "message": "Method not found"}, "id": "1"}'
+ returncode=1
;;
esac
+      response=$(echo "${response}" | jq -Mc .)
+ response_to_client "${response}" ${returncode}
break
fi
done
trace "[main] exiting"
- return 0
+ return ${returncode}
}
-export TRACING
-
main
-exit $?
+returncode=$?
+trace "[requesthandler] exiting"
+exit ${returncode}
diff --git a/otsclient_docker/script/startotsclient.sh b/otsclient_docker/script/startotsclient.sh
index 589e0a8..076fad7 100644
--- a/otsclient_docker/script/startotsclient.sh
+++ b/otsclient_docker/script/startotsclient.sh
@@ -1,6 +1,3 @@
#!/bin/sh
-export TRACING
-export OTSCLIENT_LISTENING_PORT
-
-nc -vlkp${OTSCLIENT_LISTENING_PORT} -e ./requesthandler.sh
+exec nc -vlkp${OTSCLIENT_LISTENING_PORT} -e ./requesthandler.sh
diff --git a/proxy_docker/Dockerfile b/proxy_docker/Dockerfile
index 967b4ee..057f2da 100644
--- a/proxy_docker/Dockerfile
+++ b/proxy_docker/Dockerfile
@@ -8,18 +8,18 @@ RUN apk add --update --no-cache \
curl \
su-exec \
py3-pip \
- xxd
+ xxd \
+ postgresql
WORKDIR ${HOME}
COPY app/data/* ./
COPY app/script/* ./
COPY app/tests/* ./tests/
-COPY --from=cyphernode/clightning:v0.10.1 /usr/local/bin/lightning-cli ./
+COPY --from=cyphernode/clightning:v0.10.2 /usr/local/bin/lightning-cli ./
COPY --from=eclipse-mosquitto:1.6-openssl /usr/bin/mosquitto_rr /usr/bin/mosquitto_sub /usr/bin/mosquitto_pub /usr/bin/
COPY --from=eclipse-mosquitto:1.6-openssl /usr/lib/libmosquitto* /usr/lib/
-COPY --from=eclipse-mosquitto:1.6-openssl /usr/lib/libcrypto* /usr/lib/
-COPY --from=eclipse-mosquitto:1.6-openssl /usr/lib/libssl* /usr/lib/
+COPY --from=eclipse-mosquitto:1.6-openssl /lib/ld-musl-* /lib/
RUN chmod +x startproxy.sh requesthandler.sh lightning-cli sqlmigrate*.sh waitanyinvoice.sh tests/* \
&& chmod o+w . \
diff --git a/proxy_docker/app/data/cyphernode.postgresql b/proxy_docker/app/data/cyphernode.postgresql
new file mode 100644
index 0000000..32dc4e9
--- /dev/null
+++ b/proxy_docker/app/data/cyphernode.postgresql
@@ -0,0 +1,166 @@
+BEGIN;
+
+CREATE TABLE watching_by_pub32 (
+ id SERIAL PRIMARY KEY,
+ pub32 VARCHAR UNIQUE,
+ label VARCHAR UNIQUE,
+ derivation_path VARCHAR,
+ callback0conf VARCHAR,
+ callback1conf VARCHAR,
+ last_imported_n INTEGER,
+ watching BOOLEAN DEFAULT FALSE,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE watching (
+ id SERIAL PRIMARY KEY,
+ address VARCHAR,
+ label VARCHAR,
+ watching BOOLEAN DEFAULT FALSE,
+ callback0conf VARCHAR,
+ calledback0conf BOOLEAN DEFAULT FALSE,
+ callback1conf VARCHAR,
+ calledback1conf BOOLEAN DEFAULT FALSE,
+ imported BOOLEAN DEFAULT FALSE,
+ watching_by_pub32_id INTEGER REFERENCES watching_by_pub32,
+ pub32_index INTEGER,
+ event_message VARCHAR,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+CREATE INDEX idx_watching_address ON watching (address);
+CREATE UNIQUE INDEX idx_watching_01 ON watching (address, COALESCE(callback0conf, ''), COALESCE(callback1conf, ''));
+CREATE INDEX idx_watching_label ON watching (label);
+CREATE INDEX idx_watching_watching ON watching (watching);
+CREATE INDEX idx_watching_imported ON watching (imported);
+CREATE INDEX idx_watching_watching_by_pub32_id ON watching (watching_by_pub32_id);
+
+CREATE TABLE tx (
+ id SERIAL PRIMARY KEY,
+ txid VARCHAR UNIQUE,
+ hash VARCHAR UNIQUE,
+ confirmations INTEGER DEFAULT 0,
+ timereceived BIGINT,
+ fee REAL,
+ size INTEGER,
+ vsize INTEGER,
+ is_replaceable BOOLEAN,
+ blockhash VARCHAR,
+ blockheight INTEGER,
+ blocktime BIGINT,
+ conf_target SMALLINT,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+CREATE INDEX idx_tx_timereceived ON tx (timereceived);
+CREATE INDEX idx_tx_fee ON tx (fee);
+CREATE INDEX idx_tx_size ON tx (size);
+CREATE INDEX idx_tx_vsize ON tx (vsize);
+CREATE INDEX idx_tx_blockhash ON tx (blockhash);
+CREATE INDEX idx_tx_blockheight ON tx (blockheight);
+CREATE INDEX idx_tx_blocktime ON tx (blocktime);
+CREATE INDEX idx_tx_confirmations ON tx (confirmations);
+
+CREATE TABLE watching_tx (
+ watching_id INTEGER REFERENCES watching,
+ tx_id INTEGER REFERENCES tx,
+ vout INTEGER,
+ amount REAL
+);
+CREATE UNIQUE INDEX idx_watching_tx ON watching_tx (watching_id, tx_id);
+CREATE INDEX idx_watching_tx_watching_id ON watching_tx (watching_id);
+CREATE INDEX idx_watching_tx_tx_id ON watching_tx (tx_id);
+
+CREATE TABLE batcher (
+ id SERIAL PRIMARY KEY,
+ label VARCHAR UNIQUE,
+ conf_target SMALLINT,
+ feerate REAL,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+INSERT INTO batcher (id, label, conf_target, feerate) VALUES (1, 'default', 6, NULL);
+SELECT SETVAL('batcher_id_seq', 1);
+
+CREATE TABLE recipient (
+ id SERIAL PRIMARY KEY,
+ address VARCHAR,
+ amount REAL,
+ tx_id INTEGER REFERENCES tx,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ webhook_url VARCHAR,
+ calledback BOOLEAN DEFAULT FALSE,
+ calledback_ts TIMESTAMP,
+ batcher_id INTEGER REFERENCES batcher,
+ label VARCHAR
+);
+CREATE INDEX idx_recipient_address ON recipient (address);
+CREATE INDEX idx_recipient_label ON recipient (label);
+CREATE INDEX idx_recipient_calledback ON recipient (calledback);
+CREATE INDEX idx_recipient_webhook_url ON recipient (webhook_url);
+CREATE INDEX idx_recipient_tx_id ON recipient (tx_id);
+CREATE INDEX idx_recipient_batcher_id ON recipient (batcher_id);
+
+CREATE TABLE watching_by_txid (
+ id SERIAL PRIMARY KEY,
+ txid VARCHAR,
+ watching BOOLEAN DEFAULT FALSE,
+ callback1conf VARCHAR,
+ calledback1conf BOOLEAN DEFAULT FALSE,
+ callbackxconf VARCHAR,
+ calledbackxconf BOOLEAN DEFAULT FALSE,
+ nbxconf INTEGER,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+CREATE INDEX idx_watching_by_txid_txid ON watching_by_txid (txid);
+CREATE UNIQUE INDEX idx_watching_by_txid_1x ON watching_by_txid (txid, COALESCE(callback1conf, ''), COALESCE(callbackxconf, ''));
+CREATE INDEX idx_watching_by_txid_watching ON watching_by_txid (watching);
+CREATE INDEX idx_watching_by_txid_callback1conf ON watching_by_txid (callback1conf);
+CREATE INDEX idx_watching_by_txid_calledback1conf ON watching_by_txid (calledback1conf);
+CREATE INDEX idx_watching_by_txid_callbackxconf ON watching_by_txid (callbackxconf);
+CREATE INDEX idx_watching_by_txid_calledbackxconf ON watching_by_txid (calledbackxconf);
+
+CREATE TABLE stamp (
+ id SERIAL PRIMARY KEY,
+ hash VARCHAR UNIQUE,
+ callbackUrl VARCHAR,
+ requested BOOLEAN DEFAULT FALSE,
+ upgraded BOOLEAN DEFAULT FALSE,
+ calledback BOOLEAN DEFAULT FALSE,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+CREATE INDEX idx_stamp_calledback ON stamp (calledback);
+
+CREATE TABLE cyphernode_props (
+ id SERIAL PRIMARY KEY,
+ property VARCHAR,
+ value VARCHAR,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+CREATE INDEX idx_cp_property ON cyphernode_props (property);
+CREATE UNIQUE INDEX idx_cp_propval ON cyphernode_props (property, value);
+
+INSERT INTO cyphernode_props (id, property, value) VALUES (1, 'version', '0.1');
+INSERT INTO cyphernode_props (id, property, value) VALUES (2, 'pay_index', '0');
+SELECT SETVAL('cyphernode_props_id_seq', 2);
+
+CREATE TABLE ln_invoice (
+ id SERIAL PRIMARY KEY,
+ label VARCHAR UNIQUE,
+ bolt11 VARCHAR UNIQUE,
+ payment_hash VARCHAR,
+ msatoshi BIGINT,
+ status VARCHAR,
+ pay_index INTEGER,
+ msatoshi_received BIGINT,
+ paid_at BIGINT,
+ description VARCHAR,
+ expires_at BIGINT,
+ callback_url VARCHAR,
+ calledback BOOLEAN DEFAULT FALSE,
+ callback_failed BOOLEAN DEFAULT FALSE,
+ inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+CREATE INDEX idx_lninvoice_label ON ln_invoice (label);
+CREATE INDEX idx_lninvoice_bolt11 ON ln_invoice (bolt11);
+CREATE INDEX idx_lninvoice_calledback ON ln_invoice (calledback);
+CREATE INDEX idx_lninvoice_callback_failed ON ln_invoice (callback_failed);
+
+COMMIT;
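
One schema detail worth spelling out: PostgreSQL unique indexes treat NULLs as distinct, so a plain UNIQUE over (address, callback0conf, callback1conf) would accept duplicate watches whose callbacks are NULL. Wrapping the nullable columns in COALESCE makes NULL collide with the empty string for uniqueness purposes. A throwaway demonstration on a temp table, assuming the compose "postgres" service is reachable and credentials come from the environment:

    psql -h postgres -U cyphernode <<'EOF'
    CREATE TEMP TABLE w (address VARCHAR, callback0conf VARCHAR);
    CREATE UNIQUE INDEX w_u ON w (address, COALESCE(callback0conf, ''));
    INSERT INTO w VALUES ('bc1qdemo', NULL);
    INSERT INTO w VALUES ('bc1qdemo', NULL);  -- rejected: duplicate key
    EOF
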
diff --git a/proxy_docker/app/data/rawtx.sql b/proxy_docker/app/data/rawtx.sql
deleted file mode 100644
index 1028845..0000000
--- a/proxy_docker/app/data/rawtx.sql
+++ /dev/null
@@ -1,27 +0,0 @@
-PRAGMA foreign_keys = ON;
-
-CREATE TABLE rawtx (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- txid TEXT UNIQUE,
- hash TEXT UNIQUE,
- confirmations INTEGER DEFAULT 0,
- timereceived INTEGER,
- fee REAL,
- size INTEGER,
- vsize INTEGER,
- is_replaceable INTEGER,
- blockhash TEXT,
- blockheight INTEGER,
- blocktime INTEGER,
- conf_target INTEGER,
- raw_tx TEXT,
- inserted_ts INTEGER DEFAULT CURRENT_TIMESTAMP
-);
-CREATE INDEX idx_rawtx_timereceived ON rawtx (timereceived);
-CREATE INDEX idx_rawtx_fee ON rawtx (fee);
-CREATE INDEX idx_rawtx_size ON rawtx (size);
-CREATE INDEX idx_rawtx_vsize ON rawtx (vsize);
-CREATE INDEX idx_rawtx_blockhash ON rawtx (blockhash);
-CREATE INDEX idx_rawtx_blockheight ON rawtx (blockheight);
-CREATE INDEX idx_rawtx_blocktime ON rawtx (blocktime);
-CREATE INDEX idx_rawtx_confirmations ON rawtx (confirmations);
diff --git a/proxy_docker/app/data/sqlmigrate20210808_0.7.0-0.8.0.sh b/proxy_docker/app/data/sqlmigrate20210808_0.7.0-0.8.0.sh
index f868254..b76cd3b 100644
--- a/proxy_docker/app/data/sqlmigrate20210808_0.7.0-0.8.0.sh
+++ b/proxy_docker/app/data/sqlmigrate20210808_0.7.0-0.8.0.sh
@@ -1,14 +1,19 @@
#!/bin/sh
-echo "Checking for labels for watched addresses support in DB..."
+. ./trace.sh
+
+trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Checking for labels for watched addresses support in DB..."
count=$(sqlite3 $DB_FILE "select count(*) from pragma_table_info('watching') where name='label'")
if [ "${count}" -eq "0" ]; then
- # label not there, we have to migrate
- echo "Migrating database for labels for watched addresses support..."
- echo "Backing up current DB..."
+ # label not there, we have to migrate
+ trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Migrating database for labels for watched addresses support..."
+ trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Backing up current DB..."
cp $DB_FILE $DB_FILE-sqlmigrate20210808_0.7.0-0.8.0
- echo "Altering DB..."
- cat sqlmigrate20210808_0.7.0-0.8.0.sql | sqlite3 $DB_FILE
+ trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Altering DB..."
+ cat sqlmigrate20210808_0.7.0-0.8.0.sql | sqlite3 $DB_FILE
+ returncode=$?
+ trace_rc ${returncode}
+ exit ${returncode}
else
- echo "Database labels for watched addresses support migration already done, skipping!"
+ trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Database labels for watched addresses support migration already done, skipping!"
fi
diff --git a/proxy_docker/app/data/sqlmigrate20210928_0.7.0-0.8.0.sh b/proxy_docker/app/data/sqlmigrate20210928_0.7.0-0.8.0.sh
index 1b62004..87fa327 100644
--- a/proxy_docker/app/data/sqlmigrate20210928_0.7.0-0.8.0.sh
+++ b/proxy_docker/app/data/sqlmigrate20210928_0.7.0-0.8.0.sh
@@ -1,17 +1,19 @@
#!/bin/sh
-echo "Checking for rawtx database support in DB..."
-if [ ! -e ${DB_FILE}_rawtx ]; then
- # rawtx database not found
- echo "Migrating database for rawtx database support..."
- echo "Backing up current DB..."
- cp $DB_FILE $DB_FILE-sqlmigrate20210928_0.7.0-0.8.0
- echo "Altering DB..."
+. ./trace.sh
+
+trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] Checking for new indexes in DB..."
+sqlite3 $DB_FILE ".indexes" | grep "idx_watching_watching" > /dev/null
+if [ "$?" -eq "1" ]; then
+ # idx_watching_watching index not found
+ trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] Migrating database with new indexes..."
+ trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] Backing up current DB..."
+ cp $DB_FILE $DB_FILE-sqlmigrate20210928_0.7.0-0.8.0
+ trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] Altering DB..."
cat sqlmigrate20210928_0.7.0-0.8.0.sql | sqlite3 $DB_FILE
- echo "Creating new DB..."
- cat rawtx.sql | sqlite3 ${DB_FILE}_rawtx
- echo "Inserting table in new DB..."
- sqlite3 -cmd ".timeout 25000" ${DB_FILE} "ATTACH DATABASE \"${DB_FILE}_rawtx\" AS other; INSERT INTO other.rawtx SELECT * FROM tx; DETACH other;"
+ returncode=$?
+ trace_rc ${returncode}
+ exit ${returncode}
else
- echo "rawtx database support migration already done, skipping!"
+ trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] New indexes migration already done, skipping!"
fi
diff --git a/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0.sh b/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0.sh
new file mode 100644
index 0000000..68990f2
--- /dev/null
+++ b/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0.sh
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+. ./trace.sh
+
+trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Checking if postgres is set up..."
+psql -h postgres -U cyphernode -c "\d" | grep "cyphernode_props" > /dev/null
+if [ "$?" -eq "1" ]; then
+  # if cyphernode_props table doesn't exist, it's probably because the database hasn't been set up yet
+ trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Creating postgres database..."
+ psql -h postgres -f cyphernode.postgresql -U cyphernode
+ returncode=$?
+ trace_rc ${returncode}
+ [ "${returncode}" -eq "0" ] || exit ${returncode}
+else
+ trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] PostgreSQL database already created, skipping!"
+fi
+
+trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Checking if postgres is loaded/imported..."
+version=$(psql -qAtX -h postgres -U cyphernode -c "select value from cyphernode_props where property='version'")
+returncode=$?
+if [ "${version}" != "0.2" ]; then
+  # if the version property isn't 0.2 yet, the sqlite3 data hasn't been loaded/imported yet
+ trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Extracting and converting sqlite3 data..."
+ cat sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql | sqlite3 $DB_FILE
+ returncode=$?
+ trace_rc ${returncode}
+ [ "${returncode}" -eq "0" ] || exit ${returncode}
+
+ trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Creating import file for postgres..."
+ mv sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql ${DB_PATH}/
+ sed -ie 's/^\(INSERT.*\);$/\1 ON CONFLICT DO NOTHING;/g' ${DB_PATH}/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql
+
+ trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Appending postgresql sequence creation..."
+ echo "
+select setval('cyphernode_props_id_seq', (SELECT MAX(id) FROM cyphernode_props));
+select setval('ln_invoice_id_seq', (SELECT MAX(id) FROM ln_invoice));
+select setval('recipient_id_seq', (SELECT MAX(id) FROM recipient));
+select setval('stamp_id_seq', (SELECT MAX(id) FROM stamp));
+select setval('tx_id_seq', (SELECT MAX(id) FROM tx));
+select setval('watching_by_pub32_id_seq', (SELECT MAX(id) FROM watching_by_pub32));
+select setval('watching_by_txid_id_seq', (SELECT MAX(id) FROM watching_by_txid));
+select setval('watching_id_seq', (SELECT MAX(id) FROM watching));
+select setval('batcher_id_seq', (SELECT MAX(id) FROM batcher));
+update cyphernode_props set value='0.2' where property='version';
+commit;
+" >> ${DB_PATH}/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql
+
+ trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Importing sqlite3 data into postgresql..."
+ psql -v ON_ERROR_STOP=on -h postgres -f ${DB_PATH}/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql -U cyphernode
+ returncode=$?
+ trace_rc ${returncode}
+ [ "${returncode}" -eq "0" ] || exit ${returncode}
+else
+ trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] PostgreSQL database already loaded, skipping!"
+fi
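
The migration is idempotent by construction: database creation is gated on the cyphernode_props table existing, and the data import is gated on the version property rather than on any file's presence, so re-running it after success is a no-op. The gate can be checked by hand with the same psql flags used above (-q quiet, -A unaligned, -t tuples only, -X skip psqlrc):

    version=$(psql -qAtX -h postgres -U cyphernode \
      -c "SELECT value FROM cyphernode_props WHERE property='version'")
    [ "${version}" = "0.2" ] && echo "already imported" || echo "needs import"
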
diff --git a/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql b/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql
new file mode 100644
index 0000000..0291770
--- /dev/null
+++ b/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql
@@ -0,0 +1,26 @@
+.output sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql
+select "BEGIN;";
+.headers on
+.mode insert watching_by_pub32
+select id,pub32,label,derivation_path,callback0conf,callback1conf,last_imported_n,case when watching=1 then 'TRUE' else 'FALSE' end as watching,inserted_ts from watching_by_pub32;
+.mode insert watching
+select id,address,label,case when watching=1 then 'TRUE' else 'FALSE' end as watching,callback0conf,case when calledback0conf=1 then 'TRUE' else 'FALSE' end as calledback0conf,callback1conf,case when calledback1conf=1 then 'TRUE' else 'FALSE' end as calledback1conf,case when imported=1 then 'TRUE' else 'FALSE' end as imported,watching_by_pub32_id,pub32_index,event_message,inserted_ts from watching;
+.mode insert tx
+select id,txid,hash,confirmations,timereceived,fee,size,vsize,case when is_replaceable=1 then 'TRUE' else 'FALSE' end as is_replaceable,blockhash,blockheight,blocktime,conf_target,inserted_ts from tx;
+.mode insert watching_tx
+select * from watching_tx;
+.mode insert batcher
+select * from batcher;
+.mode insert recipient
+select id,address,amount,tx_id,inserted_ts,webhook_url,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,calledback_ts,batcher_id,label from recipient;
+.mode insert watching_by_txid
+select id,txid,case when watching=1 then 'TRUE' else 'FALSE' end as watching,callback1conf,case when calledback1conf=1 then 'TRUE' else 'FALSE' end as calledback1conf,callbackxconf,case when calledbackxconf=1 then 'TRUE' else 'FALSE' end as calledbackxconf,nbxconf,inserted_ts from watching_by_txid;
+.mode insert stamp
+select id,hash,callbackUrl,case when requested=1 then 'TRUE' else 'FALSE' end as requested,case when upgraded=1 then 'TRUE' else 'FALSE' end as upgraded,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,inserted_ts from stamp;
+.mode insert ln_invoice
+select id,label,bolt11,payment_hash,msatoshi,status,pay_index,msatoshi_received,paid_at,description,expires_at,callback_url,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,case when callback_failed=1 then 'TRUE' else 'FALSE' end as callback_failed,inserted_ts from ln_invoice;
+-- cyphernode_props rows were already inserted in db creation, let's update them here
+.headers off
+.mode list cyphernode_props
+select 'update cyphernode_props set value=''' || value || ''', inserted_ts=''' || inserted_ts || ''' where id=' || id || ';' from cyphernode_props;
+.quit
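
The extraction leans on two sqlite3 tricks: .mode insert TABLE makes every following SELECT print as replayable INSERT statements, and the CASE expressions rewrite sqlite's 0/1 flags into the FALSE/TRUE literals that the Postgres boolean columns expect. Reduced to a throwaway table t:

    sqlite3 /tmp/demo.db <<'EOF'
    CREATE TABLE t (id INTEGER, done INTEGER);
    INSERT INTO t VALUES (1, 0), (2, 1);
    .headers on
    .mode insert t
    SELECT id, CASE WHEN done=1 THEN 'TRUE' ELSE 'FALSE' END AS done FROM t;
    EOF
    # prints INSERT INTO t(id,done) VALUES(1,'FALSE'); and so on, ready for psql
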
diff --git a/proxy_docker/app/script/batching.sh b/proxy_docker/app/script/batching.sh
index 427d9a4..33bc770 100644
--- a/proxy_docker/app/script/batching.sh
+++ b/proxy_docker/app/script/batching.sh
@@ -9,6 +9,8 @@ createbatcher() {
# POST http://192.168.111.152:8080/createbatcher
#
+ # Will UPDATE the batcher if it already exists (as per label)
+ #
# args:
# - batcherLabel, optional, id can be used to reference the batcher
# - confTarget, optional, overridden by batchspend's confTarget, default Bitcoin Core conf_target will be used if not supplied
@@ -22,7 +24,8 @@ createbatcher() {
local request=${1}
local response
- local label=$(echo "${request}" | jq ".batcherLabel")
+ local returncode
+ local label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[createbatcher] label=${label}"
local conf_target=$(echo "${request}" | jq ".confTarget")
trace "[createbatcher] conf_target=${conf_target}"
@@ -37,13 +40,20 @@ createbatcher() {
local batcher_id
- batcher_id=$(sql "INSERT OR IGNORE INTO batcher (label, conf_target, feerate) VALUES (${label}, ${conf_target}, ${feerate}); SELECT LAST_INSERT_ROWID();")
+ batcher_id=$(sql "INSERT INTO batcher (label, conf_target, feerate)"\
+" VALUES ('${label}', ${conf_target}, ${feerate})"\
+" ON CONFLICT (label) DO"\
+" UPDATE SET conf_target=${conf_target}, feerate=${feerate}"\
+" RETURNING id" \
+ "SELECT id FROM batcher WHERE label='${label}'")
+ returncode=$?
+ trace_rc ${returncode}
- if ("${batcher_id}" -eq "0"); then
+ if [ "${returncode}" -ne "0" ]; then
trace "[createbatcher] Could not insert"
- response='{"result":null,"error":{"code":-32700,"message":"Could not create batcher, label probably already exists","data":'${request}'}}'
+ response='{"result":null,"error":{"code":-32700,"message":"Could not create/update batcher","data":'${request}'}}'
else
- trace "[createbatcher] Inserted"
+ trace "[createbatcher] Inserted or updated, response=${batcher_id}"
response='{"result":{"batcherId":'${batcher_id}'},"error":null}'
fi
@@ -79,7 +89,7 @@ updatebatcher() {
local id=$(echo "${request}" | jq ".batcherId")
trace "[updatebatcher] id=${id}"
- local label=$(echo "${request}" | jq ".batcherLabel")
+ local label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[updatebatcher] label=${label}"
local conf_target=$(echo "${request}" | jq ".confTarget")
trace "[updatebatcher] conf_target=${conf_target}"
@@ -99,12 +109,12 @@ updatebatcher() {
# fi
if [ "${id}" = "null" ]; then
- whereclause="label=${label}"
+ whereclause="label='${label}'"
else
whereclause="id = ${id}"
fi
- sql "UPDATE batcher set label=${label}, conf_target=${conf_target}, feerate=${feerate} WHERE ${whereclause}"
+ sql "UPDATE batcher set label='${label}', conf_target=${conf_target}, feerate=${feerate} WHERE ${whereclause}"
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -ne 0 ]; then
@@ -151,13 +161,13 @@ addtobatch() {
trace "[addtobatch] address=${address}"
local amount=$(echo "${request}" | jq ".amount")
trace "[addtobatch] amount=${amount}"
- local label=$(echo "${request}" | jq ".outputLabel")
+ local label=$(echo "${request}" | jq -r ".outputLabel")
trace "[addtobatch] label=${label}"
local batcher_id=$(echo "${request}" | jq ".batcherId")
trace "[addtobatch] batcher_id=${batcher_id}"
- local batcher_label=$(echo "${request}" | jq ".batcherLabel")
+ local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[addtobatch] batcher_label=${batcher_label}"
- local webhook_url=$(echo "${request}" | jq ".webhookUrl")
+ local webhook_url=$(echo "${request}" | jq -r ".webhookUrl")
trace "[addtobatch] webhook_url=${webhook_url}"
# Let's lowercase bech32 addresses
@@ -185,7 +195,7 @@ addtobatch() {
if [ "${batcher_id}" = "null" ]; then
# Using batcher_label
- batcher_id=$(sql "SELECT id FROM batcher WHERE label=${batcher_label}")
+ batcher_id=$(sql "SELECT id FROM batcher WHERE label='${batcher_label}'")
returncode=$?
trace_rc ${returncode}
fi
@@ -195,7 +205,7 @@ addtobatch() {
response='{"result":null,"error":{"code":-32700,"message":"batcher not found","data":'${request}'}}'
else
# Check if address already pending for this batcher...
- inserted_id=$(sql "SELECT id FROM recipient WHERE LOWER(address)=LOWER(\"${address}\") AND tx_id IS NULL AND batcher_id=${batcher_id}")
+ inserted_id=$(sql "SELECT id FROM recipient WHERE LOWER(address)=LOWER('${address}') AND tx_id IS NULL AND batcher_id=${batcher_id}")
returncode=$?
trace_rc ${returncode}
@@ -211,7 +221,9 @@ addtobatch() {
fi
# Insert the new destination
- inserted_id=$(sql "INSERT INTO recipient (address, amount, webhook_url, batcher_id, label) VALUES (\"${address}\", ${amount}, ${webhook_url}, ${batcher_id}, ${label}); SELECT LAST_INSERT_ROWID();")
+ inserted_id=$(sql "INSERT INTO recipient (address, amount, webhook_url, batcher_id, label)"\
+" VALUES ('${address}', ${amount}, '${webhook_url}', ${batcher_id}, '${label}')"\
+" RETURNING id")
returncode=$?
trace_rc ${returncode}
@@ -280,7 +292,7 @@ removefrombatch() {
if [ "${returncode}" -ne 0 ]; then
response='{"result":null,"error":{"code":-32700,"message":"Output was not removed","data":'${request}'}}'
else
- row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}")
+ row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}")
returncode=$?
trace_rc ${returncode}
@@ -336,7 +348,7 @@ batchspend() {
local batcher_id=$(echo "${request}" | jq ".batcherId")
trace "[batchspend] batcher_id=${batcher_id}"
- local batcher_label=$(echo "${request}" | jq ".batcherLabel")
+ local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[batchspend] batcher_label=${batcher_label}"
local conf_target=$(echo "${request}" | jq ".confTarget")
trace "[batchspend] conf_target=${conf_target}"
@@ -351,7 +363,7 @@ batchspend() {
if [ "${batcher_id}" = "null" ]; then
# Using batcher_label
- whereclause="label=${batcher_label}"
+ whereclause="label='${batcher_label}'"
else
whereclause="id=${batcher_id}"
fi
@@ -423,11 +435,11 @@ batchspend() {
trace "[batchspend] webhook_url=${webhook_url}"
if [ -z "${recipientsjson}" ]; then
- whereclause="\"${recipient_id}\""
+ whereclause="${recipient_id}"
recipientsjson="\"${address}\":${amount}"
webhooks_data="{\"outputId\":${recipient_id},\"address\":\"${address}\",\"amount\":${amount},\"webhookUrl\":\"${webhook_url}\"}"
else
- whereclause="${whereclause},\"${recipient_id}\""
+ whereclause="${whereclause},${recipient_id}"
recipientsjson="${recipientsjson},\"${address}\":${amount}"
webhooks_data="${webhooks_data},{\"outputId\":${recipient_id},\"address\":\"${address}\",\"amount\":${amount},\"webhookUrl\":\"${webhook_url}\"}"
fi
@@ -452,7 +464,7 @@ batchspend() {
tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n')
# Amounts and fees are negative when spending so we absolute those fields
- local tx_hash=$(echo "${tx_raw_details}" | jq '.result.hash')
+ local tx_hash=$(echo "${tx_raw_details}" | jq -r '.result.hash')
local tx_ts_firstseen=$(echo "${tx_details}" | jq '.result.timereceived')
local tx_amount=$(echo "${tx_details}" | jq '.result.amount | fabs' | awk '{ printf "%.8f", $0 }')
local tx_size=$(echo "${tx_raw_details}" | jq '.result.size')
@@ -462,25 +474,20 @@ batchspend() {
tx_replaceable=$([ "${tx_replaceable}" = "yes" ] && echo "true" || echo "false")
trace "[batchspend] tx_replaceable=${tx_replaceable}"
local fees=$(echo "${tx_details}" | jq '.result.fee | fabs' | awk '{ printf "%.8f", $0 }')
- # Sometimes raw tx are too long to be passed as paramater, so let's write
- # it to a temp file for it to be read by sqlite3 and then delete the file
- echo "${tx_raw_details}" > batchspend-rawtx-${txid}-$$.blob
# Get the info on the batch before setting it to done
- row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}")
+ row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}")
returncode=$?
trace_rc ${returncode}
# Let's insert the txid in our little DB -- then we'll already have it when receiving confirmation
- sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target, raw_tx) VALUES (\"${txid}\", ${tx_hash}, 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target}, readfile('batchspend-rawtx-${txid}-$$.blob'))"
- trace_rc $?
- id_inserted=$(sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target) VALUES (\"${txid}\", ${tx_hash}, 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target}); SELECT LAST_INSERT_ROWID();")
+ id_inserted=$(sql "INSERT INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target)"\
+" VALUES ('${txid}', '${tx_hash}', 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target})"\
+" RETURNING id" \
+"SELECT id FROM tx WHERE txid='${txid}'")
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then
- if [ "${id_inserted}" -eq 0 ]; then
- id_inserted=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"")
- fi
trace "[batchspend] id_inserted: ${id_inserted}"
sql "UPDATE recipient SET tx_id=${id_inserted} WHERE id IN (${whereclause})"
trace_rc $?
@@ -495,13 +502,10 @@ batchspend() {
trace "[batchspend] total=${total}"
response='{"result":{"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total}
- response="${response},\"status\":\"accepted\",\"txid\":\"${txid}\",\"hash\":${tx_hash},\"details\":{\"firstseen\":${tx_ts_firstseen},\"size\":${tx_size},\"vsize\":${tx_vsize},\"replaceable\":${tx_replaceable},\"fee\":${fees}},\"outputs\":[${webhooks_data}]}"
+ response="${response},\"status\":\"accepted\",\"txid\":\"${txid}\",\"hash\":\"${tx_hash}\",\"details\":{\"firstseen\":${tx_ts_firstseen},\"size\":${tx_size},\"vsize\":${tx_vsize},\"replaceable\":${tx_replaceable},\"fee\":${fees}},\"outputs\":[${webhooks_data}]}"
response="${response},\"error\":null}"
- # Delete the temp file containing the raw tx (see above)
- rm batchspend-rawtx-${txid}-$$.blob
-
- batch_webhooks "[${webhooks_data}]" '"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total}',"status":"accepted","txid":"'${txid}'","hash":'${tx_hash}',"details":{"firstseen":'${tx_ts_firstseen}',"size":'${tx_size}',"vsize":'${tx_vsize}',"replaceable":'${tx_replaceable}',"fee":'${fees}'}'
+ batch_webhooks "[${webhooks_data}]" '"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total}',"status":"accepted","txid":"'${txid}'","hash":"'${tx_hash}'","details":{"firstseen":'${tx_ts_firstseen}',"size":'${tx_size}',"vsize":'${tx_vsize}',"replaceable":'${tx_replaceable}',"fee":'${fees}'}'
else
local message=$(echo "${data}" | jq -e ".error.message")
@@ -536,7 +540,7 @@ batch_check_webhooks() {
local total
local tx_id
- local batching=$(sql "SELECT address, amount, r.id, webhook_url, b.id, t.txid, t.hash, t.timereceived, t.fee, t.size, t.vsize, t.is_replaceable, t.conf_target, t.id FROM recipient r, batcher b, tx t WHERE r.batcher_id=b.id AND r.tx_id=t.id AND NOT calledback AND tx_id IS NOT NULL AND webhook_url IS NOT NULL")
+ local batching=$(sql "SELECT address, amount, r.id, webhook_url, b.id, t.txid, t.hash, t.timereceived, t.fee, t.size, t.vsize, t.is_replaceable::text, t.conf_target, t.id FROM recipient r, batcher b, tx t WHERE r.batcher_id=b.id AND r.tx_id=t.id AND NOT calledback AND tx_id IS NOT NULL AND webhook_url IS NOT NULL")
trace "[batch_check_webhooks] batching=${batching}"
local IFS=$'\n'
@@ -566,7 +570,6 @@ batch_check_webhooks() {
tx_vsize=$(echo "${row}" | cut -d '|' -f11)
trace "[batch_check_webhooks] tx_vsize=${tx_vsize}"
tx_replaceable=$(echo "${row}" | cut -d '|' -f12)
- tx_replaceable=$([ "${tx_replaceable}" -eq "1" ] && echo "true" || echo "false")
trace "[batch_check_webhooks] tx_replaceable=${tx_replaceable}"
conf_target=$(echo "${row}" | cut -d '|' -f13)
trace "[batch_check_webhooks] conf_target=${conf_target}"
@@ -578,7 +581,7 @@ batch_check_webhooks() {
# I know this query for each output is not very efficient, but this function should not execute often, only in case of
# failed callbacks on batches...
# Get the info on the batch
- row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient r WHERE tx_id=\"${tx_id}\"")
+ row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient r WHERE tx_id='${tx_id}'")
# Use the selected row above
count=$(echo "${row}" | cut -d '|' -f1)
@@ -654,8 +657,13 @@ batch_webhooks() {
fi
done
- sql "UPDATE recipient SET calledback=1, calledback_ts=CURRENT_TIMESTAMP WHERE id IN (${successful_recipient_ids})"
- trace_rc $?
+ if [ -n "${successful_recipient_ids}" ]; then
+ trace "[batch_webhooks] We have successful callbacks, let's update the db..."
+ sql "UPDATE recipient SET calledback=true, calledback_ts=CURRENT_TIMESTAMP WHERE id IN (${successful_recipient_ids})"
+ trace_rc $?
+ else
+ trace "[batch_webhooks] We don't have successful callbacks, no need to update the db!"
+ fi
}
listbatchers() {
@@ -671,7 +679,7 @@ listbatchers() {
# "error":null}
- local batchers=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL GROUP BY b.id")
+ local batchers=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL GROUP BY b.id ORDER BY b.id")
trace "[listbatchers] batchers=${batchers}"
local returncode
@@ -717,7 +725,7 @@ getbatcher() {
local batcher_id=$(echo "${request}" | jq ".batcherId")
trace "[getbatcher] batcher_id=${batcher_id}"
- local batcher_label=$(echo "${request}" | jq ".batcherLabel")
+ local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[getbatcher] batcher_label=${batcher_label}"
if [ "${batcher_id}" = "null" ] && [ "${batcher_label}" = "null" ]; then
@@ -728,13 +736,13 @@ getbatcher() {
if [ "${batcher_id}" = "null" ]; then
# Using batcher_label
- whereclause="b.label=${batcher_label}"
+ whereclause="b.label='${batcher_label}'"
else
# Using batcher_id
whereclause="b.id=${batcher_id}"
fi
- batcher=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL WHERE ${whereclause} GROUP BY b.id")
+ batcher=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL WHERE ${whereclause} GROUP BY b.id")
trace "[getbatcher] batcher=${batcher}"
if [ -n "${batcher}" ]; then
@@ -797,9 +805,9 @@ getbatchdetails() {
local batcher_id=$(echo "${request}" | jq ".batcherId")
trace "[getbatchdetails] batcher_id=${batcher_id}"
- local batcher_label=$(echo "${request}" | jq ".batcherLabel")
+ local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[getbatchdetails] batcher_label=${batcher_label}"
- local txid=$(echo "${request}" | jq ".txid")
+ local txid=$(echo "${request}" | jq -r ".txid")
trace "[getbatchdetails] txid=${txid}"
if [ "${batcher_id}" = "null" ] && [ "${batcher_label}" = "null" ]; then
@@ -810,7 +818,7 @@ getbatchdetails() {
if [ "${batcher_id}" = "null" ]; then
# Using batcher_label
- whereclause="b.label=${batcher_label}"
+ whereclause="b.label='${batcher_label}'"
else
# Using batcher_id
whereclause="b.id=${batcher_id}"
@@ -818,7 +826,7 @@ getbatchdetails() {
if [ "${txid}" != "null" ]; then
# Using txid
- whereclause="${whereclause} AND t.txid=${txid}"
+ whereclause="${whereclause} AND t.txid='${txid}'"
else
# null txid
whereclause="${whereclause} AND t.txid IS NULL"
@@ -826,7 +834,7 @@ getbatchdetails() {
fi
# First get the batch summary
- batch=$(sql "SELECT b.id, COALESCE(t.id, NULL), '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || b.conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id ${outerclause} LEFT JOIN tx t ON t.id=r.tx_id WHERE ${whereclause} GROUP BY b.id")
+ batch=$(sql "SELECT b.id, COALESCE(t.id, NULL), '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || b.conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' || COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' || COALESCE(SUM(amount), 0.00000000) FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id ${outerclause} LEFT JOIN tx t ON t.id=r.tx_id WHERE ${whereclause} GROUP BY b.id, t.id")
trace "[getbatchdetails] batch=${batch}"
if [ -n "${batch}" ]; then
@@ -839,7 +847,7 @@ getbatchdetails() {
# Using txid
outerclause="AND r.tx_id=${tx_id}"
- tx=$(sql "SELECT '\"txid\":\"' || txid || '\",\"hash\":\"' || hash || '\",\"details\":{\"firstseen\":' || timereceived || ',\"size\":' || size || ',\"vsize\":' || vsize || ',\"replaceable\":' || CASE is_replaceable WHEN 1 THEN 'true' ELSE 'false' END || ',\"fee\":' || fee || '}' FROM tx WHERE id=${tx_id}")
+ tx=$(sql "SELECT '\"txid\":\"' || txid || '\",\"hash\":\"' || hash || '\",\"details\":{\"firstseen\":' || timereceived || ',\"size\":' || size || ',\"vsize\":' || vsize || ',\"replaceable\":' || is_replaceable || ',\"fee\":' || fee || '}' FROM tx WHERE id=${tx_id}")
else
# null txid
outerclause="AND r.tx_id IS NULL"
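
The recurring rewrite in this file collapses sqlite's two-step INSERT OR IGNORE plus LAST_INSERT_ROWID() into a single PostgreSQL statement: ON CONFLICT upserts in place and RETURNING id hands the row id back in the same round trip. Stripped to its shape against the batcher table from the schema above (the lowfee label is made up):

    psql -qAtX -h postgres -U cyphernode -c "
      INSERT INTO batcher (label, conf_target, feerate)
      VALUES ('lowfee', 32, NULL)
      ON CONFLICT (label) DO UPDATE SET conf_target=32, feerate=NULL
      RETURNING id"
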
diff --git a/proxy_docker/app/script/bitcoin.sh b/proxy_docker/app/script/bitcoin.sh
index e851eef..ec47ea2 100644
--- a/proxy_docker/app/script/bitcoin.sh
+++ b/proxy_docker/app/script/bitcoin.sh
@@ -78,6 +78,14 @@ convert_pub32() {
local checksum
local pub32_dest
+ case "${pub32_from}" in
+ ${to_type}*)
+ trace "[convert_pub32] Already in the right format, exiting"
+ echo "${pub32_from}"
+ return
+ ;;
+ esac
+
case "${to_type}" in
tpub)
versionbytes="043587cf"
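
The early return relies on shell case globbing for a prefix test: the pattern ${to_type}* matches any key that already begins with the requested prefix, so a tpub asked to become a tpub is echoed back unchanged. The same check standalone, with made-up values:

    to_type=tpub
    pub32_from=tpubDemo123
    case "${pub32_from}" in
      "${to_type}"*) echo "already a ${to_type}, nothing to convert" ;;
      *)             echo "conversion needed" ;;
    esac
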
diff --git a/proxy_docker/app/script/call_lightningd.sh b/proxy_docker/app/script/call_lightningd.sh
index ecfbdb1..45793d1 100644
--- a/proxy_docker/app/script/call_lightningd.sh
+++ b/proxy_docker/app/script/call_lightningd.sh
@@ -8,8 +8,8 @@ ln_call_lightningd() {
local response
local returncode
- trace "[ln_call_lightningd] ./lightning-cli $@"
- response=$(./lightning-cli $@)
+ trace "[ln_call_lightningd] ./lightning-cli $(printf " \"%s\"" "$@")"
+ response=$(./lightning-cli "$@")
returncode=$?
trace_rc ${returncode}
@@ -39,7 +39,7 @@ ln_create_invoice() {
if [ "${callback_url}" != "null" ]; then
# If not null, let's add double-quotes so we don't need to add the double-quotes in the sql insert,
# so if it's null, it will insert the actual sql NULL value.
- callback_url="\"${callback_url}\""
+ callback_url="'${callback_url}'"
fi
#/proxy $ ./lightning-cli invoice 10000 "t1" "t1d" 60
@@ -71,36 +71,33 @@ ln_create_invoice() {
# Let's get the connect string if provided in configuration
local connectstring=$(get_connection_string)
- if [ "${msatoshi}" = "null" ]; then
- sql "INSERT OR IGNORE INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, description, status) VALUES (\"${label}\", \"${bolt11}\", ${callback_url}, \"${payment_hash}\", ${expires_at}, \"${description}\", \"unpaid\")"
- else
- sql "INSERT OR IGNORE INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, msatoshi, description, status) VALUES (\"${label}\", \"${bolt11}\", ${callback_url}, \"${payment_hash}\", ${expires_at}, ${msatoshi}, \"${description}\", \"unpaid\")"
- fi
- trace_rc $?
- id=$(sql "SELECT id FROM ln_invoice WHERE bolt11=\"${bolt11}\"")
+ id=$(sql "INSERT INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, msatoshi, description, status)"\
+" VALUES ('${label}','${bolt11}', ${callback_url},'${payment_hash}', ${expires_at}, ${msatoshi}, '${description}', 'unpaid')"\
+" RETURNING id" \
+ "SELECT id FROM ln_invoice WHERE bolt11='${bolt11}'")
trace_rc $?
# {
- # "id":"",
+ # "id":123,
# "label":"",
# "bolt11":"",
# "connectstring":"",
# "callbackUrl":"",
# "payment_hash":"",
- # "msatoshi":,
+ # "msatoshi":123456,
# "status":"unpaid",
# "description":"",
- # "expires_at":
+ # "expires_at":21312312
# }
- data="{\"id\":\"${id}\","
+ data="{\"id\":${id},"
data="${data}\"label\":\"${label}\","
data="${data}\"bolt11\":\"${bolt11}\","
if [ -n "${connectstring}" ]; then
data="${data}\"connectstring\":\"${connectstring}\","
fi
if [ "${callback_url}" != "null" ]; then
- data="${data}\"callbackUrl\":${callback_url},"
+      data="${data}\"callbackUrl\":\"$(echo "${callback_url}" | tr -d "'")\","  # strip the SQL quoting added above
fi
data="${data}\"payment_hash\":\"${payment_hash}\","
if [ "${msatoshi}" != "null" ]; then
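
The quoting fix on the lightning-cli call matters whenever an argument carries spaces: an unquoted $@ re-splits every argument on whitespace, while "$@" preserves the original word boundaries, which is exactly what the printf in the new trace line makes visible. A quick demonstration with a hypothetical show helper:

    #!/bin/sh
    show() { printf ' "%s"' "$@"; echo; }

    set -- invoice 10000 "label 1" "two words"
    show $@     # "invoice" "10000" "label" "1" "two" "words"   (re-split)
    show "$@"   # "invoice" "10000" "label 1" "two words"       (preserved)
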
diff --git a/proxy_docker/app/script/callbacks_job.sh b/proxy_docker/app/script/callbacks_job.sh
index 5ad03ea..1091445 100644
--- a/proxy_docker/app/script/callbacks_job.sh
+++ b/proxy_docker/app/script/callbacks_job.sh
@@ -10,8 +10,17 @@ do_callbacks() {
trace "Entering do_callbacks()..."
+  # If called because we received a confirmation for a specific txid, let's only
+  # process the callbacks related to that txid...
+ local txid=${1}
+ local txid_where
+ if [ -n "${txid}" ]; then
+ trace "[do_callbacks] txid=${txid}"
+ txid_where=" AND txid='${txid}'"
+ fi
+
# Let's fetch all the watching addresses still being watched but not called back
- local callbacks=$(sql 'SELECT DISTINCT w.callback0conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable, pub32_index, pub32, w.label, derivation_path, event_message, hash FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx ON tx.id = tx_id LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE NOT calledback0conf AND watching_id NOT NULL AND w.callback0conf NOT NULL AND w.watching')
+ local callbacks=$(sql "SELECT DISTINCT w.callback0conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable::text, pub32_index, pub32, w.label, derivation_path, event_message, hash FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx ON tx.id = tx_id LEFT JOIN watching_by_pub32 w32 ON w.watching_by_pub32_id = w32.id WHERE NOT calledback0conf AND watching_id IS NOT NULL AND w.callback0conf IS NOT NULL AND w.watching${txid_where}")
trace "[do_callbacks] callbacks0conf=${callbacks}"
local returncode
@@ -25,12 +34,12 @@ do_callbacks() {
trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then
address=$(echo "${row}" | cut -d '|' -f2)
- sql "UPDATE watching SET calledback0conf=1 WHERE address=\"${address}\""
+ sql "UPDATE watching SET calledback0conf=true WHERE address='${address}'"
trace_rc $?
fi
done
- callbacks=$(sql 'SELECT DISTINCT w.callback1conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable, pub32_index, pub32, w.label, derivation_path, event_message, hash FROM watching w, watching_tx wt, tx t LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE w.id = watching_id AND tx_id = t.id AND NOT calledback1conf AND confirmations>0 AND w.callback1conf NOT NULL AND w.watching')
+ callbacks=$(sql "SELECT DISTINCT w.callback1conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable::text, pub32_index, pub32, w.label, derivation_path, event_message, hash FROM watching w JOIN watching_tx wt ON w.id = wt.watching_id JOIN tx t ON wt.tx_id = t.id LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE NOT calledback1conf AND confirmations>0 AND w.callback1conf IS NOT NULL AND w.watching${txid_where}")
trace "[do_callbacks] callbacks1conf=${callbacks}"
for row in ${callbacks}
@@ -39,19 +48,25 @@ do_callbacks() {
returncode=$?
if [ "${returncode}" -eq 0 ]; then
address=$(echo "${row}" | cut -d '|' -f2)
- sql "UPDATE watching SET calledback1conf=1, watching=0 WHERE address=\"${address}\""
+ sql "UPDATE watching SET calledback1conf=true, watching=false WHERE address='${address}'"
trace_rc $?
fi
done
-  callbacks=$(sql "SELECT id, label, bolt11, callback_url, payment_hash, msatoshi, status, pay_index, msatoshi_received, paid_at, description, expires_at FROM ln_invoice WHERE NOT calledback AND callback_failed")
-  trace "[do_callbacks] ln_callbacks=${callbacks}"
-  for row in ${callbacks}
-  do
-    ln_manage_callback ${row}
-    trace_rc $?
-  done
+  if [ -z "${txid}" ]; then
+    trace "[do_callbacks] Processing LN callbacks..."
+    callbacks=$(sql "SELECT id, label, bolt11, callback_url, payment_hash, msatoshi, status, pay_index, msatoshi_received, paid_at, description, expires_at FROM ln_invoice WHERE NOT calledback AND callback_failed")
+    trace "[do_callbacks] ln_callbacks=${callbacks}"
+
+    for row in ${callbacks}
+    do
+      ln_manage_callback ${row}
+      trace_rc $?
+    done
+  else
+    trace "[do_callbacks] called for a specific txid, skipping LN callbacks"
+  fi
) 200>./.callbacks.lock
}
@@ -70,7 +85,7 @@ ln_manage_callback() {
if [ -z "${callback_url}" ]; then
# No callback url provided for that invoice
trace "[ln_manage_callback] No callback url provided for that invoice"
- sql "UPDATE ln_invoice SET calledback=1 WHERE id=\"${id}\""
+ sql "UPDATE ln_invoice SET calledback=true WHERE id=${id}"
trace_rc $?
return
fi
@@ -112,7 +127,7 @@ ln_manage_callback() {
# "expires_at":
# }
- data="{\"id\":\"${id}\","
+ data="{\"id\":${id},"
data="${data}\"label\":\"${label}\","
data="${data}\"bolt11\":\"${bolt11}\","
data="${data}\"callback_url\":\"${callback_url}\","
@@ -132,11 +147,11 @@ ln_manage_callback() {
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then
- sql "UPDATE ln_invoice SET calledback=1 WHERE id=\"${id}\""
+ sql "UPDATE ln_invoice SET calledback=true WHERE id=${id}"
trace_rc $?
else
trace "[ln_manage_callback] callback failed: ${callback_url}"
- sql "UPDATE ln_invoice SET callback_failed=1 WHERE id=\"${id}\""
+ sql "UPDATE ln_invoice SET callback_failed=true WHERE id=${id}"
trace_rc $?
fi
@@ -212,7 +227,6 @@ build_callback() {
vsize=$(echo "${row}" | cut -d '|' -f10)
trace "[build_callback] vsize=${vsize}"
is_replaceable=$(echo "${row}" | cut -d '|' -f15)
- is_replaceable=$([ "${is_replaceable}" -eq "1" ] && echo "true" || echo "false")
trace "[build_callback] is_replaceable=${is_replaceable}"
blockhash=$(echo "${row}" | cut -d '|' -f11)
trace "[build_callback] blockhash=${blockhash}"
@@ -234,7 +248,7 @@ build_callback() {
event_message=$(echo "${row}" | cut -d '|' -f20)
trace "[build_callback] event_message=${event_message}"
- data="{\"id\":\"${id}\","
+ data="{\"id\":${id},"
data="${data}\"address\":\"${address}\","
data="${data}\"txid\":\"${txid}\","
data="${data}\"hash\":\"${hash}\","
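
Two PostgreSQL idioms recur in these queries: flag updates now assign true/false instead of sqlite's 1/0, and is_replaceable is selected as is_replaceable::text so the pipe-delimited row already carries the literal true/false that the JSON callback payload needs, which is why the old 1-to-true translation lines could be dropped above. Roughly:

    psql -qAtX -h postgres -U cyphernode \
      -c "SELECT is_replaceable, is_replaceable::text FROM tx LIMIT 1"
    # unaligned boolean output prints t/f, but the ::text cast yields
    # true/false, safe to splice into the callback JSON as-is
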
diff --git a/proxy_docker/app/script/callbacks_txid.sh b/proxy_docker/app/script/callbacks_txid.sh
index 7ef7823..f442fd3 100644
--- a/proxy_docker/app/script/callbacks_txid.sh
+++ b/proxy_docker/app/script/callbacks_txid.sh
@@ -9,8 +9,10 @@ do_callbacks_txid() {
trace "Entering do_callbacks_txid()..."
+  # Let's check the 1-conf (newly mined) watched txids that are included in the new block...
+
# Let's fetch all the watching txid still being watched but not called back
- local callbacks=$(sql 'SELECT id, txid, callback1conf, 1 FROM watching_by_txid WHERE watching AND callback1conf NOT NULL AND NOT calledback1conf')
+ local callbacks=$(sql "SELECT id, txid, callback1conf, 1 FROM watching_by_txid WHERE watching AND callback1conf IS NOT NULL AND NOT calledback1conf")
trace "[do_callbacks_txid] callbacks1conf=${callbacks}"
local returncode
@@ -25,14 +27,16 @@ do_callbacks_txid() {
trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then
id=$(echo "${row}" | cut -d '|' -f1)
- sql "UPDATE watching_by_txid SET calledback1conf=1 WHERE id=\"${id}\""
+ sql "UPDATE watching_by_txid SET calledback1conf=true WHERE id=${id}"
trace_rc $?
else
trace "[do_callbacks_txid] callback returncode has error, we don't flag as calledback yet."
fi
done
- local callbacks=$(sql 'SELECT id, txid, callbackxconf, nbxconf FROM watching_by_txid WHERE watching AND calledback1conf AND callbackxconf NOT NULL AND NOT calledbackxconf')
+ # For the n-conf, let's only check the watched txids that are already at least 1-conf...
+
+ local callbacks=$(sql "SELECT id, txid, callbackxconf, nbxconf FROM watching_by_txid WHERE watching AND calledback1conf AND callbackxconf IS NOT NULL AND NOT calledbackxconf")
trace "[do_callbacks_txid] callbacksxconf=${callbacks}"
for row in ${callbacks}
@@ -42,7 +46,7 @@ do_callbacks_txid() {
trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then
id=$(echo "${row}" | cut -d '|' -f1)
- sql "UPDATE watching_by_txid SET calledbackxconf=1, watching=0 WHERE id=\"${id}\""
+ sql "UPDATE watching_by_txid SET calledbackxconf=true, watching=false WHERE id=${id}"
trace_rc $?
else
trace "[do_callbacks_txid] callback returncode has error, we don't flag as calledback yet."
diff --git a/proxy_docker/app/script/computefees.sh b/proxy_docker/app/script/computefees.sh
index 04d2dee..4f57874 100644
--- a/proxy_docker/app/script/computefees.sh
+++ b/proxy_docker/app/script/computefees.sh
@@ -11,7 +11,7 @@ compute_fees() {
trace "[compute_fees] pruned=${pruned}"
# We want null instead of 0.00000000 in this case.
echo "null"
- exit 0
+ return
fi
local txid=${1}
@@ -64,16 +64,10 @@ compute_vin_total_amount()
for vin_txid_vout in ${vin_txids_vout}
do
vin_txid=$(echo "${vin_txid_vout}" | tr -d '"' | cut -d '-' -f1)
- # Check if we already have the tx in our DB
- vin_raw_tx=$(sql_rawtx "SELECT raw_tx FROM rawtx WHERE txid=\"${vin_txid}\"")
- trace_rc $?
- if [ -z "${vin_raw_tx}" ]; then
- txid_already_inserted=false
- vin_raw_tx=$(get_rawtransaction "${vin_txid}" | tr -d '\n')
- returncode=$?
- if [ "${returncode}" -ne 0 ]; then
- return ${returncode}
- fi
+ vin_raw_tx=$(get_rawtransaction "${vin_txid}" | tr -d '\n')
+ returncode=$?
+ if [ "${returncode}" -ne 0 ]; then
+ return ${returncode}
fi
vout=$(echo "${vin_txid_vout}" | tr -d '"' | cut -d '-' -f2)
trace "[compute_vin_total_amount] vout=${vout}"
@@ -81,27 +75,21 @@ compute_vin_total_amount()
trace "[compute_vin_total_amount] vin_vout_amount=${vin_vout_amount}"
vin_total_amount=$(awk "BEGIN { printf(\"%.8f\", ${vin_total_amount}+${vin_vout_amount}); exit}")
trace "[compute_vin_total_amount] vin_total_amount=${vin_total_amount}"
- vin_hash=$(echo "${vin_raw_tx}" | jq ".result.hash")
+ vin_hash=$(echo "${vin_raw_tx}" | jq -r ".result.hash")
vin_confirmations=$(echo "${vin_raw_tx}" | jq ".result.confirmations")
vin_timereceived=$(echo "${vin_raw_tx}" | jq ".result.time")
vin_size=$(echo "${vin_raw_tx}" | jq ".result.size")
vin_vsize=$(echo "${vin_raw_tx}" | jq ".result.vsize")
- vin_blockhash=$(echo "${vin_raw_tx}" | jq ".result.blockhash")
+ vin_blockhash=$(echo "${vin_raw_tx}" | jq -r ".result.blockhash")
vin_blockheight=$(echo "${vin_raw_tx}" | jq ".result.blockheight")
vin_blocktime=$(echo "${vin_raw_tx}" | jq ".result.blocktime")
# Let's insert the vin tx in the DB just in case it would be useful
- if ! ${txid_already_inserted}; then
- # Sometimes raw tx are too long to be passed as paramater, so let's write
- # it to a temp file for it to be read by sqlite3 and then delete the file
- echo "${vin_raw_tx}" > vin-rawtx-${vin_txid}-$$.blob
- sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime) VALUES (\"${vin_txid}\", ${vin_hash}, ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, ${vin_blockhash}, ${vin_blockheight}, ${vin_blocktime})"
- trace_rc $?
- sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime, raw_tx) VALUES (\"${vin_txid}\", ${vin_hash}, ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, ${vin_blockhash}, ${vin_blockheight}, ${vin_blocktime}, readfile('vin-rawtx-${vin_txid}-$$.blob'))"
- trace_rc $?
- rm vin-rawtx-${vin_txid}-$$.blob
- txid_already_inserted=true
- fi
+ sql "INSERT INTO tx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime)"\
+" VALUES ('${vin_txid}', '${vin_hash}', ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, '${vin_blockhash}', ${vin_blockheight}, ${vin_blocktime})"\
+" ON CONFLICT (txid) DO"\
+" UPDATE SET blockhash='${vin_blockhash}', blockheight=${vin_blockheight}, blocktime=${vin_blocktime}, confirmations=${vin_confirmations}"
+ trace_rc $?
done
echo "${vin_total_amount}"
diff --git a/proxy_docker/app/script/confirmation.sh b/proxy_docker/app/script/confirmation.sh
index 4dd81d0..534e84d 100644
--- a/proxy_docker/app/script/confirmation.sh
+++ b/proxy_docker/app/script/confirmation.sh
@@ -44,7 +44,7 @@ confirmation() {
# First of all, let's make sure we're working on watched addresses...
local address
local addresseswhere
- local addresses=$(echo "${tx_details}" | jq ".result.details[].address")
+ local addresses=$(echo "${tx_details}" | jq -r ".result.details[].address")
local notfirst=false
local IFS=$'\n'
@@ -53,9 +53,9 @@ confirmation() {
trace "[confirmation] address=${address}"
if ${notfirst}; then
- addresseswhere="${addresseswhere},${address}"
+ addresseswhere="${addresseswhere},'${address}'"
else
- addresseswhere="${address}"
+ addresseswhere="'${address}'"
notfirst=true
fi
done
@@ -66,11 +66,11 @@ confirmation() {
fi
########################################################################################################
- local tx=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"")
+ local tx=$(sql "SELECT id FROM tx WHERE txid='${txid}'")
local id_inserted
local tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n')
local tx_nb_conf=$(echo "${tx_details}" | jq -r '.result.confirmations // 0')
- local tx_hash=$(echo "${tx_raw_details}" | jq '.result.hash')
+ local tx_hash=$(echo "${tx_raw_details}" | jq -r '.result.hash')
# Sometimes raw tx are too long to be passed as a parameter, so let's write
# it to a temp file for it to be read by sqlite3 and then delete the file
@@ -100,45 +100,33 @@ confirmation() {
local tx_blocktime=null
if [ "${tx_nb_conf}" -gt "0" ]; then
trace "[confirmation] tx_nb_conf=${tx_nb_conf}"
- tx_blockhash=$(echo "${tx_details}" | jq '.result.blockhash')
- tx_blockheight=$(get_block_info $(echo ${tx_blockhash} | tr -d '"') | jq '.result.height')
+ tx_blockhash="$(echo "${tx_details}" | jq -r '.result.blockhash')"
+ tx_blockheight=$(get_block_info ${tx_blockhash} | jq '.result.height')
+ tx_blockhash="'${tx_blockhash}'"
tx_blocktime=$(echo "${tx_details}" | jq '.result.blocktime')
fi
- sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime) VALUES (\"${txid}\", ${tx_hash}, ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime})"
- trace_rc $?
- sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime, raw_tx) VALUES (\"${txid}\", ${tx_hash}, ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime}, readfile('rawtx-${txid}-$$.blob'))"
- trace_rc $?
-
- id_inserted=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"")
+ id_inserted=$(sql "INSERT INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime)"\
+" VALUES ('${txid}', '${tx_hash}', ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime})"\
+" ON CONFLICT (txid) DO"\
+" UPDATE SET blockhash=${tx_blockhash}, blockheight=${tx_blockheight}, blocktime=${tx_blocktime}, confirmations=${tx_nb_conf}"\
+" RETURNING id" \
+ "SELECT id FROM tx WHERE txid='${txid}'")
trace_rc $?
else
# TX found in our DB.
# 1-conf or executecallbacks on an unconfirmed tx or spending watched address (in this case, we probably missed conf) or spending to a watched address (in this case, spend inserted the tx in the DB)
- local tx_blockhash=$(echo "${tx_details}" | jq '.result.blockhash')
+ local tx_blockhash=$(echo "${tx_details}" | jq -r '.result.blockhash')
trace "[confirmation] tx_blockhash=${tx_blockhash}"
if [ "${tx_blockhash}" = "null" ]; then
trace "[confirmation] probably being called by executecallbacks without any confirmations since the last time we checked"
else
- local tx_blockheight=$(get_block_info $(echo "${tx_blockhash}" | tr -d '"') | jq '.result.height')
+ local tx_blockheight=$(get_block_info "${tx_blockhash}" | jq '.result.height')
local tx_blocktime=$(echo "${tx_details}" | jq '.result.blocktime')
- sql "UPDATE tx SET
- confirmations=${tx_nb_conf},
- blockhash=${tx_blockhash},
- blockheight=${tx_blockheight},
- blocktime=${tx_blocktime}
- WHERE txid=\"${txid}\""
- trace_rc $?
- sql_rawtx "UPDATE rawtx SET
- confirmations=${tx_nb_conf},
- blockhash=${tx_blockhash},
- blockheight=${tx_blockheight},
- blocktime=${tx_blocktime},
- raw_tx=readfile('rawtx-${txid}-$$.blob')
- WHERE txid=\"${txid}\""
+ sql "UPDATE tx SET confirmations=${tx_nb_conf}, blockhash='${tx_blockhash}', blockheight=${tx_blockheight}, blocktime=${tx_blocktime} WHERE txid='${txid}'"
trace_rc $?
fi
id_inserted=${tx}
@@ -171,7 +159,8 @@ confirmation() {
# If the tx is batched and pays multiple watched addresses, we have to insert
# those additional addresses in watching_tx!
watching_id=$(echo "${row}" | cut -d '|' -f1)
- sql "INSERT OR IGNORE INTO watching_tx (watching_id, tx_id, vout, amount) VALUES (${watching_id}, ${id_inserted}, ${tx_vout_n}, ${tx_vout_amount})"
+ sql "INSERT INTO watching_tx (watching_id, tx_id, vout, amount) VALUES (${watching_id}, ${id_inserted}, ${tx_vout_n}, ${tx_vout_amount})"\
+" ON CONFLICT DO NOTHING"
trace_rc $?
else
trace "[confirmation] For this tx, there's already watching_tx rows"
@@ -211,7 +200,7 @@ confirmation() {
# for next cron.
if [ -z "${bypass_callbacks}" ]; then
trace "[confirmation] Let's do the callbacks!"
- do_callbacks
+ do_callbacks "${txid}"
fi
echo '{"result":"confirmed"}'
diff --git a/proxy_docker/app/script/getactivewatches.sh b/proxy_docker/app/script/getactivewatches.sh
index 7cb25a3..33fc731 100644
--- a/proxy_docker/app/script/getactivewatches.sh
+++ b/proxy_docker/app/script/getactivewatches.sh
@@ -12,8 +12,8 @@ get_txns_by_watchlabel(){
INNER JOIN watching AS w ON w32.id = w.watching_by_pub32_id
INNER JOIN watching_tx AS wtxn ON w.id = wtxn.watching_id
INNER JOIN tx AS tx ON wtxn.tx_id = tx.id
- WHERE w32.label="$1"
- LIMIT 0,${2-10}
+ WHERE w32.label='${1}'
+ LIMIT ${2-10} OFFSET 0
HERE
)
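+ # LIMIT 0,n is SQLite/MySQL shorthand for (offset, count); Postgres only
+ # accepts the LIMIT n OFFSET m form. ${2-10} keeps the default page size at 10.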
label_txns=$(sql "$query")
@@ -38,12 +38,12 @@ get_unused_addresses_by_watchlabel(){
SELECT w32.id, w32.label, w32.pub32, w.pub32_index, w.address
FROM watching as w
INNER JOIN watching_by_pub32 AS w32 ON w.watching_by_pub32_id = w32.id
- WHERE w32.label="$1"
+ WHERE w32.label='${1}'
AND NOT EXISTS (
SELECT 1 FROM watching_tx WHERE watching_id = w.id
)
ORDER BY w.pub32_index ASC
- LIMIT 0,${2-10}
+ LIMIT ${2-10} OFFSET 0
HERE
)
label_unused_addrs=$(sql "$query")
@@ -65,9 +65,9 @@ getactivewatches() {
trace "Entering getactivewatches()..."
local watches
- # Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster.
+ # Let's build the string directly with the DBMS instead of manipulating multiple strings afterwards; it's faster.
# {"id":"${id}","address":"${address}","imported":"${imported}","unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}"}
- watches=$(sql "SELECT '{\"id\":' || id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":\"' || COALESCE(callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(callback1conf, '') || '\",\"label\":\"' || COALESCE(label, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching WHERE watching AND NOT calledback1conf")
+ watches=$(sql "SELECT '{\"id\":' || id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":' || CASE WHEN callback0conf IS NULL THEN 'null' ELSE ('\"' || callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN callback1conf IS NULL THEN 'null' ELSE ('\"' || callback1conf || '\"') END || ',\"label\":\"' || COALESCE(label, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching WHERE watching AND NOT calledback1conf ORDER BY id")
returncode=$?
trace_rc ${returncode}
@@ -99,7 +99,7 @@ getactivewatchesbyxpub() {
local xpub=${1}
local returncode
- getactivewatchesxpub "pub32" ${xpub}
+ getactivewatchesxpub "pub32" "${xpub}"
returncode=$?
trace_rc ${returncode}
@@ -112,7 +112,7 @@ getactivewatchesbylabel() {
local label=${1}
local returncode
- getactivewatchesxpub "label" ${label}
+ getactivewatchesxpub "label" "${label}"
returncode=$?
trace_rc ${returncode}
@@ -128,9 +128,9 @@ getactivewatchesxpub() {
trace "[getactivewatchesxpub] value=${value}"
local watches
- # Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster.
+ # Let's build the string directly with the DBMS instead of manipulating multiple strings afterwards; it's faster.
# {"id":"${id}","address":"${address}","imported":"${imported}","unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}","derivation_path":"${derivation_path}","pub32_index":"${pub32_index}"}
- watches=$(sql "SELECT '{\"id\":' || w.id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":\"' || COALESCE(w.callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(w.callback1conf, '') || '\",\"watching_since\":\"' || w.inserted_ts || '\",\"derivation_path\":\"' || derivation_path || '\",\"pub32_index\":' || pub32_index || '}' FROM watching w, watching_by_pub32 w32 WHERE watching_by_pub32_id = w32.id AND ${where} = \"${value}\" AND w.watching AND NOT calledback1conf")
+ watches=$(sql "SELECT '{\"id\":' || w.id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":' || CASE WHEN w.callback0conf IS NULL THEN 'null' ELSE ('\"' || w.callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN w.callback1conf IS NULL THEN 'null' ELSE ('\"' || w.callback1conf || '\"') END || ',\"watching_since\":\"' || w.inserted_ts || '\",\"derivation_path\":\"' || derivation_path || '\",\"pub32_index\":' || pub32_index || '}' FROM watching w, watching_by_pub32 w32 WHERE watching_by_pub32_id = w32.id AND w32.${where} = '${value}' AND w.watching AND NOT calledback1conf ORDER BY w.id")
returncode=$?
trace_rc ${returncode}
@@ -160,9 +160,9 @@ getactivexpubwatches() {
trace "Entering getactivexpubwatches()..."
local watches
- # Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster.
+ # Let's build the string directly with the DBMS instead of manipulating multiple strings afterwards; it's faster.
# {"id":"${id}","pub32":"${pub32}","label":"${label}","derivation_path":"${derivation_path}","last_imported_n":${last_imported_n},"unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}"}
- watches=$(sql "SELECT '{\"id\":' || id || ',\"pub32\":\"' || pub32 || '\",\"label\":\"' || label || '\",\"derivation_path\":\"' || derivation_path || '\",\"last_imported_n\":' || last_imported_n || ',\"unconfirmedCallbackURL\":\"' || COALESCE(callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(callback1conf, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching_by_pub32 WHERE watching")
+ watches=$(sql "SELECT '{\"id\":' || id || ',\"pub32\":\"' || pub32 || '\",\"label\":\"' || label || '\",\"derivation_path\":\"' || derivation_path || '\",\"last_imported_n\":' || last_imported_n || ',\"unconfirmedCallbackURL\":' || CASE WHEN callback0conf IS NULL THEN 'null' ELSE ('\"' || callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN callback1conf IS NULL THEN 'null' ELSE ('\"' || callback1conf || '\"') END || ',\"watching_since\":\"' || inserted_ts || '\"}' FROM watching_by_pub32 WHERE watching ORDER BY id")
returncode=$?
trace_rc ${returncode}
diff --git a/proxy_docker/app/script/importaddress.sh b/proxy_docker/app/script/importaddress.sh
index 8b2128c..874c54b 100644
--- a/proxy_docker/app/script/importaddress.sh
+++ b/proxy_docker/app/script/importaddress.sh
@@ -11,7 +11,7 @@ importaddress_rpc() {
if [ -z "${label}" ]; then
label="null"
fi
- local data='{"method":"importaddress","params":{"address":"'${address}'","label":'${label}',"rescan":false}}'
+ local data='{"method":"importaddress","params":{"address":"'${address}'","label":"'${label}'","rescan":false}}'
# local data="{\"method\":\"importaddress\",\"params\":[\"${address}\",\"\",false]}"
local result
result=$(send_to_watcher_node ${data})
@@ -39,7 +39,7 @@ importmulti_rpc() {
# {"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},
# {"scriptPubKey":{"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},"timestamp":"now","watchonly":true,"label":"xpub"},
- addresses=$(echo "${addresses}" | sed "s/\"address\"/\"scriptPubKey\":\{\"address\"/g" | sed "s/}/},\"timestamp\":\"now\",\"watchonly\":true,\"label\":${label}}/g")
+ addresses=$(echo "${addresses}" | sed "s/\"address\"/\"scriptPubKey\":\{\"address\"/g" | sed "s/}/},\"timestamp\":\"now\",\"watchonly\":true,\"label\":\"${label}\"}/g")
# trace "[importmulti_rpc] addresses=${addresses}"
# Now we use that in the RPC string
diff --git a/proxy_docker/app/script/manage_missed_conf.sh b/proxy_docker/app/script/manage_missed_conf.sh
index 5102004..6d745df 100644
--- a/proxy_docker/app/script/manage_missed_conf.sh
+++ b/proxy_docker/app/script/manage_missed_conf.sh
@@ -25,7 +25,7 @@ manage_not_imported() {
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then
- sql "UPDATE watching SET imported=1 WHERE address=\"${address}\""
+ sql "UPDATE watching SET imported=true WHERE address='${address}'"
fi
done
@@ -33,7 +33,7 @@ manage_not_imported() {
}
manage_missed_conf() {
- # Maybe we missed confirmations, because we were down or no network or
+ # Maybe we missed 0-conf or 1-conf watched txs because we were down, had no network or
# whatever, so we look at what might be missed and do confirmations.
# The strategy here: get the list of watched addresses, see if they received something on the Bitcoin node,
@@ -41,7 +41,7 @@ manage_missed_conf() {
trace "[Entering manage_missed_conf()]"
- local watches=$(sql 'SELECT DISTINCT address FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx t ON t.id = tx_id WHERE watching AND imported AND (tx_id IS NULL OR t.confirmations=0) ORDER BY address')
+ local watches=$(sql "SELECT DISTINCT address FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx t ON t.id = tx_id WHERE watching AND imported ORDER BY address")
trace "[manage_missed_conf] watches=${watches}"
if [ ${#watches} -eq 0 ]; then
trace "[manage_missed_conf] Nothing missed!"
@@ -66,6 +66,7 @@ manage_missed_conf() {
local received
local received_address
+ local confirmations
local watching
local latesttxid
local tx
@@ -76,48 +77,55 @@ manage_missed_conf() {
local row
local address
local inserted_ts
+ local calledback0conf
local txid
local txids
local IFS=$'\n'
for address in ${received_watches}
do
- watching=$(sql 'SELECT address, inserted_ts FROM watching WHERE address="'${address}'"')
+ watching=$(sql "SELECT address, inserted_ts, calledback0conf FROM watching WHERE address='${address}'")
trace "[manage_missed_conf] watching=${watching}"
if [ ${#watching} -eq 0 ]; then
trace "[manage_missed_conf] Nothing missed!"
continue
fi
- # Let's get confirmed received txs for the address
- # address=$(echo "${watches}" | cut -d '|' -f1)
- inserted_ts=$(date -d "$(echo "${watching}" | cut -d '|' -f2)" +"%s")
+ inserted_ts=$(date -d "$(echo "${watching}" | cut -d '|' -f2)" -D '%Y-%m-%d %H:%M:%S' +"%s")
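+ # -D supplies the input format so BusyBox date can parse the
+ # 'YYYY-MM-DD HH:MM:SS' timestamp text that Postgres returns.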
trace "[manage_missed_conf] inserted_ts=${inserted_ts}"
+ calledback0conf=$(echo "${watching}" | cut -d '|' -f3)
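+ # psql's text output renders booleans as the single characters t / f, hence
+ # the string comparisons against "t" below (and in ots.sh).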
+ trace "[manage_missed_conf] calledback0conf=${calledback0conf}"
- received_address=$(echo "${received}" | jq -Mc ".result | map(select(.address==\"${address}\" and .confirmations>0))[0]")
+ received_address=$(echo "${received}" | jq -Mc ".result | map(select(.address==\"${address}\"))[0]")
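+ # The confirmations>0 filter was dropped: unconfirmed receipts are considered
+ # too, so missed 0-conf callbacks can be replayed as well.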
trace "[manage_missed_conf] received_address=${received_address}"
- if [ "${received_address}" = "null" ]; then
- # Not confirmed while we were away...
- trace "[manage_missed_conf] Nothing missed here"
+ confirmations=$(echo "${received_address}" | jq -r ".confirmations")
+ trace "[manage_missed_conf] confirmations=${confirmations}"
+
+ if [ "${confirmations}" -eq "0" ] && [ "${calledback0conf}" = "t" ]; then
+ # 0-conf and calledback0conf is true, so let's skip this one
+ trace "[manage_missed_conf] Nothing missed!"
else
- # We got something confirmed
- # Let's find out if it was confirmed after being watched
- trace "[manage_missed_conf] We got something confirmed"
+ # 0-conf and calledback0conf false, let's call confirmation
+ # or
+ # 1-conf and calledback1conf false, let's call confirmation
+ trace "[manage_missed_conf] We got something to check..."
+
latesttxid=$(echo "${received_address}" | jq -r ".txids | last")
trace "[manage_missed_conf] latesttxid=${latesttxid}"
data='{"method":"gettransaction","params":["'${latesttxid}'"]}'
tx=$(send_to_watcher_node ${data})
blocktime=$(echo "${tx}" | jq '.result.blocktime')
txtime=$(echo "${tx}" | jq '.result.time')
- confirmations=$(echo "${tx}" | jq '.result.confirmations')
trace "[manage_missed_conf] blocktime=${blocktime}"
trace "[manage_missed_conf] txtime=${txtime}"
trace "[manage_missed_conf] inserted_ts=${inserted_ts}"
trace "[manage_missed_conf] confirmations=${confirmations}"
- if [ "${txtime}" -gt "${inserted_ts}" ] && [ "${confirmations}" -gt "0" ]; then
- # Mined after watch, we missed it!
- trace "[manage_missed_conf] Mined after watch, we missed it!"
+ if [ "${txtime}" -ge "${inserted_ts}" ]; then
+ # Broadcast or mined after watch, we missed it!
+ trace "[manage_missed_conf] Broadcast or mined after watch, we missed it!"
+ # We skip the callbacks because do_callbacks is called right after in
+ # requesthandler.executecallbacks (our caller)
confirmation "${latesttxid}" "true"
fi
fi
diff --git a/proxy_docker/app/script/newblock.sh b/proxy_docker/app/script/newblock.sh
index 6db875a..ce096fd 100644
--- a/proxy_docker/app/script/newblock.sh
+++ b/proxy_docker/app/script/newblock.sh
@@ -25,6 +25,7 @@ newblock() {
returncode=$?
trace_rc ${returncode}
+ # do_callbacks_txid "$(echo "${blockinfo}" | jq ".result.tx[]")"
do_callbacks_txid
batch_check_webhooks
diff --git a/proxy_docker/app/script/ots.sh b/proxy_docker/app/script/ots.sh
index 2db2b11..24c177b 100644
--- a/proxy_docker/app/script/ots.sh
+++ b/proxy_docker/app/script/ots.sh
@@ -39,7 +39,7 @@ serve_ots_stamp() {
id_inserted=$(echo "${row}" | cut -d '|' -f1)
trace "[serve_ots_stamp] id_inserted=${id_inserted}"
- if [ "${requested}" -eq "1" ]; then
+ if [ "${requested}" = "t" ]; then
# Stamp already requested
trace "[serve_ots_stamp] Stamp already requested"
errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested"
@@ -49,18 +49,20 @@ serve_ots_stamp() {
returncode=$?
fi
else
- sql "INSERT OR IGNORE INTO stamp (hash, callbackUrl) VALUES (\"${hash}\", \"${callbackUrl}\")"
+ id_inserted=$(sql "INSERT INTO stamp (hash, callbackUrl)"\
+" VALUES ('${hash}','${callbackUrl}')"\
+" RETURNING id" \
+ "SELECT id FROM stamp WHERE hash='${hash}'")
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then
- id_inserted=$(sql "SELECT id FROM stamp WHERE hash='${hash}'")
- trace_rc $?
errorstring=$(request_ots_stamp "${hash}" ${id_inserted})
returncode=$?
trace_rc ${returncode}
else
trace "[serve_ots_stamp] Stamp request could not be inserted in DB"
errorstring="Stamp request could not be inserted in DB, please retry later"
+ id_inserted=null
returncode=1
fi
fi
@@ -114,7 +116,7 @@ request_ots_stamp() {
if [ "${returncode}" -eq "0" ]; then
# "already exists" found, let's try updating DB again
trace "[request_ots_stamp] was already requested to the OTS server... let's update the DB, looks like it didn't work on first try"
- sql "UPDATE stamp SET requested=1 WHERE id=${id}"
+ sql "UPDATE stamp SET requested=true WHERE id=${id}"
errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested"
returncode=1
else
@@ -125,7 +127,7 @@ request_ots_stamp() {
fi
else
trace "[request_ots_stamp] Stamping request sent successfully!"
- sql "UPDATE stamp SET requested=1 WHERE id=${id}"
+ sql "UPDATE stamp SET requested=true WHERE id=${id}"
errorstring=""
returncode=0
fi
@@ -174,12 +176,12 @@ serve_ots_backoffice() {
id=$(echo "${row}" | cut -d '|' -f5)
trace "[serve_ots_backoffice] id=${id}"
- if [ "${requested}" -ne "1" ]; then
+ if [ "${requested}" != "t" ]; then
# Re-request the unrequested calls to ots_stamp
request_ots_stamp "${hash}" ${id}
returncode=$?
else
- if [ "${upgraded}" -ne "1" ]; then
+ if [ "${upgraded}" != "t" ]; then
# Upgrade requested calls to ots_stamp that have not been called back yet
trace "[serve_ots_backoffice] curl -s ${OTSCLIENT_CONTAINER}/upgrade/${hash}"
result=$(curl -s ${OTSCLIENT_CONTAINER}/upgrade/${hash})
@@ -194,18 +196,18 @@ serve_ots_backoffice() {
# Error tag not null, so there's an error
trace "[serve_ots_backoffice] not upgraded!"
- upgraded=0
+ upgraded="f"
else
# No failure, upgraded
trace "[serve_ots_backoffice] just upgraded!"
- sql "UPDATE stamp SET upgraded=1 WHERE id=${id}"
+ sql "UPDATE stamp SET upgraded=true WHERE id=${id}"
trace_rc $?
- upgraded=1
+ upgraded="t"
fi
fi
fi
- if [ "${upgraded}" -eq "1" ]; then
+ if [ "${upgraded}" = "t" ]; then
trace "[serve_ots_backoffice] upgraded! Let's call the callback..."
url=$(echo "${row}" | cut -d '|' -f2)
trace "[serve_ots_backoffice] url=${url}"
@@ -221,13 +223,13 @@ serve_ots_backoffice() {
# Even if curl executed ok, we need to make sure the http return code is also ok
if [ "${returncode}" -eq "0" ]; then
- sql "UPDATE stamp SET calledback=1 WHERE id=${id}"
+ sql "UPDATE stamp SET calledback=true WHERE id=${id}"
trace_rc $?
fi
else
trace "[serve_ots_backoffice] url is empty, obviously won't try to call it!"
- sql "UPDATE stamp SET calledback=1 WHERE id=${id}"
+ sql "UPDATE stamp SET calledback=true WHERE id=${id}"
trace_rc $?
fi
fi
diff --git a/proxy_docker/app/script/requesthandler.sh b/proxy_docker/app/script/requesthandler.sh
index 21cac52..351668c 100644
--- a/proxy_docker/app/script/requesthandler.sh
+++ b/proxy_docker/app/script/requesthandler.sh
@@ -76,8 +76,10 @@ main() {
case "${cmd}" in
helloworld)
# GET http://192.168.111.152:8080/helloworld
- response_to_client "Hello, world!" 0
- break
+ response='{"hello":"world"}'
+ returncode=0
+ # response_to_client "Hello, world!" 0
+ # break
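+ # Handlers now just set response/returncode; a single response_to_client
+ # call after the case block (outside this hunk) presumably sends the reply,
+ # instead of each arm responding and break-ing out of the loop.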
;;
installation_info)
# GET http://192.168.111.152:8080/info
@@ -86,8 +88,7 @@ main() {
else
response='{ "error": "missing installation data" }'
fi
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
watch)
# POST http://192.168.111.152:8080/watch
@@ -96,8 +97,7 @@ main() {
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","eventMessage":"eyJib3VuY2VfYWRkcmVzcyI6IjJNdkEzeHIzOHIxNXRRZWhGblBKMVhBdXJDUFR2ZTZOamNGIiwibmJfY29uZiI6MH0K","label":"myLabel"}
response=$(watchrequest "${line}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
unwatch)
# curl (GET) 192.168.111.152:8080/unwatch/2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp
@@ -122,16 +122,15 @@ main() {
# Let's make it work even for a GET request (equivalent to a POST with empty json object body)
if [ "$http_method" = "POST" ]; then
address=$(echo "${line}" | jq -r ".address")
- unconfirmedCallbackURL=$(echo "${line}" | jq ".unconfirmedCallbackURL")
- confirmedCallbackURL=$(echo "${line}" | jq ".confirmedCallbackURL")
+ unconfirmedCallbackURL=$(echo "${line}" | jq -r ".unconfirmedCallbackURL")
+ confirmedCallbackURL=$(echo "${line}" | jq -r ".confirmedCallbackURL")
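+ # jq -r strips the surrounding JSON quotes so bare URL strings are passed on.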
watchid=$(echo "${line}" | jq ".id")
else
address=$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)
fi
response=$(unwatchrequest "${watchid}" "${address}" "${unconfirmedCallbackURL}" "${confirmedCallbackURL}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
watchxpub)
# POST http://192.168.111.152:8080/watchxpub
@@ -139,43 +138,37 @@ main() {
# curl -H "Content-Type: application/json" -d '{"label":"2219","pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/1/n","nstart":55,"unconfirmedCallbackURL":"192.168.111.233:1111/callback0conf","confirmedCallbackURL":"192.168.111.233:1111/callback1conf"}' proxy:8888/watchxpub
response=$(watchpub32request "${line}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
unwatchxpubbyxpub)
# GET http://192.168.111.152:8080/unwatchxpubbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk
response=$(unwatchpub32request "${line}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
unwatchxpubbylabel)
# GET http://192.168.111.152:8080/unwatchxpubbylabel/4421
response=$(unwatchpub32labelrequest "${line}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
getactivewatchesbyxpub)
# GET http://192.168.111.152:8080/getactivewatchesbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk
response=$(getactivewatchesbyxpub "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
getactivewatchesbylabel)
# GET http://192.168.111.152:8080/getactivewatchesbylabel/4421
response=$(getactivewatchesbylabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
getactivexpubwatches)
# GET http://192.168.111.152:8080/getactivexpubwatches
response=$(getactivexpubwatches)
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
watchtxid)
# POST http://192.168.111.152:8080/watchtxid
@@ -183,8 +176,7 @@ main() {
# curl -H "Content-Type: application/json" -d '{"txid":"b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","xconfCallbackURL":"192.168.111.233:1111/callbackXconf","nbxconf":6}' proxy:8888/watchtxid
response=$(watchtxidrequest "${line}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
unwatchtxid)
# POST http://192.168.111.152:8080/unwatchtxid
@@ -200,88 +192,76 @@ main() {
# - id: the id returned by watchtxid
local txid=$(echo "${line}" | jq -r ".txid")
- local unconfirmedCallbackURL=$(echo "${line}" | jq ".unconfirmedCallbackURL")
- local confirmedCallbackURL=$(echo "${line}" | jq ".confirmedCallbackURL")
+ local unconfirmedCallbackURL=$(echo "${line}" | jq -r ".unconfirmedCallbackURL")
+ local confirmedCallbackURL=$(echo "${line}" | jq -r ".confirmedCallbackURL")
local watchid=$(echo "${line}" | jq ".id")
response=$(unwatchtxidrequest "${watchid}" "${txid}" "${unconfirmedCallbackURL}" "${confirmedCallbackURL}")
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
getactivewatches)
# curl (GET) 192.168.111.152:8080/getactivewatches
response=$(getactivewatches)
- response_to_client "${response}" ${?}
- break
+ returncode=$?
;;
get_txns_by_watchlabel)
# curl (GET) 192.168.111.152:8080/get_txns_by_watchlabel/