diff --git a/cyphernodeconf_docker/help.json b/cyphernodeconf_docker/help.json
index eeb89fe..9e41b6a 100644
--- a/cyphernodeconf_docker/help.json
+++ b/cyphernodeconf_docker/help.json
@@ -17,6 +17,9 @@
   "gatekeeper_edit_apiproperties": "If you know what you are doing, it is possible to manually edit the API endpoints/groups authorization. (Not recommended)",
   "gatekeeper_apiproperties": "You are about to edit the api.properties file. The format of the file is pretty simple: for each action, you will find what access group can access it. Admin group can do what Spender group can, and Spender group can do what Watcher group can. Internal group is for the endpoints accessible only within the Docker network, like the backoffice tasks used by the Cron container. The access groups for each API id/key are found in the keys.properties file.",
   "gatekeeper_cns": "I use domain names and/or IP addresses to create valid TLS certificates. For example, if https://cyphernodehost/getbestblockhash and https://192.168.7.44/getbestblockhash will be used, enter cyphernodehost, 192.168.7.44 as a possible domains. 127.0.0.1, localhost, gatekeeper will be automatically added to your list. Make sure the provided domain names are in your DNS or client's hosts file and is reachable.",
+  "postgres_datapath": "The Cyphernode's Postgres files will be stored in a container's mounted directory. Please provide the local mounted path to that directory. If running on OSX, check mountable directories in Docker's File Sharing configs.",
+  "postgres_datapath_custom": "Provide the full path name where Postgres files will be saved.",
+  "postgres_password": "Password of the 'cyphernode' PostgreSQL user, used by Cyphernode when connecting to the database.",
   "logs_datapath": "The Cyphernode's log files will be stored in a container's mounted directory. Please provide the local mounted path to that directory. If running on OSX, check mountable directories in Docker's File Sharing configs.",
   "logs_datapath_custom": "Provide the full path name where Cyphernodes log files will be saved.",
   "traefik_datapath": "The Traefik's files will be stored in a container's mounted directory. Please provide the local mounted path to that directory. 
If running on OSX, check mountable directories in Docker's File Sharing configs.", diff --git a/cyphernodeconf_docker/lib/app.js b/cyphernodeconf_docker/lib/app.js index 2e34f18..e9beae1 100644 --- a/cyphernodeconf_docker/lib/app.js +++ b/cyphernodeconf_docker/lib/app.js @@ -87,6 +87,7 @@ module.exports = class App { proxy_version: process.env.PROXY_VERSION, proxycron_version: process.env.PROXYCRON_VERSION, pycoin_version: process.env.PYCOIN_VERSION, + postgres_version: process.env.POSTGRES_VERSION, traefik_version: process.env.TRAEFIK_VERSION, mosquitto_version: process.env.MOSQUITTO_VERSION, otsclient_version: process.env.OTSCLIENT_VERSION, @@ -148,6 +149,7 @@ module.exports = class App { 'cyphernode/proxy': this.sessionData.proxy_version, 'cyphernode/proxycron': this.sessionData.proxycron_version, 'cyphernode/pycoin': this.sessionData.pycoin_version, + 'cyphernode/postgres': this.sessionData.postgres_version, 'cyphernode/otsclient': this.sessionData.otsclient_version, 'traefik': this.sessionData.traefik_version, 'cyphernode/clightning': this.sessionData.lightning_version, @@ -359,6 +361,7 @@ module.exports = class App { const pathProps = [ 'gatekeeper_datapath', + 'postgres_datapath', 'logs_datapath', 'traefik_datapath', 'tor_datapath', @@ -483,6 +486,13 @@ module.exports = class App { networks: ['cyphernodenet'], docker: 'cyphernode/pycoin:'+this.config.docker_versions['cyphernode/pycoin'] }, + { + name: 'Postgres', + label: 'postgres', + host: 'postgres', + networks: ['cyphernodenet'], + docker: 'postgres:'+this.config.docker_versions['cyphernode/postgres'] + }, { name: 'Notifier', label: 'notifier', diff --git a/cyphernodeconf_docker/lib/config.js b/cyphernodeconf_docker/lib/config.js index 73cfd28..1cd4ff6 100644 --- a/cyphernodeconf_docker/lib/config.js +++ b/cyphernodeconf_docker/lib/config.js @@ -12,10 +12,11 @@ const schemas = { '0.2.2': require('../schema/config-v0.2.2.json'), '0.2.3': require('../schema/config-v0.2.3.json'), '0.2.4': require('../schema/config-v0.2.4.json'), - '0.2.5': require('../schema/config-v0.2.5.json') + '0.2.5': require('../schema/config-v0.2.5.json'), + '0.2.6': require('../schema/config-v0.2.6.json') }; -const versionHistory = [ '0.1.0', '0.2.0', '0.2.2', '0.2.3', '0.2.4', '0.2.5' ]; +const versionHistory = [ '0.1.0', '0.2.0', '0.2.2', '0.2.3', '0.2.4', '0.2.5', '0.2.6' ]; const defaultSchemaVersion=versionHistory[0]; const latestSchemaVersion=versionHistory[versionHistory.length-1]; @@ -46,7 +47,8 @@ module.exports = class Config { '0.2.0->0.2.2': this.migrate_0_2_0_to_0_2_2, '0.2.2->0.2.3': this.migrate_0_2_2_to_0_2_3, '0.2.3->0.2.4': this.migrate_0_2_3_to_0_2_4, - '0.2.4->0.2.5': this.migrate_0_2_4_to_0_2_5 + '0.2.4->0.2.5': this.migrate_0_2_4_to_0_2_5, + '0.2.5->0.2.6': this.migrate_0_2_5_to_0_2_6 }; this.setData( { schema_version: latestSchemaVersion } ); @@ -247,4 +249,12 @@ module.exports = class Config { this.data.schema_version = '0.2.5'; } + async migrate_0_2_5_to_0_2_6() { + const currentVersion = this.data.schema_version; + if( currentVersion != '0.2.5' ) { + return; + } + this.data.schema_version = '0.2.6'; + } + }; diff --git a/cyphernodeconf_docker/prompters/200_postgres.js b/cyphernodeconf_docker/prompters/200_postgres.js new file mode 100644 index 0000000..9b9540b --- /dev/null +++ b/cyphernodeconf_docker/prompters/200_postgres.js @@ -0,0 +1,30 @@ +const chalk = require('chalk'); + +const name = 'postgres'; + +const capitalise = function( txt ) { + return txt.charAt(0).toUpperCase() + txt.substr(1); +}; + +const prefix = function() { + 
return chalk.green(capitalise(name)+': '); +}; + +module.exports = { + name: function() { + return name; + }, + prompts: function( utils ) { + return [ + { + type: 'password', + name: 'postgres_password', + default: utils.getDefault( 'postgres_password' ), + message: prefix()+'Password of Postgres cyphernode user?'+utils.getHelp('postgres_password'), + filter: utils.trimFilter, + }]; + }, + templates: function( props ) { + return ['pgpass']; + } +}; \ No newline at end of file diff --git a/cyphernodeconf_docker/prompters/999_installer.js b/cyphernodeconf_docker/prompters/999_installer.js index 67ba104..1301cff 100644 --- a/cyphernodeconf_docker/prompters/999_installer.js +++ b/cyphernodeconf_docker/prompters/999_installer.js @@ -30,6 +30,44 @@ module.exports = { value: "docker" }] }, + { + when: installerDocker, + type: 'list', + name: 'postgres_datapath', + default: utils.getDefault( 'postgres_datapath' ), + choices: [ + { + name: utils.setupDir()+"/cyphernode/postgres", + value: utils.setupDir()+"/cyphernode/postgres" + }, + { + name: utils.defaultDataDirBase()+"/cyphernode/postgres", + value: utils.defaultDataDirBase()+"/cyphernode/postgres" + }, + { + name: utils.defaultDataDirBase()+"/.cyphernode/postgres", + value: utils.defaultDataDirBase()+"/.cyphernode/postgres" + }, + { + name: utils.defaultDataDirBase()+"/postgres", + value: utils.defaultDataDirBase()+"/postgres" + }, + { + name: "Custom path", + value: "_custom" + } + ], + message: prefix()+'Where do you want to store your Postgres files?'+utils.getHelp('postgres_datapath'), + }, + { + when: (props)=>{ return installerDocker(props) && (props.postgres_datapath === '_custom') }, + type: 'input', + name: 'postgres_datapath_custom', + default: utils.getDefault( 'postgres_datapath_custom' ), + filter: utils.trimFilter, + validate: utils.pathValidator, + message: prefix()+'Custom path for Postgres files?'+utils.getHelp('postgres_datapath_custom'), + }, { when: installerDocker, type: 'list', diff --git a/cyphernodeconf_docker/schema/config-v0.2.6.json b/cyphernodeconf_docker/schema/config-v0.2.6.json new file mode 100644 index 0000000..3fdb34c --- /dev/null +++ b/cyphernodeconf_docker/schema/config-v0.2.6.json @@ -0,0 +1,726 @@ +{ + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://cyphernode.io/config-v0.2.6.json", + "type": "object", + "title": "Cyphernode config file structure v0.2.6", + "additionalProperties": false, + "required": [ + "schema_version", + "setup_version", + "features", + "net", + "use_xpub", + "installer_mode", + "run_as_different_user", + "docker_mode", + "docker_versions", + "adminhash", + "bitcoin_rpcuser", + "bitcoin_rpcpassword", + "bitcoin_prune", + "bitcoin_datapath", + "bitcoin_mode", + "bitcoin_expose", + "gatekeeper_expose", + "gatekeeper_keys", + "gatekeeper_sslcert", + "gatekeeper_sslkey", + "gatekeeper_cns", + "gatekeeper_clientkeyspassword", + "gatekeeper_datapath", + "gatekeeper_port", + "proxy_datapath", + "postgres_password", + "postgres_datapath", + "logs_datapath", + "traefik_datapath", + "traefik_http_port", + "traefik_https_port" + ], + "allOf": [ + { + "if": { + "properties": { + "run_as_different_user": { + "enum": [ + true + ] + } + } + }, + "then": { + "required": [ + "username" + ] + } + }, + { + "if": { + "properties": { + "use_xpub": { + "enum": [ + true + ] + } + } + }, + "then": { + "required": [ + "xpub", + "derivation_path" + ] + } + }, + { + "if": { + "properties": { + "bitcoin_prune": { + "enum": [ + true + ] + } + } + }, + "then": { + 
"required": [ + "bitcoin_prune_size" + ] + } + }, + { + "if": { + "properties": { + "features": { + "contains": { + "enum": [ + "tor" + ] + } + } + } + }, + "then": { + "required": [ + "tor_datapath", + "torifyables", + "clearnet" + ] + } + }, + { + "if": { + "properties": { + "features": { + "contains": { + "enum": [ + "lightning" + ] + } + } + } + }, + "then": { + "required": [ + "lightning_announce", + "lightning_expose", + "lightning_implementation", + "lightning_datapath", + "lightning_nodename", + "lightning_nodecolor" + ] + } + }, + { + "if": { + "properties": { + "features": { + "contains": { + "enum": [ + "otsclient" + ] + } + } + } + }, + "then": { + "required": [ + "otsclient_datapath" + ] + } + } + ], + "properties": { + "schema_version": { + "type": "string", + "enum": [ + "0.2.5" + ], + "default": "0.3.0", + "examples": [ + "0.2.5" + ] + }, + "setup_version": { + "type": "string", + "examples": [ + "v0.2.0" + ] + }, + "docker_versions": { + "$id": "#/properties/dockerVersions", + "type": "object", + "title": "All versions of the docker containers", + "default": {}, + "additionalProperties": { + "type": "string" + } + }, + "features": { + "$id": "#/properties/features", + "type": "array", + "title": "The optional features of this cyphernode", + "default": ["specter"], + "items": { + "$id": "#/properties/features/items", + "type": "string", + "enum": [ + "tor", + "lightning", + "otsclient", + "batcher", + "specter" + ], + "title": "The feature", + "default": "", + "examples": [ + "tor", + "lightning", + "otsclient", + "batcher", + "specter" + ] + } + }, + "torifyables": { + "$id": "#/properties/torifyables", + "type": "array", + "title": "The Torified features of this cyphernode", + "default": [], + "items": { + "$id": "#/properties/torifyables/items", + "type": "string", + "enum": [ + "tor_traefik", + "tor_bitcoin", + "tor_lightning", + "tor_otsoperations", + "tor_otswebhooks", + "tor_addrwatcheswebhooks", + "tor_txidwatcheswebhooks" + ], + "title": "The Torified feature", + "default": "", + "examples": [ + "tor_traefik", + "tor_bitcoin", + "tor_lightning", + "tor_otsoperations", + "tor_otswebhooks", + "tor_addrwatcheswebhooks", + "tor_txidwatcheswebhooks" + ] + } + }, + "clearnet": { + "$id": "#/properties/clearnet", + "type": "array", + "title": "The clearnet-allowed Torified features of this cyphernode", + "default": [], + "items": { + "$id": "#/properties/clearnet/items", + "type": "string", + "enum": [ + "clearnet_bitcoin", + "clearnet_lightning" + ], + "title": "The clearnet-allowed Torified feature", + "default": "", + "examples": [ + "clearnet_bitcoin", + "clearnet_lightning" + ] + } + }, + "net": { + "$id": "#/properties/net", + "type": "string", + "enum": [ + "testnet", + "mainnet", + "regtest" + ], + "title": "The net cyphernode is running on", + "default": "testnet", + "examples": [ + "testnet" + ] + }, + "use_xpub": { + "$id": "#/properties/use_xpub", + "type": "boolean", + "title": "Use xpub key?", + "default": false, + "examples": [ + false + ] + }, + "xpub": { + "$id": "#/properties/xpub", + "type": "string", + "title": "Default xpub to derive addresses from", + "pattern": "^(\\w+)$" + }, + "derivation_path": { + "$id": "#/properties/derivation_path", + "type": "string", + "title": "Default derivation path", + "default": "0/n", + "examples": [ + "0/n" + ] + }, + "installer_mode": { + "$id": "#/properties/installer_mode", + "type": "string", + "enum": [ + "docker" + ], + "title": "Install mode", + "default": "docker", + "examples": [ + "docker" + ] + }, + 
"run_as_different_user": { + "$id": "#/properties/run_as_different_user", + "type": "boolean", + "title": "Run as different user", + "default": true, + "examples": [ + true + ] + }, + "username": { + "$id": "#/properties/username", + "type": "string", + "title": "Username to run under", + "default": "cyphernode", + "examples": [ + "cyphernode" + ] + }, + "docker_mode": { + "$id": "#/properties/docker_mode", + "type": "string", + "enum": [ + "swarm", + "compose" + ], + "title": "How to run the containers", + "default": "swarm", + "examples": [ + "compose" + ] + }, + "bitcoin_rpcuser": { + "$id": "#/properties/bitcoin_rpcuser", + "type": "string", + "title": "Bitcoin rpc user", + "default": "bitcoin", + "examples": [ + "bitcoin" + ] + }, + "bitcoin_rpcpassword": { + "$id": "#/properties/bitcoin_rpcpassword", + "type": "string", + "title": "Bitcoin rpc password", + "default": "CHANGEME", + "examples": [ + "CHANGEME" + ] + }, + "bitcoin_uacomment": { + "$id": "#/properties/bitcoin_uacomment", + "type": "string", + "title": "Bitcoin user agent comment", + "examples": [ + "cyphernode" + ] + }, + "bitcoin_prune": { + "$id": "#/properties/bitcoin_prune", + "type": "boolean", + "title": "Bitcoin prune", + "default": false, + "examples": [ + "false" + ] + }, + "bitcoin_prune_size": { + "$id": "#/properties/bitcoin_prune_size", + "type": "integer", + "title": "Bitcoin prune size", + "default": 550, + "examples": [ + 550 + ] + }, + "bitcoin_datapath": { + "$id": "#/properties/bitcoin_datapath", + "type": "string", + "title": "Bitcoin datapath", + "examples": [ + "/tmp/cyphernode/bitcoin" + ] + }, + "bitcoin_datapath_custom": { + "$id": "#/properties/bitcoin_datapath_custom", + "type": "string", + "title": "Bitcoin custom datapath", + "examples": [ + "/tmp/cyphernode/bitcoin" + ] + }, + "lightning_datapath": { + "$id": "#/properties/lightning_datapath", + "type": "string", + "title": "Lightning datapath", + "examples": [ + "/tmp/cyphernode/lightning" + ] + }, + "lightning_datapath_custom": { + "$id": "#/properties/lightning_datapath_custom", + "type": "string", + "title": "Lightning custom datapath", + "examples": [ + "/tmp/cyphernode/lightning" + ] + }, + "proxy_datapath": { + "$id": "#/properties/proxy_datapath", + "type": "string", + "title": "Proxy datapath", + "examples": [ + "/tmp/cyphernode/proxy" + ] + }, + "proxy_datapath_custom": { + "$id": "#/properties/proxy_datapath_custom", + "type": "string", + "title": "Proxy custom datapath", + "examples": [ + "/tmp/cyphernode/proxy" + ] + }, + "otsclient_datapath": { + "$id": "#/properties/otsclient_datapath", + "type": "string", + "title": "OTS Client datapath", + "examples": [ + "/tmp/cyphernode/otsclient" + ] + }, + "otsclient_datapath_custom": { + "$id": "#/properties/otsclient_datapath_custom", + "type": "string", + "title": "OTS Client custom datapath", + "examples": [ + "/tmp/cyphernode/otsclient" + ] + }, + "traefik_http_port": { + "$id": "#/properties/traefik_port", + "type": "integer", + "title": "Traefik HTTP port", + "default": 80, + "examples": [ + 80 + ] + }, + "traefik_https_port": { + "$id": "#/properties/traefik_https_port", + "type": "integer", + "title": "Traefik HTTPS port", + "default": 443, + "examples": [ + 443 + ] + }, + "traefik_datapath": { + "$id": "#/properties/traefik_datapath", + "type": "string", + "title": "Traefik datapath", + "examples": [ + "/tmp/cyphernode/traefik" + ] + }, + "traefik_datapath_custom": { + "$id": "#/properties/traefik_datapath_custom", + "type": "string", + "title": "Traefik custom datapath", + 
"examples": [ + "/tmp/cyphernode/traefik" + ] + }, + "postgres_password": { + "$id": "#/properties/postgres_password", + "type": "string", + "title": "Postgres cyphernode's password", + "default": "CHANGEME", + "examples": [ + "CHANGEME" + ] + }, + "postgres_datapath": { + "$id": "#/properties/postgres_datapath", + "type": "string", + "title": "Postgres datapath", + "examples": [ + "/tmp/cyphernode/postgres" + ] + }, + "postgres_datapath_custom": { + "$id": "#/properties/postgres_datapath_custom", + "type": "string", + "title": "Postgres custom datapath", + "examples": [ + "/tmp/cyphernode/postgres" + ] + }, + "logs_datapath": { + "$id": "#/properties/logs_datapath", + "type": "string", + "title": "Logs datapath", + "examples": [ + "/tmp/cyphernode/logs" + ] + }, + "logs_datapath_custom": { + "$id": "#/properties/logs_datapath_custom", + "type": "string", + "title": "Logs custom datapath", + "examples": [ + "/tmp/cyphernode/logs" + ] + }, + "tor_datapath": { + "$id": "#/properties/tor_datapath", + "type": "string", + "title": "Tor datapath", + "examples": [ + "/tmp/cyphernode/tor" + ] + }, + "tor_datapath_custom": { + "$id": "#/properties/tor_datapath_custom", + "type": "string", + "title": "Tor custom datapath", + "examples": [ + "/tmp/cyphernode/tor" + ] + }, + "lightning_announce": { + "$id": "#/properties/lightning_announce", + "type": "boolean", + "title": "Announce lightning ip", + "default": false, + "examples": [ + false + ] + }, + "lightning_external_ip": { + "$id": "#/properties/lightning_external_ip", + "type": "string", + "format": "ipv4", + "title": "External lightning node ip", + "examples": [ + "123.123.123.123" + ] + }, + "bitcoin_mode": { + "$id": "#/properties/bitcoin_mode", + "type": "string", + "enum": [ + "internal" + ], + "title": "Bitcoin mode", + "default": "internal", + "examples": [ + "internal" + ] + }, + "bitcoin_expose": { + "$id": "#/properties/bitcoin_expose", + "type": "boolean", + "title": "Expose bitcoin node", + "default": false, + "examples": [ + true + ] + }, + "lightning_expose": { + "$id": "#/properties/lightning_expose", + "type": "boolean", + "title": "Expose lightning node", + "default": true, + "examples": [ + false + ] + }, + "gatekeeper_expose": { + "$id": "#/properties/gatekeeper_expose", + "type": "boolean", + "title": "Expose gatekeeper port", + "default": false, + "examples": [ + true + ] + }, + "gatekeeper_datapath": { + "$id": "#/properties/gatekeeper_datapath", + "type": "string", + "title": "Gatekeeper datapath", + "examples": [ + "/tmp/cyphernode/gatekeeper" + ] + }, + "gatekeeper_datapath_custom": { + "$id": "#/properties/gatekeeper_datapath_custom", + "type": "string", + "title": "Gatekeeper custom datapath", + "examples": [ + "/tmp/cyphernode/gatekeeper" + ] + }, + "gatekeeper_port": { + "$id": "#/properties/gatekeeper_port", + "type": "integer", + "title": "Gatekeeper port", + "default": 2009, + "examples": [ + 2009 + ] + }, + "gatekeeper_keys": { + "$id": "#/properties/gatekeeper_keys", + "type": "object", + "title": "Gatekeeper keys", + "default": { + "configEntries": [], + "clientInformation": [] + }, + "required": [ + "configEntries", + "clientInformation" + ], + "properties": { + "configEntries": { + "$id": "#/properties/gatekeeper_keys/configEntries", + "type": "array", + "items": { + "$id": "#/properties/gatekeeper_keys/configEntries/entry", + "type": "string", + "pattern": "^kapi_id=\".+\";kapi_key=\".+\";kapi_groups=\".+\";.+$" + }, + "examples": [ + [ + 
"kapi_id=\"000\";kapi_key=\"a27f9e73fdde6a5005879c259c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a\";kapi_groups=\"stats\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}", + "kapi_id=\"001\";kapi_key=\"a27f9e73fdde6a5005879c273c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a\";kapi_groups=\"stats,watcher\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}", + "kapi_id=\"002\";kapi_key=\"fe58ddbb66d7302a7087af3242a98b6326c51a257f5eab1c06bb8cc02e25890d\";kapi_groups=\"stats,watcher,spender\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}", + "kapi_id=\"003\";kapi_key=\"f0b8bb52f4c7007938757bcdfc73b452d6ce08cc0c660ce57c5464ae95f35417\";kapi_groups=\"stats,watcher,spender,admin\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}" + ] + ] + }, + "clientInformation": { + "$id": "#/properties/gatekeeper_keys/clientInformation", + "type": "array", + "items": { + "$id": "#/properties/gatekeeper_keys/clientInformation/entry", + "type": "string", + "pattern": "^.+=.+$" + }, + "examples": [ + [ + "000=a27f9e73fdde6a5005879c259c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a", + "001=a27f9e73fdde6a5005879c273c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a", + "002=fe58ddbb66d7302a7087af3242a98b6326c51a257f5eab1c06bb8cc02e25890d", + "003=f0b8bb52f4c7007938757bcdfc73b452d6ce08cc0c660ce57c5464ae95f35417" + ] + ] + } + } + }, + "gatekeeper_sslcert": { + "$id": "#/properties/gatekeeper_sslcert", + "type": "string", + "title": "Gatekeeper SSL Cert" + }, + "gatekeeper_sslkey": { + "$id": "#/properties/gatekeeper_sslkey", + "type": "string", + "title": "Gatekeeper SSL Key" + }, + "gatekeeper_cns": { + "$id": "#/properties/gatekeeper_cns", + "type": "string", + "title": "Gatekeeper cns", + "examples": [ + "myhost.mydomain.com,*.myotherdomain.com,123.123.123.123" + ] + }, + "gatekeeper_clientkeyspassword": { + "$id": "#/properties/gatekeeper_clientkeyspassword", + "type": "string", + "title": "Password for the encrypted client keys archive" + }, + "adminhash": { + "$id": "#/properties/adminhash", + "type": "string", + "title": "Bcrypted hash of admin password" + }, + "lightning_implementation": { + "$id": "#/properties/lightning_implementation", + "type": "string", + "enum": [ + "c-lightning" + ], + "title": "The lightning implementation", + "default": "c-lightning", + "examples": [ + "c-lightning" + ] + }, + "lightning_nodename": { + "$id": "#/properties/lightning_nodename", + "type": "string", + "title": "The lightning node name", + "examples": [ + "🚀 Mighty Moose 🚀" + ] + }, + "lightning_nodecolor": { + "$id": "#/properties/lightning_nodecolor", + "type": "string", + "pattern": "^[0-9A-Fa-f]{6}$", + "title": "The lightning node color", + "examples": [ + "ff0000", + "00ff00", + "00ffff" + ] + } + } +} diff --git a/cyphernodeconf_docker/templates/installer/config.sh b/cyphernodeconf_docker/templates/installer/config.sh index f54fcd5..0936b43 100644 --- a/cyphernodeconf_docker/templates/installer/config.sh +++ b/cyphernodeconf_docker/templates/installer/config.sh @@ -8,6 +8,7 @@ LIGHTNING_IMPLEMENTATION=<%= lightning_implementation %> PROXY_DATAPATH=<%= proxy_datapath %> GATEKEEPER_DATAPATH=<%= gatekeeper_datapath %> GATEKEEPER_PORT=<%= gatekeeper_port %> +POSTGRES_DATAPATH=<%= postgres_datapath %> LOGS_DATAPATH=<%= logs_datapath %> TRAEFIK_DATAPATH=<%= traefik_datapath %> FEATURE_TOR=<%= (features.indexOf('tor') != -1)?'true':'false' %> diff --git a/cyphernodeconf_docker/templates/installer/docker/docker-compose.yaml 
b/cyphernodeconf_docker/templates/installer/docker/docker-compose.yaml index b53e149..ce178d1 100644 --- a/cyphernodeconf_docker/templates/installer/docker/docker-compose.yaml +++ b/cyphernodeconf_docker/templates/installer/docker/docker-compose.yaml @@ -2,6 +2,36 @@ version: "3" services: + ########################## + # POSTGRESQL # + ########################## + + postgres: + image: postgres:<%= postgres_version %> + environment: + - "POSTGRES_USER=cyphernode" + - "POSTGRES_PASSWORD=<%= postgres_password %>" + - "POSTGRES_DB=cyphernode" + - "PGDATA=/var/lib/postgresql/data/pgdata" + volumes: + - "<%= postgres_datapath %>:/var/lib/postgresql/data" + networks: + - cyphernodenet + <% if ( docker_mode === 'swarm' ) { %> + deploy: + replicas: 1 + placement: + constraints: + - node.labels.io.cyphernode == true + restart_policy: + condition: "any" + delay: 1s + update_config: + parallelism: 1 + <% } else { %> + restart: always + <% } %> + <% if ( features.indexOf('tor') !== -1 ) { %> ########################## # TOR # @@ -156,6 +186,7 @@ services: - "OTSCLIENT_CONTAINER=otsclient:6666" - "OTS_FILES=/proxy/otsfiles" - "XPUB_DERIVATION_GAP=100" + - "PGPASSFILE=/proxy/db/pgpass" <% if ( devmode ) { %> ports: - "8888:8888" @@ -174,6 +205,8 @@ services: <% } %> networks: - cyphernodenet + depends_on: + - postgres <% if ( docker_mode === 'swarm' ) { %> deploy: replicas: 1 diff --git a/cyphernodeconf_docker/templates/installer/testfeatures.sh b/cyphernodeconf_docker/templates/installer/testfeatures.sh index 3d113d7..c97447e 100644 --- a/cyphernodeconf_docker/templates/installer/testfeatures.sh +++ b/cyphernodeconf_docker/templates/installer/testfeatures.sh @@ -80,6 +80,19 @@ checkpycoin() { return 0 } +checkpostgres() { + echo -en "\r\n\e[1;36mTesting Postgres... " > /dev/console + local rc + + # getbatcher needs the database to return correctly... + rc=$(curl -s -o /dev/null -w "%{http_code}" http://proxy:8888/getbatcher) + [ "${rc}" -ne "200" ] && return 105 + + echo -e "\e[1;36mPostgres rocks!" > /dev/console + + return 0 +} + checkbroker() { echo -en "\r\n\e[1;36mTesting Broker... " > /dev/console local rc @@ -170,12 +183,12 @@ checkservice() { while : do outcome=0 - for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do + for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do echo -e " \e[0;32mVerifying \e[0;33m${container}\e[0;32m..." > /dev/console (ping -c 10 ${container} 2> /dev/null | grep "0% packet loss" > /dev/null) & eval ${container}=$! done - for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do + for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do eval wait '$'${container} ; returncode=$? 
; outcome=$((${outcome} + ${returncode})) eval c_${container}=${returncode} done @@ -193,12 +206,13 @@ checkservice() { # { "name": "proxy", "active":true }, # { "name": "proxycron", "active":true }, # { "name": "pycoin", "active":true }, + # { "name": "postgres", "active":true }, # { "name": "otsclient", "active":true }, # { "name": "tor", "active":true }, # { "name": "bitcoin", "active":true }, # { "name": "lightning", "active":true }, # ] - for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do + for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do [ -n "${result}" ] && result="${result}," result="${result}{\"name\":\"${container}\",\"active\":" eval "returncode=\$c_${container}" @@ -254,6 +268,7 @@ feature_status() { # { "name": "proxy", "active":true }, # { "name": "proxycron", "active":true }, # { "name": "pycoin", "active":true }, +# { "name": "postgres", "active":true }, # { "name": "otsclient", "active":true }, # { "name": "tor", "active":true }, # { "name": "bitcoin", "active":true }, @@ -262,6 +277,7 @@ feature_status() { # "features": [ # { "name": "gatekeeper", "working":true }, # { "name": "pycoin", "working":true }, +# { "name": "postgres", "working":true }, # { "name": "otsclient", "working":true }, # { "name": "tor", "working":true }, # { "name": "bitcoin", "working":true }, @@ -296,6 +312,7 @@ fi # "features": [ # { "name": "gatekeeper", "working":true }, # { "name": "pycoin", "working":true }, +# { "name": "postgres", "working":true }, # { "name": "otsclient", "working":true }, # { "name": "tor", "working":true }, # { "name": "bitcoin", "working":true }, @@ -362,6 +379,21 @@ fi finalreturncode=$((${returncode} | ${finalreturncode})) result="${result}$(feature_status ${returncode} 'Pycoin error!')}" +############################# +# POSTGRES # +############################# + +result="${result},{\"coreFeature\":true, \"name\":\"postgres\",\"working\":" +status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"postgres\") | .active") +if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then + timeout_feature checkpostgres + returncode=$? 
+else
+  returncode=1
+fi
+finalreturncode=$((${returncode} | ${finalreturncode}))
+result="${result}$(feature_status ${returncode} 'Postgres error!')}"
+
 <% if (features.indexOf('otsclient') != -1) { %>
 #############################
 # OTSCLIENT                 #
diff --git a/cyphernodeconf_docker/templates/postgres/pgpass b/cyphernodeconf_docker/templates/postgres/pgpass
new file mode 100644
index 0000000..9d20cfe
--- /dev/null
+++ b/cyphernodeconf_docker/templates/postgres/pgpass
@@ -0,0 +1 @@
+postgres:5432:cyphernode:cyphernode:<%= postgres_password %>
\ No newline at end of file
diff --git a/dist/setup.sh b/dist/setup.sh
index f1d336d..4d9e34c 100755
--- a/dist/setup.sh
+++ b/dist/setup.sh
@@ -110,7 +110,7 @@ sudo_if_required() {
 }
 
 modify_permissions() {
-  local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
+  local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
   for d in "${directories[@]}"
   do
     if [[ -e $d ]]; then
@@ -122,7 +122,7 @@ modify_permissions() {
 }
 
 modify_owner() {
-  local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
+  local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
   local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER)
   for d in "${directories[@]}"
   do
@@ -193,6 +193,7 @@ configure() {
      -e PROXYCRON_VERSION=$PROXYCRON_VERSION \
      -e OTSCLIENT_VERSION=$OTSCLIENT_VERSION \
      -e PYCOIN_VERSION=$PYCOIN_VERSION \
+     -e POSTGRES_VERSION=$POSTGRES_VERSION \
      -e BITCOIN_VERSION=$BITCOIN_VERSION \
      -e LIGHTNING_VERSION=$LIGHTNING_VERSION \
      -e CONF_VERSION=$CONF_VERSION \
@@ -385,9 +386,16 @@ install_docker() {
     copy_file $cyphernodeconf_filepath/traefik/htpasswd $GATEKEEPER_DATAPATH/htpasswd 1 $SUDO_REQUIRED
 
+    if [ ! -d $POSTGRES_DATAPATH ]; then
+      step " create $POSTGRES_DATAPATH"
+      sudo_if_required mkdir -p $POSTGRES_DATAPATH
+      next
+    fi
+
+
     if [ ! -d $LOGS_DATAPATH ]; then
       step " create $LOGS_DATAPATH"
       sudo_if_required mkdir -p $LOGS_DATAPATH
       next
     fi
@@ -461,6 +469,8 @@ install_docker() {
   copy_file $cyphernodeconf_filepath/installer/config.sh $PROXY_DATAPATH/config.sh 1 $SUDO_REQUIRED
   copy_file $cyphernodeconf_filepath/cyphernode/info.json $PROXY_DATAPATH/info.json 1 $SUDO_REQUIRED
+  copy_file $cyphernodeconf_filepath/postgres/pgpass $PROXY_DATAPATH/pgpass 1 $SUDO_REQUIRED
+  sudo_if_required chmod 0600 $PROXY_DATAPATH/pgpass
 
   if [[ $BITCOIN_INTERNAL == true ]]; then
     if [ !
-d $BITCOIN_DATAPATH ]; then @@ -652,7 +662,7 @@ install_docker() { check_directory_owner() { # if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0 - local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") + local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local status=0 for d in "${directories[@]}" do @@ -756,7 +766,7 @@ sanity_checks_pre_install() { if [[ $sudo_reason == 'directories' ]]; then echo " or check your data volumes if they have the right owner." echo " The owner of the following folders should be '$RUN_AS_USER':" - local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") + local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local status=0 for d in "${directories[@]}" do @@ -859,6 +869,7 @@ BITCOIN_VERSION="v0.21.1" LIGHTNING_VERSION="v0.10.1" TRAEFIK_VERSION="v1.7.9-alpine" MOSQUITTO_VERSION="1.6-openssl" +POSTGRES_VERSION="14.0-alpine" SETUP_DIR=$(dirname $(realpath $0)) diff --git a/doc/openapi/v0/cyphernode-api.yaml b/doc/openapi/v0/cyphernode-api.yaml index b794adb..5af8c10 100644 --- a/doc/openapi/v0/cyphernode-api.yaml +++ b/doc/openapi/v0/cyphernode-api.yaml @@ -224,8 +224,6 @@ paths: - "pub32" - "path" - "nstart" - - "unconfirmedCallbackURL" - - "confirmedCallbackURL" properties: label: description: "Label for that xpub. Can be used, instead for xpub, for future references in xpub-related endpoints." 
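A note on how the Postgres credential pieces above fit together: the configurator renders templates/postgres/pgpass with the chosen password, setup.sh copies it to $PROXY_DATAPATH/pgpass and chmods it to 0600 (libpq refuses to use a password file that is group- or world-readable), and docker-compose points the proxy at the copied file with PGPASSFILE=/proxy/db/pgpass. Any psql call in the proxy can then authenticate without a password prompt, as the migration script further below does. A minimal sketch of such a call follows; the proxy's actual sql() helper is not part of this diff, and the -At -F '|' flags are an assumption, chosen to be consistent with the cut -d '|' row parsing visible in batching.sh:

#!/bin/sh
# Hypothetical sketch of a psql wrapper in the spirit of the proxy's sql() helper.
# PGPASSFILE is set in docker-compose.yaml; the pgpass file supplies credentials
# in host:port:database:user:password form (postgres:5432:cyphernode:cyphernode:...).
export PGPASSFILE=/proxy/db/pgpass

sql() {
  # -A unaligned output, -t tuples only, -F '|' field separator:
  # rows come back as val1|val2|... and parse with cut -d '|'
  psql -h postgres -U cyphernode -d cyphernode -At -F '|' -c "$1"
}

# Example usage, mirroring the row parsing done in batching.sh:
row=$(sql "SELECT COUNT(id), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL")
count=$(echo "${row}" | cut -d '|' -f1)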
diff --git a/proxy_docker/Dockerfile b/proxy_docker/Dockerfile index 967b4ee..21d460b 100644 --- a/proxy_docker/Dockerfile +++ b/proxy_docker/Dockerfile @@ -8,7 +8,8 @@ RUN apk add --update --no-cache \ curl \ su-exec \ py3-pip \ - xxd + xxd \ + postgresql WORKDIR ${HOME} diff --git a/proxy_docker/app/data/cyphernode.postgresql b/proxy_docker/app/data/cyphernode.postgresql new file mode 100644 index 0000000..b69f300 --- /dev/null +++ b/proxy_docker/app/data/cyphernode.postgresql @@ -0,0 +1,164 @@ +BEGIN; + +CREATE TABLE watching_by_pub32 ( + id SERIAL PRIMARY KEY, + pub32 VARCHAR UNIQUE, + label VARCHAR UNIQUE, + derivation_path VARCHAR, + callback0conf VARCHAR, + callback1conf VARCHAR, + last_imported_n INTEGER, + watching BOOLEAN DEFAULT FALSE, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE watching ( + id SERIAL PRIMARY KEY, + address VARCHAR, + label VARCHAR, + watching BOOLEAN DEFAULT FALSE, + callback0conf VARCHAR, + calledback0conf BOOLEAN DEFAULT FALSE, + callback1conf VARCHAR, + calledback1conf BOOLEAN DEFAULT FALSE, + imported BOOLEAN DEFAULT FALSE, + watching_by_pub32_id INTEGER REFERENCES watching_by_pub32, + pub32_index INTEGER, + event_message VARCHAR, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +CREATE INDEX idx_watching_address ON watching (address); +CREATE UNIQUE INDEX idx_watching_01 ON watching (address, COALESCE(callback0conf, ''), COALESCE(callback1conf, '')); +CREATE INDEX idx_watching_label ON watching (label); +CREATE INDEX idx_watching_watching ON watching (watching); +CREATE INDEX idx_watching_imported ON watching (imported); +CREATE INDEX idx_watching_watching_by_pub32_id ON watching (watching_by_pub32_id); + +CREATE TABLE tx ( + id SERIAL PRIMARY KEY, + txid VARCHAR UNIQUE, + hash VARCHAR UNIQUE, + confirmations INTEGER DEFAULT 0, + timereceived INTEGER, + fee REAL, + size INTEGER, + vsize INTEGER, + is_replaceable BOOLEAN, + blockhash VARCHAR, + blockheight INTEGER, + blocktime INTEGER, + conf_target INTEGER, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +CREATE INDEX idx_tx_timereceived ON tx (timereceived); +CREATE INDEX idx_tx_fee ON tx (fee); +CREATE INDEX idx_tx_size ON tx (size); +CREATE INDEX idx_tx_vsize ON tx (vsize); +CREATE INDEX idx_tx_blockhash ON tx (blockhash); +CREATE INDEX idx_tx_blockheight ON tx (blockheight); +CREATE INDEX idx_tx_blocktime ON tx (blocktime); +CREATE INDEX idx_tx_confirmations ON tx (confirmations); + +CREATE TABLE watching_tx ( + watching_id INTEGER REFERENCES watching, + tx_id INTEGER REFERENCES tx, + vout INTEGER, + amount REAL +); +CREATE UNIQUE INDEX idx_watching_tx ON watching_tx (watching_id, tx_id); +CREATE INDEX idx_watching_tx_watching_id ON watching_tx (watching_id); +CREATE INDEX idx_watching_tx_tx_id ON watching_tx (tx_id); + +CREATE TABLE batcher ( + id SERIAL PRIMARY KEY, + label VARCHAR UNIQUE, + conf_target INTEGER, + feerate REAL, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +INSERT INTO batcher (id, label, conf_target, feerate) VALUES (1, 'default', 6, NULL); + +CREATE TABLE recipient ( + id SERIAL PRIMARY KEY, + address VARCHAR, + amount REAL, + tx_id INTEGER REFERENCES tx, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + webhook_url VARCHAR, + calledback BOOLEAN DEFAULT FALSE, + calledback_ts TIMESTAMP, + batcher_id INTEGER REFERENCES batcher, + label VARCHAR +); +CREATE INDEX idx_recipient_address ON recipient (address); +CREATE INDEX idx_recipient_label ON recipient (label); +CREATE INDEX idx_recipient_calledback ON recipient (calledback); +CREATE 
INDEX idx_recipient_webhook_url ON recipient (webhook_url); +CREATE INDEX idx_recipient_tx_id ON recipient (tx_id); +CREATE INDEX idx_recipient_batcher_id ON recipient (batcher_id); + +CREATE TABLE watching_by_txid ( + id SERIAL PRIMARY KEY, + txid VARCHAR, + watching BOOLEAN DEFAULT FALSE, + callback1conf VARCHAR, + calledback1conf BOOLEAN DEFAULT FALSE, + callbackxconf VARCHAR, + calledbackxconf BOOLEAN DEFAULT FALSE, + nbxconf INTEGER, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +CREATE INDEX idx_watching_by_txid_txid ON watching_by_txid (txid); +CREATE UNIQUE INDEX idx_watching_by_txid_1x ON watching_by_txid (txid, callback1conf, callbackxconf); +CREATE INDEX idx_watching_by_txid_watching ON watching_by_txid (watching); +CREATE INDEX idx_watching_by_txid_callback1conf ON watching_by_txid (callback1conf); +CREATE INDEX idx_watching_by_txid_calledback1conf ON watching_by_txid (calledback1conf); +CREATE INDEX idx_watching_by_txid_callbackxconf ON watching_by_txid (callbackxconf); +CREATE INDEX idx_watching_by_txid_calledbackxconf ON watching_by_txid (calledbackxconf); + +CREATE TABLE stamp ( + id SERIAL PRIMARY KEY, + hash VARCHAR UNIQUE, + callbackUrl VARCHAR, + requested BOOLEAN DEFAULT FALSE, + upgraded BOOLEAN DEFAULT FALSE, + calledback BOOLEAN DEFAULT FALSE, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +CREATE INDEX idx_stamp_calledback ON stamp (calledback); + +CREATE TABLE cyphernode_props ( + id SERIAL PRIMARY KEY, + property VARCHAR, + value VARCHAR, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +CREATE INDEX idx_cp_property ON cyphernode_props (property); +CREATE UNIQUE INDEX idx_cp_propval ON cyphernode_props (property, value); + +INSERT INTO cyphernode_props (property, value) VALUES ('version', '0.1'); +INSERT INTO cyphernode_props (property, value) VALUES ('pay_index', '0'); + +CREATE TABLE ln_invoice ( + id SERIAL PRIMARY KEY, + label VARCHAR UNIQUE, + bolt11 VARCHAR UNIQUE, + payment_hash VARCHAR, + msatoshi INTEGER, + status VARCHAR, + pay_index INTEGER, + msatoshi_received INTEGER, + paid_at INTEGER, + description VARCHAR, + expires_at INTEGER, + callback_url VARCHAR, + calledback BOOLEAN DEFAULT FALSE, + callback_failed BOOLEAN DEFAULT FALSE, + inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +CREATE INDEX idx_lninvoice_label ON ln_invoice (label); +CREATE INDEX idx_lninvoice_bolt11 ON ln_invoice (bolt11); +CREATE INDEX idx_lninvoice_calledback ON ln_invoice (calledback); +CREATE INDEX idx_lninvoice_callback_failed ON ln_invoice (callback_failed); + +COMMIT; diff --git a/proxy_docker/app/data/rawtx.sql b/proxy_docker/app/data/rawtx.sql deleted file mode 100644 index 1028845..0000000 --- a/proxy_docker/app/data/rawtx.sql +++ /dev/null @@ -1,27 +0,0 @@ -PRAGMA foreign_keys = ON; - -CREATE TABLE rawtx ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - txid TEXT UNIQUE, - hash TEXT UNIQUE, - confirmations INTEGER DEFAULT 0, - timereceived INTEGER, - fee REAL, - size INTEGER, - vsize INTEGER, - is_replaceable INTEGER, - blockhash TEXT, - blockheight INTEGER, - blocktime INTEGER, - conf_target INTEGER, - raw_tx TEXT, - inserted_ts INTEGER DEFAULT CURRENT_TIMESTAMP -); -CREATE INDEX idx_rawtx_timereceived ON rawtx (timereceived); -CREATE INDEX idx_rawtx_fee ON rawtx (fee); -CREATE INDEX idx_rawtx_size ON rawtx (size); -CREATE INDEX idx_rawtx_vsize ON rawtx (vsize); -CREATE INDEX idx_rawtx_blockhash ON rawtx (blockhash); -CREATE INDEX idx_rawtx_blockheight ON rawtx (blockheight); -CREATE INDEX idx_rawtx_blocktime ON rawtx (blocktime); -CREATE INDEX 
idx_rawtx_confirmations ON rawtx (confirmations);
diff --git a/proxy_docker/app/data/sqlmigrate20210928_0.7.0-0.8.0.sh b/proxy_docker/app/data/sqlmigrate20210928_0.7.0-0.8.0.sh
index 1b62004..65fb860 100644
--- a/proxy_docker/app/data/sqlmigrate20210928_0.7.0-0.8.0.sh
+++ b/proxy_docker/app/data/sqlmigrate20210928_0.7.0-0.8.0.sh
@@ -1,17 +1,14 @@
 #!/bin/sh
 
-echo "Checking for rawtx database support in DB..."
-if [ ! -e ${DB_FILE}_rawtx ]; then
-  # rawtx database not found
-  echo "Migrating database for rawtx database support..."
+echo "Checking for new indexes in DB..."
+sqlite3 $DB_FILE ".indexes" | grep "idx_watching_watching" > /dev/null
+if [ "$?" -eq "1" ]; then
+  # idx_watching_watching index not found
+  echo "Migrating database with new indexes..."
   echo "Backing up current DB..."
   cp $DB_FILE $DB_FILE-sqlmigrate20210928_0.7.0-0.8.0
   echo "Altering DB..."
   cat sqlmigrate20210928_0.7.0-0.8.0.sql | sqlite3 $DB_FILE
-  echo "Creating new DB..."
-  cat rawtx.sql | sqlite3 ${DB_FILE}_rawtx
-  echo "Inserting table in new DB..."
-  sqlite3 -cmd ".timeout 25000" ${DB_FILE} "ATTACH DATABASE \"${DB_FILE}_rawtx\" AS other; INSERT INTO other.rawtx SELECT * FROM tx; DETACH other;"
 else
-  echo "rawtx database support migration already done, skipping!"
+  echo "New indexes migration already done, skipping!"
 fi
diff --git a/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0.sh b/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0.sh
new file mode 100644
index 0000000..0b4e251
--- /dev/null
+++ b/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+echo "Waiting for postgres to be ready..."
+(while true ; do psql -h postgres -U cyphernode -c "select 1;" ; [ "$?" -eq "0" ] && break ; sleep 10; done) &
+wait
+
+echo "Checking if postgres is setup..."
+psql -h postgres -U cyphernode -c "\d" | grep "cyphernode_props" > /dev/null
+if [ "$?" -eq "1" ]; then
+  # if the cyphernode_props table doesn't exist, it's probably because the database hasn't been set up yet
+  echo "Creating postgres database..."
+  psql -h postgres -f cyphernode.postgresql -U cyphernode
+
+  echo "Extracting and converting sqlite3 data..."
+  cat sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql | sqlite3 $DB_FILE
+  sed -ie 's/^\(INSERT.*\);$/\1 ON CONFLICT DO NOTHING;/g' sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql
+
+  echo "...appending postgresql sequences..."
+  echo "
+  select setval('cyphernode_props_id_seq', (SELECT MAX(id) FROM cyphernode_props));
+  select setval('ln_invoice_id_seq', (SELECT MAX(id) FROM ln_invoice));
+  select setval('recipient_id_seq', (SELECT MAX(id) FROM recipient));
+  select setval('stamp_id_seq', (SELECT MAX(id) FROM stamp));
+  select setval('tx_id_seq', (SELECT MAX(id) FROM tx));
+  select setval('watching_by_pub32_id_seq', (SELECT MAX(id) FROM watching_by_pub32));
+  select setval('watching_by_txid_id_seq', (SELECT MAX(id) FROM watching_by_txid));
+  select setval('watching_id_seq', (SELECT MAX(id) FROM watching));
+  select setval('batcher_id_seq', (SELECT MAX(id) FROM batcher));
+  commit;
+  " >> sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql
+
+  echo "Importing sqlite3 data into postgresql..."
+  psql -h postgres -f sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql -U cyphernode
+else
+  echo "Postgres migration already done, skipping!"
+fi diff --git a/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql b/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql new file mode 100644 index 0000000..61a8265 --- /dev/null +++ b/proxy_docker/app/data/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql @@ -0,0 +1,24 @@ +.output sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql +select "BEGIN;"; +.headers on +.mode insert watching_by_pub32 +select id,pub32,label,derivation_path,callback0conf,callback1conf,last_imported_n,case when watching=1 then 'TRUE' else 'FALSE' end as watching,inserted_ts from watching_by_pub32; +.mode insert watching +select id,address,label,case when watching=1 then 'TRUE' else 'FALSE' end as watching,callback0conf,case when calledback0conf=1 then 'TRUE' else 'FALSE' end as calledback0conf,callback1conf,case when calledback1conf=1 then 'TRUE' else 'FALSE' end as calledback1conf,case when imported=1 then 'TRUE' else 'FALSE' end as imported,watching_by_pub32_id,pub32_index,event_message,inserted_ts from watching; +.mode insert tx +select id,txid,hash,confirmations,timereceived,fee,size,vsize,case when is_replaceable=1 then 'TRUE' else 'FALSE' end as is_replaceable,blockhash,blockheight,blocktime,conf_target,inserted_ts from tx; +.mode insert watching_tx +select * from watching_tx; +.mode insert batcher +select * from batcher; +.mode insert recipient +select id,address,amount,tx_id,inserted_ts,webhook_url,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,calledback_ts,batcher_id,label from recipient; +.mode insert watching_by_txid +select id,txid,case when watching=1 then 'TRUE' else 'FALSE' end as watching,callback1conf,case when calledback1conf=1 then 'TRUE' else 'FALSE' end as calledback1conf,callbackxconf,case when calledbackxconf=1 then 'TRUE' else 'FALSE' end as calledbackxconf,nbxconf,inserted_ts from watching_by_txid; +.mode insert stamp +select id,hash,callbackUrl,case when requested=1 then 'TRUE' else 'FALSE' end as requested,case when upgraded=1 then 'TRUE' else 'FALSE' end as upgraded,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,inserted_ts from stamp; +.mode insert cyphernode_props +select * from cyphernode_props; +.mode insert ln_invoice +select id,label,bolt11,payment_hash,msatoshi,status,pay_index,msatoshi_received,paid_at,description,expires_at,callback_url,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,case when callback_failed=1 then 'TRUE' else 'FALSE' end as callback_failed,inserted_ts from ln_invoice; +.quit diff --git a/proxy_docker/app/script/batching.sh b/proxy_docker/app/script/batching.sh index 427d9a4..5fbc88e 100644 --- a/proxy_docker/app/script/batching.sh +++ b/proxy_docker/app/script/batching.sh @@ -22,7 +22,8 @@ createbatcher() { local request=${1} local response - local label=$(echo "${request}" | jq ".batcherLabel") + local returncode + local label=$(echo "${request}" | jq -r ".batcherLabel") trace "[createbatcher] label=${label}" local conf_target=$(echo "${request}" | jq ".confTarget") trace "[createbatcher] conf_target=${conf_target}" @@ -37,13 +38,18 @@ createbatcher() { local batcher_id - batcher_id=$(sql "INSERT OR IGNORE INTO batcher (label, conf_target, feerate) VALUES (${label}, ${conf_target}, ${feerate}); SELECT LAST_INSERT_ROWID();") + batcher_id=$(sql "INSERT INTO batcher (label, conf_target, feerate)"\ +" VALUES ('${label}', ${conf_target}, ${feerate})"\ +" RETURNING id" \ + "SELECT id FROM batcher WHERE label='${label}'") + returncode=$? 
+  trace_rc ${returncode}
 
-  if ("${batcher_id}" -eq "0"); then
+  if [ "${returncode}" -ne "0" ]; then
     trace "[createbatcher] Could not insert"
     response='{"result":null,"error":{"code":-32700,"message":"Could not create batcher, label probably already exists","data":'${request}'}}'
   else
-    trace "[createbatcher] Inserted"
+    trace "[createbatcher] Inserted, response=${batcher_id}"
     response='{"result":{"batcherId":'${batcher_id}'},"error":null}'
   fi
 
@@ -79,7 +85,7 @@ updatebatcher() {
   local id=$(echo "${request}" | jq ".batcherId")
   trace "[updatebatcher] id=${id}"
-  local label=$(echo "${request}" | jq ".batcherLabel")
+  local label=$(echo "${request}" | jq -r ".batcherLabel")
   trace "[updatebatcher] label=${label}"
   local conf_target=$(echo "${request}" | jq ".confTarget")
   trace "[updatebatcher] conf_target=${conf_target}"
@@ -99,12 +105,12 @@ updatebatcher() {
   #  fi
 
   if [ "${id}" = "null" ]; then
-    whereclause="label=${label}"
+    whereclause="label='${label}'"
   else
     whereclause="id = ${id}"
   fi
 
-  sql "UPDATE batcher set label=${label}, conf_target=${conf_target}, feerate=${feerate} WHERE ${whereclause}"
+  sql "UPDATE batcher set label='${label}', conf_target=${conf_target}, feerate=${feerate} WHERE ${whereclause}"
   returncode=$?
   trace_rc ${returncode}
   if [ "${returncode}" -ne 0 ]; then
@@ -151,13 +157,13 @@ addtobatch() {
   trace "[addtobatch] address=${address}"
   local amount=$(echo "${request}" | jq ".amount")
   trace "[addtobatch] amount=${amount}"
-  local label=$(echo "${request}" | jq ".outputLabel")
+  local label=$(echo "${request}" | jq -r ".outputLabel")
   trace "[addtobatch] label=${label}"
   local batcher_id=$(echo "${request}" | jq ".batcherId")
   trace "[addtobatch] batcher_id=${batcher_id}"
-  local batcher_label=$(echo "${request}" | jq ".batcherLabel")
+  local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
   trace "[addtobatch] batcher_label=${batcher_label}"
-  local webhook_url=$(echo "${request}" | jq ".webhookUrl")
+  local webhook_url=$(echo "${request}" | jq -r ".webhookUrl")
   trace "[addtobatch] webhook_url=${webhook_url}"
 
   # Let's lowercase bech32 addresses
@@ -185,7 +191,7 @@ addtobatch() {
 
   if [ "${batcher_id}" = "null" ]; then
     # Using batcher_label
-    batcher_id=$(sql "SELECT id FROM batcher WHERE label=${batcher_label}")
+    batcher_id=$(sql "SELECT id FROM batcher WHERE label='${batcher_label}'")
     returncode=$?
     trace_rc ${returncode}
   fi
@@ -195,7 +201,7 @@ addtobatch() {
     response='{"result":null,"error":{"code":-32700,"message":"batcher not found","data":'${request}'}}'
   else
     # Check if address already pending for this batcher...
-    inserted_id=$(sql "SELECT id FROM recipient WHERE LOWER(address)=LOWER(\"${address}\") AND tx_id IS NULL AND batcher_id=${batcher_id}")
+    inserted_id=$(sql "SELECT id FROM recipient WHERE LOWER(address)=LOWER('${address}') AND tx_id IS NULL AND batcher_id=${batcher_id}")
     returncode=$?
     trace_rc ${returncode}
 
@@ -211,7 +217,9 @@ addtobatch() {
     fi
 
     # Insert the new destination
-    inserted_id=$(sql "INSERT INTO recipient (address, amount, webhook_url, batcher_id, label) VALUES (\"${address}\", ${amount}, ${webhook_url}, ${batcher_id}, ${label}); SELECT LAST_INSERT_ROWID();")
+    inserted_id=$(sql "INSERT INTO recipient (address, amount, webhook_url, batcher_id, label)"\
+" VALUES ('${address}', ${amount}, '${webhook_url}', ${batcher_id}, '${label}')"\
+" RETURNING id")
     returncode=$?
trace_rc ${returncode} @@ -280,7 +288,7 @@ removefrombatch() { if [ "${returncode}" -ne 0 ]; then response='{"result":null,"error":{"code":-32700,"message":"Output was not removed","data":'${request}'}}' else - row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}") + row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}") returncode=$? trace_rc ${returncode} @@ -336,7 +344,7 @@ batchspend() { local batcher_id=$(echo "${request}" | jq ".batcherId") trace "[batchspend] batcher_id=${batcher_id}" - local batcher_label=$(echo "${request}" | jq ".batcherLabel") + local batcher_label=$(echo "${request}" | jq -r ".batcherLabel") trace "[batchspend] batcher_label=${batcher_label}" local conf_target=$(echo "${request}" | jq ".confTarget") trace "[batchspend] conf_target=${conf_target}" @@ -351,7 +359,7 @@ batchspend() { if [ "${batcher_id}" = "null" ]; then # Using batcher_label - whereclause="label=${batcher_label}" + whereclause="label='${batcher_label}'" else whereclause="id=${batcher_id}" fi @@ -423,11 +431,11 @@ batchspend() { trace "[batchspend] webhook_url=${webhook_url}" if [ -z "${recipientsjson}" ]; then - whereclause="\"${recipient_id}\"" + whereclause="${recipient_id}" recipientsjson="\"${address}\":${amount}" webhooks_data="{\"outputId\":${recipient_id},\"address\":\"${address}\",\"amount\":${amount},\"webhookUrl\":\"${webhook_url}\"}" else - whereclause="${whereclause},\"${recipient_id}\"" + whereclause="${whereclause},${recipient_id}" recipientsjson="${recipientsjson},\"${address}\":${amount}" webhooks_data="${webhooks_data},{\"outputId\":${recipient_id},\"address\":\"${address}\",\"amount\":${amount},\"webhookUrl\":\"${webhook_url}\"}" fi @@ -452,7 +460,7 @@ batchspend() { tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n') # Amounts and fees are negative when spending so we absolute those fields - local tx_hash=$(echo "${tx_raw_details}" | jq '.result.hash') + local tx_hash=$(echo "${tx_raw_details}" | jq -r '.result.hash') local tx_ts_firstseen=$(echo "${tx_details}" | jq '.result.timereceived') local tx_amount=$(echo "${tx_details}" | jq '.result.amount | fabs' | awk '{ printf "%.8f", $0 }') local tx_size=$(echo "${tx_raw_details}" | jq '.result.size') @@ -462,25 +470,20 @@ batchspend() { tx_replaceable=$([ "${tx_replaceable}" = "yes" ] && echo "true" || echo "false") trace "[batchspend] tx_replaceable=${tx_replaceable}" local fees=$(echo "${tx_details}" | jq '.result.fee | fabs' | awk '{ printf "%.8f", $0 }') - # Sometimes raw tx are too long to be passed as paramater, so let's write - # it to a temp file for it to be read by sqlite3 and then delete the file - echo "${tx_raw_details}" > batchspend-rawtx-${txid}-$$.blob # Get the info on the batch before setting it to done - row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}") + row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}") returncode=$? 
trace_rc ${returncode} # Let's insert the txid in our little DB -- then we'll already have it when receiving confirmation - sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target, raw_tx) VALUES (\"${txid}\", ${tx_hash}, 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target}, readfile('batchspend-rawtx-${txid}-$$.blob'))" - trace_rc $? - id_inserted=$(sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target) VALUES (\"${txid}\", ${tx_hash}, 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target}); SELECT LAST_INSERT_ROWID();") + id_inserted=$(sql "INSERT INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target)"\ +" VALUES ('${txid}', '${tx_hash}', 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target})"\ +" RETURNING id" \ +"SELECT id FROM tx WHERE txid='${txid}'") returncode=$? trace_rc ${returncode} if [ "${returncode}" -eq 0 ]; then - if [ "${id_inserted}" -eq 0 ]; then - id_inserted=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"") - fi trace "[batchspend] id_inserted: ${id_inserted}" sql "UPDATE recipient SET tx_id=${id_inserted} WHERE id IN (${whereclause})" trace_rc $? @@ -495,13 +498,10 @@ batchspend() { trace "[batchspend] total=${total}" response='{"result":{"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total} - response="${response},\"status\":\"accepted\",\"txid\":\"${txid}\",\"hash\":${tx_hash},\"details\":{\"firstseen\":${tx_ts_firstseen},\"size\":${tx_size},\"vsize\":${tx_vsize},\"replaceable\":${tx_replaceable},\"fee\":${fees}},\"outputs\":[${webhooks_data}]}" + response="${response},\"status\":\"accepted\",\"txid\":\"${txid}\",\"hash\":\"${tx_hash}\",\"details\":{\"firstseen\":${tx_ts_firstseen},\"size\":${tx_size},\"vsize\":${tx_vsize},\"replaceable\":${tx_replaceable},\"fee\":${fees}},\"outputs\":[${webhooks_data}]}" response="${response},\"error\":null}" - # Delete the temp file containing the raw tx (see above) - rm batchspend-rawtx-${txid}-$$.blob - - batch_webhooks "[${webhooks_data}]" '"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total}',"status":"accepted","txid":"'${txid}'","hash":'${tx_hash}',"details":{"firstseen":'${tx_ts_firstseen}',"size":'${tx_size}',"vsize":'${tx_vsize}',"replaceable":'${tx_replaceable}',"fee":'${fees}'}' + batch_webhooks "[${webhooks_data}]" '"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total}',"status":"accepted","txid":"'${txid}'","hash":"'${tx_hash}'","details":{"firstseen":'${tx_ts_firstseen}',"size":'${tx_size}',"vsize":'${tx_vsize}',"replaceable":'${tx_replaceable}',"fee":'${fees}'}' else local message=$(echo "${data}" | jq -e ".error.message") @@ -578,7 +578,7 @@ batch_check_webhooks() { # I know this query for each output is not very efficient, but this function should not execute often, only in case of # failed callbacks on batches... 
# Get the info on the batch - row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient r WHERE tx_id=\"${tx_id}\"") + row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient r WHERE tx_id='${tx_id}'") # Use the selected row above count=$(echo "${row}" | cut -d '|' -f1) @@ -654,7 +654,7 @@ batch_webhooks() { fi done - sql "UPDATE recipient SET calledback=1, calledback_ts=CURRENT_TIMESTAMP WHERE id IN (${successful_recipient_ids})" + sql "UPDATE recipient SET calledback=true, calledback_ts=CURRENT_TIMESTAMP WHERE id IN (${successful_recipient_ids})" trace_rc $? } @@ -671,7 +671,7 @@ listbatchers() { # "error":null} - local batchers=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL GROUP BY b.id") + local batchers=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL GROUP BY b.id ORDER BY b.id") trace "[listbatchers] batchers=${batchers}" local returncode @@ -717,7 +717,7 @@ getbatcher() { local batcher_id=$(echo "${request}" | jq ".batcherId") trace "[getbatcher] batcher_id=${batcher_id}" - local batcher_label=$(echo "${request}" | jq ".batcherLabel") + local batcher_label=$(echo "${request}" | jq -r ".batcherLabel") trace "[getbatcher] batcher_label=${batcher_label}" if [ "${batcher_id}" = "null" ] && [ "${batcher_label}" = "null" ]; then @@ -728,13 +728,13 @@ getbatcher() { if [ "${batcher_id}" = "null" ]; then # Using batcher_label - whereclause="b.label=${batcher_label}" + whereclause="b.label='${batcher_label}'" else # Using batcher_id whereclause="b.id=${batcher_id}" fi - batcher=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL WHERE ${whereclause} GROUP BY b.id") + batcher=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL WHERE ${whereclause} GROUP BY b.id") trace "[getbatcher] batcher=${batcher}" if [ -n "${batcher}" ]; then @@ -797,9 +797,9 @@ getbatchdetails() { local batcher_id=$(echo "${request}" | jq ".batcherId") trace "[getbatchdetails] batcher_id=${batcher_id}" - local batcher_label=$(echo "${request}" | jq ".batcherLabel") + local batcher_label=$(echo "${request}" | jq -r ".batcherLabel") trace "[getbatchdetails] batcher_label=${batcher_label}" - local txid=$(echo "${request}" | jq ".txid") + local txid=$(echo "${request}" | jq -r ".txid") 
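Two quoting conventions drive most of the small hunks in this file: Postgres reserves double quotes for identifiers and single quotes for string literals, so WHERE label="lowfees" (which SQLite tolerated) would be parsed as a reference to a column named lowfees; and jq -r is added wherever a value ends up inside those single quotes, so the shell variable holds the raw string rather than a JSON-quoted one. Roughly, with an illustrative label:

    request='{"batcherLabel":"lowfees"}'
    label=$(echo "${request}" | jq -r ".batcherLabel")     # lowfees, no quotes
    echo "SELECT id FROM batcher WHERE label='${label}'"   # single-quoted SQL literal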
trace "[getbatchdetails] txid=${txid}" if [ "${batcher_id}" = "null" ] && [ "${batcher_label}" = "null" ]; then @@ -810,7 +810,7 @@ getbatchdetails() { if [ "${batcher_id}" = "null" ]; then # Using batcher_label - whereclause="b.label=${batcher_label}" + whereclause="b.label='${batcher_label}'" else # Using batcher_id whereclause="b.id=${batcher_id}" @@ -818,7 +818,7 @@ getbatchdetails() { if [ "${txid}" != "null" ]; then # Using txid - whereclause="${whereclause} AND t.txid=${txid}" + whereclause="${whereclause} AND t.txid='${txid}'" else # null txid whereclause="${whereclause} AND t.txid IS NULL" @@ -826,7 +826,7 @@ getbatchdetails() { fi # First get the batch summary - batch=$(sql "SELECT b.id, COALESCE(t.id, NULL), '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || b.conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id ${outerclause} LEFT JOIN tx t ON t.id=r.tx_id WHERE ${whereclause} GROUP BY b.id") + batch=$(sql "SELECT b.id, COALESCE(t.id, NULL), '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || b.conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id ${outerclause} LEFT JOIN tx t ON t.id=r.tx_id WHERE ${whereclause} GROUP BY b.id, t.id") trace "[getbatchdetails] batch=${batch}" if [ -n "${batch}" ]; then @@ -839,7 +839,7 @@ getbatchdetails() { # Using txid outerclause="AND r.tx_id=${tx_id}" - tx=$(sql "SELECT '\"txid\":\"' || txid || '\",\"hash\":\"' || hash || '\",\"details\":{\"firstseen\":' || timereceived || ',\"size\":' || size || ',\"vsize\":' || vsize || ',\"replaceable\":' || CASE is_replaceable WHEN 1 THEN 'true' ELSE 'false' END || ',\"fee\":' || fee || '}' FROM tx WHERE id=${tx_id}") + tx=$(sql "SELECT '\"txid\":\"' || txid || '\",\"hash\":\"' || hash || '\",\"details\":{\"firstseen\":' || timereceived || ',\"size\":' || size || ',\"vsize\":' || vsize || ',\"replaceable\":' || CASE is_replaceable WHEN true THEN 'true' ELSE 'false' END || ',\"fee\":' || fee || '}' FROM tx WHERE id=${tx_id}") else # null txid outerclause="AND r.tx_id IS NULL" diff --git a/proxy_docker/app/script/bitcoin.sh b/proxy_docker/app/script/bitcoin.sh index e851eef..ec47ea2 100644 --- a/proxy_docker/app/script/bitcoin.sh +++ b/proxy_docker/app/script/bitcoin.sh @@ -78,6 +78,14 @@ convert_pub32() { local checksum local pub32_dest + case "${pub32_from}" in + ${to_type}*) + trace "[convert_pub32] Already in the right format, exiting" + echo "${pub32_from}" + return + ;; + esac + case "${to_type}" in tpub) versionbytes="043587cf" diff --git a/proxy_docker/app/script/call_lightningd.sh b/proxy_docker/app/script/call_lightningd.sh index ecfbdb1..45793d1 100644 --- a/proxy_docker/app/script/call_lightningd.sh +++ b/proxy_docker/app/script/call_lightningd.sh @@ -8,8 +8,8 @@ ln_call_lightningd() { local response local returncode - trace "[ln_call_lightningd] ./lightning-cli $@" - response=$(./lightning-cli $@) + trace "[ln_call_lightningd] ./lightning-cli $(printf " \"%s\"" "$@")" + response=$(./lightning-cli "$@") returncode=$? 
trace_rc ${returncode} @@ -39,7 +39,7 @@ ln_create_invoice() { if [ "${callback_url}" != "null" ]; then - # If not null, let's add double-quotes so we don't need to add the double-quotes in the sql insert, + # If not null, let's add single quotes so we don't need to add them in the sql insert, # so if it's null, it will insert the actual sql NULL value. - callback_url="\"${callback_url}\"" + callback_url="'${callback_url}'" fi #/proxy $ ./lightning-cli invoice 10000 "t1" "t1d" 60 @@ -71,36 +71,33 @@ ln_create_invoice() { # Let's get the connect string if provided in configuration local connectstring=$(get_connection_string) - if [ "${msatoshi}" = "null" ]; then - sql "INSERT OR IGNORE INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, description, status) VALUES (\"${label}\", \"${bolt11}\", ${callback_url}, \"${payment_hash}\", ${expires_at}, \"${description}\", \"unpaid\")" - else - sql "INSERT OR IGNORE INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, msatoshi, description, status) VALUES (\"${label}\", \"${bolt11}\", ${callback_url}, \"${payment_hash}\", ${expires_at}, ${msatoshi}, \"${description}\", \"unpaid\")" - fi - trace_rc $? - id=$(sql "SELECT id FROM ln_invoice WHERE bolt11=\"${bolt11}\"") + id=$(sql "INSERT INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, msatoshi, description, status)"\ +" VALUES ('${label}', '${bolt11}', ${callback_url}, '${payment_hash}', ${expires_at}, ${msatoshi}, '${description}', 'unpaid')"\ +" RETURNING id" \ + "SELECT id FROM ln_invoice WHERE bolt11='${bolt11}'") trace_rc $? # { - # "id":"", + # "id":123, # "label":"", # "bolt11":"", # "connectstring":"", # "callbackUrl":"", # "payment_hash":"", - # "msatoshi":, + # "msatoshi":123456, # "status":"unpaid", # "description":"", - # "expires_at": + # "expires_at":21312312 # } - data="{\"id\":\"${id}\"," + data="{\"id\":${id}," data="${data}\"label\":\"${label}\"," data="${data}\"bolt11\":\"${bolt11}\"," if [ -n "${connectstring}" ]; then data="${data}\"connectstring\":\"${connectstring}\"," fi if [ "${callback_url}" != "null" ]; then - data="${data}\"callbackUrl\":${callback_url}," + data="${data}\"callbackUrl\":\"${callback_url}\"," fi data="${data}\"payment_hash\":\"${payment_hash}\"," if [ "${msatoshi}" != "null" ]; then diff --git a/proxy_docker/app/script/callbacks_job.sh b/proxy_docker/app/script/callbacks_job.sh index 39337db..ad19096 100644 --- a/proxy_docker/app/script/callbacks_job.sh +++ b/proxy_docker/app/script/callbacks_job.sh @@ -10,8 +10,17 @@ do_callbacks() { trace "Entering do_callbacks()..." + # If called because we received a confirmation for a specific txid, let's only + # process the callbacks related to that txid...
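One caveat with wrapping callback_url in literal single quotes for the SQL INSERT: the same variable is later embedded in the JSON response (the "callbackUrl" line above), where those quotes would leak into the output. A defensive option, sketched here as a suggestion rather than taken from the PR, is to quote only at the SQL boundary and keep a raw copy for JSON:

    # Hypothetical variable names; only the quoting split is the point.
    callback_url_json="${callback_url}"          # raw value, safe for JSON
    if [ "${callback_url}" != "null" ]; then
      callback_url_sql="'${callback_url}'"       # quoted literal for the INSERT
    else
      callback_url_sql="NULL"                    # bare SQL NULL
    fi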
+ local txid=${1} + local txid_where + if [ -n "${txid}" ]; then + trace "[do_callbacks] txid=${txid}" + txid_where=" AND txid='${txid}'" + fi + # Let's fetch all the watching addresses still being watched but not called back - local callbacks=$(sql 'SELECT DISTINCT w.callback0conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable, pub32_index, pub32, w32.label, derivation_path, event_message, hash FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx ON tx.id = tx_id LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE NOT calledback0conf AND watching_id NOT NULL AND w.callback0conf NOT NULL AND w.watching') + local callbacks=$(sql "SELECT DISTINCT w.callback0conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable, pub32_index, pub32, w32.label, derivation_path, event_message, hash FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx ON tx.id = tx_id LEFT JOIN watching_by_pub32 w32 ON w.watching_by_pub32_id = w32.id WHERE NOT calledback0conf AND watching_id IS NOT NULL AND w.callback0conf IS NOT NULL AND w.watching${txid_where}") trace "[do_callbacks] callbacks0conf=${callbacks}" local returncode @@ -25,12 +34,12 @@ do_callbacks() { trace_rc ${returncode} if [ "${returncode}" -eq 0 ]; then address=$(echo "${row}" | cut -d '|' -f2) - sql "UPDATE watching SET calledback0conf=1 WHERE address=\"${address}\"" + sql "UPDATE watching SET calledback0conf=true WHERE address='${address}'" trace_rc $? fi done - callbacks=$(sql 'SELECT DISTINCT w.callback1conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable, pub32_index, pub32, w32.label, derivation_path, event_message, hash FROM watching w, watching_tx wt, tx t LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE w.id = watching_id AND tx_id = t.id AND NOT calledback1conf AND confirmations>0 AND w.callback1conf NOT NULL AND w.watching') + callbacks=$(sql "SELECT DISTINCT w.callback1conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable, pub32_index, pub32, w32.label, derivation_path, event_message, hash FROM watching w JOIN watching_tx wt ON w.id = wt.watching_id JOIN tx t ON wt.tx_id = t.id LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE NOT calledback1conf AND confirmations>0 AND w.callback1conf IS NOT NULL AND w.watching${txid_where}") trace "[do_callbacks] callbacks1conf=${callbacks}" for row in ${callbacks} @@ -39,19 +48,25 @@ do_callbacks() { returncode=$? if [ "${returncode}" -eq 0 ]; then address=$(echo "${row}" | cut -d '|' -f2) - sql "UPDATE watching SET calledback1conf=1, watching=0 WHERE address=\"${address}\"" + sql "UPDATE watching SET calledback1conf=true, watching=false WHERE address='${address}'" trace_rc $? fi done - callbacks=$(sql "SELECT id, label, bolt11, callback_url, payment_hash, msatoshi, status, pay_index, msatoshi_received, paid_at, description, expires_at FROM ln_invoice WHERE NOT calledback AND callback_failed") - trace "[do_callbacks] ln_callbacks=${callbacks}" + if [ -z "${txid}" ]; then + trace "[do_callbacks] Processing LN callbacks..." - for row in ${callbacks} - do - ln_manage_callback ${row} - trace_rc $? 
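Two dialect fixes are folded into these watching queries: SQLite accepts the postfix form expr NOT NULL, which Postgres only parses as the standard expr IS NOT NULL; and the implicit FROM watching w, watching_tx wt, tx t join is rewritten with explicit JOIN ... ON clauses, keeping each join condition next to the table it binds. The skeleton of the rewritten 1-conf query:

    # Sketch of the join/null-test shape (columns trimmed for brevity):
    psql -c "SELECT w.callback1conf, t.txid
             FROM watching w
             JOIN watching_tx wt ON w.id = wt.watching_id
             JOIN tx t ON wt.tx_id = t.id
             WHERE NOT calledback1conf AND w.callback1conf IS NOT NULL"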
- done + callbacks=$(sql "SELECT id, label, bolt11, callback_url, payment_hash, msatoshi, status, pay_index, msatoshi_received, paid_at, description, expires_at FROM ln_invoice WHERE NOT calledback AND callback_failed") + trace "[do_callbacks] ln_callbacks=${callbacks}" + + for row in ${callbacks} + do + ln_manage_callback ${row} + trace_rc $? + done + else + trace "[do_callbacks] called for a specific txid, skipping LN callbacks" + fi ) 200>./.callbacks.lock } @@ -70,7 +85,7 @@ ln_manage_callback() { if [ -z "${callback_url}" ]; then # No callback url provided for that invoice trace "[ln_manage_callback] No callback url provided for that invoice" - sql "UPDATE ln_invoice SET calledback=1 WHERE id=\"${id}\"" + sql "UPDATE ln_invoice SET calledback=true WHERE id=${id}" trace_rc $? return fi @@ -112,7 +127,7 @@ ln_manage_callback() { # "expires_at": # } - data="{\"id\":\"${id}\"," + data="{\"id\":${id}," data="${data}\"label\":\"${label}\"," data="${data}\"bolt11\":\"${bolt11}\"," data="${data}\"callback_url\":\"${callback_url}\"," @@ -132,11 +147,11 @@ ln_manage_callback() { returncode=$? trace_rc ${returncode} if [ "${returncode}" -eq 0 ]; then - sql "UPDATE ln_invoice SET calledback=1 WHERE id=\"${id}\"" + sql "UPDATE ln_invoice SET calledback=true WHERE id=${id}" trace_rc $? else trace "[ln_manage_callback] callback failed: ${callback_url}" - sql "UPDATE ln_invoice SET callback_failed=1 WHERE id=\"${id}\"" + sql "UPDATE ln_invoice SET callback_failed=true WHERE id=${id}" trace_rc $? fi @@ -212,7 +227,7 @@ build_callback() { vsize=$(echo "${row}" | cut -d '|' -f10) trace "[build_callback] vsize=${vsize}" is_replaceable=$(echo "${row}" | cut -d '|' -f15) - is_replaceable=$([ "${is_replaceable}" -eq "1" ] && echo "true" || echo "false") + is_replaceable=$([ "${is_replaceable}" = "t" ] && echo "true" || echo "false") trace "[build_callback] is_replaceable=${is_replaceable}" blockhash=$(echo "${row}" | cut -d '|' -f11) trace "[build_callback] blockhash=${blockhash}" @@ -234,7 +249,7 @@ build_callback() { event_message=$(echo "${row}" | cut -d '|' -f20) trace "[build_callback] event_message=${event_message}" - data="{\"id\":\"${id}\"," + data="{\"id\":${id}," data="${data}\"address\":\"${address}\"," data="${data}\"txid\":\"${txid}\"," data="${data}\"hash\":\"${hash}\"," diff --git a/proxy_docker/app/script/callbacks_txid.sh b/proxy_docker/app/script/callbacks_txid.sh index 7ef7823..f442fd3 100644 --- a/proxy_docker/app/script/callbacks_txid.sh +++ b/proxy_docker/app/script/callbacks_txid.sh @@ -9,8 +9,10 @@ do_callbacks_txid() { trace "Entering do_callbacks_txid()..." + # Let's check the 1-conf (newly mined) watched txid that are included in the new block... + # Let's fetch all the watching txid still being watched but not called back - local callbacks=$(sql 'SELECT id, txid, callback1conf, 1 FROM watching_by_txid WHERE watching AND callback1conf NOT NULL AND NOT calledback1conf') + local callbacks=$(sql "SELECT id, txid, callback1conf, 1 FROM watching_by_txid WHERE watching AND callback1conf IS NOT NULL AND NOT calledback1conf") trace "[do_callbacks_txid] callbacks1conf=${callbacks}" local returncode @@ -25,14 +27,16 @@ do_callbacks_txid() { trace_rc ${returncode} if [ "${returncode}" -eq "0" ]; then id=$(echo "${row}" | cut -d '|' -f1) - sql "UPDATE watching_by_txid SET calledback1conf=1 WHERE id=\"${id}\"" + sql "UPDATE watching_by_txid SET calledback1conf=true WHERE id=${id}" trace_rc $? else trace "[do_callbacks_txid] callback returncode has error, we don't flag as calledback yet." 
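The [ "${is_replaceable}" = "t" ] test in build_callback reflects how psql renders boolean columns in text output: t and f instead of SQLite's 0/1. Assuming the sql() helper returns psql's default text form, a row value maps onto a JSON boolean like this:

    is_replaceable="t"    # as it comes out of a psql row
    is_replaceable=$([ "${is_replaceable}" = "t" ] && echo "true" || echo "false")
    echo "{\"replaceable\":${is_replaceable}}"   # {"replaceable":true}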
fi done - local callbacks=$(sql 'SELECT id, txid, callbackxconf, nbxconf FROM watching_by_txid WHERE watching AND calledback1conf AND callbackxconf NOT NULL AND NOT calledbackxconf') + # For the n-conf, let's only check the watched txids that are already at least 1-conf... + + local callbacks=$(sql "SELECT id, txid, callbackxconf, nbxconf FROM watching_by_txid WHERE watching AND calledback1conf AND callbackxconf IS NOT NULL AND NOT calledbackxconf") trace "[do_callbacks_txid] callbacksxconf=${callbacks}" for row in ${callbacks} @@ -42,7 +46,7 @@ do_callbacks_txid() { trace_rc ${returncode} if [ "${returncode}" -eq "0" ]; then id=$(echo "${row}" | cut -d '|' -f1) - sql "UPDATE watching_by_txid SET calledbackxconf=1, watching=0 WHERE id=\"${id}\"" + sql "UPDATE watching_by_txid SET calledbackxconf=true, watching=false WHERE id=${id}" trace_rc $? else trace "[do_callbacks_txid] callback returncode has error, we don't flag as calledback yet." diff --git a/proxy_docker/app/script/computefees.sh b/proxy_docker/app/script/computefees.sh index 04d2dee..57a3484 100644 --- a/proxy_docker/app/script/computefees.sh +++ b/proxy_docker/app/script/computefees.sh @@ -64,16 +64,10 @@ compute_vin_total_amount() for vin_txid_vout in ${vin_txids_vout} do vin_txid=$(echo "${vin_txid_vout}" | tr -d '"' | cut -d '-' -f1) - # Check if we already have the tx in our DB - vin_raw_tx=$(sql_rawtx "SELECT raw_tx FROM rawtx WHERE txid=\"${vin_txid}\"") - trace_rc $? - if [ -z "${vin_raw_tx}" ]; then - txid_already_inserted=false - vin_raw_tx=$(get_rawtransaction "${vin_txid}" | tr -d '\n') - returncode=$? - if [ "${returncode}" -ne 0 ]; then - return ${returncode} - fi + vin_raw_tx=$(get_rawtransaction "${vin_txid}" | tr -d '\n') + returncode=$? + if [ "${returncode}" -ne 0 ]; then + return ${returncode} fi vout=$(echo "${vin_txid_vout}" | tr -d '"' | cut -d '-' -f2) trace "[compute_vin_total_amount] vout=${vout}" @@ -81,27 +75,23 @@ compute_vin_total_amount() trace "[compute_vin_total_amount] vin_vout_amount=${vin_vout_amount}" vin_total_amount=$(awk "BEGIN { printf(\"%.8f\", ${vin_total_amount}+${vin_vout_amount}); exit}") trace "[compute_vin_total_amount] vin_total_amount=${vin_total_amount}" - vin_hash=$(echo "${vin_raw_tx}" | jq ".result.hash") + vin_hash=$(echo "${vin_raw_tx}" | jq -r ".result.hash") vin_confirmations=$(echo "${vin_raw_tx}" | jq ".result.confirmations") vin_timereceived=$(echo "${vin_raw_tx}" | jq ".result.time") vin_size=$(echo "${vin_raw_tx}" | jq ".result.size") vin_vsize=$(echo "${vin_raw_tx}" | jq ".result.vsize") - vin_blockhash=$(echo "${vin_raw_tx}" | jq ".result.blockhash") + vin_blockhash=$(echo "${vin_raw_tx}" | jq -r ".result.blockhash") vin_blockheight=$(echo "${vin_raw_tx}" | jq ".result.blockheight") vin_blocktime=$(echo "${vin_raw_tx}" | jq ".result.blocktime") # Let's insert the vin tx in the DB just in case it would be useful - if ! ${txid_already_inserted}; then - # Sometimes raw tx are too long to be passed as paramater, so let's write - # it to a temp file for it to be read by sqlite3 and then delete the file - echo "${vin_raw_tx}" > vin-rawtx-${vin_txid}-$$.blob - sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime) VALUES (\"${vin_txid}\", ${vin_hash}, ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, ${vin_blockhash}, ${vin_blockheight}, ${vin_blocktime})" - trace_rc $? 
- sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime, raw_tx) VALUES (\"${vin_txid}\", ${vin_hash}, ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, ${vin_blockhash}, ${vin_blockheight}, ${vin_blocktime}, readfile('vin-rawtx-${vin_txid}-$$.blob'))" - trace_rc $? - rm vin-rawtx-${vin_txid}-$$.blob - txid_already_inserted=true - fi + sql "INSERT INTO tx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime)"\ +" VALUES ('${vin_txid}', '${vin_hash}', ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, '${vin_blockhash}', ${vin_blockheight}, ${vin_blocktime})"\ +" ON CONFLICT (txid) DO"\ +" UPDATE SET blockhash='${vin_blockhash}', blockheight=${vin_blockheight}, blocktime=${vin_blocktime}, confirmations=${vin_confirmations}"\ +" RETURNING id" \ + "SELECT id FROM tx WHERE txid='${vin_txid}'" + trace_rc $? done echo "${vin_total_amount}" diff --git a/proxy_docker/app/script/confirmation.sh b/proxy_docker/app/script/confirmation.sh index 4dd81d0..534e84d 100644 --- a/proxy_docker/app/script/confirmation.sh +++ b/proxy_docker/app/script/confirmation.sh @@ -44,7 +44,7 @@ confirmation() { # First of all, let's make sure we're working on watched addresses... local address local addresseswhere - local addresses=$(echo "${tx_details}" | jq ".result.details[].address") + local addresses=$(echo "${tx_details}" | jq -r ".result.details[].address") local notfirst=false local IFS=$'\n' @@ -53,9 +53,9 @@ confirmation() { trace "[confirmation] address=${address}" if ${notfirst}; then - addresseswhere="${addresseswhere},${address}" + addresseswhere="${addresseswhere},'${address}'" else - addresseswhere="${address}" + addresseswhere="'${address}'" notfirst=true fi done @@ -66,11 +66,11 @@ confirmation() { fi ######################################################################################################## - local tx=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"") + local tx=$(sql "SELECT id FROM tx WHERE txid='${txid}'") local id_inserted local tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n') local tx_nb_conf=$(echo "${tx_details}" | jq -r '.result.confirmations // 0') - local tx_hash=$(echo "${tx_raw_details}" | jq '.result.hash') + local tx_hash=$(echo "${tx_raw_details}" | jq -r '.result.hash') # Sometimes raw tx are too long to be passed as paramater, so let's write # it to a temp file for it to be read by sqlite3 and then delete the file @@ -100,45 +100,33 @@ confirmation() { local tx_blocktime=null if [ "${tx_nb_conf}" -gt "0" ]; then trace "[confirmation] tx_nb_conf=${tx_nb_conf}" - tx_blockhash=$(echo "${tx_details}" | jq '.result.blockhash') - tx_blockheight=$(get_block_info $(echo ${tx_blockhash} | tr -d '"') | jq '.result.height') + tx_blockhash="$(echo "${tx_details}" | jq -r '.result.blockhash')" + tx_blockheight=$(get_block_info ${tx_blockhash} | jq '.result.height') + tx_blockhash="'${tx_blockhash}'" tx_blocktime=$(echo "${tx_details}" | jq '.result.blocktime') fi - sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime) VALUES (\"${txid}\", ${tx_hash}, ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime})" - trace_rc $? 
- sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime, raw_tx) VALUES (\"${txid}\", ${tx_hash}, ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime}, readfile('rawtx-${txid}-$$.blob'))" - trace_rc $? - - id_inserted=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"") + id_inserted=$(sql "INSERT INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime)"\ +" VALUES ('${txid}', '${tx_hash}', ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime})"\ +" ON CONFLICT (txid) DO"\ +" UPDATE SET blockhash=${tx_blockhash}, blockheight=${tx_blockheight}, blocktime=${tx_blocktime}, confirmations=${tx_nb_conf}"\ +" RETURNING id" \ + "SELECT id FROM tx WHERE txid='${txid}'") trace_rc $? else # TX found in our DB. # 1-conf or executecallbacks on an unconfirmed tx or spending watched address (in this case, we probably missed conf) or spending to a watched address (in this case, spend inserted the tx in the DB) - local tx_blockhash=$(echo "${tx_details}" | jq '.result.blockhash') + local tx_blockhash=$(echo "${tx_details}" | jq -r '.result.blockhash') trace "[confirmation] tx_blockhash=${tx_blockhash}" if [ "${tx_blockhash}" = "null" ]; then trace "[confirmation] probably being called by executecallbacks without any confirmations since the last time we checked" else - local tx_blockheight=$(get_block_info $(echo "${tx_blockhash}" | tr -d '"') | jq '.result.height') + local tx_blockheight=$(get_block_info "${tx_blockhash}" | jq '.result.height') local tx_blocktime=$(echo "${tx_details}" | jq '.result.blocktime') - sql "UPDATE tx SET - confirmations=${tx_nb_conf}, - blockhash=${tx_blockhash}, - blockheight=${tx_blockheight}, - blocktime=${tx_blocktime} - WHERE txid=\"${txid}\"" - trace_rc $? - sql_rawtx "UPDATE rawtx SET - confirmations=${tx_nb_conf}, - blockhash=${tx_blockhash}, - blockheight=${tx_blockheight}, - blocktime=${tx_blocktime}, - raw_tx=readfile('rawtx-${txid}-$$.blob') - WHERE txid=\"${txid}\"" + sql "UPDATE tx SET confirmations=${tx_nb_conf}, blockhash='${tx_blockhash}', blockheight=${tx_blockheight}, blocktime=${tx_blocktime} WHERE txid='${txid}'" trace_rc $? fi id_inserted=${tx} @@ -171,7 +159,8 @@ confirmation() { # If the tx is batched and pays multiple watched addresses, we have to insert # those additional addresses in watching_tx! watching_id=$(echo "${row}" | cut -d '|' -f1) - sql "INSERT OR IGNORE INTO watching_tx (watching_id, tx_id, vout, amount) VALUES (${watching_id}, ${id_inserted}, ${tx_vout_n}, ${tx_vout_amount})" + sql "INSERT INTO watching_tx (watching_id, tx_id, vout, amount) VALUES (${watching_id}, ${id_inserted}, ${tx_vout_n}, ${tx_vout_amount})"\ +" ON CONFLICT DO NOTHING" trace_rc $? else trace "[confirmation] For this tx, there's already watching_tx rows" @@ -211,7 +200,7 @@ confirmation() { # for next cron. if [ -z "${bypass_callbacks}" ]; then trace "[confirmation] Let's do the callbacks!" 
- do_callbacks + do_callbacks "${txid}" fi echo '{"result":"confirmed"}' diff --git a/proxy_docker/app/script/getactivewatches.sh b/proxy_docker/app/script/getactivewatches.sh index 7cb25a3..5923b35 100644 --- a/proxy_docker/app/script/getactivewatches.sh +++ b/proxy_docker/app/script/getactivewatches.sh @@ -12,8 +12,8 @@ get_txns_by_watchlabel(){ INNER JOIN watching AS w ON w32.id = w.watching_by_pub32_id INNER JOIN watching_tx AS wtxn ON w.id = wtxn.watching_id INNER JOIN tx AS tx ON wtxn.tx_id = tx.id - WHERE w32.label="$1" - LIMIT 0,${2-10} + WHERE w32.label='${1}' + LIMIT ${2-10} OFFSET 0 HERE ) label_txns=$(sql "$query") @@ -38,12 +38,12 @@ get_unused_addresses_by_watchlabel(){ SELECT w32.id, w32.label, w32.pub32, w.pub32_index, w.address FROM watching as w INNER JOIN watching_by_pub32 AS w32 ON w.watching_by_pub32_id = w32.id - WHERE w32.label="$1" + WHERE w32.label='${1}' AND NOT EXISTS ( SELECT 1 FROM watching_tx WHERE watching_id = w.id ) ORDER BY w.pub32_index ASC - LIMIT 0,${2-10} + LIMIT ${2-10} OFFSET 0 HERE ) label_unused_addrs=$(sql "$query") @@ -67,7 +67,7 @@ getactivewatches() { local watches # Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster. # {"id":"${id}","address":"${address}","imported":"${imported}","unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}"} - watches=$(sql "SELECT '{\"id\":' || id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":\"' || COALESCE(callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(callback1conf, '') || '\",\"label\":\"' || COALESCE(label, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching WHERE watching AND NOT calledback1conf") + watches=$(sql "SELECT '{\"id\":' || id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":' || CASE WHEN callback0conf IS NULL THEN 'null' ELSE ('\"' || callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN callback1conf IS NULL THEN 'null' ELSE ('\"' || callback1conf || '\"') END || ',\"label\":\"' || COALESCE(label, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching WHERE watching AND NOT calledback1conf ORDER BY id") returncode=$? trace_rc ${returncode} @@ -99,7 +99,7 @@ getactivewatchesbyxpub() { local xpub=${1} local returncode - getactivewatchesxpub "pub32" ${xpub} + getactivewatchesxpub "pub32" "${xpub}" returncode=$? trace_rc ${returncode} @@ -112,7 +112,7 @@ getactivewatchesbylabel() { local label=${1} local returncode - getactivewatchesxpub "label" ${label} + getactivewatchesxpub "label" "${label}" returncode=$? trace_rc ${returncode} @@ -130,7 +130,7 @@ getactivewatchesxpub() { # Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster. 
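LIMIT 0,${2-10} is the MySQL-style shorthand SQLite also accepts, with the offset first; Postgres only understands the standard LIMIT n OFFSET m, hence the rewrite in get_txns_by_watchlabel and get_unused_addresses_by_watchlabel. The ${2-10} default is unchanged POSIX parameter expansion, substituting 10 when no second argument is passed:

    set -- myLabel                   # caller supplied only the label
    count=${2-10}                    # $2 unset -> 10
    echo "LIMIT ${count} OFFSET 0"   # Postgres form of SQLite's "LIMIT 0,10"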
# {"id":"${id}","address":"${address}","imported":"${imported}","unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}","derivation_path":"${derivation_path}","pub32_index":"${pub32_index}"} - watches=$(sql "SELECT '{\"id\":' || w.id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":\"' || COALESCE(w.callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(w.callback1conf, '') || '\",\"watching_since\":\"' || w.inserted_ts || '\",\"derivation_path\":\"' || derivation_path || '\",\"pub32_index\":' || pub32_index || '}' FROM watching w, watching_by_pub32 w32 WHERE watching_by_pub32_id = w32.id AND ${where} = \"${value}\" AND w.watching AND NOT calledback1conf") + watches=$(sql "SELECT '{\"id\":' || w.id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":' || CASE WHEN w.callback0conf IS NULL THEN 'null' ELSE ('\"' || w.callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN w.callback1conf IS NULL THEN 'null' ELSE ('\"' || w.callback1conf || '\"') END || ',\"watching_since\":\"' || w.inserted_ts || '\",\"derivation_path\":\"' || derivation_path || '\",\"pub32_index\":' || pub32_index || '}' FROM watching w, watching_by_pub32 w32 WHERE watching_by_pub32_id = w32.id AND w32.${where} = '${value}' AND w.watching AND NOT calledback1conf ORDER BY w.id") returncode=$? trace_rc ${returncode} @@ -162,7 +162,7 @@ getactivexpubwatches() { local watches # Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster. # {"id":"${id}","pub32":"${pub32}","label":"${label}","derivation_path":"${derivation_path}","last_imported_n":${last_imported_n},"unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}"} - watches=$(sql "SELECT '{\"id\":' || id || ',\"pub32\":\"' || pub32 || '\",\"label\":\"' || label || '\",\"derivation_path\":\"' || derivation_path || '\",\"last_imported_n\":' || last_imported_n || ',\"unconfirmedCallbackURL\":\"' || COALESCE(callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(callback1conf, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching_by_pub32 WHERE watching") + watches=$(sql "SELECT '{\"id\":' || id || ',\"pub32\":\"' || pub32 || '\",\"label\":\"' || label || '\",\"derivation_path\":\"' || derivation_path || '\",\"last_imported_n\":' || last_imported_n || ',\"unconfirmedCallbackURL\":' || CASE WHEN callback0conf IS NULL THEN 'null' ELSE ('\"' || callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN callback1conf IS NULL THEN 'null' ELSE ('\"' || callback1conf || '\"') END || ',\"watching_since\":\"' || inserted_ts || '\"}' FROM watching_by_pub32 WHERE watching ORDER BY id") returncode=$? 
trace_rc ${returncode} diff --git a/proxy_docker/app/script/importaddress.sh b/proxy_docker/app/script/importaddress.sh index 8b2128c..874c54b 100644 --- a/proxy_docker/app/script/importaddress.sh +++ b/proxy_docker/app/script/importaddress.sh @@ -11,7 +11,7 @@ importaddress_rpc() { if [ -z "${label}" ]; then label="null" fi - local data='{"method":"importaddress","params":{"address":"'${address}'","label":'${label}',"rescan":false}}' + local data='{"method":"importaddress","params":{"address":"'${address}'","label":"'${label}'","rescan":false}}' # local data="{\"method\":\"importaddress\",\"params\":[\"${address}\",\"\",false]}" local result result=$(send_to_watcher_node ${data}) @@ -39,7 +39,7 @@ importmulti_rpc() { # {"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"}, # {"scriptPubKey":{"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},"timestamp":"now","watchonly":true,"label":"xpub"}, - addresses=$(echo "${addresses}" | sed "s/\"address\"/\"scriptPubKey\":\{\"address\"/g" | sed "s/}/},\"timestamp\":\"now\",\"watchonly\":true,\"label\":${label}}/g") + addresses=$(echo "${addresses}" | sed "s/\"address\"/\"scriptPubKey\":\{\"address\"/g" | sed "s/}/},\"timestamp\":\"now\",\"watchonly\":true,\"label\":\"${label}\"}/g") # trace "[importmulti_rpc] addresses=${addresses}" # Now we use that in the RPC string diff --git a/proxy_docker/app/script/manage_missed_conf.sh b/proxy_docker/app/script/manage_missed_conf.sh index 5102004..4d7cfe2 100644 --- a/proxy_docker/app/script/manage_missed_conf.sh +++ b/proxy_docker/app/script/manage_missed_conf.sh @@ -25,7 +25,7 @@ manage_not_imported() { returncode=$? trace_rc ${returncode} if [ "${returncode}" -eq 0 ]; then - sql "UPDATE watching SET imported=1 WHERE address=\"${address}\"" + sql "UPDATE watching SET imported=true WHERE address='${address}'" fi done @@ -41,8 +41,8 @@ manage_missed_conf() { trace "[Entering manage_missed_conf()]" - local watches=$(sql 'SELECT DISTINCT address FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx t ON t.id = tx_id WHERE watching AND imported AND (tx_id IS NULL OR t.confirmations=0) ORDER BY address') - trace "[manage_missed_conf] watches=${watches}" + local watches=$(sql "SELECT DISTINCT address FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx t ON t.id = tx_id WHERE watching AND imported AND (tx_id IS NULL OR t.confirmations=0) ORDER BY address") + # trace "[manage_missed_conf] watches=${watches}" if [ ${#watches} -eq 0 ]; then trace "[manage_missed_conf] Nothing missed!" return 0 @@ -55,7 +55,7 @@ manage_missed_conf() { data='{"method":"listreceivedbyaddress","params":[0,false,true]}' received=$(send_to_watcher_node "${data}") received_addresses=$(echo "${received}" | jq -r ".result[].address" | sort) - trace "[manage_missed_conf] received_addresses=${received_addresses}" + # trace "[manage_missed_conf] received_addresses=${received_addresses}" # Let's extract addresses that are in the watches list as well as in the received_addresses list echo "${watches}" > watches-$$ @@ -81,7 +81,7 @@ manage_missed_conf() { local IFS=$'\n' for address in ${received_watches} do - watching=$(sql 'SELECT address, inserted_ts FROM watching WHERE address="'${address}'"') + watching=$(sql "SELECT address, inserted_ts FROM watching WHERE address='${address}'") trace "[manage_missed_conf] watching=${watching}" if [ ${#watching} -eq 0 ]; then trace "[manage_missed_conf] Nothing missed!" 
@@ -90,7 +90,7 @@ manage_missed_conf() { # Let's get confirmed received txs for the address # address=$(echo "${watches}" | cut -d '|' -f1) - inserted_ts=$(date -d "$(echo "${watching}" | cut -d '|' -f2)" +"%s") + inserted_ts=$(date -d "$(echo "${watching}" | cut -d '|' -f2)" -D '%Y-%m-%d %H:%M:%S' +"%s") trace "[manage_missed_conf] inserted_ts=${inserted_ts}" received_address=$(echo "${received}" | jq -Mc ".result | map(select(.address==\"${address}\" and .confirmations>0))[0]") diff --git a/proxy_docker/app/script/newblock.sh b/proxy_docker/app/script/newblock.sh index 6db875a..ce096fd 100644 --- a/proxy_docker/app/script/newblock.sh +++ b/proxy_docker/app/script/newblock.sh @@ -25,6 +25,7 @@ newblock() { returncode=$? trace_rc ${returncode} + # do_callbacks_txid "$(echo "${blockinfo}" | jq ".result.tx[]")" do_callbacks_txid batch_check_webhooks diff --git a/proxy_docker/app/script/ots.sh b/proxy_docker/app/script/ots.sh index 2db2b11..1cfe66d 100644 --- a/proxy_docker/app/script/ots.sh +++ b/proxy_docker/app/script/ots.sh @@ -49,18 +49,20 @@ serve_ots_stamp() { returncode=$? fi else - sql "INSERT OR IGNORE INTO stamp (hash, callbackUrl) VALUES (\"${hash}\", \"${callbackUrl}\")" + id_inserted=$(sql "INSERT INTO stamp (hash, callbackUrl)"\ +" VALUES ('${hash}','${callbackUrl}')"\ +" RETURNING id" \ + "SELECT id FROM stamp WHERE hash='${hash}'") returncode=$? trace_rc ${returncode} if [ "${returncode}" -eq "0" ]; then - id_inserted=$(sql "SELECT id FROM stamp WHERE hash='${hash}'") - trace_rc $? errorstring=$(request_ots_stamp "${hash}" ${id_inserted}) returncode=$? trace_rc ${returncode} else trace "[serve_ots_stamp] Stamp request could not be inserted in DB" errorstring="Stamp request could not be inserted in DB, please retry later" + id_inserted=null returncode=1 fi fi @@ -114,7 +116,7 @@ request_ots_stamp() { if [ "${returncode}" -eq "0" ]; then # "already exists" found, let's try updating DB again trace "[request_ots_stamp] was already requested to the OTS server... let's update the DB, looks like it didn't work on first try" - sql "UPDATE stamp SET requested=1 WHERE id=${id}" + sql "UPDATE stamp SET requested=true WHERE id=${id}" errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested" returncode=1 else @@ -125,7 +127,7 @@ request_ots_stamp() { fi else trace "[request_ots_stamp] Stamping request sent successfully!" - sql "UPDATE stamp SET requested=1 WHERE id=${id}" + sql "UPDATE stamp SET requested=true WHERE id=${id}" errorstring="" returncode=0 fi @@ -198,7 +200,7 @@ serve_ots_backoffice() { else # No failure, upgraded trace "[serve_ots_backoffice] just upgraded!" - sql "UPDATE stamp SET upgraded=1 WHERE id=${id}" + sql "UPDATE stamp SET upgraded=true WHERE id=${id}" trace_rc $? upgraded=1 @@ -221,13 +223,13 @@ serve_ots_backoffice() { # Even if curl executed ok, we need to make sure the http return code is also ok if [ "${returncode}" -eq "0" ]; then - sql "UPDATE stamp SET calledback=1 WHERE id=${id}" + sql "UPDATE stamp SET calledback=true WHERE id=${id}" trace_rc $? fi else trace "[serve_ots_backoffice] url is empty, obviously won't try to call it!" - sql "UPDATE stamp SET calledback=1 WHERE id=${id}" + sql "UPDATE stamp SET calledback=true WHERE id=${id}" trace_rc $? 
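The date change above accounts for inserted_ts now coming back as Postgres timestamp text (e.g. 2020-06-01 12:34:56): BusyBox date, which the -D flag suggests the proxy container uses, does not guess input formats the way GNU date does, so the layout is spelled out before converting to epoch seconds. Assuming BusyBox:

    ts="2020-06-01 12:34:56"                       # illustrative timestamp text
    date -d "${ts}" -D '%Y-%m-%d %H:%M:%S' +"%s"   # -> epoch seconds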
fi fi diff --git a/proxy_docker/app/script/requesthandler.sh b/proxy_docker/app/script/requesthandler.sh index 1eaec2d..79d7130 100644 --- a/proxy_docker/app/script/requesthandler.sh +++ b/proxy_docker/app/script/requesthandler.sh @@ -76,8 +76,10 @@ main() { case "${cmd}" in helloworld) # GET http://192.168.111.152:8080/helloworld - response_to_client "Hello, world!" 0 - break + response="Hello, world!" + returncode=0 + # response_to_client "Hello, world!" 0 + # break ;; installation_info) # GET http://192.168.111.152:8080/info @@ -86,8 +88,7 @@ main() { else response='{ "error": "missing installation data" }' fi - response_to_client "${response}" ${?} - break + returncode=$? ;; watch) # POST http://192.168.111.152:8080/watch @@ -96,8 +97,7 @@ main() { # BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","eventMessage":"eyJib3VuY2VfYWRkcmVzcyI6IjJNdkEzeHIzOHIxNXRRZWhGblBKMVhBdXJDUFR2ZTZOamNGIiwibmJfY29uZiI6MH0K","label":"myLabel"} response=$(watchrequest "${line}") - response_to_client "${response}" ${?} - break + returncode=$? ;; unwatch) # curl (GET) 192.168.111.152:8080/unwatch/2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp @@ -122,16 +122,15 @@ main() { # Let's make it work even for a GET request (equivalent to a POST with empty json object body) if [ "$http_method" = "POST" ]; then address=$(echo "${line}" | jq -r ".address") - unconfirmedCallbackURL=$(echo "${line}" | jq ".unconfirmedCallbackURL") - confirmedCallbackURL=$(echo "${line}" | jq ".confirmedCallbackURL") + unconfirmedCallbackURL=$(echo "${line}" | jq -r ".unconfirmedCallbackURL") + confirmedCallbackURL=$(echo "${line}" | jq -r ".confirmedCallbackURL") watchid=$(echo "${line}" | jq ".id") else address=$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3) fi response=$(unwatchrequest "${watchid}" "${address}" "${unconfirmedCallbackURL}" "${confirmedCallbackURL}") - response_to_client "${response}" ${?} - break + returncode=$? ;; watchxpub) # POST http://192.168.111.152:8080/watchxpub @@ -139,43 +138,37 @@ main() { # curl -H "Content-Type: application/json" -d '{"label":"2219","pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/1/n","nstart":55,"unconfirmedCallbackURL":"192.168.111.233:1111/callback0conf","confirmedCallbackURL":"192.168.111.233:1111/callback1conf"}' proxy:8888/watchxpub response=$(watchpub32request "${line}") - response_to_client "${response}" ${?} - break + returncode=$? ;; unwatchxpubbyxpub) # GET http://192.168.111.152:8080/unwatchxpubbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk response=$(unwatchpub32request "${line}") - response_to_client "${response}" ${?} - break + returncode=$? ;; unwatchxpubbylabel) # GET http://192.168.111.152:8080/unwatchxpubbylabel/4421 response=$(unwatchpub32labelrequest "${line}") - response_to_client "${response}" ${?} - break + returncode=$? ;; getactivewatchesbyxpub) # GET http://192.168.111.152:8080/getactivewatchesbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk response=$(getactivewatchesbyxpub "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") - response_to_client "${response}" ${?} - break + returncode=$? 
;; getactivewatchesbylabel) # GET http://192.168.111.152:8080/getactivewatchesbylabel/4421 response=$(getactivewatchesbylabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") - response_to_client "${response}" ${?} - break + returncode=$? ;; getactivexpubwatches) # GET http://192.168.111.152:8080/getactivexpubwatches response=$(getactivexpubwatches) - response_to_client "${response}" ${?} - break + returncode=$? ;; watchtxid) # POST http://192.168.111.152:8080/watchtxid @@ -183,8 +176,7 @@ main() { # curl -H "Content-Type: application/json" -d '{"txid":"b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","xconfCallbackURL":"192.168.111.233:1111/callbackXconf","nbxconf":6}' proxy:8888/watchtxid response=$(watchtxidrequest "${line}") - response_to_client "${response}" ${?} - break + returncode=$? ;; unwatchtxid) # POST http://192.168.111.152:8080/unwatchtxid @@ -200,87 +192,76 @@ main() { # - id: the id returned by watchtxid local txid=$(echo "${line}" | jq -r ".txid") - local unconfirmedCallbackURL=$(echo "${line}" | jq ".unconfirmedCallbackURL") - local confirmedCallbackURL=$(echo "${line}" | jq ".confirmedCallbackURL") + local unconfirmedCallbackURL=$(echo "${line}" | jq -r ".unconfirmedCallbackURL") + local confirmedCallbackURL=$(echo "${line}" | jq -r ".confirmedCallbackURL") local watchid=$(echo "${line}" | jq ".id") response=$(unwatchtxidrequest "${watchid}" "${txid}" "${unconfirmedCallbackURL}" "${confirmedCallbackURL}") - response_to_client "${response}" ${?} - break + returncode=$? ;; getactivewatches) # curl (GET) 192.168.111.152:8080/getactivewatches response=$(getactivewatches) - response_to_client "${response}" ${?} - break + returncode=$? ;; get_txns_by_watchlabel) # curl (GET) 192.168.111.152:8080/get_txns_by_watchlabel/