Merge pull request #239 from SatoshiPortal/features/postgres

Integration of PostgreSQL replacing SQLite3
Authored by kexkey on 2021-12-30 11:20:20 -05:00, committed by GitHub.
59 changed files with 3033 additions and 1009 deletions
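How the pieces fit together (a minimal sketch, not part of the diff below): the proxy reaches the new database through libpq's password file. The host name "postgres", port 5432, and the "cyphernode" database/user come from this PR's docker-compose and pgpass templates; the PGPASSFILE path is the one exported in the proxy's environment.

# Assumes the pgpass file rendered by this PR: postgres:5432:cyphernode:cyphernode:<password>
export PGPASSFILE=/proxy/db/pgpass
chmod 0600 "$PGPASSFILE"   # libpq refuses a group/world-readable pgpass file
psql -h postgres -p 5432 -U cyphernode -d cyphernode -c 'SELECT 1;'   # no password prompt if pgpass matches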

View File

@@ -1,7 +1,6 @@
 FROM nginx:1.18.0-alpine
 RUN apk add --update --no-cache \
-bash \
 git \
 openssl \
 fcgiwrap \

View File

@@ -1,17 +1,16 @@
-#!/bin/bash
-user='nginx'
-if [[ $1 ]]; then
-IFS=':' read -ra arr <<< "$1"
-if [[ ${arr[0]} ]]; then
-user=${arr[0]};
-fi
+#!/bin/sh
+while [ ! -f "/container_monitor/proxy_ready" ]; do echo "proxy not ready" ; sleep 10 ; done
+echo "proxy ready"
+if [ -n "$1" ]; then
+user=$(echo "$1" | cut -d ':' -f 1)
+else
+user='nginx'
 fi
 spawn-fcgi -M 0660 -s /var/run/fcgiwrap.socket -u $user -g nginx -U $user -- `which fcgiwrap`
 chmod -R g+rw /var/run/fcgiwrap.socket /etc/nginx/conf.d/*
 chown -R :nginx /etc/nginx/conf.d/*
-nginx -g "daemon off;"
+exec nginx -g "daemon off;"

View File

@@ -17,6 +17,9 @@
"gatekeeper_edit_apiproperties": "If you know what you are doing, it is possible to manually edit the API endpoints/groups authorization. (Not recommended)", "gatekeeper_edit_apiproperties": "If you know what you are doing, it is possible to manually edit the API endpoints/groups authorization. (Not recommended)",
"gatekeeper_apiproperties": "You are about to edit the api.properties file. The format of the file is pretty simple: for each action, you will find what access group can access it. <font color='# 0000ff'>Admin</font> group can do what <font color='# 0000ff'>Spender</font> group can, and <font color='# 0000ff'>Spender</font> group can do what <font color='# 0000ff'>Watcher</font> group can. <font color='# 0000ff'>Internal</font> group is for the endpoints accessible only within the Docker network, like the backoffice tasks used by the Cron container. The access groups for each API id/key are found in the <font color='# 0000ff'>keys.properties</font> file.", "gatekeeper_apiproperties": "You are about to edit the api.properties file. The format of the file is pretty simple: for each action, you will find what access group can access it. <font color='# 0000ff'>Admin</font> group can do what <font color='# 0000ff'>Spender</font> group can, and <font color='# 0000ff'>Spender</font> group can do what <font color='# 0000ff'>Watcher</font> group can. <font color='# 0000ff'>Internal</font> group is for the endpoints accessible only within the Docker network, like the backoffice tasks used by the Cron container. The access groups for each API id/key are found in the <font color='# 0000ff'>keys.properties</font> file.",
"gatekeeper_cns": "I use <font underline='true'>domain names</font> and/or <font underline='true'>IP addresses</font> to create valid TLS certificates. For example, if <font color='# 0000ff'>https://cyphernodehost/getbestblockhash</font> and <font color='# 0000ff'>https://192.168.7.44/getbestblockhash</font> will be used, enter <font color='# 0000ff'>cyphernodehost, 192.168.7.44</font> as a possible domains. <font color='# 0000ff'>127.0.0.1, localhost, gatekeeper</font> will be automatically added to your list. Make sure the provided domain names are in your DNS or client's hosts file and is reachable.", "gatekeeper_cns": "I use <font underline='true'>domain names</font> and/or <font underline='true'>IP addresses</font> to create valid TLS certificates. For example, if <font color='# 0000ff'>https://cyphernodehost/getbestblockhash</font> and <font color='# 0000ff'>https://192.168.7.44/getbestblockhash</font> will be used, enter <font color='# 0000ff'>cyphernodehost, 192.168.7.44</font> as a possible domains. <font color='# 0000ff'>127.0.0.1, localhost, gatekeeper</font> will be automatically added to your list. Make sure the provided domain names are in your DNS or client's hosts file and is reachable.",
"postgres_datapath": "The Cyphernode's Postgres files will be stored in a container's mounted directory. Please provide the <font underline='true'>local mounted path</font> to that directory. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>",
"postgres_datapath_custom": "Provide the <font underline='true'>full path name</font> where Postgres files will be saved.",
"postgres_password": "PostgreSQL cyphernode's <font underline='true'>password</font> used by Cyphernode when calling the database.",
"logs_datapath": "The Cyphernode's log files will be stored in a container's mounted directory. Please provide the <font underline='true'>local mounted path</font> to that directory. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>", "logs_datapath": "The Cyphernode's log files will be stored in a container's mounted directory. Please provide the <font underline='true'>local mounted path</font> to that directory. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>",
"logs_datapath_custom": "Provide the <font underline='true'>full path name</font> where Cyphernodes log files will be saved.", "logs_datapath_custom": "Provide the <font underline='true'>full path name</font> where Cyphernodes log files will be saved.",
"traefik_datapath": "The Traefik's files will be stored in a container's mounted directory. Please provide the <font underline='true'>local mounted path</font> to that directory. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>", "traefik_datapath": "The Traefik's files will be stored in a container's mounted directory. Please provide the <font underline='true'>local mounted path</font> to that directory. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>",

View File

@@ -87,6 +87,7 @@ module.exports = class App {
 proxy_version: process.env.PROXY_VERSION,
 proxycron_version: process.env.PROXYCRON_VERSION,
 pycoin_version: process.env.PYCOIN_VERSION,
+postgres_version: process.env.POSTGRES_VERSION,
 traefik_version: process.env.TRAEFIK_VERSION,
 mosquitto_version: process.env.MOSQUITTO_VERSION,
 otsclient_version: process.env.OTSCLIENT_VERSION,
@@ -148,6 +149,7 @@ module.exports = class App {
 'cyphernode/proxy': this.sessionData.proxy_version,
 'cyphernode/proxycron': this.sessionData.proxycron_version,
 'cyphernode/pycoin': this.sessionData.pycoin_version,
+'cyphernode/postgres': this.sessionData.postgres_version,
 'cyphernode/otsclient': this.sessionData.otsclient_version,
 'traefik': this.sessionData.traefik_version,
 'cyphernode/clightning': this.sessionData.lightning_version,
@@ -359,6 +361,7 @@ module.exports = class App {
 const pathProps = [
 'gatekeeper_datapath',
+'postgres_datapath',
 'logs_datapath',
 'traefik_datapath',
 'tor_datapath',
@@ -483,6 +486,13 @@ module.exports = class App {
 networks: ['cyphernodenet'],
 docker: 'cyphernode/pycoin:'+this.config.docker_versions['cyphernode/pycoin']
 },
+{
+name: 'Postgres',
+label: 'postgres',
+host: 'postgres',
+networks: ['cyphernodenet'],
+docker: 'postgres:'+this.config.docker_versions['cyphernode/postgres']
+},
 {
 name: 'Notifier',
 label: 'notifier',

View File

@@ -12,10 +12,11 @@ const schemas = {
 '0.2.2': require('../schema/config-v0.2.2.json'),
 '0.2.3': require('../schema/config-v0.2.3.json'),
 '0.2.4': require('../schema/config-v0.2.4.json'),
-'0.2.5': require('../schema/config-v0.2.5.json')
+'0.2.5': require('../schema/config-v0.2.5.json'),
+'0.2.6': require('../schema/config-v0.2.6.json')
 };
-const versionHistory = [ '0.1.0', '0.2.0', '0.2.2', '0.2.3', '0.2.4', '0.2.5' ];
+const versionHistory = [ '0.1.0', '0.2.0', '0.2.2', '0.2.3', '0.2.4', '0.2.5', '0.2.6' ];
 const defaultSchemaVersion=versionHistory[0];
 const latestSchemaVersion=versionHistory[versionHistory.length-1];
@@ -46,7 +47,8 @@ module.exports = class Config {
 '0.2.0->0.2.2': this.migrate_0_2_0_to_0_2_2,
 '0.2.2->0.2.3': this.migrate_0_2_2_to_0_2_3,
 '0.2.3->0.2.4': this.migrate_0_2_3_to_0_2_4,
-'0.2.4->0.2.5': this.migrate_0_2_4_to_0_2_5
+'0.2.4->0.2.5': this.migrate_0_2_4_to_0_2_5,
+'0.2.5->0.2.6': this.migrate_0_2_5_to_0_2_6
 };
 this.setData( { schema_version: latestSchemaVersion } );
@@ -247,4 +249,12 @@ module.exports = class Config {
 this.data.schema_version = '0.2.5';
 }
+async migrate_0_2_5_to_0_2_6() {
+const currentVersion = this.data.schema_version;
+if( currentVersion != '0.2.5' ) {
+return;
+}
+this.data.schema_version = '0.2.6';
+}
 };

View File

@@ -0,0 +1,30 @@
const chalk = require('chalk');
const name = 'postgres';
const capitalise = function( txt ) {
return txt.charAt(0).toUpperCase() + txt.substr(1);
};
const prefix = function() {
return chalk.green(capitalise(name)+': ');
};
module.exports = {
name: function() {
return name;
},
prompts: function( utils ) {
return [
{
type: 'password',
name: 'postgres_password',
default: utils.getDefault( 'postgres_password' ),
message: prefix()+'Password of Postgres cyphernode user?'+utils.getHelp('postgres_password'),
filter: utils.trimFilter,
}];
},
templates: function( props ) {
return ['pgpass'];
}
};

View File

@@ -30,6 +30,44 @@ module.exports = {
value: "docker" value: "docker"
}] }]
}, },
{
when: installerDocker,
type: 'list',
name: 'postgres_datapath',
default: utils.getDefault( 'postgres_datapath' ),
choices: [
{
name: utils.setupDir()+"/cyphernode/postgres",
value: utils.setupDir()+"/cyphernode/postgres"
},
{
name: utils.defaultDataDirBase()+"/cyphernode/postgres",
value: utils.defaultDataDirBase()+"/cyphernode/postgres"
},
{
name: utils.defaultDataDirBase()+"/.cyphernode/postgres",
value: utils.defaultDataDirBase()+"/.cyphernode/postgres"
},
{
name: utils.defaultDataDirBase()+"/postgres",
value: utils.defaultDataDirBase()+"/postgres"
},
{
name: "Custom path",
value: "_custom"
}
],
message: prefix()+'Where do you want to store your Postgres files?'+utils.getHelp('postgres_datapath'),
},
{
when: (props)=>{ return installerDocker(props) && (props.postgres_datapath === '_custom') },
type: 'input',
name: 'postgres_datapath_custom',
default: utils.getDefault( 'postgres_datapath_custom' ),
filter: utils.trimFilter,
validate: utils.pathValidator,
message: prefix()+'Custom path for Postgres files?'+utils.getHelp('postgres_datapath_custom'),
},
 {
 when: installerDocker,
 type: 'list',

View File

@@ -0,0 +1,726 @@
{
"definitions": {},
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "http://cyphernode.io/config-v0.2.6.json",
"type": "object",
"title": "Cyphernode config file structure v0.2.6",
"additionalProperties": false,
"required": [
"schema_version",
"setup_version",
"features",
"net",
"use_xpub",
"installer_mode",
"run_as_different_user",
"docker_mode",
"docker_versions",
"adminhash",
"bitcoin_rpcuser",
"bitcoin_rpcpassword",
"bitcoin_prune",
"bitcoin_datapath",
"bitcoin_mode",
"bitcoin_expose",
"gatekeeper_expose",
"gatekeeper_keys",
"gatekeeper_sslcert",
"gatekeeper_sslkey",
"gatekeeper_cns",
"gatekeeper_clientkeyspassword",
"gatekeeper_datapath",
"gatekeeper_port",
"proxy_datapath",
"postgres_password",
"postgres_datapath",
"logs_datapath",
"traefik_datapath",
"traefik_http_port",
"traefik_https_port"
],
"allOf": [
{
"if": {
"properties": {
"run_as_different_user": {
"enum": [
true
]
}
}
},
"then": {
"required": [
"username"
]
}
},
{
"if": {
"properties": {
"use_xpub": {
"enum": [
true
]
}
}
},
"then": {
"required": [
"xpub",
"derivation_path"
]
}
},
{
"if": {
"properties": {
"bitcoin_prune": {
"enum": [
true
]
}
}
},
"then": {
"required": [
"bitcoin_prune_size"
]
}
},
{
"if": {
"properties": {
"features": {
"contains": {
"enum": [
"tor"
]
}
}
}
},
"then": {
"required": [
"tor_datapath",
"torifyables",
"clearnet"
]
}
},
{
"if": {
"properties": {
"features": {
"contains": {
"enum": [
"lightning"
]
}
}
}
},
"then": {
"required": [
"lightning_announce",
"lightning_expose",
"lightning_implementation",
"lightning_datapath",
"lightning_nodename",
"lightning_nodecolor"
]
}
},
{
"if": {
"properties": {
"features": {
"contains": {
"enum": [
"otsclient"
]
}
}
}
},
"then": {
"required": [
"otsclient_datapath"
]
}
}
],
"properties": {
"schema_version": {
"type": "string",
"enum": [
"0.2.5"
],
"default": "0.3.0",
"examples": [
"0.2.5"
]
},
"setup_version": {
"type": "string",
"examples": [
"v0.2.0"
]
},
"docker_versions": {
"$id": "#/properties/dockerVersions",
"type": "object",
"title": "All versions of the docker containers",
"default": {},
"additionalProperties": {
"type": "string"
}
},
"features": {
"$id": "#/properties/features",
"type": "array",
"title": "The optional features of this cyphernode",
"default": ["specter"],
"items": {
"$id": "#/properties/features/items",
"type": "string",
"enum": [
"tor",
"lightning",
"otsclient",
"batcher",
"specter"
],
"title": "The feature",
"default": "",
"examples": [
"tor",
"lightning",
"otsclient",
"batcher",
"specter"
]
}
},
"torifyables": {
"$id": "#/properties/torifyables",
"type": "array",
"title": "The Torified features of this cyphernode",
"default": [],
"items": {
"$id": "#/properties/torifyables/items",
"type": "string",
"enum": [
"tor_traefik",
"tor_bitcoin",
"tor_lightning",
"tor_otsoperations",
"tor_otswebhooks",
"tor_addrwatcheswebhooks",
"tor_txidwatcheswebhooks"
],
"title": "The Torified feature",
"default": "",
"examples": [
"tor_traefik",
"tor_bitcoin",
"tor_lightning",
"tor_otsoperations",
"tor_otswebhooks",
"tor_addrwatcheswebhooks",
"tor_txidwatcheswebhooks"
]
}
},
"clearnet": {
"$id": "#/properties/clearnet",
"type": "array",
"title": "The clearnet-allowed Torified features of this cyphernode",
"default": [],
"items": {
"$id": "#/properties/clearnet/items",
"type": "string",
"enum": [
"clearnet_bitcoin",
"clearnet_lightning"
],
"title": "The clearnet-allowed Torified feature",
"default": "",
"examples": [
"clearnet_bitcoin",
"clearnet_lightning"
]
}
},
"net": {
"$id": "#/properties/net",
"type": "string",
"enum": [
"testnet",
"mainnet",
"regtest"
],
"title": "The net cyphernode is running on",
"default": "testnet",
"examples": [
"testnet"
]
},
"use_xpub": {
"$id": "#/properties/use_xpub",
"type": "boolean",
"title": "Use xpub key?",
"default": false,
"examples": [
false
]
},
"xpub": {
"$id": "#/properties/xpub",
"type": "string",
"title": "Default xpub to derive addresses from",
"pattern": "^(\\w+)$"
},
"derivation_path": {
"$id": "#/properties/derivation_path",
"type": "string",
"title": "Default derivation path",
"default": "0/n",
"examples": [
"0/n"
]
},
"installer_mode": {
"$id": "#/properties/installer_mode",
"type": "string",
"enum": [
"docker"
],
"title": "Install mode",
"default": "docker",
"examples": [
"docker"
]
},
"run_as_different_user": {
"$id": "#/properties/run_as_different_user",
"type": "boolean",
"title": "Run as different user",
"default": true,
"examples": [
true
]
},
"username": {
"$id": "#/properties/username",
"type": "string",
"title": "Username to run under",
"default": "cyphernode",
"examples": [
"cyphernode"
]
},
"docker_mode": {
"$id": "#/properties/docker_mode",
"type": "string",
"enum": [
"swarm",
"compose"
],
"title": "How to run the containers",
"default": "swarm",
"examples": [
"compose"
]
},
"bitcoin_rpcuser": {
"$id": "#/properties/bitcoin_rpcuser",
"type": "string",
"title": "Bitcoin rpc user",
"default": "bitcoin",
"examples": [
"bitcoin"
]
},
"bitcoin_rpcpassword": {
"$id": "#/properties/bitcoin_rpcpassword",
"type": "string",
"title": "Bitcoin rpc password",
"default": "CHANGEME",
"examples": [
"CHANGEME"
]
},
"bitcoin_uacomment": {
"$id": "#/properties/bitcoin_uacomment",
"type": "string",
"title": "Bitcoin user agent comment",
"examples": [
"cyphernode"
]
},
"bitcoin_prune": {
"$id": "#/properties/bitcoin_prune",
"type": "boolean",
"title": "Bitcoin prune",
"default": false,
"examples": [
"false"
]
},
"bitcoin_prune_size": {
"$id": "#/properties/bitcoin_prune_size",
"type": "integer",
"title": "Bitcoin prune size",
"default": 550,
"examples": [
550
]
},
"bitcoin_datapath": {
"$id": "#/properties/bitcoin_datapath",
"type": "string",
"title": "Bitcoin datapath",
"examples": [
"/tmp/cyphernode/bitcoin"
]
},
"bitcoin_datapath_custom": {
"$id": "#/properties/bitcoin_datapath_custom",
"type": "string",
"title": "Bitcoin custom datapath",
"examples": [
"/tmp/cyphernode/bitcoin"
]
},
"lightning_datapath": {
"$id": "#/properties/lightning_datapath",
"type": "string",
"title": "Lightning datapath",
"examples": [
"/tmp/cyphernode/lightning"
]
},
"lightning_datapath_custom": {
"$id": "#/properties/lightning_datapath_custom",
"type": "string",
"title": "Lightning custom datapath",
"examples": [
"/tmp/cyphernode/lightning"
]
},
"proxy_datapath": {
"$id": "#/properties/proxy_datapath",
"type": "string",
"title": "Proxy datapath",
"examples": [
"/tmp/cyphernode/proxy"
]
},
"proxy_datapath_custom": {
"$id": "#/properties/proxy_datapath_custom",
"type": "string",
"title": "Proxy custom datapath",
"examples": [
"/tmp/cyphernode/proxy"
]
},
"otsclient_datapath": {
"$id": "#/properties/otsclient_datapath",
"type": "string",
"title": "OTS Client datapath",
"examples": [
"/tmp/cyphernode/otsclient"
]
},
"otsclient_datapath_custom": {
"$id": "#/properties/otsclient_datapath_custom",
"type": "string",
"title": "OTS Client custom datapath",
"examples": [
"/tmp/cyphernode/otsclient"
]
},
"traefik_http_port": {
"$id": "#/properties/traefik_port",
"type": "integer",
"title": "Traefik HTTP port",
"default": 80,
"examples": [
80
]
},
"traefik_https_port": {
"$id": "#/properties/traefik_https_port",
"type": "integer",
"title": "Traefik HTTPS port",
"default": 443,
"examples": [
443
]
},
"traefik_datapath": {
"$id": "#/properties/traefik_datapath",
"type": "string",
"title": "Traefik datapath",
"examples": [
"/tmp/cyphernode/traefik"
]
},
"traefik_datapath_custom": {
"$id": "#/properties/traefik_datapath_custom",
"type": "string",
"title": "Traefik custom datapath",
"examples": [
"/tmp/cyphernode/traefik"
]
},
"postgres_password": {
"$id": "#/properties/postgres_password",
"type": "string",
"title": "Postgres cyphernode's password",
"default": "CHANGEME",
"examples": [
"CHANGEME"
]
},
"postgres_datapath": {
"$id": "#/properties/postgres_datapath",
"type": "string",
"title": "Postgres datapath",
"examples": [
"/tmp/cyphernode/postgres"
]
},
"postgres_datapath_custom": {
"$id": "#/properties/postgres_datapath_custom",
"type": "string",
"title": "Postgres custom datapath",
"examples": [
"/tmp/cyphernode/postgres"
]
},
"logs_datapath": {
"$id": "#/properties/logs_datapath",
"type": "string",
"title": "Logs datapath",
"examples": [
"/tmp/cyphernode/logs"
]
},
"logs_datapath_custom": {
"$id": "#/properties/logs_datapath_custom",
"type": "string",
"title": "Logs custom datapath",
"examples": [
"/tmp/cyphernode/logs"
]
},
"tor_datapath": {
"$id": "#/properties/tor_datapath",
"type": "string",
"title": "Tor datapath",
"examples": [
"/tmp/cyphernode/tor"
]
},
"tor_datapath_custom": {
"$id": "#/properties/tor_datapath_custom",
"type": "string",
"title": "Tor custom datapath",
"examples": [
"/tmp/cyphernode/tor"
]
},
"lightning_announce": {
"$id": "#/properties/lightning_announce",
"type": "boolean",
"title": "Announce lightning ip",
"default": false,
"examples": [
false
]
},
"lightning_external_ip": {
"$id": "#/properties/lightning_external_ip",
"type": "string",
"format": "ipv4",
"title": "External lightning node ip",
"examples": [
"123.123.123.123"
]
},
"bitcoin_mode": {
"$id": "#/properties/bitcoin_mode",
"type": "string",
"enum": [
"internal"
],
"title": "Bitcoin mode",
"default": "internal",
"examples": [
"internal"
]
},
"bitcoin_expose": {
"$id": "#/properties/bitcoin_expose",
"type": "boolean",
"title": "Expose bitcoin node",
"default": false,
"examples": [
true
]
},
"lightning_expose": {
"$id": "#/properties/lightning_expose",
"type": "boolean",
"title": "Expose lightning node",
"default": true,
"examples": [
false
]
},
"gatekeeper_expose": {
"$id": "#/properties/gatekeeper_expose",
"type": "boolean",
"title": "Expose gatekeeper port",
"default": false,
"examples": [
true
]
},
"gatekeeper_datapath": {
"$id": "#/properties/gatekeeper_datapath",
"type": "string",
"title": "Gatekeeper datapath",
"examples": [
"/tmp/cyphernode/gatekeeper"
]
},
"gatekeeper_datapath_custom": {
"$id": "#/properties/gatekeeper_datapath_custom",
"type": "string",
"title": "Gatekeeper custom datapath",
"examples": [
"/tmp/cyphernode/gatekeeper"
]
},
"gatekeeper_port": {
"$id": "#/properties/gatekeeper_port",
"type": "integer",
"title": "Gatekeeper port",
"default": 2009,
"examples": [
2009
]
},
"gatekeeper_keys": {
"$id": "#/properties/gatekeeper_keys",
"type": "object",
"title": "Gatekeeper keys",
"default": {
"configEntries": [],
"clientInformation": []
},
"required": [
"configEntries",
"clientInformation"
],
"properties": {
"configEntries": {
"$id": "#/properties/gatekeeper_keys/configEntries",
"type": "array",
"items": {
"$id": "#/properties/gatekeeper_keys/configEntries/entry",
"type": "string",
"pattern": "^kapi_id=\".+\";kapi_key=\".+\";kapi_groups=\".+\";.+$"
},
"examples": [
[
"kapi_id=\"000\";kapi_key=\"a27f9e73fdde6a5005879c259c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a\";kapi_groups=\"stats\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}",
"kapi_id=\"001\";kapi_key=\"a27f9e73fdde6a5005879c273c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a\";kapi_groups=\"stats,watcher\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}",
"kapi_id=\"002\";kapi_key=\"fe58ddbb66d7302a7087af3242a98b6326c51a257f5eab1c06bb8cc02e25890d\";kapi_groups=\"stats,watcher,spender\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}",
"kapi_id=\"003\";kapi_key=\"f0b8bb52f4c7007938757bcdfc73b452d6ce08cc0c660ce57c5464ae95f35417\";kapi_groups=\"stats,watcher,spender,admin\";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}"
]
]
},
"clientInformation": {
"$id": "#/properties/gatekeeper_keys/clientInformation",
"type": "array",
"items": {
"$id": "#/properties/gatekeeper_keys/clientInformation/entry",
"type": "string",
"pattern": "^.+=.+$"
},
"examples": [
[
"000=a27f9e73fdde6a5005879c259c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a",
"001=a27f9e73fdde6a5005879c273c9aea5e8d917eec77bbdfd73272c0af9b4c6b7a",
"002=fe58ddbb66d7302a7087af3242a98b6326c51a257f5eab1c06bb8cc02e25890d",
"003=f0b8bb52f4c7007938757bcdfc73b452d6ce08cc0c660ce57c5464ae95f35417"
]
]
}
}
},
"gatekeeper_sslcert": {
"$id": "#/properties/gatekeeper_sslcert",
"type": "string",
"title": "Gatekeeper SSL Cert"
},
"gatekeeper_sslkey": {
"$id": "#/properties/gatekeeper_sslkey",
"type": "string",
"title": "Gatekeeper SSL Key"
},
"gatekeeper_cns": {
"$id": "#/properties/gatekeeper_cns",
"type": "string",
"title": "Gatekeeper cns",
"examples": [
"myhost.mydomain.com,*.myotherdomain.com,123.123.123.123"
]
},
"gatekeeper_clientkeyspassword": {
"$id": "#/properties/gatekeeper_clientkeyspassword",
"type": "string",
"title": "Password for the encrypted client keys archive"
},
"adminhash": {
"$id": "#/properties/adminhash",
"type": "string",
"title": "Bcrypted hash of admin password"
},
"lightning_implementation": {
"$id": "#/properties/lightning_implementation",
"type": "string",
"enum": [
"c-lightning"
],
"title": "The lightning implementation",
"default": "c-lightning",
"examples": [
"c-lightning"
]
},
"lightning_nodename": {
"$id": "#/properties/lightning_nodename",
"type": "string",
"title": "The lightning node name",
"examples": [
"🚀 Mighty Moose 🚀"
]
},
"lightning_nodecolor": {
"$id": "#/properties/lightning_nodecolor",
"type": "string",
"pattern": "^[0-9A-Fa-f]{6}$",
"title": "The lightning node color",
"examples": [
"ff0000",
"00ff00",
"00ffff"
]
}
}
}

View File

@@ -8,6 +8,7 @@ LIGHTNING_IMPLEMENTATION=<%= lightning_implementation %>
 PROXY_DATAPATH=<%= proxy_datapath %>
 GATEKEEPER_DATAPATH=<%= gatekeeper_datapath %>
 GATEKEEPER_PORT=<%= gatekeeper_port %>
+POSTGRES_DATAPATH=<%= postgres_datapath %>
 LOGS_DATAPATH=<%= logs_datapath %>
 TRAEFIK_DATAPATH=<%= traefik_datapath %>
 FEATURE_TOR=<%= (features.indexOf('tor') != -1)?'true':'false' %>

View File

@@ -2,6 +2,46 @@ version: "3"
 services:
##########################
# POSTGRESQL #
##########################
postgres:
image: postgres:<%= postgres_version %>
user: $USER
entrypoint: sh -c 'rm -f /container_monitor/postgres_ready ; exec docker-entrypoint.sh -c logging_collector=true -c log_directory=/cnlogs/'
environment:
- "POSTGRES_USER=cyphernode"
- "POSTGRES_PASSWORD=<%= postgres_password %>"
- "POSTGRES_DB=cyphernode"
- "PGDATA=/var/lib/postgresql/data/pgdata"
volumes:
- "<%= postgres_datapath %>:/var/lib/postgresql/data"
- "<%= logs_datapath %>:/cnlogs"
- container_monitor:/container_monitor
healthcheck:
test: sh -c 'psql -U cyphernode -c "select 1;" && touch /container_monitor/postgres_ready && chown $USER /container_monitor/postgres_ready || rm -f /container_monitor/postgres_ready'
interval: 30s
timeout: 10s
retries: 10
stop_grace_period: 90s
networks:
- cyphernodenet
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
 <% if ( features.indexOf('tor') !== -1 ) { %>
 ##########################
 # TOR #
@@ -9,14 +49,14 @@ services:
 tor:
 image: cyphernode/tor:<%= tor_version %>
-# Sleeping 7 seconds to let lightning and traefik start
+# Sleeping 10 seconds to let lightning and traefik start
-command: $USER sh -c 'rm -f /container_monitor/tor_ready ; sleep 10 ; export HOME=/tor ; tor -f /tor/torrc'
+command: $USER sh -c 'rm -f /container_monitor/tor_ready ; sleep 10 ; export HOME=/tor ; exec tor -f /tor/torrc'
 volumes:
 - "<%= tor_datapath %>:/tor"
 - container_monitor:/container_monitor
 healthcheck:
-test: chown $USER /container_monitor && su-exec $USER sh -c 'tor-resolve torproject.org && touch /container_monitor/tor_ready && chown $USER /container_monitor/tor_ready || rm -f /container_monitor/tor_ready'
+test: chown -R $USER /container_monitor && su-exec $USER sh -c 'tor-resolve torproject.org && touch /container_monitor/tor_ready || rm -f /container_monitor/tor_ready'
-interval: 20s
+interval: 30s
 timeout: 10s
 retries: 10
 networks:
@@ -55,8 +95,8 @@ services:
- "<%= bitcoin_datapath %>/bitcoin-client.conf:/.bitcoin/bitcoin.conf:ro" - "<%= bitcoin_datapath %>/bitcoin-client.conf:/.bitcoin/bitcoin.conf:ro"
- container_monitor:/container_monitor - container_monitor:/container_monitor
healthcheck: healthcheck:
test: chown $USER /container_monitor && su-exec $USER sh -c 'lightning-cli getinfo && touch /container_monitor/lightning_ready && chown $USER /container_monitor/lightning_ready || rm -f /container_monitor/lightning_ready' test: chown -R $USER /container_monitor && su-exec $USER sh -c 'lightning-cli getinfo && touch /container_monitor/lightning_ready || rm -f /container_monitor/lightning_ready'
interval: 20s interval: 30s
timeout: 10s timeout: 10s
retries: 10 retries: 10
stop_grace_period: 30s stop_grace_period: 30s
@@ -89,7 +129,7 @@ services:
 bitcoin:
 image: cyphernode/bitcoin:<%= bitcoin_version %>
 command: $USER /.bitcoin/entrypoint.sh
 <% if( bitcoin_expose ) { %>
 ports:
 - "<%= (net === 'regtest') ? '18444:18444' : ((net === 'testnet') ? '18333:18333' : '8333:8333') %>"
@@ -99,8 +139,8 @@ services:
- "<%= bitcoin_datapath %>/createWallets.sh:/.bitcoin/createWallets.sh:ro" - "<%= bitcoin_datapath %>/createWallets.sh:/.bitcoin/createWallets.sh:ro"
- container_monitor:/container_monitor - container_monitor:/container_monitor
healthcheck: healthcheck:
test: chown $USER /container_monitor && su-exec $USER sh -c 'bitcoin-cli echo && touch /container_monitor/bitcoin_ready || rm -f /container_monitor/bitcoin_ready' test: chown -R $USER /container_monitor && su-exec $USER sh -c 'bitcoin-cli echo && touch /container_monitor/bitcoin_ready || rm -f /container_monitor/bitcoin_ready'
interval: 20s interval: 30s
timeout: 10s timeout: 10s
retries: 10 retries: 10
stop_grace_period: 30s stop_grace_period: 30s
@@ -156,6 +196,7 @@ services:
- "OTSCLIENT_CONTAINER=otsclient:6666" - "OTSCLIENT_CONTAINER=otsclient:6666"
- "OTS_FILES=/proxy/otsfiles" - "OTS_FILES=/proxy/otsfiles"
- "XPUB_DERIVATION_GAP=100" - "XPUB_DERIVATION_GAP=100"
- "PGPASSFILE=/proxy/db/pgpass"
<% if ( devmode ) { %> <% if ( devmode ) { %>
ports: ports:
- "8888:8888" - "8888:8888"
@@ -172,8 +213,17 @@ services:
 <% if ( features.indexOf('tor') !== -1 ) { %>
 - "<%= tor_datapath %>:/proxy/tor"
 <% } %>
+- container_monitor:/container_monitor
+healthcheck:
+test: chown -R $USER /container_monitor && su-exec $USER sh -c 'curl localhost:8888/helloworld && touch /container_monitor/proxy_ready || rm -f /container_monitor/proxy_ready'
+interval: 30s
+timeout: 10s
+retries: 10
+stop_grace_period: 30s
 networks:
 - cyphernodenet
+depends_on:
+- postgres
 <% if ( docker_mode === 'swarm' ) { %>
 deploy:
 replicas: 1
@@ -250,6 +300,7 @@ services:
 command: $USER ./startnotifier.sh
 <% if ( features.indexOf('tor') !== -1 ) { %>
 environment:
+- "TRACING=1"
 - "TOR_HOST=tor"
 - "TOR_PORT=9050"
 <% } %>
@@ -373,6 +424,7 @@ services:
- "<%= gatekeeper_datapath %>/installation.json:/etc/nginx/conf.d/s/stats/installation.json" - "<%= gatekeeper_datapath %>/installation.json:/etc/nginx/conf.d/s/stats/installation.json"
- "<%= gatekeeper_datapath %>/client.7z:/etc/nginx/conf.d/s/stats/client.7z" - "<%= gatekeeper_datapath %>/client.7z:/etc/nginx/conf.d/s/stats/client.7z"
- "<%= gatekeeper_datapath %>/config.7z:/etc/nginx/conf.d/s/stats/config.7z" - "<%= gatekeeper_datapath %>/config.7z:/etc/nginx/conf.d/s/stats/config.7z"
- container_monitor:/container_monitor
networks: networks:
- cyphernodenet - cyphernodenet
- cyphernodeappsnet - cyphernodeappsnet

View File

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 . ./.cyphernodeconf/installer/config.sh
@@ -60,6 +60,9 @@ export USER=$(id -u <%= default_username %>):$(id -g <%= default_username %>)
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)" current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
# Let's make sure the container readyness files are deleted before starting the stack
docker run --rm -v cyphernode_container_monitor:/container_monitor alpine sh -c 'rm -f /container_monitor/*_ready'
<% if (docker_mode == 'swarm') { %> <% if (docker_mode == 'swarm') { %>
docker stack deploy -c $current_path/docker-compose.yaml cyphernode docker stack deploy -c $current_path/docker-compose.yaml cyphernode
<% } else if(docker_mode == 'compose') { %> <% } else if(docker_mode == 'compose') { %>
@@ -68,11 +71,13 @@ docker-compose -f $current_path/docker-compose.yaml up -d --remove-orphans
 start_apps
-export ARCH=$(uname -m)
-case "${ARCH}" in arm*)
-printf "\r\n\033[1;31mSince we're on a slow RPi, let's give Docker 60 more seconds before performing our tests...\033[0m\r\n\r\n"
+printf "\r\nDetermining the speed of your machine..."
+speedseconds=$(bash -c ' : {1..500000} ; echo $SECONDS')
+if [ "${speedseconds}" -gt "2" ]; then
+printf "\r\n\033[1;31mSince we're on a slow computer, let's give Docker 60 more seconds before performing our tests...\033[0m\r\n\r\n"
 sleep 60
-;;
-esac
+else
+printf " It's pretty fast!\r\n"
+fi
 . ./testdeployment.sh

View File

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"

View File

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 . ./.cyphernodeconf/installer/config.sh
@@ -65,14 +65,21 @@ if [ -f $current_path/exitStatus.sh ]; then
 rm -f $current_path/exitStatus.sh
 fi
-test_apps
+if [ "$EXIT_STATUS" -ne "0" ]; then
+printf "\r\n\033[1;31mSkipping cypherapps deployment because of previous errors.\r\n\r\n\033[0m"
+else
+test_apps
+fi
 EXIT_STATUS=$(($? | ${EXIT_STATUS}))
 printf "\r\n\e[1;32mTests finished.\e[0m\n"
 if [ "$EXIT_STATUS" -ne "0" ]; then
-printf "\r\n\033[1;31mThere was an error during cyphernode installation. full logs: docker ps -q | xargs -L 1 docker logs , Containers logs: docker logs <containerid> , list containers: docker ps .Please see Docker's logs for more information. Run ./testdeployment.sh to rerun the tests. Run ./stop.sh to stop cyphernode.\r\n\r\n\033[0m"
+printf "\r\n\033[1;31mThere was an error during cyphernode installation.\r\n\033[0m"
+printf "\r\n\033[1;31mCheck logs in your logs directory (${LOGS_DATAPATH}).\r\n\033[0m"
+printf "\r\n\033[1;31mRun ./testdeployment.sh to rerun the tests.\033[0m"
+printf "\r\n\033[1;31mRun ./stop.sh to stop cyphernode.\r\n\033[0m"
 exit 1
 fi

View File

@@ -1,6 +1,6 @@
 #!/bin/sh
-apk add --update --no-cache openssl curl jq coreutils > /dev/null
+apk add --update --no-cache openssl curl jq coreutils postgresql > /dev/null
 . /gatekeeper/keys.properties
@@ -72,7 +72,7 @@ checkpycoin() {
echo -en "\r\n\e[1;36mTesting Pycoin... " > /dev/console echo -en "\r\n\e[1;36mTesting Pycoin... " > /dev/console
local rc local rc
rc=$(curl -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" -s -o /dev/null -w "%{http_code}" http://proxy:8888/derivepubpath) rc=$(curl -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" -s -o /dev/null -w "%{http_code}" http://pycoin:7777/derive)
[ "${rc}" -ne "200" ] && return 100 [ "${rc}" -ne "200" ] && return 100
echo -e "\e[1;36mPycoin rocks!" > /dev/console echo -e "\e[1;36mPycoin rocks!" > /dev/console
@@ -80,6 +80,18 @@ checkpycoin() {
 return 0
 }
checkpostgres() {
echo -en "\r\n\e[1;36mTesting Postgres... " > /dev/console
local rc
pg_isready -h postgres -U cyphernode
[ "${?}" -ne "0" ] && return 105
echo -e "\e[1;36mPostgres rocks!" > /dev/console
return 0
}
 checkbroker() {
 echo -en "\r\n\e[1;36mTesting Broker... " > /dev/console
 local rc
@@ -97,7 +109,8 @@ checknotifier() {
 local response
 local returncode
-response=$(mosquitto_rr -h broker -W 15 -t notifier -e "response/$$" -m "{\"response-topic\":\"response/$$\",\"cmd\":\"web\",\"url\":\"http://proxy:8888/helloworld\",\"tor\":false}")
+nc -vlp1111 -e sh -c 'echo -en "HTTP/1.1 200 OK\\r\\n\\r\\n" ; date >&2 ; timeout 1 tee /dev/tty | cat ; ' &
+response=$(mosquitto_rr -h broker -W 15 -t notifier -e "response/$$" -m "{\"response-topic\":\"response/$$\",\"cmd\":\"web\",\"url\":\"http://$(hostname):1111/notifiertest\",\"tor\":false}")
 returncode=$?
 [ "${returncode}" -ne "0" ] && return 115
 http_code=$(echo "${response}" | jq -r ".http_code")
@@ -112,7 +125,8 @@ checkots() {
echo -en "\r\n\e[1;36mTesting OTSclient... " > /dev/console echo -en "\r\n\e[1;36mTesting OTSclient... " > /dev/console
local rc local rc
rc=$(curl -s -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' http://proxy:8888/ots_stamp) # rc=$(curl -s -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' http://proxy:8888/ots_stamp)
rc=$(curl -s otsclient:6666/stamp/123)
echo "${rc}" | grep "Invalid hash 123 for sha256" > /dev/null echo "${rc}" | grep "Invalid hash 123 for sha256" > /dev/null
[ "$?" -ne "0" ] && return 200 [ "$?" -ne "0" ] && return 200
@@ -170,12 +184,12 @@ checkservice() {
 while :
 do
 outcome=0
-for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
+for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
 echo -e " \e[0;32mVerifying \e[0;33m${container}\e[0;32m..." > /dev/console
 (ping -c 10 ${container} 2> /dev/null | grep "0% packet loss" > /dev/null) &
 eval ${container}=$!
 done
-for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
+for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
 eval wait '$'${container} ; returncode=$? ; outcome=$((${outcome} + ${returncode}))
 eval c_${container}=${returncode}
 done
@@ -193,12 +207,13 @@ checkservice() {
# { "name": "proxy", "active":true }, # { "name": "proxy", "active":true },
# { "name": "proxycron", "active":true }, # { "name": "proxycron", "active":true },
# { "name": "pycoin", "active":true }, # { "name": "pycoin", "active":true },
# { "name": "postgres", "active":true },
# { "name": "otsclient", "active":true }, # { "name": "otsclient", "active":true },
# { "name": "tor", "active":true }, # { "name": "tor", "active":true },
# { "name": "bitcoin", "active":true }, # { "name": "bitcoin", "active":true },
# { "name": "lightning", "active":true }, # { "name": "lightning", "active":true },
# ] # ]
for container in gatekeeper proxy proxycron broker notifier pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do for container in gatekeeper proxy proxycron broker notifier pycoin postgres <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %><%= (features.indexOf('tor') != -1)?'tor ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
[ -n "${result}" ] && result="${result}," [ -n "${result}" ] && result="${result},"
result="${result}{\"name\":\"${container}\",\"active\":" result="${result}{\"name\":\"${container}\",\"active\":"
eval "returncode=\$c_${container}" eval "returncode=\$c_${container}"
@@ -218,7 +233,7 @@ checkservice() {
 timeout_feature() {
 local interval=15
-local totaltime=120
+local totaltime=${2:-120}
 local testwhat=${1}
 local returncode
 local endtime=$(($(date +%s) + ${totaltime}))
@@ -254,6 +269,7 @@ feature_status() {
# { "name": "proxy", "active":true }, # { "name": "proxy", "active":true },
# { "name": "proxycron", "active":true }, # { "name": "proxycron", "active":true },
# { "name": "pycoin", "active":true }, # { "name": "pycoin", "active":true },
# { "name": "postgres", "active":true },
# { "name": "otsclient", "active":true }, # { "name": "otsclient", "active":true },
# { "name": "tor", "active":true }, # { "name": "tor", "active":true },
# { "name": "bitcoin", "active":true }, # { "name": "bitcoin", "active":true },
@@ -262,6 +278,7 @@ feature_status() {
# "features": [ # "features": [
# { "name": "gatekeeper", "working":true }, # { "name": "gatekeeper", "working":true },
# { "name": "pycoin", "working":true }, # { "name": "pycoin", "working":true },
# { "name": "postgres", "working":true },
# { "name": "otsclient", "working":true }, # { "name": "otsclient", "working":true },
# { "name": "tor", "working":true }, # { "name": "tor", "working":true },
# { "name": "bitcoin", "working":true }, # { "name": "bitcoin", "working":true },
@@ -285,7 +302,7 @@ if [ "${returncode}" -ne "0" ]; then
echo -e "\e[1;31mCyphernode could not fully start properly within delay." > /dev/console echo -e "\e[1;31mCyphernode could not fully start properly within delay." > /dev/console
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"proxy\") | .active") status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"proxy\") | .active")
if [ "${status}" = "false" ]; then if [ "${status}" = "false" ]; then
echo -e "\e[1;31mThe Proxy, the main Cyphernode's component, is not responding. We will only test the gatekeeper if its container is up, but you'll see errors for the other components. Please check the logs." > /dev/console echo -e "\r\n\e[1;31mThe Proxy, the main Cyphernode's component, is not responding. You'll see errors for the other components. Please check the logs." > /dev/console
workingproxy="false" workingproxy="false"
fi fi
else else
@@ -296,19 +313,60 @@ fi
# "features": [ # "features": [
# { "name": "gatekeeper", "working":true }, # { "name": "gatekeeper", "working":true },
# { "name": "pycoin", "working":true }, # { "name": "pycoin", "working":true },
# { "name": "postgres", "working":true },
# { "name": "otsclient", "working":true }, # { "name": "otsclient", "working":true },
# { "name": "tor", "working":true }, # { "name": "tor", "working":true },
# { "name": "bitcoin", "working":true }, # { "name": "bitcoin", "working":true },
# { "name": "lightning", "working":true }, # { "name": "lightning", "working":true },
# ] # ]
#############################
# PROXY #
#############################
if [ ! -f /container_monitor/proxy_dbfailed ]; then
echo -e "\r\n\e[1;36mWaiting for Proxy to be ready... " > /dev/console
timeout_feature '[ -f "/container_monitor/proxy_ready" ]' 300
returncode=$?
if [ "${returncode}" -ne "0" ]; then
echo -e "\r\n\e[1;31mThe proxy is still not ready. It may be migrating large quantity of data? Please check the logs for more details." > /dev/console
workingproxy="false"
fi
fi
if [ -f /container_monitor/proxy_dbfailed ]; then
echo -e "\r\n\e[1;31mThe proxy's database migration failed. Please check proxy.log for more details." > /dev/console
workingproxy="false"
fi
if [ "${workingproxy}" = "false" ]; then
echo -e "\r\n\e[1;31mThe Proxy, the main Cyphernode's component, is not ready. Cyphernode can't be run without the proxy component." > /dev/console
echo -e "\r\n\e[1;31mThe other components will fail next, this is normal." > /dev/console
fi
result="${containers},\"features\":[{\"coreFeature\":true,\"name\":\"proxy\",\"working\":${workingproxy}}"
#############################
# POSTGRES #
#############################
result="${result},{\"coreFeature\":true,\"name\":\"postgres\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"postgres\") | .active")
if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
timeout_feature checkpostgres
returncode=$?
else
returncode=1
fi
finalreturncode=$((${returncode} | ${finalreturncode}))
result="${result}$(feature_status ${returncode} 'Postgres error!')}"
 #############################
 # GATEKEEPER #
 #############################
-result="${containers},\"features\":[{\"coreFeature\":true, \"name\":\"proxy\",\"working\":${workingproxy}}, {\"coreFeature\":true, \"name\":\"gatekeeper\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"gatekeeper\",\"working\":"
 status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"gatekeeper\") | .active")
-if [ "${status}" = "true" ]; then
+if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
 timeout_feature checkgatekeeper
 returncode=$?
 else
@@ -321,7 +379,7 @@ result="${result}$(feature_status ${returncode} 'Gatekeeper error!')}"
 # BROKER #
 #############################
-result="${result},{\"coreFeature\":true, \"name\":\"broker\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"broker\",\"working\":"
 status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"broker\") | .active")
 if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
 timeout_feature checkbroker
@@ -336,7 +394,7 @@ result="${result}$(feature_status ${returncode} 'Broker error!')}"
 # NOTIFIER #
 #############################
-result="${result},{\"coreFeature\":true, \"name\":\"notifier\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"notifier\",\"working\":"
 status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"notifier\") | .active")
 if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
 timeout_feature checknotifier
@@ -351,7 +409,7 @@ result="${result}$(feature_status ${returncode} 'Notifier error!')}"
 # PYCOIN #
 #############################
-result="${result},{\"coreFeature\":true, \"name\":\"pycoin\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"pycoin\",\"working\":"
 status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"pycoin\") | .active")
 if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
 timeout_feature checkpycoin
@@ -367,7 +425,7 @@ result="${result}$(feature_status ${returncode} 'Pycoin error!')}"
 # OTSCLIENT #
 #############################
-result="${result},{\"coreFeature\":false, \"name\":\"otsclient\",\"working\":"
+result="${result},{\"coreFeature\":false,\"name\":\"otsclient\",\"working\":"
 status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"otsclient\") | .active")
 if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
 timeout_feature checkots
@@ -384,12 +442,11 @@ result="${result}$(feature_status ${returncode} 'OTSclient error!')}"
 # TOR #
 #############################
-echo -e "\r\n\e[1;36mWaiting for Tor to be ready... " > /dev/console
-timeout_feature '[ -f "/container_monitor/tor_ready" ]'
-result="${result},{\"coreFeature\":false, \"name\":\"tor\",\"working\":"
+result="${result},{\"coreFeature\":false,\"name\":\"tor\",\"working\":"
 status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"tor\") | .active")
 if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
+echo -e "\r\n\e[1;36mWaiting for Tor to be ready... " > /dev/console
+timeout_feature '[ -f "/container_monitor/tor_ready" ]'
 timeout_feature checktor
 returncode=$?
 else
@@ -403,12 +460,11 @@ result="${result}$(feature_status ${returncode} 'Tor error!')}"
 # BITCOIN #
 #############################
-echo -e "\r\n\e[1;36mWaiting for Bitcoin Core to be ready... " > /dev/console
-timeout_feature '[ -f "/container_monitor/bitcoin_ready" ]'
-result="${result},{\"coreFeature\":true, \"name\":\"bitcoin\",\"working\":"
+result="${result},{\"coreFeature\":true,\"name\":\"bitcoin\",\"working\":"
 status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"bitcoin\") | .active")
 if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
+echo -e "\r\n\e[1;36mWaiting for Bitcoin Core to be ready... " > /dev/console
+timeout_feature '[ -f "/container_monitor/bitcoin_ready" ]'
 timeout_feature checkbitcoinnode
 returncode=$?
 else
@@ -422,12 +478,11 @@ result="${result}$(feature_status ${returncode} 'Bitcoin error!')}"
 # LIGHTNING #
 #############################
-echo -e "\r\n\e[1;36mWaiting for C-Lightning to be ready... " > /dev/console
-timeout_feature '[ -f "/container_monitor/lightning_ready" ]'
-result="${result},{\"coreFeature\":false, \"name\":\"lightning\",\"working\":"
+result="${result},{\"coreFeature\":false,\"name\":\"lightning\",\"working\":"
 status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"lightning\") | .active")
 if [[ "${workingproxy}" = "true" && "${status}" = "true" ]]; then
+echo -e "\r\n\e[1;36mWaiting for C-Lightning to be ready... " > /dev/console
+timeout_feature '[ -f "/container_monitor/lightning_ready" ]'
 timeout_feature checklnnode
 returncode=$?
 else
@@ -438,6 +493,8 @@ result="${result}$(feature_status ${returncode} 'Lightning error!')}"
 <% } %>
+#############################
 result="{${result}]}"
 echo "${result}" > /gatekeeper/installation.json

View File

@@ -0,0 +1 @@
postgres:5432:cyphernode:cyphernode:<%= postgres_password %>

dist/setup.sh (vendored), 82 lines changed
View File

@@ -1,17 +1,34 @@
 #!/bin/bash
-### Execute this on a freshly install ubuntu luna node
-# curl -fsSL get.docker.com -o get-docker.sh
-# sh get-docker.sh
-# sudo usermod -aG docker $USER
-## logout and relogin
-# git clone --branch features/install --recursive https://github.com/schulterklopfer/cyphernode.git
-# sudo curl -L "https://github.com/docker/compose/releases/download/1.22.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
-# sudo chmod +x /usr/local/bin/docker-compose
-# cd cyphernode
-# ./setup.sh -ci
-# docker-compose -f docker-compose.yaml up [-d]
+# This is where everything is configured.
+# To determine speed of machine...
+#
+# bash -c ' : {1..500000} ; echo $SECONDS'
+#
+# MBP M1: 0
+# MBP Intel: 0
+# x86_64 avg machine: 0
+# RockPi Debian 64-bits: 1
+# RPi4 RaspiOS 64-bits: 1
+# RPi3 RaspiOS 32-bits: 4
+# RPi2 RaspiOS 32-bits: 7
+#
+# Let's say if timer > 2, we're on a slow machine.
+# At first we tried using uname -m to determine slow devices, but:
+#
+# uname -m result:
+# RPi2: armv7l
+# RPi3: armv7l
+# RPi4 on 32-bit OS: armv7l
+# RPi4 on 64-bit OS: aarch64
+# RockPi: aarch64
+# Apple M1: arm64
+# Intel 64: x86_64
+#
+# There are a ton of other possible values... and can't rely on them to detect
+# a slow device.
 # FROM: https://stackoverflow.com/questions/5195607/checking-bash-exit-status-of-several-commands-efficiently
 # Use step(), try(), and next() to perform a series of commands and print
@@ -110,7 +127,7 @@ sudo_if_required() {
 }
 modify_permissions() {
-local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
+local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
 for d in "${directories[@]}"
 do
 if [[ -e $d ]]; then
@@ -122,7 +139,7 @@ modify_permissions() {
 }
 modify_owner() {
-local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
+local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
 local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER)
 for d in "${directories[@]}"
 do
@@ -142,9 +159,6 @@ configure() {
recreate=" recreate" recreate=" recreate"
fi fi
local arch=$(uname -m)
local pw_env='' local pw_env=''
local interactive='' local interactive=''
local gen_options='' local gen_options=''
@@ -157,11 +171,12 @@ configure() {
pw_env=" -e CFG_PASSWORD=$CFG_PASSWORD" pw_env=" -e CFG_PASSWORD=$CFG_PASSWORD"
fi fi
echo "\nDetermining the speed of your machine..."
if [[ $arch =~ ^arm ]]; then local speedseconds=$(bash -c ' : {1..500000} ; echo $SECONDS')
clear && echo "Thinking. This may take a while, since I'm a Raspberry PI and my brain is so tiny. :(" if [[ $speedseconds > 2 ]]; then
clear && echo "This may take a while, since it seems we're running on a slow machine."
else else
clear && echo "Thinking..." clear && echo "Fast machine..."
fi fi
# before starting a new cyphernodeconf, kill all the others # before starting a new cyphernodeconf, kill all the others
@@ -193,6 +208,7 @@ configure() {
 -e PROXYCRON_VERSION=$PROXYCRON_VERSION \
 -e OTSCLIENT_VERSION=$OTSCLIENT_VERSION \
 -e PYCOIN_VERSION=$PYCOIN_VERSION \
+-e POSTGRES_VERSION=$POSTGRES_VERSION \
 -e BITCOIN_VERSION=$BITCOIN_VERSION \
 -e LIGHTNING_VERSION=$LIGHTNING_VERSION \
 -e CONF_VERSION=$CONF_VERSION \
@@ -348,14 +364,6 @@ compare_bitcoinconf() {
} }
install_docker() { install_docker() {
local archpath=$(uname -m)
# compat mode for SatoshiPortal repo
# TODO: add more mappings?
if [[ $archpath == 'armv7l' ]]; then
archpath="rpi"
fi
if [ ! -d $GATEKEEPER_DATAPATH ]; then if [ ! -d $GATEKEEPER_DATAPATH ]; then
step " create $GATEKEEPER_DATAPATH" step " create $GATEKEEPER_DATAPATH"
sudo_if_required mkdir -p $GATEKEEPER_DATAPATH sudo_if_required mkdir -p $GATEKEEPER_DATAPATH
@@ -385,6 +393,13 @@ install_docker() {
copy_file $cyphernodeconf_filepath/traefik/htpasswd $GATEKEEPER_DATAPATH/htpasswd 1 $SUDO_REQUIRED copy_file $cyphernodeconf_filepath/traefik/htpasswd $GATEKEEPER_DATAPATH/htpasswd 1 $SUDO_REQUIRED
if [ ! -d $POSTGRES_DATAPATH ]; then
step " create $POSTGRES_DATAPATH"
sudo_if_required mkdir -p $POSTGRES_DATAPATH/pgdata
next
fi
if [ ! -d $LOGS_DATAPATH ]; then if [ ! -d $LOGS_DATAPATH ]; then
step " create $LOGS_DATAPATH" step " create $LOGS_DATAPATH"
sudo_if_required mkdir -p $LOGS_DATAPATH sudo_if_required mkdir -p $LOGS_DATAPATH
@@ -461,6 +476,8 @@ install_docker() {
copy_file $cyphernodeconf_filepath/installer/config.sh $PROXY_DATAPATH/config.sh 1 $SUDO_REQUIRED copy_file $cyphernodeconf_filepath/installer/config.sh $PROXY_DATAPATH/config.sh 1 $SUDO_REQUIRED
copy_file $cyphernodeconf_filepath/cyphernode/info.json $PROXY_DATAPATH/info.json 1 $SUDO_REQUIRED copy_file $cyphernodeconf_filepath/cyphernode/info.json $PROXY_DATAPATH/info.json 1 $SUDO_REQUIRED
copy_file $cyphernodeconf_filepath/postgres/pgpass $PROXY_DATAPATH/pgpass 1 $SUDO_REQUIRED
sudo_if_required chmod 0600 $PROXY_DATAPATH/pgpass
if [[ $BITCOIN_INTERNAL == true ]]; then if [[ $BITCOIN_INTERNAL == true ]]; then
if [ ! -d $BITCOIN_DATAPATH ]; then if [ ! -d $BITCOIN_DATAPATH ]; then
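The pgpass file copied here is what lets the proxy's psql invocations authenticate non-interactively; libpq refuses to read it unless it is only accessible by its owner, hence the chmod 0600 right after the copy. For reference, a pgpass entry is one line per connection in the format below; the values shown are placeholders assuming the host and user names seen elsewhere in this diff (psql -h postgres -U cyphernode) and the default port, and the installer template generates the real password:

# hostname:port:database:username:password
postgres:5432:cyphernode:cyphernode:REPLACE_WITH_GENERATED_PASSWORD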
@@ -652,7 +669,7 @@ install_docker() {
check_directory_owner() { check_directory_owner() {
# if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0 # if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0
local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local status=0 local status=0
for d in "${directories[@]}" for d in "${directories[@]}"
do do
@@ -756,7 +773,7 @@ sanity_checks_pre_install() {
if [[ $sudo_reason == 'directories' ]]; then if [[ $sudo_reason == 'directories' ]]; then
echo " or check your data volumes if they have the right owner." echo " or check your data volumes if they have the right owner."
echo " The owner of the following folders should be '$RUN_AS_USER':" echo " The owner of the following folders should be '$RUN_AS_USER':"
local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local status=0 local status=0
for d in "${directories[@]}" for d in "${directories[@]}"
do do
@@ -855,10 +872,11 @@ PROXYCRON_VERSION="v0.7.0-dev"
OTSCLIENT_VERSION="v0.7.0-dev" OTSCLIENT_VERSION="v0.7.0-dev"
PYCOIN_VERSION="v0.7.0-dev" PYCOIN_VERSION="v0.7.0-dev"
CYPHERAPPS_VERSION="dev" CYPHERAPPS_VERSION="dev"
BITCOIN_VERSION="v0.21.1" BITCOIN_VERSION="v22.0"
LIGHTNING_VERSION="v0.10.1" LIGHTNING_VERSION="v0.10.2"
TRAEFIK_VERSION="v1.7.9-alpine" TRAEFIK_VERSION="v1.7.9-alpine"
MOSQUITTO_VERSION="1.6-openssl" MOSQUITTO_VERSION="1.6-openssl"
POSTGRES_VERSION="14.0-bullseye"
SETUP_DIR=$(dirname $(realpath $0)) SETUP_DIR=$(dirname $(realpath $0))

View File

@@ -224,8 +224,6 @@ paths:
- "pub32" - "pub32"
- "path" - "path"
- "nstart" - "nstart"
- "unconfirmedCallbackURL"
- "confirmedCallbackURL"
properties: properties:
label: label:
description: "Label for that xpub. Can be used, instead for xpub, for future references in xpub-related endpoints." description: "Label for that xpub. Can be used, instead for xpub, for future references in xpub-related endpoints."

View File

@@ -34,8 +34,7 @@ main() {
done done
} }
export TRACING=1
main main
returncode=$?
trace "[requesthandler] exiting" trace "[requesthandler] exiting"
exit $? exit ${returncode}

View File

@@ -2,4 +2,6 @@
. ./trace.sh . ./trace.sh
mosquitto_sub -h broker -t notifier | ./requesthandler.sh trace "Starting mosquitto and subscribing to the notifier topic..."
exec sh -c 'mosquitto_sub -h broker -t notifier | ./requesthandler.sh'

View File

@@ -64,41 +64,43 @@ main()
# GET http://192.168.111.152:8080/stamp/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7 # GET http://192.168.111.152:8080/stamp/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
response=$(stamp $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)) response=$(stamp $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
upgrade) upgrade)
# GET http://192.168.111.152:8080/upgrade/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7 # GET http://192.168.111.152:8080/upgrade/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
response=$(upgrade $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)) response=$(upgrade $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
verify) verify)
# POST http://192.168.111.152:8080/verify # POST http://192.168.111.152:8080/verify
# BODY {"hash":"1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7","base64otsfile":"AE9wZW5UaW1lc3RhbXBzAABQcm9vZ...gABYiWDXPXGQEDxNch"} # BODY {"hash":"1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7","base64otsfile":"AE9wZW5UaW1lc3RhbXBzAABQcm9vZ...gABYiWDXPXGQEDxNch"}
response=$(verify "${line}") response=$(verify "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
info) info)
# POST http://192.168.111.152:8080/info # POST http://192.168.111.152:8080/info
# BODY {"base64otsfile":"AE9wZW5UaW1lc3RhbXBzAABQcm9vZ...gABYiWDXPXGQEDxNch"} # BODY {"base64otsfile":"AE9wZW5UaW1lc3RhbXBzAABQcm9vZ...gABYiWDXPXGQEDxNch"}
response=$(info "${line}") response=$(info "${line}")
response_to_client "${response}" ${?} returncode=$?
break ;;
*)
response='{"error": {"code": -32601, "message": "Method not found"}, "id": "1"}'
returncode=1
;; ;;
esac esac
response=$(echo "${response}" | jq -Mc)
response_to_client "${response}" ${returncode}
break break
fi fi
done done
trace "[main] exiting" trace "[main] exiting"
return 0 return ${returncode}
} }
export TRACING
main main
exit $? returncode=$?
trace "[requesthandler] exiting"
exit ${returncode}

View File

@@ -1,6 +1,3 @@
#!/bin/sh #!/bin/sh
export TRACING exec nc -vlkp${OTSCLIENT_LISTENING_PORT} -e ./requesthandler.sh
export OTSCLIENT_LISTENING_PORT
nc -vlkp${OTSCLIENT_LISTENING_PORT} -e ./requesthandler.sh

View File

@@ -8,18 +8,18 @@ RUN apk add --update --no-cache \
curl \ curl \
su-exec \ su-exec \
py3-pip \ py3-pip \
xxd xxd \
postgresql
WORKDIR ${HOME} WORKDIR ${HOME}
COPY app/data/* ./ COPY app/data/* ./
COPY app/script/* ./ COPY app/script/* ./
COPY app/tests/* ./tests/ COPY app/tests/* ./tests/
COPY --from=cyphernode/clightning:v0.10.1 /usr/local/bin/lightning-cli ./ COPY --from=cyphernode/clightning:v0.10.2 /usr/local/bin/lightning-cli ./
COPY --from=eclipse-mosquitto:1.6-openssl /usr/bin/mosquitto_rr /usr/bin/mosquitto_sub /usr/bin/mosquitto_pub /usr/bin/ COPY --from=eclipse-mosquitto:1.6-openssl /usr/bin/mosquitto_rr /usr/bin/mosquitto_sub /usr/bin/mosquitto_pub /usr/bin/
COPY --from=eclipse-mosquitto:1.6-openssl /usr/lib/libmosquitto* /usr/lib/ COPY --from=eclipse-mosquitto:1.6-openssl /usr/lib/libmosquitto* /usr/lib/
COPY --from=eclipse-mosquitto:1.6-openssl /usr/lib/libcrypto* /usr/lib/ COPY --from=eclipse-mosquitto:1.6-openssl /lib/ld-musl-* /lib/
COPY --from=eclipse-mosquitto:1.6-openssl /usr/lib/libssl* /usr/lib/
RUN chmod +x startproxy.sh requesthandler.sh lightning-cli sqlmigrate*.sh waitanyinvoice.sh tests/* \ RUN chmod +x startproxy.sh requesthandler.sh lightning-cli sqlmigrate*.sh waitanyinvoice.sh tests/* \
&& chmod o+w . \ && chmod o+w . \

View File

@@ -0,0 +1,166 @@
BEGIN;
CREATE TABLE watching_by_pub32 (
id SERIAL PRIMARY KEY,
pub32 VARCHAR UNIQUE,
label VARCHAR UNIQUE,
derivation_path VARCHAR,
callback0conf VARCHAR,
callback1conf VARCHAR,
last_imported_n INTEGER,
watching BOOLEAN DEFAULT FALSE,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE watching (
id SERIAL PRIMARY KEY,
address VARCHAR,
label VARCHAR,
watching BOOLEAN DEFAULT FALSE,
callback0conf VARCHAR,
calledback0conf BOOLEAN DEFAULT FALSE,
callback1conf VARCHAR,
calledback1conf BOOLEAN DEFAULT FALSE,
imported BOOLEAN DEFAULT FALSE,
watching_by_pub32_id INTEGER REFERENCES watching_by_pub32,
pub32_index INTEGER,
event_message VARCHAR,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_watching_address ON watching (address);
CREATE UNIQUE INDEX idx_watching_01 ON watching (address, COALESCE(callback0conf, ''), COALESCE(callback1conf, ''));
CREATE INDEX idx_watching_label ON watching (label);
CREATE INDEX idx_watching_watching ON watching (watching);
CREATE INDEX idx_watching_imported ON watching (imported);
CREATE INDEX idx_watching_watching_by_pub32_id ON watching (watching_by_pub32_id);
CREATE TABLE tx (
id SERIAL PRIMARY KEY,
txid VARCHAR UNIQUE,
hash VARCHAR UNIQUE,
confirmations INTEGER DEFAULT 0,
timereceived BIGINT,
fee REAL,
size INTEGER,
vsize INTEGER,
is_replaceable BOOLEAN,
blockhash VARCHAR,
blockheight INTEGER,
blocktime BIGINT,
conf_target SMALLINT,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_tx_timereceived ON tx (timereceived);
CREATE INDEX idx_tx_fee ON tx (fee);
CREATE INDEX idx_tx_size ON tx (size);
CREATE INDEX idx_tx_vsize ON tx (vsize);
CREATE INDEX idx_tx_blockhash ON tx (blockhash);
CREATE INDEX idx_tx_blockheight ON tx (blockheight);
CREATE INDEX idx_tx_blocktime ON tx (blocktime);
CREATE INDEX idx_tx_confirmations ON tx (confirmations);
CREATE TABLE watching_tx (
watching_id INTEGER REFERENCES watching,
tx_id INTEGER REFERENCES tx,
vout INTEGER,
amount REAL
);
CREATE UNIQUE INDEX idx_watching_tx ON watching_tx (watching_id, tx_id);
CREATE INDEX idx_watching_tx_watching_id ON watching_tx (watching_id);
CREATE INDEX idx_watching_tx_tx_id ON watching_tx (tx_id);
CREATE TABLE batcher (
id SERIAL PRIMARY KEY,
label VARCHAR UNIQUE,
conf_target SMALLINT,
feerate REAL,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
INSERT INTO batcher (id, label, conf_target, feerate) VALUES (1, 'default', 6, NULL);
SELECT SETVAL('batcher_id_seq', 1);
CREATE TABLE recipient (
id SERIAL PRIMARY KEY,
address VARCHAR,
amount REAL,
tx_id INTEGER REFERENCES tx,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
webhook_url VARCHAR,
calledback BOOLEAN DEFAULT FALSE,
calledback_ts TIMESTAMP,
batcher_id INTEGER REFERENCES batcher,
label VARCHAR
);
CREATE INDEX idx_recipient_address ON recipient (address);
CREATE INDEX idx_recipient_label ON recipient (label);
CREATE INDEX idx_recipient_calledback ON recipient (calledback);
CREATE INDEX idx_recipient_webhook_url ON recipient (webhook_url);
CREATE INDEX idx_recipient_tx_id ON recipient (tx_id);
CREATE INDEX idx_recipient_batcher_id ON recipient (batcher_id);
CREATE TABLE watching_by_txid (
id SERIAL PRIMARY KEY,
txid VARCHAR,
watching BOOLEAN DEFAULT FALSE,
callback1conf VARCHAR,
calledback1conf BOOLEAN DEFAULT FALSE,
callbackxconf VARCHAR,
calledbackxconf BOOLEAN DEFAULT FALSE,
nbxconf INTEGER,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_watching_by_txid_txid ON watching_by_txid (txid);
CREATE UNIQUE INDEX idx_watching_by_txid_1x ON watching_by_txid (txid, COALESCE(callback1conf, ''), COALESCE(callbackxconf, ''));
CREATE INDEX idx_watching_by_txid_watching ON watching_by_txid (watching);
CREATE INDEX idx_watching_by_txid_callback1conf ON watching_by_txid (callback1conf);
CREATE INDEX idx_watching_by_txid_calledback1conf ON watching_by_txid (calledback1conf);
CREATE INDEX idx_watching_by_txid_callbackxconf ON watching_by_txid (callbackxconf);
CREATE INDEX idx_watching_by_txid_calledbackxconf ON watching_by_txid (calledbackxconf);
CREATE TABLE stamp (
id SERIAL PRIMARY KEY,
hash VARCHAR UNIQUE,
callbackUrl VARCHAR,
requested BOOLEAN DEFAULT FALSE,
upgraded BOOLEAN DEFAULT FALSE,
calledback BOOLEAN DEFAULT FALSE,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_stamp_calledback ON stamp (calledback);
CREATE TABLE cyphernode_props (
id SERIAL PRIMARY KEY,
property VARCHAR,
value VARCHAR,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_cp_property ON cyphernode_props (property);
CREATE UNIQUE INDEX idx_cp_propval ON cyphernode_props (property, value);
INSERT INTO cyphernode_props (id, property, value) VALUES (1, 'version', '0.1');
INSERT INTO cyphernode_props (id, property, value) VALUES (2, 'pay_index', '0');
SELECT SETVAL('cyphernode_props_id_seq', 2);
CREATE TABLE ln_invoice (
id SERIAL PRIMARY KEY,
label VARCHAR UNIQUE,
bolt11 VARCHAR UNIQUE,
payment_hash VARCHAR,
msatoshi BIGINT,
status VARCHAR,
pay_index INTEGER,
msatoshi_received BIGINT,
paid_at BIGINT,
description VARCHAR,
expires_at BIGINT,
callback_url VARCHAR,
calledback BOOLEAN DEFAULT FALSE,
callback_failed BOOLEAN DEFAULT FALSE,
inserted_ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_lninvoice_label ON ln_invoice (label);
CREATE INDEX idx_lninvoice_bolt11 ON ln_invoice (bolt11);
CREATE INDEX idx_lninvoice_calledback ON ln_invoice (calledback);
CREATE INDEX idx_lninvoice_callback_failed ON ln_invoice (callback_failed);
COMMIT;
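One detail worth calling out in this schema: the unique indexes wrap the callback columns in COALESCE because two NULLs never compare equal in SQL, so a plain unique index over nullable callback columns would not stop duplicate watch rows. A sketch of the behaviour being relied on (the address value is invented, and credentials are assumed to come from the pgpass file installed by setup.sh):

psql -h postgres -U cyphernode <<'EOF'
-- Both rows map to ('bc1qexampleaddress', '', '') through idx_watching_01,
-- so the second insert fails with a duplicate-key error instead of silently
-- creating a second identical watch.
INSERT INTO watching (address, callback0conf, callback1conf) VALUES ('bc1qexampleaddress', NULL, NULL);
INSERT INTO watching (address, callback0conf, callback1conf) VALUES ('bc1qexampleaddress', NULL, NULL);
EOF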

View File

@@ -1,27 +0,0 @@
PRAGMA foreign_keys = ON;
CREATE TABLE rawtx (
id INTEGER PRIMARY KEY AUTOINCREMENT,
txid TEXT UNIQUE,
hash TEXT UNIQUE,
confirmations INTEGER DEFAULT 0,
timereceived INTEGER,
fee REAL,
size INTEGER,
vsize INTEGER,
is_replaceable INTEGER,
blockhash TEXT,
blockheight INTEGER,
blocktime INTEGER,
conf_target INTEGER,
raw_tx TEXT,
inserted_ts INTEGER DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_rawtx_timereceived ON rawtx (timereceived);
CREATE INDEX idx_rawtx_fee ON rawtx (fee);
CREATE INDEX idx_rawtx_size ON rawtx (size);
CREATE INDEX idx_rawtx_vsize ON rawtx (vsize);
CREATE INDEX idx_rawtx_blockhash ON rawtx (blockhash);
CREATE INDEX idx_rawtx_blockheight ON rawtx (blockheight);
CREATE INDEX idx_rawtx_blocktime ON rawtx (blocktime);
CREATE INDEX idx_rawtx_confirmations ON rawtx (confirmations);

View File

@@ -1,14 +1,19 @@
#!/bin/sh #!/bin/sh
echo "Checking for labels for watched addresses support in DB..." . ./trace.sh
trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Checking for labels for watched addresses support in DB..."
count=$(sqlite3 $DB_FILE "select count(*) from pragma_table_info('watching') where name='label'") count=$(sqlite3 $DB_FILE "select count(*) from pragma_table_info('watching') where name='label'")
if [ "${count}" -eq "0" ]; then if [ "${count}" -eq "0" ]; then
# label not there, we have to migrate # label not there, we have to migrate
echo "Migrating database for labels for watched addresses support..." trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Migrating database for labels for watched addresses support..."
echo "Backing up current DB..." trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Backing up current DB..."
cp $DB_FILE $DB_FILE-sqlmigrate20210808_0.7.0-0.8.0 cp $DB_FILE $DB_FILE-sqlmigrate20210808_0.7.0-0.8.0
echo "Altering DB..." trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Altering DB..."
cat sqlmigrate20210808_0.7.0-0.8.0.sql | sqlite3 $DB_FILE cat sqlmigrate20210808_0.7.0-0.8.0.sql | sqlite3 $DB_FILE
returncode=$?
trace_rc ${returncode}
exit ${returncode}
else else
echo "Database labels for watched addresses support migration already done, skipping!" trace "[sqlmigrate20210808_0.7.0-0.8.0.sh] Database labels for watched addresses support migration already done, skipping!"
fi fi

View File

@@ -1,17 +1,19 @@
#!/bin/sh #!/bin/sh
echo "Checking for rawtx database support in DB..." . ./trace.sh
if [ ! -e ${DB_FILE}_rawtx ]; then
# rawtx database not found trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] Checking for new indexes in DB..."
echo "Migrating database for rawtx database support..." sqlite3 $DB_FILE ".indexes" | grep "idx_watching_watching" > /dev/null
echo "Backing up current DB..." if [ "$?" -eq "1" ]; then
cp $DB_FILE $DB_FILE-sqlmigrate20210928_0.7.0-0.8.0 # idx_watching_watching index not found
echo "Altering DB..." trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] Migrating database with new indexes..."
trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] Backing up current DB..."
cp $DB_FILE $DB_FILE-sqlmigrate20210928_0.7.0-0.8.0
trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] Altering DB..."
cat sqlmigrate20210928_0.7.0-0.8.0.sql | sqlite3 $DB_FILE cat sqlmigrate20210928_0.7.0-0.8.0.sql | sqlite3 $DB_FILE
echo "Creating new DB..." returncode=$?
cat rawtx.sql | sqlite3 ${DB_FILE}_rawtx trace_rc ${returncode}
echo "Inserting table in new DB..." exit ${returncode}
sqlite3 -cmd ".timeout 25000" ${DB_FILE} "ATTACH DATABASE \"${DB_FILE}_rawtx\" AS other; INSERT INTO other.rawtx SELECT * FROM tx; DETACH other;"
else else
echo "rawtx database support migration already done, skipping!" trace "[sqlmigrate20210928_0.7.0-0.8.0.sh] New indexes migration already done, skipping!"
fi fi

View File

@@ -0,0 +1,55 @@
#!/bin/sh
. ./trace.sh
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Checking if postgres is set up..."
psql -h postgres -U cyphernode -c "\d" | grep "cyphernode_props" > /dev/null
if [ "$?" -eq "1" ]; then
# if cyphernode_props table doesn't exist, it's probably because the database hasn't been set up yet
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Creating postgres database..."
psql -h postgres -f cyphernode.postgresql -U cyphernode
returncode=$?
trace_rc ${returncode}
[ "${returncode}" -eq "0" ] || exit ${returncode}
else
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] PostgreSQL database already created, skipping!"
fi
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Checking if postgres is loaded/imported..."
version=$(psql -qAtX -h postgres -U cyphernode -c "select value from cyphernode_props where property='version'")
returncode=$?
if [ "${version}" != "0.2" ]; then
# if cyphernode_props_id_seq isn't set, it's probably because the database hasn't been loaded/imported yet
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Extracting and converting sqlite3 data..."
cat sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extract.sql | sqlite3 $DB_FILE
returncode=$?
trace_rc ${returncode}
[ "${returncode}" -eq "0" ] || exit ${returncode}
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Creating import file for postgres..."
mv sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql ${DB_PATH}/
sed -ie 's/^\(INSERT.*\);$/\1 ON CONFLICT DO NOTHING;/g' ${DB_PATH}/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Appending postgresql sequence creation..."
echo "
select setval('cyphernode_props_id_seq', (SELECT MAX(id) FROM cyphernode_props));
select setval('ln_invoice_id_seq', (SELECT MAX(id) FROM ln_invoice));
select setval('recipient_id_seq', (SELECT MAX(id) FROM recipient));
select setval('stamp_id_seq', (SELECT MAX(id) FROM stamp));
select setval('tx_id_seq', (SELECT MAX(id) FROM tx));
select setval('watching_by_pub32_id_seq', (SELECT MAX(id) FROM watching_by_pub32));
select setval('watching_by_txid_id_seq', (SELECT MAX(id) FROM watching_by_txid));
select setval('watching_id_seq', (SELECT MAX(id) FROM watching));
select setval('batcher_id_seq', (SELECT MAX(id) FROM batcher));
update cyphernode_props set value='0.2' where property='version';
commit;
" >> ${DB_PATH}/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] Importing sqlite3 data into postgresql..."
psql -v ON_ERROR_STOP=on -h postgres -f ${DB_PATH}/sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql -U cyphernode
returncode=$?
trace_rc ${returncode}
[ "${returncode}" -eq "0" ] || exit ${returncode}
else
trace "[sqlmigrate20211105_0.7.0-0.8.0.sh] PostgreSQL database already loaded, skipping!"
fi
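A quick way to confirm the whole migration went through is to reuse the same version property the script itself checks; a sketch:

# Prints 0.2 once the sqlite3 data has been imported into PostgreSQL.
version=$(psql -qAtX -h postgres -U cyphernode -c "select value from cyphernode_props where property='version'")
if [ "${version}" = "0.2" ]; then
  echo "Migration complete (cyphernode_props version=${version})"
else
  echo "Migration not done yet (cyphernode_props version=${version})"
fi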

View File

@@ -0,0 +1,26 @@
.output sqlmigrate20211105_0.7.0-0.8.0_sqlite3-extracted-data.sql
select "BEGIN;";
.headers on
.mode insert watching_by_pub32
select id,pub32,label,derivation_path,callback0conf,callback1conf,last_imported_n,case when watching=1 then 'TRUE' else 'FALSE' end as watching,inserted_ts from watching_by_pub32;
.mode insert watching
select id,address,label,case when watching=1 then 'TRUE' else 'FALSE' end as watching,callback0conf,case when calledback0conf=1 then 'TRUE' else 'FALSE' end as calledback0conf,callback1conf,case when calledback1conf=1 then 'TRUE' else 'FALSE' end as calledback1conf,case when imported=1 then 'TRUE' else 'FALSE' end as imported,watching_by_pub32_id,pub32_index,event_message,inserted_ts from watching;
.mode insert tx
select id,txid,hash,confirmations,timereceived,fee,size,vsize,case when is_replaceable=1 then 'TRUE' else 'FALSE' end as is_replaceable,blockhash,blockheight,blocktime,conf_target,inserted_ts from tx;
.mode insert watching_tx
select * from watching_tx;
.mode insert batcher
select * from batcher;
.mode insert recipient
select id,address,amount,tx_id,inserted_ts,webhook_url,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,calledback_ts,batcher_id,label from recipient;
.mode insert watching_by_txid
select id,txid,case when watching=1 then 'TRUE' else 'FALSE' end as watching,callback1conf,case when calledback1conf=1 then 'TRUE' else 'FALSE' end as calledback1conf,callbackxconf,case when calledbackxconf=1 then 'TRUE' else 'FALSE' end as calledbackxconf,nbxconf,inserted_ts from watching_by_txid;
.mode insert stamp
select id,hash,callbackUrl,case when requested=1 then 'TRUE' else 'FALSE' end as requested,case when upgraded=1 then 'TRUE' else 'FALSE' end as upgraded,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,inserted_ts from stamp;
.mode insert ln_invoice
select id,label,bolt11,payment_hash,msatoshi,status,pay_index,msatoshi_received,paid_at,description,expires_at,callback_url,case when calledback=1 then 'TRUE' else 'FALSE' end as calledback,case when callback_failed=1 then 'TRUE' else 'FALSE' end as callback_failed,inserted_ts from ln_invoice;
-- cyphernode_props rows were already inserted in db creation, let's update them here
.headers off
.mode list cyphernode_props
select 'update cyphernode_props set value=''' || value || ''', inserted_ts=''' || inserted_ts || ''' where id=' || id || ';' from cyphernode_props;
.quit
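For context on the extract script: sqlite3's ".mode insert TABLENAME" renders each selected row as an INSERT statement for the named table, and with ".headers on" the column list is included, which is what makes the dump directly loadable by psql. A hedged sketch of what one row of the batcher table would come out as:

# Produces something like:
#   INSERT INTO batcher(id,label,conf_target,feerate) VALUES(1,'default',6,NULL);
# which the sed step in sqlmigrate20211105_0.7.0-0.8.0.sh then suffixes with
# "ON CONFLICT DO NOTHING" so re-running the import is harmless.
printf '.headers on\n.mode insert batcher\nselect * from batcher;\n' | sqlite3 "$DB_FILE"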

View File

@@ -9,6 +9,8 @@ createbatcher() {
# POST http://192.168.111.152:8080/createbatcher # POST http://192.168.111.152:8080/createbatcher
# #
# Will UPDATE the batcher if it already exists (as per label)
#
# args: # args:
# - batcherLabel, optional, id can be used to reference the batcher # - batcherLabel, optional, id can be used to reference the batcher
# - confTarget, optional, overridden by batchspend's confTarget, default Bitcoin Core conf_target will be used if not supplied # - confTarget, optional, overridden by batchspend's confTarget, default Bitcoin Core conf_target will be used if not supplied
@@ -22,7 +24,8 @@ createbatcher() {
local request=${1} local request=${1}
local response local response
local label=$(echo "${request}" | jq ".batcherLabel") local returncode
local label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[createbatcher] label=${label}" trace "[createbatcher] label=${label}"
local conf_target=$(echo "${request}" | jq ".confTarget") local conf_target=$(echo "${request}" | jq ".confTarget")
trace "[createbatcher] conf_target=${conf_target}" trace "[createbatcher] conf_target=${conf_target}"
@@ -37,13 +40,20 @@ createbatcher() {
local batcher_id local batcher_id
batcher_id=$(sql "INSERT OR IGNORE INTO batcher (label, conf_target, feerate) VALUES (${label}, ${conf_target}, ${feerate}); SELECT LAST_INSERT_ROWID();") batcher_id=$(sql "INSERT INTO batcher (label, conf_target, feerate)"\
" VALUES ('${label}', ${conf_target}, ${feerate})"\
" ON CONFLICT (label) DO"\
" UPDATE SET conf_target=${conf_target}, feerate=${feerate}"\
" RETURNING id" \
"SELECT id FROM batcher WHERE label='${label}'")
returncode=$?
trace_rc ${returncode}
if ("${batcher_id}" -eq "0"); then if [ "${returncode}" -ne "0" ]; then
trace "[createbatcher] Could not insert" trace "[createbatcher] Could not insert"
response='{"result":null,"error":{"code":-32700,"message":"Could not create batcher, label probably already exists","data":'${request}'}}' response='{"result":null,"error":{"code":-32700,"message":"Could not create/update batcher","data":'${request}'}}'
else else
trace "[createbatcher] Inserted" trace "[createbatcher] Inserted or updated, response=${batcher_id}"
response='{"result":{"batcherId":'${batcher_id}'},"error":null}' response='{"result":{"batcherId":'${batcher_id}'},"error":null}'
fi fi
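SQLite's INSERT OR IGNORE followed by SELECT LAST_INSERT_ROWID() has no direct PostgreSQL counterpart, which is why the query above becomes an upsert that returns the id in the same round trip. A minimal sketch of that pattern on its own, outside the proxy's sql helper (the label and confTarget values are invented):

psql -qAtX -h postgres -U cyphernode <<'EOF'
INSERT INTO batcher (label, conf_target, feerate)
VALUES ('lowfee', 32, NULL)
ON CONFLICT (label) DO UPDATE
  SET conf_target = EXCLUDED.conf_target, feerate = EXCLUDED.feerate
RETURNING id;
EOF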
@@ -79,7 +89,7 @@ updatebatcher() {
local id=$(echo "${request}" | jq ".batcherId") local id=$(echo "${request}" | jq ".batcherId")
trace "[updatebatcher] id=${id}" trace "[updatebatcher] id=${id}"
local label=$(echo "${request}" | jq ".batcherLabel") local label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[updatebatcher] label=${label}" trace "[updatebatcher] label=${label}"
local conf_target=$(echo "${request}" | jq ".confTarget") local conf_target=$(echo "${request}" | jq ".confTarget")
trace "[updatebatcher] conf_target=${conf_target}" trace "[updatebatcher] conf_target=${conf_target}"
@@ -99,12 +109,12 @@ updatebatcher() {
# fi # fi
if [ "${id}" = "null" ]; then if [ "${id}" = "null" ]; then
whereclause="label=${label}" whereclause="label='${label}'"
else else
whereclause="id = ${id}" whereclause="id = ${id}"
fi fi
sql "UPDATE batcher set label=${label}, conf_target=${conf_target}, feerate=${feerate} WHERE ${whereclause}" sql "UPDATE batcher set label='${label}', conf_target=${conf_target}, feerate=${feerate} WHERE ${whereclause}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -ne 0 ]; then if [ "${returncode}" -ne 0 ]; then
@@ -151,13 +161,13 @@ addtobatch() {
trace "[addtobatch] address=${address}" trace "[addtobatch] address=${address}"
local amount=$(echo "${request}" | jq ".amount") local amount=$(echo "${request}" | jq ".amount")
trace "[addtobatch] amount=${amount}" trace "[addtobatch] amount=${amount}"
local label=$(echo "${request}" | jq ".outputLabel") local label=$(echo "${request}" | jq -r ".outputLabel")
trace "[addtobatch] label=${label}" trace "[addtobatch] label=${label}"
local batcher_id=$(echo "${request}" | jq ".batcherId") local batcher_id=$(echo "${request}" | jq ".batcherId")
trace "[addtobatch] batcher_id=${batcher_id}" trace "[addtobatch] batcher_id=${batcher_id}"
local batcher_label=$(echo "${request}" | jq ".batcherLabel") local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[addtobatch] batcher_label=${batcher_label}" trace "[addtobatch] batcher_label=${batcher_label}"
local webhook_url=$(echo "${request}" | jq ".webhookUrl") local webhook_url=$(echo "${request}" | jq -r ".webhookUrl")
trace "[addtobatch] webhook_url=${webhook_url}" trace "[addtobatch] webhook_url=${webhook_url}"
# Let's lowercase bech32 addresses # Let's lowercase bech32 addresses
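Aside from the SQL itself, note the recurring jq to jq -r switch in these hunks: jq keeps the JSON string encoding (surrounding double quotes), while jq -r emits the raw value, which is what you want once the value is spliced into a single-quoted SQL literal. A tiny illustration with made-up input:

echo '{"batcherLabel":"lowfee"}' | jq ".batcherLabel"     # prints "lowfee" (quotes included)
echo '{"batcherLabel":"lowfee"}' | jq -r ".batcherLabel"  # prints lowfee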
@@ -185,7 +195,7 @@ addtobatch() {
if [ "${batcher_id}" = "null" ]; then if [ "${batcher_id}" = "null" ]; then
# Using batcher_label # Using batcher_label
batcher_id=$(sql "SELECT id FROM batcher WHERE label=${batcher_label}") batcher_id=$(sql "SELECT id FROM batcher WHERE label='${batcher_label}'")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
fi fi
@@ -195,7 +205,7 @@ addtobatch() {
response='{"result":null,"error":{"code":-32700,"message":"batcher not found","data":'${request}'}}' response='{"result":null,"error":{"code":-32700,"message":"batcher not found","data":'${request}'}}'
else else
# Check if address already pending for this batcher... # Check if address already pending for this batcher...
inserted_id=$(sql "SELECT id FROM recipient WHERE LOWER(address)=LOWER(\"${address}\") AND tx_id IS NULL AND batcher_id=${batcher_id}") inserted_id=$(sql "SELECT id FROM recipient WHERE LOWER(address)=LOWER('${address}') AND tx_id IS NULL AND batcher_id=${batcher_id}")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -211,7 +221,9 @@ addtobatch() {
fi fi
# Insert the new destination # Insert the new destination
inserted_id=$(sql "INSERT INTO recipient (address, amount, webhook_url, batcher_id, label) VALUES (\"${address}\", ${amount}, ${webhook_url}, ${batcher_id}, ${label}); SELECT LAST_INSERT_ROWID();") inserted_id=$(sql "INSERT INTO recipient (address, amount, webhook_url, batcher_id, label)"\
" VALUES ('${address}', ${amount}, '${webhook_url}', ${batcher_id}, '${label}')"\
" RETURNING id")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -280,7 +292,7 @@ removefrombatch() {
if [ "${returncode}" -ne 0 ]; then if [ "${returncode}" -ne 0 ]; then
response='{"result":null,"error":{"code":-32700,"message":"Output was not removed","data":'${request}'}}' response='{"result":null,"error":{"code":-32700,"message":"Output was not removed","data":'${request}'}}'
else else
row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}") row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -336,7 +348,7 @@ batchspend() {
local batcher_id=$(echo "${request}" | jq ".batcherId") local batcher_id=$(echo "${request}" | jq ".batcherId")
trace "[batchspend] batcher_id=${batcher_id}" trace "[batchspend] batcher_id=${batcher_id}"
local batcher_label=$(echo "${request}" | jq ".batcherLabel") local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[batchspend] batcher_label=${batcher_label}" trace "[batchspend] batcher_label=${batcher_label}"
local conf_target=$(echo "${request}" | jq ".confTarget") local conf_target=$(echo "${request}" | jq ".confTarget")
trace "[batchspend] conf_target=${conf_target}" trace "[batchspend] conf_target=${conf_target}"
@@ -351,7 +363,7 @@ batchspend() {
if [ "${batcher_id}" = "null" ]; then if [ "${batcher_id}" = "null" ]; then
# Using batcher_label # Using batcher_label
whereclause="label=${batcher_label}" whereclause="label='${batcher_label}'"
else else
whereclause="id=${batcher_id}" whereclause="id=${batcher_id}"
fi fi
@@ -423,11 +435,11 @@ batchspend() {
trace "[batchspend] webhook_url=${webhook_url}" trace "[batchspend] webhook_url=${webhook_url}"
if [ -z "${recipientsjson}" ]; then if [ -z "${recipientsjson}" ]; then
whereclause="\"${recipient_id}\"" whereclause="${recipient_id}"
recipientsjson="\"${address}\":${amount}" recipientsjson="\"${address}\":${amount}"
webhooks_data="{\"outputId\":${recipient_id},\"address\":\"${address}\",\"amount\":${amount},\"webhookUrl\":\"${webhook_url}\"}" webhooks_data="{\"outputId\":${recipient_id},\"address\":\"${address}\",\"amount\":${amount},\"webhookUrl\":\"${webhook_url}\"}"
else else
whereclause="${whereclause},\"${recipient_id}\"" whereclause="${whereclause},${recipient_id}"
recipientsjson="${recipientsjson},\"${address}\":${amount}" recipientsjson="${recipientsjson},\"${address}\":${amount}"
webhooks_data="${webhooks_data},{\"outputId\":${recipient_id},\"address\":\"${address}\",\"amount\":${amount},\"webhookUrl\":\"${webhook_url}\"}" webhooks_data="${webhooks_data},{\"outputId\":${recipient_id},\"address\":\"${address}\",\"amount\":${amount},\"webhookUrl\":\"${webhook_url}\"}"
fi fi
@@ -452,7 +464,7 @@ batchspend() {
tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n') tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n')
# Amounts and fees are negative when spending so we absolute those fields # Amounts and fees are negative when spending so we absolute those fields
local tx_hash=$(echo "${tx_raw_details}" | jq '.result.hash') local tx_hash=$(echo "${tx_raw_details}" | jq -r '.result.hash')
local tx_ts_firstseen=$(echo "${tx_details}" | jq '.result.timereceived') local tx_ts_firstseen=$(echo "${tx_details}" | jq '.result.timereceived')
local tx_amount=$(echo "${tx_details}" | jq '.result.amount | fabs' | awk '{ printf "%.8f", $0 }') local tx_amount=$(echo "${tx_details}" | jq '.result.amount | fabs' | awk '{ printf "%.8f", $0 }')
local tx_size=$(echo "${tx_raw_details}" | jq '.result.size') local tx_size=$(echo "${tx_raw_details}" | jq '.result.size')
@@ -462,25 +474,20 @@ batchspend() {
tx_replaceable=$([ "${tx_replaceable}" = "yes" ] && echo "true" || echo "false") tx_replaceable=$([ "${tx_replaceable}" = "yes" ] && echo "true" || echo "false")
trace "[batchspend] tx_replaceable=${tx_replaceable}" trace "[batchspend] tx_replaceable=${tx_replaceable}"
local fees=$(echo "${tx_details}" | jq '.result.fee | fabs' | awk '{ printf "%.8f", $0 }') local fees=$(echo "${tx_details}" | jq '.result.fee | fabs' | awk '{ printf "%.8f", $0 }')
# Sometimes raw tx are too long to be passed as paramater, so let's write
# it to a temp file for it to be read by sqlite3 and then delete the file
echo "${tx_raw_details}" > batchspend-rawtx-${txid}-$$.blob
# Get the info on the batch before setting it to done # Get the info on the batch before setting it to done
row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}") row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient WHERE tx_id IS NULL AND batcher_id=${batcher_id}")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
# Let's insert the txid in our little DB -- then we'll already have it when receiving confirmation # Let's insert the txid in our little DB -- then we'll already have it when receiving confirmation
sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target, raw_tx) VALUES (\"${txid}\", ${tx_hash}, 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target}, readfile('batchspend-rawtx-${txid}-$$.blob'))" id_inserted=$(sql "INSERT INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target)"\
trace_rc $? " VALUES ('${txid}', '${tx_hash}', 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target})"\
id_inserted=$(sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target) VALUES (\"${txid}\", ${tx_hash}, 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target}); SELECT LAST_INSERT_ROWID();") " RETURNING id" \
"SELECT id FROM tx WHERE txid='${txid}'")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then if [ "${returncode}" -eq 0 ]; then
if [ "${id_inserted}" -eq 0 ]; then
id_inserted=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"")
fi
trace "[batchspend] id_inserted: ${id_inserted}" trace "[batchspend] id_inserted: ${id_inserted}"
sql "UPDATE recipient SET tx_id=${id_inserted} WHERE id IN (${whereclause})" sql "UPDATE recipient SET tx_id=${id_inserted} WHERE id IN (${whereclause})"
trace_rc $? trace_rc $?
@@ -495,13 +502,10 @@ batchspend() {
trace "[batchspend] total=${total}" trace "[batchspend] total=${total}"
response='{"result":{"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total} response='{"result":{"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total}
response="${response},\"status\":\"accepted\",\"txid\":\"${txid}\",\"hash\":${tx_hash},\"details\":{\"firstseen\":${tx_ts_firstseen},\"size\":${tx_size},\"vsize\":${tx_vsize},\"replaceable\":${tx_replaceable},\"fee\":${fees}},\"outputs\":[${webhooks_data}]}" response="${response},\"status\":\"accepted\",\"txid\":\"${txid}\",\"hash\":\"${tx_hash}\",\"details\":{\"firstseen\":${tx_ts_firstseen},\"size\":${tx_size},\"vsize\":${tx_vsize},\"replaceable\":${tx_replaceable},\"fee\":${fees}},\"outputs\":[${webhooks_data}]}"
response="${response},\"error\":null}" response="${response},\"error\":null}"
# Delete the temp file containing the raw tx (see above) batch_webhooks "[${webhooks_data}]" '"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total}',"status":"accepted","txid":"'${txid}'","hash":"'${tx_hash}'","details":{"firstseen":'${tx_ts_firstseen}',"size":'${tx_size}',"vsize":'${tx_vsize}',"replaceable":'${tx_replaceable}',"fee":'${fees}'}'
rm batchspend-rawtx-${txid}-$$.blob
batch_webhooks "[${webhooks_data}]" '"batcherId":'${batcher_id}',"confTarget":'${conf_target}',"nbOutputs":'${count}',"oldest":"'${oldest}'","total":'${total}',"status":"accepted","txid":"'${txid}'","hash":'${tx_hash}',"details":{"firstseen":'${tx_ts_firstseen}',"size":'${tx_size}',"vsize":'${tx_vsize}',"replaceable":'${tx_replaceable}',"fee":'${fees}'}'
else else
local message=$(echo "${data}" | jq -e ".error.message") local message=$(echo "${data}" | jq -e ".error.message")
@@ -536,7 +540,7 @@ batch_check_webhooks() {
local total local total
local tx_id local tx_id
local batching=$(sql "SELECT address, amount, r.id, webhook_url, b.id, t.txid, t.hash, t.timereceived, t.fee, t.size, t.vsize, t.is_replaceable, t.conf_target, t.id FROM recipient r, batcher b, tx t WHERE r.batcher_id=b.id AND r.tx_id=t.id AND NOT calledback AND tx_id IS NOT NULL AND webhook_url IS NOT NULL") local batching=$(sql "SELECT address, amount, r.id, webhook_url, b.id, t.txid, t.hash, t.timereceived, t.fee, t.size, t.vsize, t.is_replaceable::text, t.conf_target, t.id FROM recipient r, batcher b, tx t WHERE r.batcher_id=b.id AND r.tx_id=t.id AND NOT calledback AND tx_id IS NOT NULL AND webhook_url IS NOT NULL")
trace "[batch_check_webhooks] batching=${batching}" trace "[batch_check_webhooks] batching=${batching}"
local IFS=$'\n' local IFS=$'\n'
@@ -566,7 +570,6 @@ batch_check_webhooks() {
tx_vsize=$(echo "${row}" | cut -d '|' -f11) tx_vsize=$(echo "${row}" | cut -d '|' -f11)
trace "[batch_check_webhooks] tx_vsize=${tx_vsize}" trace "[batch_check_webhooks] tx_vsize=${tx_vsize}"
tx_replaceable=$(echo "${row}" | cut -d '|' -f12) tx_replaceable=$(echo "${row}" | cut -d '|' -f12)
tx_replaceable=$([ "${tx_replaceable}" -eq "1" ] && echo "true" || echo "false")
trace "[batch_check_webhooks] tx_replaceable=${tx_replaceable}" trace "[batch_check_webhooks] tx_replaceable=${tx_replaceable}"
conf_target=$(echo "${row}" | cut -d '|' -f13) conf_target=$(echo "${row}" | cut -d '|' -f13)
trace "[batch_check_webhooks] conf_target=${conf_target}" trace "[batch_check_webhooks] conf_target=${conf_target}"
@@ -578,7 +581,7 @@ batch_check_webhooks() {
# I know this query for each output is not very efficient, but this function should not execute often, only in case of # I know this query for each output is not very efficient, but this function should not execute often, only in case of
# failed callbacks on batches... # failed callbacks on batches...
# Get the info on the batch # Get the info on the batch
row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), 0), COALESCE(SUM(amount), 0.00000000) FROM recipient r WHERE tx_id=\"${tx_id}\"") row=$(sql "SELECT COUNT(id), COALESCE(MIN(inserted_ts), DATE '0001-01-01'), COALESCE(SUM(amount), 0.00000000) FROM recipient r WHERE tx_id='${tx_id}'")
# Use the selected row above # Use the selected row above
count=$(echo "${row}" | cut -d '|' -f1) count=$(echo "${row}" | cut -d '|' -f1)
@@ -654,8 +657,13 @@ batch_webhooks() {
fi fi
done done
sql "UPDATE recipient SET calledback=1, calledback_ts=CURRENT_TIMESTAMP WHERE id IN (${successful_recipient_ids})" if [ -n "${successful_recipient_ids}" ]; then
trace_rc $? trace "[batch_webhooks] We have successful callbacks, let's update the db..."
sql "UPDATE recipient SET calledback=true, calledback_ts=CURRENT_TIMESTAMP WHERE id IN (${successful_recipient_ids})"
trace_rc $?
else
trace "[batch_webhooks] We don't have successful callbacks, no need to update the db!"
fi
} }
listbatchers() { listbatchers() {
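The disappearance of the old 0/1-to-true/false massaging in these webhook functions follows from PostgreSQL having a real boolean type; the is_replaceable::text cast simply makes the value come back as the literal string true or false, ready to embed in the JSON callbacks, and the NOT NULL shorthand SQLite tolerated in WHERE clauses is spelled out as IS NOT NULL. A sketch of the output format being relied on (| is the column separator the proxy's cut -d '|' parsing expects):

psql -qAtX -h postgres -U cyphernode -c "SELECT true, true::text, false::text;"
# expected output: t|true|false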
@@ -671,7 +679,7 @@ listbatchers() {
# "error":null} # "error":null}
local batchers=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL GROUP BY b.id") local batchers=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL GROUP BY b.id ORDER BY b.id")
trace "[listbatchers] batchers=${batchers}" trace "[listbatchers] batchers=${batchers}"
local returncode local returncode
@@ -717,7 +725,7 @@ getbatcher() {
local batcher_id=$(echo "${request}" | jq ".batcherId") local batcher_id=$(echo "${request}" | jq ".batcherId")
trace "[getbatcher] batcher_id=${batcher_id}" trace "[getbatcher] batcher_id=${batcher_id}"
local batcher_label=$(echo "${request}" | jq ".batcherLabel") local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[getbatcher] batcher_label=${batcher_label}" trace "[getbatcher] batcher_label=${batcher_label}"
if [ "${batcher_id}" = "null" ] && [ "${batcher_label}" = "null" ]; then if [ "${batcher_id}" = "null" ] && [ "${batcher_label}" = "null" ]; then
@@ -728,13 +736,13 @@ getbatcher() {
if [ "${batcher_id}" = "null" ]; then if [ "${batcher_id}" = "null" ]; then
# Using batcher_label # Using batcher_label
whereclause="b.label=${batcher_label}" whereclause="b.label='${batcher_label}'"
else else
# Using batcher_id # Using batcher_id
whereclause="b.id=${batcher_id}" whereclause="b.id=${batcher_id}"
fi fi
batcher=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL WHERE ${whereclause} GROUP BY b.id") batcher=$(sql "SELECT b.id, '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) || '}' FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id AND r.tx_id IS NULL WHERE ${whereclause} GROUP BY b.id")
trace "[getbatcher] batcher=${batcher}" trace "[getbatcher] batcher=${batcher}"
if [ -n "${batcher}" ]; then if [ -n "${batcher}" ]; then
@@ -797,9 +805,9 @@ getbatchdetails() {
local batcher_id=$(echo "${request}" | jq ".batcherId") local batcher_id=$(echo "${request}" | jq ".batcherId")
trace "[getbatchdetails] batcher_id=${batcher_id}" trace "[getbatchdetails] batcher_id=${batcher_id}"
local batcher_label=$(echo "${request}" | jq ".batcherLabel") local batcher_label=$(echo "${request}" | jq -r ".batcherLabel")
trace "[getbatchdetails] batcher_label=${batcher_label}" trace "[getbatchdetails] batcher_label=${batcher_label}"
local txid=$(echo "${request}" | jq ".txid") local txid=$(echo "${request}" | jq -r ".txid")
trace "[getbatchdetails] txid=${txid}" trace "[getbatchdetails] txid=${txid}"
if [ "${batcher_id}" = "null" ] && [ "${batcher_label}" = "null" ]; then if [ "${batcher_id}" = "null" ] && [ "${batcher_label}" = "null" ]; then
@@ -810,7 +818,7 @@ getbatchdetails() {
if [ "${batcher_id}" = "null" ]; then if [ "${batcher_id}" = "null" ]; then
# Using batcher_label # Using batcher_label
whereclause="b.label=${batcher_label}" whereclause="b.label='${batcher_label}'"
else else
# Using batcher_id # Using batcher_id
whereclause="b.id=${batcher_id}" whereclause="b.id=${batcher_id}"
@@ -818,7 +826,7 @@ getbatchdetails() {
if [ "${txid}" != "null" ]; then if [ "${txid}" != "null" ]; then
# Using txid # Using txid
whereclause="${whereclause} AND t.txid=${txid}" whereclause="${whereclause} AND t.txid='${txid}'"
else else
# null txid # null txid
whereclause="${whereclause} AND t.txid IS NULL" whereclause="${whereclause} AND t.txid IS NULL"
@@ -826,7 +834,7 @@ getbatchdetails() {
fi fi
# First get the batch summary # First get the batch summary
batch=$(sql "SELECT b.id, COALESCE(t.id, NULL), '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || b.conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' ||COALESCE(MIN(r.inserted_ts), 0) || '\",\"total\":' ||COALESCE(SUM(amount), 0.00000000) FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id ${outerclause} LEFT JOIN tx t ON t.id=r.tx_id WHERE ${whereclause} GROUP BY b.id") batch=$(sql "SELECT b.id, COALESCE(t.id, NULL), '{\"batcherId\":' || b.id || ',\"batcherLabel\":\"' || b.label || '\",\"confTarget\":' || b.conf_target || ',\"nbOutputs\":' || COUNT(r.id) || ',\"oldest\":\"' || COALESCE(MIN(r.inserted_ts), DATE '0001-01-01') || '\",\"total\":' || COALESCE(SUM(amount), 0.00000000) FROM batcher b LEFT JOIN recipient r ON r.batcher_id=b.id ${outerclause} LEFT JOIN tx t ON t.id=r.tx_id WHERE ${whereclause} GROUP BY b.id, t.id")
trace "[getbatchdetails] batch=${batch}" trace "[getbatchdetails] batch=${batch}"
if [ -n "${batch}" ]; then if [ -n "${batch}" ]; then
@@ -839,7 +847,7 @@ getbatchdetails() {
# Using txid # Using txid
outerclause="AND r.tx_id=${tx_id}" outerclause="AND r.tx_id=${tx_id}"
tx=$(sql "SELECT '\"txid\":\"' || txid || '\",\"hash\":\"' || hash || '\",\"details\":{\"firstseen\":' || timereceived || ',\"size\":' || size || ',\"vsize\":' || vsize || ',\"replaceable\":' || CASE is_replaceable WHEN 1 THEN 'true' ELSE 'false' END || ',\"fee\":' || fee || '}' FROM tx WHERE id=${tx_id}") tx=$(sql "SELECT '\"txid\":\"' || txid || '\",\"hash\":\"' || hash || '\",\"details\":{\"firstseen\":' || timereceived || ',\"size\":' || size || ',\"vsize\":' || vsize || ',\"replaceable\":' || is_replaceable || ',\"fee\":' || fee || '}' FROM tx WHERE id=${tx_id}")
else else
# null txid # null txid
outerclause="AND r.tx_id IS NULL" outerclause="AND r.tx_id IS NULL"

View File

@@ -78,6 +78,14 @@ convert_pub32() {
local checksum local checksum
local pub32_dest local pub32_dest
case "${pub32_from}" in
${to_type}*)
trace "[convert_pub32] Already in the right format, exiting"
echo "${pub32_from}"
return
;;
esac
case "${to_type}" in case "${to_type}" in
tpub) tpub)
versionbytes="043587cf" versionbytes="043587cf"

View File

@@ -8,8 +8,8 @@ ln_call_lightningd() {
local response local response
local returncode local returncode
trace "[ln_call_lightningd] ./lightning-cli $@" trace "[ln_call_lightningd] ./lightning-cli $(printf " \"%s\"" "$@")"
response=$(./lightning-cli $@) response=$(./lightning-cli "$@")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
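The lightning-cli wrapper change above is about shell quoting rather than the database: an unquoted $@ re-splits every argument on whitespace, so an invoice description containing spaces would arrive at lightning-cli as several separate arguments. A tiny runnable demo of the difference (the demo_* function names are invented for illustration):

demo_unquoted() { printf '[%s]\n' $@ ; }
demo_quoted()   { printf '[%s]\n' "$@" ; }
demo_unquoted "pay me later"   # prints [pay] [me] [later] -- three arguments
demo_quoted   "pay me later"   # prints [pay me later]     -- one argument, as intended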
@@ -39,7 +39,7 @@ ln_create_invoice() {
if [ "${callback_url}" != "null" ]; then if [ "${callback_url}" != "null" ]; then
# If not null, let's add double-quotes so we don't need to add the double-quotes in the sql insert, # If not null, let's add double-quotes so we don't need to add the double-quotes in the sql insert,
# so if it's null, it will insert the actual sql NULL value. # so if it's null, it will insert the actual sql NULL value.
callback_url="\"${callback_url}\"" callback_url="'${callback_url}'"
fi fi
#/proxy $ ./lightning-cli invoice 10000 "t1" "t1d" 60 #/proxy $ ./lightning-cli invoice 10000 "t1" "t1d" 60
@@ -71,36 +71,33 @@ ln_create_invoice() {
# Let's get the connect string if provided in configuration # Let's get the connect string if provided in configuration
local connectstring=$(get_connection_string) local connectstring=$(get_connection_string)
if [ "${msatoshi}" = "null" ]; then id=$(sql "INSERT INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, msatoshi, description, status)"\
sql "INSERT OR IGNORE INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, description, status) VALUES (\"${label}\", \"${bolt11}\", ${callback_url}, \"${payment_hash}\", ${expires_at}, \"${description}\", \"unpaid\")" " VALUES ('${label}','${bolt11}', ${callback_url},'${payment_hash}', ${expires_at}, ${msatoshi}, '${description}', 'unpaid')"\
else " RETURNING id" \
sql "INSERT OR IGNORE INTO ln_invoice (label, bolt11, callback_url, payment_hash, expires_at, msatoshi, description, status) VALUES (\"${label}\", \"${bolt11}\", ${callback_url}, \"${payment_hash}\", ${expires_at}, ${msatoshi}, \"${description}\", \"unpaid\")" "SELECT id FROM ln_invoice WHERE bolt11='${bolt11}'")
fi
trace_rc $?
id=$(sql "SELECT id FROM ln_invoice WHERE bolt11=\"${bolt11}\"")
trace_rc $? trace_rc $?
# { # {
# "id":"", # "id":123,
# "label":"", # "label":"",
# "bolt11":"", # "bolt11":"",
# "connectstring":"", # "connectstring":"",
# "callbackUrl":"", # "callbackUrl":"",
# "payment_hash":"", # "payment_hash":"",
# "msatoshi":, # "msatoshi":123456,
# "status":"unpaid", # "status":"unpaid",
# "description":"", # "description":"",
# "expires_at": # "expires_at":21312312
# } # }
data="{\"id\":\"${id}\"," data="{\"id\":${id},"
data="${data}\"label\":\"${label}\"," data="${data}\"label\":\"${label}\","
data="${data}\"bolt11\":\"${bolt11}\"," data="${data}\"bolt11\":\"${bolt11}\","
if [ -n "${connectstring}" ]; then if [ -n "${connectstring}" ]; then
data="${data}\"connectstring\":\"${connectstring}\"," data="${data}\"connectstring\":\"${connectstring}\","
fi fi
if [ "${callback_url}" != "null" ]; then if [ "${callback_url}" != "null" ]; then
data="${data}\"callbackUrl\":${callback_url}," data="${data}\"callbackUrl\":\"${callback_url}\","
fi fi
data="${data}\"payment_hash\":\"${payment_hash}\"," data="${data}\"payment_hash\":\"${payment_hash}\","
if [ "${msatoshi}" != "null" ]; then if [ "${msatoshi}" != "null" ]; then

View File

@@ -10,8 +10,17 @@ do_callbacks() {
trace "Entering do_callbacks()..." trace "Entering do_callbacks()..."
# If called because we received a confirmation for a specific txid, let's only
# process that txid-related callbacks...
local txid=${1}
local txid_where
if [ -n "${txid}" ]; then
trace "[do_callbacks] txid=${txid}"
txid_where=" AND txid='${txid}'"
fi
# Let's fetch all the watching addresses still being watched but not called back # Let's fetch all the watching addresses still being watched but not called back
local callbacks=$(sql 'SELECT DISTINCT w.callback0conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable, pub32_index, pub32, w.label, derivation_path, event_message, hash FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx ON tx.id = tx_id LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE NOT calledback0conf AND watching_id NOT NULL AND w.callback0conf NOT NULL AND w.watching') local callbacks=$(sql "SELECT DISTINCT w.callback0conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable::text, pub32_index, pub32, w.label, derivation_path, event_message, hash FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx ON tx.id = tx_id LEFT JOIN watching_by_pub32 w32 ON w.watching_by_pub32_id = w32.id WHERE NOT calledback0conf AND watching_id IS NOT NULL AND w.callback0conf IS NOT NULL AND w.watching${txid_where}")
trace "[do_callbacks] callbacks0conf=${callbacks}" trace "[do_callbacks] callbacks0conf=${callbacks}"
local returncode local returncode
@@ -25,12 +34,12 @@ do_callbacks() {
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then if [ "${returncode}" -eq 0 ]; then
address=$(echo "${row}" | cut -d '|' -f2) address=$(echo "${row}" | cut -d '|' -f2)
sql "UPDATE watching SET calledback0conf=1 WHERE address=\"${address}\"" sql "UPDATE watching SET calledback0conf=true WHERE address='${address}'"
trace_rc $? trace_rc $?
fi fi
done done
callbacks=$(sql 'SELECT DISTINCT w.callback1conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable, pub32_index, pub32, w.label, derivation_path, event_message, hash FROM watching w, watching_tx wt, tx t LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE w.id = watching_id AND tx_id = t.id AND NOT calledback1conf AND confirmations>0 AND w.callback1conf NOT NULL AND w.watching') callbacks=$(sql "SELECT DISTINCT w.callback1conf, address, txid, vout, amount, confirmations, timereceived, fee, size, vsize, blockhash, blockheight, blocktime, w.id, is_replaceable::text, pub32_index, pub32, w.label, derivation_path, event_message, hash FROM watching w JOIN watching_tx wt ON w.id = wt.watching_id JOIN tx t ON wt.tx_id = t.id LEFT JOIN watching_by_pub32 w32 ON watching_by_pub32_id = w32.id WHERE NOT calledback1conf AND confirmations>0 AND w.callback1conf IS NOT NULL AND w.watching${txid_where}")
trace "[do_callbacks] callbacks1conf=${callbacks}" trace "[do_callbacks] callbacks1conf=${callbacks}"
for row in ${callbacks} for row in ${callbacks}
@@ -39,19 +48,25 @@ do_callbacks() {
returncode=$? returncode=$?
if [ "${returncode}" -eq 0 ]; then if [ "${returncode}" -eq 0 ]; then
address=$(echo "${row}" | cut -d '|' -f2) address=$(echo "${row}" | cut -d '|' -f2)
sql "UPDATE watching SET calledback1conf=1, watching=0 WHERE address=\"${address}\"" sql "UPDATE watching SET calledback1conf=true, watching=false WHERE address='${address}'"
trace_rc $? trace_rc $?
fi fi
done done
callbacks=$(sql "SELECT id, label, bolt11, callback_url, payment_hash, msatoshi, status, pay_index, msatoshi_received, paid_at, description, expires_at FROM ln_invoice WHERE NOT calledback AND callback_failed") if [ -z "${txid}" ]; then
trace "[do_callbacks] ln_callbacks=${callbacks}" trace "[do_callbacks] Processing LN callbacks..."
for row in ${callbacks} callbacks=$(sql "SELECT id, label, bolt11, callback_url, payment_hash, msatoshi, status, pay_index, msatoshi_received, paid_at, description, expires_at FROM ln_invoice WHERE NOT calledback AND callback_failed")
do trace "[do_callbacks] ln_callbacks=${callbacks}"
ln_manage_callback ${row}
trace_rc $? for row in ${callbacks}
done do
ln_manage_callback ${row}
trace_rc $?
done
else
trace "[do_callbacks] called for a specific txid, skipping LN callbacks"
fi
) 200>./.callbacks.lock ) 200>./.callbacks.lock
} }
@@ -70,7 +85,7 @@ ln_manage_callback() {
if [ -z "${callback_url}" ]; then if [ -z "${callback_url}" ]; then
# No callback url provided for that invoice # No callback url provided for that invoice
trace "[ln_manage_callback] No callback url provided for that invoice" trace "[ln_manage_callback] No callback url provided for that invoice"
sql "UPDATE ln_invoice SET calledback=1 WHERE id=\"${id}\"" sql "UPDATE ln_invoice SET calledback=true WHERE id=${id}"
trace_rc $? trace_rc $?
return return
fi fi
@@ -112,7 +127,7 @@ ln_manage_callback() {
# "expires_at": # "expires_at":
# } # }
data="{\"id\":\"${id}\"," data="{\"id\":${id},"
data="${data}\"label\":\"${label}\"," data="${data}\"label\":\"${label}\","
data="${data}\"bolt11\":\"${bolt11}\"," data="${data}\"bolt11\":\"${bolt11}\","
data="${data}\"callback_url\":\"${callback_url}\"," data="${data}\"callback_url\":\"${callback_url}\","
@@ -132,11 +147,11 @@ ln_manage_callback() {
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then if [ "${returncode}" -eq 0 ]; then
sql "UPDATE ln_invoice SET calledback=1 WHERE id=\"${id}\"" sql "UPDATE ln_invoice SET calledback=true WHERE id=${id}"
trace_rc $? trace_rc $?
else else
trace "[ln_manage_callback] callback failed: ${callback_url}" trace "[ln_manage_callback] callback failed: ${callback_url}"
sql "UPDATE ln_invoice SET callback_failed=1 WHERE id=\"${id}\"" sql "UPDATE ln_invoice SET callback_failed=true WHERE id=${id}"
trace_rc $? trace_rc $?
fi fi
@@ -212,7 +227,6 @@ build_callback() {
vsize=$(echo "${row}" | cut -d '|' -f10) vsize=$(echo "${row}" | cut -d '|' -f10)
trace "[build_callback] vsize=${vsize}" trace "[build_callback] vsize=${vsize}"
is_replaceable=$(echo "${row}" | cut -d '|' -f15) is_replaceable=$(echo "${row}" | cut -d '|' -f15)
is_replaceable=$([ "${is_replaceable}" -eq "1" ] && echo "true" || echo "false")
trace "[build_callback] is_replaceable=${is_replaceable}" trace "[build_callback] is_replaceable=${is_replaceable}"
blockhash=$(echo "${row}" | cut -d '|' -f11) blockhash=$(echo "${row}" | cut -d '|' -f11)
trace "[build_callback] blockhash=${blockhash}" trace "[build_callback] blockhash=${blockhash}"
@@ -234,7 +248,7 @@ build_callback() {
event_message=$(echo "${row}" | cut -d '|' -f20) event_message=$(echo "${row}" | cut -d '|' -f20)
trace "[build_callback] event_message=${event_message}" trace "[build_callback] event_message=${event_message}"
data="{\"id\":\"${id}\"," data="{\"id\":${id},"
data="${data}\"address\":\"${address}\"," data="${data}\"address\":\"${address}\","
data="${data}\"txid\":\"${txid}\"," data="${data}\"txid\":\"${txid}\","
data="${data}\"hash\":\"${hash}\"," data="${data}\"hash\":\"${hash}\","
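With is_replaceable selected as is_replaceable::text, PostgreSQL hands back the literal strings true/false, so the old 1/0-to-true/false remapping in the shell was dropped, and the numeric id is now emitted unquoted in the callback JSON. A quick check of the cast (connection values are placeholders):

    # Boolean-to-text casting in PostgreSQL yields 'true'/'false', ready to splice into JSON.
    psql -qAtX -h postgres -U cyphernode -d cyphernode -c "SELECT true::text, false::text"
    # unaligned output: true|false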

View File

@@ -9,8 +9,10 @@ do_callbacks_txid() {
trace "Entering do_callbacks_txid()..." trace "Entering do_callbacks_txid()..."
# Let's check the 1-conf (newly mined) watched txids that are included in the new block...
# Let's fetch all the watching txid still being watched but not called back # Let's fetch all the watching txid still being watched but not called back
local callbacks=$(sql 'SELECT id, txid, callback1conf, 1 FROM watching_by_txid WHERE watching AND callback1conf NOT NULL AND NOT calledback1conf') local callbacks=$(sql "SELECT id, txid, callback1conf, 1 FROM watching_by_txid WHERE watching AND callback1conf IS NOT NULL AND NOT calledback1conf")
trace "[do_callbacks_txid] callbacks1conf=${callbacks}" trace "[do_callbacks_txid] callbacks1conf=${callbacks}"
local returncode local returncode
@@ -25,14 +27,16 @@ do_callbacks_txid() {
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then if [ "${returncode}" -eq "0" ]; then
id=$(echo "${row}" | cut -d '|' -f1) id=$(echo "${row}" | cut -d '|' -f1)
sql "UPDATE watching_by_txid SET calledback1conf=1 WHERE id=\"${id}\"" sql "UPDATE watching_by_txid SET calledback1conf=true WHERE id=${id}"
trace_rc $? trace_rc $?
else else
trace "[do_callbacks_txid] callback returncode has error, we don't flag as calledback yet." trace "[do_callbacks_txid] callback returncode has error, we don't flag as calledback yet."
fi fi
done done
local callbacks=$(sql 'SELECT id, txid, callbackxconf, nbxconf FROM watching_by_txid WHERE watching AND calledback1conf AND callbackxconf NOT NULL AND NOT calledbackxconf') # For the n-conf, let's only check the watched txids that are already at least 1-conf...
local callbacks=$(sql "SELECT id, txid, callbackxconf, nbxconf FROM watching_by_txid WHERE watching AND calledback1conf AND callbackxconf IS NOT NULL AND NOT calledbackxconf")
trace "[do_callbacks_txid] callbacksxconf=${callbacks}" trace "[do_callbacks_txid] callbacksxconf=${callbacks}"
for row in ${callbacks} for row in ${callbacks}
@@ -42,7 +46,7 @@ do_callbacks_txid() {
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then if [ "${returncode}" -eq "0" ]; then
id=$(echo "${row}" | cut -d '|' -f1) id=$(echo "${row}" | cut -d '|' -f1)
sql "UPDATE watching_by_txid SET calledbackxconf=1, watching=0 WHERE id=\"${id}\"" sql "UPDATE watching_by_txid SET calledbackxconf=true, watching=false WHERE id=${id}"
trace_rc $? trace_rc $?
else else
trace "[do_callbacks_txid] callback returncode has error, we don't flag as calledback yet." trace "[do_callbacks_txid] callback returncode has error, we don't flag as calledback yet."

View File

@@ -11,7 +11,7 @@ compute_fees() {
trace "[compute_fees] pruned=${pruned}" trace "[compute_fees] pruned=${pruned}"
# We want null instead of 0.00000000 in this case. # We want null instead of 0.00000000 in this case.
echo "null" echo "null"
exit 0 return
fi fi
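Replacing exit 0 with return matters when compute_fees runs in the current shell rather than through a command substitution: return hands control back to the caller, while exit would terminate the whole request handler. A small illustration, with made-up names:

    # Illustration only: return vs exit inside an in-process shell function.
    emit_null() {
      echo "null"
      return        # caller keeps running
      # exit 0      # would end the calling script too when not run in a subshell
    }

    fees=$(emit_null)   # command substitution runs in a subshell, so either form survives here
    emit_null           # called directly, exit would have killed the script at this point
    echo "still alive, fees=${fees}"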
local txid=${1} local txid=${1}
@@ -64,16 +64,10 @@ compute_vin_total_amount()
for vin_txid_vout in ${vin_txids_vout} for vin_txid_vout in ${vin_txids_vout}
do do
vin_txid=$(echo "${vin_txid_vout}" | tr -d '"' | cut -d '-' -f1) vin_txid=$(echo "${vin_txid_vout}" | tr -d '"' | cut -d '-' -f1)
# Check if we already have the tx in our DB vin_raw_tx=$(get_rawtransaction "${vin_txid}" | tr -d '\n')
vin_raw_tx=$(sql_rawtx "SELECT raw_tx FROM rawtx WHERE txid=\"${vin_txid}\"") returncode=$?
trace_rc $? if [ "${returncode}" -ne 0 ]; then
if [ -z "${vin_raw_tx}" ]; then return ${returncode}
txid_already_inserted=false
vin_raw_tx=$(get_rawtransaction "${vin_txid}" | tr -d '\n')
returncode=$?
if [ "${returncode}" -ne 0 ]; then
return ${returncode}
fi
fi fi
vout=$(echo "${vin_txid_vout}" | tr -d '"' | cut -d '-' -f2) vout=$(echo "${vin_txid_vout}" | tr -d '"' | cut -d '-' -f2)
trace "[compute_vin_total_amount] vout=${vout}" trace "[compute_vin_total_amount] vout=${vout}"
@@ -81,27 +75,21 @@ compute_vin_total_amount()
trace "[compute_vin_total_amount] vin_vout_amount=${vin_vout_amount}" trace "[compute_vin_total_amount] vin_vout_amount=${vin_vout_amount}"
vin_total_amount=$(awk "BEGIN { printf(\"%.8f\", ${vin_total_amount}+${vin_vout_amount}); exit}") vin_total_amount=$(awk "BEGIN { printf(\"%.8f\", ${vin_total_amount}+${vin_vout_amount}); exit}")
trace "[compute_vin_total_amount] vin_total_amount=${vin_total_amount}" trace "[compute_vin_total_amount] vin_total_amount=${vin_total_amount}"
vin_hash=$(echo "${vin_raw_tx}" | jq ".result.hash") vin_hash=$(echo "${vin_raw_tx}" | jq -r ".result.hash")
vin_confirmations=$(echo "${vin_raw_tx}" | jq ".result.confirmations") vin_confirmations=$(echo "${vin_raw_tx}" | jq ".result.confirmations")
vin_timereceived=$(echo "${vin_raw_tx}" | jq ".result.time") vin_timereceived=$(echo "${vin_raw_tx}" | jq ".result.time")
vin_size=$(echo "${vin_raw_tx}" | jq ".result.size") vin_size=$(echo "${vin_raw_tx}" | jq ".result.size")
vin_vsize=$(echo "${vin_raw_tx}" | jq ".result.vsize") vin_vsize=$(echo "${vin_raw_tx}" | jq ".result.vsize")
vin_blockhash=$(echo "${vin_raw_tx}" | jq ".result.blockhash") vin_blockhash=$(echo "${vin_raw_tx}" | jq -r ".result.blockhash")
vin_blockheight=$(echo "${vin_raw_tx}" | jq ".result.blockheight") vin_blockheight=$(echo "${vin_raw_tx}" | jq ".result.blockheight")
vin_blocktime=$(echo "${vin_raw_tx}" | jq ".result.blocktime") vin_blocktime=$(echo "${vin_raw_tx}" | jq ".result.blocktime")
# Let's insert the vin tx in the DB just in case it would be useful # Let's insert the vin tx in the DB just in case it would be useful
if ! ${txid_already_inserted}; then sql "INSERT INTO tx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime)"\
# Sometimes raw tx are too long to be passed as paramater, so let's write " VALUES ('${vin_txid}', '${vin_hash}', ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, '${vin_blockhash}', ${vin_blockheight}, ${vin_blocktime})"\
# it to a temp file for it to be read by sqlite3 and then delete the file " ON CONFLICT (txid) DO"\
echo "${vin_raw_tx}" > vin-rawtx-${vin_txid}-$$.blob " UPDATE SET blockhash='${vin_blockhash}', blockheight=${vin_blockheight}, blocktime=${vin_blocktime}, confirmations=${vin_confirmations}"
sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime) VALUES (\"${vin_txid}\", ${vin_hash}, ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, ${vin_blockhash}, ${vin_blockheight}, ${vin_blocktime})" trace_rc $?
trace_rc $?
sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime, raw_tx) VALUES (\"${vin_txid}\", ${vin_hash}, ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, ${vin_blockhash}, ${vin_blockheight}, ${vin_blocktime}, readfile('vin-rawtx-${vin_txid}-$$.blob'))"
trace_rc $?
rm vin-rawtx-${vin_txid}-$$.blob
txid_already_inserted=true
fi
done done
echo "${vin_total_amount}" echo "${vin_total_amount}"

View File

@@ -44,7 +44,7 @@ confirmation() {
# First of all, let's make sure we're working on watched addresses... # First of all, let's make sure we're working on watched addresses...
local address local address
local addresseswhere local addresseswhere
local addresses=$(echo "${tx_details}" | jq ".result.details[].address") local addresses=$(echo "${tx_details}" | jq -r ".result.details[].address")
local notfirst=false local notfirst=false
local IFS=$'\n' local IFS=$'\n'
@@ -53,9 +53,9 @@ confirmation() {
trace "[confirmation] address=${address}" trace "[confirmation] address=${address}"
if ${notfirst}; then if ${notfirst}; then
addresseswhere="${addresseswhere},${address}" addresseswhere="${addresseswhere},'${address}'"
else else
addresseswhere="${address}" addresseswhere="'${address}'"
notfirst=true notfirst=true
fi fi
done done
@@ -66,11 +66,11 @@ confirmation() {
fi fi
######################################################################################################## ########################################################################################################
local tx=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"") local tx=$(sql "SELECT id FROM tx WHERE txid='${txid}'")
local id_inserted local id_inserted
local tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n') local tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n')
local tx_nb_conf=$(echo "${tx_details}" | jq -r '.result.confirmations // 0') local tx_nb_conf=$(echo "${tx_details}" | jq -r '.result.confirmations // 0')
local tx_hash=$(echo "${tx_raw_details}" | jq '.result.hash') local tx_hash=$(echo "${tx_raw_details}" | jq -r '.result.hash')
# Sometimes raw tx are too long to be passed as a parameter, so let's write # Sometimes raw tx are too long to be passed as a parameter, so let's write
# it to a temp file for it to be read by sqlite3 and then delete the file # it to a temp file for it to be read by sqlite3 and then delete the file
@@ -100,45 +100,33 @@ confirmation() {
local tx_blocktime=null local tx_blocktime=null
if [ "${tx_nb_conf}" -gt "0" ]; then if [ "${tx_nb_conf}" -gt "0" ]; then
trace "[confirmation] tx_nb_conf=${tx_nb_conf}" trace "[confirmation] tx_nb_conf=${tx_nb_conf}"
tx_blockhash=$(echo "${tx_details}" | jq '.result.blockhash') tx_blockhash="$(echo "${tx_details}" | jq -r '.result.blockhash')"
tx_blockheight=$(get_block_info $(echo ${tx_blockhash} | tr -d '"') | jq '.result.height') tx_blockheight=$(get_block_info ${tx_blockhash} | jq '.result.height')
tx_blockhash="'${tx_blockhash}'"
tx_blocktime=$(echo "${tx_details}" | jq '.result.blocktime') tx_blocktime=$(echo "${tx_details}" | jq '.result.blocktime')
fi fi
sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime) VALUES (\"${txid}\", ${tx_hash}, ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime})" id_inserted=$(sql "INSERT INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime)"\
trace_rc $? " VALUES ('${txid}', '${tx_hash}', ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime})"\
sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, blockhash, blockheight, blocktime, raw_tx) VALUES (\"${txid}\", ${tx_hash}, ${tx_nb_conf}, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${tx_blockhash}, ${tx_blockheight}, ${tx_blocktime}, readfile('rawtx-${txid}-$$.blob'))" " ON CONFLICT (txid) DO"\
trace_rc $? " UPDATE SET blockhash=${tx_blockhash}, blockheight=${tx_blockheight}, blocktime=${tx_blocktime}, confirmations=${tx_nb_conf}"\
" RETURNING id" \
id_inserted=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"") "SELECT id FROM tx WHERE txid='${txid}'")
trace_rc $? trace_rc $?
else else
# TX found in our DB. # TX found in our DB.
# 1-conf or executecallbacks on an unconfirmed tx or spending watched address (in this case, we probably missed conf) or spending to a watched address (in this case, spend inserted the tx in the DB) # 1-conf or executecallbacks on an unconfirmed tx or spending watched address (in this case, we probably missed conf) or spending to a watched address (in this case, spend inserted the tx in the DB)
local tx_blockhash=$(echo "${tx_details}" | jq '.result.blockhash') local tx_blockhash=$(echo "${tx_details}" | jq -r '.result.blockhash')
trace "[confirmation] tx_blockhash=${tx_blockhash}" trace "[confirmation] tx_blockhash=${tx_blockhash}"
if [ "${tx_blockhash}" = "null" ]; then if [ "${tx_blockhash}" = "null" ]; then
trace "[confirmation] probably being called by executecallbacks without any confirmations since the last time we checked" trace "[confirmation] probably being called by executecallbacks without any confirmations since the last time we checked"
else else
local tx_blockheight=$(get_block_info $(echo "${tx_blockhash}" | tr -d '"') | jq '.result.height') local tx_blockheight=$(get_block_info "${tx_blockhash}" | jq '.result.height')
local tx_blocktime=$(echo "${tx_details}" | jq '.result.blocktime') local tx_blocktime=$(echo "${tx_details}" | jq '.result.blocktime')
sql "UPDATE tx SET sql "UPDATE tx SET confirmations=${tx_nb_conf}, blockhash='${tx_blockhash}', blockheight=${tx_blockheight}, blocktime=${tx_blocktime} WHERE txid='${txid}'"
confirmations=${tx_nb_conf},
blockhash=${tx_blockhash},
blockheight=${tx_blockheight},
blocktime=${tx_blocktime}
WHERE txid=\"${txid}\""
trace_rc $?
sql_rawtx "UPDATE rawtx SET
confirmations=${tx_nb_conf},
blockhash=${tx_blockhash},
blockheight=${tx_blockheight},
blocktime=${tx_blocktime},
raw_tx=readfile('rawtx-${txid}-$$.blob')
WHERE txid=\"${txid}\""
trace_rc $? trace_rc $?
fi fi
id_inserted=${tx} id_inserted=${tx}
@@ -171,7 +159,8 @@ confirmation() {
# If the tx is batched and pays multiple watched addresses, we have to insert # If the tx is batched and pays multiple watched addresses, we have to insert
# those additional addresses in watching_tx! # those additional addresses in watching_tx!
watching_id=$(echo "${row}" | cut -d '|' -f1) watching_id=$(echo "${row}" | cut -d '|' -f1)
sql "INSERT OR IGNORE INTO watching_tx (watching_id, tx_id, vout, amount) VALUES (${watching_id}, ${id_inserted}, ${tx_vout_n}, ${tx_vout_amount})" sql "INSERT INTO watching_tx (watching_id, tx_id, vout, amount) VALUES (${watching_id}, ${id_inserted}, ${tx_vout_n}, ${tx_vout_amount})"\
" ON CONFLICT DO NOTHING"
trace_rc $? trace_rc $?
else else
trace "[confirmation] For this tx, there's already watching_tx rows" trace "[confirmation] For this tx, there's already watching_tx rows"
@@ -211,7 +200,7 @@ confirmation() {
# for next cron. # for next cron.
if [ -z "${bypass_callbacks}" ]; then if [ -z "${bypass_callbacks}" ]; then
trace "[confirmation] Let's do the callbacks!" trace "[confirmation] Let's do the callbacks!"
do_callbacks do_callbacks "${txid}"
fi fi
echo '{"result":"confirmed"}' echo '{"result":"confirmed"}'

View File

@@ -12,8 +12,8 @@ get_txns_by_watchlabel(){
INNER JOIN watching AS w ON w32.id = w.watching_by_pub32_id INNER JOIN watching AS w ON w32.id = w.watching_by_pub32_id
INNER JOIN watching_tx AS wtxn ON w.id = wtxn.watching_id INNER JOIN watching_tx AS wtxn ON w.id = wtxn.watching_id
INNER JOIN tx AS tx ON wtxn.tx_id = tx.id INNER JOIN tx AS tx ON wtxn.tx_id = tx.id
WHERE w32.label="$1" WHERE w32.label='${1}'
LIMIT 0,${2-10} LIMIT ${2-10} OFFSET 0
HERE HERE
) )
label_txns=$(sql "$query") label_txns=$(sql "$query")
@@ -38,12 +38,12 @@ get_unused_addresses_by_watchlabel(){
SELECT w32.id, w32.label, w32.pub32, w.pub32_index, w.address SELECT w32.id, w32.label, w32.pub32, w.pub32_index, w.address
FROM watching as w FROM watching as w
INNER JOIN watching_by_pub32 AS w32 ON w.watching_by_pub32_id = w32.id INNER JOIN watching_by_pub32 AS w32 ON w.watching_by_pub32_id = w32.id
WHERE w32.label="$1" WHERE w32.label='${1}'
AND NOT EXISTS ( AND NOT EXISTS (
SELECT 1 FROM watching_tx WHERE watching_id = w.id SELECT 1 FROM watching_tx WHERE watching_id = w.id
) )
ORDER BY w.pub32_index ASC ORDER BY w.pub32_index ASC
LIMIT 0,${2-10} LIMIT ${2-10} OFFSET 0
HERE HERE
) )
label_unused_addrs=$(sql "$query") label_unused_addrs=$(sql "$query")
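SQLite's two-argument LIMIT 0,${2-10} is not valid PostgreSQL; the standard LIMIT n OFFSET m spelling is used instead, and the ${2-10} shell default still caps the result at 10 rows when no count argument is given. A quick check of that default:

    # ${2-10}: use the second positional parameter when set, otherwise fall back to 10.
    set -- mylabel          # only a label argument
    echo "LIMIT ${2-10} OFFSET 0"    # -> LIMIT 10 OFFSET 0
    set -- mylabel 25
    echo "LIMIT ${2-10} OFFSET 0"    # -> LIMIT 25 OFFSET 0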
@@ -65,9 +65,9 @@ getactivewatches() {
trace "Entering getactivewatches()..." trace "Entering getactivewatches()..."
local watches local watches
# Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster. # Let's build the string directly with the DBMS instead of manipulating multiple strings afterwards; it's faster.
# {"id":"${id}","address":"${address}","imported":"${imported}","unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}"} # {"id":"${id}","address":"${address}","imported":"${imported}","unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}"}
watches=$(sql "SELECT '{\"id\":' || id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":\"' || COALESCE(callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(callback1conf, '') || '\",\"label\":\"' || COALESCE(label, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching WHERE watching AND NOT calledback1conf") watches=$(sql "SELECT '{\"id\":' || id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":' || CASE WHEN callback0conf IS NULL THEN 'null' ELSE ('\"' || callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN callback1conf IS NULL THEN 'null' ELSE ('\"' || callback1conf || '\"') END || ',\"label\":\"' || COALESCE(label, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching WHERE watching AND NOT calledback1conf ORDER BY id")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -99,7 +99,7 @@ getactivewatchesbyxpub() {
local xpub=${1} local xpub=${1}
local returncode local returncode
getactivewatchesxpub "pub32" ${xpub} getactivewatchesxpub "pub32" "${xpub}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -112,7 +112,7 @@ getactivewatchesbylabel() {
local label=${1} local label=${1}
local returncode local returncode
getactivewatchesxpub "label" ${label} getactivewatchesxpub "label" "${label}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -128,9 +128,9 @@ getactivewatchesxpub() {
trace "[getactivewatchesxpub] value=${value}" trace "[getactivewatchesxpub] value=${value}"
local watches local watches
# Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster. # Let's build the string directly with the DBMS instead of manipulating multiple strings afterwards; it's faster.
# {"id":"${id}","address":"${address}","imported":"${imported}","unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}","derivation_path":"${derivation_path}","pub32_index":"${pub32_index}"} # {"id":"${id}","address":"${address}","imported":"${imported}","unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}","derivation_path":"${derivation_path}","pub32_index":"${pub32_index}"}
watches=$(sql "SELECT '{\"id\":' || w.id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":\"' || COALESCE(w.callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(w.callback1conf, '') || '\",\"watching_since\":\"' || w.inserted_ts || '\",\"derivation_path\":\"' || derivation_path || '\",\"pub32_index\":' || pub32_index || '}' FROM watching w, watching_by_pub32 w32 WHERE watching_by_pub32_id = w32.id AND ${where} = \"${value}\" AND w.watching AND NOT calledback1conf") watches=$(sql "SELECT '{\"id\":' || w.id || ',\"address\":\"' || address || '\",\"imported\":' || imported || ',\"unconfirmedCallbackURL\":' || CASE WHEN w.callback0conf IS NULL THEN 'null' ELSE ('\"' || w.callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN w.callback1conf IS NULL THEN 'null' ELSE ('\"' || w.callback1conf || '\"') END || ',\"watching_since\":\"' || w.inserted_ts || '\",\"derivation_path\":\"' || derivation_path || '\",\"pub32_index\":' || pub32_index || '}' FROM watching w, watching_by_pub32 w32 WHERE watching_by_pub32_id = w32.id AND w32.${where} = '${value}' AND w.watching AND NOT calledback1conf ORDER BY w.id")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -160,9 +160,9 @@ getactivexpubwatches() {
trace "Entering getactivexpubwatches()..." trace "Entering getactivexpubwatches()..."
local watches local watches
# Let's build the string directly with sqlite instead of manipulating multiple strings afterwards, it's faster. # Let's build the string directly with the DBMS instead of manipulating multiple strings afterwards; it's faster.
# {"id":"${id}","pub32":"${pub32}","label":"${label}","derivation_path":"${derivation_path}","last_imported_n":${last_imported_n},"unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}"} # {"id":"${id}","pub32":"${pub32}","label":"${label}","derivation_path":"${derivation_path}","last_imported_n":${last_imported_n},"unconfirmedCallbackURL":"${cb0conf_url}","confirmedCallbackURL":"${cb1conf_url}","watching_since":"${timestamp}"}
watches=$(sql "SELECT '{\"id\":' || id || ',\"pub32\":\"' || pub32 || '\",\"label\":\"' || label || '\",\"derivation_path\":\"' || derivation_path || '\",\"last_imported_n\":' || last_imported_n || ',\"unconfirmedCallbackURL\":\"' || COALESCE(callback0conf, '') || '\",\"confirmedCallbackURL\":\"' || COALESCE(callback1conf, '') || '\",\"watching_since\":\"' || inserted_ts || '\"}' FROM watching_by_pub32 WHERE watching") watches=$(sql "SELECT '{\"id\":' || id || ',\"pub32\":\"' || pub32 || '\",\"label\":\"' || label || '\",\"derivation_path\":\"' || derivation_path || '\",\"last_imported_n\":' || last_imported_n || ',\"unconfirmedCallbackURL\":' || CASE WHEN callback0conf IS NULL THEN 'null' ELSE ('\"' || callback0conf || '\"') END || ',\"confirmedCallbackURL\":' || CASE WHEN callback1conf IS NULL THEN 'null' ELSE ('\"' || callback1conf || '\"') END || ',\"watching_since\":\"' || inserted_ts || '\"}' FROM watching_by_pub32 WHERE watching ORDER BY id")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
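The JSON rows are still assembled inside the query, but a missing callback URL now comes out as a JSON null (via CASE WHEN ... IS NULL) instead of an empty string. One row of the output, with fabricated values, piped through jq only to show that it parses:

    # Fabricated example of a single row emitted by the query above.
    echo '{"id":7,"pub32":"upubFAKEc0ffee","label":"2219","derivation_path":"0/1/n","last_imported_n":100,"unconfirmedCallbackURL":null,"confirmedCallbackURL":"https://myapp:3000/cb1conf","watching_since":"2021-12-30 11:20:20"}' | jq .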

View File

@@ -11,7 +11,7 @@ importaddress_rpc() {
if [ -z "${label}" ]; then if [ -z "${label}" ]; then
label="null" label="null"
fi fi
local data='{"method":"importaddress","params":{"address":"'${address}'","label":'${label}',"rescan":false}}' local data='{"method":"importaddress","params":{"address":"'${address}'","label":"'${label}'","rescan":false}}'
# local data="{\"method\":\"importaddress\",\"params\":[\"${address}\",\"\",false]}" # local data="{\"method\":\"importaddress\",\"params\":[\"${address}\",\"\",false]}"
local result local result
result=$(send_to_watcher_node ${data}) result=$(send_to_watcher_node ${data})
@@ -39,7 +39,7 @@ importmulti_rpc() {
# {"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"}, # {"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},
# {"scriptPubKey":{"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},"timestamp":"now","watchonly":true,"label":"xpub"}, # {"scriptPubKey":{"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},"timestamp":"now","watchonly":true,"label":"xpub"},
addresses=$(echo "${addresses}" | sed "s/\"address\"/\"scriptPubKey\":\{\"address\"/g" | sed "s/}/},\"timestamp\":\"now\",\"watchonly\":true,\"label\":${label}}/g") addresses=$(echo "${addresses}" | sed "s/\"address\"/\"scriptPubKey\":\{\"address\"/g" | sed "s/}/},\"timestamp\":\"now\",\"watchonly\":true,\"label\":\"${label}\"}/g")
# trace "[importmulti_rpc] addresses=${addresses}" # trace "[importmulti_rpc] addresses=${addresses}"
# Now we use that in the RPC string # Now we use that in the RPC string

View File

@@ -25,7 +25,7 @@ manage_not_imported() {
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then if [ "${returncode}" -eq 0 ]; then
sql "UPDATE watching SET imported=1 WHERE address=\"${address}\"" sql "UPDATE watching SET imported=true WHERE address='${address}'"
fi fi
done done
@@ -33,7 +33,7 @@ manage_not_imported() {
} }
manage_missed_conf() { manage_missed_conf() {
# Maybe we missed confirmations, because we were down or no network or # Maybe we missed 0-conf or 1-conf watched txs, because we were down, had no network, or
# whatever, so we look at what might be missed and do confirmations. # whatever, so we look at what might be missed and do confirmations.
# The strategy here: get the list of watched addresses, see if they received something on the Bitcoin node, # The strategy here: get the list of watched addresses, see if they received something on the Bitcoin node,
@@ -41,7 +41,7 @@ manage_missed_conf() {
trace "[Entering manage_missed_conf()]" trace "[Entering manage_missed_conf()]"
local watches=$(sql 'SELECT DISTINCT address FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx t ON t.id = tx_id WHERE watching AND imported AND (tx_id IS NULL OR t.confirmations=0) ORDER BY address') local watches=$(sql "SELECT DISTINCT address FROM watching w LEFT JOIN watching_tx ON w.id = watching_id LEFT JOIN tx t ON t.id = tx_id WHERE watching AND imported ORDER BY address")
trace "[manage_missed_conf] watches=${watches}" trace "[manage_missed_conf] watches=${watches}"
if [ ${#watches} -eq 0 ]; then if [ ${#watches} -eq 0 ]; then
trace "[manage_missed_conf] Nothing missed!" trace "[manage_missed_conf] Nothing missed!"
@@ -66,6 +66,7 @@ manage_missed_conf() {
local received local received
local received_address local received_address
local confirmations
local watching local watching
local latesttxid local latesttxid
local tx local tx
@@ -76,48 +77,55 @@ manage_missed_conf() {
local row local row
local address local address
local inserted_ts local inserted_ts
local calledback0conf
local txid local txid
local txids local txids
local IFS=$'\n' local IFS=$'\n'
for address in ${received_watches} for address in ${received_watches}
do do
watching=$(sql 'SELECT address, inserted_ts FROM watching WHERE address="'${address}'"') watching=$(sql "SELECT address, inserted_ts, calledback0conf FROM watching WHERE address='${address}'")
trace "[manage_missed_conf] watching=${watching}" trace "[manage_missed_conf] watching=${watching}"
if [ ${#watching} -eq 0 ]; then if [ ${#watching} -eq 0 ]; then
trace "[manage_missed_conf] Nothing missed!" trace "[manage_missed_conf] Nothing missed!"
continue continue
fi fi
# Let's get confirmed received txs for the address inserted_ts=$(date -d "$(echo "${watching}" | cut -d '|' -f2)" -D '%Y-%m-%d %H:%M:%S' +"%s")
# address=$(echo "${watches}" | cut -d '|' -f1)
inserted_ts=$(date -d "$(echo "${watching}" | cut -d '|' -f2)" +"%s")
trace "[manage_missed_conf] inserted_ts=${inserted_ts}" trace "[manage_missed_conf] inserted_ts=${inserted_ts}"
calledback0conf=$(echo "${watching}" | cut -d '|' -f3)
trace "[manage_missed_conf] calledback0conf=${calledback0conf}"
received_address=$(echo "${received}" | jq -Mc ".result | map(select(.address==\"${address}\" and .confirmations>0))[0]") received_address=$(echo "${received}" | jq -Mc ".result | map(select(.address==\"${address}\"))[0]")
trace "[manage_missed_conf] received_address=${received_address}" trace "[manage_missed_conf] received_address=${received_address}"
if [ "${received_address}" = "null" ]; then confirmations=$(echo "${received_address}" | jq -r ".confirmations")
# Not confirmed while we were away... trace "[manage_missed_conf] confirmations=${confirmations}"
trace "[manage_missed_conf] Nothing missed here"
if [ "${confirmations}" -eq "0" ] && [ "${calledback0conf}" = "t" ]; then
# 0-conf and calledback0conf is true, so let's skip this one
trace "[manage_missed_conf] Nothing missed!"
else else
# We got something confirmed # 0-conf and calledback0conf false, let's call confirmation
# Let's find out if it was confirmed after being watched # or
trace "[manage_missed_conf] We got something confirmed" # 1-conf and calledback1conf false, let's call confirmation
trace "[manage_missed_conf] We got something to check..."
latesttxid=$(echo "${received_address}" | jq -r ".txids | last") latesttxid=$(echo "${received_address}" | jq -r ".txids | last")
trace "[manage_missed_conf] latesttxid=${latesttxid}" trace "[manage_missed_conf] latesttxid=${latesttxid}"
data='{"method":"gettransaction","params":["'${latesttxid}'"]}' data='{"method":"gettransaction","params":["'${latesttxid}'"]}'
tx=$(send_to_watcher_node ${data}) tx=$(send_to_watcher_node ${data})
blocktime=$(echo "${tx}" | jq '.result.blocktime') blocktime=$(echo "${tx}" | jq '.result.blocktime')
txtime=$(echo "${tx}" | jq '.result.time') txtime=$(echo "${tx}" | jq '.result.time')
confirmations=$(echo "${tx}" | jq '.result.confirmations')
trace "[manage_missed_conf] blocktime=${blocktime}" trace "[manage_missed_conf] blocktime=${blocktime}"
trace "[manage_missed_conf] txtime=${txtime}" trace "[manage_missed_conf] txtime=${txtime}"
trace "[manage_missed_conf] inserted_ts=${inserted_ts}" trace "[manage_missed_conf] inserted_ts=${inserted_ts}"
trace "[manage_missed_conf] confirmations=${confirmations}" trace "[manage_missed_conf] confirmations=${confirmations}"
if [ "${txtime}" -gt "${inserted_ts}" ] && [ "${confirmations}" -gt "0" ]; then if [ "${txtime}" -ge "${inserted_ts}" ]; then
# Mined after watch, we missed it! # Broadcast or mined after watch, we missed it!
trace "[manage_missed_conf] Mined after watch, we missed it!" trace "[manage_missed_conf] Broadcast or mined after watch, we missed it!"
# We skip the callbacks because do_callbacks is called right after in
# requesthandler.executecallbacks (where we're from)
confirmation "${latesttxid}" "true" confirmation "${latesttxid}" "true"
fi fi
fi fi
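inserted_ts is now parsed with an explicit -D input format because PostgreSQL returns timestamps as YYYY-MM-DD HH:MM:SS, which BusyBox date (the variant shipped in the Alpine-based containers here) does not guess on its own; the comparison with the transaction's time field then happens on epoch seconds. A short illustration with a made-up timestamp:

    # BusyBox date: -d gives the input value, -D its format, +%s converts to epoch seconds.
    inserted='2021-12-30 11:20:20'
    inserted_ts=$(date -d "${inserted}" -D '%Y-%m-%d %H:%M:%S' +"%s")
    echo "${inserted_ts}"   # epoch seconds, comparable to .result.time from gettransaction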

View File

@@ -25,6 +25,7 @@ newblock() {
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
# do_callbacks_txid "$(echo "${blockinfo}" | jq ".result.tx[]")"
do_callbacks_txid do_callbacks_txid
batch_check_webhooks batch_check_webhooks

View File

@@ -39,7 +39,7 @@ serve_ots_stamp() {
id_inserted=$(echo "${row}" | cut -d '|' -f1) id_inserted=$(echo "${row}" | cut -d '|' -f1)
trace "[serve_ots_stamp] id_inserted=${id_inserted}" trace "[serve_ots_stamp] id_inserted=${id_inserted}"
if [ "${requested}" -eq "1" ]; then if [ "${requested}" = "t" ]; then
# Stamp already requested # Stamp already requested
trace "[serve_ots_stamp] Stamp already requested" trace "[serve_ots_stamp] Stamp already requested"
errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested" errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested"
@@ -49,18 +49,20 @@ serve_ots_stamp() {
returncode=$? returncode=$?
fi fi
else else
sql "INSERT OR IGNORE INTO stamp (hash, callbackUrl) VALUES (\"${hash}\", \"${callbackUrl}\")" id_inserted=$(sql "INSERT INTO stamp (hash, callbackUrl)"\
" VALUES ('${hash}','${callbackUrl}')"\
" RETURNING id" \
"SELECT id FROM stamp WHERE hash='${hash}'")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then if [ "${returncode}" -eq "0" ]; then
id_inserted=$(sql "SELECT id FROM stamp WHERE hash='${hash}'")
trace_rc $?
errorstring=$(request_ots_stamp "${hash}" ${id_inserted}) errorstring=$(request_ots_stamp "${hash}" ${id_inserted})
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
else else
trace "[serve_ots_stamp] Stamp request could not be inserted in DB" trace "[serve_ots_stamp] Stamp request could not be inserted in DB"
errorstring="Stamp request could not be inserted in DB, please retry later" errorstring="Stamp request could not be inserted in DB, please retry later"
id_inserted=null
returncode=1 returncode=1
fi fi
fi fi
@@ -114,7 +116,7 @@ request_ots_stamp() {
if [ "${returncode}" -eq "0" ]; then if [ "${returncode}" -eq "0" ]; then
# "already exists" found, let's try updating DB again # "already exists" found, let's try updating DB again
trace "[request_ots_stamp] was already requested to the OTS server... let's update the DB, looks like it didn't work on first try" trace "[request_ots_stamp] was already requested to the OTS server... let's update the DB, looks like it didn't work on first try"
sql "UPDATE stamp SET requested=1 WHERE id=${id}" sql "UPDATE stamp SET requested=true WHERE id=${id}"
errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested" errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested"
returncode=1 returncode=1
else else
@@ -125,7 +127,7 @@ request_ots_stamp() {
fi fi
else else
trace "[request_ots_stamp] Stamping request sent successfully!" trace "[request_ots_stamp] Stamping request sent successfully!"
sql "UPDATE stamp SET requested=1 WHERE id=${id}" sql "UPDATE stamp SET requested=true WHERE id=${id}"
errorstring="" errorstring=""
returncode=0 returncode=0
fi fi
@@ -174,12 +176,12 @@ serve_ots_backoffice() {
id=$(echo "${row}" | cut -d '|' -f5) id=$(echo "${row}" | cut -d '|' -f5)
trace "[serve_ots_backoffice] id=${id}" trace "[serve_ots_backoffice] id=${id}"
if [ "${requested}" -ne "1" ]; then if [ "${requested}" != "t" ]; then
# Re-request the unrequested calls to ots_stamp # Re-request the unrequested calls to ots_stamp
request_ots_stamp "${hash}" ${id} request_ots_stamp "${hash}" ${id}
returncode=$? returncode=$?
else else
if [ "${upgraded}" -ne "1" ]; then if [ "${upgraded}" != "t" ]; then
# Upgrade requested calls to ots_stamp that have not been called back yet # Upgrade requested calls to ots_stamp that have not been called back yet
trace "[serve_ots_backoffice] curl -s ${OTSCLIENT_CONTAINER}/upgrade/${hash}" trace "[serve_ots_backoffice] curl -s ${OTSCLIENT_CONTAINER}/upgrade/${hash}"
result=$(curl -s ${OTSCLIENT_CONTAINER}/upgrade/${hash}) result=$(curl -s ${OTSCLIENT_CONTAINER}/upgrade/${hash})
@@ -194,18 +196,18 @@ serve_ots_backoffice() {
# Error tag not null, so there's an error # Error tag not null, so there's an error
trace "[serve_ots_backoffice] not upgraded!" trace "[serve_ots_backoffice] not upgraded!"
upgraded=0 upgraded="f"
else else
# No failure, upgraded # No failure, upgraded
trace "[serve_ots_backoffice] just upgraded!" trace "[serve_ots_backoffice] just upgraded!"
sql "UPDATE stamp SET upgraded=1 WHERE id=${id}" sql "UPDATE stamp SET upgraded=true WHERE id=${id}"
trace_rc $? trace_rc $?
upgraded=1 upgraded="t"
fi fi
fi fi
fi fi
if [ "${upgraded}" -eq "1" ]; then if [ "${upgraded}" = "t" ]; then
trace "[serve_ots_backoffice] upgraded! Let's call the callback..." trace "[serve_ots_backoffice] upgraded! Let's call the callback..."
url=$(echo "${row}" | cut -d '|' -f2) url=$(echo "${row}" | cut -d '|' -f2)
trace "[serve_ots_backoffice] url=${url}" trace "[serve_ots_backoffice] url=${url}"
@@ -221,13 +223,13 @@ serve_ots_backoffice() {
# Even if curl executed ok, we need to make sure the http return code is also ok # Even if curl executed ok, we need to make sure the http return code is also ok
if [ "${returncode}" -eq "0" ]; then if [ "${returncode}" -eq "0" ]; then
sql "UPDATE stamp SET calledback=1 WHERE id=${id}" sql "UPDATE stamp SET calledback=true WHERE id=${id}"
trace_rc $? trace_rc $?
fi fi
else else
trace "[serve_ots_backoffice] url is empty, obviously won't try to call it!" trace "[serve_ots_backoffice] url is empty, obviously won't try to call it!"
sql "UPDATE stamp SET calledback=1 WHERE id=${id}" sql "UPDATE stamp SET calledback=true WHERE id=${id}"
trace_rc $? trace_rc $?
fi fi
fi fi
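The requested and upgraded flags are now compared against the strings t and f because that is how psql prints boolean columns in tuples-only, unaligned mode; the old numeric -eq tests would no longer match. A quick check (connection values are placeholders):

    # psql renders booleans as t/f in -qAt output, hence the string comparisons above.
    psql -qAtX -h postgres -U cyphernode -d cyphernode -c "SELECT true, false"
    # -> t|f
    requested="t"
    [ "${requested}" = "t" ] && echo "already requested"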

View File

@@ -76,8 +76,10 @@ main() {
case "${cmd}" in case "${cmd}" in
helloworld) helloworld)
# GET http://192.168.111.152:8080/helloworld # GET http://192.168.111.152:8080/helloworld
response_to_client "Hello, world!" 0 response='{"hello":"world"}'
break returncode=0
# response_to_client "Hello, world!" 0
# break
;; ;;
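Each case branch now just sets response and returncode instead of calling response_to_client and break itself, which points to a single shared reply after the case statement; that shared tail sits outside the hunks shown here, so the sketch below assumes it. A reduced version of the new dispatch shape:

    # Reduced sketch of the dispatch pattern (the response_to_client call after esac is assumed).
    case "${cmd}" in
      helloworld)
        response='{"hello":"world"}'
        returncode=0
        ;;
      getbestblockhash)
        response=$(get_best_block_hash)
        returncode=$?
        ;;
    esac
    response_to_client "${response}" "${returncode}"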
installation_info) installation_info)
# GET http://192.168.111.152:8080/info # GET http://192.168.111.152:8080/info
@@ -86,8 +88,7 @@ main() {
else else
response='{ "error": "missing installation data" }' response='{ "error": "missing installation data" }'
fi fi
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
watch) watch)
# POST http://192.168.111.152:8080/watch # POST http://192.168.111.152:8080/watch
@@ -96,8 +97,7 @@ main() {
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","eventMessage":"eyJib3VuY2VfYWRkcmVzcyI6IjJNdkEzeHIzOHIxNXRRZWhGblBKMVhBdXJDUFR2ZTZOamNGIiwibmJfY29uZiI6MH0K","label":"myLabel"} # BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","eventMessage":"eyJib3VuY2VfYWRkcmVzcyI6IjJNdkEzeHIzOHIxNXRRZWhGblBKMVhBdXJDUFR2ZTZOamNGIiwibmJfY29uZiI6MH0K","label":"myLabel"}
response=$(watchrequest "${line}") response=$(watchrequest "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
unwatch) unwatch)
# curl (GET) 192.168.111.152:8080/unwatch/2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp # curl (GET) 192.168.111.152:8080/unwatch/2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp
@@ -122,16 +122,15 @@ main() {
# Let's make it work even for a GET request (equivalent to a POST with empty json object body) # Let's make it work even for a GET request (equivalent to a POST with empty json object body)
if [ "$http_method" = "POST" ]; then if [ "$http_method" = "POST" ]; then
address=$(echo "${line}" | jq -r ".address") address=$(echo "${line}" | jq -r ".address")
unconfirmedCallbackURL=$(echo "${line}" | jq ".unconfirmedCallbackURL") unconfirmedCallbackURL=$(echo "${line}" | jq -r ".unconfirmedCallbackURL")
confirmedCallbackURL=$(echo "${line}" | jq ".confirmedCallbackURL") confirmedCallbackURL=$(echo "${line}" | jq -r ".confirmedCallbackURL")
watchid=$(echo "${line}" | jq ".id") watchid=$(echo "${line}" | jq ".id")
else else
address=$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3) address=$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)
fi fi
response=$(unwatchrequest "${watchid}" "${address}" "${unconfirmedCallbackURL}" "${confirmedCallbackURL}") response=$(unwatchrequest "${watchid}" "${address}" "${unconfirmedCallbackURL}" "${confirmedCallbackURL}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
watchxpub) watchxpub)
# POST http://192.168.111.152:8080/watchxpub # POST http://192.168.111.152:8080/watchxpub
@@ -139,43 +138,37 @@ main() {
# curl -H "Content-Type: application/json" -d '{"label":"2219","pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/1/n","nstart":55,"unconfirmedCallbackURL":"192.168.111.233:1111/callback0conf","confirmedCallbackURL":"192.168.111.233:1111/callback1conf"}' proxy:8888/watchxpub # curl -H "Content-Type: application/json" -d '{"label":"2219","pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/1/n","nstart":55,"unconfirmedCallbackURL":"192.168.111.233:1111/callback0conf","confirmedCallbackURL":"192.168.111.233:1111/callback1conf"}' proxy:8888/watchxpub
response=$(watchpub32request "${line}") response=$(watchpub32request "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
unwatchxpubbyxpub) unwatchxpubbyxpub)
# GET http://192.168.111.152:8080/unwatchxpubbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk # GET http://192.168.111.152:8080/unwatchxpubbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk
response=$(unwatchpub32request "${line}") response=$(unwatchpub32request "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
unwatchxpubbylabel) unwatchxpubbylabel)
# GET http://192.168.111.152:8080/unwatchxpubbylabel/4421 # GET http://192.168.111.152:8080/unwatchxpubbylabel/4421
response=$(unwatchpub32labelrequest "${line}") response=$(unwatchpub32labelrequest "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getactivewatchesbyxpub) getactivewatchesbyxpub)
# GET http://192.168.111.152:8080/getactivewatchesbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk # GET http://192.168.111.152:8080/getactivewatchesbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk
response=$(getactivewatchesbyxpub "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(getactivewatchesbyxpub "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getactivewatchesbylabel) getactivewatchesbylabel)
# GET http://192.168.111.152:8080/getactivewatchesbylabel/4421 # GET http://192.168.111.152:8080/getactivewatchesbylabel/4421
response=$(getactivewatchesbylabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(getactivewatchesbylabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getactivexpubwatches) getactivexpubwatches)
# GET http://192.168.111.152:8080/getactivexpubwatches # GET http://192.168.111.152:8080/getactivexpubwatches
response=$(getactivexpubwatches) response=$(getactivexpubwatches)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
watchtxid) watchtxid)
# POST http://192.168.111.152:8080/watchtxid # POST http://192.168.111.152:8080/watchtxid
@@ -183,8 +176,7 @@ main() {
# curl -H "Content-Type: application/json" -d '{"txid":"b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","xconfCallbackURL":"192.168.111.233:1111/callbackXconf","nbxconf":6}' proxy:8888/watchtxid # curl -H "Content-Type: application/json" -d '{"txid":"b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","xconfCallbackURL":"192.168.111.233:1111/callbackXconf","nbxconf":6}' proxy:8888/watchtxid
response=$(watchtxidrequest "${line}") response=$(watchtxidrequest "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
unwatchtxid) unwatchtxid)
# POST http://192.168.111.152:8080/unwatchtxid # POST http://192.168.111.152:8080/unwatchtxid
@@ -200,88 +192,76 @@ main() {
# - id: the id returned by watchtxid # - id: the id returned by watchtxid
local txid=$(echo "${line}" | jq -r ".txid") local txid=$(echo "${line}" | jq -r ".txid")
local unconfirmedCallbackURL=$(echo "${line}" | jq ".unconfirmedCallbackURL") local unconfirmedCallbackURL=$(echo "${line}" | jq -r ".unconfirmedCallbackURL")
local confirmedCallbackURL=$(echo "${line}" | jq ".confirmedCallbackURL") local confirmedCallbackURL=$(echo "${line}" | jq -r ".confirmedCallbackURL")
local watchid=$(echo "${line}" | jq ".id") local watchid=$(echo "${line}" | jq ".id")
response=$(unwatchtxidrequest "${watchid}" "${txid}" "${unconfirmedCallbackURL}" "${confirmedCallbackURL}") response=$(unwatchtxidrequest "${watchid}" "${txid}" "${unconfirmedCallbackURL}" "${confirmedCallbackURL}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getactivewatches) getactivewatches)
# curl (GET) 192.168.111.152:8080/getactivewatches # curl (GET) 192.168.111.152:8080/getactivewatches
response=$(getactivewatches) response=$(getactivewatches)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
get_txns_by_watchlabel) get_txns_by_watchlabel)
# curl (GET) 192.168.111.152:8080/get_txns_by_watchlabel/<label>/<count> # curl (GET) 192.168.111.152:8080/get_txns_by_watchlabel/<label>/<count>
response=$(get_txns_by_watchlabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f4)") response=$(get_txns_by_watchlabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f4)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
get_unused_addresses_by_watchlabel) get_unused_addresses_by_watchlabel)
# curl (GET) 192.168.111.152:8080/get_unused_addresses_by_watchlabel/<label>/<count> # curl (GET) 192.168.111.152:8080/get_unused_addresses_by_watchlabel/<label>/<count>
response=$(get_unused_addresses_by_watchlabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f4)") response=$(get_unused_addresses_by_watchlabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f4)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
conf) conf)
# curl (GET) 192.168.111.152:8080/conf/b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387 # curl (GET) 192.168.111.152:8080/conf/b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387
response=$(confirmation_request "${line}") response=$(confirmation_request "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
newblock) newblock)
# curl (GET) 192.168.111.152:8080/newblock/000000000000005c987120f3b6f995c95749977ef1a109c89aa74ce4bba97c1f # curl (GET) 192.168.111.152:8080/newblock/000000000000005c987120f3b6f995c95749977ef1a109c89aa74ce4bba97c1f
response=$(newblock "${line}") response=$(newblock "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getbestblockhash) getbestblockhash)
# curl (GET) http://192.168.111.152:8080/getbestblockhash # curl (GET) http://192.168.111.152:8080/getbestblockhash
response=$(get_best_block_hash) response=$(get_best_block_hash)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getblockhash) getblockhash)
# curl (GET) http://192.168.111.152:8080/getblockhash/522322 # curl (GET) http://192.168.111.152:8080/getblockhash/522322
response=$(get_blockhash "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(get_blockhash "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getblockinfo) getblockinfo)
# curl (GET) http://192.168.111.152:8080/getblockinfo/000000006f82a384c208ecfa04d05beea02d420f3f398ddda5c7f900de5718ea # curl (GET) http://192.168.111.152:8080/getblockinfo/000000006f82a384c208ecfa04d05beea02d420f3f398ddda5c7f900de5718ea
response=$(get_block_info "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(get_block_info "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getblockchaininfo) getblockchaininfo)
# http://192.168.111.152:8080/getblockchaininfo # http://192.168.111.152:8080/getblockchaininfo
response=$(get_blockchain_info) response=$(get_blockchain_info)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
gettransaction) gettransaction)
# curl (GET) http://192.168.111.152:8080/gettransaction/af867c86000da76df7ddb1054b273ca9e034e8c89d049b5b2795f9f590f67648 # curl (GET) http://192.168.111.152:8080/gettransaction/af867c86000da76df7ddb1054b273ca9e034e8c89d049b5b2795f9f590f67648
response=$(get_rawtransaction "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(get_rawtransaction "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getbestblockinfo) getbestblockinfo)
# curl (GET) http://192.168.111.152:8080/getbestblockinfo # curl (GET) http://192.168.111.152:8080/getbestblockinfo
response=$(get_best_block_info) response=$(get_best_block_info)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
executecallbacks) executecallbacks)
# curl (GET) http://192.168.111.152:8080/executecallbacks # curl (GET) http://192.168.111.152:8080/executecallbacks
@@ -289,43 +269,37 @@ main() {
manage_not_imported manage_not_imported
manage_missed_conf manage_missed_conf
response=$(do_callbacks) response=$(do_callbacks)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
get_txns_spending) get_txns_spending)
# curl (GET) http://192.168.111.152:8080/get_txns_spending/20/10 # curl (GET) http://192.168.111.152:8080/get_txns_spending/20/10
response=$(get_txns_spending "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f4)") response=$(get_txns_spending "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f4)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getbalance) getbalance)
# curl (GET) http://192.168.111.152:8080/getbalance # curl (GET) http://192.168.111.152:8080/getbalance
response=$(getbalance) response=$(getbalance)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getbalances) getbalances)
# curl (GET) http://192.168.111.152:8080/getbalances # curl (GET) http://192.168.111.152:8080/getbalances
response=$(getbalances) response=$(getbalances)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getbalancebyxpub) getbalancebyxpub)
# curl (GET) http://192.168.111.152:8080/getbalancebyxpub/upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb # curl (GET) http://192.168.111.152:8080/getbalancebyxpub/upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb
response=$(getbalancebyxpub "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(getbalancebyxpub "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getbalancebyxpublabel) getbalancebyxpublabel)
# curl (GET) http://192.168.111.152:8080/getbalancebyxpublabel/2219 # curl (GET) http://192.168.111.152:8080/getbalancebyxpublabel/2219
response=$(getbalancebyxpublabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(getbalancebyxpublabel "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getnewaddress) getnewaddress)
# curl (GET) http://192.168.111.152:8080/getnewaddress # curl (GET) http://192.168.111.152:8080/getnewaddress
@@ -347,23 +321,20 @@ main() {
fi fi
response=$(getnewaddress "${address_type}" "${label}") response=$(getnewaddress "${address_type}" "${label}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
validateaddress) validateaddress)
# GET http://192.168.111.152:8080/validateaddress/tb1p5cyxnuxmeuwuvkwfem96lqzszd02n6xdcjrs20cac6yqjjwudpxqp3mvzv # GET http://192.168.111.152:8080/validateaddress/tb1p5cyxnuxmeuwuvkwfem96lqzszd02n6xdcjrs20cac6yqjjwudpxqp3mvzv
response=$(validateaddress $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)) response=$(validateaddress $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
spend) spend)
# POST http://192.168.111.152:8080/spend # POST http://192.168.111.152:8080/spend
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233,"eventMessage":"eyJ3aGF0ZXZlciI6MTIzfQo=","confTarget":6,"replaceable":true,"subtractfeefromamount":false} # BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233,"eventMessage":"eyJ3aGF0ZXZlciI6MTIzfQo=","confTarget":6,"replaceable":true,"subtractfeefromamount":false}
response=$(spend "${line}") response=$(spend "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
bumpfee) bumpfee)
# POST http://192.168.111.152:8080/bumpfee # POST http://192.168.111.152:8080/bumpfee
@@ -371,8 +342,7 @@ main() {
# BODY {"txid":"af867c86000da76df7ddb1054b273ca9e034e8c89d049b5b2795f9f590f67648"} # BODY {"txid":"af867c86000da76df7ddb1054b273ca9e034e8c89d049b5b2795f9f590f67648"}
response=$(bumpfee "${line}") response=$(bumpfee "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
createbatcher) createbatcher)
# POST http://192.168.111.152:8080/createbatcher # POST http://192.168.111.152:8080/createbatcher
@@ -389,8 +359,7 @@ main() {
# NOTYET BODY {"batcherLabel":"highfees","feeRate":231.8} # NOTYET BODY {"batcherLabel":"highfees","feeRate":231.8}
response=$(createbatcher "${line}") response=$(createbatcher "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
updatebatcher) updatebatcher)
# POST http://192.168.111.152:8080/updatebatcher # POST http://192.168.111.152:8080/updatebatcher
@@ -413,8 +382,7 @@ main() {
# BODY {"batcherLabel":"fast","confTarget":2} # BODY {"batcherLabel":"fast","confTarget":2}
response=$(updatebatcher "${line}") response=$(updatebatcher "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
addtobatch) addtobatch)
# POST http://192.168.111.152:8080/addtobatch # POST http://192.168.111.152:8080/addtobatch
@@ -440,8 +408,7 @@ main() {
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233,"batcherId":34,"webhookUrl":"https://myCypherApp:3000/batchExecuted"} # BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233,"batcherId":34,"webhookUrl":"https://myCypherApp:3000/batchExecuted"}
response=$(addtobatch "${line}") response=$(addtobatch "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
removefrombatch) removefrombatch)
# POST http://192.168.111.152:8080/removefrombatch # POST http://192.168.111.152:8080/removefrombatch
@@ -459,8 +426,7 @@ main() {
# BODY {"outputId":72} # BODY {"outputId":72}
response=$(removefrombatch "${line}") response=$(removefrombatch "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
batchspend) batchspend)
# POST http://192.168.111.152:8080/batchspend # POST http://192.168.111.152:8080/batchspend
@@ -512,8 +478,7 @@ main() {
# BODY {"batcherId":411,"confTarget":6} # BODY {"batcherId":411,"confTarget":6}
response=$(batchspend "${line}") response=$(batchspend "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getbatcher) getbatcher)
# POST (GET) http://192.168.111.152:8080/getbatcher # POST (GET) http://192.168.111.152:8080/getbatcher
@@ -528,15 +493,18 @@ main() {
# BODY {} # BODY {}
# BODY {"batcherId":34} # BODY {"batcherId":34}
if [ "$http_method" = "GET" ]; then
line='{}'
fi
response=$(getbatcher "${line}") response=$(getbatcher "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getbatchdetails) getbatchdetails)
# POST (GET) http://192.168.111.152:8080/getbatchdetails # POST (GET) http://192.168.111.152:8080/getbatchdetails
# #
# args: # args:
# - batcherId, optional, id of the batcher, overrides batcherLabel, default batcher will be spent if not supplied # - batcherId, optional, id of the batcher, overrides batcherLabel, default batcher will be used if not supplied
# - batcherLabel, optional, label of the batcher, default batcher will be used if not supplied # - batcherLabel, optional, label of the batcher, default batcher will be used if not supplied
# - txid, optional, if you want the details of an executed batch, supply the batch txid, will return current pending batch # - txid, optional, if you want the details of an executed batch, supply the batch txid, will return current pending batch
# if not supplied # if not supplied
@@ -571,8 +539,7 @@ main() {
# BODY {"batcherId":34} # BODY {"batcherId":34}
response=$(getbatchdetails "${line}") response=$(getbatchdetails "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
listbatchers) listbatchers)
# curl (GET) http://192.168.111.152:8080/listbatchers # curl (GET) http://192.168.111.152:8080/listbatchers
@@ -586,24 +553,21 @@ main() {
# "error":null} # "error":null}
response=$(listbatchers) response=$(listbatchers)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
bitcoin_estimatesmartfee) bitcoin_estimatesmartfee)
# POST http://192.168.111.152:8080/bitcoin_estimatesmartfee # POST http://192.168.111.152:8080/bitcoin_estimatesmartfee
# BODY {"confTarget":2} # BODY {"confTarget":2}
response=$(bitcoin_estimatesmartfee "$(echo "${line}" | jq -r ".confTarget")") response=$(bitcoin_estimatesmartfee "$(echo "${line}" | jq -r ".confTarget")")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
deriveindex) deriveindex)
# curl GET http://192.168.111.152:8080/deriveindex/25-30 # curl GET http://192.168.111.152:8080/deriveindex/25-30
# curl GET http://192.168.111.152:8080/deriveindex/34 # curl GET http://192.168.111.152:8080/deriveindex/34
response=$(deriveindex "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(deriveindex "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
derivepubpath) derivepubpath)
# POST http://192.168.111.152:8080/derivepubpath # POST http://192.168.111.152:8080/derivepubpath
@@ -612,16 +576,14 @@ main() {
# BODY {"pub32":"vpub5SLqN2bLY4WeZF3kL4VqiWF1itbf3A6oRrq9aPf16AZMVWYCuN9TxpAZwCzVgW94TNzZPNc9XAHD4As6pdnExBtCDGYRmNJrcJ4eV9hNqcv","path":"0/25-30"} # BODY {"pub32":"vpub5SLqN2bLY4WeZF3kL4VqiWF1itbf3A6oRrq9aPf16AZMVWYCuN9TxpAZwCzVgW94TNzZPNc9XAHD4As6pdnExBtCDGYRmNJrcJ4eV9hNqcv","path":"0/25-30"}
response=$(derivepubpath "${line}") response=$(derivepubpath "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
deriveindex_bitcoind) deriveindex_bitcoind)
# curl GET http://192.168.111.152:8080/deriveindex_bitcoind/25-30 # curl GET http://192.168.111.152:8080/deriveindex_bitcoind/25-30
# curl GET http://192.168.111.152:8080/deriveindex_bitcoind/34 # curl GET http://192.168.111.152:8080/deriveindex_bitcoind/34
response=$(deriveindex_bitcoind "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(deriveindex_bitcoind "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
derivepubpath_bitcoind) derivepubpath_bitcoind)
# POST http://192.168.111.152:8080/derivepubpath_bitcoind # POST http://192.168.111.152:8080/derivepubpath_bitcoind
@@ -630,45 +592,39 @@ main() {
# BODY {"pub32":"vpub5SLqN2bLY4WeZF3kL4VqiWF1itbf3A6oRrq9aPf16AZMVWYCuN9TxpAZwCzVgW94TNzZPNc9XAHD4As6pdnExBtCDGYRmNJrcJ4eV9hNqcv","path":"0/25-30"} # BODY {"pub32":"vpub5SLqN2bLY4WeZF3kL4VqiWF1itbf3A6oRrq9aPf16AZMVWYCuN9TxpAZwCzVgW94TNzZPNc9XAHD4As6pdnExBtCDGYRmNJrcJ4eV9hNqcv","path":"0/25-30"}
response=$(derivepubpath_bitcoind "${line}") response=$(derivepubpath_bitcoind "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
getmempoolinfo) getmempoolinfo)
# curl GET http://192.168.111.152:8080/getmempoolinfo # curl GET http://192.168.111.152:8080/getmempoolinfo
response=$(get_mempool_info) response=$(get_mempool_info)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_getinfo) ln_getinfo)
# GET http://192.168.111.152:8080/ln_getinfo # GET http://192.168.111.152:8080/ln_getinfo
response=$(ln_getinfo) response=$(ln_getinfo)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_getconnectionstring) ln_getconnectionstring)
# GET http://192.168.111.152:8080/ln_getconnectionstring # GET http://192.168.111.152:8080/ln_getconnectionstring
response=$(ln_get_connection_string) response=$(ln_get_connection_string)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_create_invoice) ln_create_invoice)
# POST http://192.168.111.152:8080/ln_create_invoice # POST http://192.168.111.152:8080/ln_create_invoice
# BODY {"msatoshi":"10000","label":"koNCcrSvhX3dmyFhW","description":"Bylls order #10649","expiry":"900","callback_url":"http://192.168.122.159"} # BODY {"msatoshi":"10000","label":"koNCcrSvhX3dmyFhW","description":"Bylls order #10649","expiry":"900","callback_url":"http://192.168.122.159"}
response=$(ln_create_invoice "${line}") response=$(ln_create_invoice "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_pay) ln_pay)
# POST http://192.168.111.152:8080/ln_pay # POST http://192.168.111.152:8080/ln_pay
# BODY {"bolt11":"lntb1pdca82tpp5gv8mn5jqlj6xztpnt4r472zcyrwf3y2c3cvm4uzg2gqcnj90f83qdp2gf5hgcm0d9hzqnm4w3kx2apqdaexgetjyq3nwvpcxgcqp2g3d86wwdfvyxcz7kce7d3n26d2rw3wf5tzpm2m5fl2z3mm8msa3xk8nv2y32gmzlhwjved980mcmkgq83u9wafq9n4w28amnmwzujgqpmapcr3","expected_msatoshi":"10000","expected_description":"Bitcoin Outlet order #7082"} # BODY {"bolt11":"lntb1pdca82tpp5gv8mn5jqlj6xztpnt4r472zcyrwf3y2c3cvm4uzg2gqcnj90f83qdp2gf5hgcm0d9hzqnm4w3kx2apqdaexgetjyq3nwvpcxgcqp2g3d86wwdfvyxcz7kce7d3n26d2rw3wf5tzpm2m5fl2z3mm8msa3xk8nv2y32gmzlhwjved980mcmkgq83u9wafq9n4w28amnmwzujgqpmapcr3","expected_msatoshi":"10000","expected_description":"Bitcoin Outlet order #7082"}
response=$(ln_pay "${line}") response=$(ln_pay "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_listpays) ln_listpays)
# GET http://192.168.111.152:8080/ln_listpays # GET http://192.168.111.152:8080/ln_listpays
@@ -684,8 +640,7 @@ main() {
fi fi
response=$(ln_listpays "${bolt11}") response=$(ln_listpays "${bolt11}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_paystatus) ln_paystatus)
# GET http://192.168.111.152:8080/ln_paystatus # GET http://192.168.111.152:8080/ln_paystatus
@@ -701,15 +656,13 @@ main() {
fi fi
response=$(ln_paystatus "${bolt11}") response=$(ln_paystatus "${bolt11}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_newaddr) ln_newaddr)
# GET http://192.168.111.152:8080/ln_newaddr # GET http://192.168.111.152:8080/ln_newaddr
response=$(ln_newaddr) response=$(ln_newaddr)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_connectfund) ln_connectfund)
# POST http://192.168.111.152:8080/ln_connectfund # POST http://192.168.111.152:8080/ln_connectfund
@@ -717,64 +670,50 @@ main() {
# curl -H "Content-Type: application/json" -d '{"peer":"nodeId@ip:port","msatoshi":"100000","callbackUrl":"https://callbackUrl/?channelReady=f3y2c3cvm4uzg2gq"}' proxy:8888/ln_connectfund # curl -H "Content-Type: application/json" -d '{"peer":"nodeId@ip:port","msatoshi":"100000","callbackUrl":"https://callbackUrl/?channelReady=f3y2c3cvm4uzg2gq"}' proxy:8888/ln_connectfund
response=$(ln_connectfund "${line}") response=$(ln_connectfund "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_getinvoice) ln_getinvoice)
# GET http://192.168.111.152:8080/ln_getinvoice/label # GET http://192.168.111.152:8080/ln_getinvoice/label
# GET http://192.168.111.152:8080/ln_getinvoice/koNCcrSvhX3dmyFhW # GET http://192.168.111.152:8080/ln_getinvoice/koNCcrSvhX3dmyFhW
response=$(ln_getinvoice "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(ln_getinvoice "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_delinvoice) ln_delinvoice)
# GET http://192.168.111.152:8080/ln_delinvoice/label # GET http://192.168.111.152:8080/ln_delinvoice/label
# GET http://192.168.111.152:8080/ln_delinvoice/koNCcrSvhX3dmyFhW # GET http://192.168.111.152:8080/ln_delinvoice/koNCcrSvhX3dmyFhW
response=$(ln_delinvoice "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(ln_delinvoice "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_decodebolt11) ln_decodebolt11)
# GET http://192.168.111.152:8080/ln_decodebolt11/bolt11 # GET http://192.168.111.152:8080/ln_decodebolt11/bolt11
# GET http://192.168.111.152:8080/ln_decodebolt11/lntb1pdca82tpp5gv8mn5jqlj6xztpnt4r472zcyrwf3y2c3cvm4uzg2gqcnj90f83qdp2gf5hgcm0d9hzqnm4w3kx2apqdaexgetjyq3nwvpcxgcqp2g3d86wwdfvyxcz7kce7d3n26d2rw3wf5tzpm2m5fl2z3mm8msa3xk8nv2y32gmzlhwjved980mcmkgq83u9wafq9n4w28amnmwzujgqpmapcr3 # GET http://192.168.111.152:8080/ln_decodebolt11/lntb1pdca82tpp5gv8mn5jqlj6xztpnt4r472zcyrwf3y2c3cvm4uzg2gqcnj90f83qdp2gf5hgcm0d9hzqnm4w3kx2apqdaexgetjyq3nwvpcxgcqp2g3d86wwdfvyxcz7kce7d3n26d2rw3wf5tzpm2m5fl2z3mm8msa3xk8nv2y32gmzlhwjved980mcmkgq83u9wafq9n4w28amnmwzujgqpmapcr3
response=$(ln_decodebolt11 "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)") response=$(ln_decodebolt11 "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_listpeers) ln_listpeers)
# GET http://192.168.111.152:8080/ln_listpeers # GET http://192.168.111.152:8080/ln_listpeers
response=$(ln_listpeers) response=$(ln_listpeers)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_listfunds) ln_listfunds)
# GET http://192.168.111.152:8080/ln_listfunds # GET http://192.168.111.152:8080/ln_listfunds
response=$(ln_listfunds) response=$(ln_listfunds)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
# ln_listpays)
# # GET http://192.168.111.152:8080/ln_listpays
# response=$(ln_listpays)
# response_to_client "${response}" ${?}
# break
# ;;
ln_getroute) ln_getroute)
# GET http://192.168.111.152:8080/ln_getroute/<node_id>/<msatoshi>/<riskfactor> # GET http://192.168.111.152:8080/ln_getroute/<node_id>/<msatoshi>/<riskfactor>
response=$(ln_getroute "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f4)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f5)") response=$(ln_getroute "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f4)" "$(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f5)")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ln_withdraw) ln_withdraw)
# POST http://192.168.111.152:8080/ln_withdraw # POST http://192.168.111.152:8080/ln_withdraw
# BODY {"destination":"segwitAddress","satoshi":"100000","feerate":0,all: false} # BODY {"destination":"segwitAddress","satoshi":"100000","feerate":0,all: false}
response=$(ln_withdraw "${line}") response=$(ln_withdraw "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ots_stamp) ots_stamp)
# POST http://192.168.111.152:8080/ots_stamp # POST http://192.168.111.152:8080/ots_stamp
@@ -783,15 +722,13 @@ main() {
# curl -v -d "{\"hash\":\"a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8\"}" localhost:8888/ots_stamp # curl -v -d "{\"hash\":\"a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8\"}" localhost:8888/ots_stamp
response=$(serve_ots_stamp "${line}") response=$(serve_ots_stamp "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ots_backoffice) ots_backoffice)
# curl (GET) http://192.168.111.152:8080/ots_upgradeandcallback # curl (GET) http://192.168.111.152:8080/ots_upgradeandcallback
response=$(serve_ots_backoffice) response=$(serve_ots_backoffice)
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ots_getfile) ots_getfile)
# curl (GET) http://192.168.111.152:8080/ots_getfile/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7 # curl (GET) http://192.168.111.152:8080/ots_getfile/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
@@ -808,8 +745,7 @@ main() {
# curl -v -d "{\"hash\":\"a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8\",\"base64otsfile\":\"$(cat a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8.ots | base64 | tr -d '\n')\"}" localhost:8888/ots_verify # curl -v -d "{\"hash\":\"a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8\",\"base64otsfile\":\"$(cat a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8.ots | base64 | tr -d '\n')\"}" localhost:8888/ots_verify
response=$(serve_ots_verify "${line}") response=$(serve_ots_verify "${line}")
response_to_client "${response}" ${?} returncode=$?
break
;; ;;
ots_info) ots_info)
# POST http://192.168.111.152:8080/ots_info # POST http://192.168.111.152:8080/ots_info
@@ -822,22 +758,23 @@ main() {
# curl -v -d "{\"hash\":\"a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8\",\"base64otsfile\":\"$(cat a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8.ots | base64 | tr -d '\n')\"}" localhost:8888/ots_info # curl -v -d "{\"hash\":\"a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8\",\"base64otsfile\":\"$(cat a6ea81a46fec3d02d40815b8667b388351edecedc1cc9f97aab55b566db7aac8.ots | base64 | tr -d '\n')\"}" localhost:8888/ots_info
response=$(serve_ots_info "${line}") response=$(serve_ots_info "${line}")
response_to_client "${response}" ${?} returncode=$?
break ;;
*)
response='{"error": {"code": -32601, "message": "Method not found"}, "id": "1"}'
returncode=1
;; ;;
esac esac
response=$(echo "${response}" | jq -Mc)
response_to_client "${response}" ${returncode}
break break
fi fi
done done
trace "[main] exiting" trace "[main] exiting"
return 0 return ${returncode}
} }
export NODE_RPC_URL=$BTC_NODE_RPC_URL
export TRACING
export DB_PATH
export DB_FILE
main main
returncode=$?
trace "[requesthandler] exiting" trace "[requesthandler] exiting"
exit $? exit ${returncode}
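
The hunks above all apply the same refactoring to main(): each endpoint case now only sets response and returncode, and the reply is compacted and sent once after the esac, with a catch-all *) case for unknown methods. A minimal sketch of the resulting dispatch shape (the action variable is a stand-in for however the matched endpoint is held, not the literal handler code):

  case "${action}" in
    getmempoolinfo)
      response=$(get_mempool_info)
      returncode=$?
      ;;
    *)
      # Unknown endpoint: JSON-RPC style "method not found"
      response='{"error": {"code": -32601, "message": "Method not found"}, "id": "1"}'
      returncode=1
      ;;
  esac
  # Compact the JSON and answer the client exactly once, whatever the case was
  response=$(echo "${response}" | jq -Mc)
  response_to_client "${response}" ${returncode}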

View File

@@ -5,7 +5,7 @@
send_to_watcher_node() { send_to_watcher_node() {
trace "Entering send_to_watcher_node()..." trace "Entering send_to_watcher_node()..."
local node_payload local node_payload
node_payload="$(send_to_bitcoin_node ${WATCHER_NODE_RPC_URL}/${WATCHER_BTC_NODE_DEFAULT_WALLET} ${WATCHER_NODE_RPC_CFG} $@)" node_payload="$(send_to_bitcoin_node ${WATCHER_BTC_NODE_RPC_URL}/${WATCHER_BTC_NODE_DEFAULT_WALLET} ${WATCHER_BTC_NODE_RPC_CFG} $@)"
local returncode=$? local returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -ne 0 ]; then if [ "${returncode}" -ne 0 ]; then
@@ -22,7 +22,7 @@ send_to_watcher_node() {
send_to_xpub_watcher_wallet() { send_to_xpub_watcher_wallet() {
trace "Entering send_to_xpub_watcher_wallet()..." trace "Entering send_to_xpub_watcher_wallet()..."
send_to_bitcoin_node ${WATCHER_NODE_RPC_URL}/${WATCHER_BTC_NODE_XPUB_WALLET} ${WATCHER_NODE_RPC_CFG} $@ send_to_bitcoin_node ${WATCHER_BTC_NODE_RPC_URL}/${WATCHER_BTC_NODE_XPUB_WALLET} ${WATCHER_BTC_NODE_RPC_CFG} $@
local returncode=$? local returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
return ${returncode} return ${returncode}
@@ -33,7 +33,7 @@ send_to_watcher_node_wallet() {
local walletname=$1 local walletname=$1
shift shift
trace "[send_to_watcher_node_wallet] walletname=${walletname}" trace "[send_to_watcher_node_wallet] walletname=${walletname}"
send_to_bitcoin_node ${WATCHER_NODE_RPC_URL}/${walletname} ${WATCHER_NODE_RPC_CFG} $@ send_to_bitcoin_node ${WATCHER_BTC_NODE_RPC_URL}/${walletname} ${WATCHER_BTC_NODE_RPC_CFG} $@
local returncode=$? local returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
return ${returncode} return ${returncode}
@@ -42,7 +42,7 @@ send_to_watcher_node_wallet() {
send_to_spender_node() send_to_spender_node()
{ {
trace "Entering send_to_spender_node()..." trace "Entering send_to_spender_node()..."
send_to_bitcoin_node ${SPENDER_NODE_RPC_URL}/${SPENDER_BTC_NODE_DEFAULT_WALLET} ${SPENDER_NODE_RPC_CFG} $@ send_to_bitcoin_node ${SPENDER_BTC_NODE_RPC_URL}/${SPENDER_BTC_NODE_DEFAULT_WALLET} ${SPENDER_BTC_NODE_RPC_CFG} $@
local returncode=$? local returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
return ${returncode} return ${returncode}

View File

@@ -3,6 +3,35 @@
. ./trace.sh . ./trace.sh
sql() { sql() {
trace "Entering sql()..."
local select_id=${2}
local response
local inserted_id
trace "[sql] psql -qAtX -h postgres -U cyphernode -c \"${1}\""
response=$(psql -qAtX -h postgres -U cyphernode -c "${1}")
returncode=$?
trace_rc ${returncode}
if [ -n "${select_id}" ]; then
if [ "${returncode}" -eq "0" ]; then
inserted_id=$(echo "${response}" | cut -d ' ' -f1)
else
trace "[sql] psql -qAtX -h postgres -U cyphernode -c \"${select_id}\""
inserted_id=$(psql -qAtX -h postgres -U cyphernode -c "${select_id}")
returncode=$?
trace_rc ${returncode}
fi
echo -n "${inserted_id}"
else
echo -n "${response}"
fi
return ${returncode}
}
sql_sqlite() {
trace "sqlite3 -cmd \".timeout 40000\" ${DB_FILE} \"${1}\"" trace "sqlite3 -cmd \".timeout 40000\" ${DB_FILE} \"${1}\""
sqlite3 -cmd ".timeout 40000" ${DB_FILE} "${1}" sqlite3 -cmd ".timeout 40000" ${DB_FILE} "${1}"
@@ -14,16 +43,3 @@ sql() {
return $? return $?
} }
sql_rawtx() {
trace "sqlite3 -cmd \".timeout 40000\" ${DB_FILE}_rawtx \"${1}\""
sqlite3 -cmd ".timeout 40000" ${DB_FILE}_rawtx "${1}"
if [ "$?" -ne 0 ]; then
# SQL didn't work, let's retry to be sure...
trace "SQL didn't work, let's retry..."
sqlite3 -cmd ".timeout 40000" ${DB_FILE}_rawtx "${1}"
fi
return $?
}
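
The new sql() wrapper above takes an optional second argument: a fallback SELECT executed only if the first statement fails, so callers can run INSERT ... RETURNING id and still get a row id back when the insert errors out. A hedged usage sketch (the things table and name column are placeholders, not part of the real schema):

  # First argument: the statement to run (an insert returning its id).
  # Second argument: runs only if the first statement fails, so the caller
  # still ends up with an id on stdout.
  id=$(sql \
    "INSERT INTO things (name) VALUES ('abc') RETURNING id" \
    "SELECT id FROM things WHERE name='abc'")
  trace_rc $?
  trace "[example] id=${id}"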

View File

@@ -1,13 +1,6 @@
#!/bin/sh #!/bin/sh
export PROXY_LISTENING_PORT . ./trace.sh
export WATCHER_NODE_RPC_URL=$WATCHER_BTC_NODE_RPC_URL
export SPENDER_NODE_RPC_URL=$SPENDER_BTC_NODE_RPC_URL
export WATCHER_NODE_RPC_CFG=$WATCHER_BTC_NODE_RPC_CFG
export SPENDER_NODE_RPC_CFG=$SPENDER_BTC_NODE_RPC_CFG
export TRACING
export DB_PATH
export DB_FILE
trim() { trim() {
echo -e "$1" | sed -e 's/^[[:space:]]*//' | sed -e 's/[[:space:]]*$//' echo -e "$1" | sed -e 's/^[[:space:]]*//' | sed -e 's/[[:space:]]*$//'
@@ -16,12 +9,12 @@ trim() {
createCurlConfig() { createCurlConfig() {
if [[ ''$1 == '' ]]; then if [[ ''$1 == '' ]]; then
echo "Missing file name: Check your *_BTC_NODE_RPC_CFG" trace "[startproxy] Missing file name: Check your *_BTC_NODE_RPC_CFG"
return return
fi fi
if [[ ''$2 == '' ]]; then if [[ ''$2 == '' ]]; then
echo "Missing content: Check your *_BTC_NODE_RPC_USER" trace "[startproxy] Missing content: Check your *_BTC_NODE_RPC_USER"
return return
fi fi
@@ -30,19 +23,52 @@ createCurlConfig() {
} }
if [ ! -e ${DB_FILE} ]; then if [ -e ${DB_PATH}/.dbfailed ]; then
echo "DB not found, creating..." touch /container_monitor/proxy_dbfailed
cat cyphernode.sql | sqlite3 $DB_FILE trace "[startproxy] A previous database creation/migration failed. Stopping."
cat rawtx.sql | sqlite3 ${DB_FILE}_rawtx trace "[startproxy] A file called .dbfailed has been created. Fix the migration errors, remove .dbfailed and retry."
trace "[startproxy] Exiting."
sleep 30
exit 1
else else
echo "DB found, migrating..." rm -f /container_monitor/proxy_dbfailed
fi
trace "[startproxy] Waiting for PostgreSQL to be ready..."
while [ ! -f "/container_monitor/postgres_ready" ]; do trace "[startproxy] PostgreSQL not ready" ; sleep 10 ; done
trace "[startproxy] PostgreSQL ready!"
if [ ! -e ${DB_FILE} ]; then
trace "[startproxy] DB not found, creating..."
cat cyphernode.sql | sqlite3 $DB_FILE
psql -h postgres -f cyphernode.postgresql -U cyphernode
returncode=$?
trace_rc ${returncode}
else
trace "[startproxy] DB found, migrating..."
for script in sqlmigrate*.sh; do for script in sqlmigrate*.sh; do
sh $script sh $script
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -ne "0" ]; then
break
fi
done done
fi fi
if [ "${returncode}" -ne "0" ]; then
touch ${DB_PATH}/.dbfailed
touch /container_monitor/proxy_dbfailed
trace "[startproxy] Database creation/migration failed. Stopping."
trace "[startproxy] A file called .dbfailed has been created in your proxy datapath. Fix the migration errors, remove .dbfailed and retry."
trace "[startproxy] Exiting."
sleep 30
exit ${returncode}
fi
rm -f /container_monitor/proxy_ready
chmod 0600 $DB_FILE chmod 0600 $DB_FILE
chmod 0600 ${DB_FILE}_rawtx
createCurlConfig ${WATCHER_BTC_NODE_RPC_CFG} ${WATCHER_BTC_NODE_RPC_USER} createCurlConfig ${WATCHER_BTC_NODE_RPC_CFG} ${WATCHER_BTC_NODE_RPC_USER}
createCurlConfig ${SPENDER_BTC_NODE_RPC_CFG} ${SPENDER_BTC_NODE_RPC_USER} createCurlConfig ${SPENDER_BTC_NODE_RPC_CFG} ${SPENDER_BTC_NODE_RPC_USER}
@@ -52,4 +78,4 @@ if [ "${FEATURE_LIGHTNING}" = "true" ]; then
./waitanyinvoice.sh & ./waitanyinvoice.sh &
fi fi
nc -vlkp${PROXY_LISTENING_PORT} -e ./requesthandler.sh exec nc -vlkp${PROXY_LISTENING_PORT} -e ./requesthandler.sh
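
Startup is now gated on sentinel files: a .dbfailed marker in the proxy datapath short-circuits the whole script after a failed creation/migration, and /container_monitor/postgres_ready must exist before any psql call is attempted. The core of that pattern, condensed:

  # Refuse to start if a previous migration failed; an operator must fix the
  # migration, remove the marker, then restart the proxy.
  if [ -e "${DB_PATH}/.dbfailed" ]; then
    trace "[startproxy] previous migration failed, remove ${DB_PATH}/.dbfailed to retry"
    exit 1
  fi

  # Block until the postgres container publishes its readiness sentinel.
  while [ ! -f /container_monitor/postgres_ready ]; do
    sleep 10
  done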

View File

@@ -1,7 +1,6 @@
#!/bin/sh #!/bin/sh
trace() trace() {
{
if [ -n "${TRACING}" ]; then if [ -n "${TRACING}" ]; then
local str="$(date -Is) $$ ${1}" local str="$(date -Is) $$ ${1}"
echo "${str}" 1>&2 echo "${str}" 1>&2
@@ -9,8 +8,7 @@ trace()
fi fi
} }
trace_rc() trace_rc() {
{
if [ -n "${TRACING}" ]; then if [ -n "${TRACING}" ]; then
local str="$(date -Is) $$ Last return code: ${1}" local str="$(date -Is) $$ Last return code: ${1}"
echo "${str}" 1>&2 echo "${str}" 1>&2

View File

@@ -16,10 +16,10 @@ unwatchrequest() {
# Let's lowercase bech32 addresses # Let's lowercase bech32 addresses
address=$(lowercase_if_bech32 "${address}") address=$(lowercase_if_bech32 "${address}")
trace "[unwatchrequest] Unwatch request id ${watchid} on address ${address} with url0conf ${unconfirmedCallbackURL} and url1conf ${confirmedCallbackURL}" trace "[unwatchrequest] Unwatch request id ${watchid} on address \"${address}\" with url0conf \"${unconfirmedCallbackURL}\" and url1conf \"${confirmedCallbackURL}\""
if [ "${watchid}" != "null" ]; then if [ "${watchid}" != "null" ]; then
sql "UPDATE watching SET watching=0 WHERE id=${watchid}" sql "UPDATE watching SET watching=false WHERE id=${watchid}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -35,11 +35,11 @@ unwatchrequest() {
cb1_where=" AND callback1conf='${confirmedCallbackURL}'" cb1_where=" AND callback1conf='${confirmedCallbackURL}'"
fi fi
sql "UPDATE watching SET watching=0 WHERE address='${address}'${cb0_where}${cb1_where}" sql "UPDATE watching SET watching=false WHERE address='${address}'${cb0_where}${cb1_where}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
data="{\"event\":\"unwatch\",\"address\":\"${address}\",\"unconfirmedCallbackURL\":${unconfirmedCallbackURL},\"confirmedCallbackURL\":${confirmedCallbackURL}}" data="{\"event\":\"unwatch\",\"address\":\"${address}\",\"unconfirmedCallbackURL\":\"${unconfirmedCallbackURL}\",\"confirmedCallbackURL\":\"${confirmedCallbackURL}\"}"
fi fi
trace "[unwatchrequest] responding=${data}" trace "[unwatchrequest] responding=${data}"
@@ -52,20 +52,19 @@ unwatchrequest() {
unwatchpub32request() { unwatchpub32request() {
trace "Entering unwatchpub32request()..." trace "Entering unwatchpub32request()..."
# GET http://192.168.111.152:8080/unwatchxpubbyxpub/tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk
local request=${1} local request=${1}
local pub32=$(echo "${request}" | cut -d ' ' -f2 | cut -d '/' -f3) local pub32=$(echo "${request}" | cut -d ' ' -f2 | cut -d '/' -f3)
local id local id
local returncode local returncode
trace "[unwatchpub32request] Unwatch pub32 ${pub32}" trace "[unwatchpub32request] Unwatch pub32 ${pub32}"
id=$(sql "SELECT id FROM watching_by_pub32 WHERE pub32='${pub32}'") sql "UPDATE watching w SET watching=false FROM watching_by_pub32 w32 WHERE w.watching_by_pub32_id=w32.id AND pub32='${pub32}'"
trace "[unwatchpub32request] id: ${id}"
sql "UPDATE watching_by_pub32 SET watching=0 WHERE id=${id}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
sql "UPDATE watching SET watching=0 WHERE watching_by_pub32_id=\"${id}\"" sql "UPDATE watching_by_pub32 SET watching=false WHERE pub32='${pub32}'"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -86,16 +85,11 @@ unwatchpub32labelrequest() {
local returncode local returncode
trace "[unwatchpub32labelrequest] Unwatch xpub label ${label}" trace "[unwatchpub32labelrequest] Unwatch xpub label ${label}"
id=$(sql "SELECT id FROM watching_by_pub32 WHERE label='${label}'") sql "UPDATE watching w SET watching=false FROM watching_by_pub32 w32 WHERE w.watching_by_pub32_id=w32.id AND w32.label='${label}'"
returncode=$?
trace_rc ${returncode}
trace "[unwatchpub32labelrequest] id: ${id}"
sql "UPDATE watching_by_pub32 SET watching=0 WHERE id=${id}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
sql "UPDATE watching SET watching=0 WHERE watching_by_pub32_id=\"${id}\"" sql "UPDATE watching_by_pub32 SET watching=false WHERE label='${label}'"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -112,23 +106,30 @@ unwatchtxidrequest() {
local watchid=${1} local watchid=${1}
local txid=${2} local txid=${2}
local unconfirmedCallbackURL=${3} local unconfirmedCallbackURL=${3}
local uc_pg uc_json
[ "${unconfirmedCallbackURL}" = "null" ] && uc_pg=" IS NULL" && uc_json="null" || uc_pg="='${unconfirmedCallbackURL}'" && uc_json="\"${unconfirmedCallbackURL}\""
local confirmedCallbackURL=${4} local confirmedCallbackURL=${4}
local c_pg c_json
[ "${confirmedCallbackURL}" = "null" ] && c_pg=" IS NULL" && c_json="null" || c_pg="='${confirmedCallbackURL}'" && c_json="\"${confirmedCallbackURL}\""
local returncode local returncode
trace "[unwatchtxidrequest] Unwatch request id ${watchid} on txid ${txid} with url0conf ${unconfirmedCallbackURL} and url1conf ${confirmedCallbackURL}" trace "[unwatchtxidrequest] Unwatch request id ${watchid} on txid \"${txid}\" with url0conf \"${unconfirmedCallbackURL}\" and url1conf \"${confirmedCallbackURL}\""
if [ "${watchid}" != "null" ]; then if [ "${watchid}" != "null" ]; then
sql "UPDATE watching_by_txid SET watching=0 WHERE id=${watchid}" sql "UPDATE watching_by_txid SET watching=false WHERE id=${watchid}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
data="{\"event\":\"unwatchtxid\",\"id\":${watchid}}" data="{\"event\":\"unwatchtxid\",\"id\":${watchid}}"
else else
sql "UPDATE watching_by_txid SET watching=0 WHERE txid='${txid}' AND callback0conf=${unconfirmedCallbackURL} AND callback1conf=${confirmedCallbackURL}" sql "UPDATE watching_by_txid SET watching=false WHERE txid='${txid}' AND callback0conf${uc_pg} AND callback1conf${c_pg}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
data="{\"event\":\"unwatchtxid\",\"txid\":\"${txid}\",\"unconfirmedCallbackURL\":${unconfirmedCallbackURL},\"confirmedCallbackURL\":${confirmedCallbackURL}}" data="{\"event\":\"unwatchtxid\",\"txid\":\"${txid}\",\"unconfirmedCallbackURL\":${uc_json},\"confirmedCallbackURL\":${c_json}}"
fi fi
trace "[unwatchtxidrequest] responding=${data}" trace "[unwatchtxidrequest] responding=${data}"

View File

@@ -42,15 +42,17 @@ ln_waitanyinvoice() {
status=$(echo "${result}" | jq -r ".status") status=$(echo "${result}" | jq -r ".status")
paid_at=$(echo "${result}" | jq -r ".paid_at") paid_at=$(echo "${result}" | jq -r ".paid_at")
sql "UPDATE ln_invoice SET status=\"${status}\", pay_index=${pay_index}, msatoshi_received=${msatoshi_received}, paid_at=${paid_at} WHERE bolt11=\"${bolt11}\"" sql "UPDATE ln_invoice SET status='${status}', pay_index=${pay_index}, msatoshi_received=${msatoshi_received}, paid_at=${paid_at} WHERE bolt11='${bolt11}'"
row=$(sql "SELECT id, label, bolt11, callback_url, payment_hash, msatoshi, status, pay_index, msatoshi_received, paid_at, description, expires_at FROM ln_invoice WHERE callback_url<>\"\" AND NOT calledback AND bolt11=\"${bolt11}\"") row=$(sql "SELECT id, label, bolt11, callback_url, payment_hash, msatoshi, status, pay_index, msatoshi_received, paid_at, description, expires_at FROM ln_invoice WHERE callback_url<>'' AND NOT calledback AND bolt11='${bolt11}'")
if [ -n "${row}" ]; then if [ -n "${row}" ]; then
ln_manage_callback ${row} ln_manage_callback ${row}
fi fi
sql "UPDATE cyphernode_props SET value="${pay_index}" WHERE property=\"pay_index\"" sql "UPDATE cyphernode_props SET value='${pay_index}' WHERE property='pay_index'"
fi fi
return ${returncode}
} }
while : while :
@@ -58,5 +60,12 @@ do
pay_index=$(sql "SELECT value FROM cyphernode_props WHERE property='pay_index'") pay_index=$(sql "SELECT value FROM cyphernode_props WHERE property='pay_index'")
trace "[waitanyinvoice] pay_index=${pay_index}" trace "[waitanyinvoice] pay_index=${pay_index}"
ln_waitanyinvoice ${pay_index} ln_waitanyinvoice ${pay_index}
sleep 5
if [ "$?" -eq "0" ]; then
# lightning is ready, let's wait 1 sec to fetch next pay_index...
sleep 1
else
# lightning is not ready, let's wait a little more...
sleep 5
fi
done done
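
The loop change makes the polling interval depend on whether ln_waitanyinvoice() reached lightningd: on success it fetches the next pay_index almost immediately, otherwise it backs off. The same loop, written with an explicit test on the return code:

  while :
  do
    pay_index=$(sql "SELECT value FROM cyphernode_props WHERE property='pay_index'")
    if ln_waitanyinvoice "${pay_index}"; then
      # lightningd answered: look for the next paid invoice right away
      sleep 1
    else
      # lightningd not reachable or not ready yet: wait longer before retrying
      sleep 5
    fi
  done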

View File

@@ -42,7 +42,7 @@ spend() {
tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n') tx_raw_details=$(get_rawtransaction ${txid} | tr -d '\n')
# Amounts and fees are negative when spending so we absolute those fields # Amounts and fees are negative when spending so we absolute those fields
local tx_hash=$(echo "${tx_raw_details}" | jq '.result.hash') local tx_hash=$(echo "${tx_raw_details}" | jq -r '.result.hash')
local tx_ts_firstseen=$(echo "${tx_details}" | jq '.result.timereceived') local tx_ts_firstseen=$(echo "${tx_details}" | jq '.result.timereceived')
local tx_amount=$(echo "${tx_details}" | jq '.result.amount | fabs' | awk '{ printf "%.8f", $0 }') local tx_amount=$(echo "${tx_details}" | jq '.result.amount | fabs' | awk '{ printf "%.8f", $0 }')
local tx_size=$(echo "${tx_raw_details}" | jq '.result.size') local tx_size=$(echo "${tx_raw_details}" | jq '.result.size')
@@ -50,9 +50,6 @@ spend() {
local tx_replaceable=$(echo "${tx_details}" | jq -r '.result."bip125-replaceable"') local tx_replaceable=$(echo "${tx_details}" | jq -r '.result."bip125-replaceable"')
tx_replaceable=$([ ${tx_replaceable} = "yes" ] && echo "true" || echo "false") tx_replaceable=$([ ${tx_replaceable} = "yes" ] && echo "true" || echo "false")
local fees=$(echo "${tx_details}" | jq '.result.fee | fabs' | awk '{ printf "%.8f", $0 }') local fees=$(echo "${tx_details}" | jq '.result.fee | fabs' | awk '{ printf "%.8f", $0 }')
# Sometimes raw tx are too long to be passed as paramater, so let's write
# it to a temp file for it to be read by sqlite3 and then delete the file
echo "${tx_raw_details}" > spend-rawtx-${txid}-$$.blob
######################################################################################################## ########################################################################################################
# Let's publish the event if needed # Let's publish the event if needed
@@ -73,20 +70,17 @@ spend() {
######################################################################################################## ########################################################################################################
# Let's insert the txid in our little DB -- then we'll already have it when receiving confirmation # Let's insert the txid in our little DB -- then we'll already have it when receiving confirmation
sql "INSERT OR IGNORE INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target) VALUES (\"${txid}\", ${tx_hash}, 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target})" id_inserted=$(sql "INSERT INTO tx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target)"\
" VALUES ('${txid}', '${tx_hash}', 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target})"\
" RETURNING id" \
"SELECT id FROM tx WHERE txid='${txid}'")
trace_rc $? trace_rc $?
sql_rawtx "INSERT OR IGNORE INTO rawtx (txid, hash, confirmations, timereceived, fee, size, vsize, is_replaceable, conf_target, raw_tx) VALUES (\"${txid}\", ${tx_hash}, 0, ${tx_ts_firstseen}, ${fees}, ${tx_size}, ${tx_vsize}, ${tx_replaceable}, ${conf_target}, readfile('spend-rawtx-${txid}-$$.blob'))" sql "INSERT INTO recipient (address, amount, tx_id) VALUES ('${address}', ${amount}, ${id_inserted})"\
trace_rc $? " ON CONFLICT DO NOTHING"
id_inserted=$(sql "SELECT id FROM tx WHERE txid=\"${txid}\"")
trace_rc $?
sql "INSERT OR IGNORE INTO recipient (address, amount, tx_id) VALUES (\"${address}\", ${amount}, ${id_inserted})"
trace_rc $? trace_rc $?
data="{\"status\":\"accepted\"" data="{\"status\":\"accepted\""
data="${data},\"txid\":\"${txid}\",\"hash\":${tx_hash},\"details\":{\"address\":\"${address}\",\"amount\":${amount},\"firstseen\":${tx_ts_firstseen},\"size\":${tx_size},\"vsize\":${tx_vsize},\"replaceable\":${tx_replaceable},\"fee\":${fees},\"subtractfeefromamount\":${subtractfeefromamount}}}" data="${data},\"txid\":\"${txid}\",\"hash\":\"${tx_hash}\",\"details\":{\"address\":\"${address}\",\"amount\":${amount},\"firstseen\":${tx_ts_firstseen},\"size\":${tx_size},\"vsize\":${tx_vsize},\"replaceable\":${tx_replaceable},\"fee\":${fees},\"subtractfeefromamount\":${subtractfeefromamount}}}"
# Delete the temp file containing the raw tx (see above)
rm spend-rawtx-${txid}-$$.blob
else else
local message=$(echo "${response}" | jq -e ".error.message") local message=$(echo "${response}" | jq -e ".error.message")
data="{\"message\":${message}}" data="{\"message\":${message}}"
@@ -222,7 +216,7 @@ getbalancebyxpublabel() {
trace "[getbalancebyxpublabel] label=${label}" trace "[getbalancebyxpublabel] label=${label}"
local xpub local xpub
xpub=$(sql "SELECT pub32 FROM watching_by_pub32 WHERE label=\"${label}\"") xpub=$(sql "SELECT pub32 FROM watching_by_pub32 WHERE label='${label}'")
trace "[getbalancebyxpublabel] xpub=${xpub}" trace "[getbalancebyxpublabel] xpub=${xpub}"
getbalancebyxpub ${xpub} "getbalancebyxpublabel" getbalancebyxpub ${xpub} "getbalancebyxpublabel"
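
The spend() hunk also swaps SQLite's INSERT OR IGNORE for PostgreSQL's standard conflict clause; both are idempotent inserts that silently skip a duplicate row. Side by side, using the recipient insert from the hunk above:

  # SQLite (before):    INSERT OR IGNORE INTO recipient (address, amount, tx_id) VALUES (...)
  # PostgreSQL (after): same effect with the standard conflict clause
  sql "INSERT INTO recipient (address, amount, tx_id) VALUES ('${address}', ${amount}, ${id_inserted}) ON CONFLICT DO NOTHING"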

View File

@@ -11,11 +11,72 @@ watchrequest() {
local returncode local returncode
local request=${1} local request=${1}
local address=$(echo "${request}" | jq -r ".address") local address address_pg
local cb0conf_url=$(echo "${request}" | jq ".unconfirmedCallbackURL") address=$(echo "${request}" | jq -re ".address")
local cb1conf_url=$(echo "${request}" | jq ".confirmedCallbackURL") if [ "$?" -ne "0" ]; then
local event_message=$(echo "${request}" | jq ".eventMessage") # address not found or null
local label=$(echo "${request}" | jq ".label") result='{"result":null,'\
'"error":{'\
'"code":-5,'\
'"message":"address required"}}'
trace "[watchrequest] address required"
trace "[watchrequest] responding=${result}"
echo "${result}"
return 1
else
address_pg="'${address}'"
fi
local cb0conf_url cb0conf_url_pg cb0conf_url_pg_where cb0conf_url_json
cb0conf_url=$(echo "${request}" | jq -re ".unconfirmedCallbackURL")
if [ "$?" -ne "0" ]; then
# unconfirmedCallbackURL not found or null
cb0conf_url_json="null"
cb0conf_url_pg="null"
cb0conf_url_pg_where=" IS NULL"
else
cb0conf_url_json="\"${cb0conf_url}\""
cb0conf_url_pg="'${cb0conf_url}'"
cb0conf_url_pg_where="=${cb0conf_url_pg}"
fi
local cb1conf_url cb1conf_url_pg cb1conf_url_pg_where cb1conf_url_json
cb1conf_url=$(echo "${request}" | jq -re ".confirmedCallbackURL")
if [ "$?" -ne "0" ]; then
# confirmedCallbackURL not found or null
cb1conf_url_json="null"
cb1conf_url_pg="null"
cb1conf_url_pg_where=" IS NULL"
else
cb1conf_url_json="\"${cb1conf_url}\""
cb1conf_url_pg="'${cb1conf_url}'"
cb1conf_url_pg_where="=${cb1conf_url_pg}"
fi
local event_message event_message_pg event_message_json
event_message=$(echo "${request}" | jq -re ".eventMessage")
if [ "$?" -ne "0" ]; then
# eventMessage not found or null
event_message_json="null"
event_message_pg="null"
else
event_message_json="\"${event_message}\""
event_message_pg="'${event_message}'"
fi
local label label_pg label_json
label=$(echo "${request}" | jq -re ".label")
if [ "$?" -ne "0" ]; then
# label not found or null
label_json="null"
label_pg="null"
else
label_json="\"${label}\""
label_pg="'${label}'"
fi
local imported local imported
local inserted local inserted
local id_inserted local id_inserted
@@ -24,23 +85,23 @@ watchrequest() {
# Let's lowercase bech32 addresses # Let's lowercase bech32 addresses
address=$(lowercase_if_bech32 "${address}") address=$(lowercase_if_bech32 "${address}")
trace "[watchrequest] Watch request on address (\"${address}\"), cb 0-conf (${cb0conf_url}), cb 1-conf (${cb1conf_url}) with event_message=${event_message} and label=${label}" trace "[watchrequest] Watch request on address (${address}), cb 0-conf (${cb0conf_url_json}), cb 1-conf (${cb1conf_url_json}) with event_message=${event_message_json} and label=${label_json}"
local isvalid local isvalid
isvalid=$(validateaddress "${address}" | jq ".result.isvalid") isvalid=$(validateaddress "${address}" | jq ".result.isvalid")
if [ "${isvalid}" != "true" ]; then if [ "${isvalid}" != "true" ]; then
result="{ result='{'\
\"result\":null, '"result":null,'\
\"error\":{ '"error":{'\
\"code\":-5, '"code":-5,'\
\"message\":\"Invalid address\", '"message":"Invalid address",'\
\"data\":{ '"data":{'\
\"event\":\"watch\", '"event":"watch",'\
\"address\":\"${address}\", '"address":'"${address}"','\
\"unconfirmedCallbackURL\":${cb0conf_url}, '"unconfirmedCallbackURL":'${cb0conf_url_json}','\
\"confirmedCallbackURL\":${cb1conf_url}, '"confirmedCallbackURL":'${cb1conf_url_json}','\
\"label\":${label}, '"label":'${label_json}','\
\"eventMessage\":${event_message}}}}" '"eventMessage":'${event_message_json}'}}}'
trace "[watchrequest] Invalid address" trace "[watchrequest] Invalid address"
trace "[watchrequest] responding=${result}" trace "[watchrequest] responding=${result}"
@@ -53,21 +114,26 @@ watchrequest() {
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then if [ "${returncode}" -eq 0 ]; then
imported=1 imported=true
else else
imported=0 imported=false
fi fi
sql "INSERT INTO watching (address, watching, callback0conf, callback1conf, imported, event_message, label) VALUES (\"${address}\", 1, ${cb0conf_url}, ${cb1conf_url}, ${imported}, ${event_message}, ${label}) ON CONFLICT(address,callback0conf,callback1conf) DO UPDATE SET watching=1, event_message=${event_message}, calledback0conf=0, calledback1conf=0, label=${label}" id_inserted=$(sql "INSERT INTO watching (address, watching, callback0conf, callback1conf, imported, event_message, label)"\
" VALUES (${address_pg}, true, ${cb0conf_url_pg}, ${cb1conf_url_pg}, ${imported}, ${event_message_pg}, ${label_pg})"\
" ON CONFLICT (address, COALESCE(callback0conf, ''), COALESCE(callback1conf, '')) DO"\
" UPDATE SET watching=true, event_message=${event_message_pg}, calledback0conf=false, calledback1conf=false, label=${label_pg}"\
" RETURNING id" \
"SELECT id FROM watching WHERE address=${address_pg} AND callback0conf${cb0conf_url_pg_where} AND callback1conf${cb1conf_url_pg_where}")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
trace "[watchrequest] id_inserted=${id_inserted}"
if [ "${returncode}" -eq 0 ]; then if [ "${returncode}" -eq 0 ]; then
inserted=1 inserted=true
id_inserted=$(sql "SELECT id FROM watching WHERE address='${address}' AND callback0conf=${cb0conf_url} AND callback1conf=${cb1conf_url}")
trace "[watchrequest] id_inserted: ${id_inserted}" trace "[watchrequest] id_inserted: ${id_inserted}"
else else
inserted=0 inserted=false
fi fi
local fees2blocks local fees2blocks
@@ -83,19 +149,19 @@ watchrequest() {
fees144blocks=$(getestimatesmartfee 144) fees144blocks=$(getestimatesmartfee 144)
trace_rc $? trace_rc $?
result="{\"id\":\"${id_inserted}\", result='{"id":'${id_inserted}','\
\"event\":\"watch\", '"event":"watch",'\
\"imported\":${imported}, '"imported":'${imported}','\
\"inserted\":${inserted}, '"inserted":'${inserted}','\
\"address\":\"${address}\", '"address":"'${address}'",'\
\"unconfirmedCallbackURL\":${cb0conf_url}, '"unconfirmedCallbackURL":'${cb0conf_url_json}','\
\"confirmedCallbackURL\":${cb1conf_url}, '"confirmedCallbackURL":'${cb1conf_url_json}','\
\"label\":${label}, '"label":'${label_json}','\
\"estimatesmartfee2blocks\":${fees2blocks}, '"estimatesmartfee2blocks":'${fees2blocks}','\
\"estimatesmartfee6blocks\":${fees6blocks}, '"estimatesmartfee6blocks":'${fees6blocks}','\
\"estimatesmartfee36blocks\":${fees36blocks}, '"estimatesmartfee36blocks":'${fees36blocks}','\
\"estimatesmartfee144blocks\":${fees144blocks}, '"estimatesmartfee144blocks":'${fees144blocks}','\
\"eventMessage\":${event_message}}" '"eventMessage":'${event_message_json}'}'
trace "[watchrequest] responding=${result}" trace "[watchrequest] responding=${result}"
echo "${result}" echo "${result}"
@@ -106,19 +172,56 @@ watchrequest() {
watchpub32request() { watchpub32request() {
trace "Entering watchpub32request()..." trace "Entering watchpub32request()..."
# BODY {"label":"4421","pub32":"tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk","path":"0/n","nstart":0,"unconfirmedCallbackURL":"192.168.111.233:1111/callback0conf","confirmedCallbackURL":"192.168.111.233:1111/callback1conf"}
# Required:
# - "label"
# - "pub32"
# - "path"
# - "nstart"
local returncode local returncode
local request=${1} local request=${1}
local label=$(echo "${request}" | jq ".label") local label=$(echo "${request}" | jq -er ".label")
if [ "$?" -ne "0" ]; then
# label not found or null
trace "[watchpub32request] label required"
echo '{"error":"label required","event":"watchxpub"}'
return 1
fi
trace "[watchpub32request] label=${label}" trace "[watchpub32request] label=${label}"
local pub32=$(echo "${request}" | jq ".pub32") local pub32=$(echo "${request}" | jq -er ".pub32")
if [ "$?" -ne "0" ]; then
# pub32 not found or null
trace "[watchpub32request] pub32 required"
echo '{"error":"pub32 required","event":"watchxpub"}'
return 1
fi
trace "[watchpub32request] pub32=${pub32}" trace "[watchpub32request] pub32=${pub32}"
local path=$(echo "${request}" | jq ".path") local path=$(echo "${request}" | jq -er ".path")
if [ "$?" -ne "0" ]; then
# path not found or null
trace "[watchpub32request] path required"
echo '{"error":"path required","event":"watchxpub"}'
return 1
fi
trace "[watchpub32request] path=${path}" trace "[watchpub32request] path=${path}"
local nstart=$(echo "${request}" | jq ".nstart") local nstart=$(echo "${request}" | jq -er ".nstart")
if [ "$?" -ne "0" ]; then
# nstart not found or null
trace "[watchpub32request] nstart required"
echo '{"error":"nstart required","event":"watchxpub"}'
return 1
fi
trace "[watchpub32request] nstart=${nstart}" trace "[watchpub32request] nstart=${nstart}"
local cb0conf_url=$(echo "${request}" | jq ".unconfirmedCallbackURL")
local cb0conf_url=$(echo "${request}" | jq -r ".unconfirmedCallbackURL // empty")
trace "[watchpub32request] cb0conf_url=${cb0conf_url}" trace "[watchpub32request] cb0conf_url=${cb0conf_url}"
local cb1conf_url=$(echo "${request}" | jq ".confirmedCallbackURL") local cb1conf_url=$(echo "${request}" | jq -r ".confirmedCallbackURL // empty")
trace "[watchpub32request] cb1conf_url=${cb1conf_url}" trace "[watchpub32request] cb1conf_url=${cb1conf_url}"
watchpub32 "${label}" "${pub32}" "${path}" "${nstart}" "${cb0conf_url}" "${cb1conf_url}" watchpub32 "${label}" "${pub32}" "${path}" "${nstart}" "${cb0conf_url}" "${cb1conf_url}"
@@ -131,23 +234,50 @@ watchpub32request() {
watchpub32() { watchpub32() {
trace "Entering watchpub32()..." trace "Entering watchpub32()..."
# Expecting args without quotes
# label, pub32, path and nstart are required
# When cb0conf_url and cb1conf_url are empty, means null
local returncode local returncode
local label=${1} local label=${1}
trace "[watchpub32] label=${label}" local label_pg="'${label}'"
trace "[watchpub32] label=${label}, label_pg=${label_pg}"
local pub32=${2} local pub32=${2}
trace "[watchpub32] pub32=${pub32}" local pub32_pg="'${pub32}'"
trace "[watchpub32] pub32=${pub32}, pub32_pg=${pub32_pg}"
local path=${3} local path=${3}
trace "[watchpub32] path=${path}" local path_pg="'${path}'"
trace "[watchpub32] path=${path}, path_pg=${path_pg}"
local nstart=${4} local nstart=${4}
trace "[watchpub32] nstart=${nstart}" trace "[watchpub32] nstart=${nstart}"
local last_n=$((${nstart}+${XPUB_DERIVATION_GAP})) local last_n=$((${nstart}+${XPUB_DERIVATION_GAP}))
trace "[watchpub32] last_n=${last_n}" trace "[watchpub32] last_n=${last_n}"
local cb0conf_url=${5} local cb0conf_url=${5}
trace "[watchpub32] cb0conf_url=${cb0conf_url}" local cb0conf_url_pg cb0conf_url_json
if [ -z "${cb0conf_url}" ]; then
# Empty url
cb0conf_url_json="null"
cb0conf_url_pg="null"
else
cb0conf_url_json="\"${cb0conf_url}\""
cb0conf_url_pg="'${cb0conf_url}'"
fi
trace "[watchpub32] cb0conf_url=${cb0conf_url}, cb0conf_url_pg=${cb0conf_url_pg}"
local cb1conf_url=${6} local cb1conf_url=${6}
trace "[watchpub32] cb1conf_url=${cb1conf_url}" local cb1conf_url_pg cb1conf_url_json
if [ -z "${cb1conf_url}" ]; then
# Empty url
cb1conf_url_json="null"
cb1conf_url_pg="null"
else
cb1conf_url_json="\"${cb1conf_url}\""
cb1conf_url_pg="'${cb1conf_url}'"
fi
trace "[watchpub32] cb1conf_url=${cb1conf_url}, cb1conf_url_pg=${cb1conf_url_pg}"
# upto_n is used when extending the watching window # upto_n is used when extending the watching window
# If this is supplied, it means we will not INSERT into watching_by_pub32, just add
# corresponding rows into watching
local upto_n=${7} local upto_n=${7}
trace "[watchpub32] upto_n=${upto_n}" trace "[watchpub32] upto_n=${upto_n}"
@@ -156,93 +286,111 @@ watchpub32() {
local error_msg local error_msg
local data local data
# Derive with pycoin... # Derive with bitcoind...
# {"pub32":"tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk","path":"0/25-30"} # {"pub32":"tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk","path":"0/25-30"}
if [ -n "${upto_n}" ]; then if [ -n "${upto_n}" ]; then
# If upto_n provided, then we create from nstart to upto_n (instead of + GAP) # If upto_n provided, then we create from nstart to upto_n (instead of + GAP)
last_n=${upto_n} last_n=${upto_n}
fi
local subspath=$(echo -e $path | sed -En "s/n/${nstart}-${last_n}/p")
trace "[watchpub32] subspath=${subspath}"
local addresses
addresses=$(derivepubpath "{\"pub32\":${pub32},\"path\":${subspath}}")
returncode=$?
trace_rc ${returncode}
# trace "[watchpub32] addresses=${addresses}"
if [ "${returncode}" -eq 0 ]; then
# result=$(create_wallet "${pub32}")
# returncode=$?
# trace_rc ${returncode}
# trace "[watchpub32request] result=${result}"
trace "[watchpub32] Skipping create_wallet"
if [ "${returncode}" -eq 0 ]; then
# Importmulti in Bitcoin Core...
result=$(importmulti_rpc "${WATCHER_BTC_NODE_XPUB_WALLET}" ${pub32} "${addresses}")
returncode=$?
trace_rc ${returncode}
trace "[watchpub32] result=${result}"
if [ "${returncode}" -eq 0 ]; then
if [ -n "${upto_n}" ]; then
# Update existing row, we are extending the watching window
sql "UPDATE watching_by_pub32 set last_imported_n=${upto_n} WHERE pub32=${pub32}"
returncode=$?
trace_rc ${returncode}
else
# Insert in our DB...
sql "INSERT INTO watching_by_pub32 (pub32, label, derivation_path, watching, callback0conf, callback1conf, last_imported_n) VALUES (${pub32}, ${label}, ${path}, 1, ${cb0conf_url}, ${cb1conf_url}, ${last_n})"
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -ne "0" ]; then
trace "[watchpub32] xpub or label already being watched, updating with new values based on supplied xpub..."
sql "UPDATE watching_by_pub32 SET watching=1, label=${label}, callback0conf=${cb0conf_url}, callback1conf=${cb1conf_url} WHERE pub32=${pub32}"
returncode=$?
trace_rc ${returncode}
fi
fi
if [ "${returncode}" -eq 0 ]; then
id_inserted=$(sql "SELECT id FROM watching_by_pub32 WHERE pub32=${pub32}")
trace "[watchpub32] id_inserted: ${id_inserted}"
addresses=$(echo ${addresses} | jq ".addresses[].address")
insert_watches "${addresses}" "${cb0conf_url}" "${cb1conf_url}" "${id_inserted}" "${nstart}"
else
error_msg="Can't insert xpub watcher in DB"
fi
else
error_msg="Can't import addresses"
fi
else
error_msg="Can't create wallet"
fi
else else
error_msg="Can't derive addresses" # If upto_n is not provided, it means it's a new watching_by_pub32 to insert,
# so let's make sure the label is not already in the table since label must
# be unique... but the key driver is pub32.
local row
row=$(sql "SELECT id, pub32, derivation_path, callback0conf, callback1conf, last_imported_n, watching, inserted_ts FROM watching_by_pub32 WHERE label=${label_pg}")
returncode=$?
trace_rc ${returncode}
if [ ${#row} -ne 0 ]; then
trace "[watchpub32] This label already exists in watching_by_pub32, must be unique."
error_msg="This label already exists in watching_by_pub32, must be unique."
fi
fi fi
if [ -z "${error_msg}" ]; then if [ -z "${error_msg}" ]; then
data="{\"id\":${id_inserted}, local subspath=$(echo -e $path | sed -En "s/n/${nstart}-${last_n}/p")
\"event\":\"watchxpub\", trace "[watchpub32] subspath=${subspath}"
\"pub32\":${pub32}, local addresses
\"label\":${label}, addresses=$(derivepubpath '{"pub32":"'${pub32}'","path":"'${subspath}'"}')
\"path\":${path}, returncode=$?
\"nstart\":${nstart}, trace_rc ${returncode}
\"unconfirmedCallbackURL\":${cb0conf_url}, # trace "[watchpub32] addresses=${addresses}"
\"confirmedCallbackURL\":${cb1conf_url}}"
if [ "${returncode}" -eq 0 ]; then
# result=$(create_wallet "${pub32}")
# returncode=$?
# trace_rc ${returncode}
# trace "[watchpub32request] result=${result}"
trace "[watchpub32] Skipping create_wallet"
if [ "${returncode}" -eq 0 ]; then
# Importmulti in Bitcoin Core...
result=$(importmulti_rpc "${WATCHER_BTC_NODE_XPUB_WALLET}" "${pub32}" "${addresses}")
returncode=$?
trace_rc ${returncode}
trace "[watchpub32] result=${result}"
if [ "${returncode}" -eq 0 ]; then
if [ -n "${upto_n}" ]; then
# Update existing row, we are extending the watching window
id_inserted=$(sql "UPDATE watching_by_pub32 set last_imported_n=${upto_n} WHERE pub32=${pub32_pg} RETURNING id")
returncode=$?
trace_rc ${returncode}
else
# Insert in our DB...
id_inserted=$(sql "INSERT INTO watching_by_pub32 (pub32, label, derivation_path, watching, callback0conf, callback1conf, last_imported_n)"\
" VALUES (${pub32_pg}, ${label_pg}, ${path_pg}, true, ${cb0conf_url_pg}, ${cb1conf_url_pg}, ${last_n})"\
" ON CONFLICT (pub32) DO"\
" UPDATE SET watching=true, label=${label_pg}, callback0conf=${cb0conf_url_pg}, callback1conf=${cb1conf_url_pg}, derivation_path=${path_pg}, last_imported_n=${last_n}"\
" RETURNING id" \
"SELECT id FROM watching_by_pub32 WHERE pub32=${pub32_pg}")
returncode=$?
trace_rc ${returncode}
fi
if [ -n "${id_inserted}" ] && [ "${returncode}" -eq 0 ]; then
trace "[watchpub32] id_inserted: ${id_inserted}"
addresses=$(echo ${addresses} | jq -r ".addresses[].address")
insert_watches "${addresses}" "${label}" "${cb0conf_url}" "${cb1conf_url}" "${id_inserted}" "${nstart}"
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -ne 0 ]; then
error_msg="Can't insert xpub watches in DB"
fi
else
error_msg="Can't insert xpub watcher in DB"
fi
else
error_msg="Can't import addresses"
fi
else
error_msg="Can't create wallet"
fi
else
error_msg="Can't derive addresses"
fi
fi
if [ -z "${error_msg}" ]; then
data='{"id":'${id_inserted}','\
'"event":"watchxpub",'\
'"pub32":"'${pub32}'",'\
'"label":"'${label}'",'\
'"path":"'${path}'",'\
'"nstart":'${nstart}','\
'"unconfirmedCallbackURL":'${cb0conf_url_json}','\
'"confirmedCallbackURL":'${cb1conf_url_json}'}'
returncode=0 returncode=0
else else
data="{\"error\":\"${error_msg}\", data='{"error":"'${error_msg}'",'\
\"event\":\"watchxpub\", '"event":"watchxpub",'\
\"pub32\":${pub32}, '"pub32":"'${pub32}'",'\
\"label\":${label}, '"label":"'${label}'",'\
\"path\":${path}, '"path":"'${path}'",'\
\"nstart\":${nstart}, '"nstart":'${nstart}','\
\"unconfirmedCallbackURL\":${cb0conf_url}, '"unconfirmedCallbackURL":'${cb0conf_url_json}','\
\"confirmedCallbackURL\":${cb1conf_url}}" '"confirmedCallbackURL":'${cb1conf_url_json}'}'
returncode=1 returncode=1
fi fi
@@ -256,29 +404,54 @@ watchpub32() {
insert_watches() { insert_watches() {
trace "Entering insert_watches()..." trace "Entering insert_watches()..."
# Expecting args without quotes
# When callback0conf and callback1conf are empty, means null
local addresses=${1} local addresses=${1}
local callback0conf=${2} local label=${2}
local callback1conf=${3} local label_pg
local xpub_id=${4} if [ -z "${label}" ]; then
local nstart=${5} # Empty url
local inserted_values="" label_pg="null"
else
label_pg="'${label}'"
fi
local callback0conf=${3}
local callback0conf_pg
if [ -z "${callback0conf}" ]; then
# Empty url
callback0conf_pg="null"
else
callback0conf_pg="'${callback0conf}'"
fi
local callback1conf=${4}
local callback1conf_pg
if [ -z "${callback1conf}" ]; then
# Empty url
callback1conf_pg="null"
else
callback1conf_pg="'${callback1conf}'"
fi
local xpub_id=${5}
local nstart=${6}
local inserted_values
local IFS=$'\n' local IFS=$'\n'
for address in ${addresses} for address in ${addresses}
do do
# (address, watching, callback0conf, callback1conf, imported, watching_by_pub32_id) # (address, label, watching, callback0conf, callback1conf, imported, watching_by_pub32_id)
if [ -n "${inserted_values}" ]; then if [ -n "${inserted_values}" ]; then
inserted_values="${inserted_values}," inserted_values="${inserted_values},"
fi fi
inserted_values="${inserted_values}(${address}, 1, ${callback0conf}, ${callback1conf}, 1" inserted_values="${inserted_values}('${address}', ${label_pg}, true, ${callback0conf_pg}, ${callback1conf_pg}, true, ${xpub_id}, ${nstart})"
if [ -n "${xpub_id}" ]; then
inserted_values="${inserted_values}, ${xpub_id}, ${nstart}" nstart=$((${nstart} + 1))
nstart=$((${nstart} + 1))
fi
inserted_values="${inserted_values})"
done done
sql "INSERT INTO watching (address, watching, callback0conf, callback1conf, imported, watching_by_pub32_id, pub32_index) VALUES ${inserted_values} ON CONFLICT(address,callback0conf,callback1conf) DO UPDATE SET watching=1, event_message=${event_message}, calledback0conf=0, calledback1conf=0" sql "INSERT INTO watching (address, label, watching, callback0conf, callback1conf, imported, watching_by_pub32_id, pub32_index)"\
" VALUES ${inserted_values}"\
" ON CONFLICT (address, COALESCE(callback0conf, ''), COALESCE(callback1conf, '')) DO"\
" UPDATE SET watching=true, calledback0conf=false, calledback1conf=false, label=${label_pg}"
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -288,6 +461,8 @@ insert_watches() {
extend_watchers() { extend_watchers() {
trace "Entering extend_watchers()..." trace "Entering extend_watchers()..."
# Expecting args without quotes
local watching_by_pub32_id=${1} local watching_by_pub32_id=${1}
trace "[extend_watchers] watching_by_pub32_id=${watching_by_pub32_id}" trace "[extend_watchers] watching_by_pub32_id=${watching_by_pub32_id}"
local pub32_index=${2} local pub32_index=${2}
@@ -297,7 +472,7 @@ extend_watchers() {
local last_imported_n local last_imported_n
local row local row
row=$(sql "SELECT COALESCE('\"'||pub32||'\"', 'null'), COALESCE('\"'||label||'\"', 'null'), COALESCE('\"'||derivation_path||'\"', 'null'), COALESCE('\"'||callback0conf||'\"', 'null'), COALESCE('\"'||callback1conf||'\"', 'null'), last_imported_n FROM watching_by_pub32 WHERE id=${watching_by_pub32_id} AND watching") row=$(sql "SELECT pub32, label, derivation_path, callback0conf, callback1conf, last_imported_n FROM watching_by_pub32 WHERE id=${watching_by_pub32_id} AND watching")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
@@ -335,52 +510,88 @@ watchtxidrequest() {
trace "Entering watchtxidrequest()..." trace "Entering watchtxidrequest()..."
local returncode local returncode
local result
local request=${1} local request=${1}
trace "[watchtxidrequest] request=${request}" trace "[watchtxidrequest] request=${request}"
local txid=$(echo "${request}" | jq ".txid") local txid txid_pg txid_pg_where
trace "[watchtxidrequest] txid=${txid}" txid=$(echo "${request}" | jq -re ".txid")
local cb1conf_url=$(echo "${request}" | jq ".confirmedCallbackURL") if [ "$?" -ne "0" ]; then
trace "[watchtxidrequest] cb1conf_url=${cb1conf_url}" # txid not found or null
local cbxconf_url=$(echo "${request}" | jq ".xconfCallbackURL") result='{"result":null,'\
trace "[watchtxidrequest] cbxconf_url=${cbxconf_url}" '"error":{'\
'"code":-5,'\
'"message":"txid required"}}'
trace "[watchrequest] txid required"
trace "[watchrequest] responding=${result}"
echo "${result}"
return 1
else
txid_pg="'${address}'"
fi
trace "[watchtxidrequest] txid=${txid}, txid_pg=${txid_pg}"
local cb1conf_url cb1conf_url_pg cb1conf_url_pg_where cb1conf_url_json
cb1conf_url=$(echo "${request}" | jq -re ".confirmedCallbackURL")
if [ "$?" -ne "0" ]; then
# cb1conf_url not found or null
cb1conf_url_json="null"
cb1conf_url_pg="null"
cb1conf_url_pg_where=" IS NULL"
else
cb1conf_url_json="\"${cb1conf_url}\""
cb1conf_url_pg="'${cb1conf_url}'"
cb1conf_url_pg_where="=${cb1conf_url_pg}"
fi
trace "[watchtxidrequest] cb1conf_url=${cb1conf_url}, cb1conf_url_pg=${cb1conf_url_pg}, cb1conf_url_pg_where=${cb1conf_url_pg_where}, cb1conf_url_json=${cb1conf_url_json}"
local cbxconf_url cbxconf_url_pg cbxconf_url_pg_where cbxconf_url_json
cbxconf_url=$(echo "${request}" | jq -re ".xconfCallbackURL")
if [ "$?" -ne "0" ]; then
# cbxconf_url not found or null
cbxconf_url_json="null"
cbxconf_url_pg="null"
cbxconf_url_pg_where=" IS NULL"
else
cbxconf_url_json="\"${cbxconf_url}\""
cbxconf_url_pg="'${cbxconf_url}'"
cbxconf_url_pg_where="=${cbxconf_url_pg}"
fi
trace "[watchtxidrequest] cbxconf_url=${cbxconf_url}, cbxconf_url_pg=${cbxconf_url_pg}, cbxconf_url_pg_where=${cbxconf_url_pg_where}, cbxconf_url_json=${cbxconf_url_json}"
local nbxconf=$(echo "${request}" | jq ".nbxconf") local nbxconf=$(echo "${request}" | jq ".nbxconf")
trace "[watchtxidrequest] nbxconf=${nbxconf}" trace "[watchtxidrequest] nbxconf=${nbxconf}"
local cb1cond local cb1cond
local cbxcond local cbxcond
local inserted local inserted
local id_inserted local id_inserted
local result
trace "[watchtxidrequest] Watch request on txid (${txid}), cb 1-conf (${cb1conf_url}) and cb x-conf (${cbxconf_url}) on ${nbxconf} confirmations." trace "[watchtxidrequest] Watch request on txid (${txid}), cb 1-conf (${cb1conf_url}) and cb x-conf (${cbxconf_url}) on ${nbxconf} confirmations."
sql "INSERT INTO watching_by_txid (txid, watching, callback1conf, callbackxconf, nbxconf) VALUES (${txid}, 1, ${cb1conf_url}, ${cbxconf_url}, ${nbxconf}) ON CONFLICT(txid, callback1conf, callbackxconf) DO UPDATE SET watching=1, nbxconf=${nbxconf}, calledback1conf=0, calledbackxconf=0" id_inserted=$(sql "INSERT INTO watching_by_txid (txid, watching, callback1conf, callbackxconf, nbxconf)"\
" VALUES (${txid_pg}, true, ${cb1conf_url_pg}, ${cbxconf_url_pg}, ${nbxconf})"\
" ON CONFLICT (txid, COALESCE(callback1conf, ''), COALESCE(callbackxconf, '')) DO"\
" UPDATE SET watching=true, nbxconf=${nbxconf}, calledback1conf=false, calledbackxconf=false"\
" RETURNING id" \
"SELECT id FROM watching_by_txid WHERE txid=${txid_pg} AND callback1conf${cb1conf_url_pg_where} AND callbackxconf${cbxconf_url_pg_where}")
returncode=$? returncode=$?
trace_rc ${returncode} trace_rc ${returncode}
if [ "${returncode}" -eq 0 ]; then if [ "${returncode}" -eq 0 ]; then
inserted=1 inserted=true
if [ "${cb1conf_url}" = "null" ]; then
cb1cond=" IS NULL"
else
cb1cond="=${cb1conf_url}"
fi
if [ "${cbxconf_url}" = "null" ]; then
cbxcond=" IS NULL"
else
cbxcond="=${cbxconf_url}"
fi
id_inserted=$(sql "SELECT id FROM watching_by_txid WHERE txid=${txid} AND callback1conf${cb1cond} AND callbackxconf${cbxcond}")
trace "[watchtxidrequest] id_inserted: ${id_inserted}" trace "[watchtxidrequest] id_inserted: ${id_inserted}"
else else
inserted=0 inserted=false
id_inserted=null
fi fi
local data="{\"id\":${id_inserted}, local data='{"id":'${id_inserted}','\
\"event\":\"watchtxid\", '"event":"watchtxid",'\
\"inserted\":${inserted}, '"inserted":'${inserted}','\
\"txid\":${txid}, '"txid":"'${txid}'",'\
\"confirmedCallbackURL\":${cb1conf_url}, '"confirmedCallbackURL":'${cb1conf_url_json}','\
\"xconfCallbackURL\":${cbxconf_url}, '"xconfCallbackURL":'${cbxconf_url_json}','\
\"nbxconf\":${nbxconf}}" '"nbxconf":'${nbxconf}'}'
trace "[watchtxidrequest] responding=${data}" trace "[watchtxidrequest] responding=${data}"
echo "${data}" echo "${data}"

View File

@@ -1,4 +1,3 @@
#!/bin/sh
# Reset # Reset
Color_Off='\033[0m' # Text Reset Color_Off='\033[0m' # Text Reset

View File

@@ -1,4 +1,8 @@
#!/bin/sh #!/bin/bash
# This needs to be run in regtest
# This will mine n blocks. If n is not supplied, will mine 1 block.
# Mine # Mine
mine() { mine() {
@@ -6,9 +10,9 @@ mine() {
local minedaddr local minedaddr
echo ; echo "About to mine ${nbblocks} block(s)..." echo ; echo "About to mine ${nbblocks} block(s)..."
minedaddr=$(docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat getnewaddress | tr -d '\r') minedaddr=$(docker exec -t $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat getnewaddress | tr -d '\r')
echo ; echo "minedaddr=${minedaddr}" echo ; echo "minedaddr=${minedaddr}"
docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat generatetoaddress ${nbblocks} "${minedaddr}" docker exec -t $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat generatetoaddress ${nbblocks} "${minedaddr}"
} }
case "${0}" in *mine.sh) mine $@;; esac case "${0}" in *mine.sh) mine $@;; esac

View File

@@ -0,0 +1,15 @@
#!/bin/bash
date
callbackservername=${1:-"tests-manage-missed"}
callbackserverport=${2:-"1111"}
callbackserverport2=${3:-"1112"}
callbackserverport3=${4:-"1113"}
callbackserverport4=${5:-"1114"}
docker run --rm -d --network cyphernodeappsnet --name ${callbackservername} alpine sh -c "nc -vlkp${callbackserverport} -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo 1>&2'"
docker exec -d ${callbackservername} sh -c "nc -vlkp${callbackserverport2} -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo 1>&2'"
docker exec -d ${callbackservername} sh -c "nc -vlkp${callbackserverport3} -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo 1>&2'"
docker exec -d ${callbackservername} sh -c "nc -vlkp${callbackserverport4} -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo 1>&2'"

View File

@@ -1,24 +1,74 @@
#!/bin/sh #!/bin/bash
# docker exec -it $(docker ps -q -f "name=proxy\.") ./tests/test-batching.sh . ./colors.sh
# curl localhost:8888/listbatchers | jq # This needs to be run in regtest
# curl -d '{}' localhost:8888/getbatcher | jq # You need jq installed for these tests to run correctly
# curl -d '{}' localhost:8888/getbatchdetails | jq
# curl -d '{"outputLabel":"test002","address":"1abd","amount":0.0002}' localhost:8888/addtobatch | jq
# curl -d '{}' localhost:8888/batchspend | jq
# curl -d '{"outputId":1}' localhost:8888/removefrombatch | jq
# curl -d '{"batcherLabel":"lowfees","confTarget":32}' localhost:8888/createbatcher | jq # This will test:
# curl localhost:8888/listbatchers | jq #
# - listbatchers
# - getbatcher
# - getbatchdetails
# - getnewaddress
# - addtobatch
# - batchspend
# - removefrombatch
# - createbatcher
#
# Notes:
# curl proxy:8888/listbatchers | jq
# curl -d '{}' proxy:8888/getbatcher | jq
# curl -d '{}' proxy:8888/getbatchdetails | jq
# curl -d '{"outputLabel":"test002","address":"1abd","amount":0.0002}' proxy:8888/addtobatch | jq
# curl -d '{}' proxy:8888/batchspend | jq
# curl -d '{"outputId":1}' proxy:8888/removefrombatch | jq
# curl -d '{"batcherLabel":"lowfees","confTarget":32}' proxy:8888/createbatcher | jq
# curl proxy:8888/listbatchers | jq
# curl -d '{"batcherLabel":"lowfees"}' proxy:8888/getbatcher | jq
# curl -d '{"batcherLabel":"lowfees"}' proxy:8888/getbatchdetails | jq
# curl -d '{"batcherLabel":"lowfees","outputLabel":"test002","address":"1abd","amount":0.0002}' proxy:8888/addtobatch | jq
# curl -d '{"batcherLabel":"lowfees"}' proxy:8888/batchspend | jq
# curl -d '{"batcherLabel":"lowfees","outputId":9}' proxy:8888/removefrombatch | jq
trace() {
if [ "${1}" -le "${TRACING}" ]; then
echo -e "$(date -u +%FT%TZ) ${2}" 1>&2
fi
}
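The trace levels follow the convention used across these rewritten test scripts (TRACING is set to 3 near the bottom of the file); a hedged illustration:
# Illustrative only: with TRACING=3 all three levels go to stderr with a UTC timestamp,
# with TRACING=1 only the first one would.
trace 1 "test banner / teardown messages"
trace 2 "per-endpoint 'Testing ...' / 'Tested ...' steps"
trace 3 "raw responses and extracted fields"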
start_test_container() {
docker run -d --rm -t --name tests-batching --network=cyphernodenet alpine
}
stop_test_container() {
trace 1 "\n\n[stop_test_container] ${BCyan}Stopping existing containers if they are running...${Color_Off}\n"
# docker stop tests-batching
# docker stop tests-batching-cb
local containers=$(docker ps -q -f "name=tests-batching")
if [ -n "${containers}" ]; then
docker stop ${containers}
fi
}
exec_in_test_container() {
docker exec -it tests-batching "$@"
}
# curl -d '{"batcherLabel":"lowfees"}' localhost:8888/getbatcher | jq
# curl -d '{"batcherLabel":"lowfees"}' localhost:8888/getbatchdetails | jq
# curl -d '{"batcherLabel":"lowfees","outputLabel":"test002","address":"1abd","amount":0.0002}' localhost:8888/addtobatch | jq
# curl -d '{"batcherLabel":"lowfees"}' localhost:8888/batchspend | jq
# curl -d '{"batcherLabel":"lowfees","outputId":9}' localhost:8888/removefrombatch | jq
testbatching() { testbatching() {
trace 1 "\n\n[testbatching] ${BCyan}Let's test batching features!...${Color_Off}\n"
local response local response
local id local id
local id2 local id2
@@ -35,41 +85,41 @@ testbatching() {
echo "url2=${url2}" echo "url2=${url2}"
# List batchers (should show at least empty default batcher) # List batchers (should show at least empty default batcher)
echo "Testing listbatchers..." trace 2 "\n\n[testbatching] ${BCyan}Testing listbatchers...${Color_Off}\n"
response=$(curl -s proxy:8888/listbatchers) response=$(exec_in_test_container curl -s proxy:8888/listbatchers)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
id=$(echo "${response}" | jq ".result[0].batcherId") id=$(echo "${response}" | jq ".result[0].batcherId")
echo "batcherId=${id}" trace 3 "[testbatching] batcherId=${id}"
if [ "${id}" -ne "1" ]; then if [ "${id}" -ne "1" ]; then
exit 10 exit 10
fi fi
echo "Tested listbatchers." trace 2 "\n\n[testbatching] ${BCyan}Tested listbatchers.${Color_Off}\n"
# getbatcher the default batcher # getbatcher the default batcher
echo "Testing getbatcher..." trace 2 "\n\n[testbatching] ${BCyan}Testing getbatcher...${Color_Off}\n"
response=$(curl -sd '{}' localhost:8888/getbatcher) response=$(exec_in_test_container curl -sd '{}' proxy:8888/getbatcher)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq -r ".result.batcherLabel") data=$(echo "${response}" | jq -r ".result.batcherLabel")
echo "batcherLabel=${data}" trace 3 "[testbatching] batcherLabel=${data}"
if [ "${data}" != "default" ]; then if [ "${data}" != "default" ]; then
exit 20 exit 20
fi fi
response=$(curl -sd '{"batcherId":1}' localhost:8888/getbatcher) response=$(exec_in_test_container curl -sd '{"batcherId":1}' proxy:8888/getbatcher)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq -r ".result.batcherLabel") data=$(echo "${response}" | jq -r ".result.batcherLabel")
echo "batcherLabel=${data}" trace 3 "[testbatching] batcherLabel=${data}"
if [ "${data}" != "default" ]; then if [ "${data}" != "default" ]; then
exit 25 exit 25
fi fi
echo "Tested getbatcher." trace 2 "\n\n[testbatching] ${BCyan}Tested getbatcher.${Color_Off}\n"
# getbatchdetails the default batcher # getbatchdetails the default batcher
echo "Testing getbatchdetails..." trace 2 "\n\n[testbatching] ${BCyan}Testing getbatchdetails...${Color_Off}\n"
response=$(curl -sd '{}' localhost:8888/getbatchdetails) response=$(exec_in_test_container curl -sd '{}' proxy:8888/getbatchdetails)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq -r ".result.batcherLabel") data=$(echo "${response}" | jq -r ".result.batcherLabel")
echo "batcherLabel=${data}" trace 3 "[testbatching] batcherLabel=${data}"
if [ "${data}" != "default" ]; then if [ "${data}" != "default" ]; then
exit 30 exit 30
fi fi
@@ -78,10 +128,10 @@ testbatching() {
exit 32 exit 32
fi fi
response=$(curl -sd '{"batcherId":1}' localhost:8888/getbatchdetails) response=$(exec_in_test_container curl -sd '{"batcherId":1}' proxy:8888/getbatchdetails)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq -r ".result.batcherLabel") data=$(echo "${response}" | jq -r ".result.batcherLabel")
echo "batcherLabel=${data}" trace 3 "[testbatching] batcherLabel=${data}"
if [ "${data}" != "default" ]; then if [ "${data}" != "default" ]; then
exit 35 exit 35
fi fi
@@ -89,16 +139,16 @@ testbatching() {
if [ "$?" -ne 0 ]; then if [ "$?" -ne 0 ]; then
exit 37 exit 37
fi fi
echo "Tested getbatchdetails." trace 2 "\n\n[testbatching] ${BCyan}Tested getbatchdetails.${Color_Off}\n"
# addtobatch to default batcher # addtobatch to default batcher
echo "Testing addtobatch..." trace 2 "\n\n[testbatching] ${BCyan}Testing addtobatch...${Color_Off}\n"
address1=$(curl -s localhost:8888/getnewaddress | jq -r ".address") address1=$(exec_in_test_container curl -s proxy:8888/getnewaddress | jq -r ".address")
echo "address1=${address1}" trace 3 "[testbatching] address1=${address1}"
response=$(curl -sd '{"outputLabel":"test001","address":"'${address1}'","amount":0.001}' localhost:8888/addtobatch) response=$(exec_in_test_container curl -sd '{"outputLabel":"test001","address":"'${address1}'","amount":0.001}' proxy:8888/addtobatch)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
id=$(echo "${response}" | jq ".result.batcherId") id=$(echo "${response}" | jq ".result.batcherId")
echo "batcherId=${id}" trace 3 "[testbatching] batcherId=${id}"
if [ "${id}" -ne "1" ]; then if [ "${id}" -ne "1" ]; then
exit 40 exit 40
fi fi
@@ -106,14 +156,14 @@ testbatching() {
if [ "$?" -ne 0 ]; then if [ "$?" -ne 0 ]; then
exit 42 exit 42
fi fi
echo "outputId=${id}" trace 3 "[testbatching] outputId=${id}"
address2=$(curl -s localhost:8888/getnewaddress | jq -r ".address") address2=$(exec_in_test_container curl -s proxy:8888/getnewaddress | jq -r ".address")
echo "address2=${address2}" trace 3 "[testbatching] address2=${address2}"
response=$(curl -sd '{"batcherId":1,"outputLabel":"test002","address":"'${address2}'","amount":22000000}' localhost:8888/addtobatch) response=$(exec_in_test_container curl -sd '{"batcherId":1,"outputLabel":"test002","address":"'${address2}'","amount":22000000}' proxy:8888/addtobatch)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
id2=$(echo "${response}" | jq ".result.batcherId") id2=$(echo "${response}" | jq ".result.batcherId")
echo "batcherId=${id2}" trace 3 "[testbatching] batcherId=${id2}"
if [ "${id2}" -ne "1" ]; then if [ "${id2}" -ne "1" ]; then
exit 47 exit 47
fi fi
@@ -121,115 +171,104 @@ testbatching() {
if [ "$?" -ne 0 ]; then if [ "$?" -ne 0 ]; then
exit 50 exit 50
fi fi
echo "outputId=${id2}" trace 3 "[testbatching] outputId=${id2}"
echo "Tested addtobatch." trace 2 "\n\n[testbatching] ${BCyan}Tested addtobatch.${Color_Off}\n"
# batchspend default batcher # batchspend default batcher
echo "Testing batchspend..." trace 2 "\n\n[testbatching] ${BCyan}Testing batchspend...${Color_Off}\n"
response=$(curl -sd '{}' localhost:8888/batchspend) response=$(exec_in_test_container curl -sd '{}' proxy:8888/batchspend)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
echo "${response}" | jq -e ".error" echo "${response}" | jq -e ".error"
if [ "$?" -ne 0 ]; then if [ "$?" -ne 0 ]; then
exit 55 exit 55
fi fi
echo "Tested batchspend." trace 2 "\n\n[testbatching] ${BCyan}Tested batchspend.${Color_Off}\n"
# getbatchdetails the default batcher # getbatchdetails the default batcher
echo "Testing getbatchdetails..." trace 2 "\n\n[testbatching] ${BCyan}Testing getbatchdetails...${Color_Off}\n"
response=$(curl -sd '{}' localhost:8888/getbatchdetails) response=$(exec_in_test_container curl -sd '{}' proxy:8888/getbatchdetails)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq ".result.nbOutputs") data=$(echo "${response}" | jq ".result.nbOutputs")
echo "nbOutputs=${data}" trace 3 "[testbatching] nbOutputs=${data}"
echo "Tested getbatchdetails." trace 2 "\n\n[testbatching] ${BCyan}Tested getbatchdetails.${Color_Off}\n"
# removefrombatch from default batcher # removefrombatch from default batcher
echo "Testing removefrombatch..." trace 2 "\n\n[testbatching] ${BCyan}Testing removefrombatch...${Color_Off}\n"
response=$(curl -sd '{"outputId":'${id}'}' localhost:8888/removefrombatch) response=$(exec_in_test_container curl -sd '{"outputId":'${id}'}' proxy:8888/removefrombatch)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
id=$(echo "${response}" | jq ".result.batcherId") id=$(echo "${response}" | jq ".result.batcherId")
echo "batcherId=${id}" trace 3 "[testbatching] batcherId=${id}"
if [ "${id}" -ne "1" ]; then if [ "${id}" -ne "1" ]; then
exit 60 exit 60
fi fi
response=$(curl -sd '{"outputId":'${id2}'}' localhost:8888/removefrombatch) response=$(exec_in_test_container curl -sd '{"outputId":'${id2}'}' proxy:8888/removefrombatch)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
id=$(echo "${response}" | jq ".result.batcherId") id=$(echo "${response}" | jq ".result.batcherId")
echo "batcherId=${id}" trace 3 "[testbatching] batcherId=${id}"
if [ "${id}" -ne "1" ]; then if [ "${id}" -ne "1" ]; then
exit 64 exit 64
fi fi
echo "Tested removefrombatch." trace 2 "\n\n[testbatching] ${BCyan}Tested removefrombatch.${Color_Off}\n"
# getbatchdetails the default batcher # getbatchdetails the default batcher
echo "Testing getbatchdetails..." trace 2 "\n\n[testbatching] ${BCyan}Testing getbatchdetails...${Color_Off}\n"
response=$(curl -sd '{"batcherId":1}' localhost:8888/getbatchdetails) response=$(exec_in_test_container curl -sd '{"batcherId":1}' proxy:8888/getbatchdetails)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data2=$(echo "${response}" | jq ".result.nbOutputs") data2=$(echo "${response}" | jq ".result.nbOutputs")
echo "nbOutputs=${data2}" trace 3 "[testbatching] nbOutputs=${data2}"
if [ "${data2}" -ne "$((${data}-2))" ]; then if [ "${data2}" -ne "$((${data}-2))" ]; then
exit 68 exit 68
fi fi
echo "Tested getbatchdetails." trace 2 "\n\n[testbatching] ${BCyan}Tested getbatchdetails.${Color_Off}\n"
# Create a batcher # Create a batcher
echo "Testing createbatcher..." trace 2 "\n\n[testbatching] ${BCyan}Testing createbatcher...${Color_Off}\n"
response=$(curl -s -H 'Content-Type: application/json' -d '{"batcherLabel":"testbatcher","confTarget":32}' proxy:8888/createbatcher) response=$(exec_in_test_container curl -s -H 'Content-Type: application/json' -d '{"batcherLabel":"testbatcher","confTarget":32}' proxy:8888/createbatcher)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
id=$(echo "${response}" | jq -e ".result.batcherId") id=$(echo "${response}" | jq -e ".result.batcherId")
if [ "$?" -ne "0" ]; then if [ "$?" -ne "0" ]; then
exit 70 exit 70
fi fi
# List batchers (should show at least default and testbatcher batchers) # List batchers (should show at least default and testbatcher batchers)
echo "Testing listbatches..." trace 2 "\n\n[testbatching] ${BCyan}Testing listbatchers...${Color_Off}\n"
response=$(curl -s proxy:8888/listbatchers) response=$(exec_in_test_container curl -s proxy:8888/listbatchers)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
id=$(echo "${response}" | jq '.result[] | select(.batcherLabel == "testbatcher") | .batcherId') id=$(echo "${response}" | jq '.result[] | select(.batcherLabel == "testbatcher") | .batcherId')
echo "batcherId=${id}" trace 3 "[testbatching] batcherId=${id}"
if [ -z "${id}" ]; then if [ -z "${id}" ]; then
exit 75 exit 75
fi fi
echo "Tested listbatchers." trace 2 "\n\n[testbatching] ${BCyan}Tested listbatchers.${Color_Off}\n"
# getbatcher the testbatcher batcher # getbatcher the testbatcher batcher
echo "Testing getbatcher..." trace 2 "\n\n[testbatching] ${BCyan}Testing getbatcher...${Color_Off}\n"
response=$(curl -sd '{"batcherId":'${id}'}' localhost:8888/getbatcher) response=$(exec_in_test_container curl -sd '{"batcherId":'${id}'}' proxy:8888/getbatcher)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq -r ".result.batcherLabel") data=$(echo "${response}" | jq -r ".result.batcherLabel")
echo "batcherLabel=${data}" trace 3 "[testbatching] batcherLabel=${data}"
if [ "${data}" != "testbatcher" ]; then if [ "${data}" != "testbatcher" ]; then
exit 80 exit 80
fi fi
response=$(curl -sd '{"batcherLabel":"testbatcher"}' localhost:8888/getbatcher) response=$(exec_in_test_container curl -sd '{"batcherLabel":"testbatcher"}' proxy:8888/getbatcher)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq -r ".result.batcherId") data=$(echo "${response}" | jq -r ".result.batcherId")
echo "batcherId=${data}" trace 3 "[testbatching] batcherId=${data}"
if [ "${data}" != "${id}" ]; then if [ "${data}" != "${id}" ]; then
exit 90 exit 90
fi fi
echo "Tested getbatcher." trace 2 "\n\n[testbatching] ${BCyan}Tested getbatcher.${Color_Off}\n"
# getbatchdetails the testbatcher batcher # getbatchdetails the testbatcher batcher
echo "Testing getbatchdetails..." trace 2 "\n\n[testbatching] ${BCyan}Testing getbatchdetails...${Color_Off}\n"
response=$(curl -sd '{"batcherLabel":"testbatcher"}' localhost:8888/getbatchdetails) response=$(exec_in_test_container curl -sd '{"batcherLabel":"testbatcher"}' proxy:8888/getbatchdetails)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq -r ".result.batcherId") data=$(echo "${response}" | jq -r ".result.batcherId")
echo "batcherId=${data}" trace 3 "[testbatching] batcherId=${data}"
if [ "${data}" != "${id}" ]; then if [ "${data}" != "${id}" ]; then
exit 100 exit 100
fi fi
@@ -238,10 +277,10 @@ testbatching() {
exit 110 exit 110
fi fi
response=$(curl -sd '{"batcherId":'${id}'}' localhost:8888/getbatchdetails) response=$(exec_in_test_container curl -sd '{"batcherId":'${id}'}' proxy:8888/getbatchdetails)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq -r ".result.batcherLabel") data=$(echo "${response}" | jq -r ".result.batcherLabel")
echo "batcherLabel=${data}" trace 3 "[testbatching] batcherLabel=${data}"
if [ "${data}" != "testbatcher" ]; then if [ "${data}" != "testbatcher" ]; then
exit 120 exit 120
fi fi
@@ -249,16 +288,16 @@ testbatching() {
if [ "$?" -ne 0 ]; then if [ "$?" -ne 0 ]; then
exit 130 exit 130
fi fi
echo "Tested getbatchdetails." trace 2 "\n\n[testbatching] ${BCyan}Tested getbatchdetails.${Color_Off}\n"
# addtobatch to testbatcher batcher # addtobatch to testbatcher batcher
echo "Testing addtobatch..." trace 2 "\n\n[testbatching] ${BCyan}Testing addtobatch...${Color_Off}\n"
address1=$(curl -s localhost:8888/getnewaddress | jq -r ".address") address1=$(exec_in_test_container curl -s proxy:8888/getnewaddress | jq -r ".address")
echo "address1=${address1}" trace 3 "[testbatching] address1=${address1}"
response=$(curl -sd '{"batcherId":'${id}',"outputLabel":"test001","address":"'${address1}'","amount":0.001,"webhookUrl":"'${url1}'/'${address1}'"}' localhost:8888/addtobatch) response=$(exec_in_test_container curl -sd '{"batcherId":'${id}',"outputLabel":"test001","address":"'${address1}'","amount":0.001,"webhookUrl":"'${url1}'/'${address1}'"}' proxy:8888/addtobatch)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq ".result.batcherId") data=$(echo "${response}" | jq ".result.batcherId")
echo "batcherId=${data}" trace 3 "[testbatching] batcherId=${data}"
if [ "${data}" -ne "${id}" ]; then if [ "${data}" -ne "${id}" ]; then
exit 140 exit 140
fi fi
@@ -266,14 +305,14 @@ testbatching() {
if [ "$?" -ne 0 ]; then if [ "$?" -ne 0 ]; then
exit 142 exit 142
fi fi
echo "outputId=${id2}" trace 3 "[testbatching] outputId=${id2}"
address2=$(curl -s localhost:8888/getnewaddress | jq -r ".address") address2=$(exec_in_test_container curl -s proxy:8888/getnewaddress | jq -r ".address")
echo "address2=${address2}" trace 3 "[testbatching] address2=${address2}"
response=$(curl -sd '{"batcherLabel":"testbatcher","outputLabel":"test002","address":"'${address2}'","amount":0.002,"webhookUrl":"'${url2}'/'${address2}'"}' localhost:8888/addtobatch) response=$(exec_in_test_container curl -sd '{"batcherLabel":"testbatcher","outputLabel":"test002","address":"'${address2}'","amount":0.002,"webhookUrl":"'${url2}'/'${address2}'"}' proxy:8888/addtobatch)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq ".result.batcherId") data=$(echo "${response}" | jq ".result.batcherId")
echo "batcherId=${data}" trace 3 "[testbatching] batcherId=${data}"
if [ "${data}" -ne "${id}" ]; then if [ "${data}" -ne "${id}" ]; then
exit 150 exit 150
fi fi
@@ -281,48 +320,69 @@ testbatching() {
if [ "$?" -ne 0 ]; then if [ "$?" -ne 0 ]; then
exit 152 exit 152
fi fi
echo "outputId=${id2}" trace 3 "[testbatching] outputId=${id2}"
echo "Tested addtobatch." trace 2 "\n\n[testbatching] ${BCyan}Tested addtobatch.${Color_Off}\n"
# batchspend testbatcher batcher # batchspend testbatcher batcher
echo "Testing batchspend..." trace 2 "\n\n[testbatching] ${BCyan}Testing batchspend...${Color_Off}\n"
response=$(curl -sd '{"batcherLabel":"testbatcher"}' localhost:8888/batchspend) response=$(exec_in_test_container curl -sd '{"batcherLabel":"testbatcher"}' proxy:8888/batchspend)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data2=$(echo "${response}" | jq -e ".result.txid") data2=$(echo "${response}" | jq -e ".result.txid")
if [ "$?" -ne 0 ]; then if [ "$?" -ne 0 ]; then
exit 160 exit 160
fi fi
echo "txid=${data2}" trace 3 "[testbatching] txid=${data2}"
data=$(echo "${response}" | jq ".result.outputs | length") data=$(echo "${response}" | jq ".result.outputs | length")
if [ "${data}" -ne "2" ]; then if [ "${data}" -ne "2" ]; then
exit 162 exit 162
fi fi
echo "Tested batchspend." trace 2 "\n\n[testbatching] ${BCyan}Tested batchspend.${Color_Off}\n"
# getbatchdetails the testbatcher batcher # getbatchdetails the testbatcher batcher
echo "Testing getbatchdetails..." trace 2 "\n\n[testbatching] ${BCyan}Testing getbatchdetails...${Color_Off}\n"
echo "txid=${data2}" trace 3 "[testbatching] txid=${data2}"
response=$(curl -sd '{"batcherLabel":"testbatcher","txid":'${data2}'}' localhost:8888/getbatchdetails) response=$(exec_in_test_container curl -sd '{"batcherLabel":"testbatcher","txid":'${data2}'}' proxy:8888/getbatchdetails)
echo "response=${response}" trace 3 "[testbatching] response=${response}"
data=$(echo "${response}" | jq ".result.nbOutputs") data=$(echo "${response}" | jq ".result.nbOutputs")
echo "nbOutputs=${data}" trace 3 "[testbatching] nbOutputs=${data}"
if [ "${data}" -ne "2" ]; then if [ "${data}" -ne "2" ]; then
exit 170 exit 170
fi fi
echo "Tested getbatchdetails." trace 2 "\n\n[testbatching] ${BCyan}Tested getbatchdetails.${Color_Off}\n"
# List batchers # List batchers
# Add to batch # Add to batch
# List batchers # List batchers
# Remove from batch # Remove from batch
# List batchers # List batchers
trace 1 "\n\n[testbatching] ${On_IGreen}${BBlack} ALL GOOD! Yayyyy! ${Color_Off}\n"
} }
wait_for_callbacks() { start_callback_server() {
nc -vlp1111 -e sh -c 'echo -en "HTTP/1.1 200 OK\r\n\r\n" ; timeout 1 tee /dev/tty | cat ; echo 1>&2' & trace 1 "\n\n[start_callback_server] ${BCyan}Let's start a callback server!...${Color_Off}\n"
nc -vlp1112 -e sh -c 'echo -en "HTTP/1.1 200 OK\r\n\r\n" ; timeout 1 tee /dev/tty | cat ; echo 1>&2' &
port=${1:-${callbackserverport}}
docker run --rm -t --name tests-batching-cb --network=cyphernodenet alpine sh -c "nc -vlp${port} -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; echo -en \"\\033[40m\\033[0;37m\" >&2 ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo -e \"\033[0m\" >&2'" &
} }
wait_for_callbacks TRACING=3
stop_test_container
start_test_container
callbackserverport="1111"
callbackservername="tests-batching-cb"
trace 1 "\n\n[test-batching] ${BCyan}Installing needed packages...${Color_Off}\n"
exec_in_test_container apk add --update curl
testbatching testbatching
trace 1 "\n\n[test-batching] ${BCyan}Tearing down...${Color_Off}\n"
wait wait
stop_test_container
trace 1 "\n\n[test-batching] ${BCyan}See ya!${Color_Off}\n"

View File

@@ -1,6 +1,44 @@
#!/bin/sh #!/bin/bash
test_derive() { . ./colors.sh
# You need jq installed for these tests to run correctly
# This will test:
#
# - deriveindex
# - derivepubpath
# - deriveindex_bitcoind
# - derivepubpath_bitcoind
#
# ...and it will compare performance between Pycoin and Bitcoin Core's address derivations...
#
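Hedged manual equivalents of what tests_derive() automates below, runnable from any container on cyphernodenet; <xpub> is a placeholder:
# curl proxy:8888/deriveindex/25-30 | jq
# curl -d '{"pub32":"<xpub>","path":"0/25-30"}' proxy:8888/derivepubpath | jq
# curl proxy:8888/deriveindex_bitcoind/25-30 | jq
# curl -d '{"pub32":"<xpub>","path":"0/25-30"}' proxy:8888/derivepubpath_bitcoind | jq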
trace() {
if [ "${1}" -le "${TRACING}" ]; then
echo -e "$(date -u +%FT%TZ) ${2}" 1>&2
fi
}
start_test_container() {
docker run -d --rm -t --name tests-derive --network=cyphernodenet alpine
}
stop_test_container() {
trace 1 "\n\n[stop_test_container] ${BCyan}Stopping existing containers if they are running...${Color_Off}\n"
# docker stop test-derive
local containers=$(docker ps -q -f "name=tests-derive")
if [ -n "${containers}" ]; then
docker stop ${containers}
fi
}
exec_in_test_container() {
docker exec -it tests-derive "$@"
}
tests_derive() {
local address local address
local address1 local address1
local address2 local address2
@@ -8,82 +46,85 @@ test_derive() {
local response local response
local transaction local transaction
trace 1 "\n\n[tests_derive] ${BCyan}Let's test the derivation features!...${Color_Off}\n"
# deriveindex # deriveindex
# (GET) http://proxy:8888/deriveindex/25-30 # (GET) http://proxy:8888/deriveindex/25-30
# {"addresses":[{"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},{"address":"2NFLhFghAPKEPuZCKoeXYYxuaBxhKXbmhBV"},{"address":"2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP"},{"address":"2Mth8XDZpXkY9d95tort8HYEAuEesow2tF6"},{"address":"2MwqEmAXhUw6H7bJwMhD13HGWVEj2HgFiNH"},{"address":"2N2Y4BVRdrRFhweub2ehHXveGZC3nryMEJw"}]} # {"addresses":[{"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},{"address":"2NFLhFghAPKEPuZCKoeXYYxuaBxhKXbmhBV"},{"address":"2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP"},{"address":"2Mth8XDZpXkY9d95tort8HYEAuEesow2tF6"},{"address":"2MwqEmAXhUw6H7bJwMhD13HGWVEj2HgFiNH"},{"address":"2N2Y4BVRdrRFhweub2ehHXveGZC3nryMEJw"}]}
echo "Testing deriveindex..." trace 2 "\n\n[tests_derive] ${BCyan}Testing deriveindex...${Color_Off}\n"
response=$(curl -s proxy:8888/deriveindex/25-30) response=$(exec_in_test_container curl -s proxy:8888/deriveindex/25-30)
echo "response=${response}" trace 3 "[tests_derive] response=${response}"
local nbaddr=$(echo "${response}" | jq ".addresses | length") local nbaddr=$(echo "${response}" | jq ".addresses | length")
trace 3 "[tests_derive] nbaddr=${nbaddr}"
if [ "${nbaddr}" -ne "6" ]; then if [ "${nbaddr}" -ne "6" ]; then
exit 130 return 130
fi fi
address=$(echo "${response}" | jq ".addresses[2].address" | tr -d '\"') address=$(echo "${response}" | jq ".addresses[2].address" | tr -d '\"')
if [ "${address}" != "2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP" ]; then if [ "${address}" != "2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP" ]; then
exit 140 return 140
fi fi
echo "Tested deriveindex." trace 2 "\n\n[tests_derive] ${BCyan}Tested deriveindex.${Color_Off}\n"
# derivepubpath # derivepubpath
# (GET) http://proxy:8888/derivepubpath # (GET) http://proxy:8888/derivepubpath
# BODY {"pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/25-30"} # BODY {"pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/25-30"}
# {"addresses":[{"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},{"address":"2NFLhFghAPKEPuZCKoeXYYxuaBxhKXbmhBV"},{"address":"2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP"},{"address":"2Mth8XDZpXkY9d95tort8HYEAuEesow2tF6"},{"address":"2MwqEmAXhUw6H7bJwMhD13HGWVEj2HgFiNH"},{"address":"2N2Y4BVRdrRFhweub2ehHXveGZC3nryMEJw"}]} # {"addresses":[{"address":"2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8"},{"address":"2NFLhFghAPKEPuZCKoeXYYxuaBxhKXbmhBV"},{"address":"2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP"},{"address":"2Mth8XDZpXkY9d95tort8HYEAuEesow2tF6"},{"address":"2MwqEmAXhUw6H7bJwMhD13HGWVEj2HgFiNH"},{"address":"2N2Y4BVRdrRFhweub2ehHXveGZC3nryMEJw"}]}
echo "Testing derivepubpath..." trace 2 "\n\n[tests_derive] ${BCyan}Testing derivepubpath...${Color_Off}\n"
response=$(curl -s -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" proxy:8888/derivepubpath) response=$(exec_in_test_container curl -s -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" proxy:8888/derivepubpath)
echo "response=${response}" trace 3 "[tests_derive] response=${response}"
local nbaddr=$(echo "${response}" | jq ".addresses | length") local nbaddr=$(echo "${response}" | jq ".addresses | length")
if [ "${nbaddr}" -ne "6" ]; then if [ "${nbaddr}" -ne "6" ]; then
exit 150 return 150
fi fi
address=$(echo "${response}" | jq ".addresses[2].address" | tr -d '\"') address=$(echo "${response}" | jq ".addresses[2].address" | tr -d '\"')
if [ "${address}" != "2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP" ]; then if [ "${address}" != "2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP" ]; then
exit 160 return 160
fi fi
echo "Tested derivepubpath." trace 2 "\n\n[tests_derive] ${BCyan}Tested derivepubpath.${Color_Off}\n"
# deriveindex_bitcoind # deriveindex_bitcoind
# (GET) http://proxy:8888/deriveindex_bitcoind/25-30 # (GET) http://proxy:8888/deriveindex_bitcoind/25-30
# ["2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8","2NFLhFghAPKEPuZCKoeXYYxuaBxhKXbmhBV","2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP","2Mth8XDZpXkY9d95tort8HYEAuEesow2tF6","2MwqEmAXhUw6H7bJwMhD13HGWVEj2HgFiNH","2N2Y4BVRdrRFhweub2ehHXveGZC3nryMEJw"] # ["2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8","2NFLhFghAPKEPuZCKoeXYYxuaBxhKXbmhBV","2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP","2Mth8XDZpXkY9d95tort8HYEAuEesow2tF6","2MwqEmAXhUw6H7bJwMhD13HGWVEj2HgFiNH","2N2Y4BVRdrRFhweub2ehHXveGZC3nryMEJw"]
echo "Testing deriveindex_bitcoind..." trace 2 "\n\n[tests_derive] ${BCyan}Testing deriveindex_bitcoind...${Color_Off}\n"
response=$(curl -s proxy:8888/deriveindex_bitcoind/25-30) response=$(exec_in_test_container curl -s proxy:8888/deriveindex_bitcoind/25-30)
echo "response=${response}" trace 3 "[tests_derive] response=${response}"
local nbaddr=$(echo "${response}" | jq ". | length") local nbaddr=$(echo "${response}" | jq ". | length")
if [ "${nbaddr}" -ne "6" ]; then if [ "${nbaddr}" -ne "6" ]; then
exit 130 return 130
fi fi
address=$(echo "${response}" | jq ".[2]" | tr -d '\"') address=$(echo "${response}" | jq ".[2]" | tr -d '\"')
if [ "${address}" != "2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP" ]; then if [ "${address}" != "2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP" ]; then
exit 140 return 140
fi fi
echo "Tested deriveindex_bitcoind." trace 2 "\n\n[tests_derive] ${BCyan}Tested deriveindex_bitcoind.${Color_Off}\n"
# derivepubpath_bitcoind # derivepubpath_bitcoind
# (GET) http://proxy:8888/derivepubpath_bitcoind # (GET) http://proxy:8888/derivepubpath_bitcoind
# BODY {"pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/25-30"} # BODY {"pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/25-30"}
# ["2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8","2NFLhFghAPKEPuZCKoeXYYxuaBxhKXbmhBV","2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP","2Mth8XDZpXkY9d95tort8HYEAuEesow2tF6","2MwqEmAXhUw6H7bJwMhD13HGWVEj2HgFiNH","2N2Y4BVRdrRFhweub2ehHXveGZC3nryMEJw"] # ["2N6Q9kBcLtNswgMSLSQ5oduhbctk7hxEJW8","2NFLhFghAPKEPuZCKoeXYYxuaBxhKXbmhBV","2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP","2Mth8XDZpXkY9d95tort8HYEAuEesow2tF6","2MwqEmAXhUw6H7bJwMhD13HGWVEj2HgFiNH","2N2Y4BVRdrRFhweub2ehHXveGZC3nryMEJw"]
echo "Testing derivepubpath_bitcoind..." trace 2 "\n\n[tests_derive] ${BCyan}Testing derivepubpath_bitcoind...${Color_Off}\n"
response=$(curl -s -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" proxy:8888/derivepubpath_bitcoind) response=$(exec_in_test_container curl -s -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" proxy:8888/derivepubpath_bitcoind)
echo "response=${response}" trace 3 "[tests_derive] response=${response}"
local nbaddr=$(echo "${response}" | jq ". | length") local nbaddr=$(echo "${response}" | jq ". | length")
if [ "${nbaddr}" -ne "6" ]; then if [ "${nbaddr}" -ne "6" ]; then
exit 150 return 150
fi fi
address=$(echo "${response}" | jq ".[2]" | tr -d '\"') address=$(echo "${response}" | jq ".[2]" | tr -d '\"')
if [ "${address}" != "2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP" ]; then if [ "${address}" != "2N7gepbQtRM5Hm4PTjvGadj9wAwEwnAsKiP" ]; then
exit 160 return 160
fi fi
echo "Tested derivepubpath_bitcoind." trace 2 "\n\n[tests_derive] ${BCyan}Tested derivepubpath_bitcoind.${Color_Off}\n"
# deriveindex_bitcoind and derivepubpath_bitcoind faster derivation? # deriveindex_bitcoind and derivepubpath_bitcoind faster derivation?
echo -e "\nDeriving 500 addresses with derivepubpath (Pycoin)..." trace 2 "\n\n[tests_derive] ${BCyan}Deriving 500 addresses with derivepubpath (Pycoin)...${Color_Off}\n"
time curl -s -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/0-499\"}" proxy:8888/derivepubpath > /dev/null time exec_in_test_container sh -c 'curl -s -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/0-499\"}" proxy:8888/derivepubpath > /dev/null'
echo -e "\nDeriving 500 addresses with derivepubpath_bitcoind (Bitcoin Core)..." trace 2 "\n\n[tests_derive] ${BCyan}Deriving 500 addresses with derivepubpath_bitcoind (Bitcoin Core)...${Color_Off}\n"
time curl -s -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/0-499\"}" proxy:8888/derivepubpath_bitcoind > /dev/null time exec_in_test_container sh -c 'curl -s -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/0-499\"}" proxy:8888/derivepubpath_bitcoind > /dev/null'
# Deriving 500 addresses with derivepubpath (pycoin)... # Deriving 500 addresses with derivepubpath (pycoin)...
# real 0m 18.15s # real 0m 18.15s
@@ -95,8 +136,26 @@ test_derive() {
# user 0m 0.00s # user 0m 0.00s
# sys 0m 0.00s # sys 0m 0.00s
trace 1 "\n\n[tests_derive] ${On_IGreen}${BBlack} ALL GOOD! Yayyyy! ${Color_Off}\n"
} }
apk add curl jq TRACING=3
returncode=0
test_derive stop_test_container
start_test_container
trace 1 "\n\n[test-derive] ${BCyan}Installing needed packages...${Color_Off}\n"
exec_in_test_container apk add --update curl
tests_derive
returncode=$?
trace 1 "\n\n[test-derive] ${BCyan}Tearing down...${Color_Off}\n"
wait
stop_test_container
trace 1 "\n\n[test-derive] ${BCyan}See ya!${Color_Off}\n"
exit ${returncode}

View File

@@ -1,26 +1,53 @@
#!/bin/sh #!/bin/bash
. ./colors.sh . ./colors.sh
. ./mine.sh . ./mine.sh
# This needs to be run in regtest
# You need jq installed for these tests to run correctly
# This will test the missed watched transactions mechanisms by broadcasting
# transactions on watched addresses while the proxy is shut down...
#
# - getnewaddress
# - watch
# - executecallbacks
#
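The manual equivalent of the recovery path this test exercises, hedged, from a container on cyphernodenet:
# Illustrative only: once the proxy is back up, a single call replays any missed
# 0-conf / 1-conf callbacks for watched addresses:
# curl -s -H "Content-Type: application/json" proxy:8888/executecallbacks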
trace() { trace() {
if [ "${1}" -le "${TRACING}" ]; then if [ "${1}" -le "${TRACING}" ]; then
echo "$(date -u +%FT%TZ) ${2}" 1>&2 echo -e "$(date -u +%FT%TZ) ${2}" 1>&2
fi fi
} }
start_test_container() { start_test_container() {
docker run -d --rm -it --name tests-manage-missed --network=cyphernodenet alpine docker run -d --rm -t --name tests-manage-missed --network=cyphernodenet alpine
} }
stop_test_container() { stop_test_container() {
docker stop tests-manage-missed trace 1 "\n\n[stop_test_container] ${BCyan}Stopping existing containers if they are running...${Color_Off}\n"
# docker stop tests-manage-missed
local containers=$(docker ps -q -f "name=tests-manage-missed")
if [ -n "${containers}" ]; then
docker stop ${containers}
fi
} }
exec_in_test_container() { exec_in_test_container() {
docker exec -it tests-manage-missed $@ docker exec -it tests-manage-missed $@
} }
wait_for_proxy() {
trace 1 "\n\n[wait_for_proxy] ${BCyan}Waiting for the proxy to be ready...${Color_Off}\n"
# First ping the containers to make sure they're up...
docker exec -t tests-manage-missed sh -c 'while true ; do ping -c 1 proxy ; [ "$?" -eq "0" ] && break ; sleep 5; done'
# Now check if the proxy is ready to accept requests...
docker exec -t tests-manage-missed sh -c 'while true ; do curl proxy:8888/helloworld ; [ "$?" -eq "0" ] && break ; sleep 5; done'
}
test_manage_missed_0_conf() { test_manage_missed_0_conf() {
# Missed 0-conf: # Missed 0-conf:
# 1. Get new address # 1. Get new address
@@ -46,13 +73,13 @@ test_manage_missed_0_conf() {
trace 3 "[test_manage_missed_0_conf] response=${response}" trace 3 "[test_manage_missed_0_conf] response=${response}"
trace 3 "[test_manage_missed_0_conf] Shutting down the proxy..." trace 3 "[test_manage_missed_0_conf] Shutting down the proxy..."
docker stop $(docker ps -q -f "name=proxy") docker stop $(docker ps -q -f "name=proxy\.")
trace 3 "[test_manage_missed_0_conf] Sending coins to watched address while proxy is down..." trace 3 "[test_manage_missed_0_conf] Sending coins to watched address while proxy is down..."
docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address} 0.0001 docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address} 0.0001
# txid1=$(exec_in_test_container curl -d '{"address":"'${address}'","amount":0.0001}' proxy:8888/spend | jq -r ".txid")
trace 3 "[test_manage_missed_0_conf] Sleeping for 10 seconds to let the proxy restart..." wait_for_proxy
sleep 10
trace 3 "[test_manage_missed_0_conf] Calling executecallbacks..." trace 3 "[test_manage_missed_0_conf] Calling executecallbacks..."
exec_in_test_container curl -s -H "Content-Type: application/json" proxy:8888/executecallbacks exec_in_test_container curl -s -H "Content-Type: application/json" proxy:8888/executecallbacks
@@ -87,18 +114,18 @@ test_manage_missed_1_conf() {
trace 3 "[test_manage_missed_1_conf] Sending coins to watched address while proxy is up..." trace 3 "[test_manage_missed_1_conf] Sending coins to watched address while proxy is up..."
docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address} 0.0001 docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address} 0.0001
# txid1=$(exec_in_test_container curl -d '{"address":"'${address}'","amount":0.0001}' proxy:8888/spend | jq -r ".txid")
trace 3 "[test_manage_missed_1_conf] Sleeping for 10 seconds to let the 0-conf callbacks to happen..." trace 3 "[test_manage_missed_1_conf] Sleeping for 20 seconds to let the 0-conf callbacks to happen..."
sleep 10 sleep 20
trace 3 "[test_manage_missed_1_conf] Shutting down the proxy..." trace 3 "[test_manage_missed_1_conf] Shutting down the proxy..."
docker stop $(docker ps -q -f "name=proxy") docker stop $(docker ps -q -f "name=proxy\.")
trace 3 "[test_manage_missed_1_conf] Mine a new block..." trace 3 "[test_manage_missed_1_conf] Mine a new block..."
mine mine
trace 3 "[test_manage_missed_1_conf] Sleeping for 10 seconds to let the proxy restart..." wait_for_proxy
sleep 10
trace 3 "[test_manage_missed_1_conf] Calling executecallbacks..." trace 3 "[test_manage_missed_1_conf] Calling executecallbacks..."
exec_in_test_container curl -s -H "Content-Type: application/json" proxy:8888/executecallbacks exec_in_test_container curl -s -H "Content-Type: application/json" proxy:8888/executecallbacks
@@ -107,15 +134,15 @@ test_manage_missed_1_conf() {
wait_for_callbacks() { wait_for_callbacks() {
trace 1 "[wait_for_callbacks] ${BCyan}Let's start the callback servers!...${Color_Off}" trace 1 "[wait_for_callbacks] ${BCyan}Let's start the callback servers!...${Color_Off}"
docker exec -t tests-manage-missed sh -c "nc -vlp1111 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo 1>&2'" & docker exec -t tests-manage-missed sh -c "nc -vlp1111 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; echo -en \"\\033[40m\\033[0;37m\" >&2 ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo -e \"\033[0m\" >&2'" &
docker exec -t tests-manage-missed sh -c "nc -vlp1112 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo 1>&2'" & docker exec -t tests-manage-missed sh -c "nc -vlp1112 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; echo -en \"\\033[40m\\033[0;37m\" >&2 ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo -e \"\033[0m\" >&2'" &
docker exec -t tests-manage-missed sh -c "nc -vlp1113 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo 1>&2'" & docker exec -t tests-manage-missed sh -c "nc -vlp1113 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; echo -en \"\\033[40m\\033[0;37m\" >&2 ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo -e \"\033[0m\" >&2'" &
docker exec -t tests-manage-missed sh -c "nc -vlp1114 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo 1>&2'" & docker exec -t tests-manage-missed sh -c "nc -vlp1114 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; echo -en \"\\033[40m\\033[0;37m\" >&2 ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo -e \"\033[0m\" >&2'" &
} }
TRACING=3 TRACING=3
stop_test_container
start_test_container start_test_container
wait_for_callbacks wait_for_callbacks
@@ -128,9 +155,8 @@ trace 2 "url2=${url2}"
trace 2 "url3=${url3}" trace 2 "url3=${url3}"
trace 2 "url4=${url4}" trace 2 "url4=${url4}"
exec_in_test_container apk add curl exec_in_test_container apk add --update curl
# exec_in_test_container ping -c 5 tests-manage-missed
# exec_in_test_container curl -vd 'toto' ${url1}/allo
test_manage_missed_0_conf test_manage_missed_0_conf
test_manage_missed_1_conf test_manage_missed_1_conf

View File

@@ -0,0 +1,450 @@
#!/bin/bash
. ./colors.sh
. ./mine.sh
# This needs to be run in regtest
# You need jq installed for these tests to run correctly
# This will test:
#
# - watchxpub
# - get_unused_addresses_by_watchlabel
# - derivepubpath_bitcoind
# - getactivexpubwatches
# - getactivewatchesbyxpub
# - getactivewatchesbylabel
# - spend
# - get_txns_by_watchlabel
# - unwatchxpubbyxpub
# - unwatchxpubbylabel
#
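A hedged quick-reference for the read-only endpoints the numbered steps below drive; <label> and <xpub> are placeholders:
# curl proxy:8888/get_unused_addresses_by_watchlabel/<label>/10 | jq
# curl proxy:8888/getactivexpubwatches | jq
# curl proxy:8888/getactivewatchesbyxpub/<xpub> | jq
# curl proxy:8888/getactivewatchesbylabel/<label> | jq
# curl proxy:8888/get_txns_by_watchlabel/<label>/1000 | jq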
trace() {
if [ "${1}" -le "${TRACING}" ]; then
echo -e "$(date -u +%FT%TZ) ${2}" 1>&2
fi
}
start_test_container() {
docker run -d --rm -t --name tests-watch-pub32 --network=cyphernodenet alpine
}
stop_test_container() {
trace 1 "\n\n[stop_test_container] ${BCyan}Stopping existing containers if they are running...${Color_Off}\n"
# docker stop tests-watch-pub32
# docker stop tests-watch-pub32-cb
local containers=$(docker ps -q -f "name=tests-watch-pub32")
if [ -n "${containers}" ]; then
docker stop ${containers}
fi
}
exec_in_test_container() {
docker exec -it tests-watch-pub32 "$@"
}
test_watch_pub32() {
# Watch an xpub
# 1. Call watchxpub with xpub1 label1 with url1 as callback
# 2. Call watchxpub with xpub2 label2 with url2 as callback
# 3. Call get_unused_addresses_by_watchlabel with label1 /10, take last as address1, save index1
# 4. Call get_unused_addresses_by_watchlabel with label2 /10, take last as address2, save index2
# 5. Call derivepubpath_bitcoind with xpub1 path 0/index1, compare with address1
# 6. Call derivepubpath_bitcoind with xpub2 path 0/index2, compare with address2
# 7. Call getactivexpubwatches
# 8. Call getactivewatchesbyxpub with xpub1, compare index1'th address with address1
# 9. Call getactivewatchesbyxpub with xpub2, compare index2'th address with address2
# 10. Call getactivewatchesbylabel with label1, compare index1'th address with address1
# 11. Call getactivewatchesbylabel with label2, compare index2'th address with address2
# 12. Send coins to address1, wait for callback
# 13. Send coins to address2, wait for callback
# 14. Call get_txns_by_watchlabel for label1, search sent tx
# 15. Call get_txns_by_watchlabel for label2, search sent tx
# 16. Call get_unused_addresses_by_watchlabel with label1 /10, check address1 is NOT there
# 17. Call get_unused_addresses_by_watchlabel with label2 /10, check address2 is NOT there
# 18. Call getactivexpubwatches
# 19. Call getactivewatchesbyxpub with xpub1, last n should be 10 more (index1 + 100)
# 20. Call getactivewatchesbyxpub with xpub2, last n should be 10 more (index2 + 100)
# 21. Call unwatchxpubbyxpub with xpub1
# 22. Call unwatchxpubbylabel with label2
# 23. Call getactivewatchesbyxpub with xpub1, should be empty
# 24. Call getactivewatchesbyxpub with xpub2, should be empty
local xpub1="upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb"
local xpub2="tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk"
local label1="label$RANDOM"
local label2="label$RANDOM"
local path1="0/n"
local path2="0/n"
local path_a1
local path_a2
local callbackurl0conf1="http://${callbackservername}:1111/callbackurl0conf1"
local callbackurl1conf1="http://${callbackservername}:1112/callbackurl1conf1"
local callbackurl0conf2="http://${callbackservername}:1113/callbackurl0conf2"
local callbackurl1conf2="http://${callbackservername}:1114/callbackurl1conf2"
local address
local address1
local address2
local last_imported_n1
local last_imported_n2
local last_imported_n1_x
local last_imported_n2_x
local index
local index1
local index2
local txid
local txid1
local txid2
local data
local response
trace 1 "\n\n[test_watch_pub32] ${BCyan}Let's test \"watch by xpub\" features!...${Color_Off}\n"
# 1. Call watchxpub with xpub1 label1 with url1 as callback
trace 2 "\n\n[test_watch_pub32] ${BCyan}1. watchxpub 1...${Color_Off}\n"
data='{"label":"'${label1}'","pub32":"'${xpub1}'","path":"'${path1}'","nstart":0,"unconfirmedCallbackURL":"'${callbackurl0conf1}'","confirmedCallbackURL":"'${callbackurl1conf1}'"}'
trace 3 "[test_watch_pub32] data=${data}"
response=$(exec_in_test_container curl -d "${data}" proxy:8888/watchxpub)
trace 3 "[test_watch_pub32] response=${response}"
data=$(echo "${response}" | jq -re ".error")
if [ "${?}" -eq "0" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 1. watchxpub 1 failed: ${data}! ${Color_Off}\n"
return 10
fi
# 2. Call watchxpub with xpub2 label2 with url2 as callback
trace 2 "\n\n[test_watch_pub32] ${BCyan}2. watchxpub 2...${Color_Off}\n"
data='{"label":"'${label2}'","pub32":"'${xpub2}'","path":"'${path2}'","nstart":0,"unconfirmedCallbackURL":"'${callbackurl0conf2}'","confirmedCallbackURL":"'${callbackurl1conf2}'"}'
trace 3 "[test_watch_pub32] data=${data}"
response=$(exec_in_test_container curl -d "${data}" proxy:8888/watchxpub)
trace 3 "[test_watch_pub32] response=${response}"
data=$(echo "${response}" | jq -re ".label")
if [ "${label2}" != "${data}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 2. watchxpub 2 failed! ${Color_Off}\n"
return 20
fi
# 3. Call get_unused_addresses_by_watchlabel with label1 /10, take last as address1, save index1
trace 2 "\n\n[test_watch_pub32] ${BCyan}3. Call get_unused_addresses_by_watchlabel with label1 /10...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/get_unused_addresses_by_watchlabel/${label1}/10)
trace 3 "[test_watch_pub32] response=${response}"
address1=$(echo "${response}" | jq -r ".label_unused_addresses[9] | .address")
trace 3 "[test_watch_pub32] address1=${address1}"
index1=$(echo "${response}" | jq -r ".label_unused_addresses[9] | .address_pub32_index")
trace 3 "[test_watch_pub32] index1=${index1}"
if [ "${address1}" = "null" ] || [ "${index1}" = "null" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 3. Call get_unused_addresses_by_watchlabel with label1 /10! ${Color_Off}\n"
return 87
fi
# 4. Call get_unused_addresses_by_watchlabel with label2 /10, take last as address2, save index2
trace 2 "\n\n[test_watch_pub32] ${BCyan}4. Call get_unused_addresses_by_watchlabel with label2 /10...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/get_unused_addresses_by_watchlabel/${label2}/10)
trace 3 "[test_watch_pub32] response=${response}"
address2=$(echo "${response}" | jq -r ".label_unused_addresses[9] | .address")
trace 3 "[test_watch_pub32] address2=${address2}"
index2=$(echo "${response}" | jq -r ".label_unused_addresses[9] | .address_pub32_index")
trace 3 "[test_watch_pub32] index2=${index2}"
if [ "${address2}" = "null" ] || [ "${index2}" = "null" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 4. Call get_unused_addresses_by_watchlabel with label2 /10! ${Color_Off}\n"
return 87
fi
# 5. Call derivepubpath_bitcoind with xpub1 path 0/index1, compare with address1
trace 2 "\n\n[test_watch_pub32] ${BCyan}5. Call derivepubpath_bitcoind with xpub1 path 0/index1...${Color_Off}\n"
path_a1=$(echo "${path1}" | sed -En "s/n/${index1}/p")
data='{"pub32":"'${xpub1}'","path":"'${path_a1}'"}'
trace 3 "[test_watch_pub32] data=${data}"
response=$(exec_in_test_container curl -d "${data}" proxy:8888/derivepubpath_bitcoind)
trace 3 "[test_watch_pub32] response=${response}"
address=$(echo "${response}" | jq -r ".[0]")
trace 3 "[test_watch_pub32] address=${address}"
if [ "${address}" != "${address1}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 5. Call derivepubpath_bitcoind with xpub1 path 0/index1! ${Color_Off}\n"
return 30
fi
# 6. Call derivepubpath_bitcoind with xpub2 path 0/index2, compare with address2
trace 2 "\n\n[test_watch_pub32] ${BCyan}6. Call derivepubpath_bitcoind with xpub2 path 0/index2...${Color_Off}\n"
path_a2=$(echo "${path2}" | sed -En "s/n/${index2}/p")
data='{"pub32":"'${xpub2}'","path":"'${path_a2}'"}'
trace 3 "[test_watch_pub32] data=${data}"
response=$(exec_in_test_container curl -d "${data}" proxy:8888/derivepubpath_bitcoind)
trace 3 "[test_watch_pub32] response=${response}"
address=$(echo "${response}" | jq -r ".[0]")
trace 3 "[test_watch_pub32] address=${address}"
if [ "${address}" != "${address2}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 6. Call derivepubpath_bitcoind with xpub2 path 0/index2! ${Color_Off}\n"
return 30
fi
# 7. Call getactivexpubwatches
trace 2 "\n\n[test_watch_pub32] ${BCyan}7. getactivexpubwatches...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivexpubwatches)
trace 3 "[test_watch_pub32] response=${response}"
last_imported_n1=$(echo "${response}" | jq -r ".watches | map(select(.pub32 == \"${xpub1}\"))[0] | .last_imported_n")
trace 3 "[test_watch_pub32] last_imported_n1=${last_imported_n1}"
last_imported_n2=$(echo "${response}" | jq -r ".watches | map(select(.pub32 == \"${xpub2}\"))[0] | .last_imported_n")
trace 3 "[test_watch_pub32] last_imported_n2=${last_imported_n2}"
if [ "${last_imported_n1}" -ne "100" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 7. \"${last_imported_n1}\" -ne \"100\"! ${Color_Off}\n"
return 50
fi
if [ "${last_imported_n2}" -ne "100" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 7. \"${last_imported_n2}\" -ne \"100\"! ${Color_Off}\n"
return 55
fi
# 8. Call getactivewatchesbyxpub with xpub1, compare index1'th address with address1
trace 2 "\n\n[test_watch_pub32] ${BCyan}8. Call getactivewatchesbyxpub with xpub1...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivewatchesbyxpub/${xpub1})
# trace 3 "[test_watch_pub32] response=${response}"
address=$(echo "${response}" | jq -r ".watches | map(select(.pub32_index == ${index1}))[0] | .address")
trace 3 "[test_watch_pub32] ${index1}th address=${address}"
if [ "${address}" != "${address1}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 8. Call getactivewatchesbyxpub with xpub1: \"${address}\" != \"${address1}\"! ${Color_Off}\n"
return 60
fi
# Check if last_imported_n1 exists in watched list
index=$(echo "${response}" | jq -r "[.watches[].pub32_index] | index(${last_imported_n1})")
trace 3 "[test_watch_pub32] index=${index}"
if [ "${index}" != "100" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 8. Call getactivewatchesbyxpub with xpub1: \"${index}\" != \"100\"! ${Color_Off}\n"
return 65
fi
# 9. Call getactivewatchesbyxpub with xpub2, compare index2'th address with address2
trace 2 "\n\n[test_watch_pub32] ${BCyan}9. Call getactivewatchesbyxpub with xpub2...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivewatchesbyxpub/${xpub2})
# trace 3 "[test_watch_pub32] response=${response}"
address=$(echo "${response}" | jq -r ".watches | map(select(.pub32_index == ${index2}))[0] | .address")
trace 3 "[test_watch_pub32] ${index2}th address=${address}"
if [ "${address}" != "${address2}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 9. Call getactivewatchesbyxpub with xpub2: \"${address}\" != \"${address2}\"! ${Color_Off}\n"
return 60
fi
# Check if last_imported_n1 exists in watched list
index=$(echo "${response}" | jq -r "[.watches[].pub32_index] | index(${last_imported_n2})")
trace 3 "[test_watch_pub32] index=${index}"
if [ "${index}" != "100" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 9. Call getactivewatchesbyxpub with xpub2: \"${index}\" != \"100\"! ${Color_Off}\n"
return 65
fi
# 10. Call getactivewatchesbylabel with label1, compare index1'th address with address1
trace 2 "\n\n[test_watch_pub32] ${BCyan}10. Call getactivewatchesbylabel with label1...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivewatchesbylabel/${label1})
# trace 3 "[test_watch_pub32] response=${response}"
address=$(echo "${response}" | jq -r ".watches | map(select(.pub32_index == ${index1}))[0] | .address")
trace 3 "[test_watch_pub32] ${index1}th address=${address}"
if [ "${address}" != "${address1}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 10. Call getactivewatchesbylabel with label1: \"${address}\" != \"${address1}\"! ${Color_Off}\n"
return 80
fi
# 11. Call getactivewatchesbylabel with label2, compare index2'th address with address2
trace 2 "\n\n[test_watch_pub32] ${BCyan}11. Call getactivewatchesbylabel with label2...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivewatchesbylabel/${label2})
# trace 3 "[test_watch_pub32] response=${response}"
address=$(echo "${response}" | jq -r ".watches | map(select(.pub32_index == ${index2}))[0] | .address")
trace 3 "[test_watch_pub32] ${index2}th address=${address}"
if [ "${address}" != "${address2}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 11. Call getactivewatchesbylabel with label2: \"${address}\" != \"${address2}\"! ${Color_Off}\n"
return 80
fi
# 12. Send coins to address1, wait for callback
trace 2 "\n\n[test_watch_pub32] ${BCyan}12. Send coins to address1...${Color_Off}\n"
start_callback_server 1111
# Let's use the bitcoin node directly to better simulate an external spend
txid1=$(docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address1} 0.0001 | tr -d "\r\n")
# txid1=$(exec_in_test_container curl -d '{"address":"'${address1}'","amount":0.001}' proxy:8888/spend | jq -r ".txid")
trace 3 "[test_watch_pub32] txid1=${txid1}"
trace 3 "[test_watch_pub32] Waiting for 0-conf callback on address1..."
wait
# 13. Send coins to address2, wait for callback
trace 2 "\n\n[test_watch_pub32] ${BCyan}13. Send coins to address2...${Color_Off}\n"
start_callback_server 1113
# Let's use the bitcoin node directly to better simulate an external spend
txid2=$(docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address2} 0.0001 | tr -d "\r\n")
# txid2=$(exec_in_test_container curl -d '{"address":"'${address2}'","amount":0.001}' proxy:8888/spend | jq -r ".txid")
trace 3 "[test_watch_pub32] txid2=${txid2}"
trace 3 "[test_watch_pub32] Waiting for 0-conf callback on address2..."
wait
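# The two "wait" calls above block on the backgrounded docker run launched by
# start_callback_server: the one-shot nc server answers a single HTTP request
# (the proxy's 0-conf watch callback) and exits, which releases the wait.
# The callback body is assumed to be the usual watch notification JSON, e.g.
# (illustrative only, field list not verified by this test):
#   {"address":"<address1>","txid":"<txid1>","confirmations":0}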
# 14. Call get_txns_by_watchlabel for label1, search sent tx
trace 2 "\n\n[test_watch_pub32] ${BCyan}14. Call get_txns_by_watchlabel for label1...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/get_txns_by_watchlabel/${label1}/1000)
trace 3 "[test_watch_pub32] response=${response}"
txid=$(echo "${response}" | jq -r ".label_txns | map(select(.txid == \"${txid1}\"))[0] | .txid")
trace 3 "[test_watch_pub32] txid searched=${txid}"
if [ "${txid}" != "${txid1}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 14. Call get_txns_by_watchlabel for label1: \"${txid}\" != \"${txid1}\"! ${Color_Off}\n"
return 88
fi
# 15. Call get_txns_by_watchlabel for label2, search sent tx
trace 2 "\n\n[test_watch_pub32] ${BCyan}15. Call get_txns_by_watchlabel for label2...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/get_txns_by_watchlabel/${label2}/1000)
trace 3 "[test_watch_pub32] response=${response}"
txid=$(echo "${response}" | jq -r ".label_txns | map(select(.txid == \"${txid2}\"))[0] | .txid")
trace 3 "[test_watch_pub32] txid searched=${txid}"
if [ "${txid}" != "${txid2}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 15. Call get_txns_by_watchlabel for label2: \"${txid}\" != \"${txid2}\"! ${Color_Off}\n"
return 88
fi
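# Response shape assumed by the jq filters in steps 14-15 (illustrative, trimmed):
#   {"label_txns":[{"label":"label1","txid":"<txid1>","confirmations":0}]}
# The trailing /1000 path segment is taken here to be the row-count argument of
# get_txns_by_watchlabel, chosen large enough that the freshly sent tx is included.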
# 16. Call get_unused_addresses_by_watchlabel with label1 /10, check address1 is NOT there
trace 2 "\n\n[test_watch_pub32] ${BCyan}16. Call get_unused_addresses_by_watchlabel with label1 /10...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/get_unused_addresses_by_watchlabel/${label1}/10)
trace 3 "[test_watch_pub32] response=${response}"
address=$(echo "${response}" | jq -r ".label_unused_addresses | map(select(.address == \"${address1}\"))[0] | .address")
trace 3 "[test_watch_pub32] ${index1}th address searched=${address}"
if [ "${address}" = "${address1}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 16. Call get_unused_addresses_by_watchlabel with label1 /10: \"${address}\" = \"${address1}\"! ${Color_Off}\n"
return 87
fi
# 17. Call get_unused_addresses_by_watchlabel with label2 /10, check address2 is NOT there
trace 2 "\n\n[test_watch_pub32] ${BCyan}17. Call get_unused_addresses_by_watchlabel with label2 /10...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/get_unused_addresses_by_watchlabel/${label2}/10)
trace 3 "[test_watch_pub32] response=${response}"
address=$(echo "${response}" | jq -r ".label_unused_addresses | map(select(.address == \"${address2}\"))[0] | .address")
trace 3 "[test_watch_pub32] ${index2}th address searched=${address}"
if [ "${address}" = "${address2}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 17. Call get_unused_addresses_by_watchlabel with label2 /10: \"${address}\" = \"${address2}\"! ${Color_Off}\n"
return 87
fi
# 18. Call getactivexpubwatches
trace 2 "\n\n[test_watch_pub32] ${BCyan}18. getactivexpubwatches...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivexpubwatches)
trace 3 "[test_watch_pub32] response=${response}"
last_imported_n1_x=$(echo "${response}" | jq -r ".watches | map(select(.pub32 == \"${xpub1}\"))[0] | .last_imported_n")
trace 3 "[test_watch_pub32] last_imported_n1_x=${last_imported_n1_x}"
last_imported_n2_x=$(echo "${response}" | jq -r ".watches | map(select(.pub32 == \"${xpub2}\"))[0] | .last_imported_n")
trace 3 "[test_watch_pub32] last_imported_n2_x=${last_imported_n2_x}"
if [ "${last_imported_n1_x}" -ne "$((100+${index1}))" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 18. Call getactivexpubwatches: \"${last_imported_n1_x}\" -ne \"$((100+${index1}))\"! ${Color_Off}\n"
return 90
fi
if [ "${last_imported_n2_x}" -ne "$((100+${index2}))" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 18. Call getactivexpubwatches \"${last_imported_n2_x}\" -ne \"$((100+${index2}))\"! ${Color_Off}\n"
return 95
fi
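# Gap-limit assumption behind steps 18-20: once the watched address at
# derivation index N receives funds, the proxy is expected to extend the watch
# window by importing addresses up to N + 100, so last_imported_n moves from
# 100 to index + 100 (illustrative: index1=12 would give last_imported_n1_x=112).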
# 19. Call getactivewatchesbyxpub with xpub1, last imported index should now be index1 + 100
trace 2 "\n\n[test_watch_pub32] ${BCyan}19. Call getactivewatchesbyxpub with xpub1...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivewatchesbyxpub/${xpub1})
# trace 3 "[test_watch_pub32] response=${response}"
index=$(echo "${response}" | jq -r "[.watches[].pub32_index] | index(${last_imported_n1_x})")
trace 3 "[test_watch_pub32] index=${index}"
if [ "${index}" != "${last_imported_n1_x}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 19. Call getactivewatchesbyxpub with xpub1: \"${index}\" != \"${last_imported_n1_x}\"! ${Color_Off}\n"
return 100
fi
# 20. Call getactivewatchesbyxpub with xpub2, last imported index should now be index2 + 100
trace 2 "\n\n[test_watch_pub32] ${BCyan}20. Call getactivewatchesbyxpub with xpub2...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivewatchesbyxpub/${xpub2})
# trace 3 "[test_watch_pub32] response=${response}"
index=$(echo "${response}" | jq -r "[.watches[].pub32_index] | index(${last_imported_n2_x})")
trace 3 "[test_watch_pub32] index=${index}"
if [ "${index}" != "${last_imported_n2_x}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 20. Call getactivewatchesbyxpub with xpub2: \"${index}\" != \"${last_imported_n2_x}\"! ${Color_Off}\n"
return 100
fi
# 21. Call unwatchxpubbyxpub with xpub1
trace 2 "\n\n[test_watch_pub32] ${BCyan}21. unwatchxpubbyxpub with xpub1...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/unwatchxpubbyxpub/${xpub1})
trace 3 "[test_watch_pub32] response=${response}"
data=$(echo "${response}" | jq -re ".pub32")
if [ "${xpub1}" != "${data}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 21. Call unwatchxpubbyxpub with xpub1! ${Color_Off}\n"
return 120
fi
# 22. Call unwatchxpubbylabel with label2
trace 2 "\n\n[test_watch_pub32] ${BCyan}22. unwatchxpubbylabel with label2...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/unwatchxpubbylabel/${label2})
trace 3 "[test_watch_pub32] response=${response}"
data=$(echo "${response}" | jq -re ".label")
if [ "${label2}" != "${data}" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 22. Call unwatchxpubbylabel with label2 failed! ${Color_Off}\n"
return 130
fi
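# Steps 21-22 exercise both removal paths: unwatchxpubbyxpub (by the xpub string)
# and unwatchxpubbylabel (by the watch label).  After both calls, neither xpub
# should have active derived-address watches left, which is what steps 23-24
# verify by expecting an empty list, e.g. (illustrative):
#   {"watches":[]}  ->  jq '.watches | length'  ->  0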
# 23. Call getactivewatchesbyxpub with xpub1, should be empty
trace 2 "\n\n[test_watch_pub32] ${BCyan}23. getactivewatchesbyxpub with xpub1...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivewatchesbyxpub/${xpub1})
trace 3 "[test_watch_pub32] response=${response}"
data=$(echo "${response}" | jq ".watches | length")
if [ "${data}" -ne "0" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 23. getactivewatchesbyxpub xpub1 still watching! ${Color_Off}\n"
return 140
fi
# 24. Call getactivewatchesbyxpub with xpub2, should be empty
trace 2 "\n\n[test_watch_pub32] ${BCyan}24. getactivewatchesbyxpub with xpub2...${Color_Off}\n"
response=$(exec_in_test_container curl proxy:8888/getactivewatchesbyxpub/${xpub2})
trace 3 "[test_watch_pub32] response=${response}"
data=$(echo "${response}" | jq ".watches | length")
if [ "${data}" -ne "0" ]; then
trace 1 "\n\n[test_watch_pub32] ${On_Red}${BBlack} 24. getactivewatchesbyxpub xpub2 still watching! ${Color_Off}\n"
return 150
fi
trace 1 "\n\n[test_watch_pub32] ${On_IGreen}${BBlack} ALL GOOD! Yayyyy! ${Color_Off}\n"
}
start_callback_server() {
trace 1 "\n\n[start_callback_server] ${BCyan}Let's start a callback server!...${Color_Off}\n"
port=${1:-${callbackserverport}}
docker run --rm -t --name tests-watch-pub32-cb --network=cyphernodenet alpine sh -c "nc -vlp${port} -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; echo -en \"\\033[40m\\033[0;37m\" >&2 ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo -e \"\033[0m\" >&2'" &
# docker run --rm -it --name tests-watch-pub32-cb --network=cyphernodenet alpine sh -c "nc -vlkp1111 -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; echo -en \"\\033[40m\\033[0;37m\" >&2 ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo -e \"\033[0m\" >&2'"
}
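# Usage sketch (assumptions: the test containers share cyphernodenet and the
# proxy can reach the callback container on the chosen port, as in steps 12-13):
#   start_callback_server 1111   # one-shot HTTP 200 responder in the background
#   # ...trigger the watch callback (e.g. send coins to a watched address)...
#   wait                         # returns once the callback has been served
# The commented-out variant above keeps nc listening (-k) for multiple callbacks
# instead of exiting after the first one.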
TRACING=3
stop_test_container
start_test_container
callbackserverport="1111"
callbackservername="tests-watch-pub32-cb"
trace 1 "\n\n[test_watch_pub32] ${BCyan}Installing needed packages...${Color_Off}\n"
exec_in_test_container apk add --update curl
test_watch_pub32
trace 1 "\n\n[test_watch_pub32] ${BCyan}Tearing down...${Color_Off}\n"
wait
stop_test_container
trace 1 "\n\n[test_watch_pub32] ${BCyan}See ya!${Color_Off}\n"

View File

@@ -2,8 +2,28 @@
 # . /mine.sh
+# This should be run in regtest
 # docker run -it --rm -it --name cn-tests --network=cyphernodenet -v "$PWD/mine.sh:/mine.sh" -v "$PWD/tests.sh:/tests.sh" -v "$PWD/tests-cb.sh:/tests-cb.sh" alpine /tests.sh
+# This will test:
+#
+# - getbestblockhash
+# - getbestblockinfo
+# - getblockinfo
+# - getnewaddress
+# - getbalance
+# - watch and callbacks
+# - getactivewatches
+# - unwatch
+# - deriveindex
+# - derivepubpath
+# - spend
+# - gettransaction
+# - ln_getinfo
+# - ln_newaddr
+#
+#
 tests()
 {
@@ -108,12 +128,12 @@ tests()
 fi
 local imported=$(echo "${response}" | jq ".imported" | tr -d '\"')
 echo "imported=${imported}"
-if [ "${imported}" != "1" ]; then
+if [ "${imported}" != "true" ]; then
 exit 30
 fi
 local inserted=$(echo "${response}" | jq ".inserted" | tr -d '\"')
 echo "inserted=${inserted}"
-if [ "${inserted}" != "1" ]; then
+if [ "${inserted}" != "true" ]; then
 exit 40
 fi
 local unconfirmedCallbackURL=$(echo "${response}" | jq ".unconfirmedCallbackURL" | tr -d '\"')

View File

@@ -8,77 +8,79 @@
 . ./responsetoclient.sh
 . ./trace.sh
-main()
-{
+main() {
 trace "Entering main()..."
 local step=0
 local cmd
 local http_method
 local line
 local content_length
 local response
 local returncode
 while read line; do
 line=$(echo "${line}" | tr -d '\r\n')
 trace "[main] line=${line}"
 if [ "${cmd}" = "" ]; then
 # First line!
 # Looking for something like:
 # GET /cmd/params HTTP/1.1
 # POST / HTTP/1.1
 cmd=$(echo "${line}" | cut -d '/' -f2 | cut -d ' ' -f1)
 trace "[main] cmd=${cmd}"
 http_method=$(echo "${line}" | cut -d ' ' -f1)
 trace "[main] http_method=${http_method}"
 if [ "${http_method}" = "GET" ]; then
 step=1
 fi
 fi
 if [ "${line}" = "" ]; then
 trace "[main] empty line"
 if [ ${step} -eq 1 ]; then
 trace "[main] body part finished, disconnecting"
 break
 else
 trace "[main] headers part finished, body incoming"
 step=1
 fi
 fi
 # line=content-length: 406
 case "${line}" in *[cC][oO][nN][tT][eE][nN][tT]-[lL][eE][nN][gG][tT][hH]*)
 content_length=$(echo "${line}" | cut -d ':' -f2)
 trace "[main] content_length=${content_length}";
 ;;
 esac
 if [ ${step} -eq 1 ]; then
 trace "[main] step=${step}"
 if [ "${http_method}" = "POST" ]; then
 read -n ${content_length} line
 trace "[main] line=${line}"
 fi
 case "${cmd}" in
 derive)
 # POST http://192.168.111.152:7777/derive
 # BODY {"pub32":"tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk","path":"0/25-30"}
 # BODY {"pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/25-30"}
 # BODY {"pub32":"vpub5SLqN2bLY4WeZF3kL4VqiWF1itbf3A6oRrq9aPf16AZMVWYCuN9TxpAZwCzVgW94TNzZPNc9XAHD4As6pdnExBtCDGYRmNJrcJ4eV9hNqcv","path":"0/25-30"}
 response=$(derive "${line}")
-response_to_client "${response}" ${?}
-break
+returncode=$?
 ;;
+*)
+response='{"error": {"code": -32601, "message": "Method not found"}, "id": "1"}'
+returncode=1
+;;
 esac
+response=$(echo "${response}" | jq -Mc)
+response_to_client "${response}" ${returncode}
 break
 fi
 done
 trace "[main] exiting"
 return 0
 }
+export TRACING
 main
 exit $?
View File

@@ -1,6 +1,3 @@
 #!/bin/sh
-export TRACING
-export PYCOIN_LISTENING_PORT
-nc -vlkp${PYCOIN_LISTENING_PORT} -e ./requesthandler.sh
+exec nc -vlkp${PYCOIN_LISTENING_PORT} -e ./requesthandler.sh