Merge branch 'dev' into features/env-proxycron

This commit is contained in:
kexkey
2022-03-28 12:12:05 -04:00
committed by GitHub
20 changed files with 665 additions and 213 deletions

View File

@@ -17,7 +17,7 @@
# action_installation_info=stats
# action_getmempoolinfo=stats
# action_getblockhash=stats
#
#
# # Watcher can do what the stats can do, plus:
# action_watch=watcher
# action_unwatch=watcher
@@ -47,7 +47,7 @@
# action_ln_listpays=watcher
# action_ln_paystatus=watcher
# action_bitcoin_estimatesmartfee=watcher
#
#
# # Spender can do what the watcher can do, plus:
# action_get_txns_spending=spender
# action_getbalance=spender
@@ -78,10 +78,10 @@
# action_listbatchers=spender
# action_getbatcher=spender
# action_getbatchdetails=spender
#
#
# # Admin can do what the spender can do, plus:
#
#
#
#
# # Should be called from inside the Docker network only:
# action_conf=internal
# action_newblock=internal
@@ -558,7 +558,7 @@ exec_in_test_container_leave_lf apk add --update curl coreutils openssl
# Copy keys to test container
trace 1 "\n\n[test_gatekeeper] ${BCyan}Copying keys and certs to test container...${Color_Off}\n"
gatekeeperid=$(docker ps -q -f "name=cyphernode_gatekeeper")
gatekeeperid=$(docker ps -q -f "name=cyphernode.gatekeeper")
testid=$(docker ps -q -f "name=tests-gatekeeper")
docker cp ${gatekeeperid}:/etc/nginx/conf.d/keys.properties - | docker cp - ${testid}:/
docker cp ${gatekeeperid}:/etc/ssl/certs/cert.pem - | docker cp - ${testid}:/

View File

@@ -2,15 +2,15 @@
TRACING=1
# CYPHERNODE VERSION "v0.7.0-dev"
CONF_VERSION="v0.7.0-dev-local"
GATEKEEPER_VERSION="v0.7.0-dev-local"
TOR_VERSION="v0.7.0-dev-local"
PROXY_VERSION="v0.7.0-dev-local"
NOTIFIER_VERSION="v0.7.0-dev-local"
PROXYCRON_VERSION="v0.7.0-dev-local"
OTSCLIENT_VERSION="v0.7.0-dev-local"
PYCOIN_VERSION="v0.7.0-dev-local"
# CYPHERNODE VERSION "v0.9.0-dev"
CONF_VERSION="v0.9.0-dev-local"
GATEKEEPER_VERSION="v0.9.0-dev-local"
TOR_VERSION="v0.9.0-dev-local"
PROXY_VERSION="v0.9.0-dev-local"
NOTIFIER_VERSION="v0.9.0-dev-local"
PROXYCRON_VERSION="v0.9.0-dev-local"
OTSCLIENT_VERSION="v0.9.0-dev-local"
PYCOIN_VERSION="v0.9.0-dev-local"
trace()
{

View File

@@ -485,11 +485,11 @@
"dev": true
},
"ajv": {
"version": "6.10.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.10.0.tgz",
"integrity": "sha512-nffhOpkymDECQyR0mnsUtoCE8RlX38G0rYP+wgLWFyZuUyuuojSSvi/+euOiQBIn63whYwYVIIH1TvE3tu4OEg==",
"version": "6.12.3",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.3.tgz",
"integrity": "sha512-4K0cK3L1hsqk9xIb2z9vs/XU+PGJZ9PNpJRDS9YLzmNdX6jmVPfamLvTJr0aDAusnHyCHO6MjzlkAsgtqp9teA==",
"requires": {
"fast-deep-equal": "^2.0.1",
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
"json-schema-traverse": "^0.4.1",
"uri-js": "^4.2.2"
@@ -873,6 +873,13 @@
"resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
"integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA=="
},
"chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==",
"dev": true,
"optional": true
},
"ci-info": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
@@ -1702,9 +1709,9 @@
"dev": true
},
"fast-deep-equal": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz",
"integrity": "sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk="
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
},
"fast-json-stable-stringify": {
"version": "2.0.0",
@@ -1824,6 +1831,16 @@
"map-cache": "^0.2.2"
}
},
"fs-minipass": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.7.tgz",
"integrity": "sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==",
"dev": true,
"optional": true,
"requires": {
"minipass": "^2.6.0"
}
},
"fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
@@ -1886,9 +1903,7 @@
},
"chownr": {
"version": "1.1.1",
"bundled": true,
"dev": true,
"optional": true
"bundled": true
},
"code-point-at": {
"version": "1.1.0",
@@ -1944,8 +1959,6 @@
"fs-minipass": {
"version": "1.2.5",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"minipass": "^2.2.1"
}
@@ -2026,12 +2039,6 @@
"dev": true,
"optional": true
},
"ini": {
"version": "1.3.5",
"bundled": true,
"dev": true,
"optional": true
},
"is-fullwidth-code-point": {
"version": "1.0.0",
"bundled": true,
@@ -2065,7 +2072,6 @@
"minipass": {
"version": "2.3.5",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"safe-buffer": "^5.1.2",
@@ -2075,8 +2081,6 @@
"minizlib": {
"version": "1.2.1",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"minipass": "^2.2.1"
}
@@ -2265,7 +2269,6 @@
"safe-buffer": {
"version": "5.1.2",
"bundled": true,
"dev": true,
"optional": true
},
"safer-buffer": {
@@ -2333,21 +2336,6 @@
"dev": true,
"optional": true
},
"tar": {
"version": "4.4.8",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"chownr": "^1.1.1",
"fs-minipass": "^1.2.5",
"minipass": "^2.3.4",
"minizlib": "^1.1.1",
"mkdirp": "^0.5.0",
"safe-buffer": "^5.1.2",
"yallist": "^3.0.2"
}
},
"util-deprecate": {
"version": "1.0.2",
"bundled": true,
@@ -2372,7 +2360,6 @@
"yallist": {
"version": "3.0.3",
"bundled": true,
"dev": true,
"optional": true
}
}
@@ -2628,6 +2615,13 @@
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
"integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
},
"ini": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
"dev": true,
"optional": true
},
"inquirer": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.3.1.tgz",
@@ -3729,6 +3723,27 @@
"integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
"dev": true
},
"minipass": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-2.9.0.tgz",
"integrity": "sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==",
"dev": true,
"optional": true,
"requires": {
"safe-buffer": "^5.1.2",
"yallist": "^3.0.0"
}
},
"minizlib": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.3.3.tgz",
"integrity": "sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==",
"dev": true,
"optional": true,
"requires": {
"minipass": "^2.9.0"
}
},
"mixin-deep": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
@@ -5019,6 +5034,48 @@
}
}
},
"tar": {
"version": "4.4.19",
"resolved": "https://registry.npmjs.org/tar/-/tar-4.4.19.tgz",
"integrity": "sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA==",
"dev": true,
"optional": true,
"requires": {
"chownr": "^1.1.4",
"fs-minipass": "^1.2.7",
"minipass": "^2.9.0",
"minizlib": "^1.3.3",
"mkdirp": "^0.5.5",
"safe-buffer": "^5.2.1",
"yallist": "^3.1.1"
},
"dependencies": {
"minimist": {
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
"integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==",
"dev": true,
"optional": true
},
"mkdirp": {
"version": "0.5.5",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
"integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
"dev": true,
"optional": true,
"requires": {
"minimist": "^1.2.5"
}
},
"safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"dev": true,
"optional": true
}
}
},
"test-exclude": {
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-5.2.3.tgz",
@@ -5251,9 +5308,9 @@
}
},
"uri-js": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz",
"integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==",
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"requires": {
"punycode": "^2.1.0"
}
@@ -5457,6 +5514,13 @@
"integrity": "sha512-wNcy4NvjMYL8gogWWYAO7ZFWFfHcbdbE57tZO8e4cbpj8tfUcwrwqSl3ad8HxpYWCdXcJUCeKKZS62Av1affwQ==",
"dev": true
},
"yallist": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
"integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
"dev": true,
"optional": true
},
"yargs": {
"version": "12.0.5",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-12.0.5.tgz",

View File

@@ -16,7 +16,7 @@
],
"dependencies": {
"@rauschma/stringio": "^1.4.0",
"ajv": "^6.10.0",
"ajv": "^6.12.3",
"chalk": "^2.4.2",
"coinstring": "^2.3.0",
"colorsys": "^1.0.22",

View File

@@ -1,6 +1,8 @@
#!/bin/bash
. ./.cyphernodeconf/installer/config.sh
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
. ${current_path}/.cyphernodeconf/installer/config.sh
# be aware that randomly downloaded cyphernode apps will have access to
# your configuration and filesystem.
@@ -14,7 +16,7 @@ start_apps() {
local APP_START_SCRIPT_PATH
local APP_ID
for i in $current_path/apps/*
for i in ${current_path}/apps/*
do
APP_SCRIPT_PATH=$(echo $i)
if [ -d "$APP_SCRIPT_PATH" ] && [ ! -f "$APP_SCRIPT_PATH/ignoreThisApp" ]; then
@@ -39,7 +41,7 @@ start_apps() {
if [ "$DOCKER_MODE" = "swarm" ]; then
docker stack deploy -c $APP_SCRIPT_PATH/docker-compose.yaml $APP_ID
elif [ "$DOCKER_MODE" = "compose" ]; then
docker-compose -f $APP_SCRIPT_PATH/docker-compose.yaml up -d --remove-orphans
docker-compose -p $APP_ID -f $APP_SCRIPT_PATH/docker-compose.yaml up -d --remove-orphans
fi
fi
fi
@@ -58,15 +60,13 @@ fi
export USER=$(id -u <%= default_username %>):$(id -g <%= default_username %>)
<% } %>
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
# Let's make sure the container readyness files are deleted before starting the stack
docker run --rm -v cyphernode_container_monitor:/container_monitor alpine sh -c 'rm -f /container_monitor/*_ready'
<% if (docker_mode == 'swarm') { %>
docker stack deploy -c $current_path/docker-compose.yaml cyphernode
<% } else if(docker_mode == 'compose') { %>
docker-compose -f $current_path/docker-compose.yaml up -d --remove-orphans
docker-compose -p cyphernode -f $current_path/docker-compose.yaml up -d --remove-orphans
<% } %>
start_apps
@@ -80,4 +80,4 @@ else
printf " It's pretty fast!\r\n"
fi
. ./testdeployment.sh
. ${current_path}/testdeployment.sh

View File

@@ -2,7 +2,6 @@
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
# be aware that randomly downloaded cyphernode apps will have access to
# your configuration and filesystem.
# !!!!!!!!! DO NOT INCLUDE APPS WITHOUT REVIEW !!!!!!!!!!
@@ -15,7 +14,7 @@ stop_apps() {
local APP_START_SCRIPT_PATH
local APP_ID
for i in $current_path/apps/*
for i in ${current_path}/apps/*
do
APP_SCRIPT_PATH=$(echo $i)
if [ -d "$APP_SCRIPT_PATH" ] && [ ! -f "$APP_SCRIPT_PATH/ignoreThisApp" ]; then
@@ -38,7 +37,7 @@ stop_apps() {
if [ "$DOCKER_MODE" = "swarm" ]; then
docker stack rm $APP_ID
elif [ "$DOCKER_MODE" = "compose" ]; then
docker-compose -f $APP_SCRIPT_PATH/docker-compose.yaml down
docker-compose -p $APP_ID -f $APP_SCRIPT_PATH/docker-compose.yaml down
fi
fi
@@ -46,7 +45,7 @@ stop_apps() {
done
}
. ./.cyphernodeconf/installer/config.sh
. ${current_path}/.cyphernodeconf/installer/config.sh
stop_apps
export USER=$(id -u):$(id -g)
@@ -54,5 +53,5 @@ export USER=$(id -u):$(id -g)
<% if (docker_mode == 'swarm') { %>
docker stack rm cyphernode
<% } else if(docker_mode == 'compose') { %>
docker-compose -f $current_path/docker-compose.yaml down
docker-compose -p cyphernode -f ${current_path}/docker-compose.yaml down
<% } %>

View File

@@ -1,6 +1,8 @@
#!/bin/bash
. ./.cyphernodeconf/installer/config.sh
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
. ${current_path}/.cyphernodeconf/installer/config.sh
# be aware that randomly downloaded cyphernode apps will have access to
# your configuration and filesystem.
@@ -17,7 +19,7 @@ test_apps() {
local TRAEFIK_HTTP_PORT=<%= traefik_http_port %>
local TRAEFIK_HTTPS_PORT=<%= traefik_https_port %>
for i in $current_path/apps/*
for i in ${current_path}/apps/*
do
APP_SCRIPT_PATH=$(echo $i)
if [ -d "$APP_SCRIPT_PATH" ]; then
@@ -37,7 +39,7 @@ test_apps() {
fi
fi
done
return $returncode
return ${returncode}
}
<% if (run_as_different_user) { %>
@@ -51,8 +53,6 @@ fi
export USER=$(id -u <%= default_username %>):$(id -g <%= default_username %>)
<% } %>
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
# Will test if Cyphernode is fully up and running...
docker run --rm -it -v $current_path/testfeatures.sh:/testfeatures.sh \
-v <%= gatekeeper_datapath %>:/gatekeeper \
@@ -60,9 +60,9 @@ docker run --rm -it -v $current_path/testfeatures.sh:/testfeatures.sh \
-v cyphernode_container_monitor:/container_monitor:ro \
--network cyphernodenet eclipse-mosquitto:<%= mosquitto_version %> /testfeatures.sh
if [ -f $current_path/exitStatus.sh ]; then
. $current_path/exitStatus.sh
rm -f $current_path/exitStatus.sh
if [ -f ${current_path}/exitStatus.sh ]; then
. ${current_path}/exitStatus.sh
rm -f ${current_path}/exitStatus.sh
fi
if [ "$EXIT_STATUS" -ne "0" ]; then

View File

@@ -84,7 +84,7 @@ checkpostgres() {
echo -en "\r\n\e[1;36mTesting Postgres... " > /dev/console
local rc
pg_isready -h postgres -U cyphernode
pg_isready -h postgres -U cyphernode > /dev/null
[ "${?}" -ne "0" ] && return 105
echo -e "\e[1;36mPostgres rocks!" > /dev/console
@@ -109,7 +109,7 @@ checknotifier() {
local response
local returncode
nc -vlp1111 -e sh -c 'echo -en "HTTP/1.1 200 OK\\r\\n\\r\\n" ; date >&2 ; timeout 1 tee /dev/tty | cat ; ' &
nc -lp1111 -e sh -c 'echo -en "HTTP/1.1 200 OK\\r\\n\\r\\n" ; timeout 1 tee /dev/null ;' > /dev/null &
response=$(mosquitto_rr -h broker -W 15 -t notifier -e "response/$$" -m "{\"response-topic\":\"response/$$\",\"cmd\":\"web\",\"url\":\"http://$(hostname):1111/notifiertest\",\"tor\":false}")
returncode=$?
[ "${returncode}" -ne "0" ] && return 115

31
dist/setup.sh vendored
View File

@@ -127,7 +127,7 @@ sudo_if_required() {
}
modify_permissions() {
local directories=("$current_path/apps" "$current_path/.env" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local directories=("$current_path/.env" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
for d in "${directories[@]}"
do
if [[ -e $d ]]; then
@@ -139,7 +139,7 @@ modify_permissions() {
}
modify_owner() {
local directories=("$current_path/apps" "$current_path/.env" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local directories=("$current_path/.env" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER)
for d in "${directories[@]}"
do
@@ -676,7 +676,7 @@ install_docker() {
check_directory_owner() {
# if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0
local directories=("$current_path/apps" "$current_path/.env" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local directories=("$current_path/.env" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local status=0
for d in "${directories[@]}"
do
@@ -780,7 +780,7 @@ sanity_checks_pre_install() {
if [[ $sudo_reason == 'directories' ]]; then
echo " or check your data volumes if they have the right owner."
echo " The owner of the following folders should be '$RUN_AS_USER':"
local directories=("$current_path/apps" "$current_path/.env" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local directories=("$current_path/.env" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local status=0
for d in "${directories[@]}"
do
@@ -804,8 +804,7 @@ install_apps() {
local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER)
local apps_repo="https://github.com/SatoshiPortal/cypherapps.git"
echo " clone $apps_repo into apps"
docker run --rm -u $user -v "$current_path":/git --entrypoint git cyphernode/cyphernodeconf:$CONF_VERSION clone --single-branch -b ${CYPHERAPPS_VERSION} "$apps_repo" /git/apps > /dev/null 2>&1
next
docker run --rm -v "$current_path":/git --entrypoint sh cyphernode/cyphernodeconf:$CONF_VERSION -c "git clone --single-branch -b ${CYPHERAPPS_VERSION} \"$apps_repo\" /git/apps > /dev/null 2>&1 ; chown -R $user /git/apps"
fi
if [[ $FEATURE_LIGHTNING == true ]]; then
@@ -867,16 +866,16 @@ ALWAYSYES=0
SUDO_REQUIRED=0
AUTOSTART=0
# CYPHERNODE VERSION "v0.7.0-dev"
SETUP_VERSION="v0.7.0-dev"
CONF_VERSION="v0.7.0-dev"
GATEKEEPER_VERSION="v0.7.0-dev"
TOR_VERSION="v0.7.0-dev"
PROXY_VERSION="v0.7.0-dev"
NOTIFIER_VERSION="v0.7.0-dev"
PROXYCRON_VERSION="v0.7.0-dev"
OTSCLIENT_VERSION="v0.7.0-dev"
PYCOIN_VERSION="v0.7.0-dev"
# CYPHERNODE VERSION "v0.9.0-dev"
SETUP_VERSION="v0.9.0-dev"
CONF_VERSION="v0.9.0-dev"
GATEKEEPER_VERSION="v0.9.0-dev"
TOR_VERSION="v0.9.0-dev"
PROXY_VERSION="v0.9.0-dev"
NOTIFIER_VERSION="v0.9.0-dev"
PROXYCRON_VERSION="v0.9.0-dev"
OTSCLIENT_VERSION="v0.9.0-dev"
PYCOIN_VERSION="v0.9.0-dev"
CYPHERAPPS_VERSION="dev"
BITCOIN_VERSION="v22.0"
LIGHTNING_VERSION="v0.10.2"

View File

@@ -0,0 +1,76 @@
# Cyphernode v0.8.0
Say hello to PostgreSQL! We moved from SQLite3 to PostgreSQL to take advantage of its enterprise-class features. Here are some of our motivations:
- Better overall performance
- Easier to implement replicas / distributed redundancy
- Running in an independent container: can be used by other containers as well
- More/better administration tools
- Easier to configure C-lightning to use PostgreSQL
- Future development
All of that may also be possible with SQLite3, but with a lot more work.
If you have an existing Cyphernode installation with existing data, Cyphernode will take care of the migration: we built all the required ETL scripts to move your current instance to the new DBMS — hopefully flawlessly.
There are also several improvements and new features in this release. Thanks go to [@pablof7z](https://twitter.com/pablof7z), @phillamy and @schulterklopfer for their valuable contributions, feedback and input!
## New features
- PostgreSQL: migrating from SQLite3 to PostgreSQL
- Automatic migration from current SQLite3 to new PostgreSQL (ETL)
- New Indexes
- Separate container
- Support for labels when:
- watching addresses
- getting new addresses
- New `ln_paystatus` endpoint
- New `validateaddress` endpoint
- New `deriveindex_bitcoind` endpoint (20x faster than Pycoin), also supports ypub/upub and zpub/vpub notations!
- New `derivepubpath_bitcoind` (20x faster than Pycoin), also supports ypub/upub and zpub/vpub notations!
## Fixes and improvements
- Refactoring of _manage_missed_conf_ and _confirmation management_
- `ln_pay` now first pays using `legacy_pay` (MPP disabled) and on failure (for routing reasons), retry with the `pay` plugin (MPP enabled by default)
- Small fixes in `ln_pay`
- Small fixes in `ln_delinvoice`
- Small fixes in `ln_connectfund`
- Small fixes in LN webhooks
- `ln_listpays` can now take a `bolt11` string argument
- Sometimes, Gatekeeper was not compliant with the JWT spec: it now is, while remaining compatible with the previous buggy behavior
- Fixed CN client examples
- Gatekeeper now returns _401 Unauthorized_ on authentication error and _403 Forbidden_ on authorization error
- Gatekeeper now waits for the Proxy to be ready before listening to requests
- More graceful shutdown on certain containers
- Docker now uses the `helloworld` endpoint to check Proxy's health
- Better way to detect slow machines during setup
- Better tests when starting up
- Fixed a bug when running Cyphernode as current user instead of dedicated user
- When trying to add a batcher that already exists (same `label`), it will now modify existing one
- Got rid of the full rawtx from the database! Let's use Bitcoin Core if needed
- `helloworld` endpoint now returns a JSON compliant response
- Added and improved tests:
- api_auth_docker/tests/test-gatekeeper.sh
- proxy_docker/app/tests/test-manage-missed.sh
- proxy_docker/app/tests/test-batching.sh
- proxy_docker/app/tests/test-derive.sh
- proxy_docker/app/tests/test-watchpub32.sh
- proxy_docker/app/tests/test-watches.sh
- Fixed typos and improved clarity in messages
- Bump ws from 5.2.2 to 5.2.3 in /cyphernodeconf_docker
- Bump path-parse from 1.0.6 to 1.0.7 in /cyphernodeconf_docker
- Bump tmpl from 1.0.4 to 1.0.5 in /cyphernodeconf_docker
- Bump validator from 10.11.0 to 13.7.0 in /cyphernodeconf_docker
- Code cleaning
## Upgrades
- C-lightning from v0.10.0 to v0.10.2
- Bitcoin Core from v0.21.1 to v22.0
## Cypherapps
- Batcher from v0.1.2 to v0.2.0
- Spark Wallet from v0.2.17 to v0.3.0
- Specter from v1.3.1 to v1.7.1

View File

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
# Must be logged to docker hub:
# docker login -u cyphernode
@@ -59,18 +59,37 @@ x86_docker="amd64"
arm_docker="arm"
aarch64_docker="arm64"
v1="v0"
v2="v0.8"
v3="v0.8.0"
# Build amd64 and arm64 first, building for arm will trigger the manifest creation and push on hub
#arch_docker=${arm_docker}
#arch_docker=${aarch64_docker}
arch_docker=${x86_docker}
echo -e "\nBuild ${v3} for:\n"
echo "1) AMD 64 bits (Most PCs)"
echo "2) ARM 64 bits (RPi4, Mac M1)"
echo "3) ARM 32 bits (RPi2-3)"
echo -en "\nYour choice (1, 2, 3): "
read arch_input
v1="v0"
v2="v0.7"
v3="v0.7.0"
case "${arch_input}" in
1)
arch_docker=${x86_docker}
;;
2)
arch_docker=${aarch64_docker}
;;
3)
arch_docker=${arm_docker}
;;
*)
echo "Not a valid choice."
exit 1
;;
esac
echo "\nBuilding Cyphernode Core containers\n"
echo "arch_docker=$arch_docker\n"
echo -e "\nBuilding Cyphernode Core containers\n"
echo -e "arch_docker=$arch_docker\n"
image "gatekeeper" "api_auth_docker/" ${arch_docker} \
&& image "proxycron" "cron_docker/" ${arch_docker} \

View File

@@ -303,8 +303,8 @@ ln_connectfund() {
ln_pay() {
trace "Entering ln_pay()..."
# Let's try to pay (MPP disabled) for 30 seconds.
# If this doesn't work for a routing reason, let's try to pay (MPP enabled) for 30 seconds.
# Let's try to pay (MPP enabled) for 85 seconds.
# If this doesn't work for a routing reason, let's try to legacypay (MPP disabled) for 85 seconds.
# If this doesn't work, return an error.
local result
@@ -347,14 +347,39 @@ ln_pay() {
trace "[ln_pay] Expected description <> Invoice description"
returncode=1
else
# Amount and description are as expected (or empty description), let's pay!
trace "[ln_pay] Amount and description are as expected, let's try to pay without MPP!"
# Amount and description are as expected (or empty description), let's see if already paid
trace "[ln_pay] Amount and description are as expected, let's see if already paid"
result=$(ln_listpays "${bolt11}")
returncode=$?
trace_rc ${returncode}
trace "[ln_pay] result=${result}"
local complete pending failed
complete=$(echo "${result}" | jq -er '.pays | map(select(.status == "complete")) | last')
trace "[ln_pay] complete=${complete}"
pending=$(echo "${result}" | jq -er '.pays | map(select(.status == "pending")) | last')
trace "[ln_pay] pending=${pending}"
failed=$(echo "${result}" | jq -er '.pays | map(select(.status == "failed")) | last')
trace "[ln_pay] failed=${failed}"
if [ "${complete}" != "null" ]; then
trace "[ln_pay] responding complete"
echo "${complete}"
return 0
fi
if [ "${pending}" != "null" ]; then
trace "[ln_pay] responding pending"
echo "${pending}"
return 1
fi
# Payment not previously done, let's pay!
trace "[ln_pay] Payment not previously done, let's try to pay with MPP!"
if [ "${invoice_msatoshi}" = "null" ]; then
# "any" amount on the invoice, we force paying the expected_msatoshi provided to ln_pay by the user
result=$(ln_call_lightningd legacypay -k bolt11=${bolt11} msatoshi=${expected_msatoshi} retry_for=30)
result=$(ln_call_lightningd pay -k bolt11=${bolt11} msatoshi=${expected_msatoshi} retry_for=85)
else
result=$(ln_call_lightningd legacypay -k bolt11=${bolt11} retry_for=30)
result=$(ln_call_lightningd pay -k bolt11=${bolt11} retry_for=85)
fi
returncode=$?
trace_rc ${returncode}
@@ -363,34 +388,31 @@ ln_pay() {
# Successful payment example:
#
# {
# "id": 16,
# "payment_hash": "f00877afeec4d771c2db68af80b8afa5dad3b495dad498828327e484c93f67d5",
# "destination": "021ec6ccede19caa0bc7d7f9699c73e63cb2b79a4877529a60d7ac6a4ebb03487a",
# "msatoshi": 1234,
# "amount_msat": "1234msat",
# "msatoshi_sent": 1235,
# "amount_sent_msat": "1235msat",
# "created_at": 1633373202,
# "status": "complete",
# "payment_preimage": "373cd9a0f83426506f1535f6ca1f08f279f0bd82d257fd3fc8cd49fbc25750f2",
# "bolt11": "lntb1ps4kjlrpp57qy80tlwcnthrskmdzhcpw905hdd8dy4mt2f3q5ryljgfjflvl2sdq9u2d2zxqr3jscqp2sp5c2qykk0pdaeh2yrvn4cpkchsnyxwjnaptujggsd6ldqjfd8jhh3qrzjqwyx8nu2hygyvgc02cwdtvuxe0lcxz06qt3lpsldzcdr46my5epmj85hhvqqqtsqqqqqqqlgqqqqqqgq9q9qyyssqpnwtw6mzxu8pr5mrm8677ke8p5fjcu6dyrrvuy8j5f5p8mzv2phr2y0yx3z7mvgf5uqzzdytegg04u7hcu8ma50692cg69cdtsgw9hsph0xeha"
# }
# Failure response examples:
#
# {
# "code": -32602,
# "message": "03c05f973d9c7218e7aec4f52c2c8ab395f51f41d627c398237b5ff056f46faf09: unknown destination node_id (no public channels?)"
# "destination": "029b26c73b2c19ec9bdddeeec97c313670c96b6414ceacae0fb1b3502e490a6cbb",
# "payment_hash": "0d1e62210e7af9a4146258652fd4cfecd2638086850583e994a103884e2b4e78",
# "created_at": 1631200188.550,
# "parts": 1,
# "msatoshi": 530114,
# "amount_msat": "530114msat",
# "msatoshi_sent": 530114,
# "amount_sent_msat": "530114msat",
# "payment_preimage": "2672c5fa280367222bf30db82566b78909927a67d5756d5ae0227b2ff8f3a907",
# "status": "complete"
# }
#
# {
# "code": 206,
# "message": "Route wanted fee of 16101625msat"
# }
#
# Failed payment example:
# {
# "code": 207,
# "message": "Invoice expired"
# "code": 210,
# "message": "Destination 029b26c73b2c19ec9bdddeeec97c313670c96b6414ceacae0fb1b3502e490a6cbb is not reachable directly and all routehints were unusable.",
# "attempts": [
# {
# "status": "failed",
# "failreason": "Destination 029b26c73b2c19ec9bdddeeec97c313670c96b6414ceacae0fb1b3502e490a6cbb is not reachable directly and all routehints were unusable.",
# "partid": 0,
# "amount": "528214msat"
# }
# ]
# }
#
@@ -411,21 +433,23 @@ ln_pay() {
# 207: Invoice expired. Payment took too long before expiration, or already expired at the time you initiated payment. The data field of the error indicates now (the current time) and expiry (the invoice expiration) as UNIX epoch time in seconds.
# 210: Payment timed out without a payment in progress.
# Let's try legacypay if code NOT 207 or 201.
# Let's try pay if code NOT 207 or 201.
if [ "${code}" -eq "201" ] || [ "${code}" -eq "207" ] || [ "${code}" -lt "0" ]; then
if [ "${code}" -eq "201" ] || [ "${code}" -eq "207" ]; then
trace "[ln_pay] Failure code, response will be the cli result."
else
trace "[ln_pay] Ok let's deal with potential routing failures and retry with MPP..."
trace "[ln_pay] Ok let's deal with potential routing failures and retry without MPP..."
if [ "${invoice_msatoshi}" = "null" ]; then
# "any" amount on the invoice, we force paying the expected_msatoshi provided to ln_pay by the user
result=$(ln_call_lightningd pay -k bolt11=${bolt11} msatoshi=${expected_msatoshi} retry_for=30)
result=$(ln_call_lightningd legacypay -k bolt11=${bolt11} msatoshi=${expected_msatoshi} retry_for=85)
else
result=$(ln_call_lightningd pay -k bolt11=${bolt11} retry_for=30)
result=$(ln_call_lightningd legacypay -k bolt11=${bolt11} retry_for=85)
fi
returncode=$?
trace_rc ${returncode}
trace "[ln_pay] result=${result}"
if [ "${returncode}" -ne "0" ]; then
trace "[ln_pay] Failed!"
else
@@ -435,34 +459,36 @@ ln_pay() {
# Successful payment example:
#
# {
# "destination": "029b26c73b2c19ec9bdddeeec97c313670c96b6414ceacae0fb1b3502e490a6cbb",
# "payment_hash": "0d1e62210e7af9a4146258652fd4cfecd2638086850583e994a103884e2b4e78",
# "created_at": 1631200188.550,
# "parts": 1,
# "msatoshi": 530114,
# "amount_msat": "530114msat",
# "msatoshi_sent": 530114,
# "amount_sent_msat": "530114msat",
# "payment_preimage": "2672c5fa280367222bf30db82566b78909927a67d5756d5ae0227b2ff8f3a907",
# "status": "complete"
# "id": 16,
# "payment_hash": "f00877afeec4d771c2db68af80b8afa5dad3b495dad498828327e484c93f67d5",
# "destination": "021ec6ccede19caa0bc7d7f9699c73e63cb2b79a4877529a60d7ac6a4ebb03487a",
# "msatoshi": 1234,
# "amount_msat": "1234msat",
# "msatoshi_sent": 1235,
# "amount_sent_msat": "1235msat",
# "created_at": 1633373202,
# "status": "complete",
# "payment_preimage": "373cd9a0f83426506f1535f6ca1f08f279f0bd82d257fd3fc8cd49fbc25750f2",
# "bolt11": "lntb1ps4kjlrpp57qy80tlwcnthrskmdzhcpw905hdd8dy4mt2f3q5ryljgfjflvl2sdq9u2d2zxqr3jscqp2sp5c2qykk0pdaeh2yrvn4cpkchsnyxwjnaptujggsd6ldqjfd8jhh3qrzjqwyx8nu2hygyvgc02cwdtvuxe0lcxz06qt3lpsldzcdr46my5epmj85hhvqqqtsqqqqqqqlgqqqqqqgq9q9qyyssqpnwtw6mzxu8pr5mrm8677ke8p5fjcu6dyrrvuy8j5f5p8mzv2phr2y0yx3z7mvgf5uqzzdytegg04u7hcu8ma50692cg69cdtsgw9hsph0xeha"
# }
#
#
# Failed payment example:
# {
# "code": 210,
# "message": "Destination 029b26c73b2c19ec9bdddeeec97c313670c96b6414ceacae0fb1b3502e490a6cbb is not reachable directly and all routehints were unusable.",
# "attempts": [
# {
# "status": "failed",
# "failreason": "Destination 029b26c73b2c19ec9bdddeeec97c313670c96b6414ceacae0fb1b3502e490a6cbb is not reachable directly and all routehints were unusable.",
# "partid": 0,
# "amount": "528214msat"
# }
# ]
# }
#
# Failure response examples:
#
# {
# "code": -32602,
# "message": "03c05f973d9c7218e7aec4f52c2c8ab395f51f41d627c398237b5ff056f46faf09: unknown destination node_id (no public channels?)"
# }
#
# {
# "code": 206,
# "message": "Route wanted fee of 16101625msat"
# }
#
# {
# "code": 207,
# "message": "Invoice expired"
# }
#
fi
else
# code tag not found
@@ -549,12 +575,12 @@ ln_getroute() {
trace "Entering ln_getroute()..."
# Defaults used from c-lightning documentation
local result
local result
local id=${1}
local msatoshi=${2}
local riskfactor=${3}
result=$(ln_call_lightningd getroute -k id=${id} msatoshi=${msatoshi} riskfactor=${riskfactor})
result=$(ln_call_lightningd getroute -k id=${id} msatoshi=${msatoshi} riskfactor=${riskfactor})
returncode=$?
echo "${result}"
@@ -566,7 +592,7 @@ ln_withdraw() {
trace "Entering ln_withdraw()..."
# Defaults used from c-lightning documentation
local result
local result
local request=${1}
local destination=$(echo "${request}" | jq -r ".destination")
local satoshi=$(echo "${request}" | jq -r ".satoshi")
@@ -575,8 +601,8 @@ ln_withdraw() {
if [ "${all}" == true ] || [ "${all}" == "true" ] ; then
satoshi="all"
fi
result=$(ln_call_lightningd withdraw ${destination} ${satoshi} ${feerate})
result=$(ln_call_lightningd withdraw ${destination} ${satoshi} ${feerate})
returncode=$?
echo "${result}"

View File

@@ -52,14 +52,6 @@ compute_vin_total_amount()
local vin_vout_amount=0
local vout
local vin_total_amount=0
local vin_hash
local vin_confirmations
local vin_timereceived
local vin_vsize
local vin_blockhash
local vin_blockheight
local vin_blocktime
local txid_already_inserted=true
for vin_txid_vout in ${vin_txids_vout}
do
@@ -75,21 +67,6 @@ compute_vin_total_amount()
trace "[compute_vin_total_amount] vin_vout_amount=${vin_vout_amount}"
vin_total_amount=$(awk "BEGIN { printf(\"%.8f\", ${vin_total_amount}+${vin_vout_amount}); exit}")
trace "[compute_vin_total_amount] vin_total_amount=${vin_total_amount}"
vin_hash=$(echo "${vin_raw_tx}" | jq -r ".result.hash")
vin_confirmations=$(echo "${vin_raw_tx}" | jq ".result.confirmations")
vin_timereceived=$(echo "${vin_raw_tx}" | jq ".result.time")
vin_size=$(echo "${vin_raw_tx}" | jq ".result.size")
vin_vsize=$(echo "${vin_raw_tx}" | jq ".result.vsize")
vin_blockhash=$(echo "${vin_raw_tx}" | jq -r ".result.blockhash")
vin_blockheight=$(echo "${vin_raw_tx}" | jq ".result.blockheight")
vin_blocktime=$(echo "${vin_raw_tx}" | jq ".result.blocktime")
# Let's insert the vin tx in the DB just in case it would be useful
sql "INSERT INTO tx (txid, hash, confirmations, timereceived, size, vsize, blockhash, blockheight, blocktime)"\
" VALUES ('${vin_txid}', '${vin_hash}', ${vin_confirmations}, ${vin_timereceived}, ${vin_size}, ${vin_vsize}, '${vin_blockhash}', ${vin_blockheight}, ${vin_blocktime})"\
" ON CONFLICT (txid) DO"\
" UPDATE SET blockhash='${vin_blockhash}', blockheight=${vin_blockheight}, blocktime=${vin_blocktime}, confirmations=${vin_confirmations}"
trace_rc $?
done
echo "${vin_total_amount}"

View File

@@ -184,8 +184,8 @@ confirmation() {
if [ -n "${event_message}" ]; then
# There's an event message, let's publish it!
trace "[confirmation] mosquitto_pub -h broker -t tx_confirmation -m \"{\"txid\":\"${txid}\",\"hash\":${tx_hash},\"address\":\"${address}\",\"vout_n\":${tx_vout_n},\"amount\":${tx_vout_amount},\"confirmations\":${tx_nb_conf},\"eventMessage\":\"${event_message}\"}\""
response=$(mosquitto_pub -h broker -t tx_confirmation -m "{\"txid\":\"${txid}\",\"hash\":${tx_hash},\"address\":\"${address}\",\"vout_n\":${tx_vout_n},\"amount\":${tx_vout_amount},\"confirmations\":${tx_nb_conf},\"eventMessage\":\"${event_message}\"}")
trace "[confirmation] mosquitto_pub -h broker -t tx_confirmation -m \"{\"txid\":\"${txid}\",\"hash\":\"${tx_hash}\",\"address\":\"${address}\",\"vout_n\":${tx_vout_n},\"amount\":${tx_vout_amount},\"confirmations\":${tx_nb_conf},\"eventMessage\":\"${event_message}\"}\""
response=$(mosquitto_pub -h broker -t tx_confirmation -m "{\"txid\":\"${txid}\",\"hash\":\"${tx_hash}\",\"address\":\"${address}\",\"vout_n\":${tx_vout_n},\"amount\":${tx_vout_amount},\"confirmations\":${tx_nb_conf},\"eventMessage\":\"${event_message}\"}")
returncode=$?
trace_rc ${returncode}
fi

View File

@@ -23,15 +23,33 @@ createCurlConfig() {
}
# If the file .dbfailed exists, it means we previously failed to process DB migrations.
# Sometimes, depending on timing, a migration fails but it doesn't mean it's corrupted.
# It may be a container that was not accessible for a short period of time, for example.
# So we'll try up to MAX_ATTEMPTS times before concluding in failure.
# For this to work, we'll put the number of attempts in the .dbfailed file.
MAX_ATTEMPTS=5
nb_attempts=1
if [ -e ${DB_PATH}/.dbfailed ]; then
n=$(cat ${DB_PATH}/.dbfailed)
nb_attempts=$((n+1))
fi
if [ "${nb_attempts}" -gt "${MAX_ATTEMPTS}" ]; then
touch /container_monitor/proxy_dbfailed
trace "[startproxy] A previous database creation/migration failed. Stopping."
trace "[startproxy] A file called .dbfailed has been created. Fix the migration errors, remove .dbfailed and retry."
trace "[startproxy] Too many database creation/migration failed attempts. Failed attempts = ${nb_attempts}."
trace "[startproxy] A file called .dbfailed has been created in your proxy datapath. Fix the migration errors, remove .dbfailed and retry."
trace "[startproxy] Check your log files, especially postgres."
trace "[startproxy] Exiting."
sleep 30
exit 1
else
rm -f /container_monitor/proxy_dbfailed
if [ "${nb_attempts}" -gt "1" ]; then
trace "[startproxy] Current database creation/migration attempt = ${nb_attempts}. Retrying..."
fi
fi
trace "[startproxy] Waiting for PostgreSQL to be ready..."
@@ -57,17 +75,21 @@ else
fi
if [ "${returncode}" -ne "0" ]; then
touch ${DB_PATH}/.dbfailed
touch /container_monitor/proxy_dbfailed
trace "[startproxy] Database creation/migration failed. Stopping."
echo -n "${nb_attempts}" > ${DB_PATH}/.dbfailed
trace "[startproxy] Database creation/migration failed. We will retry ${MAX_ATTEMPTS} times."
trace "[startproxy] A file called .dbfailed has been created in your proxy datapath. Fix the migration errors, remove .dbfailed and retry."
trace "[startproxy] Check your log files, especially postgres."
trace "[startproxy] Exiting."
sleep 30
exit ${returncode}
fi
# /container_monitor/proxy_ready will be created by Docker's health check
rm -f /container_monitor/proxy_ready
rm -f /container_monitor/proxy_dbfailed
rm -f ${DB_PATH}/.dbfailed
chmod 0600 $DB_FILE
createCurlConfig ${WATCHER_BTC_NODE_RPC_CFG} ${WATCHER_BTC_NODE_RPC_USER}

View File

@@ -528,7 +528,7 @@ watchtxidrequest() {
return 1
else
txid_pg="'${address}'"
txid_pg="'${txid}'"
fi
trace "[watchtxidrequest] txid=${txid}, txid_pg=${txid_pg}"
@@ -547,7 +547,7 @@ watchtxidrequest() {
trace "[watchtxidrequest] cb1conf_url=${cb1conf_url}, cb1conf_url_pg=${cb1conf_url_pg}, cb1conf_url_pg_where=${cb1conf_url_pg_where}, cb1conf_url_json=${cb1conf_url_json}"
local cbxconf_url cbxconf_url_pg cbxconf_url_pg_where
cbxconf_url=$(echo "${request}" | jq -e ".xconfCallbackURL")
cbxconf_url=$(echo "${request}" | jq -re ".xconfCallbackURL")
if [ "$?" -ne "0" ]; then
# cbxconf_url not found or null
cbxconf_url_json="null"

View File

@@ -10,9 +10,9 @@ mine() {
local minedaddr
echo ; echo "About to mine ${nbblocks} block(s)..."
minedaddr=$(docker exec -t $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat getnewaddress | tr -d '\r')
minedaddr=$(docker exec -t $(docker ps -q -f "name=cyphernode.bitcoin") bitcoin-cli -rpcwallet=spending01.dat getnewaddress | tr -d '\r')
echo ; echo "minedaddr=${minedaddr}"
docker exec -t $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat generatetoaddress ${nbblocks} "${minedaddr}"
docker exec -t $(docker ps -q -f "name=cyphernode.bitcoin") bitcoin-cli -rpcwallet=spending01.dat generatetoaddress ${nbblocks} "${minedaddr}"
}
case "${0}" in *mine.sh) mine $@;; esac

View File

@@ -73,10 +73,12 @@ test_manage_missed_0_conf() {
trace 3 "[test_manage_missed_0_conf] response=${response}"
trace 3 "[test_manage_missed_0_conf] Shutting down the proxy..."
docker stop $(docker ps -q -f "name=proxy\.")
# There are two container names containing "proxy": proxy and proxycron
# Let's exclude proxycron
docker restart $(docker ps -q -f "name=proxy[^c]")
trace 3 "[test_manage_missed_0_conf] Sending coins to watched address while proxy is down..."
docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address} 0.0001
docker exec -it $(docker ps -q -f "name=cyphernode.bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address} 0.0001
# txid1=$(exec_in_test_container curl -d '{"address":"'${address}'","amount":0.0001}' proxy:8888/spend | jq -r ".txid")
wait_for_proxy
@@ -113,14 +115,16 @@ test_manage_missed_1_conf() {
trace 3 "[test_manage_missed_1_conf] response=${response}"
trace 3 "[test_manage_missed_1_conf] Sending coins to watched address while proxy is up..."
docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address} 0.0001
docker exec -it $(docker ps -q -f "name=cyphernode.bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address} 0.0001
# txid1=$(exec_in_test_container curl -d '{"address":"'${address}'","amount":0.0001}' proxy:8888/spend | jq -r ".txid")
trace 3 "[test_manage_missed_1_conf] Sleeping for 20 seconds to let the 0-conf callbacks to happen..."
sleep 20
trace 3 "[test_manage_missed_1_conf] Shutting down the proxy..."
docker stop $(docker ps -q -f "name=proxy\.")
# There are two container names containing "proxy": proxy and proxycron
# Let's exclude proxycron
docker restart $(docker ps -q -f "name=proxy[^c]")
trace 3 "[test_manage_missed_1_conf] Mine a new block..."
mine

View File

@@ -0,0 +1,266 @@
#!/bin/bash
. ./colors.sh
. ./mine.sh
# This needs to be run in regtest
# You need jq installed for these tests to run correctly
# This will test:
#
# - getnewaddress
# - watch
# - getactivewatches
# - unwatch
# - watchtxid
# - unwatchtxid
# - spend
#
# Emit a timestamped message on stderr when its verbosity level is at or
# below the global TRACING threshold.
#   $1 - numeric level of this message (lower = more important)
#   $2 - message text; -e keeps embedded escape sequences (colors, \n) live
trace() {
  local level="${1}"
  local message="${2}"
  [ "${level}" -gt "${TRACING}" ] && return 0
  echo -e "$(date -u +%FT%TZ) ${message}" 1>&2
}
# Launch a throw-away detached Alpine container named "tests-watches" on the
# cyphernode Docker network; the tests exec curl / nc inside it so requests
# originate from within the network.  --rm removes it once stopped.
start_test_container() {
  docker run -d --rm -t --name tests-watches --network=cyphernodenet alpine
}
# Stop any running "tests-watches" container from a previous run.
# docker stop is only invoked when at least one matching container id exists.
stop_test_container() {
  trace 1 "\n\n[stop_test_container] ${BCyan}Stopping existing containers if they are running...${Color_Off}\n"
  # docker stop tests-watches
  local running_ids
  running_ids=$(docker ps -q -f "name=tests-watches")
  # running_ids intentionally unquoted: may hold several ids to word-split
  [ -z "${running_ids}" ] || docker stop ${running_ids}
}
# Run a command inside the tests-watches container, forwarding all arguments
# verbatim ("$@" preserves each argument as a separate word).
exec_in_test_container() {
  docker exec -it tests-watches "$@"
}
# End-to-end test of the proxy's watch features: getnewaddress, watch,
# getactivewatches, unwatch, watchtxid and their 0-conf/1-conf/x-conf
# webhooks.  The numbered comments below match the step numbers in the
# trace messages.  Returns 0 on success, a distinct non-zero code per
# failed step.
# Fixes in this revision: step 7 and step 20 failure messages were logging
# the wrong step number ("4."), and one trace line carried a tag
# copy-pasted from test_manage_missed_1_conf.
test_watches() {
  # Watch addresses and a txid

  # 1. Call getnewaddress twice with label1 and label2
  # 2. Call watch on the address with label1
  # 3. Call watch on the address with label2
  # 4. Call getactivewatches, search for addresses with label1 and label2
  # 6. unwatch label2
  # 7. Call getactivewatches, check that label2 is not there
  # 9. Start a callback server for label1 watch 0-conf webhook
  # 10. Call spend, to the address with label1 (triggers 0-conf webhook)
  # 11. Wait for label1's 0-conf webhook
  # 12. Call watchtxid on spent txid with 3-conf webhook
  # 13. Start a callback servers for 1-conf txid watch webhook
  # 14. Generate a block (triggers 1-conf webhook)
  # 15. Wait for 1-conf webhook
  # 16. Start a callback servers for 3-conf txid watch webhook
  # 17. Generate 2 blocks (triggers 3-conf webhook)
  # 18. Wait for 3-conf webhook
  # 20. Call getactivewatches, make sure label1 and label2 are not there

  # $RANDOM keeps labels unique across runs against the same proxy DB.
  local label1="label$RANDOM"
  local label2="label$RANDOM"
  # Webhook endpoints served by one-shot nc listeners in this container
  # (see start_callback_server); one port per expected callback.
  local callbackurl0conf1="tests-watches:1111/callbackurl0conf1"
  local callbackurl1conf1="tests-watches:1112/callbackurl1conf1"
  local callbackurl1conftxid="tests-watches:1113/callbackurl1conftxid"
  local callbackurl3conftxid="tests-watches:1114/callbackurl3conftxid"

  local address
  local address1
  local address2
  local txid
  local data
  local response

  trace 1 "\n\n[test_watches] ${BCyan}Let's test \"watch addresses and a txid\" features!...${Color_Off}\n"

  # 1. Call getnewaddress twice with label1 and label2
  trace 2 "\n\n[test_watches] ${BCyan}1. getnewaddress...${Color_Off}\n"
  data='{"label":"'${label1}'"}'
  trace 3 "[test_watches] data=${data}"
  response=$(exec_in_test_container curl -d "${data}" proxy:8888/getnewaddress)
  trace 3 "[test_watches] response=${response}"
  # jq -e exits non-zero when .error is null/absent, so $? == 0 means the
  # proxy reported an error.
  data=$(echo "${response}" | jq -re ".error")
  if [ "${?}" -eq "0" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 1. getnewaddress 1 failed: ${data}! ${Color_Off}\n"
    return 10
  fi
  address1=$(echo "$response" | jq -r ".address")
  trace 3 "[test_watches] address1=${address1}"

  data='{"label":"'${label2}'"}'
  trace 3 "[test_watches] data=${data}"
  response=$(exec_in_test_container curl -d "${data}" proxy:8888/getnewaddress)
  trace 3 "[test_watches] response=${response}"
  data=$(echo "${response}" | jq -re ".error")
  if [ "${?}" -eq "0" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 1. getnewaddress 2 failed: ${data}! ${Color_Off}\n"
    return 15
  fi
  address2=$(echo "$response" | jq -r ".address")
  trace 3 "[test_watches] address2=${address2}"

  # 2. Call watch on the address with label1
  trace 2 "\n\n[test_watches] ${BCyan}2. watch 1...${Color_Off}\n"
  local data='{"address":"'${address1}'","unconfirmedCallbackURL":"'${callbackurl0conf1}'","confirmedCallbackURL":"'${callbackurl1conf1}'","label":"watch_'${label1}'"}'
  trace 3 "[test_watches] data=${data}"
  response=$(exec_in_test_container curl -d "${data}" proxy:8888/watch)
  trace 3 "[test_watches] response=${response}"
  data=$(echo "${response}" | jq -re ".error")
  if [ "${?}" -eq "0" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 2. watch 1 failed: ${data}! ${Color_Off}\n"
    return 20
  fi

  # 3. Call watch on the address with label2 (dummy callbacks: this watch is
  # unwatched in step 6 before any webhook could fire)
  trace 2 "\n\n[test_watches] ${BCyan}3. watch 2...${Color_Off}\n"
  local data='{"address":"'${address2}'","unconfirmedCallbackURL":"dummy","confirmedCallbackURL":"dummy","label":"watch_'${label2}'"}'
  trace 3 "[test_watches] data=${data}"
  response=$(exec_in_test_container curl -d "${data}" proxy:8888/watch)
  trace 3 "[test_watches] response=${response}"
  data=$(echo "${response}" | jq -re ".error")
  if [ "${?}" -eq "0" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 3. watch 2 failed: ${data}! ${Color_Off}\n"
    return 25
  fi

  # 4. Call getactivewatches, search for addresses with label1 and label2
  trace 2 "\n\n[test_watches] ${BCyan}4. Call getactivewatches, search for addresses with label1 and label2...${Color_Off}\n"
  response=$(exec_in_test_container curl proxy:8888/getactivewatches)
  # trace 3 "[test_watches] response=${response}"
  address=$(echo "${response}" | jq -r ".watches | map(select(.label == \"watch_${label1}\"))[0].address")
  trace 3 "[test_watches] address=${address}"
  if [ "${address}" != "${address1}" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 4. Call getactivewatches, search for address with label1: \"${address}\" != \"${address1}\"! ${Color_Off}\n"
    return 30
  fi
  address=$(echo "${response}" | jq -r ".watches | map(select(.label == \"watch_${label2}\"))[0].address")
  trace 3 "[test_watches] address=${address}"
  if [ "${address}" != "${address2}" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 4. Call getactivewatches, search for address with label2: \"${address}\" != \"${address2}\"! ${Color_Off}\n"
    return 35
  fi

  # 6. unwatch label2
  trace 2 "\n\n[test_watches] ${BCyan}6. unwatch label2...${Color_Off}\n"
  response=$(exec_in_test_container curl proxy:8888/unwatch/${address2})
  trace 3 "[test_watches] response=${response}"
  data=$(echo "${response}" | jq -re ".error")
  if [ "${?}" -eq "0" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 6. unwatch label2 failed: ${data}! ${Color_Off}\n"
    return 40
  fi

  # 7. Call getactivewatches, check that label2 is not there
  trace 2 "\n\n[test_watches] ${BCyan}7. Call getactivewatches, check that label2 is not there...${Color_Off}\n"
  response=$(exec_in_test_container curl proxy:8888/getactivewatches)
  # trace 3 "[test_watches] response=${response}"
  address=$(echo "${response}" | jq -r ".watches | map(select(.label == \"watch_${label2}\"))[0].address")
  trace 3 "[test_watches] address=${address}"
  if [ "${address}" = "${address2}" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 7. Call getactivewatches, found address2: \"${address}\" = \"${address2}\"! ${Color_Off}\n"
    return 50
  fi

  # 9. Start a callback server for label1 watch 0-conf webhook
  # 10. Call spend, to the address with label1 (triggers 0-conf webhook)
  # 11. Wait for label1's 0-conf webhook
  trace 2 "\n\n[test_watches] ${BCyan}10. Send coins to address1...${Color_Off}\n"
  start_callback_server 1111
  # Let's use the bitcoin node directly to better simulate an external spend
  txid=$(docker exec -it $(docker ps -q -f "name=cyphernode.bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address1} 0.0001 | tr -d "\r\n")
  # txid=$(exec_in_test_container curl -d '{"address":"'${address1}'","amount":0.001}' proxy:8888/spend | jq -r ".txid")
  trace 3 "[test_watches] txid=${txid}"
  trace 3 "[test_watches] Waiting for 0-conf callback on address1..."
  # wait blocks until the backgrounded nc listener has served the webhook
  wait

  # 12. Call watchtxid on spent txid with 3-conf webhook
  trace 2 "\n\n[test_watches] ${BCyan}12. Call watchtxid on spent txid with 3-conf webhook...${Color_Off}\n"
  # BODY {"txid":"b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387","confirmedCallbackURL":"192.168.111.233:1111/callback1conf","xconfCallbackURL":"192.168.111.233:1111/callbackXconf","nbxconf":6}
  local data='{"txid":"'${txid}'","confirmedCallbackURL":"'${callbackurl1conftxid}'","xconfCallbackURL":"'${callbackurl3conftxid}'","nbxconf":3}'
  trace 3 "[test_watches] data=${data}"
  response=$(exec_in_test_container curl -d "${data}" proxy:8888/watchtxid)
  trace 3 "[test_watches] response=${response}"
  data=$(echo "${response}" | jq -re ".error")
  if [ "${?}" -eq "0" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 12. Call watchtxid on spent txid with 3-conf webhook failed: ${data}! ${Color_Off}\n"
    return 60
  fi

  # 13. Start a callback servers for 1-conf txid watch webhook
  trace 2 "\n\n[test_watches] ${BCyan}13. Start a callback servers for 1-conf txid watch webhook...${Color_Off}\n"
  # 1112: address watch 1-conf; 1113: txid watch 1-conf
  start_callback_server 1112
  start_callback_server 1113

  # 14. Generate a block (triggers 1-conf webhook)
  trace 3 "[test_watches] Mine a new block..."
  mine

  # 15. Wait for 1-conf webhook
  trace 3 "[test_watches] Waiting for 1-conf callbacks on address1 and txid..."
  wait

  # 16. Start a callback servers for 3-conf txid watch webhook
  trace 2 "\n\n[test_watches] ${BCyan}16. Start a callback servers for 3-conf txid watch webhook...${Color_Off}\n"
  start_callback_server 1114

  # 17. Generate 2 blocks (triggers 3-conf webhook)
  trace 3 "[test_watches] Mine 2 new blocks..."
  mine 2

  # 18. Wait for 3-conf webhook
  trace 3 "[test_watches] Waiting for 3-conf callback on txid..."
  wait

  # 20. Call getactivewatches, make sure label1 and label2 are not there
  # (label1's watch is auto-expired after its confirmed callback fired)
  trace 2 "\n\n[test_watches] ${BCyan}20. Call getactivewatches, make sure label1 and label2 are not there...${Color_Off}\n"
  response=$(exec_in_test_container curl proxy:8888/getactivewatches)
  # trace 3 "[test_watches] response=${response}"
  address=$(echo "${response}" | jq -r ".watches | map(select(.label == \"watch_${label1}\"))[0].address")
  trace 3 "[test_watches] address=${address}"
  if [ "${address}" = "${address1}" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 20. Call getactivewatches, found address1: \"${address}\" = \"${address1}\"! ${Color_Off}\n"
    return 70
  fi
  address=$(echo "${response}" | jq -r ".watches | map(select(.label == \"watch_${label2}\"))[0].address")
  trace 3 "[test_watches] address=${address}"
  if [ "${address}" = "${address2}" ]; then
    trace 1 "\n\n[test_watches] ${On_Red}${BBlack} 20. Call getactivewatches, found address2: \"${address}\" = \"${address2}\"! ${Color_Off}\n"
    return 75
  fi

  trace 1 "\n\n[test_watches] ${On_IGreen}${BBlack} ALL GOOD! Yayyyy! ${Color_Off}\n"
}
# Start a one-shot HTTP callback listener inside the tests-watches container,
# in the background (caller uses `wait` to block until the webhook arrives).
#   $1 - TCP port to listen on (default 1111)
# The nc command answers any request with "HTTP/1.1 200 OK" and echoes the
# received request (headers + body) to the terminal for visual inspection;
# `timeout 1 tee /dev/tty | cat` both displays and drains the request.
start_callback_server() {
  trace 1 "[start_callback_server] ${BCyan}Let's start the callback servers!...${Color_Off}"
  local port=${1:-1111}
  docker exec -t tests-watches sh -c "nc -vlp${port} -e sh -c 'echo -en \"HTTP/1.1 200 OK\\\\r\\\\n\\\\r\\\\n\" ; echo -en \"\\033[40m\\033[0;37m\" >&2 ; date >&2 ; timeout 1 tee /dev/tty | cat ; echo -e \"\033[0m\" >&2'" &
}
# --- Script entry point ---
# Verbosity threshold for trace(): show levels 1..3.
TRACING=3

# Fresh test container: stop any leftover from a previous run, then start.
stop_test_container
start_test_container

trace 1 "\n\n[test_watches] ${BCyan}Installing needed packages...${Color_Off}\n"
exec_in_test_container apk add --update curl

test_watches

trace 1 "\n\n[test_watches] ${BCyan}Tearing down...${Color_Off}\n"
# Reap any callback listener still running in the background.
wait
stop_test_container

trace 1 "\n\n[test_watches] ${BCyan}See ya!${Color_Off}\n"

View File

@@ -272,7 +272,7 @@ test_watch_pub32() {
trace 2 "\n\n[test_watch_pub32] ${BCyan}12. Send coins to address1...${Color_Off}\n"
start_callback_server 1111
# Let's use the bitcoin node directly to better simulate an external spend
txid1=$(docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address1} 0.0001 | tr -d "\r\n")
txid1=$(docker exec -it $(docker ps -q -f "name=cyphernode.bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address1} 0.0001 | tr -d "\r\n")
# txid1=$(exec_in_test_container curl -d '{"address":"'${address1}'","amount":0.001}' proxy:8888/spend | jq -r ".txid")
trace 3 "[test_watch_pub32] txid1=${txid1}"
trace 3 "[test_watch_pub32] Waiting for 0-conf callback on address1..."
@@ -282,7 +282,7 @@ test_watch_pub32() {
trace 2 "\n\n[test_watch_pub32] ${BCyan}13. Send coins to address2...${Color_Off}\n"
start_callback_server 1113
# Let's use the bitcoin node directly to better simulate an external spend
txid2=$(docker exec -it $(docker ps -q -f "name=cyphernode_bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address2} 0.0001 | tr -d "\r\n")
txid2=$(docker exec -it $(docker ps -q -f "name=cyphernode.bitcoin") bitcoin-cli -rpcwallet=spending01.dat sendtoaddress ${address2} 0.0001 | tr -d "\r\n")
# txid2=$(exec_in_test_container curl -d '{"address":"'${address2}'","amount":0.001}' proxy:8888/spend | jq -r ".txid")
trace 3 "[test_watch_pub32] txid2=${txid2}"
trace 3 "[test_watch_pub32] Waiting for 0-conf callback on address2..."