Merge pull request #36 from SatoshiPortal/release

Release v0.1.0-rc.1
This commit is contained in:
kexkey
2018-12-28 17:22:03 -05:00
committed by GitHub
106 changed files with 7614 additions and 685 deletions

3
.gitmodules vendored Normal file
View File

@@ -0,0 +1,3 @@
[submodule "install/SatoshiPortal/dockers"]
path = install/SatoshiPortal/dockers
url = https://github.com/SatoshiPortal/dockers.git

View File

@@ -1,21 +1,21 @@
FROM nginx:alpine
RUN apk add --update --no-cache \
bash \
git \
openssl \
fcgiwrap \
spawn-fcgi \
curl \
jq
jq \
su-exec
COPY auth.sh /etc/nginx/conf.d
COPY auth.sh /etc/nginx/conf.d/
COPY default-ssl.conf /etc/nginx/conf.d/default.conf
COPY statuspage.html /etc/nginx/conf.d/status/
COPY entrypoint.sh entrypoint.sh
COPY keys.properties /etc/nginx/conf.d
COPY api.properties /etc/nginx/conf.d
COPY trace.sh /etc/nginx/conf.d
COPY tests.sh /etc/nginx/conf.d
COPY ip-whitelist.conf /etc/nginx/conf.d
COPY trace.sh /etc/nginx/conf.d/
COPY tests.sh /etc/nginx/conf.d/
RUN chmod +x /etc/nginx/conf.d/auth.sh entrypoint.sh

View File

@@ -2,7 +2,27 @@
All the other containers are in the Docker Swarm, so we want to expose a real HTTP/S interface to clients outside of the Swarm. Clients have to get an API key first.
## Build
## Pull our Cyphernode image
```shell
docker pull cyphernode/gatekeeper:latest
```
## Build the image yourself
```shell
docker build -t cyphernode/gatekeeper:latest .
```
## Run the image
If you are using it independently of the Docker stack (docker-compose.yml), you can run it like this:
```shell
docker run -d --rm --name gatekeeper -p 80:80 -p 443:443 --network cyphernodenet -v "~/cyphernode-ssl/certs:/etc/ssl/certs" -v "~/cyphernode-ssl/private:/etc/ssl/private" --env-file env.properties cyphernode/gatekeeper:latest `id -u cyphernode`:`id -g cyphernode`
```
## Prepare
### Create your API key and put it in keys.properties
@@ -23,7 +43,7 @@ dd if=/dev/urandom bs=32 count=1 2> /dev/null | xxd -ps -c 32
Put the id, key and groups in keys.properties and give the id and key to the client. The key is a secret. keys.properties looks like this:
```property
#kappiid="id";kapi_key="key";kapi_groups="group1,group2";leave the rest intact
# kapi_id="id";kapi_key="key";kapi_groups="group1,group2";leave the rest intact
kapi_id="001";kapi_key="2df1eeea370eacdc5cf7e96c2d82140d1568079a5d4d87006ec8718a98883b36";kapi_groups="watcher";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}
kapi_id="002";kapi_key="50c5e483b80964595508f214229b014aa6c013594d57d38bcb841093a39f1d83";kapi_groups="watcher";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}
kapi_id="003";kapi_key="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";kapi_groups="watcher,spender";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}

View File

@@ -1,3 +1,4 @@
# The file api.properties generated by the installer should look like this.
# Watcher can:
action_watch=watcher
@@ -10,7 +11,7 @@ action_gettransaction=watcher
action_ln_getinfo=watcher
action_ln_create_invoice=watcher
# Spender can do what the watcher can do plus:
# Spender can do what the watcher can do, plus:
action_getbalance=spender
action_getnewaddress=spender
action_spend=spender
@@ -20,10 +21,13 @@ action_deriveindex=spender
action_derivepubpath=spender
action_ln_pay=spender
action_ln_newaddr=spender
action_ots_stamp=spender
action_ots_getfile=spender
# Admin can do what the spender can do plus:
# Admin can do what the spender can do, plus:
# Should be called from inside the Swarm:
# Should be called from inside the Docker network only:
action_conf=internal
action_executecallbacks=internal
action_ots_backoffice=internal
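As an illustration of how these groups behave at the gatekeeper (a sketch, assuming ${token} was signed with a key whose kapi_groups is only "watcher"; prepend /v0/ if you go through the new default.conf):
```shell
# Watcher action: allowed
curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer ${token}" -k https://localhost/getbestblockhash   # 200
# Spender action: rejected by the gatekeeper
curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer ${token}" -k https://localhost/getbalance         # 403
```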

View File

@@ -39,14 +39,14 @@ verify_sign()
if [ ${exp} -gt ${current} ]; then
trace "[verify_sign] Not expired, let's validate signature"
local id=$(echo ${payload} | jq ".id" | tr -d '"')
trace "[verify_sign] id=${id}"
trace "[verify_sign] id=${id}"
# Check for code injection
# id will usually be an int, but can be alphanum... nothing else
case $id in (*[![:alnum:]]*|"")
trace "[verify_sign] Potential code injection, exiting"
return 1
esac
# Check for code injection
# id will usually be an int, but can be alphanum... nothing else
case $id in (*[![:alnum:]]*|"")
trace "[verify_sign] Potential code injection, exiting"
return 1
esac
# It is so much faster to include the keys here instead of grep'ing the file for key.
. ./keys.properties
@@ -87,16 +87,16 @@ verify_group()
trace "[verify_group] Verifying group..."
local id=${1}
# REQUEST_URI should look like this: /watch/2blablabla
local action=$(echo "${REQUEST_URI:1}" | cut -d '/' -f1)
# REQUEST_URI should look like this: /v0/watch/2blablabla
local action=$(echo "${REQUEST_URI#\/}" | cut -d '/' -f2)
trace "[verify_group] action=${action}"
# Check for code injection
# action can be alphanum... and _ and - but nothing else
local actiontoinspect=$(echo "$action" | tr -d '_-')
case $actiontoinspect in (*[![:alnum:]]*|"")
trace "[verify_group] Potential code injection, exiting"
return 1
trace "[verify_group] Potential code injection, exiting"
return 1
esac
# It is so much faster to include the keys here instead of grep'ing the file for key.
@@ -121,15 +121,17 @@ verify_group()
# $HTTP_AUTHORIZATION = Bearer <token>
# Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjAwMyIsImV4cCI6MTU0MjE0OTMyNH0=.b811067cf79c7009a0a38f110a6e3bf82cc4310aa6afae75b9d915b9febf13f7
# If this is not found in the header, we exit
trace "[auth.sh] HTTP_AUTHORIZATION=${HTTP_AUTHORIZATION}"
if [ "${HTTP_AUTHORIZATION:0:6}" = "Bearer" ]; then
token="${HTTP_AUTHORIZATION:6}"
# /bin/sh on debian points to dash, which does not support substring in the form ${var:offset:length}
if [ "-${HTTP_AUTHORIZATION%% *}" = "-Bearer" ]; then
token="${HTTP_AUTHORIZATION#Bearer }"
if [ -n "$token" ]; then
trace "[auth.sh] Valid format for authorization header"
verify_sign "${token}"
[ "$?" -eq "0" ] && return
trace "[auth.sh] Valid format for authorization header"
verify_sign "${token}"
[ "$?" -eq "0" ] && return
fi
fi
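For reference, a minimal sketch of the check verify_sign performs once the Bearer token has been split out of the header (variable names simplified; the real function also validates expiry and guards against code injection):
```shell
# token has the form header64.payload64.signature
h64=${token%%.*}; rest=${token#*.}; p64=${rest%%.*}; sig=${rest#*.}
id=$(echo "${p64}" | base64 -d | jq -r '.id')
. ./keys.properties                                  # defines ukey_<id> and ugroups_<id>
eval key='$ukey_'${id}
expected=$(echo -n "${h64}.${p64}" | openssl dgst -hmac "${key}" -sha256 -r | cut -sd ' ' -f1)
[ "${sig}" = "${expected}" ] && echo "signature ok" || echo "signature mismatch"
```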

View File

@@ -2,14 +2,21 @@ server {
listen 443 ssl;
server_name localhost;
include /etc/nginx/conf.d/ip-whitelist.conf;
#include /etc/nginx/conf.d/ip-whitelist.conf;
ssl_certificate /etc/ssl/certs/cert.pem;
ssl_certificate_key /etc/ssl/private/key.pem;
location / {
location /status {
auth_basic "status";
auth_basic_user_file conf.d/status/htpasswd;
root /etc/nginx/conf.d;
index statuspage.html;
}
location /v0/ {
auth_request /auth;
proxy_pass http://cyphernode:8888;
proxy_pass http://proxy:8888/;
}
location /auth {

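With this configuration the status page sits behind basic auth and the API behind the token check, so a quick smoke test looks like this (a sketch; the basic-auth user name depends on what the installer wrote into conf.d/status/htpasswd, and ${token} is a Bearer token as described in the gatekeeper README):
```shell
# Status page: HTTP basic auth (user name here is a placeholder), serves statuspage.html
curl -k -u cyphernode https://localhost/status/
# API: auth_request hands the Bearer token to /auth, then proxies to proxy:8888
curl -k -H "Authorization: Bearer ${token}" https://localhost/v0/getbestblockhash
```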
View File

@@ -2,11 +2,11 @@ server {
listen 80;
server_name localhost;
include /etc/nginx/conf.d/ip-whitelist.conf;
#include /etc/nginx/conf.d/ip-whitelist.conf;
location / {
location /v0/ {
auth_request /auth;
proxy_pass http://cyphernode:8888;
proxy_pass http://proxy:8888/;
}
location /auth {

View File

@@ -1,5 +1,17 @@
#!/bin/sh
#!/bin/bash
spawn-fcgi -s /var/run/fcgiwrap.socket -u nginx -g nginx -U nginx -- /usr/bin/fcgiwrap
user='nginx'
if [[ $1 ]]; then
IFS=':' read -ra arr <<< "$1"
if [[ ${arr[0]} ]]; then
user=${arr[0]};
fi
fi
spawn-fcgi -M 0660 -s /var/run/fcgiwrap.socket -u $user -g nginx -U $user -- `which fcgiwrap`
chmod -R g+rw /var/run/fcgiwrap.socket /etc/nginx/conf.d/*
chown -R :nginx /etc/nginx/conf.d/*
nginx -g "daemon off;"
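The optional first argument is the `uid:gid` pair that the README's `docker run` example passes to the image; only the part before the colon is kept and used as the fcgiwrap user. A sketch of the intended invocation:
```shell
# Inside the container, the entrypoint is effectively called like this
./entrypoint.sh "$(id -u cyphernode):$(id -g cyphernode)"
```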

View File

@@ -1,8 +0,0 @@
# Leave commented if you don't want to use IP whitelist
#real_ip_header X-Forwarded-For;
#set_real_ip_from 0.0.0.0/0;
# List of white listed IP addresses...
#allow 45.56.67.78;
#deny all;

View File

@@ -1,4 +1,6 @@
#kappiid="id";kapi_key="key";kapi_groups="group1,group2";leave the rest intact
# The file keys.properties generated by the installer should look like this.
# kapi_id="id";kapi_key="key";kapi_groups="group1,group2";leave the rest intact
kapi_id="001";kapi_key="2df1eeea370eacdc5cf7e96c2d82140d1568079a5d4d87006ec8718a98883b36";kapi_groups="watcher";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}
kapi_id="002";kapi_key="50c5e483b80964595508f214229b014aa6c013594d57d38bcb841093a39f1d83";kapi_groups="watcher";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}
kapi_id="003";kapi_key="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";kapi_groups="watcher,spender";eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}

View File

@@ -0,0 +1,58 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html>
<head>
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
<script type="text/javascript">
$(function() {
installation_status();
});
function httpget(url) {
return fetch(url, { method: "GET" })
.catch(err => {
console.log('HTTP GET Error: ' + err.message + ' :: STACK : ' + err.stack);
$("#result").text((JSON.stringify(err.message)));
return Promise.reject(err.message);
})
.then(res => {
if (!res.ok) {
return res.json().then(data => {
console.log('HTTP GET Error: ' + data.error.message);
$("#result").text(JSON.stringify(data.error.message));
return Promise.reject(data.error.message);
});
}
return res.json();
})
.then(data => Promise.resolve(JSON.stringify(data)))
}
function installation_status() {
httpget("installation.json")
.then(result => {
$("#result").text(result);
});
}
</script>
</head>
<body>
<div id="hello">
<h1>Hello World from Cyphernode!</h1>
<h2>If you are here, it means you successfully deployed Cyphernode. Congratulations, fellow Cyphernode Operator!</h2>
</div>
<hr/>
<div id="files">
<h2>The following files have been encrypted with your configuration passphrase and your client keys passphrase, respectively:</h2>
<ul>
<li><a href="config.7z">Download your Cyphernode <b>configurations</b>, can be used for another Cyphernode deployment</a></li>
<li><a href="client.7z">Download Client <b>API ID's and keys</b>, needed in your client apps</a></li>
</ul>
</div>
<div id="Status">
<h2>This is the status of Cyphernode's installation and running components</h2>
<pre lang="xml" id="result" style="white-space: pre-wrap"></pre>
</div>
</body>
</html>

View File

@@ -4,7 +4,7 @@
# Replace
# proxy_pass http://cyphernode:8888;
# by
# proxy_pass http://tests:8888;
# proxy_pass http://cyphernode:1111;
# in /etc/nginx/conf.d/default.conf to run the tests
test_expiration()
@@ -143,56 +143,68 @@ test_authorization_spender()
# getbalance
echo -n " Testing getbalance... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/getbalance)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 130
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 135
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 430
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 435
# getnewaddress
echo -n " Testing getnewaddress... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/getnewaddress)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 140
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 145
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 440
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 445
# spend
echo -n " Testing spend... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/spend)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 150
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 155
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 450
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 455
# addtobatch
echo -n " Testing addtobatch... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/addtobatch)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 160
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 165
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 460
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 465
# batchspend
echo -n " Testing batchspend... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/batchspend)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 170
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 175
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 470
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 475
# deriveindex
echo -n " Testing deriveindex... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/deriveindex)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 180
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 185
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 480
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 485
# derivepubpath
echo -n " Testing derivepubpath... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/derivepubpath)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 190
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 195
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 490
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 495
# ln_pay
echo -n " Testing ln_pay... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/ln_pay)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 200
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 205
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 500
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 505
# ln_newaddr
echo -n " Testing ln_newaddr... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/ln_newaddr)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 210
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 215
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 510
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 515
# ots_stamp
echo -n " Testing ots_stamp... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/ots_stamp)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 520
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 525
# ots_getfile
echo -n " Testing ots_getfile... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/ots_getfile)
[ ${is_spender} = true ] && [ "${rc}" -eq "403" ] && return 530
[ ${is_spender} = false ] && [ "${rc}" -ne "403" ] && return 535
return 0
}
@@ -216,12 +228,17 @@ test_authorization_internal()
# conf
echo -n " Testing conf... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/conf)
[ "${rc}" -ne "403" ] && return 220
[ "${rc}" -ne "403" ] && return 920
# executecallbacks
echo -n " Testing executecallbacks... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/executecallbacks)
[ "${rc}" -ne "403" ] && return 230
[ "${rc}" -ne "403" ] && return 930
# ots_backoffice
echo -n " Testing ots_backoffice... "
rc=$(time -f "%E" curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" -k https://localhost/ots_backoffice)
[ "${rc}" -ne "403" ] && return 940
return 0
}
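Each failing assertion above exits with its own distinct code, so whichever wrapper drives these functions can pinpoint the offending endpoint from `$?` alone. A sketch (the actual driver lives in the unchanged part of tests.sh, and ${token} is assumed to have been generated for the id under test):
```shell
test_authorization_internal
rc=$?
[ "${rc}" -eq 0 ] && echo "internal actions correctly rejected" || echo "check failed with code ${rc}"
```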

75
build.sh Executable file
View File

@@ -0,0 +1,75 @@
#!/bin/bash
TRACING=1
trace()
{
if [ -n "${TRACING}" ]; then
echo "[$(date +%Y-%m-%dT%H:%M:%S%z)] ${1}" > /dev/stderr
fi
}
trace_rc()
{
if [ -n "${TRACING}" ]; then
echo "[$(date +%Y-%m-%dT%H:%M:%S%z)] Last return code: ${1}" > /dev/stderr
fi
}
build_docker_image() {
local dockerfile="Dockerfile"
if [[ ""$3 != "" ]]; then
dockerfile=$3
fi
trace "building docker image: $2"
#docker build -q $1 -f $1/$dockerfile -t $2:latest > /dev/null
docker build $1 -f $1/$dockerfile -t $2
}
build_docker_images() {
trace "Updating SatoshiPortal repos"
git submodule update --recursive --remote
local bitcoin_dockerfile=Dockerfile.amd64
local clightning_dockerfile=Dockerfile.amd64
local proxy_dockerfile=Dockerfile.amd64
# compat mode for SatoshiPortal repo
# TODO: add more mappings?
if [[ $(uname -m) == 'armv7l' ]]; then
bitcoin_dockerfile="Dockerfile.arm32v6"
clightning_dockerfile="Dockerfile.arm32v6"
proxy_dockerfile="Dockerfile.arm32v6"
fi
trace "Creating cyphernodeconf image"
build_docker_image install/ cyphernode/cyphernodeconf:$CN_VERSION
trace "Creating SatoshiPortal images"
build_docker_image install/SatoshiPortal/dockers/bitcoin-core cyphernode/bitcoin:$BC_VERSION $bitcoin_dockerfile
build_docker_image install/SatoshiPortal/dockers/c-lightning cyphernode/clightning:$CL_VERSION $clightning_dockerfile
trace "Creating cyphernode images"
build_docker_image api_auth_docker/ cyphernode/gatekeeper:$CN_VERSION
build_docker_image proxy_docker/ cyphernode/proxy:$CN_VERSION $proxy_dockerfile
build_docker_image cron_docker/ cyphernode/proxycron:$CN_VERSION
build_docker_image pycoin_docker/ cyphernode/pycoin:$CN_VERSION
build_docker_image otsclient_docker/ cyphernode/otsclient:$CN_VERSION
}
# CYPHERNODE VERSION
GATEKEEPER_VERSION="latest"
PROXY_VERSION="latest"
PROXYCRON_VERSION="latest"
OTSCLIENT_VERSION="latest"
PYCOIN_VERSION="latest"
BITCOIN_VERSION="latest"
LIGHTNING_VERSION="latest"
build_docker_images
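A hedged usage sketch: build.sh takes no arguments, updates the SatoshiPortal submodule, and builds every image locally (run it from the repository root so the relative paths above resolve):
```shell
cd cyphernode
./build.sh
docker image ls | grep cyphernode
```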

View File

@@ -30,48 +30,56 @@ CyphernodeClient.prototype._generateToken = function() {
return token
}
CyphernodeClient.prototype._post = function(url, postdata, cb) {
CyphernodeClient.prototype._post = function(url, postdata, cb, addedOptions) {
let urlr = this.baseURL + url;
HTTP.post(urlr,
{
data: postdata,
npmRequestOptions: {
strictSSL: false,
agentOptions: {
rejectUnauthorized: false
}
},
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + this._generateToken()
let httpOptions = {
data: postdata,
npmRequestOptions: {
strictSSL: false,
agentOptions: {
rejectUnauthorized: false
}
}, function (err, resp) {
},
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + this._generateToken()
}
}
if (addedOptions) {
Object.assign(httpOptions.npmRequestOptions, addedOptions)
}
HTTP.post(urlr, httpOptions,
function (err, resp) {
// console.log(err)
// console.log(resp)
cb(err, resp.data)
cb(err, resp.data || resp.content)
}
)
};
CyphernodeClient.prototype._get = function(url, cb) {
CyphernodeClient.prototype._get = function(url, cb, addedOptions) {
let urlr = this.baseURL + url;
HTTP.get(urlr,
{
npmRequestOptions: {
strictSSL: false,
agentOptions: {
rejectUnauthorized: false
}
},
headers: {
'Authorization': 'Bearer ' + this._generateToken()
let httpOptions = {
npmRequestOptions: {
strictSSL: false,
agentOptions: {
rejectUnauthorized: false
}
}, function (err, resp) {
},
headers: {
'Authorization': 'Bearer ' + this._generateToken()
}
}
if (addedOptions) {
Object.assign(httpOptions.npmRequestOptions, addedOptions)
}
HTTP.get(urlr, httpOptions,
function (err, resp) {
// console.log(err)
// console.log(resp)
cb(err, resp.data)
cb(err, resp.data || resp.content)
}
)
};
@@ -112,3 +120,17 @@ CyphernodeClient.prototype.getNewAddress = function(cbreply) {
// http://192.168.122.152:8080/getnewaddress
this._get('/getnewaddress', cbreply);
};
CyphernodeClient.prototype.ots_stamp = function(hash, callbackUrl, cbreply) {
// POST https://cyphernode/ots_stamp
// BODY {"hash":"1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7","callbackUrl":"192.168.111.233:1111/callbackUrl"}
let data = { hash: hash, callbackUrl: callbackUrl }
this._post('/ots_stamp', data, cbreply);
};
CyphernodeClient.prototype.ots_getfile = function(hash, cbreply) {
// http://192.168.122.152:8080/ots_getfile/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
// encoding: null is for HTTP get to not convert the binary data to the default encoding
this._get('/ots_getfile/' + hash, cbreply, { encoding: null });
};
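The `{ encoding: null }` option keeps the OTS proof as raw binary instead of decoding it. The equivalent call with curl would look like this (a sketch; host, path prefix and ${token} depend on how you reach the gatekeeper):
```shell
curl -s -k -H "Authorization: Bearer ${token}" \
  --output proof.ots \
  "https://localhost/ots_getfile/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7"
```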

View File

@@ -4,73 +4,92 @@
invoke_cyphernode()
{
local action=${1}
local post=${2}
local action=${1}
local post=${2}
local p64=$(echo -n "{\"id\":\"${id}\",\"exp\":$((`date +"%s"`+10))}" | base64)
local s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$key" -sha256 -r | cut -sd ' ' -f1)
local token="$h64.$p64.$s"
local p64=$(echo -n "{\"id\":\"${id}\",\"exp\":$((`date +"%s"`+10))}" | base64)
local s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$key" -sha256 -r | cut -sd ' ' -f1)
local token="$h64.$p64.$s"
if [ -n "${post}" ]; then
echo $(curl -v -H "Authorization: Bearer $token" -d "${post}" -k "https://cyphernode/${action}")
return $?
else
echo $(curl -v -H "Authorization: Bearer $token" -k "https://cyphernode/${action}")
return $?
fi
if [ -n "${post}" ]; then
echo $(curl -v -H "Authorization: Bearer $token" -d "${post}" -k "https://cyphernode/${action}")
return $?
else
echo $(curl -v -H "Authorization: Bearer $token" -k "https://cyphernode/${action}")
return $?
fi
}
watch()
{
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","unconfirmedCallbackURL":"192.168.122.233:1111/callback0conf","confirmedCallbackURL":"192.168.122.233:1111/callback1conf"}
local btcaddr=${1}
local cb0conf=${2}
local cb1conf=${3}
local post="{\"address\":\"${btcaddr}\",\"unconfirmedCallbackURL\":\"${cb0conf}\",\"confirmedCallbackURL\":\"${cb1conf}\"}"
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","unconfirmedCallbackURL":"192.168.122.233:1111/callback0conf","confirmedCallbackURL":"192.168.122.233:1111/callback1conf"}
local btcaddr=${1}
local cb0conf=${2}
local cb1conf=${3}
local post="{\"address\":\"${btcaddr}\",\"unconfirmedCallbackURL\":\"${cb0conf}\",\"confirmedCallbackURL\":\"${cb1conf}\"}"
echo $(invoke_cyphernode "watch" ${post})
echo $(invoke_cyphernode "watch" ${post})
}
unwatch()
{
# 192.168.122.152:8080/unwatch/2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp
local btcaddr=${1}
# 192.168.122.152:8080/unwatch/2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp
local btcaddr=${1}
echo $(invoke_cyphernode "unwatch/${btcaddr}")
echo $(invoke_cyphernode "unwatch/${btcaddr}")
}
getactivewatches()
{
# 192.168.122.152:8080/getactivewatches
echo $(invoke_cyphernode "getactivewatches")
# 192.168.122.152:8080/getactivewatches
echo $(invoke_cyphernode "getactivewatches")
}
gettransaction()
{
# http://192.168.122.152:8080/gettransaction/af867c86000da76df7ddb1054b273ca9e034e8c89d049b5b2795f9f590f67648
local txid=${1}
# http://192.168.122.152:8080/gettransaction/af867c86000da76df7ddb1054b273ca9e034e8c89d049b5b2795f9f590f67648
local txid=${1}
echo $(invoke_cyphernode "gettransaction/${txid}")
echo $(invoke_cyphernode "gettransaction/${txid}")
}
spend()
{
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233}
local btcaddr=${1}
local amount=${2}
local post="{\"address\":\"${btcaddr}\",\"amount\":\"${amount}\"}"
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233}
local btcaddr=${1}
local amount=${2}
local post="{\"address\":\"${btcaddr}\",\"amount\":\"${amount}\"}"
echo $(invoke_cyphernode "spend" ${post})
echo $(invoke_cyphernode "spend" ${post})
}
getbalance()
{
# http://192.168.122.152:8080/getbalance
echo $(invoke_cyphernode "getbalance")
# http://192.168.122.152:8080/getbalance
echo $(invoke_cyphernode "getbalance")
}
getnewaddress()
{
# http://192.168.122.152:8080/getnewaddress
echo $(invoke_cyphernode "getnewaddress")
# http://192.168.122.152:8080/getnewaddress
echo $(invoke_cyphernode "getnewaddress")
}
ots_stamp()
{
# POST https://cyphernode/ots_stamp
# BODY {"hash":"1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7","callbackUrl":"192.168.111.233:1111/callbackUrl"}
local hash=${1}
local callbackUrl=${2}
local post="{\"hash\":\"${hash}\",\"callbackUrl\":\"${callbackUrl}\"}"
echo $(invoke_cyphernode "ots_stamp" ${post})
}
ots_getfile()
{
# http://192.168.122.152:8080/ots_getfile/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
local hash=${1}
echo $(invoke_cyphernode "ots_getfile/${hash}")
}
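Once sourced (the script sets `id`, `key` and `h64` in its unchanged header), the new helpers can be called like the existing ones; a sketch, with the file name assumed for illustration:
```shell
. ./cyphernode_client.sh   # hypothetical file name for the helper script above
ots_stamp "1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7" "192.168.111.233:1111/callbackUrl"
ots_getfile "1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7"
```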

View File

@@ -1,4 +1,4 @@
FROM alpine
FROM alpine:3.8
RUN apk add --update --no-cache \
curl

View File

@@ -1,13 +1,28 @@
# Cyphernode CRON container
## Pull our Cyphernode image
```shell
docker pull cyphernode/proxycron:latest
```
## Build the image yourself
```shell
docker build -t cyphernode/proxycron:latest .
```
## Run the image
If you are using it independently of the Docker stack (docker-compose.yml), you can run it like this:
```shell
docker run --rm -d --network cyphernodenet --env-file env.properties cyphernode/proxycron:latest
```
## Configure your container by modifying the `env.properties` file
```properties
PROXY_URL=cyphernode:8888/executecallbacks
```
## Building docker image
```shell
docker build -t proxycronimg .
TX_CONF_URL=cyphernode:8888/executecallbacks
OTS_URL=cyphernode:8888/ots_backoffice
```
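To verify the two endpoints the cron container will hit, you can call them manually from inside the Docker network, mirroring the test commands used elsewhere in the documentation:
```shell
echo "GET /executecallbacks" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /ots_backoffice" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
```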

View File

@@ -1,3 +1,4 @@
#!/bin/sh
curl ${PROXY_URL}
curl ${TX_CONF_URL}
curl ${OTS_URL}

View File

@@ -1 +1,2 @@
PROXY_URL=cyphernode:8888/executecallbacks
TX_CONF_URL=proxy:8888/executecallbacks
OTS_URL=proxy:8888/ots_backoffice

26
dist/config.json.sample vendored Normal file
View File

@@ -0,0 +1,26 @@
{
"derivation_path": "0/n",
"installer": "docker",
"features": [
"lightning",
"otsclient",
"electrum"
],
"net": "testnet",
"xpub": "upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb",
"bitcoin_mode": "internal",
"bitcoin_rpcuser": "user",
"bitcoin_rpcpassword": "password",
"bitcoin_prune": false,
"bitcoin_uacomment": "",
"lightning_implementation": "c-lightning",
"lightning_nodename": "SatoshiPortal",
"lightning_nodecolor": "ff00ff",
"electrum_implementation": "eps",
"installer_mode": "docker",
"proxy_datapath": "/tmp/p",
"bitcoin_datapath": "/tmp/b",
"lightning_datapath": "/tmp/l",
"bitcoin_expose": false,
"devmode": true
}
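This sample can be fed back to the installer's recreate mode (`setup.sh -r`, described below) to rebuild a deployment non-interactively; a sketch, with the target location next to setup.sh assumed for illustration:
```shell
cp config.json.sample config.json   # assumed location: alongside setup.sh
./setup.sh -r                       # recreate the deployment files from config.json
```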

724
dist/setup.sh vendored Executable file
View File

@@ -0,0 +1,724 @@
#!/bin/bash
### Execute this on a freshly installed Ubuntu luna node
# curl -fsSL get.docker.com -o get-docker.sh
# sh get-docker.sh
# sudo usermod -aG docker $USER
## logout and relogin
# git clone --branch features/install --recursive https://github.com/schulterklopfer/cyphernode.git
# sudo curl -L "https://github.com/docker/compose/releases/download/1.22.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
# sudo chmod +x /usr/local/bin/docker-compose
# cd cyphernode
# ./setup.sh -ci
# docker-compose -f docker-compose.yaml up [-d]
# FROM: https://stackoverflow.com/questions/5195607/checking-bash-exit-status-of-several-commands-efficiently
# Use step(), try(), and next() to perform a series of commands and print
# [ OK ] or [FAILED] at the end. The step as a whole fails if any individual
# command fails.
#
# Example:
# step "Remounting / and /boot as read-write:"
# try mount -o remount,rw /
# try mount -o remount,rw /boot
# next
step() {
echo -n "$@"
STEP_OK=0
[[ -w /tmp ]] && echo $STEP_OK > /tmp/step.$$
}
try() {
# Check for `-b' argument to run command in the background.
local BG=
[[ $1 == -b ]] && { BG=1; shift; }
[[ $1 == -- ]] && { shift; }
# Run the command.
if [[ -z $BG ]]; then
"$@"
else
"$@" &
fi
# Check if command failed and update $STEP_OK if so.
local EXIT_CODE=$?
if [[ $EXIT_CODE -ne 0 ]]; then
STEP_OK=$EXIT_CODE
[[ -w /tmp ]] && echo $STEP_OK > /tmp/step.$$
if [[ -n $LOG_STEPS ]]; then
local FILE=$(readlink -m "${BASH_SOURCE[1]}")
local LINE=${BASH_LINENO[0]}
echo "$FILE: line $LINE: Command \`$*' failed with exit code $EXIT_CODE." >> "$LOG_STEPS"
fi
fi
return $EXIT_CODE
}
echo_success() {
#echo -n "[ OK ]"
echo -n
}
echo_failure() {
echo -n "[ FAILED ]"
}
next() {
[[ -f /tmp/step.$$ ]] && { STEP_OK=$(< /tmp/step.$$); rm -f /tmp/step.$$; }
[[ $STEP_OK -eq 0 ]] && echo_success || echo_failure
echo
return $STEP_OK
}
#function finish {
#
#}
#trap finish EXIT
cowsay() {
echo '
                     _____________________________________ 
                    / To start cyphernode run: ./start.sh \
                    \ To stop cyphernode run:  ./stop.sh  /
                     ------------------------------------- 
                            \   ^__^
                             \  (oo)\_______
                                (__)\       )\/\
                                    ||----w |
                                    ||     ||

[?25h[?1;5;2004l'
}
## /utils ----
sudo_if_required() {
if [[ $SUDO_REQUIRED == 1 && ! $(id -u) == 0 ]]; then
try sudo $@
else
try $@
fi
}
modify_permissions() {
local directories=("installer" "gatekeeper" "lightning" "bitcoin" "docker-compose.yaml" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH")
for d in "${directories[@]}"
do
if [[ -e $d ]]; then
step " modify permissions: $d"
sudo_if_required chmod -R og-rwx $d
next
fi
done
}
modify_owner() {
local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH")
local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER)
for d in "${directories[@]}"
do
if [[ -e $d ]]; then
step " modify owner \"$RUN_AS_USER\": $d "
sudo_if_required chown -R $user $d
next
fi
done
}
configure() {
## build setup docker image
local recreate=""
if [[ $1 == 1 ]]; then
recreate="recreate"
fi
local arch=$(uname -m)
local pw_env=''
local interactive=''
local gen_options=''
if [[ -t 1 ]]; then
interactive=' -it'
else
gen_options=' --force 2'
fi
if [[ $CFG_PASSWORD ]]; then
pw_env=" -e CFG_PASSWORD=$CFG_PASSWORD"
fi
if [[ $arch =~ ^arm ]]; then
clear && echo "Thinking. This may take a while, since I'm a Raspberry PI and my brain is so tiny. :("
else
clear && echo "Thinking..."
fi
# before starting a new cyphernodeconf, kill all the others
local otherCyphernodeconf=$(docker ps | grep "cyphernodeconf" | awk '{ print $1 }');
if [[ ! ''$otherCyphernodeconf == '' ]]; then
docker rm -f $otherCyphernodeconf > /dev/null 2>&1
fi
local user=$(id -u):$(id -g)
if [[ ! ''$CONFIGURE_AS_USER == '' ]]; then
user=$CONFIGURE_AS_USER
step "configure as user \"$CONFIGURE_AS_USER\""
fi
# configure features of cyphernode
docker run -v $current_path:/data \
-e DEFAULT_USER=$USER \
-e DEFAULT_CERT_HOSTNAME=$(hostname) \
-e VERSION_OVERRIDE=$VERSION_OVERRIDE \
-e GATEKEEPER_VERSION=$GATEKEEPER_VERSION \
-e PROXY_VERSION=$PROXY_VERSION \
-e PROXYCRON_VERSION=$PROXYCRON_VERSION \
-e OTSCLIENT_VERSION=$OTSCLIENT_VERSION \
-e PYCOIN_VERSION=$PYCOIN_VERSION \
-e BITCOIN_VERSION=$BITCOIN_VERSION \
-e LIGHTNING_VERSION=$LIGHTNING_VERSION \
--log-driver=none$pw_env \
--network none \
--rm$interactive cyphernode/cyphernodeconf:$CONF_VERSION $user yo --no-insight cyphernode$gen_options $recreate
if [[ -f $current_path/exitStatus.sh ]]; then
. $current_path/exitStatus.sh
rm $current_path/exitStatus.sh
fi
if [[ ! $EXIT_STATUS == 0 ]]; then
exit 1
fi
}
copy_file() {
local doCopy=0
local sourceFile=$1
local targetFile=$2
local sudo=''
local createBackup=1
if [[ $4 == 1 ]]; then
sudo='sudo '
fi
if [[ ! ''$3 == '' ]]; then
createBackup=$3
fi
if [[ ! -f $sourceFile ]]; then
return 1;
fi
if [[ -f $targetFile ]]; then
${sudo}cmp --silent $sourceFile $targetFile
if [[ $? == 1 ]]; then
# different content
if [[ $createBackup == 1 ]]; then
step " create backup of $targetFile "
try ${sudo}cp $targetFile $targetFile-$(date +"%y-%m-%d-%T")
next
fi
doCopy=1
else
echo "identical $sourceFile == $targetFile"
fi
else
doCopy=1
fi
if [[ $doCopy == 1 ]]; then
local basename=$(basename "$sourceFile")
step " copy $sourceFile => $targetFile "
try ${sudo}cp $sourceFile $targetFile
next
fi
}
create_user() {
#check if user exists
if [[ ! $RUN_AS_USER == $USER ]]; then
id -u $RUN_AS_USER > /dev/null 2>&1
if [[ $? == 1 ]]; then
step " create user $RUN_AS_USER "
if [[ $(id -u) == 0 ]]; then
try useradd $RUN_AS_USER
else
try sudo useradd $RUN_AS_USER
fi
next
fi
fi
}
process_bitcoinconf() {
local bitcoinconf=$1
# grep for prune entry and delete all whitespaces
local pruneEntry=$(sudo_if_required grep -e ^prune $bitcoinconf | tr -d '[:space:]')
local txindexEntry=$(sudo_if_required grep -e ^txindex $bitcoinconf | tr -d '[:space:]')
local testnetEntry=$(sudo_if_required grep -e ^testnet $bitcoinconf | tr -d '[:space:]')
local regtestEntry=$(sudo_if_required grep -e ^regtest $bitcoinconf | tr -d '[:space:]')
local prune=0
local txindex=0
local testnet=0
local regtest=0
if [[ $pruneEntry =~ ^prune && ! $pruneEntry == 'prune=0' ]]; then
prune=1
fi
if [[ $txindexEntry =~ ^txindex && ! $txindexEntry == 'txindex=0' ]]; then
txindex=1
fi
if [[ $testnetEntry =~ ^testnet && ! $testnetEntry == 'testnet=0' ]]; then
testnet=1
fi
if [[ $regtestEntry =~ ^regtest && ! $regtestEntry == 'regtest=0' ]]; then
regtest=1
fi
# Pack the four flags into one integer (bit 0: prune, bit 1: txindex, bit 2: testnet, bit 3: regtest), e.g.:
# prune & txindex: 3
# !prune & txindex: 2
# prune & !txindex: 1
# !prune & !txindex: 0
echo $(($prune|$txindex<<1|$testnet<<2|$regtest<<3))
}
compare_bitcoinconf() {
local new_bitcoinconf=$1
local old_bitcoinconf=$2
local status
if [[ ! -f $old_bitcoinconf || ! -f $new_bitcoinconf ]]; then
return 1
fi
local old_config=$(process_bitcoinconf $old_bitcoinconf )
local new_config=$(process_bitcoinconf $new_bitcoinconf )
local old_prune=$(($old_config&1))
local old_txindex=$((($old_config>>1)&1))
local old_testnet=$((($old_config>>2)&1))
local old_regtest=$((($old_config>>3)&1))
local new_prune=$(($new_config&1))
local new_txindex=$((($new_config>>1)&1))
local new_testnet=$((($new_config>>2)&1))
local new_regtest=$((($new_config>>3)&1))
if [[ $new_prune == 1 && $old_prune == 0 ]]; then
# warn about data loss
# ask for user permission
status='dataloss'
fi
if [[ $new_txindex == 1 && $old_txindex == 0 ]]; then
# warn about reindexing
status='reindex'
fi
if [[ ! $new_testnet == $old_testnet || ! $new_regtest == $old_regtest ]]; then
# warn about reindexing
status='incompatible'
fi
echo $status
}
install_docker() {
local archpath=$(uname -m)
# compat mode for SatoshiPortal repo
# TODO: add more mappings?
if [[ $archpath == 'armv7l' ]]; then
archpath="rpi"
fi
if [ ! -d $GATEKEEPER_DATAPATH ]; then
step " create $GATEKEEPER_DATAPATH"
sudo_if_required mkdir -p $GATEKEEPER_DATAPATH
next
fi
if [ -d $GATEKEEPER_DATAPATH ]; then
if [[ ! -f $GATEKEEPER_DATAPATH/installation.json ]]; then
# prevent mounting installation.json as a directory
sudo_if_required touch $GATEKEEPER_DATAPATH/installation.json
fi
if [[ ! -d $GATEKEEPER_DATAPATH/certs ]]; then
sudo_if_required mkdir -p $GATEKEEPER_DATAPATH/certs > /dev/null 2>&1
fi
if [[ ! -d $GATEKEEPER_DATAPATH/private ]]; then
sudo_if_required mkdir -p $GATEKEEPER_DATAPATH/private > /dev/null 2>&1
fi
copy_file $current_path/gatekeeper/api.properties $GATEKEEPER_DATAPATH/api.properties 1 $SUDO_REQUIRED
copy_file $current_path/gatekeeper/keys.properties $GATEKEEPER_DATAPATH/keys.properties 1 $SUDO_REQUIRED
copy_file $current_path/config.7z $GATEKEEPER_DATAPATH/config.7z 1 $SUDO_REQUIRED
copy_file $current_path/client.7z $GATEKEEPER_DATAPATH/client.7z 1 $SUDO_REQUIRED
copy_file $current_path/gatekeeper/cert.pem $GATEKEEPER_DATAPATH/certs/cert.pem 1 $SUDO_REQUIRED
copy_file $current_path/gatekeeper/key.pem $GATEKEEPER_DATAPATH/private/key.pem 1 $SUDO_REQUIRED
copy_file $current_path/gatekeeper/htpasswd $GATEKEEPER_DATAPATH/htpasswd 1 $SUDO_REQUIRED
fi
if [ ! -d $PROXY_DATAPATH ]; then
step " create $PROXY_DATAPATH"
sudo_if_required mkdir -p $PROXY_DATAPATH
next
fi
copy_file $current_path/installer/config.sh $PROXY_DATAPATH/config.sh 1 $SUDO_REQUIRED
if [[ $BITCOIN_INTERNAL == true ]]; then
if [ ! -d $BITCOIN_DATAPATH ]; then
step " create $BITCOIN_DATAPATH"
sudo_if_required mkdir -p $BITCOIN_DATAPATH
next
fi
if [ -d $BITCOIN_DATAPATH ]; then
local cmpStatus=$(compare_bitcoinconf $current_path/bitcoin/bitcoin.conf $BITCOIN_DATAPATH/bitcoin.conf)
if [[ $cmpStatus == 'dataloss' ]]; then
if [[ $ALWAYSYES == 1 ]]; then
copy_file $current_path/bitcoin/bitcoin.conf $BITCOIN_DATAPATH/bitcoin.conf 1 $SUDO_REQUIRED
else
while true; do
echo " Really copy bitcoin.conf with pruning option?"
read -p " This will discard some blockchain data. (yn) " yn
case $yn in
[Yy]* ) copy_file $current_path/bitcoin/bitcoin.conf $BITCOIN_DATAPATH/bitcoin.conf 1 $SUDO_REQUIRED; break;;
[Nn]* ) copy_file $current_path/bitcoin/bitcoin.conf $BITCOIN_DATAPATH/bitcoin.conf.cyphernode 0 $SUDO_REQUIRED
echo " Your cyphernode installation is most likely broken."
echo " Please check bitcoin.conf.cyphernode on how to repair it manually.";
break;;
* ) echo "Please answer yes or no.";;
esac
done
fi
elif [[ $cmpStatus == 'incompatible' ]]; then
copy_file $current_path/bitcoin/bitcoin.conf $BITCOIN_DATAPATH/bitcoin.conf.cyphernode 0 $SUDO_REQUIRED
echo " Blockchain data is not compatible, due to misconfigured nets."
echo " Your cyphernode installation is most likely broken."
echo " Please check bitcoin.conf.cyphernode on how to repair it manually."
else
if [[ $cmpStatus == 'reindex' ]]; then
echo " Warning Reindexing will take some time."
fi
copy_file $current_path/bitcoin/bitcoin.conf $BITCOIN_DATAPATH/bitcoin.conf 1 $SUDO_REQUIRED
fi
fi
fi
if [[ $FEATURE_LIGHTNING == true ]]; then
if [[ $LIGHTNING_IMPLEMENTATION == "c-lightning" ]]; then
local dockerfile="Dockerfile"
if [[ $archpath == "rpi" ]]; then
dockerfile="Dockerfile-alpine"
fi
if [ ! -d $LIGHTNING_DATAPATH ]; then
step " create $LIGHTNING_DATAPATH"
sudo_if_required mkdir -p $LIGHTNING_DATAPATH
next
fi
if [ -d $LIGHTNING_DATAPATH ]; then
copy_file $current_path/lightning/c-lightning/config $LIGHTNING_DATAPATH/config 1 $SUDO_REQUIRED
copy_file $current_path/lightning/c-lightning/bitcoin.conf $LIGHTNING_DATAPATH/bitcoin.conf 1 $SUDO_REQUIRED
fi
fi
fi
if [[ $FEATURE_OTSCLIENT == true ]]; then
if [ ! -d $OTSCLIENT_DATAPATH ]; then
step " create $OTSCLIENT_DATAPATH"
sudo_if_required mkdir -p $OTSCLIENT_DATAPATH
next
fi
fi
docker swarm join-token worker > /dev/null 2>&1
local noSwarm=$?;
if [[ $DOCKER_MODE == 'swarm' && $noSwarm == 1 ]]; then
step " init docker swarm"
try docker swarm init --task-history-limit 1 > /dev/null 2>&1
next
fi
local net_entry=$(docker network ls | grep cyphernodenet);
if [[ $net_entry =~ 'cyphernodenet' ]]; then
if [[ $net_entry =~ 'local' && $DOCKER_MODE == 'swarm' ]]; then
step " recreate cyphernode network"
try docker network rm cyphernodenet > /dev/null 2>&1
try docker network create -d overlay --attachable --opt encrypted cyphernodenet > /dev/null 2>&1
next
elif [[ $net_entry =~ 'swarm' && $DOCKER_MODE == 'compose' ]]; then
step " recreate cyphernode network"
try docker network rm cyphernodenet > /dev/null 2>&1
try docker network create cyphernodenet > /dev/null 2>&1
next
fi
else
if [[ $DOCKER_MODE == 'swarm' ]]; then
step " create cyphernode network"
try docker network create -d overlay --attachable --opt encrypted cyphernodenet > /dev/null 2>&1
next
elif [[ $DOCKER_MODE == 'compose' ]]; then
step " create cyphernode network"
try docker network create cyphernodenet > /dev/null 2>&1
next
fi
fi
copy_file $current_path/installer/docker/docker-compose.yaml $current_path/docker-compose.yaml
copy_file $current_path/installer/testfeatures.sh $current_path/testfeatures.sh 0
copy_file $current_path/installer/start.sh $current_path/start.sh 0
copy_file $current_path/installer/stop.sh $current_path/stop.sh 0
if [[ ! -x $current_path/start.sh ]]; then
step " make start.sh executable"
try chmod +x $current_path/start.sh
next
fi
if [[ ! -x $current_path/stop.sh ]]; then
step " make stop.sh executable"
try chmod +x $current_path/stop.sh
next
fi
if [[ ! -x $current_path/testfeatures.sh ]]; then
step " make testfeatures.sh executable"
try chmod +x $current_path/testfeatures.sh
next
fi
}
check_directory_owner() {
# if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0
local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH")
local status=0
for d in "${directories[@]}"
do
if [[ -e $d ]]; then
# is it mine and does it have rw ?
# don't care about group rights
if [[ ! -r $d || ! -w $d ]]; then
status=1
break;
fi
else
# does parent exist and do we have rw on that?
local parentDir=$(dirname $d)
while [[ ! $parentDir == '/' && ! -e $parentDir ]]; do
parentDir=$(dirname $parentDir)
done
if [[ ! -r $parentDir || ! -w $parentDir ]]; then
status=1
fi
fi
done
echo $status
}
check_bitcoind() {
echo 0
}
sanity_checks() {
echo " check requirements."
if ! [ -x "$(command -v docker)" ]; then
echo " docker is not installed on your system. Please check https://www.docker.com/get-started."
exit
fi
if [[ $DOCKER_MODE == 'compose' && ! -x "$(command -v docker-compose)" ]]; then
echo " docker-compose is not installed on your system. Please check https://docs.docker.com/compose/install/."
exit
fi
local OS=$(uname -s)
if [[ ''$RUN_AS_USER == '' ]]; then
RUN_AS_USER=$USER
elif [[ $OS == 'Darwin' ]]; then
echo " Run as user option is not supported on OSX."
echo " Please run start.sh later as the user you are running this setup utility under."
RUN_AS_USER=$USER
fi
local sudo=0
local sudo_reason
if [[ ! ''$RUN_AS_USER == ''$USER ]]; then
sudo=1
sudo_reason='user'
fi
if [[ $sudo == 0 ]]; then
# we still don't need sudo. Let's check access to directories
sudo=$(check_directory_owner)
sudo_reason='directories'
fi
if [[ $sudo == 1 ]]; then
echo " check Cyphernode installer has determined that it needs sudo to continue."
echo " Let's verify that you have sudo rights..."
sudo echo " Yes! You have what it takes to run cyphernode."
if [[ $? == 1 ]]; then
echo " AARGH! Mein Leben..."
echo " To fix this, either ask your administrator to add you to the sudo group"
if [[ $sudo_reason == 'user' ]]; then
echo " or do not use the 'run as different user' option."
fi
if [[ $sudo_reason == 'directories' ]]; then
echo " or check your data volumes if they have the right owner."
echo " The owner of the following folders should be '$RUN_AS_USER':"
local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH")
local status=0
for d in "${directories[@]}"
do
if [[ -e $d ]]; then
echo " $d"
fi
done
fi
exit
else
SUDO_REQUIRED=1
fi
else
echo " nice! everything seems to be ok."
fi
}
install() {
if [[ ''$INSTALLER_MODE == 'none' ]]; then
echo "Skipping installation phase"
elif [[ ''$INSTALLER_MODE == 'docker' ]]; then
install_docker
fi
}
CONFIGURE=0
INSTALL=0
RECREATE=0
TRACING=1
ALWAYSYES=0
SUDO_REQUIRED=0
AUTOSTART=0
# CYPHERNODE VERSION "v0.1.0-rc.1"
VERSION_OVERRIDE="true"
CONF_VERSION="v0.1-rc.1"
GATEKEEPER_VERSION="v0.1-rc.1"
PROXY_VERSION="v0.1-rc.1"
PROXYCRON_VERSION="v0.1-rc.1"
OTSCLIENT_VERSION="v0.1-rc.1"
PYCOIN_VERSION="v0.1-rc.1"
BITCOIN_VERSION="v0.17.0"
LIGHTNING_VERSION="v0.6.2"
# trap ctrl-c and call ctrl_c()
trap ctrl_c INT
function ctrl_c() {
echo " Canceling installation process."
exit
}
export current_path="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
while getopts ":cirhys" opt; do
case $opt in
r)
RECREATE=1
;;
c)
CONFIGURE=1
;;
i)
INSTALL=1
;;
y)
ALWAYSYES=1
;;
s)
AUTOSTART=1
;;
h)
echo "-c configure" >&2
echo "-r recreate" >&2
echo "-i install" >&2
echo "-y assume yes to all questions" >&2
echo "-s autostart" >&2
exit
;;
\?)
echo "Invalid option: -$OPTARG. Use -c to configure and -i to install or -r to recreate from config.json." >&2
;;
esac
done
if [[ $CONFIGURE == 0 && $INSTALL == 0 && $RECREATE == 0 ]]; then
CONFIGURE=1
INSTALL=1
fi
if [[ $CONFIGURE == 1 ]]; then
configure $RECREATE
fi
if [[ -f $current_path/installer/config.sh ]]; then
. $current_path/installer/config.sh
fi
if [[ $CLEANUP == 'true' && $(docker image ls | grep cyphernodeconf) =~ cyphernodeconf ]]; then
step " clean cyphernodeconf image"
try docker image rm cyphernodeconf > /dev/null 2>&1
next
fi
if [[ $INSTALL == 1 ]]; then
sanity_checks
create_user
install
modify_owner
modify_permissions
fi
if [[ $AUTOSTART == 1 ]]; then
exec $current_path/start.sh
else
cowsay
fi
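Based on the getopts block above and the comments at the top of the script, typical invocations look like this (when no flag is given, setup.sh behaves as if -c and -i were passed):
```shell
./setup.sh            # configure, then install (the default)
./setup.sh -ci        # explicit configure + install
./setup.sh -rci       # recreate from config.json, reconfigure, install
./setup.sh -i -y -s   # install only, assume yes to all questions, autostart
```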

1
dist/sr.sh vendored Normal file
View File

@@ -0,0 +1 @@
curl -fsSL https://raw.githubusercontent.com/SatoshiPortal/cyphernode/master/dist/setup.sh -o setup_cyphernode.sh && chmod +x setup_cyphernode.sh && ./setup_cyphernode.sh

View File

@@ -1,3 +1,5 @@
# This README file can be used if you want to install manually. This is the old documentation from before the installer existed.
# Here are the exact steps I did to install cyphernode on a debian server running on x86 arch, as user debian.
## Update server and install git
@@ -38,6 +40,7 @@ vi proxy_docker/env.properties
```shell
vi cron_docker/env.properties
vi pycoin_docker/env.properties
vi otsclient_docker/env.properties
vi api_auth_docker/env.properties
```
@@ -45,13 +48,14 @@ vi api_auth_docker/env.properties
```shell
sudo useradd cyphernode
mkdir ~/btcproxydb ; sudo chown -R cyphernode:debian ~/btcproxydb ; sudo chmod g+ws ~/btcproxydb
mkdir ~/proxydb ; sudo chown -R cyphernode:cyphernode ~/proxydb ; sudo chmod g+ws ~/proxydb
mkdir -p ~/cyphernode-ssl/certs ~/cyphernode-ssl/private
openssl req -subj '/CN=localhost' -x509 -newkey rsa:4096 -nodes -keyout ~/cyphernode-ssl/private/key.pem -out ~/cyphernode-ssl/certs/cert.pem -days 365
docker build -t authapi api_auth_docker/.
docker build -t proxycronimg cron_docker/.
docker build -t btcproxyimg proxy_docker/.
docker build -t pycoinimg pycoin_docker/.
docker build -t otsclientimg otsclient_docker/.
```
## Build images from Satoshi Portal's dockers repo
@@ -67,7 +71,7 @@ vi bitcoin.conf
*Make sure testnet, rpcuser and rpcpassword have the same value as in bitcoin node's bitcoin.conf file (see below)*
```console
rpcconnect=btcnode
rpcconnect=bitcoin
rpcuser=rpc_username
rpcpassword=rpc_password
testnet=1
@@ -78,14 +82,17 @@ rpcwallet=ln01.dat
vi config
mkdir ~/lndata
cp config ~/lndata/
sudo chown -R cyphernode:debian ~/lndata ; sudo chmod g+ws ~/lndata
sudo chown -R cyphernode:cyphernode ~/lndata ; sudo chmod g+ws ~/lndata
sudo find ~/lndata -type d -exec chmod 2775 {} \; ; sudo find ~/lndata -type f -exec chmod g+rw {} \;
docker build -t clnimg .
cd ../../bitcoin-core/
mkdir ~/btcdata
sudo chown -R cyphernode:debian ~/btcdata ; sudo chmod g+ws ~/btcdata
sudo chown -R cyphernode:cyphernode ~/btcdata ; sudo chmod g+ws ~/btcdata
sudo find ~/btcdata -type d -exec chmod 2775 {} \; ; sudo find ~/btcdata -type f -exec chmod g+rw {} \;
docker build -t btcnode .
mkdir ~/otsfiles
sudo chown -R cyphernode:cyphernode ~/otsfiles ; sudo chmod g+ws ~/otsfiles
sudo find ~/otsfiles -type d -exec chmod 2775 {} \; ; sudo find ~/otsfiles -type f -exec chmod g+rw {} \;
```
## Mount bitcoin data volume and make sure bitcoin configuration is ok
@@ -113,21 +120,21 @@ zmqpubrawtx=tcp://0.0.0.0:29000
wallet=watching01.dat
wallet=spending01.dat
wallet=ln01.dat
walletnotify=curl cyphernode:8888/conf/%s
walletnotify=curl proxy:8888/conf/%s
```
## Deploy the cyphernode stack
```shell
cd ~/cyphernode/
USER=`id -u cyphernode`:`id -g cyphernode` docker stack deploy --compose-file docker-compose.yml cyphernodestack
USER=`id -u cyphernode`:`id -g cyphernode` docker stack deploy --compose-file docker-compose.yml cyphernode
```
## Wait a few minutes and re-apply permissions
```shell
sudo chown -R cyphernode:debian ~/lndata ; sudo chmod g+ws ~/lndata
sudo chown -R cyphernode:debian ~/btcdata ; sudo chmod g+ws ~/btcdata
sudo chown -R cyphernode:cyphernode ~/lndata ; sudo chmod g+ws ~/lndata
sudo chown -R cyphernode:cyphernode ~/btcdata ; sudo chmod g+ws ~/btcdata
sudo find ~/lndata -type d -exec chmod 2775 {} \; ; sudo find ~/lndata -type f -exec chmod g+rw {} \;
sudo find ~/btcdata -type d -exec chmod 2775 {} \; ; sudo find ~/btcdata -type f -exec chmod g+rw {} \;
```
@@ -135,13 +142,20 @@ sudo find ~/btcdata -type d -exec chmod 2775 {} \; ; sudo find ~/btcdata -type f
## Test the deployment
```shell
id="001";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="2df1eeea370eacdc5cf7e96c2d82140d1568079a5d4d87006ec8718a98883b36";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -H "Authorization: Bearer $token" -k https://localhost/getbestblockhash
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -H "Authorization: Bearer $token" -k https://localhost/getbalance
id="001";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="2df1eeea370eacdc5cf7e96c2d82140d1568079a5d4d87006ec8718a98883b36";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Authorization: Bearer $token" -k https://127.0.0.1/getbestblockhash
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Authorization: Bearer $token" -k https://127.0.0.1/getbalance
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' -H "Authorization: Bearer $token" -k https://127.0.0.1/ots_stamp
```
If you need the authorization header to copy/paste in another tool:
```shell
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+30))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";echo "Bearer $token"
```
```shell
echo "GET /getbestblockinfo" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
echo "GET /getbalance" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
echo "GET /ln_getinfo" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
docker exec -it `docker ps -q -f name=cyphernodestack_cyphernode` curl -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" cyphernode:8888/derivepubpath
echo "GET /getbestblockinfo" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getbalance" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /ln_getinfo" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
docker exec -it `docker ps -q -f name=cyphernodestack_cyphernode` curl -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" proxy:8888/derivepubpath
```

150
doc/INSTALL-MANUALLY.md Normal file
View File

@@ -0,0 +1,150 @@
# This README file can be used if you want to install manually. This is the old documentation from before the installer existed.
## Upgrading
Your proxy's database won't be lost: migration scripts automatically migrate the database when the proxy starts.
```
proxy_docker/app/data/sqlmigrate*
```
# Cyphernode
Indirection layer between client and Bitcoin-related services.
Here's the plan:
- The containers are not publicly exposing ports.
- Everything is accessible exclusively within the encrypted overlay network.
- If your system is distributed:
- ...should be doubly encrypted by an OpenVPN tunnel
- ...the hosts should be secured and the VPN tunnel should be limited in scope by iptables rules on each host.
- We can have different Bitcoin Nodes for watching and spending, giving the flexibility to have a different security model on each.
- Only the Proxy has Bitcoin Node RPC credentials.
- The Proxy is exclusively accessible by the Overlay network's containers.
- To manually manage the Proxy (and have access to it), one has to gain access to the Docker host servers as a docker user.
- **Coming soon**: added security to use the spending features of the Proxy with Trezor and Coldcard.
## See [Step-by-step detailed instructions](INSTALL-MANUAL-STEPS.md) for real-world copy-paste standard install instructions
## Setting up
Default setup assumes your Bitcoin Node is already running somewhere. It takes a lot of disk space and often already exists in your infrastructure, so why not reuse it? After all, a full blockchain sync takes a while.
You could also just uncomment it in the docker-compose file. If you run it in pruned mode, say so in config.properties. The computefees feature won't work in pruned mode.
### Set the swarm
(10.8.0.2 is the host's VPN IP address)
```shell
debian@dev:~/dev/Cyphernode$ docker swarm init --task-history-limit 1 --advertise-addr 10.8.0.2
Swarm initialized: current node (hufy324d291dyakizsuvjd0uw) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-2pxouynn9g8si42e8g9ujwy0v9po45axx367fy0fkjhzo3l1z8-75nirjfkobl7htvpfh986pyz3 10.8.0.2:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
```
### Create the Overlay Network and make sure your app joins it!
(if your app is not a Docker container, you will have to expose Cyphernode's port and secure it! In that case, use a reverse proxy with TLS)
```shell
debian@dev:~/dev/Cyphernode$ docker network create --driver=overlay --attachable --opt encrypted cyphernodenet
debian@dev:~/dev/Cyphernode$ docker network connect cyphernodenet yourappcontainer
```
### Configuration
```shell
debian@dev:~/dev/Cyphernode$ vi proxy_docker/env.properties
debian@dev:~/dev/Cyphernode$ vi cron_docker/env.properties
debian@dev:~/dev/Cyphernode$ vi pycoin_docker/env.properties
debian@dev:~/dev/Cyphernode$ vi api_auth_docker/env.properties
```
### Build cron image
[See how to build proxycron image](../cron_docker)
### Build btcproxy image
[See how to build btcproxy image](../proxy_docker)
### Build pycoin image
[See how to build pycoin image](../pycoin_docker)
### Build btcnode image
[See how to build btcnode image](https://github.com/SatoshiPortal/dockers/tree/master/x86_64/bitcoin-core)
### Build clightning image
[See how to build clightning image](https://github.com/SatoshiPortal/dockers/tree/master/x86_64/LN/c-lightning)
### Build the authenticated HTTP API image
[See how to build authapi image](../api_auth_docker)
### Deploy
**Edit docker-compose.yml to specify special deployment constraints. If you want to run the Bitcoin node on the same machine, uncomment the corresponding lines.**
```shell
debian@dev:~/dev/Cyphernode$ USER=`id -u cyphernode`:`id -g cyphernode` docker stack deploy --compose-file docker-compose.yml cyphernodestack
Creating service cyphernodestack_authapi
Creating service cyphernodestack_cyphernode
Creating service cyphernodestack_proxycronnode
Creating service cyphernodestack_pycoinnode
Creating service cyphernodestack_clightningnode
```
## Off-site Bitcoin Node
This section is useful if you already have a Bitcoin Core node running and you want to use it in Cyphernode. In that case, please comment out the btcnode section from docker-compose.yml.
### Join swarm created on Cyphernode server
```shell
pi@SP-BTC01:~ $ docker swarm join --token SWMTKN-1-2pxouynn9g8si42e8g9ujwy0v9po45axx367fy0fkjhzo3l1z8-75nirjfkobl7htvpfh986pyz3 10.8.0.2:2377
```
### Build node container image
[See how to build Bitcoin Node image](https://github.com/SatoshiPortal/dockers/tree/master/rpi/bitcoin-core)
### Connect already-running node
```shell
pi@SP-BTC01:~ $ docker network connect cyphernodenet btcnode
```
## Test deployment from outside of the Swarm
```shell
id="001";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="2df1eeea370eacdc5cf7e96c2d82140d1568079a5d4d87006ec8718a98883b36";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Authorization: Bearer $token" -k https://127.0.0.1/getbestblockhash
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Authorization: Bearer $token" -k https://127.0.0.1/getbalance
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' -H "Authorization: Bearer $token" -k https://127.0.0.1/ots_stamp
```
If you need the authorization header to copy/paste in another tool:
```shell
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+60))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";echo "Bearer $token"
```
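The one-liners above pack the whole JWT construction into a single line. Here is the same construction broken into its three parts (base64 header, base64 payload with a short expiry, HMAC-SHA256 signature), using the same sample id/key as above, which is easier to adapt:
```shell
# Equivalent step-by-step token construction
id="003"
key="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af"
header64=$(echo -n '{"alg":"HS256","typ":"JWT"}' | base64)
payload64=$(echo -n "{\"id\":\"$id\",\"exp\":$(($(date +"%s")+60))}" | base64)
signature=$(echo -n "$header64.$payload64" | openssl dgst -hmac "$key" -sha256 -r | cut -sd ' ' -f1)
echo "Bearer $header64.$payload64.$signature"
```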
## Test deployment from any host of the swarm
```shell
echo "GET /getbestblockinfo" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getbalance" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getbestblockhash" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getblockinfo/00000000a64e0d1ae0c39166f4e8717a672daf3d61bf7bbb41b0f487fcae74d2" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
curl -v -H "Content-Type: application/json" -d '{"address":"2MsWyaQ8APbnqasFpWopqUKqsdpiVY3EwLE","amount":0.2}' proxy:8888/spend
echo "GET /ln_getinfo" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /ln_newaddr" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
curl -v -H "Content-Type: application/json" -d '{"msatoshi":10000,"label":"koNCcrSvhX3dmyFhW","description":"Bylls order #10649","expiry":900}' proxy:8888/ln_create_invoice
curl -v -H "Content-Type: application/json" -d '{"bolt11":"lntb1pdca82tpp5gv8mn5jqlj6xztpnt4r472zcyrwf3y2c3cvm4uzg2gqcnj90f83qdp2gf5hgcm0d9hzqnm4w3kx2apqdaexgetjyq3nwvpcxgcqp2g3d86wwdfvyxcz7kce7d3n26d2rw3wf5tzpm2m5fl2z3mm8msa3xk8nv2y32gmzlhwjved980mcmkgq83u9wafq9n4w28amnmwzujgqpmapcr3","msatoshi":10000,"description":"Bitcoin Outlet order #7082"}' proxy:8888/ln_pay
```

View File

@@ -1,133 +1,55 @@
# Cyphernode
Indirection layer between client and Bitcoin-related services.
## Setting Up
Here's the plan:
### Installer
- The containers do not publicly expose ports.
- Everything is accessible exclusively within the encrypted overlay network.
- If your system is distributed:
- ...should be doubly encrypted by an OpenVPN tunnel
- ...the hosts should be secured and the VPN tunnel's scope should be limited by iptables rules on each host.
- We can have different Bitcoin Nodes for watching and spending, giving the flexibility to have different security models on each.
- Only the Proxy has Bitcoin Node RPC credentials.
- The Proxy is exclusively accessible by the Overlay network's containers.
- To manually manage the Proxy (and have access to it), one has to gain access to the Docker host servers as a docker user.
- **Coming soon**: added security to use the spending features of the Proxy with Trezor and Coldcard.
We are providing an installer to help you set up Cyphernode. All the Docker images used by Cyphernode have been prebuilt for x86 and ARM (RPi) architectures and are hosted on the Docker Hub public registry, in the Cyphernode repository (https://hub.docker.com/u/cyphernode/).
## See [Step-by-step detailed instructions](INSTALL-MANUAL-STEPS.md) for real-world copy-paste standard install instructions
## Setting up
Default setup assumes your Bitcoin Node is already running somewhere. The reason is that it takes a lot of disk space and often already exists in your infrastructure, so why not reuse it? After all, a full blockchain sync takes a while.
You could also just uncomment it in the docker-compose file. If you run it in pruned mode, say so in config.properties. The computefees feature won't work in pruned mode.
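If you reuse an existing node, you can quickly check whether it runs in pruned mode before filling in config.properties (assuming bitcoin-cli and jq are available on that host):
```shell
# "true" means pruned: fee computation on incoming transactions won't be available
bitcoin-cli getblockchaininfo | jq '.pruned'
```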
### Set the swarm
(10.8.0.2 is the host's VPN IP address)
You can clone the git repository and install:
```shell
debian@dev:~/dev/Cyphernode$ docker swarm init --task-history-limit 1 --advertise-addr 10.8.0.2
Swarm initialized: current node (hufy324d291dyakizsuvjd0uw) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-2pxouynn9g8si42e8g9ujwy0v9po45axx367fy0fkjhzo3l1z8-75nirjfkobl7htvpfh986pyz3 10.8.0.2:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
git clone https://github.com/SatoshiPortal/cyphernode.git
cd cyphernode/dist
./setup.sh
```
### Create the Overlay Network and make sure your app joins it!
(if your app is not a Docker container, you will have to expose Cyphernode's port and secure it! In that case, use a reverse proxy with TLS)
Or you can simply run this magic command to start setup and installation:
```shell
debian@dev:~/dev/Cyphernode$ docker network create --driver=overlay --attachable --opt encrypted cyphernodenet
debian@dev:~/dev/Cyphernode$ docker network connect cyphernodenet yourappcontainer
curl -fsSL https://raw.githubusercontent.com/SatoshiPortal/cyphernode/master/dist/setup.sh -o setup_cyphernode.sh && chmod +x setup_cyphernode.sh && ./setup_cyphernode.sh
```
### Configuration
## Upgrading
Your proxy's database won't be lost. Migration scripts automatically migrate the database when the proxy starts.
```
proxy_docker/app/data/sqlmigrate*
```
## Manually test your installation through the Gatekeeper
If you need the authorization header to copy/paste in another tool, put your API ID (id=) and API key (k=) in the following command:
```shell
debian@dev:~/dev/Cyphernode$ vi proxy_docker/env.properties
debian@dev:~/dev/Cyphernode$ vi cron_docker/env.properties
debian@dev:~/dev/Cyphernode$ vi pycoin_docker/env.properties
debian@dev:~/dev/Cyphernode$ vi api_auth_docker/env.properties
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+60))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";echo "Bearer $token"
```
### Build cron image
[See how to build proxycron image](../cron_docker)
### Build btcproxy image
[See how to build btcproxy image](../proxy_docker)
### Build pycoin image
[See how to build pycoin image](../pycoin_docker)
### Build btcnode image
[See how to build btcnode image](https://github.com/SatoshiPortal/dockers/tree/master/x86_64/bitcoin-core)
### Build clightning image
[See how to build clightning image](https://github.com/SatoshiPortal/dockers/tree/master/x86_64/LN/c-lightning)
### Build the authenticated HTTP API image
[See how to build authapi image](../api_auth_docker)
### Deploy
**Edit docker-compose.yml to specify special deployment constraints or if you want to run the Bitcoin node on the same machine: uncomment corresponding lines.**
Using curl directly on the command line, put your API ID (id=) and API key (k=) in the following commands:
```shell
debian@dev:~/dev/Cyphernode$ USER=`id -u cyphernode`:`id -g cyphernode` docker stack deploy --compose-file docker-compose.yml cyphernodestack
Creating service cyphernodestack_authapi
Creating service cyphernodestack_cyphernode
Creating service cyphernodestack_proxycronnode
Creating service cyphernodestack_pycoinnode
Creating service cyphernodestack_clightningnode
id="001";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="2df1eeea370eacdc5cf7e96c2d82140d1568079a5d4d87006ec8718a98883b36";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Authorization: Bearer $token" -k https://127.0.0.1/getbestblockhash
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Authorization: Bearer $token" -k https://127.0.0.1/getbalance
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' -H "Authorization: Bearer $token" -k https://127.0.0.1/ots_stamp
```
## Off-site Bitcoin Node
This section is useful if you already have a Bitcoin Core node running and you want to use it in Cyphernode. In that case, please comment out the btcnode section from docker-compose.yml.
### Join swarm created on Cyphernode server
## Manually test your installation directly on the Proxy:
```shell
pi@SP-BTC01:~ $ docker swarm join --token SWMTKN-1-2pxouynn9g8si42e8g9ujwy0v9po45axx367fy0fkjhzo3l1z8-75nirjfkobl7htvpfh986pyz3 10.8.0.2:2377
```
### Build node container image
[See how to build Bitcoin Node image](https://github.com/SatoshiPortal/dockers/tree/master/rpi/bitcoin-core)
### Connect already-running node
```shell
pi@SP-BTC01:~ $ docker network connect cyphernodenet btcnode
```
## Test deployment from outside of the Swarm
```shell
id="001";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="2df1eeea370eacdc5cf7e96c2d82140d1568079a5d4d87006ec8718a98883b36";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -H "Authorization: Bearer $token" -k https://localhost/getbestblockhash
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -H "Authorization: Bearer $token" -k https://localhost/getbalance
```
## Test deployment from any host of the swarm
```shell
echo "GET /getbestblockinfo" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
echo "GET /getbalance" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
echo "GET /getbestblockhash" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
echo "GET /getblockinfo/00000000a64e0d1ae0c39166f4e8717a672daf3d61bf7bbb41b0f487fcae74d2" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
curl -v -H "Content-Type: application/json" -d '{"address":"2MsWyaQ8APbnqasFpWopqUKqsdpiVY3EwLE","amount":0.2}' cyphernode:8888/spend
echo "GET /ln_getinfo" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
echo "GET /ln_newaddr" | docker run --rm -i --network=cyphernodenet alpine nc cyphernode:8888 -
curl -v -H "Content-Type: application/json" -d '{"msatoshi":10000,"label":"koNCcrSvhX3dmyFhW","description":"Bylls order #10649","expiry":900}' cyphernode:8888/ln_create_invoice
curl -v -H "Content-Type: application/json" -d '{"bolt11":"lntb1pdca82tpp5gv8mn5jqlj6xztpnt4r472zcyrwf3y2c3cvm4uzg2gqcnj90f83qdp2gf5hgcm0d9hzqnm4w3kx2apqdaexgetjyq3nwvpcxgcqp2g3d86wwdfvyxcz7kce7d3n26d2rw3wf5tzpm2m5fl2z3mm8msa3xk8nv2y32gmzlhwjved980mcmkgq83u9wafq9n4w28amnmwzujgqpmapcr3","msatoshi":10000,"description":"Bitcoin Outlet order #7082"}' cyphernode:8888/ln_pay
echo "GET /getbestblockinfo" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getbalance" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getbestblockhash" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getblockinfo/00000000a64e0d1ae0c39166f4e8717a672daf3d61bf7bbb41b0f487fcae74d2" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /ln_getinfo" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
```

102
doc/README.md Normal file
View File

@@ -0,0 +1,102 @@
# Cyphernode
Indirection layer (API) between your applications and Bitcoin-related services.
Your application <-----> Cyphernode
Cyphernode is:
Gatekeeper (TLS, JWT) <-----> Proxy (Cyphernode Core) <-----> Feature Containers
- By default, the only exposed (published) port is 443 (HTTPS) on the Gatekeeper.
- By default, everything else is accessible exclusively within the encrypted overlay network.
- If your system is distributed (customized Cyphernode setup), the overlay network...
- ...should be doubly encrypted with a VPN or SSH tunnel
- ...the hosts should be secured and the VPN/SSH tunnel's scope should be limited by iptables rules on each host.
- We can have different Bitcoin Nodes for watching and spending, giving the flexibility to have different security models on each.
- Only the Proxy has Bitcoin Node RPC credentials.
- To manually manage the Proxy (and have access to it), one has to gain access to the Docker host servers as a docker user.
## Setting Up
### Installer
We are providing an installer to help you set up Cyphernode.
#### See [Instructions for installation](INSTALL.md) for automatic install instructions
All the Docker images used by Cyphernode have been prebuilt for x86 and ARM (RPi) architectures and are hosted on the Docker Hub public registry, in the Cyphernode repository (https://hub.docker.com/u/cyphernode/).
### Build from sources
However, it is possible for you to build from sources. In that case, please refer to the files INSTALL-MANUALLY.md and INSTALL-MANUAL-STEPS.md.
#### See [Instructions for manual installation](INSTALL-MANUALLY.md) for manual build and install instructions
#### See [Step-by-step detailed instructions](INSTALL-MANUAL-STEPS.md) for real-world copy-paste standard install instructions
# For Your Information
Current components in Cyphernode:
- Gatekeeper: front door where all requests hit Cyphernode. Takes care of TLS, authentication and authorization.
- Proxy: request handler. Dispatches authenticated and authorized requests to the right component. Uses a SQLite3 database for its tasks.
- Proxy Cron: scheduler. Calls the proxy at regular intervals for asynchronous tasks like payment notifications on watches, callbacks when OTS files are ready, etc. (see the sketch after this list).
- Pycoin: Bitcoin keys and addresses tool. Used by Cyphernode to derive addresses from an xPub and a derivation path.
- Bitcoin: Bitcoin Core node. Cyphernode uses a watching wallet for watchers (no funds) and a spending wallet for spending. Mandatory component, but optionally part of Cyphernode installation, as we can use an already running Bitcoin Core node.
- Lightning: optional. C-Lightning node. The LN node will use the Bitcoin node for its tasks.
- OTSclient: optional. Used to stamp hashes on the Bitcoin blockchain.
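As an illustration of the Proxy Cron / Proxy interaction referenced above: the scheduler simply hits an internal-only endpoint on the Proxy. The endpoint name comes from the proxycron PROXY_URL value in docker-compose-sample.yml; this sketch only works from inside the Docker network:
```shell
# Roughly what proxycron does on each tick: ask the proxy to process pending callbacks
echo "GET /executecallbacks" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
```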
Future components:
- Trezor-connect: use a Trezor to authenticate. Will be used to log into the control panel (see next point) and other features.
- Control Panel: web control panel, with different functionalities depending on the user's group: admin, spender, watcher.
- Grafana: displays stats graphs on Cyphernode use and load.
- PGP: signs anything with your PGP key.
- PSBT: sign transactions using a Coldcard.
- Electrum (Personal) Server: would be part of the installation for your convenience, but not really used by Cyphernode.
## Bitcoin Core Node
If you decide to have a pruned Bitcoin Core node, the fee calculation on incoming transactions won't work. We can't compute the fees on someone else's transactions without having the whole indexed blockchain.
## Lightning Network
Currently, the LN functionalities of Cyphernode are very limited. Maybe even hard to use. You can:
- Get information on your LN node: ln_getinfo
- Get a Bitcoin address to which to send the funds to be used by your LN node: ln_newaddr
- Create an invoice, so people can send you payments; the burden of creating a channel/route to you is on the payer: ln_create_invoice
- Pay an invoice. You have to have the invoice and your LN node must already be connected to the network: ln_pay
Basic and crucial functionalities that are missing (you have to manually use lightning-cli on your LN node; see the sketch after this list):
- Be notified when a LN payment is received
- Connect your node to the LN network
- Open/close channels
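Until those are exposed through the API, the usual workaround is to run lightning-cli inside the c-lightning container. A rough sketch, where the `lightning` name filter and the peer id/host/amount are placeholders to adapt to your own setup:
```shell
# Find the c-lightning container (assumes the service is named "lightning" as in docker-compose-sample.yml)
LN_CONTAINER=$(docker ps -q -f "name=lightning")

# Connect to a peer: node_id@host:port
docker exec -it "$LN_CONTAINER" lightning-cli connect 02abcdef...@peer.example.com:9735

# Fund a channel with that peer (amount in satoshis)
docker exec -it "$LN_CONTAINER" lightning-cli fundchannel 02abcdef... 100000

# Close the channel later, by peer id
docker exec -it "$LN_CONTAINER" lightning-cli close 02abcdef...
```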
## Manually test your installation through the Gatekeeper
If you need the authorization header to copy/paste in another tool, put your API ID (id=) and API key (k=) in the following command:
```shell
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+60))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";echo "Bearer $token"
```
Using curl directly on the command line, put your API ID (id=) and API key (k=) in the following commands:
```shell
id="001";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="2df1eeea370eacdc5cf7e96c2d82140d1568079a5d4d87006ec8718a98883b36";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Authorization: Bearer $token" -k https://127.0.0.1/getbestblockhash
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Authorization: Bearer $token" -k https://127.0.0.1/getbalance
id="003";h64=$(echo -n "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64);p64=$(echo -n "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64);k="b9b8d527a1a27af2ad1697db3521f883760c342fc386dbc42c4efbb1a4d5e0af";s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1);token="$h64.$p64.$s";curl -v -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' -H "Authorization: Bearer $token" -k https://127.0.0.1/ots_stamp
```
## Manually test your installation directly on the Proxy:
```shell
echo "GET /getbestblockinfo" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getbalance" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getbestblockhash" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /getblockinfo/00000000a64e0d1ae0c39166f4e8717a672daf3d61bf7bbb41b0f487fcae74d2" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
echo "GET /ln_getinfo" | docker run --rm -i --network=cyphernodenet alpine nc proxy:8888 -
```

120
docker-compose-sample.yml Normal file
View File

@@ -0,0 +1,120 @@
version: "3"
services:
gatekeeper:
# HTTP authentication API gate
environment:
- "TRACING=1"
image: cyphernode/gatekeeper:latest
ports:
- "443:443"
volumes:
- "~/cn-files/cn-gatekeeper/certs:/etc/ssl/certs"
- "~/cn-files/cn-gatekeeper/private:/etc/ssl/private"
- "~/cn-files/cn-gatekeeper/keys.properties:/etc/nginx/conf.d/keys.properties"
- "~/cn-files/cn-gatekeeper/api.properties:/etc/nginx/conf.d/api.properties"
command: $USER
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
proxy:
command: $USER ./startproxy.sh
# Bitcoin Mini Proxy
environment:
- "TRACING=1"
- "WATCHER_BTC_NODE_RPC_URL=bitcoin:18332/wallet/watching01.dat"
- "WATCHER_BTC_NODE_RPC_USER=bitcoin:CHANGEME"
- "WATCHER_BTC_NODE_RPC_CFG=/tmp/watcher_btcnode_curlcfg.properties"
- "SPENDER_BTC_NODE_RPC_URL=bitcoin:18332/wallet/spending01.dat"
- "SPENDER_BTC_NODE_RPC_USER=bitcoin:CHANGEME"
- "SPENDER_BTC_NODE_RPC_CFG=/tmp/spender_btcnode_curlcfg.properties"
- "PROXY_LISTENING_PORT=8888"
- "DB_PATH=/proxy/db"
- "DB_FILE=/proxy/db/proxydb"
- "PYCOIN_CONTAINER=pycoin:7777"
- "WATCHER_BTC_NODE_PRUNED=false"
- "OTSCLIENT_CONTAINER=otsclient:6666"
- "OTS_FILES=/proxy/otsfiles"
image: cyphernode/proxy:latest
volumes:
- "~/cn-files/cn-proxydb:/proxy/db"
- "~/cn-files/cn-lndata:/.lightning"
- "~/cn-files/cn-otsfiles:/proxy/otsfiles"
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
proxycron:
environment:
- "PROXY_URL=proxy:8888/executecallbacks"
image: cyphernode/proxycron:latest
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
pycoin:
# Pycoin
command: $USER ./startpycoin.sh
image: cyphernode/pycoin:latest
environment:
- "TRACING=1"
- "PYCOIN_LISTENING_PORT=7777"
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
lightning:
command: $USER lightningd
image: cyphernode/clightning:v0.6.2
volumes:
- "~/cn-files/cn-lndata:/.lightning"
- "~/cn-files/cn-lndata/bitcoin.conf:/.bitcoin/bitcoin.conf"
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
otsclient:
environment:
- "TRACING=1"
- "OTSCLIENT_LISTENING_PORT=6666"
image: cyphernode/otsclient:latest
# deploy:
# placement:
# constraints: [node.hostname==dev]
volumes:
- "~/cn-files/cn-otsfiles:/otsfiles"
command: $USER /script/startotsclient.sh
networks:
- cyphernodenet
restart: always
bitcoin:
command: $USER bitcoind
image: cyphernode/bitcoin:v0.17.0
volumes:
- "~/cn-files/cn-btcdata:/.bitcoin"
networks:
- cyphernodenet
restart: always
networks:
cyphernodenet:
external: true

View File

@@ -1,91 +0,0 @@
version: "3"
services:
authapi:
# HTTP authentication API gate
env_file:
- api_auth_docker/env.properties
image: authapi
ports:
# - "80:80"
- "443:443"
volumes:
- "~/cyphernode-ssl/certs:/etc/ssl/certs"
- "~/cyphernode-ssl/private:/etc/ssl/private"
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
cyphernode:
# Bitcoin Mini Proxy
env_file:
- proxy_docker/env.properties
image: btcproxyimg
volumes:
# Variable substitutions don't work
# Match with DB_PATH in proxy_docker/env.properties
- "~/btcproxydb:/proxy/db"
# c-lightning looks for $HOME/.lightning/, and $HOME is set to / in the container
- "~/lndata:/.lightning"
# deploy:
# placement:
# constraints: [node.hostname==dev]
command: $USER ./startproxy.sh
networks:
- cyphernodenet
proxycronnode:
# Async jobs
env_file:
- cron_docker/env.properties
image: proxycronimg
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
pycoinnode:
# Pycoin
env_file:
- pycoin_docker/env.properties
image: pycoinimg
# deploy:
# placement:
# constraints: [node.hostname==dev]
command: $USER ./startpycoin.sh
networks:
- cyphernodenet
clightningnode:
# c-lightning lightning network node
image: clnimg
ports:
- "9735:9735"
volumes:
- "~/lndata:/.lightning"
# deploy:
# placement:
# constraints: [node.hostname==dev]
command: $USER lightningd
networks:
- cyphernodenet
btcnode:
# Bitcoin node
image: btcnode
# ports:
# - "18333:18333"
# - "29000:29000"
# - "8333:8333"
volumes:
- "~/btcdata:/.bitcoin"
command: $USER bitcoind
networks:
- cyphernodenet
networks:
cyphernodenet:
external: true

6
install/.dockerignore Normal file
View File

@@ -0,0 +1,6 @@
SatoshiPortal
data
script
generator-cyphernode/node_modules
generator-cyphernode/package-lock
generator-cyphernode/__tests__

1
install/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
data

18
install/Dockerfile Normal file
View File

@@ -0,0 +1,18 @@
FROM node:11.1-alpine
RUN apk add --update bash su-exec p7zip openssl nano && rm -rf /var/cache/apk/*
RUN mkdir -p /app
RUN mkdir /.config
RUN chmod a+rwx /.config
RUN npm install -g yo
COPY generator-cyphernode /app
WORKDIR /app/generator-cyphernode
RUN npm link
WORKDIR /data
ENV EDITOR=/usr/bin/nano
ENTRYPOINT ["/sbin/su-exec"]
RUN find / -perm +6000 -type f -exec chmod a-s {} \; || true

0
install/data/.gitkeep Normal file
View File

View File

@@ -0,0 +1,2 @@
node_modules
coverage

View File

@@ -0,0 +1,10 @@
[
{
"name": "Lightning node",
"value": "lightning"
},
{
"name": "Opentimestamps client",
"value": "otsclient"
}
]

View File

@@ -0,0 +1,42 @@
{
"features": "What optional <font underline='true'>features</font> do you want me to activate?",
"net": "Which Bitcoin <font underline='true'>network</font> do you want Cyphernode to run on?",
"run_as_different_user": "I recommend running Cyphernode as a <font underline='true'>different user</font> when possible. Using your current user would give Cyphernode your current access rights, which could be a security issue especially if you are a sudoer. Please note that this feature is not supported on OSX at runtime, but you will be fine activating it in case you want to use the configuration file on another machine.",
"username": "Run Cyphernode as <font underline='true'>what user</font>? I recommend user <font color='# 0000ff'>cyphernode</font>. If the user does not exist, I will create it for you.",
"use_xpub": "Cyphernode can derive Bitcoin addresses from an xPub and the derivation path you want. If you want, you can provide your xPub and derivation path right now and call 'derive' with only the index instead of having to pass your xPub and derivation path on each call.",
"xpub": "Cyphernode can derive addresses from your <font underline='true'>default xPub key</font>. With that functionality, you don't have to provide your xPub every time you call the derivation endpoints.",
"derivation_path": "Cyphernode can derive addresses from your <font underline='true'>default derivation path</font>. With that functionality, you don't have to provide your derivation path every time you call the derivation endpoints.",
"proxy_datapath": "The Cyphernode proxy container, which routes all the requests to the right services uses a sqlite3 database to keep track of some things. This DB will be mounted from a <font underline='true'>local path</font>, easy to back up from outside Docker. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>",
"proxy_datapath_custom": " ",
"gatekeeper_clientkeyspassword": "The Gatekeeper checks all the incoming requests for the right permissions before delegating them to the proxy. Following the JWT standard, it uses HMAC signature verification to allow or deny access. Signatures are created and verified using secret keys. I am going to generate the secret keys and keep them in an encrypted file. You will be able to download this encrypted file later. Please provide the <font underline='true'>encryption passphrase</font>.",
"gatekeeper_clientkeyspassword_c": " ",
"gatekeeper_recreatekeys": "The Gatekeeper keys already exist, do you want to <font underline='true'>regenerate</font> them? This will overwrite existing ones.",
"gatekeeper_recreatecert": "The Gatekeeper TLS (SSL) certificates already exist, do you want to <font underline='true'>regenerate</font> them? This will overwrite existing ones.",
"gatekeeper_datapath": "The Gatekeeper's files (TLS certs, HMAC keys, Groups/API) will be stored in a container's mounted directory. Please provide the <font underline='true'>local mounted path</font> to that directory. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>",
"gatekeeper_datapath_custom": "Provide the <font underline='true'>full path name</font> where the Gatekeeper's files will be saved.",
"gatekeeper_edit_apiproperties": "If you know what you are doing, it is possible to manually edit the API endpoints/groups authorization. (Not recommended)",
"gatekeeper_apiproperties": "You are about to edit the api.properties file. The format of the file is pretty simple: for each action, you will find what access group can access it. <font color='# 0000ff'>Admin</font> group can do what <font color='# 0000ff'>Spender</font> group can, and <font color='# 0000ff'>Spender</font> group can do what <font color='# 0000ff'>Watcher</font> group can. <font color='# 0000ff'>Internal</font> group is for the endpoints accessible only within the Docker network, like the backoffice tasks used by the Cron container. The access groups for each API id/key are found in the <font color='# 0000ff'>keys.properties</font> file.",
"gatekeeper_cns": "I use <font underline='true'>domain names</font> and/or <font underline='true'>IP addresses</font> to create valid TLS certificates. For example, if <font color='# 0000ff'>https://cyphernodehost/getbestblockhash</font> and <font color='# 0000ff'>https://192.168.7.44/getbestblockhash</font> will be used, enter <font color='# 0000ff'>cyphernodehost, 192.168.7.44</font> as a possible domains. <font color='# 0000ff'>127.0.0.1, localhost, gatekeeper</font> will be automatically added to your list. Make sure the provided domain names are in your DNS or client's hosts file and is reachable.",
"bitcoin_mode": "Cyphernode can spawn a new <font underline='true'>Bitcoin Core</font> full node for its own use. But if you already have a Bitcoin Core node running, Cyphernode can use that.",
"bitcoin_node_ip": "Cyphernode uses <font color='#00ff00'>Bitcoin Core</font> RPC interface for its tasks. Please provide the <font underline='true'>IP address</font> of your current Bitcoin Core node.",
"bitcoin_rpcuser": "Bitcoin Core's <font underline='true'>RPC username</font> used by Cyphernode when calling the node.",
"bitcoin_rpcpassword": "Bitcoin Core's <font underline='true'>RPC password</font> used by Cyphernode when calling the node.",
"bitcoin_prune": "If you don't have at least 350GB of disk space, you should run Bitcoin Core in <font underline='true'>prune mode</font>. <font color='#00ff00'>NOTE</font>: when running Bitcoin Core in prune mode, the incoming transactions' fees cannot be computed by Cyphernode and won't be part of the addresses watching's callbacks payload.",
"bitcoin_prune_size": "Minimum <font underline='true'>size</font> is <font color='#00ff00'>550</font>. This option specifies the maximum number in MB Bitcoin Core will allocate for raw block & undo data.",
"bitcoin_uacomment": "<font underline='true'>User Agent</font> string used by Bitcoin Core. (Optional)",
"bitcoin_datapath": "<font underline='true'>Path name</font> to where Bitcoin Core's data files (blockchain data, wallets, configs, etc.) are stored. This directory will be mounted into the Bitcoin node's container. If you already have a sync'ed node, you can copy data there to be used by the node, instead of resyncing everything. <font color='#00ff00'>NOTE</font>: only copy chainstate/ and blocks/ contents. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>",
"bitcoin_datapath_custom": " ",
"bitcoin_expose": "By default, Bitcoin node ports (RPC and protocol) won't be <font underline='true'>published</font> outside of Docker. Do you want to expose them so that your node can be accessed from outside of the Docker network?",
"lightning_implementation": "Multiple <font underline='true'>LN implementations</font> exist. Please choose the one you want to use with Cyphernode.",
"lightning_nodename": "LN nodes have names. Choose the <font underline='true'>name you want</font> for yours.",
"lightning_nodecolor": "LN nodes have colors. Choose the <font underline='true'>color you want</font> for yours in RGB format (RRGGBB). For example, pure red would be <font color='#ff0000'>ff0000</font>.",
"lightning_datapath": "<font underline='true'>Path name</font> to where LN's data files are stored. This directory will be mounted into the LN node's container. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>",
"lightning_datapath_custom": " ",
"lightning_expose": "By default, LN node port will be <font underline='true'>published</font> outside of Docker. Do you want to hide it so that your node can't be accessed from outside of the Docker network?",
"otsclient_datapath": "<font underline='true'>Full path</font> where the OTS files will be stored. This path will be mounted into the otsclient container which will create the OTS files when <font color='#00ff00'>stamping</font> and update them when <font color='#00ff00'>upgrading</font> stamps. It will also be mounted to the proxy container so that it can serve the <font color='#00ff00'>ots_getfile</font> and send the OTS files to clients. <font color='#ff0000'>If running on OSX, check mountable directories in Docker's File Sharing configs.</font>",
"otsclient_datapath_custom": " ",
"installer_mode": "Only one <font underline='true'>installation mode</font> is supported, right now: <font color='#0000ff'>local docker (self-hosted)</font>. Choose wisely ;-)",
"installer_cleanup": "Do you want to <font underline='true'>remove</font> this configurator Docker image after installation? This would free about 150MB of disk space.",
"docker_mode": "Cyphernode Docker services can be run using <font underline='true'>Docker Swarm</font> (https://docs.docker.com/engine/swarm/) or <font underline='true'>docker-compose</font> (https://docs.docker.com/compose/overview/). Both will work, some users prefer one to another depending on deployment types, scalability, current framework, etc.",
"__default__": ""
}

View File

@@ -0,0 +1,530 @@
const Generator = require('yeoman-generator');
const chalk = require('chalk');
const wrap = require('wrap-ansi');
const html2ansi = require('./lib/html2ansi.js');
const fs = require('fs');
const validator = require('validator');
const path = require("path");
const coinstring = require('coinstring');
const name = require('./lib/name.js');
const Archive = require('./lib/archive.js');
const ApiKey = require('./lib/apikey.js');
const Cert = require('./lib/cert.js');
const featureChoices = require('./features.json');
const uaCommentRegexp = /^[a-zA-Z0-9 \.,:_\-\?\/@]+$/; // TODO: look for spec of unsafe chars
const userRegexp = /^[a-zA-Z0-9\._\-]+$/;
const reset = '\u001B8\u001B[u';
const clear = '\u001Bc';
const configFileVersion='0.1.0';
const defaultAPIProperties = `
# Watcher can:
action_watch=watcher
action_unwatch=watcher
action_getactivewatches=watcher
action_getbestblockhash=watcher
action_getbestblockinfo=watcher
action_getblockinfo=watcher
action_gettransaction=watcher
action_ln_getinfo=watcher
action_ln_create_invoice=watcher
# Spender can do what the watcher can do, plus:
action_getbalance=spender
action_getnewaddress=spender
action_spend=spender
action_addtobatch=spender
action_batchspend=spender
action_deriveindex=spender
action_derivepubpath=spender
action_ln_pay=spender
action_ln_newaddr=spender
action_ots_stamp=spender
action_ots_getfile=spender
# Admin can do what the spender can do, plus:
# Should be called from inside the Docker network only:
action_conf=internal
action_executecallbacks=internal
action_ots_backoffice=internal
`;
const prefix = function() {
return chalk.green('Cyphernode')+': ';
};
let prompters = [];
fs.readdirSync(path.join(__dirname, "prompters")).forEach(function(file) {
prompters.push(require(path.join(__dirname, "prompters",file)));
});
const sleep = function(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
const easeOutCubic = function(t, b, c, d) {
return c*((t=t/d-1)*t*t+1)+b;
}
const splash = async function() {
let frames = [];
fs.readdirSync(path.join(__dirname,'splash')).forEach(function(file) {
frames.push(fs.readFileSync(path.join(__dirname,'splash',file)));
});
const frame0 = frames[0];
const frame0lines = frame0.toString().split('\n');
const frame0lineCount = frame0lines.length;
const steps = 10;
process.stdout.write(clear);
await sleep(150);
for( let i=0; i<=steps; i++ ) {
const pos = easeOutCubic( i, 0, frame0lineCount, steps ) | 0;
process.stdout.write(reset);
for( let l=frame0lineCount-pos; l<frame0lineCount; l++ ) {
process.stdout.write( frame0lines[l]+'\n' );
}
await sleep(33);
}
await sleep(400);
for( let frame of frames ) {
process.stdout.write(reset);
process.stdout.write(frame.toString());
await sleep(33);
}
await sleep(400);
process.stdout.write('\n');
}
module.exports = class extends Generator {
constructor(args, opts) {
super(args, opts);
if( args.indexOf('recreate') !== -1 ) {
this.recreate = true;
}
this.featureChoices = featureChoices;
if( fs.existsSync(path.join('/data', 'exitStatus.sh')) ) {
fs.unlinkSync(path.join('/data', 'exitStatus.sh'));
}
}
async _initConfig() {
const versionOverride = process.env.VERSION_OVERRIDE==='true';
if( fs.existsSync(this.destinationPath('config.7z')) ) {
let r = {};
if( process.env.CFG_PASSWORD ) {
this.configurationPassword = process.env.CFG_PASSWORD;
} else {
process.stdout.write(reset);
while( !r.password ) {
r = await this.prompt([{
type: 'password',
name: 'password',
message: prefix()+chalk.bold.blue('Enter your configuration password?'),
filter: this._trimFilter
}]);
}
this.configurationPassword = r.password;
}
const archive = new Archive( this.destinationPath('config.7z'), this.configurationPassword );
r = await archive.readEntry('config.json');
if( r.error ) {
console.log(chalk.bold.red('Password is wrong. Have a nice day.'));
process.exit(1);
}
if( !r.value ) {
console.log(chalk.bold.red('config archive is corrupt.'));
process.exit(1);
}
try {
this.props = JSON.parse(r.value);
this.props.__version = this.props.__version || configFileVersion;
} catch( err ) {
console.log(chalk.bold.red('config archive is corrupt.'));
process.exit(1);
}
} else {
let r = {};
process.stdout.write(clear+reset);
while( !r.password0 || !r.password1 || r.password0 !== r.password1 ) {
if( r.password0 && r.password1 && r.password0 !== r.password1 ) {
console.log(chalk.bold.red('Passwords do not match')+'\n');
}
r = await this.prompt([{
type: 'password',
name: 'password0',
message: prefix()+chalk.bold.blue('Choose your configuration password'),
filter: this._trimFilter
},
{
type: 'password',
name: 'password1',
message: prefix()+chalk.bold.blue('Confirm your configuration password'),
filter: this._trimFilter
}]);
}
this.configurationPassword = r.password0;
this.props = {
__version: configFileVersion
};
}
if( this.props.__version !== configFileVersion ) {
// migrate here
}
this.props.gatekeeper_statuspw = await new Cert().passwd(this.configurationPassword);
if( versionOverride ) {
delete this.props.gatekeeper_version;
delete this.props.proxy_version;
delete this.props.proxycron_version;
delete this.props.pycoin_version;
delete this.props.otsclient_version;
delete this.props.bitcoin_version;
delete this.props.lightning_version;
delete this.props.grafana_version;
}
this._assignConfigDefaults();
for( let c of this.featureChoices ) {
c.checked = this._isChecked( 'features', c.value );
}
}
async prompting() {
await this._initConfig();
await sleep(1000);
await splash();
if( this.recreate ) {
// no prompts
return;
}
// save gatekeeper key password to check if it changed
this.gatekeeper_clientkeyspassword = this.props.gatekeeper_clientkeyspassword;
let r = await this.prompt([{
type: 'confirm',
name: 'enablehelp',
message: prefix()+'Enable help?',
default: this._getDefault( 'enablehelp' ),
}]);
this.props.enablehelp = r.enablehelp;
if( this.props.enablehelp ) {
this.help = require('./help.json');
}
let prompts = [];
for( let m of prompters ) {
prompts = prompts.concat(m.prompts(this));
}
return this.prompt(prompts).then(props => {
this.props = Object.assign(this.props, props);
});
}
async configuring() {
if( this.props.gatekeeper_recreatekeys ||
this.props.gatekeeper_keys.configEntries.length===0 ) {
const apikey = new ApiKey();
let configEntries = [];
let clientInformation = [];
apikey.setId('001');
apikey.setGroups(['watcher']);
await apikey.randomiseKey();
configEntries.push(apikey.getConfigEntry());
clientInformation.push(apikey.getClientInformation());
apikey.setId('002');
apikey.setGroups(['watcher','spender']);
await apikey.randomiseKey();
configEntries.push(apikey.getConfigEntry());
clientInformation.push(apikey.getClientInformation());
apikey.setId('003');
apikey.setGroups(['watcher','spender','admin']);
await apikey.randomiseKey();
configEntries.push(apikey.getConfigEntry());
clientInformation.push(apikey.getClientInformation());
this.props.gatekeeper_keys = {
configEntries: configEntries,
clientInformation: clientInformation
}
}
if( this.props.gatekeeper_recreatecert ||
!this.props.gatekeeper_sslcert ||
!this.props.gatekeeper_sslkey ) {
delete this.props.gatekeeper_recreatecert;
const cert = new Cert();
console.log(chalk.bold.green( '☕ Generating gatekeeper cert. This may take a while ☕' ));
try {
const cns = (this.props.gatekeeper_cns||'').split(',').map(e=>e.trim().toLowerCase()).filter(e=>!!e);
const result = await cert.create(cns);
if( result.code === 0 ) {
this.props.gatekeeper_sslkey = result.key.toString();
this.props.gatekeeper_sslcert = result.cert.toString();
// Total array of cns, used to create Cyphernode's URLs
this.props.cns = []
result.cns.forEach(e => {
this.props.cns.push(e)
})
} else {
console.log(chalk.bold.red( 'error! Gatekeeper cert was not created' ));
}
} catch( err ) {
console.log(chalk.bold.red( 'error! Gatekeeper cert was not created' ));
}
}
delete this.props.gatekeeper_recreatekeys;
}
async writing() {
const configJsonString = JSON.stringify(this.props, null, 4);
const archive = new Archive( this.destinationPath('config.7z'), this.configurationPassword );
if( !await archive.writeEntry( 'config.json', configJsonString ) ) {
console.log(chalk.bold.red( 'error! Config archive was not written' ));
}
const pathProps = [
'gatekeeper_datapath',
'proxy_datapath',
'bitcoin_datapath',
'lightning_datapath',
'otsclient_datapath'
];
for( let pathProp of pathProps ) {
if( this.props[pathProp] === '_custom' ) {
this.props[pathProp] = this.props[pathProp+'_custom'] || '';
}
}
for( let m of prompters ) {
const name = m.name();
for( let t of m.templates(this.props) ) {
const p = path.join(name,t);
this.fs.copyTpl(
this.templatePath(p),
this.destinationPath(p),
this.props
);
}
}
if( this.props.gatekeeper_keys && this.props.gatekeeper_keys.clientInformation ) {
if( this.gatekeeper_clientkeyspassword !== this.props.gatekeeper_clientkeyspassword &&
fs.existsSync(this.destinationPath('client.7z')) ) {
fs.unlinkSync( this.destinationPath('client.7z') );
}
const archive = new Archive( this.destinationPath('client.7z'), this.props.gatekeeper_clientkeyspassword );
if( !await archive.writeEntry( 'keys.txt', this.props.gatekeeper_keys.clientInformation.join('\n') ) ) {
console.log(chalk.bold.red( 'error! Client gatekeeper key archive was not written' ));
}
if( !await archive.writeEntry( 'cacert.pem', this.props.gatekeeper_sslcert ) ) {
console.log(chalk.bold.red( 'error! Client gatekeeper key archive was not written' ));
}
}
fs.writeFileSync(path.join('/data', 'exitStatus.sh'), 'EXIT_STATUS=0');
}
install() {
}
/* some utils */
_assignConfigDefaults() {
this.props = Object.assign( {
features: [],
enablehelp: true,
net: 'testnet',
xpub: '',
derivation_path: '0/n',
installer_mode: 'docker',
devmode: false,
devregistry: false,
run_as_different_user: true,
username: 'cyphernode',
docker_mode: 'compose',
bitcoin_rpcuser: 'bitcoin',
bitcoin_rpcpassword: 'CHANGEME',
bitcoin_uacomment: '',
bitcoin_prune: false,
bitcoin_prune_size: 550,
bitcoin_datapath: '',
bitcoin_node_ip: '',
bitcoin_mode: 'internal',
bitcoin_expose: false,
lightning_expose: true,
gatekeeper_apiproperties: defaultAPIProperties,
gatekeeper_ipwhitelist: '',
gatekeeper_keys: { configEntries: [], clientInformation: [] },
gatekeeper_sslcert: '',
gatekeeper_sslkey: '',
gatekeeper_cns: process.env['DEFAULT_CERT_HOSTNAME'] || '',
proxy_datapath: '',
lightning_implementation: 'c-lightning',
lightning_datapath: '',
lightning_nodename: name.generate(),
lightning_nodecolor: '',
otsclient_datapath: '',
installer_cleanup: false,
default_username: process.env.DEFAULT_USER || '',
gatekeeper_version: process.env.GATEKEEPER_VERSION || 'latest',
proxy_version: process.env.PROXY_VERSION || 'latest',
proxycron_version: process.env.PROXYCRON_VERSION || 'latest',
pycoin_version: process.env.PYCOIN_VERSION || 'latest',
otsclient_version: process.env.OTSCLIENT_VERSION || 'latest',
bitcoin_version: process.env.BITCOIN_VERSION || 'latest',
lightning_version: process.env.LIGHTNING_VERSION || 'latest'
}, this.props );
}
_isChecked( name, value ) {
return this.props && this.props[name] && this.props[name].indexOf(value) != -1 ;
}
_getDefault( name ) {
return this.props && this.props[name];
}
_optional(input,validator) {
if( input === undefined ||
input === null ||
input === '' ) {
return true;
}
return validator(input);
}
_ipOrFQDNValidator( host ) {
host = (host+"").trim();
if( !(validator.isIP(host) ||
validator.isFQDN(host)) ) {
throw new Error( 'No IP address or fully qualified domain name' )
}
return true;
}
_xkeyValidator( xpub ) {
// TODO: check for version
if( !coinstring.isValid( xpub ) ) {
throw new Error('Not an extended key.');
}
return true;
}
_pathValidator( p ) {
return true;
}
_derivationPathValidator( path ) {
return true;
}
_colorValidator(color) {
if( !validator.isHexadecimal(color) ) {
throw new Error('Not a hex color.');
}
return true;
}
_lightningNodeNameValidator(name) {
if( !name || name.length > 32 ) {
throw new Error('Please enter anything shorter than 32 characters');
}
return true;
}
_notEmptyValidator( path ) {
if( !path ) {
throw new Error('Please enter something');
}
return true;
}
_usernameValidator( user ) {
if( !userRegexp.test( user ) ) {
throw new Error('Choose a valid username');
}
return true;
}
_UACommentValidator( comment ) {
if( !uaCommentRegexp.test( comment ) ) {
throw new Error('Unsafe characters in UA comment. Please use only a-z, A-Z, 0-9, SPACE and .,:_?@');
}
return true;
}
_trimFilter( input ) {
return (input+"").trim();
}
_featureChoices() {
return this.featureChoices;
}
_getHelp( topic ) {
if( !this.props.enablehelp || !this.help ) {
return '';
}
const helpText = this.help[topic] || this.help['__default__'];
if( !helpText || helpText === '' ) {
return '';
}
return "\n\n"+wrap( html2ansi(helpText),82 )+"\n\n";
}
};

View File

@@ -0,0 +1,76 @@
const spawn = require('child_process').spawn;
module.exports = class ApiKey {
constructor( id, groups, key, script ) {
this.setId(id || '001');
this.setGroups(groups || ['admin'] );
this.setScript(script || 'eval ugroups_${kapi_id}=${kapi_groups};eval ukey_${kapi_id}=${kapi_key}' );
this.setKey(key);
}
setGroups( groups ) {
this.groups = groups;
}
setId( id ) {
this.id = id;
}
setScript( script ) {
this.script = script;
}
setKey( key ) {
this.key = key;
}
async randomiseKey() {
try {
//const dd = spawn('/bin/dd if=/dev/urandom bs=32 count=1 | /usr/bin/xxd -pc 32');
const dd = spawn("dd if=/dev/urandom bs=32 count=1 | xxd -pc32", [], {stdio: ['ignore', 'pipe', 'ignore' ], shell: true} );
const result = await new Promise( function(resolve, reject ) {
let result = '';
dd.stdout.on('data', function( a,b,c) {
let chunk = a.toString().trim();
result += chunk;
});
dd.stdout.on('end', function() {
result = result.replace(/[^a-zA-Z0-9]/,'');
resolve(result);
});
dd.stdout.on('error', function(err) {
console.log(err);
reject(err);
})
});
this.key = result;
} catch( err ) {
console.log( err );
return;
}
}
getKey() {
return this.key;
}
getConfigEntry() {
if( !this.key ) {
return;
}
return `kapi_id="${this.id}";kapi_key="${this.key}";kapi_groups="${this.groups.join(',')}";${this.script}`;
}
getClientInformation() {
return `${this.id}=${this.key}`;
}
}
//dd if=/dev/urandom bs=32 count=1 2> /dev/null | xxd -pc 32

View File

@@ -0,0 +1,77 @@
const fs = require('fs');
const spawn = require('child_process').spawn;
const stringio = require('@rauschma/stringio');
const defaultArgs = ['-t7z', '-ms=on', '-mhe=on'];
module.exports = class Archive {
constructor( file, password ) {
this.file = file || 'archive.7z'
this.password = password;
}
async readEntry( entryName ) {
if( !entryName ) {
return;
}
let args = defaultArgs.slice();
args.unshift('x');
args.push( '-so' );
if( this.password ) {
args.push('-p'+this.password );
}
args.push( this.file )
args.push( entryName )
const archiver = spawn('7z', args, { stdio: ['ignore', 'pipe', 'ignore'] } );
const result = await stringio.readableToString(archiver.stdout);
try {
await stringio.onExit( archiver );
} catch( err ) {
return { error: err };
}
return { error: null, value: result };
}
async writeEntry( entryName, content ) {
if( !entryName ) {
return;
}
let args = defaultArgs.slice();
args.unshift('a');
if( this.password ) {
args.push('-p'+this.password );
}
args.push( '-si'+entryName );
args.push( this.file )
const archiver = spawn('7z', args, { stdio: ['pipe', 'ignore', 'ignore' ] } );
await stringio.streamWrite(archiver.stdin, content);
await stringio.streamEnd(archiver.stdin);
try {
await stringio.onExit( archiver );
} catch( err ) {
return false;
}
return true;
}
async deleteEntry( entryName ) {
if( !entryName ) {
return;
}
let args = defaultArgs.slice();
args.unshift('d');
if( this.password ) {
args.push('-p'+this.password );
}
args.push( this.file )
args.push( entryName )
const archiver = spawn('7z', args, { stdio: ['ignore', 'pipe','ignore'] } );
try {
await stringio.onExit( archiver );
} catch( err ) {
return false;
}
return true;
}
}

View File

@@ -0,0 +1,133 @@
const fs = require('fs');
const spawn = require('child_process').spawn;
const defaultArgs = ['req', '-x509', '-newkey', 'rsa:4096', '-nodes'];
const path = require('path');
const tmp = require('tmp');
const validator = require('validator');
const confTmpl = `
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_ca
prompt = no
[req_distinguished_name]
CN = %PRIMARY_CN%
[v3_ca]
subjectAltName = @alt_names
[alt_names]
%ALT_DOMAINS%
%ALT_IPS%
`;
const domainTmpl = 'DNS.%#% = %DOMAIN%';
const ipTmpl = 'IP.%#% = %IP%'
module.exports = class Cert {
constructor( options ) {
options = options || {};
this.args = options.args || { days: 3650 };
}
buildConfig( cns ) {
let ips = [];
let domains = [];
for( let cn of cns ) {
if( validator.isIP(cn) ) {
ips.push( cn );
} else {
domains.push( cn );
}
}
let conf = confTmpl;
if( !domains.length ) {
domains.push('localhost');
}
conf = conf.replace( '%PRIMARY_CN%', domains[0] )
let domainCount = 0;
domains = domains.map( d => domainTmpl.replace( '%#%', ++domainCount ).replace('%DOMAIN%', d) );
conf = conf.replace( '%ALT_DOMAINS%', domains.join('\n') || '' )
let ipCount = 0;
ips = ips.map( ip => ipTmpl.replace( '%#%', ++ipCount ).replace('%IP%', ip) );
conf = conf.replace( '%ALT_IPS%', ips.join('\n') || '' )
return conf;
}
async create( cns ) {
cns = cns || [];
cns = cns.concat(['127.0.0.1','localhost','gatekeeper']);
let args = defaultArgs.slice();
const certFileTmp = tmp.fileSync();
const keyFileTmp = tmp.fileSync();
const confFileTmp = tmp.fileSync();
args.push( '-out' );
args.push( certFileTmp.name );
args.push( '-keyout' );
args.push( keyFileTmp.name );
args.push( '-config' );
args.push( confFileTmp.name );
for( let k in this.args ) {
args.push( '-'+k);
args.push( this.args[k] );
}
const conf = this.buildConfig( cns );
fs.writeFileSync( confFileTmp.name, conf );
const openssl = spawn('openssl', args, { stdio: ['ignore', 'ignore', 'ignore'] } );
let code = await new Promise( function(resolve, reject) {
openssl.on('exit', (code) => {
resolve(code);
});
});
const cert = fs.readFileSync( certFileTmp.name );
const key = fs.readFileSync( keyFileTmp.name );
certFileTmp.removeCallback();
keyFileTmp.removeCallback();
confFileTmp.removeCallback();
return {
code: code,
key: key,
cert: cert,
cns: cns
}
}
getFullPath() {
return path.join( this.folder, this.filename );
}
async passwd( pw ) {
const openssl = spawn('openssl', [ "passwd", pw ], {stdio: ['ignore', 'pipe', 'ignore' ]});
const result = await new Promise( function(resolve, reject ) {
let result = '';
openssl.stdout.on('data', (data) => {
result += data.toString();
});
openssl.on('exit', (code) => {
resolve(result);
});
});
return result;
}
}

View File

@@ -0,0 +1,46 @@
const parse5 = require('parse5');
const chalk = require('chalk');
const options = {
scriptingEnabled: false
}
const convert = function(data){
// recursively flatten
let v = data.childNodes && data.childNodes.length?
data.childNodes.map(d=> convert(d)).join(''):
data.value?data.value:'';
switch(data.tagName){
case 'br':
v += '\n'
break
case 'font':
if( data.attrs && data.attrs.length ) {
for( let attr of data.attrs ) {
if( attr.name === 'color' && /^#[a-f0-9]{6}$/.test(attr.value) ) {
v = chalk.hex(attr.value)(v);
}
if( attr.name === 'bold' && attr.value === 'true' ) {
v = chalk.bold(v);
}
if( attr.name === 'italic' && attr.value === 'true' ) {
v = chalk.italic(v);
}
if( attr.name === 'underline' && attr.value === 'true' ) {
v = chalk.underline(v);
}
if( attr.name === 'strikethrough' && attr.value === 'true' ) {
v = chalk.strikethrough(v);
}
}
}
break;
}
return v;
}
module.exports = function(html){
return convert(parse5.parseFragment(html, options));
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,88 @@
const chalk = require('chalk');
const name = 'cyphernode';
const capitalise = function( txt ) {
return txt.charAt(0).toUpperCase() + txt.substr(1);
};
const prefix = function() {
return chalk.green(capitalise(name)+': ');
};
module.exports = {
name: function() {
return name;
},
prompts: function( utils ) {
return [{
// https://github.com/SBoudrias/Inquirer.js#question
// input, confirm, list, rawlist, expand, checkbox, password, editor
type: 'checkbox',
name: 'features',
message: prefix()+'What features do you want to add to your cyphernode?'+utils._getHelp('features'),
choices: utils._featureChoices()
},
{
type: 'list',
name: 'net',
default: utils._getDefault( 'net' ),
message: prefix()+'What net do you want to run on?'+utils._getHelp('net'),
choices: [{
name: "Testnet",
value: "testnet"
},{
name: "Mainnet",
value: "mainnet"
}]
},
{
type: 'confirm',
name: 'run_as_different_user',
default: utils._getDefault( 'run_as_different_user' ),
message: prefix()+'Run as different user?'+utils._getHelp('run_as_different_user')
},
{
when: function( props ) {
return props.run_as_different_user;
},
type: 'input',
name: 'username',
default: utils._getDefault( 'username' ),
message: prefix()+'What username will cyphernode run under?'+utils._getHelp('username'),
filter: utils._trimFilter,
validate: utils._usernameValidator
},
{
type: 'confirm',
name: 'use_xpub',
default: utils._getDefault( 'use_xpub' )||false,
message: prefix()+'Use a default xpub key to watch or generate addresses?'+utils._getHelp('use_xpub'),
},
{
when: function( props ) {
return props.use_xpub;
},
type: 'input',
name: 'xpub',
default: utils._getDefault( 'xpub' ),
message: prefix()+'What is your default xpub key?'+utils._getHelp('xpub'),
filter: utils._trimFilter,
validate: utils._xkeyValidator
},
{
when: function( props ) {
return props.use_xpub;
},
type: 'input',
name: 'derivation_path',
default: utils._getDefault( 'derivation_path' ),
message: prefix()+'What is your default derivation path?'+utils._getHelp('derivation_path'),
filter: utils._trimFilter,
validate: utils._derivationPathValidator
}];
},
templates: function( props ) {
return [];
}
};

View File

@@ -0,0 +1,102 @@
const chalk = require('chalk');
const name = 'gatekeeper';
const capitalise = function( txt ) {
return txt.charAt(0).toUpperCase() + txt.substr(1);
};
const prefix = function() {
return chalk.green(capitalise(name)+': ');
};
const hasAuthKeys = function( props ) {
return props &&
props.gatekeeper_keys &&
props.gatekeeper_keys.configEntries &&
props.gatekeeper_keys.configEntries.length > 0;
}
const hasCert = function( props ) {
return props &&
props.gatekeeper_sslkey &&
props.gatekeeper_sslcert
}
let password = '';
module.exports = {
name: function() {
return name;
},
prompts: function( utils ) {
// TODO: delete clientKeys archive when password changes
return [{
type: 'password',
name: 'gatekeeper_clientkeyspassword',
default: utils._getDefault( 'gatekeeper_clientkeyspassword' ),
message: prefix()+'Enter a password to protect your client keys with'+utils._getHelp('gatekeeper_clientkeyspassword'),
filter: utils._trimFilter,
validate: utils._notEmptyValidator
},
{
when: function( props ) {
// hacky hack
password = props.gatekeeper_clientkeyspassword;
return true;
},
type: 'password',
name: 'gatekeeper_clientkeyspassword_c',
default: utils._getDefault( 'gatekeeper_clientkeyspassword_c' ),
message: prefix()+'Confirm your client keys password.'+utils._getHelp('gatekeeper_clientkeyspassword_c'),
filter: utils._trimFilter,
validate: function( input ) {
if(input !== password) {
throw new Error( 'Client keys passwords do not match' );
}
return true;
}
},
{
when: function() { return hasAuthKeys( utils.props ); },
type: 'confirm',
name: 'gatekeeper_recreatekeys',
default: false,
message: prefix()+'Recreate gatekeeper keys?'+utils._getHelp('gatekeeper_recreatekeys')
},
{
when: function() { return hasCert( utils.props ); },
type: 'confirm',
name: 'gatekeeper_recreatecert',
default: false,
message: prefix()+'Recreate gatekeeper certificate?'+utils._getHelp('gatekeeper_recreatecert')
},
{
when: function(props) { return !hasCert( utils.props ) || props.gatekeeper_recreatecert },
type: 'input',
name: 'gatekeeper_cns',
default: utils._getDefault( 'gatekeeper_cns' ),
message: prefix()+'Gatekeeper cert CNs (IPs, domains, wildcard domains separated by commas)?'+utils._getHelp('gatekeeper_cns')
},
{
type: 'confirm',
name: 'gatekeeper_edit_apiproperties',
default: false,
message: prefix()+'Edit API properties?'+utils._getHelp('gatekeeper_edit_apiproperties')
},
{
when: function( props ) {
const r = props.gatekeeper_edit_apiproperties;
delete props.gatekeeper_edit_apiproperties;
return r;
},
type: 'editor',
name: 'gatekeeper_apiproperties',
message: utils._getHelp('gatekeeper_apiproperties')||' ',
default: utils._getDefault( 'gatekeeper_apiproperties' )
}];
},
templates: function( props ) {
return [ 'keys.properties', 'api.properties', 'cert.pem', 'key.pem', 'htpasswd' ];
}
};

View File

@@ -0,0 +1,109 @@
const chalk = require('chalk');
const name = 'bitcoin';
const capitalise = function( txt ) {
return txt.charAt(0).toUpperCase() + txt.substr(1);
};
const prefix = function() {
return chalk.green(capitalise(name)+': ');
};
const bitcoinExternal = function(props) {
return props.bitcoin_mode === 'external'
};
const bitcoinInternal = function(props) {
return props.bitcoin_mode === 'internal'
};
const bitcoinInternalAndPrune = function(props) {
return bitcoinInternal(props) && props.bitcoin_prune;
};
module.exports = {
name: function() {
return name;
},
prompts: function( utils ) {
return [
{
type: 'list',
name: 'bitcoin_mode',
default: utils._getDefault( 'bitcoin_mode' ),
message: prefix()+'Where is your bitcoin full node running?'+utils._getHelp('bitcoin_mode'),
choices: [
{
name: 'Nowhere! I want cyphernode to run one.',
value: 'internal'
},
{
name: 'I have a full node running.',
value: 'external'
}
]
},
{
when: bitcoinExternal,
type: 'input',
name: 'bitcoin_node_ip',
default: utils._getDefault( 'bitcoin_node_ip' ),
filter: utils._trimFilter,
validate: utils._ipOrFQDNValidator,
message: prefix()+'What is your full node IP address?'+utils._getHelp('bitcoin_node_ip'),
},
{
type: 'input',
name: 'bitcoin_rpcuser',
default: utils._getDefault( 'bitcoin_rpcuser' ),
message: prefix()+'Name of bitcoin rpc user?'+utils._getHelp('bitcoin_rpcuser'),
filter: utils._trimFilter,
},
{
type: 'password',
name: 'bitcoin_rpcpassword',
default: utils._getDefault( 'bitcoin_rpcpassword' ),
message: prefix()+'Password of bitcoin rpc user?'+utils._getHelp('bitcoin_rpcpassword'),
filter: utils._trimFilter,
},
{
when: bitcoinInternal,
type: 'confirm',
name: 'bitcoin_prune',
default: utils._getDefault( 'bitcoin_prune' ),
message: prefix()+'Run bitcoin node in prune mode?'+utils._getHelp('bitcoin_prune'),
},
{
when: bitcoinInternalAndPrune,
type: 'input',
name: 'bitcoin_prune_size',
default: utils._getDefault( 'bitcoin_prune_size' ),
message: prefix()+'What is the maximum size of your blockchain data in megabytes?'+utils._getHelp('bitcoin_prune_size'),
validate: function( input ) {
if( ! /^\d+$/.test(input) ) {
throw new Error( "Not a number");
}
if( input < 550 ) {
throw new Error( "At least 550 is required");
}
return true;
}
},
{
when: bitcoinInternal,
type: 'input',
name: 'bitcoin_uacomment',
default: utils._getDefault( 'bitcoin_uacomment' ),
message: prefix()+'Any UA comment?'+utils._getHelp('bitcoin_uacomment'),
filter: utils._trimFilter,
validate: (input)=> {return utils._optional(input,utils._UACommentValidator) }
}];
},
env: function( props ) {
return 'VAR0=VALUE0\nVAR1=VALUE1'
},
templates: function( props ) {
return ['bitcoin.conf']
}
};

View File

@@ -0,0 +1,81 @@
const path = require('path');
const chalk = require('chalk');
const name = 'lightning';
const capitalise = function( txt ) {
return txt.charAt(0).toUpperCase() + txt.substr(1);
};
const prefix = function() {
return chalk.green(capitalise(name)+': ');
};
const featureCondition = function(props) {
return props.features && props.features.indexOf( name ) != -1;
};
const templates = {
'lnd': [ path.join('lnd','lnd.conf') ],
'c-lightning': [ path.join('c-lightning','config'), path.join('c-lightning','bitcoin.conf') ]
};
module.exports = {
name: function() {
return name;
},
prompts: function( utils ) {
return [
/*
{
when: featureCondition,
type: 'list',
name: 'lightning_implementation',
default: utils._getDefault( 'lightning_implementation' ),
message: prefix()+'What lightning implementation do you want to use?'+utils._getHelp('lightning_implementation'),
choices: [
{
name: 'C-lightning',
value: 'c-lightning'
},
{
name: 'LND',
value: 'lnd'
}
]
},
*/
{
when: featureCondition,
type: 'input',
name: 'lightning_nodename',
default: utils._getDefault( 'lightning_nodename' ),
filter: utils._trimFilter,
validate: (input)=>{
if( !input.trim() ) {
return true;
}
return utils._lightningNodeNameValidator(input);
},
message: prefix()+'What is the name of your lightning node?'+utils._getHelp('lightning_nodename'),
},
{
when: featureCondition,
type: 'input',
name: 'lightning_nodecolor',
default: utils._getDefault( 'lightning_nodecolor' ),
filter: utils._trimFilter,
validate: (input)=>{
if( !input.trim() ) {
return true;
}
return utils._colorValidator(input);
},
message: prefix()+'What is the color of your lightning node?'+utils._getHelp('lightning_nodecolor'),
}];
},
templates: function( props ) {
return templates[props.lightning_implementation]
}
};

View File

@@ -0,0 +1,245 @@
const path = require('path');
const chalk = require('chalk');
const name = 'installer';
const capitalise = function( txt ) {
return txt.charAt(0).toUpperCase() + txt.substr(1);
};
const prefix = function() {
return chalk.green(capitalise(name)+': ');
};
const installerDocker = function(props) {
return props.installer_mode === 'docker'
};
module.exports = {
name: function() {
return name;
},
prompts: function( utils ) {
return [{
type: 'list',
name: 'installer_mode',
default: utils._getDefault( 'installer_mode' ),
message: prefix()+chalk.red('Where do you want to install cyphernode?')+utils._getHelp('installer_mode'),
choices: [{
name: "Docker",
value: "docker"
}]
},
{
when: installerDocker,
type: 'list',
name: 'gatekeeper_datapath',
default: utils._getDefault( 'gatekeeper_datapath' ),
choices: [
{
name: "/var/run/cyphernode/gatekeeper (needs sudo and "+chalk.red('incompatible with OSX')+")",
value: "/var/run/cyphernode/gatekeeper"
},
{
name: "~/.cyphernode/gatekeeper",
value: "~/.cyphernode/gatekeeper"
},
{
name: "~/gatekeeper",
value: "~/gatekeeper"
},
{
name: "Custom path",
value: "_custom"
}
],
message: prefix()+'Where do you want to store your gatekeeper data?'+utils._getHelp('gatekeeper_datapath'),
},
{
when: (props)=>{ return installerDocker(props) && (props.gatekeeper_datapath === '_custom') },
type: 'input',
name: 'gatekeeper_datapath_custom',
default: utils._getDefault( 'gatekeeper_datapath_custom' ),
filter: utils._trimFilter,
validate: utils._pathValidator,
message: prefix()+'Custom path for gatekeeper data?'+utils._getHelp('gatekeeper_datapath_custom'),
},
{
when: installerDocker,
type: 'list',
name: 'proxy_datapath',
default: utils._getDefault( 'proxy_datapath' ),
choices: [
{
name: "/var/run/cyphernode/proxy (needs sudo and "+chalk.red('incompatible with OSX')+")",
value: "/var/run/cyphernode/proxy"
},
{
name: "~/.cyphernode/proxy",
value: "~/.cyphernode/proxy"
},
{
name: "~/proxy",
value: "~/proxy"
},
{
name: "Custom path",
value: "_custom"
}
],
message: prefix()+'Where do you want to store your proxy data?'+utils._getHelp('proxy_datapath'),
},
{
when: (props)=>{ return installerDocker(props) && (props.proxy_datapath === '_custom') },
type: 'input',
name: 'proxy_datapath_custom',
default: utils._getDefault( 'proxy_datapath_custom' ),
filter: utils._trimFilter,
validate: utils._pathValidator,
message: prefix()+'Custom path for your proxy data?'+utils._getHelp('proxy_datapath_custom'),
},
{
when: function(props) { return installerDocker(props) && props.bitcoin_mode === 'internal' },
type: 'list',
name: 'bitcoin_datapath',
default: utils._getDefault( 'bitcoin_datapath' ),
choices: [
{
name: "/var/run/cyphernode/bitcoin (needs sudo and "+chalk.red('incompatible with OSX')+")",
value: "/var/run/cyphernode/bitcoin"
},
{
name: "~/.cyphernode/bitcoin",
value: "~/.cyphernode/bitcoin"
},
{
name: "~/bitcoin",
value: "~/bitcoin"
},
{
name: "Custom path",
value: "_custom"
}
],
message: prefix()+'Where do you want to store your bitcoin full node data?'+utils._getHelp('bitcoin_datapath'),
},
{
when: function(props) { return installerDocker(props) && props.bitcoin_mode === 'internal' && props.bitcoin_datapath === '_custom' },
type: 'input',
name: 'bitcoin_datapath_custom',
default: utils._getDefault( 'bitcoin_datapath_custom' ),
filter: utils._trimFilter,
validate: utils._pathValidator,
message: prefix()+'Custom path for your bitcoin full node data?'+utils._getHelp('bitcoin_datapath_custom'),
},
{
when: function(props) { return installerDocker(props) && props.features.indexOf('lightning') !== -1 },
type: 'list',
name: 'lightning_datapath',
default: utils._getDefault( 'lightning_datapath' ),
choices: [
{
name: "/var/run/cyphernode/lightning (needs sudo - "+chalk.red('incompatible with OSX')+")",
value: "/var/run/cyphernode/lightning"
},
{
name: "~/.cyphernode/lightning",
value: "~/.cyphernode/lightning"
},
{
name: "~/lightning",
value: "~/lightning"
},
{
name: "Custom path",
value: "_custom"
}
],
message: prefix()+'Where do you want to store your lightning node data?'+utils._getHelp('lightning_datapath'),
},
{
when: function(props) { return installerDocker(props) && props.features.indexOf('lightning') !== -1 && props.lightning_datapath === '_custom'},
type: 'input',
name: 'lightning_datapath_custom',
default: utils._getDefault( 'lightning_datapath_custom' ),
filter: utils._trimFilter,
validate: utils._pathValidator,
message: prefix()+'Custom path for your lightning node data?'+utils._getHelp('lightning_datapath_custom'),
},
{
when: function(props) { return installerDocker(props) && props.features.indexOf('otsclient') !== -1 },
type: 'list',
name: 'otsclient_datapath',
default: utils._getDefault( 'otsclient_datapath' ),
choices: [
{
name: "/var/run/cyphernode/otsclient (needs sudo and "+chalk.red('incompatible with OSX')+")",
value: "/var/run/cyphernode/otsclient"
},
{
name: "~/.cyphernode/otsclient",
value: "~/.cyphernode/otsclient"
},
{
name: "~/otsclient",
value: "~/otsclient"
},
{
name: "Custom path",
value: "_custom"
}
],
message: prefix()+'Where do you want to store your OTS data?'+utils._getHelp('otsclient_datapath'),
},
{
when: function(props) { return installerDocker(props) && props.features.indexOf('otsclient') !== -1 && props.otsclient_datapath === '_custom' },
type: 'input',
name: 'otsclient_datapath_custom',
default: utils._getDefault( 'otsclient_datapath_custom' ),
filter: utils._trimFilter,
validate: utils._pathValidator,
message: prefix()+'Where is your otsclient data?'+utils._getHelp('otsclient_datapath_custom'),
},
{
when: function(props) { return installerDocker(props) && props.bitcoin_mode === 'internal' },
type: 'confirm',
name: 'bitcoin_expose',
default: utils._getDefault( 'bitcoin_expose' ),
message: prefix()+'Expose bitcoin full node outside of the docker network?'+utils._getHelp('bitcoin_expose'),
},
{
when: function(props) { return installerDocker(props) && props.features.indexOf('lightning') !== -1 },
type: 'confirm',
name: 'lightning_expose',
default: utils._getDefault( 'lightning_expose' ),
message: prefix()+'Expose lightning node outside of the docker network?'+utils._getHelp('lightning_expose'),
},
{
when: installerDocker,
type: 'list',
name: 'docker_mode',
default: utils._getDefault( 'docker_mode' ),
message: prefix()+'What docker mode: docker swarm or docker-compose?'+utils._getHelp('docker_mode'),
choices: [{
name: "docker swarm",
value: "swarm"
},
{
name: "docker-compose",
value: "compose"
}]
},
{
type: 'confirm',
name: 'installer_cleanup',
default: utils._getDefault( 'installer_cleanup' ),
message: prefix()+'Clean up the installer after installation?'+utils._getHelp('installer_cleanup'),
}];
},
templates: function( props ) {
if( props.installer_mode === 'docker' ) {
return ['config.sh','start.sh', 'stop.sh', 'testfeatures.sh', path.join('docker', 'docker-compose.yaml')];
}
return ['config.sh','start.sh', 'stop.sh', 'testfeatures.sh'];
}
};

View File

@@ -0,0 +1,43 @@
<% if (net === 'testnet') { %>
# testnet
testnet=1
<% } %>
<% if (bitcoin_prune) { %>
prune=<%= bitcoin_prune_size || 550 %>
<% } else { %>
txindex=1
<% } %>
zmqpubrawblock=tcp://0.0.0.0:18501
zmqpubrawtx=tcp://0.0.0.0:18502
#tor
#proxy=127.0.0.1:9050
#listen=1
maxmempool=64
dbcache=64
rpcuser=<%= bitcoin_rpcuser %>
rpcpassword=<%= bitcoin_rpcpassword %>
# ATTENTION: VERY DANGEROUS OUTSIDE THE DOCKER NETWORK
rpcallowip=0.0.0.0/0
server=1
<% if (net === 'testnet') { %>
test.wallet=watching01.dat
test.wallet=spending01.dat
test.wallet=ln01.dat
<% } else { %>
main.wallet=watching01.dat
main.wallet=spending01.dat
main.wallet=ln01.dat
<% } %>
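# On each wallet transaction, bitcoind substitutes the txid for %s and runs this command to notify the proxy.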
walletnotify=curl proxy:8888/conf/%s
<% if ( bitcoin_uacomment != null && bitcoin_uacomment != '' ) { %>
uacomment=<%= bitcoin_uacomment %>
<% } %>

View File

@@ -0,0 +1,6 @@
# Watcher can do stuff
# Spender can do what the watcher can do plus more stuff
# Admin can do what the spender can do plus even more stuff
<%- gatekeeper_apiproperties %>

View File

@@ -0,0 +1 @@
<%- gatekeeper_sslcert %>

View File

@@ -0,0 +1 @@
admin:<%- gatekeeper_statuspw %>

View File

@@ -0,0 +1 @@
<%- gatekeeper_sslkey %>

View File

@@ -0,0 +1 @@
<%- gatekeeper_keys.configEntries.join('\n') %>

View File

@@ -0,0 +1,19 @@
INSTALLER_MODE=<%= installer_mode %>
BITCOIN_INTERNAL=<%= (bitcoin_mode==="internal"?'true':'false') %>
FEATURE_LIGHTNING=<%= (features.indexOf('lightning') != -1)?'true':'false' %>
FEATURE_OTSCLIENT=<%= (features.indexOf('otsclient') != -1)?'true':'false' %>
LIGHTNING_IMPLEMENTATION=<%= lightning_implementation %>
PROXY_DATAPATH=<%= proxy_datapath %>
GATEKEEPER_DATAPATH=<%= gatekeeper_datapath %>
DOCKER_MODE=<%= docker_mode %>
RUN_AS_USER=<%= run_as_different_user?username:'' %>
CLEANUP=<%= installer_cleanup?'true':'false' %>
<% if ( features.indexOf('lightning') !== -1 && lightning_implementation === 'c-lightning' ) { %>
LIGHTNING_DATAPATH=<%= lightning_datapath %>
<% } %>
<% if ( features.indexOf('otsclient') !== -1 ) { %>
OTSCLIENT_DATAPATH=<%= otsclient_datapath %>
<% } %>
<% if ( bitcoin_mode==="internal" ) { %>
BITCOIN_DATAPATH=<%= bitcoin_datapath %>
<% } %>

View File

@@ -0,0 +1,149 @@
version: "3"
services:
gatekeeper:
# HTTP authentication API gate
environment:
- "TRACING=1"
image: cyphernode/gatekeeper:<%= gatekeeper_version %>
ports:
- "443:443"
volumes:
- "<%= gatekeeper_datapath %>/certs:/etc/ssl/certs"
- "<%= gatekeeper_datapath %>/private:/etc/ssl/private"
- "<%= gatekeeper_datapath %>/keys.properties:/etc/nginx/conf.d/keys.properties"
- "<%= gatekeeper_datapath %>/api.properties:/etc/nginx/conf.d/api.properties"
- "<%= gatekeeper_datapath %>/htpasswd:/etc/nginx/conf.d/status/htpasswd"
- "<%= gatekeeper_datapath %>/installation.json:/etc/nginx/conf.d/status/installation.json"
- "<%= gatekeeper_datapath %>/client.7z:/etc/nginx/conf.d/status/client.7z"
- "<%= gatekeeper_datapath %>/config.7z:/etc/nginx/conf.d/status/config.7z"
command: $USER
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
proxy:
command: $USER ./startproxy.sh
# Bitcoin Mini Proxy
environment:
- "TRACING=1"
- "WATCHER_BTC_NODE_RPC_URL=<%= (bitcoin_mode === 'internal')?'bitcoin':bitcoin_node_ip %>:<%= (net === 'mainnet')?'8332':'18332' %>/wallet/watching01.dat"
- "WATCHER_BTC_NODE_RPC_USER=<%= bitcoin_rpcuser %>:<%= bitcoin_rpcpassword %>"
- "WATCHER_BTC_NODE_RPC_CFG=/tmp/watcher_btcnode_curlcfg.properties"
- "SPENDER_BTC_NODE_RPC_URL=<%= (bitcoin_mode === 'internal')?'bitcoin':bitcoin_node_ip %>:<%= (net === 'mainnet')?'8332':'18332' %>/wallet/spending01.dat"
- "SPENDER_BTC_NODE_RPC_USER=<%= bitcoin_rpcuser %>:<%= bitcoin_rpcpassword %>"
- "SPENDER_BTC_NODE_RPC_CFG=/tmp/spender_btcnode_curlcfg.properties"
- "PROXY_LISTENING_PORT=8888"
- "DB_PATH=/proxy/db"
- "DB_FILE=/proxy/db/proxydb"
- "PYCOIN_CONTAINER=pycoin:7777"
<% if ( use_xpub && xpub ) { %>
- "DERIVATION_PUB32=<%= xpub %>"
- "DERIVATION_PATH=<%= derivation_path %>"
<% } %>
- "WATCHER_BTC_NODE_PRUNED=<%= bitcoin_prune?'true':'false' %>"
- "OTSCLIENT_CONTAINER=otsclient:6666"
- "OTS_FILES=/proxy/otsfiles"
image: cyphernode/proxy:<%= proxy_version %>
<% if ( devmode ) { %>
ports:
- "8888:8888"
<% } %>
volumes:
- "<%= proxy_datapath %>:/proxy/db"
<% if ( features.indexOf('lightning') !== -1 && lightning_implementation === 'c-lightning' ) { %>
- "<%= lightning_datapath %>:/.lightning"
<% } %>
<% if ( features.indexOf('otsclient') !== -1 ) { %>
- "<%= otsclient_datapath %>:/proxy/otsfiles"
<% } %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
proxycron:
environment:
- "PROXY_URL=proxy:8888/executecallbacks"
image: cyphernode/proxycron:<%= proxycron_version %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
pycoin:
# Pycoin
command: $USER ./startpycoin.sh
image: cyphernode/pycoin:<%= pycoin_version %>
environment:
- "TRACING=1"
- "PYCOIN_LISTENING_PORT=7777"
<% if ( devmode ) { %>
ports:
- "7777:7777"
<% } %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
<% if ( features.indexOf('lightning') !== -1 && lightning_implementation === 'c-lightning' ) { %>
lightning:
command: $USER lightningd
image: cyphernode/clightning:<%= lightning_version %>
<% if( lightning_expose ) { %>
ports:
- "9735:9735"
<% } %>
volumes:
- "<%= lightning_datapath%>:/.lightning"
- "<%= lightning_datapath%>/bitcoin.conf:/.bitcoin/bitcoin.conf"
# deploy:
# placement:
# constraints: [node.hostname==dev]
networks:
- cyphernodenet
restart: always
<% } %>
<% if ( features.indexOf('otsclient') !== -1 ) { %>
otsclient:
environment:
- "TRACING=1"
- "OTSCLIENT_LISTENING_PORT=6666"
image: cyphernode/otsclient:<%= otsclient_version %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
volumes:
- "<%= otsclient_datapath%>:/otsfiles"
command: $USER /script/startotsclient.sh
networks:
- cyphernodenet
restart: always
<% } %>
<% if( bitcoin_mode === 'internal' ) { %>
bitcoin:
command: $USER bitcoind
image: cyphernode/bitcoin:<%= bitcoin_version %>
<% if( bitcoin_expose ) { %>
ports:
- "<%= (net === 'mainnet')?'8332:8332':'18332:18332' %>"
<% } %>
volumes:
- "<%= bitcoin_datapath%>:/.bitcoin"
networks:
- cyphernodenet
restart: always
<% } %>
networks:
cyphernodenet:
external: true

View File

@@ -0,0 +1,50 @@
#!/bin/sh
<% if (run_as_different_user) { %>
OS=$(uname -s)
if [ "$OS" = "Darwin" ]; then
printf "\r\n\033[0;91m'Run as another user' feature is not supported on OSX. User <%= default_username %> will be used to run Cyphernode.\033[0m\r\n\r\n"
export USER=$(id -u <%= default_username %>):$(id -g <%= default_username %>)
else
export USER=$(id -u <%= username %>):$(id -g <%= username %>)
fi
<% } else { %>
export USER=$(id -u <%= default_username %>):$(id -g <%= default_username %>)
<% } %>
export ARCH=$(uname -m)
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
<% if (docker_mode == 'swarm') { %>
docker stack deploy -c $current_path/docker-compose.yaml cyphernode
<% } else if(docker_mode == 'compose') { %>
docker-compose -f $current_path/docker-compose.yaml up -d --remove-orphans
<% } %>
arch=$(uname -m)
case "${arch}" in arm*)
printf "\r\n\033[1;31mSince we're on a slow RPi, let's give Docker 60 more seconds before performing our tests...\033[0m\r\n\r\n"
sleep 60
;;
esac
# Will test if Cyphernode is fully up and running...
docker run --rm -it -v $current_path/testfeatures.sh:/testfeatures.sh \
-v <%= gatekeeper_datapath %>:/gatekeeper \
-v $current_path:/dist \
--network cyphernodenet alpine:3.8 /testfeatures.sh
if [ -f $current_path/exitStatus.sh ]; then
. $current_path/exitStatus.sh
rm -f $current_path/exitStatus.sh
fi
if [ "$EXIT_STATUS" -ne "0" ]; then
printf "\r\n\033[1;31mThere was an error during cyphernode installation. Please see Docker's logs for more information. Run ./stop.sh to stop cyphernode.\r\n\r\n\033[0m"
exit 1
fi
printf "\r\n\033[0;92mDepending on your current location and DNS settings, point your favorite browser to one of the following URLs to access Cyphernode's status page:\r\n"
printf "\r\n"
printf "\033[0;95m<% cns.forEach(cn => { %><%= ('https://' + cn + '/status/\\r\\n') %><% }) %>\033[0m\r\n"
printf "\033[0;92mUse 'admin' as the username with the configuration password you selected at the beginning of the configuration process.\r\n\r\n\033[0m"

View File

@@ -0,0 +1,13 @@
#!/bin/sh
current_path="$(cd "$(dirname "$0")" >/dev/null && pwd)"
<% if (docker_mode == 'swarm') { %>
export USER=$(id -u):$(id -g)
export ARCH=$(uname -m)
docker stack rm cyphernode
<% } else if(docker_mode == 'compose') { %>
export USER=$(id -u):$(id -g)
export ARCH=$(uname -m)
docker-compose -f $current_path/docker-compose.yaml down
<% } %>

View File

@@ -0,0 +1,325 @@
#!/bin/sh
apk add --update --no-cache openssl curl jq > /dev/null
. /gatekeeper/keys.properties
checkgatekeeper() {
echo -e "\r\n\e[1;36mTesting Gatekeeper...\e[0;32m" > /dev/console
local rc
local id="001"
local k
eval k='$ukey_'$id
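# Indirect lookup: keys.properties defines ukey_001, ukey_002, ..., so eval resolves the key for this id.
# The token below is a manually-built JWT-style token: base64(header).base64(payload).hex(HMAC-SHA256(header.payload, key)).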
local h64=$(echo "{\"alg\":\"HS256\",\"typ\":\"JWT\"}" | base64)
# Let's test expiration: 1 second in payload, request 2 seconds later
local p64=$(echo "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+1))}" | base64)
local s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1)
local token="$h64.$p64.$s"
echo -e " Sleeping 2 seconds... " > /dev/console
sleep 2
echo " Testing expired request... " > /dev/console
rc=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" --cacert /gatekeeper/certs/cert.pem https://gatekeeper/v0/getblockinfo)
[ "${rc}" -ne "403" ] && return 10
# Let's test authentication (signature)
p64=$(echo "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64)
s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1)
token="$h64.$p64.a$s"
echo " Testing bad signature... " > /dev/console
rc=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" --cacert /gatekeeper/certs/cert.pem https://gatekeeper/v0/getblockinfo)
[ "${rc}" -ne "403" ] && return 30
# Let's test authorization (action access for groups)
token="$h64.$p64.$s"
echo " Testing watcher trying to do a spender action... " > /dev/console
rc=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" --cacert /gatekeeper/certs/cert.pem https://gatekeeper/v0/getbalance)
[ "${rc}" -ne "403" ] && return 40
id="002"
eval k='$ukey_'$id
p64=$(echo "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64)
s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1)
token="$h64.$p64.$s"
echo " Testing spender trying to do an internal action call... " > /dev/console
rc=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" --cacert /gatekeeper/certs/cert.pem https://gatekeeper/v0/conf)
[ "${rc}" -ne "403" ] && return 50
id="003"
eval k='$ukey_'$id
p64=$(echo "{\"id\":\"$id\",\"exp\":$((`date +"%s"`+10))}" | base64)
s=$(echo -n "$h64.$p64" | openssl dgst -hmac "$k" -sha256 -r | cut -sd ' ' -f1)
token="$h64.$p64.$s"
echo " Testing admin trying to do an internal action call... " > /dev/console
rc=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $token" --cacert /gatekeeper/certs/cert.pem https://gatekeeper/v0/conf)
[ "${rc}" -ne "403" ] && return 60
echo -e "\e[1;36mGatekeeper rocks!" > /dev/console
return 0
}
checkpycoin() {
echo -en "\r\n\e[1;36mTesting Pycoin... " > /dev/console
local rc
rc=$(curl -H "Content-Type: application/json" -d "{\"pub32\":\"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb\",\"path\":\"0/25-30\"}" -s -o /dev/null -w "%{http_code}" http://proxy:8888/derivepubpath)
[ "${rc}" -ne "200" ] && return 100
echo -e "\e[1;36mPycoin rocks!" > /dev/console
return 0
}
checkots() {
echo -en "\r\n\e[1;36mTesting OTSclient... " > /dev/console
local rc
rc=$(curl -s -H "Content-Type: application/json" -d '{"hash":"123","callbackUrl":"http://callback"}' http://proxy:8888/ots_stamp)
echo "${rc}" | grep "Invalid hash 123 for sha256" > /dev/null
[ "$?" -ne "0" ] && return 200
echo -e "\e[1;36mOTSclient rocks!" > /dev/console
return 0
}
checkbitcoinnode() {
echo -en "\r\n\e[1;36mTesting Bitcoin... " > /dev/console
local rc
rc=$(curl -s -o /dev/null -w "%{http_code}" http://proxy:8888/getbestblockhash)
[ "${rc}" -ne "200" ] && return 300
echo -e "\e[1;36mBitcoin node rocks!" > /dev/console
return 0
}
checklnnode() {
echo -en "\r\n\e[1;36mTesting Lightning... " > /dev/console
local rc
rc=$(curl -s -o /dev/null -w "%{http_code}" http://proxy:8888/ln_getinfo)
[ "${rc}" -ne "200" ] && return 400
echo -e "\e[1;36mLN node rocks!" > /dev/console
return 0
}
checkservice() {
local interval=10
local totaltime=120
local outcome
local returncode=0
local endtime=$(($(date +%s) + ${totaltime}))
local result
echo -e "\r\n\e[1;36mTesting if Cyphernode is up and running... \e[0;36mI will keep trying during up to $((${totaltime} / 60)) minutes to give time to Docker to deploy everything...\e[0;32m" > /dev/console
while :
do
outcome=0
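# Ping each expected container in the background and remember its PID in a variable named after the container;
# the second loop waits on each PID to collect a per-container exit code.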
for container in gatekeeper proxy proxycron pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
echo -e " \e[0;32mVerifying \e[0;33m${container}\e[0;32m..." > /dev/console
(ping -c 10 ${container} 2> /dev/null | grep "0% packet loss" > /dev/null) &
eval ${container}=$!
done
for container in gatekeeper proxy proxycron pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
eval wait '$'${container} ; returncode=$? ; outcome=$((${outcome} + ${returncode}))
eval c_${container}=${returncode}
done
# If '0% packet loss' everywhere or the total wait time has elapsed, we get out of this loop
([ "${outcome}" -eq "0" ] || [ $(date +%s) -gt ${endtime} ]) && break
echo -e "\e[1;31mCyphernode still not ready, will retry every ${interval} seconds for $((${totaltime} / 60)) minutes ($((${endtime} - $(date +%s))) seconds left)." > /dev/console
sleep ${interval}
done
# "containers": [
# { "name": "gatekeeper", "active":true },
# { "name": "proxy", "active":true },
# { "name": "proxycron", "active":true },
# { "name": "pycoin", "active":true },
# { "name": "otsclient", "active":true },
# { "name": "bitcoin", "active":true },
# { "name": "lightning", "active":true }
# ]
for container in gatekeeper proxy proxycron pycoin <%= (features.indexOf('otsclient') != -1)?'otsclient ':'' %>bitcoin <%= (features.indexOf('lightning') != -1)?'lightning ':'' %>; do
[ -n "${result}" ] && result="${result},"
result="${result}{\"name\":\"${container}\",\"active\":"
eval "returncode=\$c_${container}"
if [ "${returncode}" -eq "0" ]; then
result="${result}true}"
else
result="${result}false}"
fi
done
result="\"containers\":[${result}]"
echo $result
return ${outcome}
}
timeout_feature() {
local interval=10
local totaltime=60
local testwhat=${1}
local returncode
local endtime=$(($(date +%s) + ${totaltime}))
while :
do
eval ${testwhat}
returncode=$?
# If no error or the timeout has elapsed, we get out of this loop
([ "${returncode}" -eq "0" ] || [ $(date +%s) -gt ${endtime} ]) && break
echo -e "\e[1;31mMaybe it's too early, I'll retry every ${interval} seconds for $((${totaltime} / 60)) minutes ($((${endtime} - $(date +%s))) seconds left)." > /dev/console
sleep ${interval}
done
return ${returncode}
}
feature_status() {
local returncode=${1}
local errormsg=${2}
[ "${returncode}" -eq "0" ] && echo "true"
[ "${returncode}" -ne "0" ] && echo "false" && echo -e "\e[1;31m${errormsg}" > /dev/console
}
# /gatekeeper/installation.json will contain something like this:
#{
# "containers": [
# { "name": "gatekeeper", "active":true },
# { "name": "proxy", "active":true },
# { "name": "proxycron", "active":true },
# { "name": "pycoin", "active":true },
# { "name": "otsclient", "active":true },
# { "name": "bitcoin", "active":true },
# { "name": "lightning", "active":true }
# ],
# "features": [
# { "name": "gatekeeper", "working":true },
# { "name": "pycoin", "working":true },
# { "name": "otsclient", "working":true },
# { "name": "bitcoin", "working":true },
# { "name": "lightning", "working":true }
# ]
#}
# Let's first see if everything is up.
echo "EXIT_STATUS=1" > /dist/exitStatus.sh
brokenproxy="false"
containers=$(checkservice)
returncode=$?
finalreturncode=${returncode}
if [ "${returncode}" -ne "0" ]; then
echo -e "\e[1;31mCyphernode could not fully start properly within delay." > /dev/console
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"proxy\") | .active")
if [ "${status}" = "false" ]; then
echo -e "\e[1;31mThe Proxy, the main Cyphernode's component, is not responding. We will only test the gatekeeper if its container is up, but you'll see errors for the other components. Please check the logs." > /dev/console
brokenproxy="true"
fi
else
echo -e "\e[1;36mCyphernode seems to be correctly deployed. Let's run more thourough tests..." > /dev/console
fi
# Let's now check each feature's functionality...
# "features": [
# { "name": "gatekeeper", "working":true },
# { "name": "pycoin", "working":true },
# { "name": "otsclient", "working":true },
# { "name": "bitcoin", "working":true },
# { "name": "lightning", "working":true }
# ]
result="${containers},\"features\":[{\"name\":\"gatekeeper\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"gatekeeper\") | .active")
if [ "${status}" = "true" ]; then
timeout_feature checkgatekeeper
returncode=$?
else
returncode=1
fi
finalreturncode=$((${returncode} | ${finalreturncode}))
result="${result}$(feature_status ${returncode} 'Gatekeeper error!')}"
result="${result},{\"name\":\"pycoin\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"pycoin\") | .active")
if [[ "${brokenproxy}" != "true" && "${status}" = "true" ]]; then
timeout_feature checkpycoin
returncode=$?
else
returncode=1
fi
finalreturncode=$((${returncode} | ${finalreturncode}))
result="${result}$(feature_status ${returncode} 'Pycoin error!')}"
<% if (features.indexOf('otsclient') != -1) { %>
result="${result},{\"name\":\"otsclient\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"otsclient\") | .active")
if [[ "${brokenproxy}" != "true" && "${status}" = "true" ]]; then
timeout_feature checkots
returncode=$?
else
returncode=1
fi
finalreturncode=$((${returncode} | ${finalreturncode}))
result="${result}$(feature_status ${returncode} 'OTSclient error!')}"
<% } %>
result="${result},{\"name\":\"bitcoin\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"bitcoin\") | .active")
if [[ "${brokenproxy}" != "true" && "${status}" = "true" ]]; then
timeout_feature checkbitcoinnode
returncode=$?
else
returncode=1
fi
finalreturncode=$((${returncode} | ${finalreturncode}))
result="${result}$(feature_status ${returncode} 'Bitcoin error!')}"
<% if (features.indexOf('lightning') != -1) { %>
result="${result},{\"name\":\"lightning\",\"working\":"
status=$(echo "{${containers}}" | jq ".containers[] | select(.name == \"lightning\") | .active")
if [[ "${brokenproxy}" != "true" && "${status}" = "true" ]]; then
timeout_feature checklnnode
returncode=$?
else
returncode=1
fi
finalreturncode=$((${returncode} | ${finalreturncode}))
result="${result}$(feature_status ${returncode} 'Lightning error!')}"
<% } %>
result="{${result}]}"
echo "${result}" > /gatekeeper/installation.json
echo -e "\r\n\e[1;32mTests finished.\e[0m" > /dev/console
echo "EXIT_STATUS=${finalreturncode}" > /dist/exitStatus.sh

View File

@@ -0,0 +1,8 @@
<% if (net === 'testnet') { %>
# testnet
testnet=1
<% } %>
rpcconnect=<%= (bitcoin_mode === 'internal')?'bitcoin':bitcoin_node_ip %>
rpcuser=<%= bitcoin_rpcuser %>
rpcpassword=<%= bitcoin_rpcpassword %>

View File

@@ -0,0 +1,15 @@
<% if (net === 'testnet') { %>
# testnet
network=testnet
<% } else if (net === 'mainnet') { %>
network=bitcoin
<% } %>
<% if( lightning_nodename ) { %>
alias=<%= lightning_nodename %>
<% } %>
<% if( lightning_nodecolor ) { %>
rgb=<%= lightning_nodecolor %>
<% } %>
bitcoin-rpcconnect=<%= (bitcoin_mode === 'internal')?'bitcoin':bitcoin_node_ip %>
bitcoin-rpcuser=<%= bitcoin_rpcuser %>
bitcoin-rpcpassword=<%= bitcoin_rpcpassword %>

View File

@@ -0,0 +1,27 @@
[Application Options]
debuglevel=info
maxpendingchannels=10
externalip=88.198.55.131
color=#a111ff
alias=SatoshiPortal01
rpclisten=0.0.0.0:10009
tlsextraip=lnd
tlsextradomain=lnd
[Bitcoin]
bitcoin.active=1
bitcoin.node=bitcoind
bitcoin.mainnet=1
[Bitcoind]
bitcoind.rpcuser=<%= bitcoin_rpcuser %>
bitcoind.rpcpass=<%= bitcoin_rpcpassword %>
bitcoind.zmqpubrawblock=tcp://bitcoin:18501
bitcoind.zmqpubrawtx=tcp://bitcoin:18502
#bitcoind.zmqpath=tcp://bitcoin:18501
bitcoind.rpchost=bitcoin
[autopilot]
autopilot.active=1
autopilot.maxchannels=5
autopilot.allocation=0.6

View File

@@ -0,0 +1,34 @@
{
"name": "generator-cyphernode",
"version": "0.0.0",
"description": "",
"homepage": "",
"author": {
"name": "jash",
"email": "jash@schulterklopfer-productions.de",
"url": ""
},
"files": [
"generators"
],
"main": "generators/index.js",
"keywords": [
"cyphernode",
"yeoman-generator"
],
"engines": {
"npm": ">= 4.0.0"
},
"dependencies": {
"@rauschma/stringio": "^1.4.0",
"chalk": "^2.1.0",
"coinstring": "^2.3.0",
"parse5": "^5.1.0",
"validator": "^10.8.0",
"wrap-ansi": "^4.0.0",
"yeoman-environment": "2.3.3",
"yeoman-generator": "2.0.5"
},
"repository": "git@github.com:schulterklopfer/cyphernode.git",
"license": "MIT"
}

View File

@@ -0,0 +1,27 @@
FROM node:11.1-alpine
RUN apk add --update --no-cache \
git \
jq \
su-exec \
&& yarn global add javascript-opentimestamps
WORKDIR /script
COPY script/otsclient.sh /script/otsclient.sh
COPY script/requesthandler.sh /script/requesthandler.sh
COPY script/responsetoclient.sh /script/responsetoclient.sh
COPY script/startotsclient.sh /script/startotsclient.sh
COPY script/trace.sh /script/trace.sh
RUN chmod +x /script/startotsclient.sh /script/requesthandler.sh
ENTRYPOINT ["su-exec"]
# docker build -t otsclient-js .
# docker run -it --rm --name otsclient -v /home/debian/otsfiles:/otsfiles otsclient-js `id -u cyphernode`:`id -g cyphernode` ash
# ots-cli.js stamp -d 1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
# ots-cli.js verify -d 1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7 1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7.ots
# ots-cli.js info 1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7.ots
# ots-cli.js upgrade 1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7.ots

View File

@@ -0,0 +1,36 @@
# OTS Client Cyphernode Container
## Pull our Cyphernode image
```shell
docker pull cyphernode/otsclient:latest
```
## Build yourself the image
```shell
docker build -t cyphernode/otsclient:latest .
```
## OTS files directory...
```shell
mkdir -p ~/otsfiles
sudo chown -R cyphernode:cyphernode ~/otsfiles ; sudo chmod g+ws ~/otsfiles
sudo find ~/otsfiles -type d -exec chmod 2775 {} \; ; sudo find ~/otsfiles -type f -exec chmod g+rw {} \;
```
## Run image
If you are using it independently of the Docker stack (docker-compose.yml), you can run it like this:
```shell
docker run --rm -d -p 6666:6666 --network cyphernodenet --env-file env.properties cyphernode/otsclient:latest `id -u cyphernode`:`id -g cyphernode` ./startotsclient.sh
```
## Useful examples
```shell
curl http://localhost:6666/stamp/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
curl http://localhost:6666/upgrade/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
```
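
Based on the response format built by `otsclient.sh` in this same image, a successful stamp call should return JSON along these lines (the hash below simply reuses the sample value from above):

```json
{"method":"stamp","hash":"1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7","result":"success"}
```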

View File

@@ -0,0 +1,2 @@
TRACING=1
OTSCLIENT_LISTENING_PORT=6666

View File

@@ -0,0 +1,83 @@
#!/bin/sh
. ./trace.sh
stamp()
{
trace "Entering stamp()..."
local hash=${1}
trace "[stamp] hash=${hash}"
local result
local returncode
local data
trace "[stamp] ots-cli.js stamp -d ${hash}"
result=$(cd /otsfiles && ots-cli.js stamp -d ${hash} 2>&1)
returncode=$?
trace_rc ${returncode}
trace "[stamp] result=${result}"
# The timestamp proof '1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7.ots' has been created!
data="{\"method\":\"stamp\",\"hash\":\"${hash}\",\"result\":\""
trace "[stamp] grepping..."
echo "${result}" | grep "has been created!" > /dev/null
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then
# String found
data="${data}success\"}"
else
# String not found
data="${data}error\",\"error\":\"${result}\"}"
fi
trace "[stamp] data=${data}"
echo "${data}"
return ${returncode}
}
upgrade()
{
trace "Entering upgrade()..."
local hash=${1}
trace "[upgrade] hash=${hash}"
local result
local returncode
trace "[upgrade] ots-cli.js upgrade ${hash}.ots"
result=$(cd /otsfiles && ots-cli.js upgrade ${hash}.ots 2>&1)
returncode=$?
trace_rc ${returncode}
trace "[upgrade] result=${result}"
# Success! Timestamp complete
# Failed! Timestamp not complete
data="{\"method\":\"upgrade\",\"hash\":\"${hash}\",\"result\":\""
trace "[upgrade] grepping..."
echo "${result}" | grep "Success!" > /dev/null
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then
data="${data}success\"}"
else
data="${data}error\",\"error\":\"${result}\"}"
fi
trace "[upgrade] data=${data}"
echo "${data}"
return ${returncode}
}

View File

@@ -0,0 +1,88 @@
#!/bin/sh
#
#
#
#
. ./otsclient.sh
. ./responsetoclient.sh
. ./trace.sh
main()
{
trace "Entering main()..."
local step=0
local cmd
local http_method
local line
local content_length
local response
local returncode
while read line; do
line=$(echo "${line}" | tr -d '\r\n')
trace "[main] line=${line}"
if [ "${cmd}" = "" ]; then
# First line!
# Looking for something like:
# GET /cmd/params HTTP/1.1
# POST / HTTP/1.1
cmd=$(echo "${line}" | cut -d '/' -f2 | cut -d ' ' -f1)
trace "[main] cmd=${cmd}"
http_method=$(echo "${line}" | cut -d ' ' -f1)
trace "[main] http_method=${http_method}"
if [ "${http_method}" = "GET" ]; then
step=1
fi
fi
if [ "${line}" = "" ]; then
trace "[main] empty line"
if [ ${step} -eq 1 ]; then
trace "[main] body part finished, disconnecting"
break
else
trace "[main] headers part finished, body incoming"
step=1
fi
fi
# line=content-length: 406
case "${line}" in *[cC][oO][nN][tT][eE][nN][tT]-[lL][eE][nN][gG][tT][hH]*)
content_length=$(echo ${line} | cut -d ':' -f2)
trace "[main] content_length=${content_length}";
;;
esac
if [ ${step} -eq 1 ]; then
trace "[main] step=${step}"
if [ "${http_method}" = "POST" ]; then
read -n ${content_length} line
trace "[main] line=${line}"
fi
case "${cmd}" in
stamp)
# GET http://192.168.111.152:8080/stamp/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
response=$(stamp $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?}
break
;;
upgrade)
# GET http://192.168.111.152:8080/upgrade/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
response=$(upgrade $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?}
break
;;
esac
break
fi
done
trace "[main] exiting"
return 0
}
export TRACING
main
exit $?

View File

@@ -0,0 +1,21 @@
#!/bin/sh
. ./trace.sh
response_to_client()
{
trace "Entering response_to_client()..."
local response=${1}
local returncode=${2}
([ -z "${returncode}" ] || [ "${returncode}" -eq "0" ]) && echo -ne "HTTP/1.1 200 OK\r\n"
[ -n "${returncode}" ] && [ "${returncode}" -ne "0" ] && echo -ne "HTTP/1.1 400 Bad Request\r\n"
echo -e "Content-Type: application/json\r\nContent-Length: ${#response}\r\n\r\n${response}"
# Small delay needed for the data to be processed correctly by peer
sleep 0.2s
}
case "${0}" in *responsetoclient.sh) response_to_client $@;; esac

View File

@@ -0,0 +1,6 @@
#!/bin/sh
export TRACING
export OTSCLIENT_LISTENING_PORT
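# Listen on the configured port; for each incoming connection, busybox nc runs requesthandler.sh with the socket attached to stdin/stdout.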
nc -vlkp${OTSCLIENT_LISTENING_PORT} -e ./requesthandler.sh

View File

@@ -0,0 +1,15 @@
#!/bin/sh
trace()
{
if [ -n "${TRACING}" ]; then
echo "$(date -Is) ${1}" 1>&2
fi
}
trace_rc()
{
if [ -n "${TRACING}" ]; then
echo "$(date -Is) Last return code: ${1}" 1>&2
fi
}
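# Usage sketch: source this file and export a non-empty TRACING to enable logging.
#   . ./trace.sh
#   TRACING=1
#   trace "watching address"    # prints "<ISO-8601 date> watching address" to stderr
#   some_command; trace_rc $?   # prints "<ISO-8601 date> Last return code: 0" to stderr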

View File

@@ -1,42 +0,0 @@
FROM alpine
ENV HOME /proxy
RUN apk add --update --no-cache \
sqlite \
jq \
curl \
su-exec
COPY app/script/callbacks_job.sh ${HOME}/callbacks_job.sh
COPY app/script/blockchainrpc.sh ${HOME}/blockchainrpc.sh
COPY app/script/call_lightningd.sh ${HOME}/call_lightningd.sh
COPY app/script/bitcoin.sh ${HOME}/bitcoin.sh
COPY app/script/requesthandler.sh ${HOME}/requesthandler.sh
COPY app/script/watchrequest.sh ${HOME}/watchrequest.sh
COPY app/script/walletoperations.sh ${HOME}/walletoperations.sh
COPY app/script/confirmation.sh ${HOME}/confirmation.sh
COPY app/script/startproxy.sh ${HOME}/startproxy.sh
COPY app/script/trace.sh ${HOME}/trace.sh
COPY app/script/sendtobitcoinnode.sh ${HOME}/sendtobitcoinnode.sh
COPY app/script/responsetoclient.sh ${HOME}/responsetoclient.sh
COPY app/script/importaddress.sh ${HOME}/importaddress.sh
COPY app/script/sql.sh ${HOME}/sql.sh
COPY app/data/watching.sql ${HOME}/watching.sql
COPY app/script/computefees.sh ${HOME}/computefees.sh
COPY app/script/unwatchrequest.sh ${HOME}/unwatchrequest.sh
COPY app/script/getactivewatches.sh ${HOME}/getactivewatches.sh
COPY app/script/manage_missed_conf.sh ${HOME}/manage_missed_conf.sh
COPY app/script/tests.sh ${HOME}/tests.sh
COPY app/script/tests-cb.sh ${HOME}/tests-cb.sh
COPY app/bin/lightning-cli_x86 ${HOME}/lightning-cli
WORKDIR ${HOME}
RUN chmod +x startproxy.sh requesthandler.sh lightning-cli \
&& chmod o+w . \
&& mkdir db
VOLUME ["${HOME}/db", "${HOME}/.lightning"]
ENTRYPOINT ["su-exec"]

View File

@@ -0,0 +1,39 @@
FROM alpine:3.8
# Take care of glibc (not natively supported by Alpine, but lightning-cli needs it)
ENV GLIBC_VERSION 2.27-r0
ENV GLIBC_SHA256 938bceae3b83c53e7fa9cc4135ce45e04aae99256c5e74cf186c794b97473bc7
ENV GLIBCBIN_SHA256 3a87874e57b9d92e223f3e90356aaea994af67fb76b71bb72abfb809e948d0d6
# Download and install glibc (https://github.com/jeanblanchard/docker-alpine-glibc/blob/master/Dockerfile)
RUN wget -O /etc/apk/keys/sgerrand.rsa.pub https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC_VERSION/sgerrand.rsa.pub \
&& wget -O glibc.apk "https://github.com/sgerrand/alpine-pkg-glibc/releases/download/${GLIBC_VERSION}/glibc-${GLIBC_VERSION}.apk" \
&& echo "$GLIBC_SHA256 glibc.apk" | sha256sum -c - \
&& wget -O glibc-bin.apk "https://github.com/sgerrand/alpine-pkg-glibc/releases/download/${GLIBC_VERSION}/glibc-bin-${GLIBC_VERSION}.apk" \
&& echo "$GLIBCBIN_SHA256 glibc-bin.apk" | sha256sum -c - \
&& apk add --update --no-cache glibc-bin.apk glibc.apk \
&& /usr/glibc-compat/sbin/ldconfig /lib /usr/glibc-compat/lib \
&& echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf \
&& rm -rf glibc.apk glibc-bin.apk
ENV HOME /proxy
RUN apk add --update --no-cache \
sqlite \
jq \
curl \
su-exec
WORKDIR ${HOME}
COPY app/data/* ./
COPY app/script/* ./
COPY --from=cyphernode/clightning:v0.6.2 /usr/bin/lightning-cli ./
RUN chmod +x startproxy.sh requesthandler.sh lightning-cli sqlmigrate*.sh \
&& chmod o+w . \
&& mkdir db
VOLUME ["${HOME}/db", "/.lightning"]
ENTRYPOINT ["su-exec"]

View File

@@ -0,0 +1,35 @@
FROM alpine:3.8
# Take care of glibc (not natively supported by Alpine, but lightning-cli needs it)
ENV GLIBC_VERSION 2.27-r0
# Download and install glibc (https://github.com/jeanblanchard/docker-alpine-glibc/blob/master/Dockerfile)
RUN apk add --update --no-cache wget \
&& wget -O glibc.apk "https://github.com/yangxuan8282/alpine-pkg-glibc/releases/download/${GLIBC_VERSION}/glibc-${GLIBC_VERSION}.apk" \
&& wget -O glibc-bin.apk "https://github.com/yangxuan8282/alpine-pkg-glibc/releases/download/${GLIBC_VERSION}/glibc-bin-${GLIBC_VERSION}.apk" \
&& apk add --allow-untrusted --update --no-cache glibc-bin.apk glibc.apk \
&& /usr/glibc-compat/sbin/ldconfig /lib /usr/glibc-compat/lib \
&& echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf \
&& rm -rf glibc.apk glibc-bin.apk
ENV HOME /proxy
RUN apk add --update --no-cache \
sqlite \
jq \
curl \
su-exec
WORKDIR ${HOME}
COPY app/data/* ./
COPY app/script/* ./
COPY --from=cyphernode/clightning:v0.6.2 /usr/bin/lightning-cli ./
RUN chmod +x startproxy.sh requesthandler.sh lightning-cli sqlmigrate*.sh \
&& chmod o+w . \
&& mkdir db
VOLUME ["${HOME}/db", "/.lightning"]
ENTRYPOINT ["su-exec"]

View File

@@ -1,6 +1,24 @@
# Cyphernode Proxy
We assume you are the user pi on a Raspberry Pi.
## Pull our Cyphernode image
```shell
docker pull cyphernode/proxy:latest
```
## Build yourself the image
```shell
docker build -t cyphernode/proxy:latest .
```
## Run image
If you want to run this container independently from Cyphernode:
```shell
docker run --rm -d -p 8888:8888 --network cyphernodenet --env-file env.properties cyphernode/proxy:latest `id -u cyphernode`:`id -g cyphernode` ./startproxy.sh
```
## Configure your container by modifying `env.properties` file
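A minimal, partial sketch of what `env.properties` could contain, using only variable names that appear in the proxy scripts of this release; every value below is a placeholder, not a documented default:

```shell
TRACING=1
DB_FILE=db/proxydb
WATCHER_NODE_RPC_URL=watcher_node:18332
SPENDER_NODE_RPC_URL=spender_node:18332
WATCHER_BTC_NODE_RPC_USER=rpcuser:rpcpassword
SPENDER_BTC_NODE_RPC_USER=rpcuser:rpcpassword
OTSCLIENT_CONTAINER=otsclient:6666
```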
@@ -45,7 +63,7 @@ docker build -t btcproxyimg .
## Create sqlite3 database path and give rights
```shell
mkdir ~/btcproxydb ; sudo chown -R cyphernode:pi ~/btcproxydb ; sudo chmod g+ws ~/btcproxydb
mkdir ~/proxydb ; sudo chown -R cyphernode:cyphernode ~/proxydb ; sudo chmod g+ws ~/proxydb
```
## What you MUST have in your Watching Bitcoin node's bitcoin.conf file

View File

@@ -1,5 +0,0 @@
# Nota bene
lightning-cli binary has been pre-compiled in an Alpine docker container.
Use lightning-cli_arm for armhf architecture, lightning-cli_x86 for x86_64.

View File

@@ -53,3 +53,25 @@ CREATE TABLE recipient (
inserted_ts INTEGER DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_recipient_address ON recipient (address);
CREATE TABLE stamp (
id INTEGER PRIMARY KEY AUTOINCREMENT,
hash TEXT UNIQUE,
callbackUrl TEXT,
requested INTEGER DEFAULT FALSE,
upgraded INTEGER DEFAULT FALSE,
calledback INTEGER DEFAULT FALSE,
inserted_ts INTEGER DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_stamp_hash ON stamp (hash);
CREATE INDEX idx_stamp_calledback ON stamp (calledback);
CREATE TABLE cyphernode_props (
id INTEGER PRIMARY KEY AUTOINCREMENT,
property TEXT,
value TEXT,
inserted_ts INTEGER DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_cp_property ON cyphernode_props (property);
INSERT INTO cyphernode_props (property, value) VALUES ("version", "0.1");
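-- Usage sketch: each OTS stamping request gets one row in "stamp"; the proxy flips
-- requested, upgraded and calledback to 1 as the request progresses. Pending work can
-- be inspected with, for example (database path assumed):
--   sqlite3 db/proxydb "SELECT hash, requested, upgraded, calledback FROM stamp WHERE NOT calledback;"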

View File

@@ -0,0 +1,10 @@
#!/bin/sh
sqlite3 "$DB_FILE" ".tables" | grep "stamp" > /dev/null
if [ "$?" -eq "1" ]; then
# stamp not there, we have to migrate
echo "Migrating database from v0 to v0.1..."
cat sqlmigrate20181213_0-0.1.sql | sqlite3 $DB_FILE
else
echo "Database v0 to v0.1 migration already done, skipping!"
fi
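# Note: this script is run by startproxy.sh (the sqlmigrate*.sh loop) when the database
# already exists; DB_FILE is assumed to be exported by the container environment
# (e.g. db/proxydb). Running it twice is harmless: the "stamp" table check makes it a no-op.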

View File

@@ -0,0 +1,23 @@
PRAGMA foreign_keys = ON;
CREATE TABLE stamp (
id INTEGER PRIMARY KEY AUTOINCREMENT,
hash TEXT UNIQUE,
callbackUrl TEXT,
requested INTEGER DEFAULT FALSE,
upgraded INTEGER DEFAULT FALSE,
calledback INTEGER DEFAULT FALSE,
inserted_ts INTEGER DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_stamp_hash ON stamp (hash);
CREATE INDEX idx_stamp_calledback ON stamp (calledback);
CREATE TABLE cyphernode_props (
id INTEGER PRIMARY KEY AUTOINCREMENT,
property TEXT,
value TEXT,
inserted_ts INTEGER DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_cp_property ON cyphernode_props (property);
INSERT INTO cyphernode_props (property, value) VALUES ("version", "0.1");

View File

@@ -0,0 +1,229 @@
#!/bin/sh
. ./trace.sh
serve_ots_stamp()
{
trace "Entering serve_ots_stamp()..."
local request=${1}
local hash=$(echo "${request}" | jq ".hash" | tr -d '"')
trace "[serve_ots_stamp] hash=${hash}"
local callbackUrl=$(echo "${request}" | jq ".callbackUrl" | tr -d '"')
trace "[serve_ots_stamp] callbackUrl=${callbackUrl}"
local result
local returncode
local errorstring
local id_inserted
local requested
local row
# Already requested?
row=$(sql "SELECT id, requested FROM stamp WHERE hash='${hash}'")
trace "[serve_ots_stamp] row=${row}"
if [ -n "${row}" ]; then
# Hash exists in DB...
trace "[serve_ots_stamp] Hash already exists in DB."
requested=$(echo "${row}" | cut -d '|' -f2)
trace "[serve_ots_stamp] requested=${requested}"
id_inserted=$(echo "${row}" | cut -d '|' -f1)
trace "[serve_ots_stamp] id_inserted=${id_inserted}"
if [ "${requested}" -eq "1" ]; then
# Stamp already requested
trace "[serve_ots_stamp] Stamp already requested"
errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested"
returncode=1
else
errorstring=$(request_ots_stamp "${hash}")
returncode=$?
fi
else
sql "INSERT OR IGNORE INTO stamp (hash, callbackUrl) VALUES (\"${hash}\", \"${callbackUrl}\")"
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then
id_inserted=$(sql "SELECT id FROM stamp WHERE hash='${hash}'")
trace_rc $?
errorstring=$(request_ots_stamp "${hash}")
returncode=$?
trace_rc ${returncode}
else
trace "[serve_ots_stamp] Stamp request could not be inserted in DB"
errorstring="Stamp request could not be inserted in DB, please retry later"
returncode=1
fi
fi
result="{\"method\":\"ots_stamp\",\"hash\":\"${hash}\",\"id\":\"${id_inserted}\",\"result\":\""
if [ "${returncode}" -eq "0" ]; then
result="${result}success\"}"
else
result="${result}error\",\"error\":\"${errorstring}\"}"
fi
trace "[serve_ots_stamp] result=${result}"
# Output response to stdout before exiting with return code
echo "${result}"
return ${returncode}
}
request_ots_stamp()
{
# Request the OTS server to stamp
local hash=${1}
local returncode
local result
local errorstring
trace "[request_ots_stamp] Stamping..."
result=$(curl -s ${OTSCLIENT_CONTAINER}/stamp/${hash})
returncode=$?
trace_rc ${returncode}
trace "[request_ots_stamp] Stamping result=${result}"
if [ "${returncode}" -eq 0 ]; then
# jq -e will have a return code of 1 if the supplied tag is null.
errorstring=$(echo "${result}" | tr '\r\n' ' ' | jq -e ".error")
if [ "$?" -eq "0" ]; then
# Error tag not null, so there's an error
errorstring=$(echo "${errorstring}" | tr -d '"')
# If the error message is "Already exists"
trace "[request_ots_stamp] grepping 'already exists'..."
echo "${result}" | grep "already exists" > /dev/null
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then
# "already exists" found, let's try updating DB again
trace "[request_ots_stamp] was already requested to the OTS server... let's update the DB, looks like it didn't work on first try"
sql "UPDATE stamp SET requested=1 WHERE hash='${hash}'"
errorstring="Duplicate stamping request, hash already exists in DB and been OTS requested"
returncode=1
else
# If OTS CLIENT responded with an error, it is not down, it just can't stamp it. ABORT.
trace "[request_ots_stamp] Stamping error: ${errorstring}"
sql "DELETE FROM stamp WHERE hash='${hash}'"
returncode=1
fi
else
trace "[request_ots_stamp] Stamping request sent successfully!"
sql "UPDATE stamp SET requested=1 WHERE hash='${hash}'"
errorstring=""
returncode=0
fi
else
trace "[request_ots_stamp] Stamping error, will retry later: ${errorstring}"
errorstring=""
returncode=0
fi
echo "${errorstring}"
return ${returncode}
}
serve_ots_backoffice()
{
# What we want to do here:
# ========================
# Re-request the unrequested calls to ots_stamp
# Upgrade requested calls to ots_stamp that have not been called back yet
# Call back newly upgraded stamps
trace "Entering serve_ots_backoffice()..."
local result
local returncode
# Let's fetch all the incomplete stamping requests
local callbacks=$(sql 'SELECT hash, callbackUrl, requested, upgraded FROM stamp WHERE NOT calledback')
trace "[serve_ots_backoffice] callbacks=${callbacks}"
local url
local hash
local requested
local upgraded
local IFS=$'\n'
for row in ${callbacks}
do
trace "[serve_ots_backoffice] row=${row}"
hash=$(echo "${row}" | cut -d '|' -f1)
trace "[serve_ots_backoffice] hash=${hash}"
requested=$(echo "${row}" | cut -d '|' -f3)
trace "[serve_ots_backoffice] requested=${requested}"
upgraded=$(echo "${row}" | cut -d '|' -f4)
trace "[serve_ots_backoffice] upgraded=${upgraded}"
if [ "${requested}" -ne "1" ]; then
# Re-request the unrequested calls to ots_stamp
request_ots_stamp "${hash}"
returncode=$?
else
if [ "${upgraded}" -ne "1" ]; then
# Upgrade requested calls to ots_stamp that have not been called back yet
trace "[serve_ots_backoffice] curl -s ${OTSCLIENT_CONTAINER}/upgrade/${hash}"
result=$(curl -s ${OTSCLIENT_CONTAINER}/upgrade/${hash})
returncode=$?
trace_rc ${returncode}
trace "[serve_ots_backoffice] result=${result}"
if [ "${returncode}" -eq 0 ]; then
# CURL success... let's see if error in response
errorstring=$(echo "${result}" | tr '\r\n' ' ' | jq -e ".error")
if [ "$?" -eq "0" ]; then
# Error tag not null, so there's an error
trace "[serve_ots_backoffice] not upgraded!"
upgraded=0
else
# No failure, upgraded
trace "[serve_ots_backoffice] just upgraded!"
sql "UPDATE stamp SET upgraded=1 WHERE hash=\"${hash}\""
trace_rc $?
upgraded=1
fi
fi
fi
if [ "${upgraded}" -eq "1" ]; then
trace "[serve_ots_backoffice] upgraded! Let's call the callback..."
url=$(echo "${row}" | cut -d '|' -f2)
trace "[serve_ots_backoffice] url=${url}"
# Call back newly upgraded stamps
curl -H "X-Forwarded-Proto: https" ${url}
returncode=$?
trace_rc ${returncode}
if [ "${returncode}" -eq "0" ]; then
sql "UPDATE stamp SET calledback=1 WHERE hash=\"${hash}\""
trace_rc $?
fi
fi
fi
done
}
serve_ots_getfile()
{
trace "Entering serve_ots_getfile()..."
local hash=${1}
trace "[serve_ots_getfile] hash=${hash}"
binfile_response_to_client "/otsfiles/" "${hash}.ots"
returncode=$?
trace_rc ${returncode}
return ${returncode}
}
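# Flow sketch, reusing the example IP/port and callback URL from the comments in
# requesthandler.sh (they are illustrative, not defaults):
#   1) Client POSTs the hash:
#        curl -d '{"hash":"<64-hex>","callbackUrl":"192.168.111.233:1111/callbackUrl"}' http://192.168.111.152:8080/ots_stamp
#      -> a row is inserted and requested=1 once the OTS client accepts the stamp.
#   2) A periodic hit on /ots_backoffice runs serve_ots_backoffice, which re-requests
#      missed stamps, upgrades pending ones (upgraded=1) and calls back callbackUrl
#      once the proof is complete (calledback=1).
#   3) The client fetches the proof file with GET /ots_getfile/<hash>.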

View File

@@ -17,216 +17,238 @@
. ./walletoperations.sh
. ./bitcoin.sh
. ./call_lightningd.sh
. ./ots.sh
main()
{
trace "Entering main()..."
trace "Entering main()..."
local step=0
local cmd
local http_method
local line
local content_length
local response
local returncode
local step=0
local cmd
local http_method
local line
local content_length
local response
local returncode
while read line; do
line=$(echo "${line}" | tr -d '\r\n')
trace "[main] line=${line}"
while read line; do
line=$(echo "${line}" | tr -d '\r\n')
trace "[main] line=${line}"
if [ "${cmd}" = "" ]; then
# First line!
# Looking for something like:
# GET /cmd/params HTTP/1.1
# POST / HTTP/1.1
cmd=$(echo "${line}" | cut -d '/' -f2 | cut -d ' ' -f1)
trace "[main] cmd=${cmd}"
http_method=$(echo "${line}" | cut -d ' ' -f1)
trace "[main] http_method=${http_method}"
if [ "${http_method}" = "GET" ]; then
step=1
fi
fi
if [ "${line}" = "" ]; then
trace "[main] empty line"
if [ ${step} -eq 1 ]; then
trace "[main] body part finished, disconnecting"
break
else
trace "[main] headers part finished, body incoming"
step=1
fi
fi
# line=content-length: 406
case "${line}" in *[cC][oO][nN][tT][eE][nN][tT]-[lL][eE][nN][gG][tT][hH]*)
content_length=$(echo ${line} | cut -d ':' -f2)
trace "[main] content_length=${content_length}";
;;
esac
if [ ${step} -eq 1 ]; then
trace "[main] step=${step}"
if [ "${http_method}" = "POST" ]; then
read -n ${content_length} line
trace "[main] line=${line}"
fi
case "${cmd}" in
watch)
# POST http://192.168.111.152:8080/watch
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","unconfirmedCallbackURL":"192.168.111.233:1111/callback0conf","confirmedCallbackURL":"192.168.111.233:1111/callback1conf"}
if [ "${cmd}" = "" ]; then
# First line!
# Looking for something like:
# GET /cmd/params HTTP/1.1
# POST / HTTP/1.1
cmd=$(echo "${line}" | cut -d '/' -f2 | cut -d ' ' -f1)
trace "[main] cmd=${cmd}"
http_method=$(echo "${line}" | cut -d ' ' -f1)
trace "[main] http_method=${http_method}"
if [ "${http_method}" = "GET" ]; then
step=1
fi
fi
if [ "${line}" = "" ]; then
trace "[main] empty line"
if [ ${step} -eq 1 ]; then
trace "[main] body part finished, disconnecting"
break
else
trace "[main] headers part finished, body incoming"
step=1
fi
fi
# line=content-length: 406
case "${line}" in *[cC][oO][nN][tT][eE][nN][tT]-[lL][eE][nN][gG][tT][hH]*)
content_length=$(echo ${line} | cut -d ':' -f2)
trace "[main] content_length=${content_length}";
;;
esac
if [ ${step} -eq 1 ]; then
trace "[main] step=${step}"
if [ "${http_method}" = "POST" ]; then
read -n ${content_length} line
trace "[main] line=${line}"
fi
case "${cmd}" in
watch)
# POST http://192.168.111.152:8080/watch
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","unconfirmedCallbackURL":"192.168.111.233:1111/callback0conf","confirmedCallbackURL":"192.168.111.233:1111/callback1conf"}
response=$(watchrequest "${line}")
response_to_client "${response}" ${?}
break
;;
unwatch)
# curl (GET) 192.168.111.152:8080/unwatch/2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp
response=$(watchrequest "${line}")
response_to_client "${response}" ${?}
break
;;
unwatch)
# curl (GET) 192.168.111.152:8080/unwatch/2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp
response=$(unwatchrequest "${line}")
response_to_client "${response}" ${?}
break
;;
getactivewatches)
# curl (GET) 192.168.111.152:8080/getactivewatches
response=$(unwatchrequest "${line}")
response_to_client "${response}" ${?}
break
;;
getactivewatches)
# curl (GET) 192.168.111.152:8080/getactivewatches
response=$(getactivewatches)
response_to_client "${response}" ${?}
break
;;
conf)
# curl (GET) 192.168.111.152:8080/conf/b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387
response=$(getactivewatches)
response_to_client "${response}" ${?}
break
;;
conf)
# curl (GET) 192.168.111.152:8080/conf/b081ca7724386f549cf0c16f71db6affeb52ff7a0d9b606fb2e5c43faffd3387
response=$(confirmation_request "${line}")
response_to_client "${response}" ${?}
break
;;
getbestblockhash)
# curl (GET) http://192.168.111.152:8080/getbestblockhash
response=$(confirmation_request "${line}")
response_to_client "${response}" ${?}
break
;;
getbestblockhash)
# curl (GET) http://192.168.111.152:8080/getbestblockhash
response=$(get_best_block_hash)
response_to_client "${response}" ${?}
break
;;
getblockinfo)
# curl (GET) http://192.168.111.152:8080/getblockinfo/000000006f82a384c208ecfa04d05beea02d420f3f398ddda5c7f900de5718ea
response=$(get_best_block_hash)
response_to_client "${response}" ${?}
break
;;
getblockinfo)
# curl (GET) http://192.168.111.152:8080/getblockinfo/000000006f82a384c208ecfa04d05beea02d420f3f398ddda5c7f900de5718ea
response=$(get_block_info $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?}
break
;;
gettransaction)
# curl (GET) http://192.168.111.152:8080/gettransaction/af867c86000da76df7ddb1054b273ca9e034e8c89d049b5b2795f9f590f67648
response=$(get_block_info $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?}
break
;;
gettransaction)
# curl (GET) http://192.168.111.152:8080/gettransaction/af867c86000da76df7ddb1054b273ca9e034e8c89d049b5b2795f9f590f67648
response=$(get_rawtransaction $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?}
break
;;
getbestblockinfo)
# curl (GET) http://192.168.111.152:8080/getbestblockinfo
response=$(get_rawtransaction $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?}
break
;;
getbestblockinfo)
# curl (GET) http://192.168.111.152:8080/getbestblockinfo
response=$(get_best_block_info)
response_to_client "${response}" ${?}
break
;;
executecallbacks)
# curl (GET) http://192.168.111.152:8080/executecallbacks
response=$(get_best_block_info)
response_to_client "${response}" ${?}
break
;;
executecallbacks)
# curl (GET) http://192.168.111.152:8080/executecallbacks
manage_not_imported
manage_missed_conf
response=$(do_callbacks)
response_to_client "${response}" ${?}
break
;;
getbalance)
# curl (GET) http://192.168.111.152:8080/getbalance
manage_not_imported
manage_missed_conf
response=$(do_callbacks)
response_to_client "${response}" ${?}
break
;;
getbalance)
# curl (GET) http://192.168.111.152:8080/getbalance
response=$(getbalance)
response_to_client "${response}" ${?}
break
;;
getnewaddress)
# curl (GET) http://192.168.111.152:8080/getnewaddress
response=$(getbalance)
response_to_client "${response}" ${?}
break
;;
getnewaddress)
# curl (GET) http://192.168.111.152:8080/getnewaddress
response=$(getnewaddress)
response_to_client "${response}" ${?}
break
;;
spend)
# POST http://192.168.111.152:8080/spend
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233}
response=$(getnewaddress)
response_to_client "${response}" ${?}
break
;;
spend)
# POST http://192.168.111.152:8080/spend
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233}
response=$(spend "${line}")
response_to_client "${response}" ${?}
break
;;
addtobatch)
# POST http://192.168.111.152:8080/addtobatch
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233}
response=$(spend "${line}")
response_to_client "${response}" ${?}
break
;;
addtobatch)
# POST http://192.168.111.152:8080/addtobatch
# BODY {"address":"2N8DcqzfkYi8CkYzvNNS5amoq3SbAcQNXKp","amount":0.00233}
response=$(addtobatching $(echo "${line}" | jq ".address" | tr -d '"') $(echo "${line}" | jq ".amount"))
response_to_client "${response}" ${?}
break
;;
batchspend)
# GET http://192.168.111.152:8080/batchspend
response=$(addtobatching $(echo "${line}" | jq ".address" | tr -d '"') $(echo "${line}" | jq ".amount"))
response_to_client "${response}" ${?}
break
;;
batchspend)
# GET http://192.168.111.152:8080/batchspend
response=$(batchspend "${line}")
response_to_client "${response}" ${?}
break
;;
deriveindex)
# curl GET http://192.168.111.152:8080/deriveindex/25-30
# curl GET http://192.168.111.152:8080/deriveindex/34
response=$(batchspend "${line}")
response_to_client "${response}" ${?}
break
;;
deriveindex)
# curl GET http://192.168.111.152:8080/deriveindex/25-30
# curl GET http://192.168.111.152:8080/deriveindex/34
response=$(deriveindex $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?}
break
;;
derivepubpath)
# POST http://192.168.111.152:8080/derivepubpath
# BODY {"pub32":"tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk","path":"0/25-30"}
# BODY {"pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/25-30"}
# BODY {"pub32":"vpub5SLqN2bLY4WeZF3kL4VqiWF1itbf3A6oRrq9aPf16AZMVWYCuN9TxpAZwCzVgW94TNzZPNc9XAHD4As6pdnExBtCDGYRmNJrcJ4eV9hNqcv","path":"0/25-30"}
response=$(deriveindex $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3))
response_to_client "${response}" ${?}
break
;;
derivepubpath)
# POST http://192.168.111.152:8080/derivepubpath
# BODY {"pub32":"tpubD6NzVbkrYhZ4YR3QK2tyfMMvBghAvqtNaNK1LTyDWcRHLcMUm3ZN2cGm5BS3MhCRCeCkXQkTXXjiJgqxpqXK7PeUSp86DTTgkLpcjMtpKWk","path":"0/25-30"}
# BODY {"pub32":"upub5GtUcgGed1aGH4HKQ3vMYrsmLXwmHhS1AeX33ZvDgZiyvkGhNTvGd2TA5Lr4v239Fzjj4ZY48t6wTtXUy2yRgapf37QHgt6KWEZ6bgsCLpb","path":"0/25-30"}
# BODY {"pub32":"vpub5SLqN2bLY4WeZF3kL4VqiWF1itbf3A6oRrq9aPf16AZMVWYCuN9TxpAZwCzVgW94TNzZPNc9XAHD4As6pdnExBtCDGYRmNJrcJ4eV9hNqcv","path":"0/25-30"}
response=$(send_to_pycoin "${line}")
response_to_client "${response}" ${?}
break
;;
ln_getinfo)
# GET http://192.168.111.152:8080/ln_getinfo
response=$(send_to_pycoin "${line}")
response_to_client "${response}" ${?}
break
;;
ln_getinfo)
# GET http://192.168.111.152:8080/ln_getinfo
response=$(ln_getinfo)
response_to_client "${response}" ${?}
break
;;
ln_create_invoice)
# POST http://192.168.111.152:8080/ln_create_invoice
# BODY {"msatoshi":"10000","label":"koNCcrSvhX3dmyFhW","description":"Bylls order #10649","expiry":"900"}
response=$(ln_getinfo)
response_to_client "${response}" ${?}
break
;;
ln_create_invoice)
# POST http://192.168.111.152:8080/ln_create_invoice
# BODY {"msatoshi":"10000","label":"koNCcrSvhX3dmyFhW","description":"Bylls order #10649","expiry":"900"}
response=$(ln_create_invoice "${line}")
response_to_client "${response}" ${?}
break
;;
ln_pay)
# POST http://192.168.111.152:8080/ln_pay
# BODY {"bolt11":"lntb1pdca82tpp5gv8mn5jqlj6xztpnt4r472zcyrwf3y2c3cvm4uzg2gqcnj90f83qdp2gf5hgcm0d9hzqnm4w3kx2apqdaexgetjyq3nwvpcxgcqp2g3d86wwdfvyxcz7kce7d3n26d2rw3wf5tzpm2m5fl2z3mm8msa3xk8nv2y32gmzlhwjved980mcmkgq83u9wafq9n4w28amnmwzujgqpmapcr3","expected_msatoshi":"10000","expected_description":"Bitcoin Outlet order #7082"}
response=$(ln_create_invoice "${line}")
response_to_client "${response}" ${?}
break
;;
ln_pay)
# POST http://192.168.111.152:8080/ln_pay
# BODY {"bolt11":"lntb1pdca82tpp5gv8mn5jqlj6xztpnt4r472zcyrwf3y2c3cvm4uzg2gqcnj90f83qdp2gf5hgcm0d9hzqnm4w3kx2apqdaexgetjyq3nwvpcxgcqp2g3d86wwdfvyxcz7kce7d3n26d2rw3wf5tzpm2m5fl2z3mm8msa3xk8nv2y32gmzlhwjved980mcmkgq83u9wafq9n4w28amnmwzujgqpmapcr3","expected_msatoshi":"10000","expected_description":"Bitcoin Outlet order #7082"}
response=$(ln_pay "${line}")
response_to_client "${response}" ${?}
break
;;
ln_newaddr)
# GET http://192.168.111.152:8080/ln_newaddr
response=$(ln_pay "${line}")
response_to_client "${response}" ${?}
break
;;
ln_newaddr)
# GET http://192.168.111.152:8080/ln_newaddr
response=$(ln_newaddr)
response_to_client "${response}" ${?}
break
;;
esac
break
fi
done
trace "[main] exiting"
return 0
response=$(ln_newaddr)
response_to_client "${response}" ${?}
break
;;
ots_stamp)
# POST http://192.168.111.152:8080/ots_stamp
# BODY {"hash":"1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7","callbackUrl":"192.168.111.233:1111/callbackUrl"}
response=$(serve_ots_stamp "${line}")
response_to_client "${response}" ${?}
break
;;
ots_backoffice)
# curl (GET) http://192.168.111.152:8080/ots_backoffice
response=$(serve_ots_backoffice)
response_to_client "${response}" ${?}
break
;;
ots_getfile)
# curl (GET) http://192.168.111.152:8080/ots_getfile/1ddfb769eb0b8876bc570e25580e6a53afcf973362ee1ee4b54a807da2e5eed7
serve_ots_getfile $(echo "${line}" | cut -d ' ' -f2 | cut -d '/' -f3)
break
;;
esac
break
fi
done
trace "[main] exiting"
return 0
}
export NODE_RPC_URL=$BTC_NODE_RPC_URL

View File

@@ -4,18 +4,71 @@
response_to_client()
{
trace "Entering response_to_client()..."
trace "Entering response_to_client()..."
local response=${1}
local returncode=${2}
local response=${1}
local returncode=${2}
local contenttype=${3}
([ -z "${returncode}" ] || [ "${returncode}" -eq "0" ]) && echo -ne "HTTP/1.1 200 OK\r\n"
[ -n "${returncode}" ] && [ "${returncode}" -ne "0" ] && echo -ne "HTTP/1.1 400 Bad Request\r\n"
[ -z "${contenttype}" ] && contenttype="application/json"
echo -en "Content-Type: application/json\r\nContent-Length: ${#response}\r\n\r\n${response}"
([ -z "${returncode}" ] || [ "${returncode}" -eq "0" ]) && echo -ne "HTTP/1.1 200 OK\r\n"
[ -n "${returncode}" ] && [ "${returncode}" -ne "0" ] && echo -ne "HTTP/1.1 400 Bad Request\r\n"
# Small delay needed for the data to be processed correctly by peer
sleep 0.2s
echo -en "Content-Type: ${contenttype}\r\nContent-Length: ${#response}\r\n\r\n${response}"
# Small delay needed for the data to be processed correctly by peer
sleep 0.5s
}
htmlfile_response_to_client()
{
trace "Entering htmlfile_response_to_client()..."
local path=${1}
local filename=${2}
local pathfile="${path}${filename}"
local returncode
trace "[htmlfile_response_to_client] path=${path}"
trace "[htmlfile_response_to_client] filename=${filename}"
trace "[htmlfile_response_to_client] pathfile=${pathfile}"
local file_length=$(stat -c'%s' ${pathfile})
trace "[htmlfile_response_to_client] file_length=${file_length}"
[ -r "${pathfile}" ] \
&& echo -ne "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nContent-Length: ${file_length}\r\n\r\n" \
&& cat ${pathfile}
[ ! -r "${pathfile}" ] && echo -ne "HTTP/1.1 404 Not Found\r\n"
# Small delay needed for the data to be processed correctly by peer
sleep 0.5s
}
binfile_response_to_client()
{
trace "Entering binfile_response_to_client()..."
local path=${1}
local filename=${2}
local pathfile="${path}${filename}"
local returncode
trace "[file_response_to_client] path=${path}"
trace "[file_response_to_client] filename=${filename}"
trace "[file_response_to_client] pathfile=${pathfile}"
local file_length=$(stat -c'%s' ${pathfile})
trace "[file_response_to_client] file_length=${file_length}"
[ -r "${pathfile}" ] \
&& echo -ne "HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\nContent-Disposition: inline; filename=\"${filename}\"\r\nContent-Length: ${file_length}\r\n\r\n" \
&& cat ${pathfile}
[ ! -r "${pathfile}" ] && echo -ne "HTTP/1.1 404 Not Found\r\n"
# Small delay needed for the data to be processed correctly by peer
sleep 0.5s
}
case "${0}" in *responsetoclient.sh) response_to_client $@;; esac

View File

@@ -5,7 +5,7 @@
send_to_watcher_node()
{
trace "Entering send_to_watcher_node()..."
send_to_bitcoin_node ${WATCHER_NODE_RPC_URL} watcher_btcnode_curlcfg.properties $@
send_to_bitcoin_node ${WATCHER_NODE_RPC_URL} ${WATCHER_NODE_RPC_CFG} $@
local returncode=$?
trace_rc ${returncode}
return ${returncode}
@@ -14,7 +14,7 @@ send_to_watcher_node()
send_to_spender_node()
{
trace "Entering send_to_spender_node()..."
send_to_bitcoin_node ${SPENDER_NODE_RPC_URL} spender_btcnode_curlcfg.properties $@
send_to_bitcoin_node ${SPENDER_NODE_RPC_URL} ${SPENDER_NODE_RPC_CFG} $@
local returncode=$?
trace_rc ${returncode}
return ${returncode}
@@ -27,11 +27,11 @@ send_to_bitcoin_node()
local result
local errorstring
local node_url=${1}
local configfile=${2}
local config=${2}
local data=${3}
trace "[send_to_bitcoin_node] curl -s --config ${configfile} -H \"Content-Type: application/json\" -d \"${data}\" ${node_url}"
result=$(curl -s --config ${configfile} -H "Content-Type: application/json" -d "${data}" ${node_url})
trace "[send_to_bitcoin_node] curl -s --user ${user} -H \"Content-Type: application/json\" -d \"${data}\" ${node_url}"
result=$(curl -s --config ${config} -H "Content-Type: application/json" -d "${data}" ${node_url})
returncode=$?
trace_rc ${returncode}
trace "[send_to_bitcoin_node] result=${result}"

View File

@@ -31,10 +31,17 @@ createCurlConfig() {
}
if [ ! -e ${DB_FILE} ]; then
echo "DB not found, creating..." 1>&2
cat watching.sql | sqlite3 $DB_FILE
echo "DB not found, creating..."
cat cyphernode.sql | sqlite3 $DB_FILE
else
echo "DB found, migrating..."
for script in sqlmigrate*.sh; do
sh $script
done
fi
chmod 0600 $DB_FILE
createCurlConfig ${WATCHER_BTC_NODE_RPC_CFG} ${WATCHER_BTC_NODE_RPC_USER}
createCurlConfig ${SPENDER_BTC_NODE_RPC_CFG} ${SPENDER_BTC_NODE_RPC_USER}

Some files were not shown because too many files have changed in this diff.