Merge branch 'dev' into releases/v0.8.0

This commit is contained in:
kexkey
2022-01-18 11:26:05 -05:00
2 changed files with 33 additions and 12 deletions

11
dist/setup.sh vendored
View File

@@ -127,7 +127,7 @@ sudo_if_required() {
} }
modify_permissions() { modify_permissions() {
local directories=("$current_path/apps" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
for d in "${directories[@]}" for d in "${directories[@]}"
do do
if [[ -e $d ]]; then if [[ -e $d ]]; then
@@ -139,7 +139,7 @@ modify_permissions() {
} }
modify_owner() { modify_owner() {
local directories=("$current_path/apps" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER) local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER)
for d in "${directories[@]}" for d in "${directories[@]}"
do do
@@ -669,7 +669,7 @@ install_docker() {
check_directory_owner() { check_directory_owner() {
# if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0 # if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0
local directories=("$current_path/apps" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local status=0 local status=0
for d in "${directories[@]}" for d in "${directories[@]}"
do do
@@ -773,7 +773,7 @@ sanity_checks_pre_install() {
if [[ $sudo_reason == 'directories' ]]; then if [[ $sudo_reason == 'directories' ]]; then
echo " or check your data volumes if they have the right owner." echo " or check your data volumes if they have the right owner."
echo " The owner of the following folders should be '$RUN_AS_USER':" echo " The owner of the following folders should be '$RUN_AS_USER':"
local directories=("$current_path/apps" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH")
local status=0 local status=0
for d in "${directories[@]}" for d in "${directories[@]}"
do do
@@ -797,8 +797,7 @@ install_apps() {
local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER) local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER)
local apps_repo="https://github.com/SatoshiPortal/cypherapps.git" local apps_repo="https://github.com/SatoshiPortal/cypherapps.git"
echo " clone $apps_repo into apps" echo " clone $apps_repo into apps"
docker run --rm -u $user -v "$current_path":/git --entrypoint git cyphernode/cyphernodeconf:$CONF_VERSION clone --single-branch -b ${CYPHERAPPS_VERSION} "$apps_repo" /git/apps > /dev/null 2>&1 docker run --rm -v "$current_path":/git --entrypoint sh cyphernode/cyphernodeconf:$CONF_VERSION -c "git clone --single-branch -b ${CYPHERAPPS_VERSION} \"$apps_repo\" /git/apps > /dev/null 2>&1 ; chown -R $user /git/apps"
next
fi fi
if [[ $FEATURE_LIGHTNING == true ]]; then if [[ $FEATURE_LIGHTNING == true ]]; then

View File

@@ -23,15 +23,33 @@ createCurlConfig() {
} }
# If the file .dbfailed exists, it means we previously failed to process DB migrations.
# Sometimes, depending on timing, a migration fails but it doesn't mean it's corrupted.
# It may be a container that was not accessible for a short period of time, for example.
# So we'll try up to MAX_ATTEMPTS times before concluding in failure.
# For this to work, we'll put the number of attempts in the .dbfailed file.
MAX_ATTEMPTS=5
nb_attempts=1
if [ -e ${DB_PATH}/.dbfailed ]; then if [ -e ${DB_PATH}/.dbfailed ]; then
n=$(cat ${DB_PATH}/.dbfailed)
nb_attempts=$((n+1))
fi
if [ "${nb_attempts}" -gt "${MAX_ATTEMPTS}" ]; then
touch /container_monitor/proxy_dbfailed touch /container_monitor/proxy_dbfailed
trace "[startproxy] A previous database creation/migration failed. Stopping." trace "[startproxy] Too many database creation/migration failed attempts. Failed attempts = ${nb_attempts}."
trace "[startproxy] A file called .dbfailed has been created. Fix the migration errors, remove .dbfailed and retry." trace "[startproxy] A file called .dbfailed has been created in your proxy datapath. Fix the migration errors, remove .dbfailed and retry."
trace "[startproxy] Check your log files, especially postgres."
trace "[startproxy] Exiting." trace "[startproxy] Exiting."
sleep 30 sleep 30
exit 1 exit 1
else else
rm -f /container_monitor/proxy_dbfailed if [ "${nb_attempts}" -gt "1" ]; then
trace "[startproxy] Current database creation/migration attempt = ${nb_attempts}. Retrying..."
fi
fi fi
trace "[startproxy] Waiting for PostgreSQL to be ready..." trace "[startproxy] Waiting for PostgreSQL to be ready..."
@@ -57,17 +75,21 @@ else
fi fi
if [ "${returncode}" -ne "0" ]; then if [ "${returncode}" -ne "0" ]; then
touch ${DB_PATH}/.dbfailed echo -n "${nb_attempts}" > ${DB_PATH}/.dbfailed
touch /container_monitor/proxy_dbfailed trace "[startproxy] Database creation/migration failed. We will retry ${MAX_ATTEMPTS} times."
trace "[startproxy] Database creation/migration failed. Stopping."
trace "[startproxy] A file called .dbfailed has been created in your proxy datapath. Fix the migration errors, remove .dbfailed and retry." trace "[startproxy] A file called .dbfailed has been created in your proxy datapath. Fix the migration errors, remove .dbfailed and retry."
trace "[startproxy] Check your log files, especially postgres."
trace "[startproxy] Exiting." trace "[startproxy] Exiting."
sleep 30 sleep 30
exit ${returncode} exit ${returncode}
fi fi
# /container_monitor/proxy_ready will be created by Docker's health check
rm -f /container_monitor/proxy_ready rm -f /container_monitor/proxy_ready
rm -f /container_monitor/proxy_dbfailed
rm -f ${DB_PATH}/.dbfailed
chmod 0600 $DB_FILE chmod 0600 $DB_FILE
createCurlConfig ${WATCHER_BTC_NODE_RPC_CFG} ${WATCHER_BTC_NODE_RPC_USER} createCurlConfig ${WATCHER_BTC_NODE_RPC_CFG} ${WATCHER_BTC_NODE_RPC_USER}