diff --git a/dist/setup.sh b/dist/setup.sh index 352ec5a..2d51cde 100755 --- a/dist/setup.sh +++ b/dist/setup.sh @@ -127,7 +127,7 @@ sudo_if_required() { } modify_permissions() { - local directories=("$current_path/apps" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") + local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") for d in "${directories[@]}" do if [[ -e $d ]]; then @@ -139,7 +139,7 @@ modify_permissions() { } modify_owner() { - local directories=("$current_path/apps" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") + local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$OTSCLIENT_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER) for d in "${directories[@]}" do @@ -669,7 +669,7 @@ install_docker() { check_directory_owner() { # if one directory does not have access rights for $RUN_AS_USER, we echo 1, else we echo 0 - local directories=("$current_path/apps" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") + local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local status=0 for d in "${directories[@]}" do @@ -773,7 +773,7 @@ sanity_checks_pre_install() { if [[ $sudo_reason == 'directories' ]]; then echo " or check your data volumes if they have the right owner." 
echo " The owner of the following folders should be '$RUN_AS_USER':" - local directories=("$current_path/apps" "$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") + local directories=("$BITCOIN_DATAPATH" "$LIGHTNING_DATAPATH" "$PROXY_DATAPATH" "$GATEKEEPER_DATAPATH" "$POSTGRES_DATAPATH" "$LOGS_DATAPATH" "$TRAEFIK_DATAPATH" "$TOR_DATAPATH") local status=0 for d in "${directories[@]}" do @@ -797,8 +797,7 @@ install_apps() { local user=$(id -u $RUN_AS_USER):$(id -g $RUN_AS_USER) local apps_repo="https://github.com/SatoshiPortal/cypherapps.git" echo " clone $apps_repo into apps" - docker run --rm -u $user -v "$current_path":/git --entrypoint git cyphernode/cyphernodeconf:$CONF_VERSION clone --single-branch -b ${CYPHERAPPS_VERSION} "$apps_repo" /git/apps > /dev/null 2>&1 - next + docker run --rm -v "$current_path":/git --entrypoint sh cyphernode/cyphernodeconf:$CONF_VERSION -c "git clone --single-branch -b ${CYPHERAPPS_VERSION} \"$apps_repo\" /git/apps > /dev/null 2>&1 ; chown -R $user /git/apps" fi if [[ $FEATURE_LIGHTNING == true ]]; then diff --git a/proxy_docker/app/script/startproxy.sh b/proxy_docker/app/script/startproxy.sh index 3ba6b5d..793ceb3 100644 --- a/proxy_docker/app/script/startproxy.sh +++ b/proxy_docker/app/script/startproxy.sh @@ -23,15 +23,33 @@ createCurlConfig() { } +# If the file .dbfailed exists, it means we previously failed to process DB migrations. +# Sometimes, depending on timing, a migration fails but it doesn't mean it's corrupted. +# It may be a container that was not accessible for a short period of time, for example. +# So we'll try up to MAX_ATTEMPTS times before concluding in failure. + +# For this to work, we'll put the number of attempts in the .dbfailed file. 
+ +MAX_ATTEMPTS=5 + +nb_attempts=1 if [ -e ${DB_PATH}/.dbfailed ]; then + n=$(cat ${DB_PATH}/.dbfailed) + nb_attempts=$((n+1)) +fi + +if [ "${nb_attempts}" -gt "${MAX_ATTEMPTS}" ]; then touch /container_monitor/proxy_dbfailed - trace "[startproxy] A previous database creation/migration failed. Stopping." - trace "[startproxy] A file called .dbfailed has been created. Fix the migration errors, remove .dbfailed and retry." + trace "[startproxy] Too many database creation/migration failed attempts. Failed attempts = ${nb_attempts}." + trace "[startproxy] A file called .dbfailed has been created in your proxy datapath. Fix the migration errors, remove .dbfailed and retry." + trace "[startproxy] Check your log files, especially postgres." trace "[startproxy] Exiting." sleep 30 exit 1 else - rm -f /container_monitor/proxy_dbfailed + if [ "${nb_attempts}" -gt "1" ]; then + trace "[startproxy] Current database creation/migration attempt = ${nb_attempts}. Retrying..." + fi fi trace "[startproxy] Waiting for PostgreSQL to be ready..." @@ -57,17 +75,21 @@ else fi if [ "${returncode}" -ne "0" ]; then - touch ${DB_PATH}/.dbfailed - touch /container_monitor/proxy_dbfailed - trace "[startproxy] Database creation/migration failed. Stopping." + echo -n "${nb_attempts}" > ${DB_PATH}/.dbfailed + trace "[startproxy] Database creation/migration failed. We will retry ${MAX_ATTEMPTS} times." trace "[startproxy] A file called .dbfailed has been created in your proxy datapath. Fix the migration errors, remove .dbfailed and retry." + trace "[startproxy] Check your log files, especially postgres." trace "[startproxy] Exiting." sleep 30 exit ${returncode} fi +# /container_monitor/proxy_ready will be created by Docker's health check rm -f /container_monitor/proxy_ready +rm -f /container_monitor/proxy_dbfailed +rm -f ${DB_PATH}/.dbfailed + chmod 0600 $DB_FILE createCurlConfig ${WATCHER_BTC_NODE_RPC_CFG} ${WATCHER_BTC_NODE_RPC_USER}