Make swarm single host mode explicit

SKP
2020-01-20 13:05:35 +01:00
committed by kexkey
parent bb80a6ea96
commit f5009806bb
2 changed files with 182 additions and 60 deletions
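The change has two halves: the compose template gains an explicit swarm branch (a deploy: block with replicas, a placement constraint on node labels, restart_policy and update_config) beside the existing compose branch (restart: always), and dist/setup.sh labels the single swarm manager so those constraints can be satisfied. A minimal sketch of the single-host flow this encodes, assuming a rendered file named docker-compose.yaml and a stack name cyphernode (both illustrative, not taken from the commit):

```bash
# Single-host swarm flow made explicit by this commit (file and stack names are illustrative)
docker swarm init --task-history-limit 1                     # this host becomes the only manager
nodeid=$(docker node ls -f role=manager --format '{{.ID}}')  # same query setup.sh uses below
docker node update --label-add io.cyphernode.core=true "${nodeid}"
docker stack deploy -c docker-compose.yaml cyphernode
# every service constrained with "node.labels.io.cyphernode.core == true"
# is scheduled onto this single, labeled node
```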


@@ -22,12 +22,20 @@ services:
networks:
- cyphernodenet
- cyphernodeappsnet
<% if (docker_mode == 'compose') { %>
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
<% } %>
<% if ( features.indexOf('lightning') !== -1 && lightning_implementation === 'c-lightning' ) { %>
@@ -54,16 +62,24 @@ services:
stop_grace_period: 30s
networks:
- cyphernodenet
<% if (docker_mode == 'compose') { %>
restart: always
<% } %>
<% if ( features.indexOf('tor') !== -1 ) { %>
depends_on:
- tor
<% } %>
# deploy: --lightning-dir=/.lightning
# placement:
# constraints: [node.hostname==dev]
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
<% } %>
<% if( bitcoin_mode === 'internal' ) { %>
@@ -93,16 +109,24 @@ services:
stop_grace_period: 30s
networks:
- cyphernodenet
<% if (docker_mode == 'compose') { %>
restart: always
<% } %>
<% if ( features.indexOf('tor') !== -1 ) { %>
depends_on:
- tor
<% } %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
<% } %>
##########################
@@ -153,12 +177,20 @@ services:
<% } %>
networks:
- cyphernodenet
<% if (docker_mode == 'compose') { %>
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
##########################
# PROXYCRON #
@@ -171,14 +203,22 @@ services:
- "OTS_URL=proxy:8888/ots_backoffice"
networks:
- cyphernodenet
<% if (docker_mode == 'compose') { %>
restart: always
<% } %>
depends_on:
- proxy
# deploy:
# placement:
# constraints: [node.hostname==dev]
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
##########################
# BROKER #
@@ -189,12 +229,20 @@ services:
networks:
- cyphernodenet
- cyphernodeappsnet
<% if (docker_mode == 'compose') { %>
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
##########################
# NOTIFIER #
@@ -213,14 +261,22 @@ services:
networks:
- cyphernodenet
- cyphernodeappsnet
<% if (docker_mode == 'compose') { %>
restart: always
<% } %>
depends_on:
- broker
# deploy:
# placement:
# constraints: [node.hostname==dev]
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
##########################
# PYCOIN #
@@ -240,12 +296,20 @@ services:
- "<%= logs_datapath %>:/cnlogs"
networks:
- cyphernodenet
<% if (docker_mode == 'compose') { %>
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
<% if ( features.indexOf('otsclient') !== -1 ) { %>
##########################
@@ -272,12 +336,20 @@ services:
command: $USER /script/startotsclient.sh
networks:
- cyphernodenet
<% if (docker_mode == 'compose') { %>
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
# deploy:
# placement:
# constraints: [node.hostname==dev]
<% } %>
##########################
@@ -308,14 +380,22 @@ services:
networks:
- cyphernodenet
- cyphernodeappsnet
<% if (docker_mode == 'compose') { %>
restart: always
<% } %>
depends_on:
- proxy
# deploy:
# placement:
# constraints: [node.hostname==dev]
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.core == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
##########################
# TRAEFIK #
@@ -333,14 +413,22 @@ services:
- "<%= traefik_datapath%>/htpasswd:/htpasswd/htpasswd:ro"
networks:
- cyphernodeappsnet
<% if (docker_mode == 'compose') { %>
restart: always
<% } %>
depends_on:
- gatekeeper
# deploy:
# placement:
# constraints: [node.hostname==dev]
<% if ( docker_mode === 'swarm' ) { %>
deploy:
replicas: 1
placement:
constraints:
- node.labels.io.cyphernode.infra == true
restart_policy:
condition: "any"
delay: 1s
update_config:
parallelism: 1
<% } else { %>
restart: always
<% } %>
volumes:
container_monitor:
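
For orientation: plain docker-compose generally ignores the swarm-only deploy: section, and docker stack deploy ignores restart:, which is why the template now emits exactly one of the two branches per service. A hedged sketch of how the rendered file is consumed in each mode (file and stack names assumed, as above):

```bash
# compose mode: the "<% } else { %>" branch renders "restart: always"
docker-compose -f docker-compose.yaml up -d

# swarm mode: the "deploy:" branch (replicas, placement, restart_policy, update_config)
# is honored instead of "restart:"
docker stack deploy -c docker-compose.yaml cyphernode
```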

dist/setup.sh

@@ -538,10 +538,44 @@ install_docker() {
docker swarm join-token worker > /dev/null 2>&1
local noSwarm=$?;
if [[ $DOCKER_MODE == 'swarm' && $noSwarm == 1 ]]; then
step " init docker swarm"
try docker swarm init --task-history-limit 1 > /dev/null 2>&1
next
if [[ $DOCKER_MODE == 'swarm' ]]; then
if [[ $noSwarm == 1 ]]; then
step " init docker swarm"
try docker swarm init --task-history-limit 1 > /dev/null 2>&1
next
fi
local nodeid
nodeid=$(docker node ls -f role=manager --format="{{.ID}}")
# We only support swarm in single-host mode, so all labels needed to spawn containers in the swarm
# are given to the swarm manager.
# The io.cyphernode.apps label can later be moved to a different node;
# apps that rely on volumes shared with core components use the io.cyphernode.clingyapps label instead.
if [[ $(docker node inspect ${nodeid} --format '{{ index .Spec.Labels "io.cyphernode.core" }}') != "true" ]]; then
step "  [32madd docker node label: io.cyphernode.core"
try docker node update --label-add io.cyphernode.core=true ${nodeid} > /dev/null 2>&1
next
fi
if [[ $(docker node inspect ${nodeid} --format '{{ index .Spec.Labels "io.cyphernode.infra" }}') != "true" ]]; then
step "  [32madd docker node label: io.cyphernode.infra"
try docker node update --label-add io.cyphernode.infra=true ${nodeid} > /dev/null 2>&1
next
fi
if [[ $(docker node inspect ${nodeid} --format '{{ index .Spec.Labels "io.cyphernode.apps" }}') != "true" ]]; then
step "  [32madd docker node label: io.cyphernode.apps"
try docker node update --label-add io.cyphernode.apps=true ${nodeid} > /dev/null 2>&1
next
fi
if [[ $(docker node inspect ${nodeid} --format '{{ index .Spec.Labels "io.cyphernode.clingyapps" }}') != "true" ]]; then
step "  [32madd docker node label: io.cyphernode.clingyapps"
try docker node update --label-add io.cyphernode.clingyapps=true ${nodeid} > /dev/null 2>&1
next
fi
fi
local net_entry=$(docker network ls | grep cyphernodenet);
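
Once setup.sh has run, the labels it applies can be verified on the manager node. A small sketch using the label keys from the script above; the JSON output and the second node ID are illustrative:

```bash
# Show which io.cyphernode.* labels are set on the (single) manager node
nodeid=$(docker node ls -f role=manager --format '{{.ID}}')
docker node inspect "${nodeid}" --format '{{ json .Spec.Labels }}'
# e.g. {"io.cyphernode.apps":"true","io.cyphernode.clingyapps":"true","io.cyphernode.core":"true","io.cyphernode.infra":"true"}

# Moving io.cyphernode.apps to another node later, as the comments above allow
# (purely illustrative; <other-node-id> is a placeholder)
docker node update --label-rm io.cyphernode.apps "${nodeid}"
docker node update --label-add io.cyphernode.apps=true <other-node-id>
```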