diff --git a/.docker/nginx/Dockerfile b/.docker/nginx/Dockerfile index b79664a..2a6ddaf 100644 --- a/.docker/nginx/Dockerfile +++ b/.docker/nginx/Dockerfile @@ -14,20 +14,14 @@ RUN if getent group $shared_volume_group_id ; then \ groupadd -g $shared_volume_group_id $shared_volume_group_name && usermod -aG $shared_volume_group_name nginx; \ fi -RUN wget --no-check-certificate https://releases.hashicorp.com/consul-template/0.19.3/consul-template_0.19.3_linux_amd64.zip && \ - unzip -d /usr/local/bin consul-template_0.19.3_linux_amd64.zip && \ - rm -rf consul-template_0.19.3_linux_amd64.zip - COPY ./.docker/nginx/template/entrypoint.sh / COPY ./.docker/nginx/template/nginx.service /etc/service/nginx/run/ -COPY ./.docker/nginx/template/consul-template.service /etc/service/consul-template/run/ COPY ./.docker/nginx/template/nginx.conf.main /etc/nginx/nginx.conf -COPY ./.docker/nginx/template/ctmpl /ctmpl +COPY ./.docker/nginx/template/conf.d /conf.d COPY ./.docker/nginx/template/logrotate/nginx /etc/logrotate.d/nginx -RUN chmod +x /etc/service/nginx/run && chmod +x /etc/service/consul-template/run -RUN sed -i -e 's/\r$//' /etc/service/consul-template/run/consul-template.service +RUN chmod +x /etc/service/nginx/run RUN sed -i -e 's/\r$//' /etc/service/nginx/run/nginx.service RUN rm /etc/nginx/conf.d/default.conf diff --git a/.docker/nginx/origin/conf.d/http/additionals/nginx.conf.ctmpl.origin b/.docker/nginx/origin/conf.d/http/additionals/nginx.conf.ctmpl.origin deleted file mode 100644 index e1e1676..0000000 --- a/.docker/nginx/origin/conf.d/http/additionals/nginx.conf.ctmpl.origin +++ /dev/null @@ -1,34 +0,0 @@ -server { - - listen !#{additional_port} default_server; - listen [::]:!#{additional_port} default_server; - - server_name localhost; - - error_page 497 http://$host:$server_port$request_uri; - - client_max_body_size !#{NGINX_CLIENT_MAX_BODY_SIZE}; - - location / { - add_header Pragma no-cache; - add_header Cache-Control no-cache; - {{ with $key_value := keyOrDefault "!#{CONSUL_KEY}" "blue" }} - {{ if or (eq $key_value "blue") (eq $key_value "green") }} - proxy_pass http://!#{proxy_hostname}:!#{additional_port}; - {{ else }} - proxy_pass http://!#{proxy_hostname_blue}:!#{additional_port}; - {{ end }} - {{ end }} - proxy_set_header Host $http_host; - proxy_set_header X-Scheme $scheme; - proxy_set_header X-Forwarded-Protocol $scheme; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Real-IP $remote_addr; - proxy_http_version 1.1; - proxy_read_timeout 300s; - proxy_connect_timeout 75s; - } - - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; -} \ No newline at end of file diff --git a/.docker/nginx/origin/conf.d/http/additionals/nginx.conf.contingency.origin b/.docker/nginx/origin/conf.d/http/additionals/nginx.conf.prepared.origin similarity index 100% rename from .docker/nginx/origin/conf.d/http/additionals/nginx.conf.contingency.origin rename to .docker/nginx/origin/conf.d/http/additionals/nginx.conf.prepared.origin diff --git a/.docker/nginx/origin/conf.d/http/app/nginx.conf.ctmpl.origin b/.docker/nginx/origin/conf.d/http/app/nginx.conf.ctmpl.origin deleted file mode 100644 index beb1493..0000000 --- a/.docker/nginx/origin/conf.d/http/app/nginx.conf.ctmpl.origin +++ /dev/null @@ -1,36 +0,0 @@ -server { - - listen !#{EXPOSE_PORT} default_server; - listen [::]:!#{EXPOSE_PORT} default_server; - - server_name localhost; - - error_page 497 http://$host:$server_port$request_uri; - - client_max_body_size !#{NGINX_CLIENT_MAX_BODY_SIZE}; 
- - location / { - add_header Pragma no-cache; - add_header Cache-Control no-cache; - {{ with $key_value := keyOrDefault "!#{CONSUL_KEY}" "blue" }} - {{ if or (eq $key_value "blue") (eq $key_value "green") }} - proxy_pass http://!#{proxy_hostname}:!#{APP_PORT}; - {{ else }} - proxy_pass http://!#{proxy_hostname_blue}:!#{APP_PORT}; - {{ end }} - {{ end }} - proxy_set_header Host $http_host; - proxy_set_header X-Scheme $scheme; - proxy_set_header X-Forwarded-Protocol $scheme; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Real-IP $remote_addr; - proxy_http_version 1.1; - proxy_read_timeout 300s; - proxy_connect_timeout 75s; - } - - !#{USE_NGINX_RESTRICTED_LOCATION} - - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; -} \ No newline at end of file diff --git a/.docker/nginx/origin/conf.d/http/app/nginx.conf.contingency.origin b/.docker/nginx/origin/conf.d/http/app/nginx.conf.prepared.origin similarity index 100% rename from .docker/nginx/origin/conf.d/http/app/nginx.conf.contingency.origin rename to .docker/nginx/origin/conf.d/http/app/nginx.conf.prepared.origin diff --git a/.docker/nginx/origin/conf.d/https/additionals/nginx.conf.ctmpl.origin b/.docker/nginx/origin/conf.d/https/additionals/nginx.conf.ctmpl.origin deleted file mode 100644 index 7f7d360..0000000 --- a/.docker/nginx/origin/conf.d/https/additionals/nginx.conf.ctmpl.origin +++ /dev/null @@ -1,43 +0,0 @@ -server { - listen !#{additional_port} default_server ssl; - listen [::]:!#{additional_port} default_server ssl; - - http2 on; - - server_name localhost; - - error_page 497 https://$host:$server_port$request_uri; - - client_max_body_size !#{NGINX_CLIENT_MAX_BODY_SIZE}; - - - ssl_certificate /etc/nginx/ssl/!#{COMMERCIAL_SSL_NAME}.chained.crt; - ssl_certificate_key /etc/nginx/ssl/!#{COMMERCIAL_SSL_NAME}.key; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_prefer_server_ciphers on; - ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; - - location / { - add_header Pragma no-cache; - add_header Cache-Control no-cache; - {{ with $key_value := keyOrDefault "!#{CONSUL_KEY}" "blue" }} - {{ if or (eq $key_value "blue") (eq $key_value "green") }} - proxy_pass !#{app_https_protocol}://!#{proxy_hostname}:!#{additional_port}; - {{ else }} - proxy_pass !#{app_https_protocol}://!#{proxy_hostname_blue}:!#{additional_port}; - {{ end }} - {{ end }} - proxy_set_header Host $http_host; - proxy_set_header X-Scheme $scheme; - proxy_set_header X-Forwarded-Protocol $scheme; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Real-IP $remote_addr; - proxy_http_version 1.1; - proxy_read_timeout 300s; - proxy_connect_timeout 75s; - } - - - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; -} \ No newline at end of file diff --git a/.docker/nginx/origin/conf.d/https/additionals/nginx.conf.contingency.origin b/.docker/nginx/origin/conf.d/https/additionals/nginx.conf.prepared.origin similarity index 100% rename from .docker/nginx/origin/conf.d/https/additionals/nginx.conf.contingency.origin rename to .docker/nginx/origin/conf.d/https/additionals/nginx.conf.prepared.origin diff --git a/.docker/nginx/origin/conf.d/https/app/nginx.conf.ctmpl.origin b/.docker/nginx/origin/conf.d/https/app/nginx.conf.ctmpl.origin deleted file mode 100644 index 77ba58e..0000000 --- a/.docker/nginx/origin/conf.d/https/app/nginx.conf.ctmpl.origin +++ /dev/null @@ -1,45 +0,0 @@ -server { - - listen !#{EXPOSE_PORT} default_server ssl; - listen 
[::]:!#{EXPOSE_PORT} default_server ssl; - - http2 on; - server_name localhost; - - error_page 497 https://$host:$server_port$request_uri; - - client_max_body_size !#{NGINX_CLIENT_MAX_BODY_SIZE}; - - - ssl_certificate /etc/nginx/ssl/!#{COMMERCIAL_SSL_NAME}.chained.crt; - ssl_certificate_key /etc/nginx/ssl/!#{COMMERCIAL_SSL_NAME}.key; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_prefer_server_ciphers on; - ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; - - - location / { - add_header Pragma no-cache; - add_header Cache-Control no-cache; - {{ with $key_value := keyOrDefault "!#{CONSUL_KEY}" "blue" }} - {{ if or (eq $key_value "blue") (eq $key_value "green") }} - proxy_pass !#{app_https_protocol}://!#{proxy_hostname}:!#{APP_PORT}; - {{ else }} - proxy_pass !#{app_https_protocol}://!#{proxy_hostname_blue}:!#{APP_PORT}; - {{ end }} - {{ end }} - proxy_set_header Host $http_host; - proxy_set_header X-Scheme $scheme; - proxy_set_header X-Forwarded-Protocol $scheme; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Real-IP $remote_addr; - proxy_http_version 1.1; - proxy_read_timeout 300s; - proxy_connect_timeout 75s; - } - - !#{USE_NGINX_RESTRICTED_LOCATION} - - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; -} \ No newline at end of file diff --git a/.docker/nginx/origin/conf.d/https/app/nginx.conf.contingency.origin b/.docker/nginx/origin/conf.d/https/app/nginx.conf.prepared.origin similarity index 100% rename from .docker/nginx/origin/conf.d/https/app/nginx.conf.contingency.origin rename to .docker/nginx/origin/conf.d/https/app/nginx.conf.prepared.origin diff --git a/.docker/nginx/template/ctmpl/http/.gitkeep b/.docker/nginx/template/conf.d/http/.gitkeep similarity index 100% rename from .docker/nginx/template/ctmpl/http/.gitkeep rename to .docker/nginx/template/conf.d/http/.gitkeep diff --git a/.docker/nginx/template/ctmpl/https/.gitkeep b/.docker/nginx/template/conf.d/https/.gitkeep similarity index 100% rename from .docker/nginx/template/ctmpl/https/.gitkeep rename to .docker/nginx/template/conf.d/https/.gitkeep diff --git a/.docker/nginx/template/consul-template.service b/.docker/nginx/template/consul-template.service deleted file mode 100644 index 50772cc..0000000 --- a/.docker/nginx/template/consul-template.service +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -exec consul-template \ - -consul-addr consul:8500 \ - -template "/etc/consul-templates/nginx.conf.ctmpl:/etc/nginx/conf.d/nginx.conf::service nginx reload" diff --git a/.docker/nginx/template/entrypoint.sh b/.docker/nginx/template/entrypoint.sh index 8aef066..6b55dd2 100644 --- a/.docker/nginx/template/entrypoint.sh +++ b/.docker/nginx/template/entrypoint.sh @@ -17,19 +17,18 @@ echo "[INSIDE_NGINX_CONTAINER][NOTICE] Start Logrotate (every hour at minute 1) service cron restart || echo "[WARN] Restarting Cron failed." -# From this point on, the configuration of the NGINX consul-template begins. -if [[ ! -d /etc/consul-templates ]]; then - echo "[INSIDE_NGINX_CONTAINER][NOTICE] As the directory name '/etc/consul-templates' does NOT exist, it has been created." - mkdir /etc/consul-templates +# From this point on, the configuration of the NGINX template begins. +if [[ ! -d /etc/templates ]]; then + echo "[INSIDE_NGINX_CONTAINER][NOTICE] As the directory name '/etc/templates' does NOT exist, it has been created." 
+ mkdir /etc/templates fi app_url=$(printenv APP_URL) protocol=$(echo ${app_url} | awk -F[/:] '{print $1}') -echo "[INSIDE_NGINX_CONTAINER][NOTICE] Copy the template, contingency files for ${protocol} from '/ctmpl/${protocol}' to '/etc/consul-templates'." +echo "[INSIDE_NGINX_CONTAINER][NOTICE] Copy the prepared files for ${protocol} from '/conf.d/${protocol}' to '/etc/templates'." sleep 2 -cp -f /ctmpl/${protocol}/nginx.conf.ctmpl /etc/consul-templates -cp -f /ctmpl/${protocol}/nginx.conf.contingency.blue /etc/consul-templates -cp -f /ctmpl/${protocol}/nginx.conf.contingency.green /etc/consul-templates +cp -f /conf.d/${protocol}/nginx.conf.prepared.blue /etc/templates +cp -f /conf.d/${protocol}/nginx.conf.prepared.green /etc/templates # SSL if [[ ${protocol} = 'https' ]]; then @@ -76,9 +75,8 @@ if [[ ${protocol} = 'https' ]]; then chmod 644 /etc/nginx/ssl/${commercial_ssl_name}.chained.crt chmod 644 /etc/nginx/ssl/${commercial_ssl_name}.crt - sed -i -e "s/!#{COMMERCIAL_SSL_NAME}/${commercial_ssl_name}/" /etc/consul-templates/nginx.conf.ctmpl || (echo "commercial_ssl_name (${commercial_ssl_name}) on .env failed to be applied. (ctmpl)" && exit 1) - sed -i -e "s/!#{COMMERCIAL_SSL_NAME}/${commercial_ssl_name}/" /etc/consul-templates/nginx.conf.contingency.blue || (echo "commercial_ssl_name (${commercial_ssl_name}) on .env failed to be applied. (contingency blue)" && exit 1) - sed -i -e "s/!#{COMMERCIAL_SSL_NAME}/${commercial_ssl_name}/" /etc/consul-templates/nginx.conf.contingency.green || (echo "commercial_ssl_name (${commercial_ssl_name}) on .env failed to be applied. (contingency green)" && exit 1) + sed -i -e "s/!#{COMMERCIAL_SSL_NAME}/${commercial_ssl_name}/" /etc/templates/nginx.conf.prepared.blue || (echo "commercial_ssl_name (${commercial_ssl_name}) on .env failed to be applied. (prepared blue)" && exit 1) + sed -i -e "s/!#{COMMERCIAL_SSL_NAME}/${commercial_ssl_name}/" /etc/templates/nginx.conf.prepared.green || (echo "commercial_ssl_name (${commercial_ssl_name}) on .env failed to be applied. (prepared green)" && exit 1) fi @@ -104,7 +102,5 @@ for retry_count in {1..5}; do sleep 3 done -echo "[INSIDE_NGINX_CONTAINER][NOTICE] Applying the Nginx template..." -bash /etc/service/consul-template/run/consul-template.service echo "[INSIDE_NGINX_CONTAINER][NOTICE] Start the Nginx." 
bash /etc/service/nginx/run/nginx.service diff --git a/.env.example b/.env.example index 501b4bd..c020967 100644 --- a/.env.example +++ b/.env.example @@ -10,7 +10,6 @@ DOCKER_LAYER_CORRUPTION_RECOVERY=false NGINX_RESTART=false -CONSUL_RESTART=false # The method of acquiring Docker images: @@ -37,8 +36,6 @@ PROJECT_PORT=xxx # Samples : ADDITIONAL_PORTS=5005,5006 ADDITIONAL_PORTS=8093 -CONSUL_KEY_VALUE_STORE=http://consul:8500/v1/kv/deploy/xxx - # If you locate your project on ../ (upper folder) HOST_ROOT_LOCATION=../ # If you locate your project's Dockerfile ../ (upper folder) @@ -88,6 +85,4 @@ NGINX_LOGROTATE_FILE_SIZE=1M SHARED_VOLUME_GROUP_ID=1351 SHARED_VOLUME_GROUP_NAME=shared-volume-group -UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID= - -USE_MY_OWN_NGINX_ORIGIN=false \ No newline at end of file +UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID= \ No newline at end of file diff --git a/.env.example.java b/.env.example.java index 91f8cb9..aea97e5 100644 --- a/.env.example.java +++ b/.env.example.java @@ -10,7 +10,6 @@ NGINX_RESTART=false -CONSUL_RESTART=false # The method of acquiring Docker images: # build (Used in developer's local environment or during Jenkins builds when a new image needs to be built, so this module is typically used) @@ -29,7 +28,6 @@ # Example (8093,8094,11000...) ADDITIONAL_PORTS=5005 -CONSUL_KEY_VALUE_STORE=http://consul:8500/v1/kv/deploy/spring-sample-h-auth # If you locate your project on ../ (upper folder) HOST_ROOT_LOCATION=./samples/spring-sample-h-auth @@ -79,6 +77,4 @@ SHARED_VOLUME_GROUP_ID=1351 SHARED_VOLUME_GROUP_NAME=shared-volume-group -UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID= - -USE_MY_OWN_NGINX_ORIGIN=false \ No newline at end of file +UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID= \ No newline at end of file diff --git a/.env.example.java.commercial.ssl.sample b/.env.example.java.commercial.ssl.sample index 001046e..6f6fb91 100644 --- a/.env.example.java.commercial.ssl.sample +++ b/.env.example.java.commercial.ssl.sample @@ -9,7 +9,6 @@ DOCKER_LAYER_CORRUPTION_RECOVERY=false NGINX_RESTART=false -CONSUL_RESTART=false # The method of acquiring Docker images: @@ -29,7 +28,6 @@ PROJECT_PORT=8300 # Example (8093,8094,11000...) ADDITIONAL_PORTS= -CONSUL_KEY_VALUE_STORE=http://consul:8500/v1/kv/deploy/spring-sample-h-auth # 1) ''/var/web/project/spring-sample-h-auth' is here HOST_ROOT_LOCATION=/var/web/project/spring-sample-h-auth @@ -81,6 +79,4 @@ NGINX_LOGROTATE_FILE_SIZE=1M SHARED_VOLUME_GROUP_ID=1351 SHARED_VOLUME_GROUP_NAME=shared-volume-group -UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID= - -USE_MY_OWN_NGINX_ORIGIN=false \ No newline at end of file +UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID= \ No newline at end of file diff --git a/.env.example.node b/.env.example.node index 51dc5a2..f434c6f 100644 --- a/.env.example.node +++ b/.env.example.node @@ -9,7 +9,6 @@ COMMERCIAL_SSL_NAME=yyy DOCKER_LAYER_CORRUPTION_RECOVERY=false NGINX_RESTART=false -CONSUL_RESTART=false # The method of acquiring Docker images: # build (Used in developer's local environment or during Jenkins builds when a new image needs to be built, so this module is typically used) @@ -28,8 +27,6 @@ PROJECT_PORT=[13000,3000] # Example (8093,8094,11000...) 
ADDITIONAL_PORTS= -CONSUL_KEY_VALUE_STORE=http://consul:8500/v1/kv/deploy/node-express-boilerplate - # If you locate your project on ../ (upper folder) HOST_ROOT_LOCATION=./samples/node-express-boilerplate # If you locate your project's Dockerfile ../ (upper folder) @@ -75,6 +72,4 @@ NGINX_LOGROTATE_FILE_SIZE=1M SHARED_VOLUME_GROUP_ID=1351 SHARED_VOLUME_GROUP_NAME=shared-volume-group -UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID= - -USE_MY_OWN_NGINX_ORIGIN=false \ No newline at end of file +UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID= \ No newline at end of file diff --git a/.env.example.php b/.env.example.php index d4dfec8..ee1e0dc 100644 --- a/.env.example.php +++ b/.env.example.php @@ -10,7 +10,6 @@ DOCKER_LAYER_CORRUPTION_RECOVERY=false NGINX_RESTART=false -CONSUL_RESTART=false # The method of acquiring Docker images: # build (Used in developer's local environment or during Jenkins builds when a new image needs to be built, so this module is typically used) @@ -29,8 +28,6 @@ # Example (8093,8094,11000...) ADDITIONAL_PORTS= -CONSUL_KEY_VALUE_STORE=http://consul:8500/v1/kv/deploy/laravel_crud_boilerplate - # If you locate your project on ../ (upper folder) HOST_ROOT_LOCATION=./samples/laravel-crud-boilerplate # If you locate your project's Dockerfile ../ (upper folder) @@ -75,13 +72,11 @@ # ex. /docs/api-app.html NGINX_RESTRICTED_LOCATION=xxx -REDIRECT_HTTPS_TO_HTTP=false +REDIRECT_HTTPS_TO_HTTP=true NGINX_LOGROTATE_FILE_NUMBER=7 NGINX_LOGROTATE_FILE_SIZE=100K SHARED_VOLUME_GROUP_ID=1351 SHARED_VOLUME_GROUP_NAME=laravel-shared-volume-group -UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID=1000 - -USE_MY_OWN_NGINX_ORIGIN=false \ No newline at end of file +UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID=1000 \ No newline at end of file diff --git a/.gitignore b/.gitignore index b7dc396..7c86e7d 100644 --- a/.gitignore +++ b/.gitignore @@ -37,11 +37,11 @@ !/docker-stack-app-original-blue.yml !/docker-stack-app-original-green.yml -/.docker/nginx/template/ctmpl/http/* -!/.docker/nginx/template/ctmpl/http/.gitkeep +/.docker/nginx/template/conf.d/http/* +!/.docker/nginx/template/conf.d/http/.gitkeep -/.docker/nginx/template/ctmpl/https/* -!/.docker/nginx/template/ctmpl/https/.gitkeep +/.docker/nginx/template/conf.d/https/* +!/.docker/nginx/template/conf.d/https/.gitkeep /.docker/nginx/template/logrotate/* !/.docker/nginx/template/logrotate/.gitkeep @@ -50,9 +50,9 @@ /.docker/nginx/origin/nginx.conf.main.origin.customized -/.docker/nginx/origin/conf.d/http/additionals/nginx.conf.ctmpl.origin.customized -/.docker/nginx/origin/conf.d/http/app/nginx.conf.ctmpl.origin.customized +/.docker/nginx/origin/conf.d/http/additionals/nginx.conf.prepared.origin.customized +/.docker/nginx/origin/conf.d/http/app/nginx.conf.prepared.origin.customized -/.docker/nginx/origin/conf.d/https/additionals/nginx.conf.ctmpl.origin.customized -/.docker/nginx/origin/conf.d/https/app/nginx.conf.ctmpl.origin.customized +/.docker/nginx/origin/conf.d/https/additionals/nginx.conf.prepared.origin.customized +/.docker/nginx/origin/conf.d/https/app/nginx.conf.prepared.origin.customized diff --git a/README.md b/README.md index e412f7b..afa2db2 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,12 @@ > A Simple and Safe Blue-Green Deployment Starting from Your Source Code—Not from Your Prebuilt Docker Image +> [NOTE] To upgrade your app from v5 to v6, update your .env file with the following settings and proceed: + ```.dotenv + DOCKER_LAYER_CORRUPTION_RECOVERY=true # Warning: This will remove your app container and image. 
+ NGINX_RESTART=true # For normal upgrades, this setting is sufficient. For zero-downtime deployment, set this to false. Details in the Upgrade section. + ``` + ## Table of Contents - [Features](#features) - [Process Summary](#process-summary) @@ -19,15 +25,13 @@ - [APP_URL](#app_url) - [Important ENVs That Require Restarting NGINX](#important-envs-that-require-restarting-nginx) - [Upgrade](#upgrade) - - [Fully Customizing NGINX Configuration](#fully-customizing-nginx-configuration) - - [NGINX Contingency Function](#nginx-contingency-function) + - [NGINX Prepared Function](#nginx-prepared-function) - [Terms](#terms) - [Log Levels](#log-levels) - [Check States](#check-states) - [Emergency](#emergency) - [Security](#Security) - [Running & Stopping Multiple Projects](#running--stopping-multiple-projects) - - [Consul](#consul) - [USE_NGINX_RESTRICTION on .env](#use_nginx_restriction-on-env) - [Advanced](#advanced) - [Production Deployment](#production-deployment) @@ -57,13 +61,12 @@ - Step 2: Perform a health check with customized settings defined in your .env file - Nginx Router Test Container - External Integrity Check - - Nginx Contingency Plan - Rollback Procedures - Additional Know-hows on Docker: Tips and best practices for optimizing your Docker workflow and deployment processes - For example, Traefik offers powerful dynamic configuration and service discovery; however, certain errors, such as a failure to detect containers (due to issues like unrecognized certificates), can lead to frustrating 404 errors that are hard to trace through logs alone. - https://stackoverflow.com/questions/76660749/traefik-404-page-not-found-when-use-https - https://community.traefik.io/t/getting-bad-gateway-404-page-when-supposed-to-route-to-container-port-8443/20398 - - Manipulates NGINX configuration files directly to ensure container accessibility. It also tests configuration files by launching a test NGINX Docker instance, and if an NGINX config update via Consul-Template fails, Contingency Plan provided is activated to ensure connectivity to your containers. + - Manipulates NGINX configuration files directly to ensure container accessibility. 3. **Track Blue-Green status and the Git SHA of your running container for easy monitoring.** @@ -84,11 +87,11 @@ ## Process Summary - Term Reference - - ``All`` means below is "App", "Nginx". - ``(Re)Load`` means ``docker run.... OR docker-compose up``. - ``State`` is ``Blue`` or ``Green`` - More is on [Terms](#terms) -- Load Consul & Registrator, then the App, and finally Nginx to prevent upstream errors. +- Load the App, and finally Nginx to prevent upstream errors.
```mermaid @@ -96,25 +99,18 @@ graph TD; A[Initialize and Set Variables] --> B[Backup All Images] B --> C[Check the .env File Integrity] C --> D[Build All Images] - D --> E[Create Consul Network] - E --> F{Reload Consul if Required} - F -- Yes --> G[Reload Consul] - F -- No --> H[Load Your App] - G --> H[Load Your App] - H --> I[Check App Integrity] - I --> J{Reload Nginx if Required} - J -- Yes --> K[Check Nginx Template Integrity by Running a Test Container] - J -- No --> L[Check All Containers' Health] - K --> L[Check All Containers' Health] - L --> M{Set New State Using Consul Template} - M -- Fails --> O[Run Nginx Contingency Plan] - M -- Success --> N[External Integrity Check] - O --> N[External Integrity Check] - N -- Fails --> P[Rollback App if Needed] - N -- Success --> Q["Remove the Opposite State (Blue or Green) from the Running Containers"] - P --> Q["Remove the Opposite State from the Running Containers"] - Q --> R[Clean Up Dangling Images] - R --> S[Deployment Complete] + D --> E[Load Your App] + E --> F[Check App Integrity] + F --> G{Reload Nginx if Required} + G -- Yes --> H[Check Nginx Template Integrity by Running a Test Container] + G -- No --> I[Check All Containers' Health] + H --> I[Check All Containers' Health] + I --> J[External Integrity Check] + J -- Fails --> K[Rollback App if Needed] + J -- Success --> L["Remove the Opposite State (Blue or Green) from the Running Containers"] + K --> L["Remove the Opposite State from the Running Containers"] + L --> M[Clean Up Dangling Images] + M --> N[Deployment Complete] ``` ![img5.png](/documents/images/img5.png) @@ -157,13 +153,10 @@ graph TD; | bash | 4.4 at least | Manual | - | | curl | N/A | Manual | - | | yq | 4.35.1 | Auto | Use v4.35.1 instead of the latest version. The lastest version causes a parsing error | -| consul (docker image) | 1.14.11 | Auto | An error occurred due to a payload format issue while the lastest version of it was communicating with gliderlabs/registrator. | -| gliderlabs/registrator (docker image) | master | Auto | | | nginx (docker image) | 1.25.4 | Auto | Considering changing it to a certain version, but until now no issues have been detected. | | docker | 24~27 | Manual | I think too old versions could cause problems, and the lastest version v27.x causes only a warning message. | | docker-compose | 2 | Manual | I think too old versions could cause problems, and the v2 is recommended. | -- Although issues with wrong versions of these libraries can cause errors, there are several safety mechanisms in place to prevent the server from being interrupted. For example, when you run run.sh, early on it checks: 1) the existence of the required libraries, 2) the NGINX Contingency Function section below, and 3) in case of restarting Nginx (NGINX_RESTART=true in .env), a preliminary check for integrity (check_nginx_templates_integrity in use-nginx.sh). - For ``docker-compose``, if you use a version above v2.25.0, you will see a warning message: ``[WARN] The attribute 'version' is obsolete and will be ignored. Please remove it to avoid potential confusion``. You can ignore it at this point. - For MAC users, ``GNU-based bash, sed, grep`` should be installed. - For MAC users, ``SHARED_VOLUME_GROUP_*`` on .env are skipped. @@ -294,9 +287,6 @@ DOCKER_COMPOSE_HOST_VOLUME_CHECK=false # This option should be used when upgrading the Runner. See the "Upgrade" section below. NGINX_RESTART=false -# Setting this to 'true' is not recommended for normal operation as it results in prolonged downtime. 
-CONSUL_RESTART=false - # Specify the location of the .git folder for your project here to enable tracking through container labels. # To track, simply run `bash check-current_states.sh`. DOCKER_BUILD_SHA_INSERT_GIT_ROOT= @@ -320,7 +310,6 @@ REDIRECT_HTTPS_TO_HTTP=true APP_URL PROJECT_PORT ADDITIONAL_PORT -CONSUL_KEY_VALUE USE_COMMERCIAL_SSL COMMERCIAL_SSL_NAME DOCKER_COMPOSE_NGINX_SELECTIVE_VOLUMES @@ -334,7 +323,6 @@ NGINX_LOGROTATE_FILE_SIZE SHARED_VOLUME_GROUP_ID # The application to the host does NOT depend on NGINX_RESTART=true. It is always applied. SHARED_VOLUME_GROUP_NAME # The application to the host does NOT depend on NGINX_RESTART=true. It is always applied. UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID # The application to the host does NOT depend on NGINX_RESTART=true. It is always applied. -USE_MY_OWN_NGINX_ORIGIN ``` ### Upgrade @@ -348,38 +336,6 @@ sudo bash run.sh ``` - However, as you are aware, ```NGINX_RESTART=true``` causes a short downtime. **Make sure ```NGINX_RESTART=false``` at all times**. -### Fully Customizing NGINX Configuration - -![img.png](/documents/images/img3.png) - -- The ``origin`` folder is where you can modify original Nginx conf files. -- Create the five yellow-highlighted files ending with 'ctmpl.origin.customized' by copying the originals ending with 'ctmpl.origin.' -- You don't have to create all five files; just create the ones you need. -- In the .env file, set this to 'true' -```shell -USE_MY_OWN_NGINX_ORIGIN=true -# See '[IMPORTANT] ENVs that require 'NGINX_RESTART=true' above. -NGINX_RESTART=true -``` -- For reference, the files you just created are ignored by git, so there won't be any issues when you run the following: -```shell -# Check if the source codes of Runner is manipulated. -bash check-source-integrity.sh -``` -- Then, run ``(sudo) bash run.sh``. **Starting from v5.0.0**, if NGINX_RESTART is set to 'true', the Runner will test your configuration using ``nginx -t`` in a temporary container before recreating the NGINX container. If the test fails, the process stops, preventing any side effects on your currently running app. -- Don't touch any file in ``.docker/nginx/template``. They are just ones in ready to be injected into the NGINX Image in Dockerfile. -- Process of NGINX Configuration - - ``Origin`` -(processed with the .env)-> ``Template`` -(docker build)-> ``Docker Image`` -(running entrypoint.sh)-> ``Docker Container`` -- NGINX Logrotate - - ``.docker/nginx/origin/logroate/nginx`` -- ENV Spel - - A syntax that brings values from the .env file throughout the ecosystem. - - ``!#{ value here }`` -![img4.png](/documents/images/img4.png) - -### NGINX Contingency Function -- In the event of a Consul failure, the NGINX Contingency module takes over and operates NGINX autonomously. This ensures uninterrupted service by allowing NGINX to function independently. - ### Terms For all echo messages or properties .env, the following terms indicate... - BUILD (=LOAD IMAGE) : ```docker build``` @@ -400,7 +356,7 @@ For all echo messages or properties .env, the following terms indicate... ```shell bash check-current-states.sh # The output example -[DEBUG] ! Checking which (Blue OR Green) is currently running... (Base Check) : consul_pointing(blue), nginx_pointing(blue}), blue_status(running), green_status(exited) +[DEBUG] ! Checking which (Blue OR Green) is currently running... (Base Check) : nginx_pointing(blue}), blue_status(running), green_status(exited) [DEBUG] ! Checked which (Blue OR Green) is currently running... 
(Final Check) : blue_score : 130, green_score : 27, state : blue, new_state : green, state_for_emergency : blue, new_upstream : https://PROJECT_NAME:8300. ``` - The higher the score a state receives, the more likely it is to be the currently running state. So the updated App should be deployed as the non-occupied state(new_state). @@ -443,12 +399,8 @@ bash ./rollback.sh # The Nginx Container is roll-backed as well. (Not recommended. Nginx is safely managed as long as you use released versions.) bash ./rollback.sh 1 ``` -- Critical Error on the Consul Network - - This rarely happens when... - - The server machine has been restarted, and affects the Consul network. - - You change the ```ORCHESTRATION_TYPE``` on the .env, the two types (compose,stack) for it come to use different network scopes, which leads to a collision. ```shell -bash emergency-consul-down-and-up.sh +bash emergency-all-down-and-up.sh ``` ### Security @@ -477,10 +429,6 @@ bash check-source-integrity.sh - If you wish to terminate the project, which should be on your .env, run ```bash stop-all-containers.sh``` - If you wish to remove the project's images, which should be on your .env, run ```bash remove-all-images.sh``` -### Consul -`` http://localhost:8500 `` -- Need to set a firewall for the 8500 port referring to ``./docker-orchestration-consul.yml``. - ### USE_NGINX_RESTRICTION on .env - https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-http-basic-authentication - Create ```.htpasswd``` on ```./.docker/nginx/custom-files``` if you would like use the settings. This is useful when you apply security to API Doc Modules such as Spring-Rest-Docs. @@ -566,4 +514,16 @@ git status # If any changes are detected, the source code may be corrupted, or j docker swarm init sudo bash run.sh ``` + +### ETC +- Process of NGINX Configuration + - ``Origin`` -(processed with the .env)-> ``Template`` -(docker build)-> ``Docker Image`` -(running entrypoint.sh)-> ``Docker Container`` +- NGINX Logrotate + - ``.docker/nginx/origin/logroate/nginx`` +- ENV Spel + - A syntax that brings values from the .env file throughout the ecosystem. 
+ - ``!#{ value here }`` + ![img4.png](/documents/images/img4.png) + + --- diff --git a/docker-compose-app-original.yml b/docker-compose-app-original.yml index 45ed49d..c9a61b2 100644 --- a/docker-compose-app-original.yml +++ b/docker-compose-app-original.yml @@ -7,7 +7,7 @@ services: image: ${PROJECT_NAME}:blue restart: always networks: - - consul + - dbgr-net extra_hosts: - "${HOST_IP}:host-gateway" environment: @@ -22,7 +22,7 @@ services: image: ${PROJECT_NAME}:green restart: always networks: - - consul + - dbgr-net extra_hosts: - "${HOST_IP}:host-gateway" environment: @@ -31,6 +31,6 @@ services: - .env volumes: [] networks: - consul: + dbgr-net: external: - name: consul + name: dbgr-net diff --git a/docker-orchestration-app-nginx-original.yml b/docker-orchestration-app-nginx-original.yml index fac6805..a57bc14 100644 --- a/docker-orchestration-app-nginx-original.yml +++ b/docker-orchestration-app-nginx-original.yml @@ -12,10 +12,10 @@ services: - ./.docker/ssl:/etc/nginx/ssl - ./.docker/nginx/custom-files:/etc/nginx/custom-files networks: - - consul + - dbgr-net ports: - ${PROJECT_PORT}:${PROJECT_PORT} networks: - consul: + dbgr-net: external: - name: consul + name: dbgr-net diff --git a/docker-orchestration-consul.yml b/docker-orchestration-consul.yml deleted file mode 100644 index 3223714..0000000 --- a/docker-orchestration-consul.yml +++ /dev/null @@ -1,46 +0,0 @@ -version: '3.7' - -# https://www.consul.io/intro/index.html -# http://gliderlabs.github.io/registrator/latest/ - -services: - consul: - hostname: consul - container_name: consul - image: hashicorp/consul:1.14.11 - restart: always - environment: - - CONSUL_LOCAL_CONFIG={"disable_update_check":true} - entrypoint: - - consul - - agent - - -server - - -bootstrap - - -data-dir=/data - - -bind={{ GetInterfaceIP "eth0" }} - - -client=0.0.0.0 - - -ui - volumes: - - consul:/data - networks: - - consul - - registrator: - container_name: registrator - command: -internal consul://consul:8500 - privileged: true - image: gliderlabs/registrator:master - restart: always - links: - - consul - volumes: - - /var/run/docker.sock:/tmp/docker.sock - networks: - - consul - -networks: - consul: - name: consul - external: true -volumes: - consul: diff --git a/docker-stack-app-original-blue.yml b/docker-stack-app-original-blue.yml index 41750c4..bb11527 100644 --- a/docker-stack-app-original-blue.yml +++ b/docker-stack-app-original-blue.yml @@ -12,7 +12,7 @@ services: deploy: replicas: 2 networks: - - consul + - dbgr-net networks: - consul: + dbgr-net: external: true diff --git a/docker-stack-app-original-green.yml b/docker-stack-app-original-green.yml index 179fdf3..d4d0846 100644 --- a/docker-stack-app-original-green.yml +++ b/docker-stack-app-original-green.yml @@ -12,7 +12,7 @@ services: deploy: replicas: 2 networks: - - consul + - dbgr-net networks: - consul: + dbgr-net: external: true diff --git a/documents/Deploy-React-Project-with-DBGR.md b/documents/Deploy-React-Project-with-DBGR.md index 24364d6..b2e544b 100644 --- a/documents/Deploy-React-Project-with-DBGR.md +++ b/documents/Deploy-React-Project-with-DBGR.md @@ -39,7 +39,6 @@ DOCKER_LAYER_CORRUPTION_RECOVERY=false NGINX_RESTART=false - CONSUL_RESTART=false # The method of acquiring Docker images: # build (Used in developer's local environment or during Jenkins builds when a new image needs to be built, so this module is typically used) @@ -59,8 +58,6 @@ # Example (8093,8094,11000...) 
ADDITIONAL_PORTS= - CONSUL_KEY_VALUE_STORE=http://consul:8500/v1/kv/deploy/your-app - # If you locate your project on ../ (upper folder) HOST_ROOT_LOCATION=/var/projects/your-app # If you locate your project's Dockerfile ../ (upper folder) @@ -91,7 +88,7 @@ DOCKER_BUILD_ARGS={"PROJECT_ROOT_IN_CONTAINER":"/app"} # For SSL, the host folder is recommended to be './.docker/ssl' to be synchronized with 'docker-orchestration-app-nginx-original.yml'. # [IMPORTANT] Run mkdir -p /var/projects/files/your-app/logs on your host machine - DOCKER_COMPOSE_SELECTIVE_VOLUMES=["/var/projects/your-app/.docker/nginx/app.conf.ctmpl:/etc/nginx-template/app.conf.ctmpl","/var/projects/files/your-app/logs:/var/log/nginx"] + DOCKER_COMPOSE_SELECTIVE_VOLUMES=["/var/projects/your-app/.docker/nginx/app.conf.conf.d:/etc/nginx-template/app.conf.conf.d","/var/projects/files/your-app/logs:/var/log/nginx"] # [IMPORTANT] Run mkdir -p /var/projects/files/nginx/logs on your host machine DOCKER_COMPOSE_NGINX_SELECTIVE_VOLUMES=["/var/projects/files/nginx/logs:/var/log/nginx"] DOCKER_COMPOSE_HOST_VOLUME_CHECK=false @@ -126,8 +123,7 @@ SHARED_VOLUME_GROUP_ID=1559 SHARED_VOLUME_GROUP_NAME=mba-shared-volume-group UIDS_BELONGING_TO_SHARED_VOLUME_GROUP_ID=1000,1001 - - USE_MY_OWN_NGINX_ORIGIN=false + ``` ### Locate your commercial SSLs in the folder ``docker-blue-green-runner/.docker/ssl``. See the comments in the ``.env`` above. - For me, I have used GoDaddy, https://dearsikandarkhan.medium.com/ssl-godaddy-csr-create-on-mac-osx-4401c47fd94c . @@ -177,15 +173,15 @@ ENTRYPOINT bash /entrypoint.sh - .docker/ - entrypoint.sh - nginx/ - - app.conf.ctmpl + - app.conf.conf.d - entrypoint.sh - ```shell #!/bin/bash # synced the paths at DOCKER_COMPOSE_SELECTIVE_VOLUMES in .env - cat /etc/nginx-template/app.conf.ctmpl > /etc/nginx/conf.d/default.conf + cat /etc/nginx-template/app.conf.conf.d > /etc/nginx/conf.d/default.conf /usr/sbin/nginx -t && exec /usr/sbin/nginx -g "daemon off;" ``` - - app.conf.ctmpl + - app.conf.conf.d - ```nginx server { listen 8360; diff --git a/documents/images/img3.png b/documents/images/img3.png index 98c6f5b..f97d12d 100644 Binary files a/documents/images/img3.png and b/documents/images/img3.png differ diff --git a/emergency-consul-down-and-up.sh b/emergency-all-down-and-up.sh similarity index 87% rename from emergency-consul-down-and-up.sh rename to emergency-all-down-and-up.sh index 88e55ee..d14d3e7 100644 --- a/emergency-consul-down-and-up.sh +++ b/emergency-all-down-and-up.sh @@ -17,15 +17,14 @@ git config core.filemode false sleep 3 -source ./use-consul.sh cache_non_dependent_global_vars check_env_integrity -echo "[STRONG WARNING] This process removes all Containers in the Consul network, which means your running Apps will be stopped." +echo "[STRONG WARNING] This process removes all Containers in the dbgr-network, which means your running Apps will be stopped." echo "[WARNING] This will re-create your network according to the orchestration_type on .env. (stack : swarm, compose : local). The current orchestration_type is '${orchestration_type}'" -network_name="consul" +network_name="dbgr-net" docker stack rm ${project_name}-blue || echo "[DEBUG] D" docker stack rm ${project_name}-green || echo "[DEBUG] E" @@ -33,12 +32,10 @@ docker stack rm ${project_name}-green || echo "[DEBUG] E" container_ids=($(docker network inspect -f '{{range .Containers}}{{.Name}} {{end}}' "$network_name")) || echo "[NOTICE] THe network name ${network_name} has NOT been found." 
for container_id in "${container_ids[@]}"; do - echo "[NOTICE] Stopping & Removing containers for removing the Consul network : $container_id" + echo "[NOTICE] Stopping & Removing containers for removing the dbgr network : $container_id" docker network disconnect -f "$network_name" "$container_id" || echo "[DEBUG] F" docker stop "$container_id" || echo "[DEBUG] G" docker container rm "$container_id" || echo "[DEBUG] H" done -sleep 5 - -consul_down_and_up_with_network +sleep 5 \ No newline at end of file diff --git a/emergency-nginx-down-and-up.sh b/emergency-nginx-down-and-up.sh index 995363a..6cbcaed 100644 --- a/emergency-nginx-down-and-up.sh +++ b/emergency-nginx-down-and-up.sh @@ -41,4 +41,4 @@ echo "[NOTICE] Finally, !! Deploy the App as !! ${state_a} !!, we will now deplo # run nginx_down_and_up # activate : blue or green -./nginx-blue-green-activate.sh ${state_a} ${state_b} ${state_upstream} ${consul_key_value_store} \ No newline at end of file +./nginx-blue-green-activate.sh ${state_a} ${state_b} ${state_upstream} \ No newline at end of file diff --git a/emergency-nginx-restart.sh b/emergency-nginx-restart.sh index e28f26e..986ac0b 100644 --- a/emergency-nginx-restart.sh +++ b/emergency-nginx-restart.sh @@ -45,8 +45,7 @@ initiate_nginx_docker_compose_file apply_env_service_name_onto_nginx_yaml apply_ports_onto_nginx_yaml apply_docker_compose_volumes_onto_app_nginx_yaml -save_nginx_ctmpl_template_from_origin -save_nginx_contingency_template_from_origin +save_nginx_prepared_template_from_origin save_nginx_logrotate_template_from_origin save_nginx_main_template_from_origin # build @@ -54,4 +53,4 @@ load_nginx_docker_image # run nginx_down_and_up # activate : blue or green -./nginx-blue-green-activate.sh ${state_a} ${state_b} ${state_upstream} ${consul_key_value_store} \ No newline at end of file +./nginx-blue-green-activate.sh ${state_a} ${state_b} ${state_upstream} \ No newline at end of file diff --git a/nginx-blue-green-activate.sh b/nginx-blue-green-activate.sh index bb82bde..d27302d 100644 --- a/nginx-blue-green-activate.sh +++ b/nginx-blue-green-activate.sh @@ -11,21 +11,11 @@ cache_non_dependent_global_vars new_state=$1 old_state=$2 new_upstream=$3 -consul_key_value_store=$4 -echo "[NOTICE] new_state : ${new_state}, old_state : ${old_state}, new_upstream : ${new_upstream}, consul_key_value_store : ${consul_key_value_store}" +echo "[NOTICE] new_state : ${new_state}, old_state : ${old_state}, new_upstream : ${new_upstream}" -was_state=$(docker exec ${project_name}-nginx curl ${consul_key_value_store}?raw) || { - echo "[EMERGENCY] Errors on Nginx or Consul Network. Executing Nginx Contingency Plan." - was_state="${old_state}" -} -echo "[NOTICE] CONSUL (${consul_key_value_store}) is currently pointing to : ${was_state}" -if [[ ${old_state} != ${was_state} ]]; then - echo "[WARNING] Was State (${was_state}, currently pointed from CONSUL) is different from Old State (${old_state}, checked at the first stage of the mother script.)" -fi - -# The meaning of "${pid_was} != '-'" is that when Nginx has fully started, the BLUE-GREEN change operation is performed in CONSUL. +# The meaning of "${pid_was} != '-'" is that when Nginx has fully started, the BLUE-GREEN change operation is performed. echo "[NOTICE] Check if Nginx is completely UP." for retry_count in {1..5}; do pid_was=$(docker exec ${project_name}-nginx pidof nginx 2>/dev/null || echo '-') @@ -35,7 +25,6 @@ for retry_count in {1..5}; do break else echo "[NOTICE] Retrying... 
(pid_was : ${pid_was})" - fi if [[ ${retry_count} -eq 4 ]]; then @@ -47,31 +36,30 @@ for retry_count in {1..5}; do sleep 3 done -echo "[NOTICE] Activate ${new_state} CONSUL. (old Nginx pids: ${pid_was})" -echo "[NOTICE] ${new_state} is stored in CONSUL." -docker exec ${project_name}-nginx curl -X PUT -d ${new_state} ${consul_key_value_store} >/dev/null || { - echo "![NOTICE] Setting ${new_state} on nginx.conf according to the Nginx Contingency Plan." - docker exec ${project_name}-nginx cp -f /etc/consul-templates/nginx.conf.contingency.${new_state} /etc/nginx/conf.d/nginx.conf || docker exec ${project_name}-nginx cp -f /ctmpl/${protocol}/nginx.conf.contingency.${new_state} /etc/nginx/conf.d/nginx.conf - docker exec ${project_name}-nginx sh -c 'service nginx reload || service nginx restart || [EMERGENCY] Nginx Contingency Plan failed as well. Correct /etc/nginx/conf.d/nginx.conf directly and Run "service nginx restart".' -} +echo "[NOTICE] Activate ${new_state} in the Nginx config file. (old Nginx pids: ${pid_was})" +echo "[NOTICE] ${new_state} is stored in the Nginx config file." + +echo "![NOTICE] Setting ${new_state} in nginx.conf... (from '/etc/templates/nginx.conf.prepared.${new_state}')" +docker exec ${project_name}-nginx cp -f /etc/templates/nginx.conf.prepared.${new_state} /etc/nginx/conf.d/nginx.conf +docker exec ${project_name}-nginx sh -c 'service nginx reload || service nginx restart || [EMERGENCY] Nginx Prepared Plan failed. Correct /etc/nginx/conf.d/nginx.conf directly in the Nginx container and Run "service nginx restart".' sleep 1 re=$(check_availability_out_of_container_speed_mode | tail -n 1); if [[ ${re} != 'true' ]]; then - echo "![NOTICE] Setting ${new_state} on nginx.conf according to the Nginx Contingency Plan." - docker exec ${project_name}-nginx cp -f /etc/consul-templates/nginx.conf.contingency.${new_state} /etc/nginx/conf.d/nginx.conf || docker exec ${project_name}-nginx cp -f /ctmpl/${protocol}/nginx.conf.contingency.${new_state} /etc/nginx/conf.d/nginx.conf - docker exec ${project_name}-nginx sh -c 'service nginx reload || service nginx restart || [EMERGENCY] Nginx Contingency Plan failed as well. Correct /etc/nginx/conf.d/nginx.conf directly and Run "service nginx restart".' + echo "![NOTICE] Setting ${new_state} on nginx.conf according to the Nginx Prepared Plan." + docker exec ${project_name}-nginx cp -f /etc/templates/nginx.conf.prepared.${new_state} /etc/nginx/conf.d/nginx.conf || docker exec ${project_name}-nginx cp -f /conf.d/${protocol}/nginx.conf.prepared.${new_state} /etc/nginx/conf.d/nginx.conf + docker exec ${project_name}-nginx sh -c 'service nginx reload || service nginx restart || [EMERGENCY] Nginx Prepared Plan failed as well. Correct /etc/nginx/conf.d/nginx.conf directly and Run "service nginx restart".' fi -echo "[NOTICE] The PID of NGINX has been confirmed. Now, checking if CONSUL has been replaced with ${new_upstream} string in the NGINX configuration file." +echo "[NOTICE] The PID of NGINX has been confirmed. Now, checking if ${new_upstream} string is in the NGINX configuration file." 
count=0 while [ 1 ]; do lines=$(docker exec ${project_name}-nginx nginx -T | grep ${new_state} | wc -l | xargs) if [[ ${lines} == '0' ]]; then count=$((count + 1)) if [[ ${count} -eq 10 ]]; then - echo "[WARNING] Since ${new_upstream} string is not found in the NGINX configuration file, we will revert CONSUL to ${old_state} (although it should already be ${old_state}, we will save it again to ensure)" + echo "[WARNING] Since ${new_upstream} string is not found in the NGINX configuration file, we will revert to ${old_state} (although it should already be ${old_state}, we will apply it again to be sure)" old_state_container_name= if [[ ${orchestration_type} == 'stack' ]]; then old_state_container_name=$(docker ps -q --filter "name=^${project_name}-${old_state} " | shuf -n 1) @@ -99,7 +87,7 @@ while [ 1 ]; do fi if [[ ${is_run} == 'yes' ]]; then - ./nginx-blue-green-reset.sh ${consul_key_value_store} ${old_state} ${new_state} + ./nginx-blue-green-reset.sh ${old_state} ${new_state} else echo "[WARNING] We won't revert, as ${old_state} is NOT running as well." fi diff --git a/nginx-blue-green-reset.sh b/nginx-blue-green-reset.sh index 61ba9ef..bc0a3cd 100644 --- a/nginx-blue-green-reset.sh +++ b/nginx-blue-green-reset.sh @@ -5,24 +5,21 @@ source use-common.sh project_name=$(get_value_from_env "PROJECT_NAME") orchestration_type=$(get_value_from_env "ORCHESTRATION_TYPE") -consul_key_value_store=$1 -state=$2 -new_state=$3 +state=$1 +new_state=$2 echo "[NOTICE] Point Nginx back to ${state} from nginx-blue-green-reset.sh." -docker exec ${project_name}-nginx curl -X PUT -d ${state} ${consul_key_value_store} > /dev/null || { - echo "[ERROR] Setting ${state} on '/etc/nginx/conf.d/nginx.conf' directly according to the Nginx Contingency Plan." - docker exec ${project_name}-nginx cp -f /etc/consul-templates/nginx.conf.contingency.${state} /etc/nginx/conf.d/nginx.conf - docker exec ${project_name}-nginx sh -c 'service nginx reload || service nginx restart || [EMERGENCY] Nginx Contingency Plan failed as well. Correct /etc/nginx/conf.d/nginx.conf directly and Run "service nginx restart".' -} +echo "[NOTICE] Setting ${state} on '/etc/nginx/conf.d/nginx.conf' directly according to the Nginx Prepared Plan." +docker exec ${project_name}-nginx cp -f /etc/templates/nginx.conf.prepared.${state} /etc/nginx/conf.d/nginx.conf +docker exec ${project_name}-nginx sh -c 'service nginx reload || service nginx restart || echo "[EMERGENCY] Nginx Prepared Plan failed. Correct /etc/nginx/conf.d/nginx.conf directly and run: service nginx restart"' echo "[NOTICE] Stopping the ${new_state} ${orchestration_type}" if [[ ${orchestration_type} != 'stack' ]]; then docker-compose -f docker-${orchestration_type}-${project_name}.yml stop ${project_name}-${new_state} - echo "[NOTICE] The previous (${new_state}) container has been stopped because the deployment was successful. (If NGINX_RESTART=true or CONSUL_RESTART=true, existing containers have already been terminated in the load_all_containers function.)" + echo "[NOTICE] The previous (${new_state}) container has been stopped because the deployment was successful. (If NGINX_RESTART=true, existing containers have already been terminated in the load_all_containers function.)" else docker stack rm ${project_name}-${new_state} - echo "[NOTICE] The previous (${new_state}) service has been stopped because the deployment was successful.
(If NGINX_RESTART=true or CONSUL_RESTART=true, existing containers have already been terminated in the load_all_containers function.)" + echo "[NOTICE] The previous (${new_state}) service has been stopped because the deployment was successful. (If NGINX_RESTART=true, existing containers have already been terminated in the load_all_containers function.)" fi exit 1 diff --git a/push-to-git.sh b/push-to-git.sh index fdfc135..19b5cd3 100644 --- a/push-to-git.sh +++ b/push-to-git.sh @@ -18,7 +18,7 @@ _main() { echo "[NOTICE] Log in to the Gitlab Container Registry." docker_login_with_params ${git_token_image_load_from_username} ${git_token_image_load_from_password} ${git_image_load_from_host} - echo "[NOTICE] Prepare current versions of the App,Nginx,Consul and Registrator and push them." + echo "[NOTICE] Prepare current versions of the App,Nginx and push them." echo "[DEBUG] Run : docker tag ${project_name}:${state_for_push} ${app_image_name_in_registry}" docker tag ${project_name}:${state_for_push} ${app_image_name_in_registry} || exit 1 @@ -33,22 +33,6 @@ _main() { docker push ${nginx_image_name_in_registry} || exit 1 echo "[DEBUG] Run : docker rmi -f ${nginx_image_name_in_registry}" docker rmi ${nginx_image_name_in_registry} || exit 1 - - - - echo "[DEBUG] Run : docker tag hashicorp/consul:1.14.11 ${consul_image_name_in_registry}}" - docker tag hashicorp/consul:1.14.11 ${consul_image_name_in_registry} || exit 1 - echo "[DEBUG] Run : docker push ${consul_image_name_in_registry}" - docker push ${consul_image_name_in_registry} || exit 1 - echo "[DEBUG] Run : docker rmi -f ${consul_image_name_in_registry}" - docker rmi ${consul_image_name_in_registry} || exit 1 - - echo "[DEBUG] Run : docker tag gliderlabs/registrator:v7 ${registrator_image_name_in_registry}" - docker tag gliderlabs/registrator:v7 ${registrator_image_name_in_registry} || exit 1 - echo "[DEBUG] Run : docker push ${registrator_image_name_in_registry}" - docker push ${registrator_image_name_in_registry} || exit 1 - echo "[DEBUG] Run : docker rmi -f ${registrator_image_name_in_registry}" - docker rmi ${registrator_image_name_in_registry} || exit 1 } diff --git a/rollback.sh b/rollback.sh index 15c601d..aca5e8d 100644 --- a/rollback.sh +++ b/rollback.sh @@ -59,7 +59,7 @@ else echo "[NOTICE] Nginx will NOT be restarted, as ${nginx_restart} = false" fi -./nginx-blue-green-activate.sh ${new_state} ${state} ${new_upstream} ${consul_key_value_store} +./nginx-blue-green-activate.sh ${new_state} ${state} ${new_upstream} if [[ ${orchestration_type} != 'stack' ]]; then diff --git a/run.sh b/run.sh index a31b670..aaa5ce2 100644 --- a/run.sh +++ b/run.sh @@ -25,7 +25,6 @@ sleep 1 source ./use-app.sh source ./use-nginx.sh -source ./use-consul.sh # Back-up priority : new > blue or green > latest @@ -83,12 +82,9 @@ terminate_whole_system(){ docker-compose -f docker-${orchestration_type}-${project_name}.yml down || echo "[NOTICE] docker-${orchestration_type}-${project_name}.yml down failure" docker-compose -f docker-${orchestration_type}-${project_name}.yml down || echo "[NOTICE] docker-${orchestration_type}-${project_name}.yml down failure" - docker-compose -f docker-${orchestration_type}-consul.yml down || echo "[NOTICE] docker-${orchestration_type}-${project_name}-consul.yml down failure" docker-compose -f docker-orchestration-${project_name}-nginx.yml down || echo "[NOTICE] docker-orchestration-${project_name}-nginx.yml down failure" - docker network rm consul - - docker network rm consul + docker network rm dbgr-net || echo "[NOTICE] dbgr-net 
has already been removed." docker system prune -f fi @@ -96,24 +92,17 @@ terminate_whole_system(){ load_all_containers(){ - # app, consul, nginx + # app, nginx # In the past, restarting Nginx before App caused error messages like "upstream not found" in the Nginx configuration file. This seems to have caused a 502 error on the socket side. - echo "[NOTICE] Creating consul network..." + echo "[NOTICE] Creating dbgr-net network..." if [[ ${orchestration_type} != 'stack' ]]; then - docker network create consul || echo "[NOTICE] Consul Network has already been created. You can ignore this message." - else - docker network create --driver overlay consul || echo "[NOTICE] Consul Network has already been created. You can ignore this message." - fi - - # Therefore, it is safer to restart the containers in the order of Consul -> App -> Nginx. - - if [[ ${consul_restart} == 'true' ]]; then - consul_down_and_up + docker network create dbgr-net || echo "[NOTICE] DBGR Network has already been created. You can ignore this message." else - echo "[NOTICE] As CONSUL_RESTART in .env is NOT true, Consul won't be restarted." + docker network create --driver overlay dbgr-net || echo "[NOTICE] DBGR Network has already been created. You can ignore this message." fi + # Therefore, it is safer to restart the containers in the order of App -> Nginx. echo "[NOTICE] Run the app as a ${new_state} container. (As long as NGINX_RESTART is set to 'false', this won't stop the running container since this is a BLUE-GREEN deployment.)" app_down_and_up @@ -136,8 +125,6 @@ load_all_containers(){ check_edge_routing_containers_loaded || (echo "[ERROR] Failed in loading necessary supporting containers." && exit 1) - check_common_containers_loaded || (echo "[ERROR] Failed in loading supporting containers. We will conduct the Nginx Contingency Plan.") - } @@ -190,8 +177,7 @@ _main() { apply_ports_onto_nginx_yaml apply_docker_compose_volumes_onto_app_nginx_yaml - save_nginx_ctmpl_template_from_origin - save_nginx_contingency_template_from_origin + save_nginx_prepared_template_from_origin save_nginx_logrotate_template_from_origin save_nginx_main_template_from_origin @@ -224,11 +210,6 @@ _main() { load_app_docker_image fi - if [[ ${consul_restart} == 'true' ]]; then - display_checkpoint_message "Building Docker image for Consul... ('consul_restart' is set to true) (14%)" - load_consul_docker_image - fi - if [[ ${nginx_restart} == 'true' ]]; then display_checkpoint_message "Building Docker image for Nginx... ('nginx_restart' is set to true) (16%)" load_nginx_docker_image @@ -240,20 +221,20 @@ _main() { fi - local cached_new_state=${new_state} - cache_all_states - if [[ ${cached_new_state} != "${new_state}" ]]; then - (echo "[ERROR] Just checked all states shortly after the Docker Images had been done built. The state the App was supposed to be deployed as has been changed. (Original : ${cached_new_state}, New : ${new_state}). For the safety, we exit..." && exit 1) - fi + #local cached_new_state=${new_state} + #cache_all_states + #if [[ ${cached_new_state} != "${new_state}" ]]; then + # (echo "[ERROR] Just checked all states shortly after the Docker Images had been done built. The state the App was supposed to be deployed as has been changed. (Original : ${cached_new_state}, New : ${new_state}). For the safety, we exit..." 
&& exit 1) + #fi - # docker-compose up the App, Nginx, Consul & * Internal Integrity Check for the App - display_checkpoint_message "Starting docker-compose for App, Nginx, and Consul, followed by an internal integrity check for the app... (40%)" + # docker-compose up the App, Nginx & * Internal Integrity Check for the App + display_checkpoint_message "Starting docker-compose for App and Nginx, followed by an internal integrity check for the app... (40%)" load_all_containers display_checkpoint_message "Reached the transition point... (65%)" display_immediate_transition ${state} ${new_state} - ./nginx-blue-green-activate.sh ${new_state} ${state} ${new_upstream} ${consul_key_value_store} + ./nginx-blue-green-activate.sh ${new_state} ${state} ${new_upstream} # [E] External Integrity Check, if fails, 'emergency-nginx-down-and-up.sh' will be run. display_checkpoint_message "Performing external integrity check. If it fails, 'emergency-nginx-down-and-up.sh' will be executed... (87%)" @@ -279,16 +260,19 @@ _main() { echo "[DEBUG] state : ${state}, new_state : ${new_state}, initially_cached_old_state : ${initially_cached_old_state}" - echo "[NOTICE] For safety, finally check Consul pointing before stopping the previous container (${initially_cached_old_state})." - local consul_pointing=$(docker exec ${project_name}-nginx curl ${consul_key_value_store}?raw 2>/dev/null || echo "failed") - if [[ ${consul_pointing} != ${initially_cached_old_state} ]]; then + echo "[NOTICE] For safety, finally check Nginx pointing before stopping the previous container (${initially_cached_old_state})." + + local nginx_pointing + nginx_pointing=$(get_nginx_pointing "$project_name") + + if [[ ${nginx_pointing} != ${initially_cached_old_state} ]]; then if [[ ${orchestration_type} != 'stack' ]]; then docker-compose -f docker-${orchestration_type}-${project_name}.yml stop ${project_name}-${initially_cached_old_state} - echo "[NOTICE] The previous (${initially_cached_old_state}) container (initially_cached_old_state) has been stopped because the deployment was successful. (If NGINX_RESTART=true or CONSUL_RESTART=true, existing containers have already been terminated in the load_all_containers function.)" + echo "[NOTICE] The previous (${initially_cached_old_state}) container (initially_cached_old_state) has been stopped because the deployment was successful. (If NGINX_RESTART=true, existing containers have already been terminated in the load_all_containers function.)" else docker stack rm ${project_name}-${initially_cached_old_state} - echo "[NOTICE] The previous (${initially_cached_old_state}) service (initially_cached_old_state) has been stopped because the deployment was successful. (If NGINX_RESTART=true or CONSUL_RESTART=true, existing containers have already been terminated in the load_all_containers function.)" + echo "[NOTICE] The previous (${initially_cached_old_state}) service (initially_cached_old_state) has been stopped because the deployment was successful. (If NGINX_RESTART=true, existing containers have already been terminated in the load_all_containers function.)" fi display_checkpoint_message "CURRENT APP_URL: ${app_url}." @@ -301,7 +285,7 @@ _main() { docker rmi $(docker images -f "dangling=true" -q) || echo "[NOTICE] Any images in use will not be deleted." else - echo "[NOTICE] The previous (${initially_cached_old_state}) container (initially_cached_old_state) has NOT been stopped because the current Consul Pointing is ${consul_pointing}." 
+ echo "[NOTICE] The previous (${initially_cached_old_state}) container (initially_cached_old_state) has NOT been stopped because the current Nginx Pointing is ${nginx_pointing}." fi } diff --git a/stop-all-containers.sh b/stop-all-containers.sh index 163488b..52261f6 100644 --- a/stop-all-containers.sh +++ b/stop-all-containers.sh @@ -34,7 +34,5 @@ docker container rm ${project_name}-green || echo "[DEBUG] A-L 5" docker stack rm ${project_name}-blue || echo "[DEBUG] F" docker stack rm ${project_name}-green || echo "[DEBUG] F-2" -docker-compose -f docker-orchestration-consul.yml down || echo "[DEBUG] C" - docker-compose down || echo "[DEBUG] G" docker system prune -f \ No newline at end of file diff --git a/tests/spring-sample-h-auth/run-and-kill-jar-and-after-seconds-auto-recovered.sh b/tests/spring-sample-h-auth/run-and-kill-jar-and-after-seconds-auto-recovered.sh deleted file mode 100644 index bf5ab6a..0000000 --- a/tests/spring-sample-h-auth/run-and-kill-jar-and-after-seconds-auto-recovered.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -sed -i -e "s/\r$//g" $(basename $0) -set -eu - -cd ../../ - -sudo chmod a+x *.sh - -echo "[NOTICE] Substituting CRLF with LF to prevent possible CRLF errors..." -bash prevent-crlf.sh -git config apply.whitespace nowarn -git config core.filemode false - -container=$(docker ps --format '{{.Names}}' | grep "spring-sample-h-auth-[bg]") -if [ -z "$container" ]; then - echo "[NOTICE] There is NO spring-sample-h-auth container, now we will build it." - cp -f .env.java.real .env - sudo bash run.sh -else - echo "[NOTICE] $container exists." -fi - -sleep 3 -source ./use-common.sh -source ./use-app.sh - -cache_global_vars - -consul_pointing=$(docker exec ${project_name}-nginx curl ${consul_key_value_store}?raw 2>/dev/null || echo "failed") - -echo "[TEST][NOTICE] ! Kill the jar in ${project_name}-${consul_pointing}" -docker exec ${project_name}-${consul_pointing} kill 9 $(pgrep -f 'java') -sleep 2 - -if [[ $(check_availability_inside_container ${consul_pointing} 120 5 | tail -n 1) == 'true' ]]; then - echo "[TEST][NOTICE] : SUCCESS " - else - echo "[TEST][NOTICE] : FAILURE " -fi \ No newline at end of file diff --git a/tests/spring-sample-h-auth/run-and-kill-jar-and-state-is-restarting-or-running.sh b/tests/spring-sample-h-auth/run-and-kill-jar-and-state-is-restarting-or-running.sh deleted file mode 100644 index 2946bc1..0000000 --- a/tests/spring-sample-h-auth/run-and-kill-jar-and-state-is-restarting-or-running.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -sed -i -e "s/\r$//g" $(basename $0) -set -eu - -cd ../../ - -sudo chmod a+x *.sh - -echo "[NOTICE] Substituting CRLF with LF to prevent possible CRLF errors..." -bash prevent-crlf.sh -git config apply.whitespace nowarn -git config core.filemode false - -container=$(docker ps --format '{{.Names}}' | grep "spring-sample-h-auth-[bg]") -if [ -z "$container" ]; then - echo "[NOTICE] There is NO spring-sample-h-auth container, now we will build it." - cp -f .env.java.real .env - sudo bash run.sh -else - echo "[NOTICE] $container exists." -fi - -sleep 3 -source ./use-common.sh - -cache_global_vars - -consul_pointing=$(docker exec ${project_name}-nginx curl ${consul_key_value_store}?raw 2>/dev/null || echo "failed") - -echo "[TEST][NOTICE] ! 
Kill the jar in ${project_name}-${consul_pointing}" -docker exec ${project_name}-${consul_pointing} kill 9 $(pgrep -f 'java') - -# Print state checking process -result=$(cache_all_states) - -if [[ $result == *"currently running"* ]]; then - echo "[TEST][NOTICE] : SUCCESS : running" -elif [[ $result == *"currently restarting"* ]]; then - echo "[TEST][NOTICE] : SUCCESS : restarting" -else - echo "[TEST][NOTICE] : FAILURE" -fi diff --git a/tests/spring-sample-h-auth/run-and-make-consul-pointing-error-and-recovered.sh b/tests/spring-sample-h-auth/run-and-make-consul-pointing-error-and-recovered.sh deleted file mode 100644 index 7d27178..0000000 --- a/tests/spring-sample-h-auth/run-and-make-consul-pointing-error-and-recovered.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash -sed -i -e "s/\r$//g" $(basename $0) -set -eu - -cd ../../ - -sudo chmod a+x *.sh - -echo "[NOTICE] Substituting CRLF with LF to prevent possible CRLF errors..." -bash prevent-crlf.sh -git config apply.whitespace nowarn -git config core.filemode false - -container=$(docker ps --format '{{.Names}}' | grep "spring-sample-h-auth-[bg]") -if [ -z "$container" ]; then - echo "[NOTICE] There is NO spring-sample-h-auth container, now we will build it." - cp -f .env.java.real .env - sudo bash run.sh -else - echo "[NOTICE] $container exists." -fi - -sleep 3 -source ./use-common.sh -source ./use-app.sh - -cache_global_vars - -consul_pointing=$(docker exec ${project_name}-nginx curl ${consul_key_value_store}?raw 2>/dev/null || echo "failed") -the_opposite_of_consul_pointing='' -if [[ ${consul_pointing} == 'blue' ]]; then - the_opposite_of_consul_pointing='green' -else - the_opposite_of_consul_pointing='blue' -fi - -echo "[TEST][DEBUG] the_opposite_of_consul_pointing : ${the_opposite_of_consul_pointing}" - -echo "[TEST][NOTICE] To make a Nginx error, get consul_pointing to the wrong(=NOT running) container" -bash emergency-nginx-down-and-up.sh ${the_opposite_of_consul_pointing} || echo "" -#echo "[TEST][NOTICE] Run 'emergency-nginx-down-and-up.sh'" -#bash emergency-nginx-down-and-up.sh - -echo "[TEST][NOTICE] Run check_availability_out_of_container" -cache_global_vars -re=$(check_availability_out_of_container | tail -n 1); - -if [[ ${re} != 'true' ]]; then - echo "[TEST][NOTICE] : FAILURE" -else - echo "[TEST][NOTICE] : SUCCESS" -fi \ No newline at end of file diff --git a/use-common.sh b/use-common.sh index 71422e1..765d6f1 100644 --- a/use-common.sh +++ b/use-common.sh @@ -188,9 +188,6 @@ cache_non_dependent_global_vars() { docker_build_labels="${docker_build_labels},project.git.sha=${project_git_sha}" fi - consul_key_value_store=$(get_value_from_env "CONSUL_KEY_VALUE_STORE") - consul_key=$(echo ${consul_key_value_store} | cut -d "/" -f6)\\/$(echo ${consul_key_value_store} | cut -d "/" -f7) - app_health_check_path=$(get_value_from_env "APP_HEALTH_CHECK_PATH") bad_app_health_check_pattern=$(get_value_from_env "BAD_APP_HEALTH_CHECK_PATTERN") good_app_health_check_pattern=$(get_value_from_env "GOOD_APP_HEALTH_CHECK_PATTERN") @@ -217,16 +214,17 @@ cache_non_dependent_global_vars() { nginx_restart=$(get_value_from_env "NGINX_RESTART") - consul_restart=$(get_value_from_env "CONSUL_RESTART") - if [[ ${consul_restart} == 'true' && ${nginx_restart} == 'false' ]]; then - echo "[ERROR] On .env, consul_restart=true but nginx_restart=false. That does NOT make sense, as Nginx depends on Consul." 
&& exit 1 - fi + use_my_own_app_yml=$(get_value_from_env "USE_MY_OWN_APP_YML") skip_building_app_image=$(get_value_from_env "SKIP_BUILDING_APP_IMAGE") if [[ ${docker_layer_corruption_recovery} == 'true' && ${skip_building_app_image} == 'true' ]]; then - echo "[ERROR] On .env, docker_layer_corruption_recovery=true and skip_building_app_image=true as well. That does NOT make sense, as 'docker_layer_corruption_recovery=true' removes all images first." && exit 1 + echo "[ERROR] Configuration conflict in .env: 'docker_layer_corruption_recovery=true' and 'skip_building_app_image=true' cannot coexist. 'docker_layer_corruption_recovery=true' removes all images, so skipping the app image build is not logical." && exit 1 + fi + + if [[ ${docker_layer_corruption_recovery} == 'true' && ${nginx_restart} == 'false' ]]; then + echo "[ERROR] Configuration conflict in .env: 'docker_layer_corruption_recovery=true' and 'nginx_restart=false' cannot coexist. 'docker_layer_corruption_recovery=true' removes all images, which requires a restart of Nginx." && exit 1 fi orchestration_type=$(get_value_from_env "ORCHESTRATION_TYPE") @@ -268,7 +266,6 @@ cache_non_dependent_global_vars() { if [[ $(validate_number "$nginx_logrotate_file_number") == "false" ]]; then echo "[WARNING] NGINX_LOGROTATE_FILE_NUMBER in .env has an incorrect format. (value: $nginx_logrotate_file_number, correct examples: 5,10,101..., etc. Expected behavior: Logrotate won't work). However, this is NOT a serious issue. We will continue the process." fi - use_my_own_nginx_origin=$(get_value_from_env "USE_MY_OWN_NGINX_ORIGIN") } @@ -303,18 +300,8 @@ cache_global_vars() { app_image_name_in_registry="${git_image_load_from_host}/${git_image_load_from_pathname}-app:${git_image_version}" nginx_image_name_in_registry="${git_image_load_from_host}/${git_image_load_from_pathname}-nginx:${git_image_version}" - consul_image_name_in_registry="${git_image_load_from_host}/${git_image_load_from_pathname}-consul:${git_image_version}" - registrator_image_name_in_registry="${git_image_load_from_host}/${git_image_load_from_pathname}-registrator:${git_image_version}" - - if [[ $(docker exec consul echo 'yes' 2> /dev/null) == '' ]] - then - echo '[NOTICE] Since the Consul container is not running, we consider it as consul_restart=true and start from loading the image again. (The .env file will not be changed.)' - consul_restart=true - - # Since there is no Dockerfile, unlike the 'load_nginx_docker_image' and 'load_app_docker_image' functions, there is no 'build' command. - fi if [[ $(docker exec ${project_name}-nginx echo 'yes' 2> /dev/null) == '' ]] then echo "[NOTICE] Since the '${project_name}-nginx:latest' container is not running, we consider it as 'nginx_restart=true' and start from building again." @@ -614,7 +601,7 @@ check_one_container_loaded(){ if [ "$(docker ps -q -f name=^${1})" ]; then echo "[NOTICE] Supporting container ( ${1} ) running checked." else - echo "[ERROR] Supporting container ( ${1} ) running not found. But, this does NOT stop the current deployment, according to the Nginx Contingency Plan." + echo "[ERROR] Supporting container ( ${1} ) running not found. But, this does NOT stop the current deployment, according to the Nginx Prepared Plan." 
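    # Illustrative sketch only (the container name "myapp-nginx" is an assumed sample, not taken from .env):
    # 'docker ps -q -f name=^<name>' prints a container ID only while a matching container is running,
    # which is why the branch above treats an empty result as "not running".
    #   if [ "$(docker ps -q -f name=^myapp-nginx)" ]; then echo "running"; else echo "not running"; fi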
fi } @@ -626,13 +613,6 @@ check_one_edge_routing_container_loaded(){ fi } -check_common_containers_loaded(){ - all_container_names=("consul" "registrator") - for name in "${all_container_names[@]}"; do - check_one_container_loaded ${name} - done -} - check_edge_routing_containers_loaded() { all_container_names=("${project_name}-nginx") @@ -656,7 +636,7 @@ docker_login_with_params() { # experimental set_network_driver_for_orchestration_type(){ - local network_name="consul" + local network_name="dbgr-net" local swarm_network_driver="overlay" local local_network_driver="local" # 네트워크 존재 여부 확인 @@ -665,9 +645,9 @@ set_network_driver_for_orchestration_type(){ echo "[NOTICE] Network name (${network_name}) does not exist." if [[ ${orchestration_type} != 'stack' ]]; then - docker network create consul || echo "[NOTICE] Consul Network (Local) has already been created. You can ignore this message." + docker network create dbgr-net || echo "[NOTICE] DBGR Network (Local) has already been created. You can ignore this message." else - docker network create --driver ${swarm_network_driver} --attachable consul || echo "[NOTICE] Consul Network (Swarm) has already been created. You can ignore this message." + docker network create --driver ${swarm_network_driver} --attachable dbgr-net || echo "[NOTICE] DBGR Network (Swarm) has already been created. You can ignore this message." fi else network_driver=$(docker network inspect $network_id --format "{{.Driver}}") @@ -677,12 +657,12 @@ set_network_driver_for_orchestration_type(){ exit 0 else echo "[NOTICE] $swarm_network_driver is not appropriate for ${orchestration_type}" - bash emergency-consul-down-and-up; + bash emergency-all-down-and-up; fi elif [ "$network_driver" == "$local_network_driver" ]; then if [[ ${orchestration_type} == 'stack' ]]; then echo "[NOTICE] $swarm_network_driver is not appropriate for ${orchestration_type}" - bash emergency-consul-down-and-up; + bash emergency-all-down-and-up; else echo "[NOTICE] $swarm_network_driver is appropriately set for $local_network_driver" exit 0 diff --git a/use-consul.sh b/use-consul.sh deleted file mode 100644 index 0286088..0000000 --- a/use-consul.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash -set -eu - -git config apply.whitespace nowarn -git config core.filemode false - - -consul_down_and_up(){ - - echo "[NOTICE] As !CONSUL_RESTART is true, which means there will be a short-downtime for CONSUL, terminate CONSUL container and network." - - echo "[NOTICE] Forcefully Stop & Remove CONSUL Container." - docker-compose -f docker-orchestration-consul.yml down || echo "[NOTICE] The previous Consul & Registrator Container has been stopped, if exists." - docker container rm -f consul || echo "[NOTICE] The previous Consul Container has been removed, if exists." - docker container rm -f registrator || echo "[NOTICE] The previous Registrator Container has been removed, if exists." - - echo "[NOTICE] Up CONSUL container" - # https://github.com/hashicorp/consul/issues/17973 - docker-compose -p consul -f docker-orchestration-consul.yml up -d || echo "[NOTICE] Consul has already been created. You can ignore this message." - - sleep 7 -} - -consul_down_and_up_with_network(){ - - echo "[NOTICE] As !CONSUL_RESTART is true, which means there will be a short-downtime for CONSUL, terminate CONSUL container and network." - - echo "[NOTICE] Stop & Remove CONSUL Container." - docker-compose -f docker-orchestration-consul.yml down || echo "[NOTICE] The previous Consul & Registrator Container has been stopped, if exists." 
- docker network disconnect -f consul consul && docker container stop consul && docker container rm consul || echo "[NOTICE] The previous Consul Container has been removed, if exists." - docker container rm -f consul || echo "[NOTICE] The previous Consul Container has been removed, if exists." - docker container rm -f registrator || echo "[NOTICE] The previous Registrator Container has been removed, if exists." - - sleep 5 - - echo "[NOTICE] We will remove the network Consul and restart it." - docker network rm -f consul || echo "[NOTICE] Failed to remove Consul Network. You can ignore this message, or if you want to restart it, please terminate other projects that share the Consul network." - docker system prune -f - - if [[ ${orchestration_type} != 'stack' ]]; then - echo "[DEBUG] orchestration_type : ${orchestration_type} / A" - docker network create consul || (echo "[ERROR] Consul Network has NOT been removed. You need to remove all containers and re-create the consul network manually." && exit 1) - else - docker network create --driver overlay --attachable consul || (echo "[ERROR] Consul Network has NOT been removed. You need to remove all containers and re-create the consul network manually." && exit 1) - echo "[DEBUG] orchestration_type : ${orchestration_type} / B" - fi - - - echo "[NOTICE] Up CONSUL container" - # https://github.com/hashicorp/consul/issues/17973 - docker-compose -p consul -f docker-orchestration-consul.yml up -d || echo "[NOTICE] Consul has already been created. You can ignore this message." - - sleep 5 - - docker system prune -f -} -load_consul_docker_image(){ - - - if [ ${git_image_load_from} = "registry" ]; then - - # Almost all of clients use this deployment. - - echo "[NOTICE] Attempt to log in to the Registry." - docker_login_with_params ${git_token_image_load_from_username} ${git_token_image_load_from_password} ${git_image_load_from_host} - - echo "[NOTICE] Pull the Registrator image stored in the Registry." - docker pull ${registrator_image_name_in_registry} || exit 1 - docker tag ${registrator_image_name_in_registry} gliderlabs/registrator:v7 || exit 1 - docker rmi -f ${registrator_image_name_in_registry} || exit 1 - - echo "[NOTICE] Pull the Consul image stored in the Registry." - docker pull ${consul_image_name_in_registry} || exit 1 - docker tag ${consul_image_name_in_registry} hashicorp/consul:1.14.11 || exit 1 - docker rmi -f ${consul_image_name_in_registry} || exit 1 - fi - - # Since there is no Dockerfile, unlike the 'load_nginx_docker_image' and 'load_app_docker_image' functions, there is no 'build' command. 
- - -} \ No newline at end of file diff --git a/use-nginx.sh b/use-nginx.sh index 0d663c3..32c6116 100644 --- a/use-nginx.sh +++ b/use-nginx.sh @@ -62,119 +62,8 @@ apply_docker_compose_volumes_onto_app_nginx_yaml(){ } -set_origin_file() { - local customized_file=$1 - local default_file=$2 - if [[ ${use_my_own_nginx_origin} = 'true' ]]; then - if [[ -f $customized_file ]]; then - echo $customized_file - else - echo $default_file - fi - else - echo $default_file - fi -} - -save_nginx_ctmpl_template_from_origin(){ - - local proxy_hostname= - local proxy_hostname_blue= - - if [[ ${orchestration_type} == 'stack' ]]; then - proxy_hostname="!#{PROJECT_NAME}-{{ \$key_value }}_!#{PROJECT_NAME}-{{ \$key_value }}" - proxy_hostname_blue="!#{PROJECT_NAME}-blue_!#{PROJECT_NAME}-blue" - else - proxy_hostname="!#{PROJECT_NAME}-{{ \$key_value }}" - proxy_hostname_blue="!#{PROJECT_NAME}-blue" - fi - - local app_https_protocol="https"; - if [[ ${redirect_https_to_http} = 'true' ]]; then - app_https_protocol="http" - fi - - local nginx_template_file=".docker/nginx/template/ctmpl/${protocol}/nginx.conf.ctmpl" - - echo "[NOTICE] NGINX template (${nginx_template_file}) is now being created." - - local app_origin_file=$(set_origin_file ".docker/nginx/origin/conf.d/${protocol}/app/nginx.conf.ctmpl.origin.customized" \ - ".docker/nginx/origin/conf.d/${protocol}/app/nginx.conf.ctmpl.origin") - - echo "[DEBUG] ${app_origin_file} will be added to Template (${nginx_template_file})" - - sed -e "s|!#{proxy_hostname}|${proxy_hostname}|g" \ - -e "s|!#{proxy_hostname_blue}|${proxy_hostname_blue}|g" \ - -e "s|!#{app_https_protocol}|${app_https_protocol}|g" \ - "${app_origin_file}" > "${nginx_template_file}" - - - echo "" >> "${nginx_template_file}" - - local additionals_origin_file=$(set_origin_file ".docker/nginx/origin/conf.d/${protocol}/additionals/nginx.conf.ctmpl.origin.customized" \ - ".docker/nginx/origin/conf.d/${protocol}/additionals/nginx.conf.ctmpl.origin") - - echo "[DEBUG] ${additionals_origin_file} will be added to Template (${nginx_template_file})" - - if [ ${#additional_ports[@]} -eq 0 ]; then - echo "[DEBUG] However, no additional_ports found. 
it will not be added to ${nginx_template_file}" - else - for i in "${additional_ports[@]}" - do - - sed -e "s|!#{proxy_hostname}|${proxy_hostname}|g" \ - -e "s|!#{proxy_hostname_blue}|${proxy_hostname_blue}|g" \ - -e "s|!#{app_https_protocol}|${app_https_protocol}|g" \ - -e "s|!#{additional_port}|${i}|g" \ - "${additionals_origin_file}" >> "${nginx_template_file}" - - echo "" >> ${nginx_template_file} - done - fi - - sed -i -e "s|!#{EXPOSE_PORT}|${expose_port}|g" \ - -e "s|!#{APP_PORT}|${app_port}|g" \ - -e "s|!#{PROJECT_NAME}|${project_name}|g" \ - -e "s|!#{CONSUL_KEY}|${consul_key}|g" \ - -e "s|!#{NGINX_CLIENT_MAX_BODY_SIZE}|${nginx_client_max_body_size}|g" \ - "${nginx_template_file}" - - - if [[ ${use_nginx_restricted_location} = 'true' ]]; then - - sed -i -e "/!#{USE_NGINX_RESTRICTED_LOCATION}/c \ - location ${nginx_restricted_location} { \ - add_header Pragma no-cache; \ - add_header Cache-Control no-cache; \ - \ - auth_basic \"Restricted\"; \ - auth_basic_user_file /etc/nginx/custom-files/.htpasswd; \ - \ - {{ with \$key_value := keyOrDefault \"${consul_key}\" \"blue\" }} \ - {{ if or (eq \$key_value \"blue\") (eq \$key_value \"green\") }} \ - proxy_pass ${protocol}://${project_name}-{{ \$key_value }}:${app_port}; \ - {{ else }} \ - proxy_pass ${protocol}://${project_name}-blue:${app_port}; \ - {{ end }} \ - {{ end }} \ - proxy_set_header Host \$http_host; \ - proxy_set_header X-Scheme \$scheme; \ - proxy_set_header X-Forwarded-Protocol \$scheme; \ - proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; \ - proxy_set_header X-Real-IP \$remote_addr; \ - proxy_http_version 1.1; \ - proxy_read_timeout 300s; \ - proxy_connect_timeout 75s; \ - }" "${nginx_template_file}" - else - - sed -i -e "s/!#{USE_NGINX_RESTRICTED_LOCATION}//" "${nginx_template_file}" - - fi -} - -save_nginx_contingency_template_from_origin(){ +save_nginx_prepared_template_from_origin(){ local proxy_hostname= @@ -190,35 +79,34 @@ save_nginx_contingency_template_from_origin(){ fi - local nginx_contingency_template_temp_file=".docker/nginx/template/ctmpl/${protocol}/nginx.conf.contingency" - local nginx_contingency_template_blue_file=".docker/nginx/template/ctmpl/${protocol}/nginx.conf.contingency.blue" - local nginx_contingency_template_green_file=".docker/nginx/template/ctmpl/${protocol}/nginx.conf.contingency.green" + local nginx_prepared_template_temp_file=".docker/nginx/template/conf.d/${protocol}/nginx.conf.prepared" + local nginx_prepared_template_blue_file=".docker/nginx/template/conf.d/${protocol}/nginx.conf.prepared.blue" + local nginx_prepared_template_green_file=".docker/nginx/template/conf.d/${protocol}/nginx.conf.prepared.green" - echo "[NOTICE] NGINX template (${nginx_contingency_template_temp_file}) is now being created." + echo "[NOTICE] NGINX template (${nginx_prepared_template_temp_file}) is now being created." 
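    # Illustrative sketch of the placeholder substitution performed below (the values 8080, 3000 and
    # "myapp" are assumed sample inputs for the example, not values read from .env):
    #
    #   printf 'listen !#{EXPOSE_PORT};\nproxy_pass http://!#{PROJECT_NAME}-!#{APP_STATE}:!#{APP_PORT};\n' > /tmp/sample.origin
    #   sed -e 's|!#{EXPOSE_PORT}|8080|g' \
    #       -e 's|!#{PROJECT_NAME}|myapp|g' \
    #       -e 's|!#{APP_PORT}|3000|g' /tmp/sample.origin > /tmp/sample.prepared
    #
    #   # /tmp/sample.prepared still contains !#{APP_STATE}; that token is only resolved to
    #   # "blue" or "green" when the .blue / .green variants are written at the end of this function.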
sed -e "s|!#{proxy_hostname}|${proxy_hostname}|g" \ -e "s|!#{app_https_protocol}|${app_https_protocol}|g" \ - .docker/nginx/origin/conf.d/${protocol}/app/nginx.conf.contingency.origin > ${nginx_contingency_template_temp_file} + .docker/nginx/origin/conf.d/${protocol}/app/nginx.conf.prepared.origin > ${nginx_prepared_template_temp_file} - echo "" >> ${nginx_contingency_template_temp_file} + echo "" >> ${nginx_prepared_template_temp_file} for i in "${additional_ports[@]}" do sed -e "s|!#{proxy_hostname}|${proxy_hostname}|g" \ -e "s|!#{app_https_protocol}|${app_https_protocol}|g" \ -e "s|!#{additional_port}|${i}|g" \ - .docker/nginx/origin/conf.d/${protocol}/additionals/nginx.conf.contingency.origin >> ${nginx_contingency_template_temp_file} + .docker/nginx/origin/conf.d/${protocol}/additionals/nginx.conf.prepared.origin >> ${nginx_prepared_template_temp_file} - echo "" >> ${nginx_contingency_template_temp_file} + echo "" >> ${nginx_prepared_template_temp_file} done sed -i -e "s|!#{EXPOSE_PORT}|${expose_port}|g" \ -e "s|!#{APP_PORT}|${app_port}|g" \ -e "s|!#{PROJECT_NAME}|${project_name}|g" \ - -e "s|!#{CONSUL_KEY}|${consul_key}|g" \ -e "s|!#{NGINX_CLIENT_MAX_BODY_SIZE}|${nginx_client_max_body_size}|g" \ - ${nginx_contingency_template_temp_file} + ${nginx_prepared_template_temp_file} @@ -241,20 +129,20 @@ save_nginx_contingency_template_from_origin(){ proxy_http_version 1.1; \ proxy_read_timeout 300s; \ proxy_connect_timeout 75s; \ - }" ${nginx_contingency_template_temp_file} + }" ${nginx_prepared_template_temp_file} else - sed -i -e "s/!#{USE_NGINX_RESTRICTED_LOCATION}//" ${nginx_contingency_template_temp_file} + sed -i -e "s/!#{USE_NGINX_RESTRICTED_LOCATION}//" ${nginx_prepared_template_temp_file} fi - echo "[NOTICE] Creating 'nginx.conf.contingency.blue', 'nginx.conf.contingency.green''." - cp -f ${nginx_contingency_template_temp_file} ${nginx_contingency_template_blue_file} - sed -i -e "s/!#{APP_STATE}/blue/" ${nginx_contingency_template_blue_file} - cp -f ${nginx_contingency_template_temp_file} ${nginx_contingency_template_green_file} - sed -i -e "s/!#{APP_STATE}/green/" ${nginx_contingency_template_green_file} + echo "[NOTICE] Creating 'nginx.conf.prepared.blue', 'nginx.conf.prepared.green''." + cp -f ${nginx_prepared_template_temp_file} ${nginx_prepared_template_blue_file} + sed -i -e "s/!#{APP_STATE}/blue/" ${nginx_prepared_template_blue_file} + cp -f ${nginx_prepared_template_temp_file} ${nginx_prepared_template_green_file} + sed -i -e "s/!#{APP_STATE}/green/" ${nginx_prepared_template_green_file} } @@ -274,8 +162,7 @@ save_nginx_main_template_from_origin(){ echo "[NOTICE] NGINX Main template (.docker/nginx/template/nginx.conf.main) is now being created." 
- local main_origin_file=$(set_origin_file ".docker/nginx/origin/nginx.conf.main.origin.customized" \ - ".docker/nginx/origin/nginx.conf.main.origin") + local main_origin_file=".docker/nginx/origin/nginx.conf.main.origin" echo "[DEBUG] ${main_origin_file} will be processed into Template (.docker/nginx/template/nginx.conf.main)" @@ -330,21 +217,21 @@ nginx_down_and_up(){ check_nginx_templates_integrity(){ - echo "[NOTICE] Now we'll create a temporary NGINX image to test parsed settings in '.docker/nginx/template/ctmpl'" + echo "[NOTICE] Now we'll create a temporary NGINX image to test parsed settings in '.docker/nginx/template/conf.d'" docker build --build-arg DISABLE_CACHE=${CUR_TIME} --build-arg protocol="${protocol}" --build-arg shared_volume_group_id="${shared_volume_group_id}" --build-arg shared_volume_group_name="${shared_volume_group_name}" --tag ${project_name}-nginx-test -f ./.docker/nginx/Dockerfile -m ${docker_build_memory_usage} . || exit 1 - echo "[NOTICE] Now we'll create a temporary NGINX container to test parsed settings in '.docker/nginx/template/ctmpl'" + echo "[NOTICE] Now we'll create a temporary NGINX container to test parsed settings in '.docker/nginx/template/conf.d'" stop_and_remove_container "${project_name}-nginx-test" docker run -d -it --name ${project_name}-nginx-test \ -e SERVICE_NAME=nginx \ - --network=consul \ + --network=dbgr-net \ --env-file .env \ ${project_name}-nginx-test:latest sleep 3 - echo "[NOTICE] Now we'll run 'nginx -t' to verify the syntax of '.docker/nginx/template/nginx.conf.main & ctmpl'" + echo "[NOTICE] Now we'll run 'nginx -t' to verify the syntax of '.docker/nginx/template/nginx.conf.main & conf.d'" output=$(docker exec ${project_name}-nginx-test nginx -t 2>&1 || echo "[ERROR] ${project_name}-nginx-test failed to run. But don't worry. this is testing just before restarting Nginx. Check settings in '.docker/nginx/origin & .docker/nginx/template'") if echo "$output" | grep -q "successful"; then diff --git a/use-states.sh b/use-states.sh index 32a2f7d..db784ff 100644 --- a/use-states.sh +++ b/use-states.sh @@ -4,15 +4,15 @@ set -eu git config apply.whitespace nowarn git config core.filemode false -cache_all_states() { - - echo '[NOTICE] Checking which container, blue or green, is running. (Priority : Where Consul Pointing = Where Nginx Pointing > Which Container Running > Which Container Restarting)' +get_nginx_pointing() { + local project_name=$1 + local nginx_config + local blue_exists + local green_exists + local nginx_pointing - local consul_pointing - consul_pointing=$(docker exec ${project_name}-nginx curl ${consul_key_value_store}?raw 2>/dev/null || echo "failed") + nginx_config=$(docker exec "${project_name}-nginx" cat /etc/nginx/conf.d/nginx.conf || echo "failed") - local nginx_pointing - nginx_config=$(docker exec ${project_name}-nginx cat /etc/nginx/conf.d/nginx.conf || echo "failed") if echo "$nginx_config" | grep -Eq "^[^#]*proxy_pass http[s]*://${project_name}-blue"; then blue_exists="blue" else @@ -35,52 +35,60 @@ cache_all_states() { nginx_pointing="failed" fi + echo "$nginx_pointing" +} + +cache_all_states() { + + echo '[NOTICE] Checking which container, blue or green, is running. (Priority : Where Nginx Pointing > Which Container Running > Which Container Restarting)' + + ## Calculation + + # 1. Nginx pointing + local nginx_pointing + nginx_pointing=$(get_nginx_pointing "$project_name") + # 2. 
Container status local blue_status blue_status=$(docker inspect --format='{{.State.Status}}' ${project_name}-blue 2>/dev/null || echo "unknown") local green_status green_status=$(docker inspect --format='{{.State.Status}}' ${project_name}-green 2>/dev/null || echo "unknown") - echo "[DEBUG] ! Checking which (Blue OR Green) is currently running... (Base Check) : consul_pointing(${consul_pointing}), nginx_pointing(${nginx_pointing}), blue_status(${blue_status}), green_status(${green_status})" + echo "[DEBUG] ! Checking which (Blue OR Green) is currently running... (Base Check) : nginx_pointing(${nginx_pointing}), blue_status(${blue_status}), green_status(${green_status})" - local blue_score=0 + local blue_score=1 # Base score local green_score=0 - # consul_pointing - if [[ "$consul_pointing" == "blue" ]]; then - blue_score=$((blue_score + 50)) - elif [[ "$consul_pointing" == "green" ]]; then - green_score=$((green_score + 50)) - fi - # nginx_pointing + ## Give scores + + # 1. Nginx pointing if [[ "$nginx_pointing" == "blue" ]]; then - blue_score=$((blue_score + 50)) + blue_score=$((blue_score + 30)) elif [[ "$nginx_pointing" == "green" ]]; then - green_score=$((green_score + 50)) + green_score=$((green_score + 30)) fi - - # status + # 2. Container status case "$blue_status" in "running") blue_score=$((blue_score + 30)) ;; "restarting") - blue_score=$((blue_score + 29)) + blue_score=$((blue_score + 28)) ;; "created") - blue_score=$((blue_score + 28)) + blue_score=$((blue_score + 25)) ;; "exited") - blue_score=$((blue_score + 27)) + blue_score=$((blue_score + 5)) ;; "paused") - blue_score=$((blue_score + 26)) + blue_score=$((blue_score + 3)) ;; "dead") - blue_score=$((blue_score + 25)) + blue_score=$((blue_score + 1)) ;; *) ;; @@ -91,19 +99,19 @@ cache_all_states() { green_score=$((green_score + 30)) ;; "restarting") - green_score=$((green_score + 29)) + green_score=$((green_score + 28)) ;; "created") - green_score=$((green_score + 28)) + green_score=$((green_score + 25)) ;; "exited") - green_score=$((green_score + 27)) + green_score=$((green_score + 5)) ;; "paused") - green_score=$((green_score + 26)) + green_score=$((green_score + 3)) ;; "dead") - green_score=$((green_score + 25)) + green_score=$((green_score + 1)) ;; *) ;;
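    # Illustrative walk-through of the scoring above (sample situation assumed): if
    # /etc/nginx/conf.d/nginx.conf inside the nginx container contains an uncommented
    # "proxy_pass http://myapp-green:3000;", get_nginx_pointing returns "green"; suppose
    # myapp-blue is exited and myapp-green is running. Then:
    #
    #   blue_score  = 1 (base) + 0  (not pointed to)   + 5  (exited)  = 6
    #   green_score = 0        + 30 (nginx pointing)   + 30 (running) = 60
    #
    # so green is cached as the currently active state. A quick manual check of the pointing
    # (container name assumed for the example):
    #   docker exec myapp-nginx cat /etc/nginx/conf.d/nginx.conf | grep -E '^[^#]*proxy_pass'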