Clean up deprecated deployment code
- Improves email logging
- Removes `depends_on` configuration from docker compose files
- Removes some deprecated deployment code around Elastalert config file formatting
- Creates backup user on backup servers automatically
- Verifies Kibana is ready before setting up alert configuration

Comments about why certain changes are potentially needed can be found in #897
rikukissa committed Feb 28, 2024
1 parent 46e08f2 commit a529f42
Showing 14 changed files with 67 additions and 116 deletions.
5 changes: 1 addition & 4 deletions .github/workflows/provision.yml
@@ -1,9 +1,6 @@
name: Provision environment
run-name: Provision ${{ github.event.inputs.environment }}
on:
push:
branches:
- release-v1.4.1
workflow_dispatch:
inputs:
environment:
@@ -112,7 +109,7 @@ jobs:
ansible_user: ${{ secrets.SSH_USER }}

- name: Run playbook
uses: dawidd6/action-ansible-playbook@v2
uses: dawidd6/action-ansible-playbook@v2.8.0
env:
ANSIBLE_PERSISTENT_COMMAND_TIMEOUT: 10
ANSIBLE_SSH_TIMEOUT: 10
7 changes: 7 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,13 @@

## [1.4.1](https://github.com/opencrvs/opencrvs-farajaland/compare/v1.4.0...v1.4.1)

- Improved logging for emails being sent
- Deployment: Verifies Kibana is ready before setting up alert configuration
- Deployment: Removes `depends_on` configuration from docker compose files
- Deployment: Removes some deprecated deployment code around Elastalert config file formatting
- Provisioning: Creates backup user on backup servers automatically
- Provisioning: Updates the Ansible GitHub Action version

## [1.4.0](https://github.com/opencrvs/opencrvs-farajaland/compare/v1.3.3...v1.4.0)

- Added examples for configuring HTTP-01, DNS-01, and manual HTTPS certificates. By default, development and QA environments use HTTP-01, while others use DNS-01.
12 changes: 1 addition & 11 deletions infrastructure/deployment/deploy.sh
@@ -351,16 +351,7 @@ configured_ssh << EOF
EOF

# Setup configuration files and compose file for the deployment domain
configured_ssh "
HOST=$HOST
SMTP_HOST=$SMTP_HOST
SMTP_PORT=$SMTP_PORT
ALERT_EMAIL=$ALERT_EMAIL
SENDER_EMAIL_ADDRESS=$SENDER_EMAIL_ADDRESS
DOMAIN=$DOMAIN
MINIO_ROOT_USER=$MINIO_ROOT_USER
MINIO_ROOT_PASSWORD=$MINIO_ROOT_PASSWORD
/opt/opencrvs/infrastructure/setup-deploy-config.sh $HOST | tee -a $LOG_LOCATION/setup-deploy-config.log"
configured_ssh "/opt/opencrvs/infrastructure/setup-deploy-config.sh $HOST"

rotate_secrets

@@ -372,7 +363,6 @@ echo
echo "Waiting 2 mins for mongo to deploy before working with data. Please note it can take up to 10 minutes for the entire stack to deploy in some scenarios."
echo

sleep 120 # Required as Kibana cannot be immediately contacted
echo "Setting up Kibana config & alerts"

while true; do
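
The rest of the loop is truncated in this view. A minimal sketch of what a Kibana readiness wait of this kind could look like is given below; the status endpoint URL, the credentials, and the retry interval are assumptions for illustration, not lines taken from deploy.sh.

# Sketch only (assumed endpoint, credentials and interval): poll Kibana until
# it responds instead of relying on a fixed two-minute sleep.
KIBANA_URL="http://kibana:5601/api/status"
while true; do
  if curl --silent --fail --connect-timeout 5 \
       -u "elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD" "$KIBANA_URL" > /dev/null; then
    echo "Kibana is ready"
    break
  fi
  echo "Waiting for Kibana..."
  sleep 5
done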
21 changes: 0 additions & 21 deletions infrastructure/docker-compose.deploy.yml
@@ -140,8 +140,6 @@ services:
'file=@/config.ndjson'
]
restart: on-failure
depends_on:
- kibana
volumes:
# Exceed Docker config file 500 kb file limit, thus a volume mount
- '/opt/opencrvs/infrastructure/monitoring/kibana/config.ndjson:/config.ndjson'
@@ -182,8 +180,6 @@ services:
configs:
- source: kibana.{{ts}}
target: /usr/share/kibana/config/kibana.yml
depends_on:
- elasticsearch
logging:
driver: gelf
options:
@@ -234,8 +230,6 @@ services:
replicas: 1
restart_policy:
condition: none
depends_on:
- mongo1
environment:
- REPLICAS=1
- MONGODB_ADMIN_USER=${MONGODB_ADMIN_USER}
@@ -344,8 +338,6 @@ services:
/bin/sh -c "
/usr/bin/mc admin trace --path ocrvs/* minio
"
depends_on:
- minio
configs:
- source: minio-mc-config.{{ts}}
target: /root/.mc/config.json
@@ -367,8 +359,6 @@ services:
image: ubuntu:bionic
entrypoint: ['bash', '/usr/app/setup.sh']
restart: on-failure
depends_on:
- elasticsearch
environment:
- ELASTICSEARCH_HOST=elasticsearch
- ELASTIC_PASSWORD=${ELASTICSEARCH_SUPERUSER_PASSWORD}
@@ -405,8 +395,6 @@ services:
- '/opt/opencrvs/infrastructure/monitoring/elastalert/rules:/opt/elastalert/rules'
networks:
- overlay_net
depends_on:
- elasticsearch
deploy:
labels:
- 'traefik.enable=false'
@@ -423,8 +411,6 @@ services:
logstash:
image: logstash:7.17.0
command: logstash -f /etc/logstash/logstash.conf --verbose
depends_on:
- elasticsearch
ports:
- '12201:12201'
- '12201:12201/udp'
@@ -447,9 +433,6 @@ services:
replicas: 1
apm-server:
image: docker.elastic.co/apm/apm-server:7.15.2
depends_on:
- elasticsearch
- kibana
cap_add: ['CHOWN', 'DAC_OVERRIDE', 'SETGID', 'SETUID']
cap_drop: ['ALL']
restart: always
@@ -906,8 +889,6 @@ services:
environment:
- mongodb__url=mongodb://hearth:${HEARTH_MONGODB_PASSWORD}@mongo1/hearth-dev?replicaSet=rs0
- logger__level=warn
depends_on:
- mongo1
deploy:
labels:
- 'traefik.enable=false'
@@ -930,8 +911,6 @@ services:
environment:
- mongo_url=mongodb://openhim:${OPENHIM_MONGODB_PASSWORD}@mongo1/openhim-dev?replicaSet=rs0
- mongo_atnaUrl=mongodb://openhim:${OPENHIM_MONGODB_PASSWORD}@mongo1/openhim-dev?replicaSet=rs0
depends_on:
- mongo1
deploy:
labels:
- 'traefik.enable=false'
14 changes: 2 additions & 12 deletions infrastructure/docker-compose.production-deploy.yml
@@ -145,9 +145,7 @@ services:
hearth:
environment:
- mongodb__url=mongodb://hearth:${HEARTH_MONGODB_PASSWORD}@mongo1,mongo2/hearth-dev?replicaSet=rs0
depends_on:
- mongo1
- mongo2

deploy:
replicas: 2

@@ -159,17 +157,12 @@
- HEARTH_MONGO_URL=mongodb://hearth:${HEARTH_MONGODB_PASSWORD}@mongo1,mongo2/hearth-dev?replicaSet=rs0
- OPENHIM_MONGO_URL=mongodb://openhim:${OPENHIM_MONGODB_PASSWORD}@mongo1,mongo2/openhim-dev?replicaSet=rs0
- WAIT_HOSTS=mongo1:27017,mongo2:27017,influxdb:8086,minio:9000,elasticsearch:9200
depends_on:
- mongo1
- mongo2

openhim-core:
environment:
- mongo_url=mongodb://openhim:${OPENHIM_MONGODB_PASSWORD}@mongo1,mongo2/openhim-dev?replicaSet=rs0
- mongo_atnaUrl=mongodb://openhim:${OPENHIM_MONGODB_PASSWORD}@mongo1,mongo2/openhim-dev?replicaSet=rs0
depends_on:
- mongo1
- mongo2

deploy:
replicas: 2

@@ -207,9 +200,6 @@ services:
- overlay_net

mongo-on-update:
depends_on:
- mongo1
- mongo2
environment:
- REPLICAS=2

8 changes: 0 additions & 8 deletions infrastructure/docker-compose.staging-deploy.yml
@@ -145,8 +145,6 @@ services:
hearth:
environment:
- mongodb__url=mongodb://hearth:${HEARTH_MONGODB_PASSWORD}@mongo1/hearth-dev?replicaSet=rs0
depends_on:
- mongo1
deploy:
replicas: 1

@@ -158,15 +156,11 @@
- HEARTH_MONGO_URL=mongodb://hearth:${HEARTH_MONGODB_PASSWORD}@mongo1/hearth-dev?replicaSet=rs0
- OPENHIM_MONGO_URL=mongodb://openhim:${OPENHIM_MONGODB_PASSWORD}@mongo1/openhim-dev?replicaSet=rs0
- WAIT_HOSTS=mongo1:27017,influxdb:8086,minio:9000,elasticsearch:9200
depends_on:
- mongo1

openhim-core:
environment:
- mongo_url=mongodb://openhim:${OPENHIM_MONGODB_PASSWORD}@mongo1/openhim-dev?replicaSet=rs0
- mongo_atnaUrl=mongodb://openhim:${OPENHIM_MONGODB_PASSWORD}@mongo1/openhim-dev?replicaSet=rs0
depends_on:
- mongo1
deploy:
replicas: 1

@@ -175,8 +169,6 @@
replicas: 1

mongo-on-update:
depends_on:
- mongo1
environment:
- REPLICAS=1

7 changes: 0 additions & 7 deletions infrastructure/logrotate.conf
@@ -58,13 +58,6 @@ include /etc/logrotate.d
rotate 1
}

/var/log/setup-deploy-config.log {
missingok
monthly
create 0660 root application
rotate 1
}

/var/log/rotate-secrets.log {
missingok
monthly
3 changes: 2 additions & 1 deletion infrastructure/monitoring/elastalert/elastalert.yaml
@@ -16,7 +16,8 @@ buffer_time:

es_host: elasticsearch
es_port: 9200

# es_username: <passed as environment variables>
# es_password: <passed as environment variables>
writeback_index: elastalert_status

alert_time_limit:
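
With the credentials no longer templated into elastalert.yaml, they are expected to reach ElastAlert via its environment. As a hedged illustration only, the command below shows one way of injecting them at the Docker Swarm level; the service name and the ES_USERNAME/ES_PASSWORD variable names are assumptions (based on ElastAlert's environment-variable overrides), not something this commit defines.

# Hypothetical: pass the Elasticsearch credentials to the running service;
# service name and variable names are assumed, adjust to the actual stack.
docker service update \
  --env-add ES_USERNAME=elastic \
  --env-add ES_PASSWORD="$ELASTICSEARCH_SUPERUSER_PASSWORD" \
  opencrvs_elastalert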
24 changes: 17 additions & 7 deletions infrastructure/monitoring/kibana/setup-config.sh
@@ -10,18 +10,28 @@
#!/bin/bash
set -e

# Define Docker command with the image and network
# Define common variables
kibana_alerting_api_url="http://kibana:5601/api/alerting/rules/_find?page=1&per_page=100&default_search_operator=AND&sort_field=name&sort_order=asc"
docker_command="docker run --rm -v /opt/opencrvs/infrastructure/monitoring/kibana/config.ndjson:/config.ndjson --network=opencrvs_overlay_net curlimages/curl"

# First delete all alerts. This is because the import doesn't remove alerts that are no longer in the config
$docker_command --connect-timeout 60 -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD http://kibana:5601/api/alerting/rules/_find\?page\=1\&per_page\=100\&default_search_operator\=AND\&sort_field\=name\&sort_order\=asc | docker run --rm -i --network=opencrvs_overlay_net stedolan/jq -r '.data[].id' | while read -r id; do
# Initial API status check to ensure Kibana is ready
status_code=$($docker_command --connect-timeout 60 -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD -o /dev/null -w '%{http_code}' "$kibana_alerting_api_url")

if [ "$status_code" -ne 200 ]; then
echo "Kibana is not ready. API returned status code: $status_code"
exit 1
fi

# Delete all alerts
$docker_command --connect-timeout 60 -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD "$kibana_alerting_api_url" | docker run --rm -i --network=opencrvs_overlay_net stedolan/jq -r '.data[].id' | while read -r id; do
$docker_command --connect-timeout 60 -X DELETE -H 'kbn-xsrf: true' -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD "http://kibana:5601/api/alerting/rule/$id"
done

$docker_command --connect-timeout 60 -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD -X POST http://kibana:5601/api/saved_objects/_import?overwrite=true -H 'kbn-xsrf: true' --form file=@/config.ndjson > /dev/null
# Import configuration
$docker_command --connect-timeout 60 -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD -X POST "http://kibana:5601/api/saved_objects/_import?overwrite=true" -H 'kbn-xsrf: true' --form file=@/config.ndjson > /dev/null

# Re-enable all alerts. This is because after importing a config, all alerts are disabled by default
$docker_command --connect-timeout 60 -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD http://kibana:5601/api/alerting/rules/_find\?page\=1\&per_page\=100\&default_search_operator\=AND\&sort_field\=name\&sort_order\=asc | docker run --rm -i --network=opencrvs_overlay_net stedolan/jq -r '.data[].id' | while read -r id; do
# Re-enable all alerts
$docker_command --connect-timeout 60 -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD "$kibana_alerting_api_url" | docker run --rm -i --network=opencrvs_overlay_net stedolan/jq -r '.data[].id' | while read -r id; do
$docker_command --connect-timeout 60 -X POST -H 'kbn-xsrf: true' -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD "http://kibana:5601/api/alerting/rule/$id/_disable"
$docker_command --connect-timeout 60 -X POST -H 'kbn-xsrf: true' -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD "http://kibana:5601/api/alerting/rule/$id/_enable"
done
done
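
Since the script now exits non-zero whenever Kibana's alerting API does not answer with a 200, a caller can simply retry it until it succeeds. The wrapper below is a hypothetical usage sketch; the on-server script path and the retry interval are assumptions.

# Hypothetical retry wrapper (path and interval assumed): re-run the setup
# script until Kibana reports ready and the alert import completes.
until ELASTICSEARCH_SUPERUSER_PASSWORD="$ELASTICSEARCH_SUPERUSER_PASSWORD" \
      /opt/opencrvs/infrastructure/monitoring/kibana/setup-config.sh; do
  echo "Kibana not ready yet, retrying in 10 seconds..."
  sleep 10
done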
10 changes: 10 additions & 0 deletions infrastructure/server-setup/backups.yml
@@ -87,6 +87,16 @@
vars:
manager_hostname: "{{ groups['docker-manager-first'][0] }}"
tasks:
- name: Ensure backup user is present
user:
name: '{{ external_backup_server_user }}'
state: present
create_home: true
home: '/home/{{ external_backup_server_user }}'
shell: /bin/bash
tags:
- backups

- set_fact:
external_backup_server_user_home: '/home/{{ external_backup_server_user }}'
tags:
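
To apply just this part of provisioning, the play can be limited to the backups tag. The command below is a hypothetical example; the inventory path and the external_backup_server_user value are assumptions, not defaults defined by this commit.

# Hypothetical invocation (inventory path and user name assumed): runs only
# the tasks tagged "backups", which now also ensure the backup user exists.
ansible-playbook infrastructure/server-setup/backups.yml \
  -i infrastructure/server-setup/inventory.yml \
  --tags backups \
  -e external_backup_server_user=backup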
8 changes: 0 additions & 8 deletions infrastructure/server-setup/tasks/application.yml
@@ -33,14 +33,6 @@
groups: application
append: yes

- name: Create deploy logfile
ansible.builtin.file:
path: /var/log/setup-deploy-config.log
owner: '{{ ansible_user }}'
group: 'application'
state: touch
mode: 'u+rwX,g+rwX,o-rwx'

- name: Create secret logfile
ansible.builtin.file:
path: /var/log/rotate-secrets.log
14 changes: 0 additions & 14 deletions infrastructure/setup-deploy-config.sh
@@ -16,7 +16,6 @@ echo "Setting up deployment config for $HOST - `date --iso-8601=ns`"
# Set hostname in openhim-console config
sed -i "s/{{hostname}}/$HOST/g" /opt/opencrvs/infrastructure/openhim-console-config.deploy.json


# Set hostname in compose file
for file in /opt/opencrvs/infrastructure/docker-compose*.yml; do
sed -i "s/{{hostname}}/$HOST/g" "$file"
@@ -26,19 +25,6 @@ done
KIBANA_ENCRYPTION_KEY=`uuidgen`
sed -i "s/{{KIBANA_ENCRYPTION_KEY}}/$KIBANA_ENCRYPTION_KEY/g" /opt/opencrvs/infrastructure/monitoring/kibana/kibana.yml

# Move metabase file
mv /opt/opencrvs/infrastructure/metabase.init.db.sql /data/metabase/metabase.init.db.sql

# Replace environment variables from all alert definition files
for file in /opt/opencrvs/infrastructure/monitoring/elastalert/rules/*.yaml; do
sed -i -e "s%{{HOST}}%$1%" $file
sed -i -e "s%{{SMTP_HOST}}%$SMTP_HOST%" $file
sed -i -e "s%{{SMTP_PORT}}%$SMTP_PORT%" $file
sed -i -e "s%{{ALERT_EMAIL}}%$ALERT_EMAIL%" $file
sed -i -e "s%{{SENDER_EMAIL_ADDRESS}}%$SENDER_EMAIL_ADDRESS%" $file
sed -i -e "s%{{DOMAIN}}%$DOMAIN%" $file
done

sed -i -e "s%{{MINIO_ROOT_USER}}%$MINIO_ROOT_USER%" /opt/opencrvs/infrastructure/mc-config/config.json
sed -i -e "s%{{MINIO_ROOT_PASSWORD}}%$MINIO_ROOT_PASSWORD%" /opt/opencrvs/infrastructure/mc-config/config.json
