Commit f1288db1 authored by Ana Aviles

Merge branch 'reorder_deployments' into 'master'

Reorder deployments

See merge request openappstack/openappstack!124
parents ae61d3f1 1b5967a5
include:
- .gitlab/ci_templates/kaniko.yml
- .gitlab/ci_templates/ssh_setup.yml
stages:
- build
- setup-cluster
@@ -36,14 +37,7 @@ ci_test_image:
bootstrap:
stage: setup-cluster
before_script:
- ansible --version
script:
# Ensure ansible/ is not world-writable otherwise ansible-playbook refuses to run, see
# https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
- chmod 755 ansible/
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS"
- python3 -m openappstack $HOSTNAME create --create-droplet $DOMAIN --hostname $HOSTNAME --ssh-key-id $SSH_KEY_ID --create-domain-records --subdomain $SUBDOMAIN
- python3 -m openappstack $HOSTNAME install --ansible-param='--skip-tags=helmfile'
@@ -59,17 +53,11 @@ bootstrap:
- helmfiles/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
install:
stage: install-apps
variables:
ANSIBLE_HOST_KEY_CHECKING: 'False'
script:
# Ensure ansible/ is not world-writable otherwise ansible-playbook refuses to run, see
# https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
- chmod 755 ansible/
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- python3 -m openappstack $HOSTNAME install --ansible-param='--tags=helmfile'
# Show versions of installed apps/binaries
- ansible master -m shell -a 'oas-version-info.sh 2>&1'
@@ -85,14 +73,11 @@ install:
- helmfiles/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
testinfra:
stage: health-test
script:
- mkdir ~/.ssh
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
- cd ansible/
- pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
only:
@@ -102,15 +87,12 @@ testinfra:
- helmfiles/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
certs:
stage: health-test
allow_failure: true
script:
- mkdir ~/.ssh
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
- cd ansible/
- pytest -s -m 'certs' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
only:
@@ -120,6 +102,7 @@ certs:
- helmfiles/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
prometheus-alerts:
stage: health-test
@@ -127,10 +110,6 @@ prometheus-alerts:
OAS_DOMAIN: 'ci-${CI_PIPELINE_ID}.ci.openappstack.net'
allow_failure: true
script:
- mkdir ~/.ssh
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
- cd test/
- pytest -s -m 'prometheus' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
only:
@@ -139,6 +118,7 @@ prometheus-alerts:
- ansible/**/*
- helmfiles/**/*
- test/**/*
extends: .ssh_setup
behave-nextcloud:
stage: integration-test
......
.ssh_setup:
before_script:
- mkdir ~/.ssh
- echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
@@ -40,19 +40,16 @@
- name: production
server: "https://acme-v02.api.letsencrypt.org/directory"
- name: Apply cert-manager helmfile
- name: Install cert-manager
tags:
- helmfile
- cert-manager
shell: |
set -e -x -o pipefail
/usr/local/bin/helmfile \
-b /usr/local/bin/helm \
-e oas \
-f {{ data_directory }}/source/helmfiles/helmfile.d/05-cert-manager.yaml \
apply \
--suppress-secrets \
| sed 's/\x1B\[[0-9;]*[JKmsu]//g' \
>> {{ log_directory }}/helmfile.log
args:
executable: /bin/bash
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- helmfile
- cert-manager
vars:
helmfile: '05-cert-manager'
---
- name: Clone nextcloud repo
tags:
- git
- nextcloud
git:
repo: 'https://open.greenhost.net/openappstack/nextcloud'
dest: '{{ data_directory }}/source/repos/nextcloud'
version: '{{ git_nextcloud_version }}'
- name: Remove requirements.lock file
tags:
- git
- nextcloud
- helmfile
file:
path: '{{ data_directory }}/source/repos/nextcloud/nextcloud-onlyoffice/requirements.lock'
state: absent
- name: Clone local-storage repo
tags:
- git
- local-storage
git:
repo: 'https://open.greenhost.net/openappstack/local-storage'
dest: '{{ data_directory }}/source/repos/local-storage'
version: '{{ git_local_storage_version }}'
- name: Make Prometheus custom resource definitions
tags:
- helmfile
- monitoring
# NOTE: Change the commit hash in the URL when upgrading Prometheus
command: '/snap/bin/kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.31.1/example/prometheus-operator-crd/{{ item }}'
loop:
- alertmanager.crd.yaml
- prometheus.crd.yaml
- prometheusrule.crd.yaml
- servicemonitor.crd.yaml
- podmonitor.crd.yaml
- name: Get prometheus PV name
tags:
- prometheus
shell: "kubectl -n oas get pvc prometheus-prometheus-oas-{{ release_name }}-prometheus-promet-prometheus-0 -o=jsonpath='{.spec.volumeName}'"
register: prometheus_pv_name
failed_when: false
changed_when: false
# Needed because previously we ran prometheus as root
- name: Ensure prometheus volume is accessible by the prometheus pod
tags:
- prometheus
file:
dest: "{{ data_directory }}/local-storage/{{ prometheus_pv_name.stdout }}"
owner: '1000'
group: '2000'
recurse: true
when: prometheus_pv_name.stdout
- name: Apply storage helmfile
tags:
- helmfile
shell: |
set -e -x -o pipefail
/usr/local/bin/helmfile -b /usr/local/bin/helm -e oas \
-f {{ data_directory }}/source/helmfiles/helmfile.d/00-storage.yaml \
apply --suppress-secrets \
| sed 's/\x1B\[[0-9;]*[JKmsu]//g' \
>> {{ log_directory }}/helmfile.log
args:
executable: /bin/bash
when: '"00-storage" in helmfiles'
- name: Apply nginx helmfile
tags:
- helmfile
shell: |
set -e -x -o pipefail
/usr/local/bin/helmfile -b /usr/local/bin/helm -e oas \
-f {{ data_directory }}/source/helmfiles/helmfile.d/10-nginx.yaml \
apply --suppress-secrets \
| sed 's/\x1B\[[0-9;]*[JKmsu]//g' \
>> {{ log_directory }}/helmfile.log
args:
executable: /bin/bash
when: '"10-nginx" in helmfiles'
# Force needed for upgrading from 5 to 6: https://github.com/helm/charts/tree/master/stable/prometheus-operator#upgrading-from-5xx-to-6xx
- name: Apply monitoring helmfile with force
tags:
- helmfile
environment:
- GRAFANA_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
shell: |
set -e -x -o pipefail
/usr/local/bin/helmfile -b /usr/local/bin/helm -e oas \
-f {{ data_directory }}/source/helmfiles/helmfile.d/15-monitoring.yaml \
apply --suppress-secrets --args='--force' \
| sed 's/\x1B\[[0-9;]*[JKmsu]//g' \
>> {{ log_directory }}/helmfile.log
args:
executable: /bin/bash
when: '"15-monitoring" in helmfiles'
- name: Apply nextcloud helmfile
tags:
- helmfile
environment:
- NEXTCLOUD_PASSWORD: "{{ nextcloud_password }}"
- NEXTCLOUD_MARIADB_PASSWORD: "{{ nextcloud_mariadb_password }}"
- NEXTCLOUD_MARIADB_ROOT_PASSWORD: "{{ nextcloud_mariadb_root_password }}"
- ONLYOFFICE_JWT_SECRET: "{{ onlyoffice_jwt_secret }}"
- ONLYOFFICE_POSTGRESQL_PASSWORD: "{{ onlyoffice_postgresql_password }}"
- ONLYOFFICE_RABBITMQ_PASSWORD: "{{ onlyoffice_rabbitmq_password }}"
shell: |
set -e -x -o pipefail
/usr/local/bin/helmfile -b /usr/local/bin/helm -e oas \
-f {{ data_directory }}/source/helmfiles/helmfile.d/20-nextcloud.yaml \
apply --suppress-secrets \
| sed 's/\x1B\[[0-9;]*[JKmsu]//g' \
>> {{ log_directory }}/helmfile.log
args:
executable: /bin/bash
when: '"20-nextcloud" in helmfiles'
---
- name: Clone local-storage repo
tags:
- git
- helmfile
- local-storage
git:
repo: 'https://open.greenhost.net/openappstack/local-storage'
dest: '{{ data_directory }}/source/repos/local-storage'
version: '{{ git_local_storage_version }}'
- name: Install local-storage provisioner
tags:
- helmfile
- local-storage
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- helmfile
- local-storage
vars:
helmfile: '00-storage'
---
- import_tasks: init.yml
- import_tasks: cert-manager.yml
- import_tasks: helmfiles.yml
- name: Import tasks from init.yml
import_tasks: init.yml
tags: [ helmfile ]
- name: Install local-storage
import_tasks: local-storage.yml
tags: [ helmfile ]
when: '"00-storage" in helmfiles'
- name: Install cert-manager
import_tasks: cert-manager.yml
tags: [ helmfile ]
when: '"05-cert-manager" in helmfiles'
- name: Install nginx
import_tasks: nginx.yml
tags: [ helmfile ]
when: '"10-nginx" in helmfiles'
- name: Install prometheus
import_tasks: prometheus.yml
tags: [ helmfile ]
when: '"15-monitoring" in helmfiles'
- name: Install nextcloud
import_tasks: nextcloud.yml
tags: [ helmfile ]
when: '"20-nextcloud" in helmfiles'
---
- name: Clone nextcloud repo
tags:
- git
- helmfile
- nextcloud
git:
repo: 'https://open.greenhost.net/openappstack/nextcloud'
dest: '{{ data_directory }}/source/repos/nextcloud'
version: '{{ git_nextcloud_version }}'
- name: Remove requirements.lock file
tags:
- git
- nextcloud
- helmfile
file:
path: '{{ data_directory }}/source/repos/nextcloud/nextcloud-onlyoffice/requirements.lock'
state: absent
- name: Install nextcloud and onlyoffice
tags:
- helmfile
- nextcloud
- onlyoffice
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- helmfile
- nextcloud
- onlyoffice
environment:
- NEXTCLOUD_PASSWORD: "{{ nextcloud_password }}"
- NEXTCLOUD_MARIADB_PASSWORD: "{{ nextcloud_mariadb_password }}"
- NEXTCLOUD_MARIADB_ROOT_PASSWORD: "{{ nextcloud_mariadb_root_password }}"
- ONLYOFFICE_JWT_SECRET: "{{ onlyoffice_jwt_secret }}"
- ONLYOFFICE_POSTGRESQL_PASSWORD: "{{ onlyoffice_postgresql_password }}"
- ONLYOFFICE_RABBITMQ_PASSWORD: "{{ onlyoffice_rabbitmq_password }}"
vars:
helmfile: '20-nextcloud'
---
- name: Install nginx ingress controller
tags:
- helmfile
- nginx
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- helmfile
- nginx
vars:
helmfile: '10-nginx'
---
- name: Make Prometheus custom resource definitions
tags:
- helmfile
- prometheus
# NOTE: Change the commit hash in the URL when upgrading Prometheus
command: '/snap/bin/kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.31.1/example/prometheus-operator-crd/{{ item }}'
loop:
- alertmanager.crd.yaml
- prometheus.crd.yaml
- prometheusrule.crd.yaml
- servicemonitor.crd.yaml
- podmonitor.crd.yaml
- name: Get prometheus PV name
tags:
- prometheus
shell: "kubectl -n oas get pvc prometheus-prometheus-oas-{{ release_name }}-prometheus-promet-prometheus-0 -o=jsonpath='{.spec.volumeName}'"
register: prometheus_pv_name
failed_when: false
changed_when: false
# Needed because previously we ran prometheus as root
- name: Ensure prometheus volume is accessible by the prometheus pod
tags:
- prometheus
file:
dest: "{{ data_directory }}/local-storage/{{ prometheus_pv_name.stdout }}"
owner: '1000'
group: '2000'
recurse: true
when: prometheus_pv_name.stdout
- name: Install prometheus and grafana
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- monitoring
- prometheus
environment:
- GRAFANA_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
tags:
- monitoring
- prometheus
vars:
helmfile: '15-monitoring'
# Force needed for upgrading from 5 to 6, see
# https://github.com/helm/charts/tree/master/stable/prometheus-operator#upgrading-from-5xx-to-6xx
helmfile_apply_args: '--args="--force"'
# Add additional helmfile apply args
helmfile_apply_args: ''
---
- name: Apply helmfile
tags:
- helmfile
shell: |
set -e -x -o pipefail
/usr/local/bin/helmfile -b /usr/local/bin/helm -e oas \
-f {{ data_directory }}/source/helmfiles/helmfile.d/{{ helmfile }}.yaml \
apply --suppress-secrets {{ helmfile_apply_args }} \
| sed 's/\x1B\[[0-9;]*[JKmsu]//g' \
>> {{ log_directory }}/helmfile.log
args:
executable: /bin/bash
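This single parameterised task replaces the per-component shell blocks removed above; each component file now calls it via include_role, passing the helmfile name and, when needed, extra apply arguments. A condensed sketch of a consumer, following the same pattern as the cert-manager and nginx tasks in this commit (the task name is illustrative):

# Illustrative consumer of the helmfile role's apply task. Only the helmfile
# var is required; helmfile_apply_args falls back to the empty default above.
- name: Install an example component
  include_role:
    name: "helmfile"
    tasks_from: "apply"
  vars:
    helmfile: '10-nginx'
    # Override when a chart needs extra arguments, e.g. '--args="--force"'
    # as the monitoring task does for the prometheus-operator upgrade.
    # helmfile_apply_args: '--args="--force"'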