# (scrape artifact — "Newer"/"Older" pagination links from the web view; not part of the CI config)
- setup-cluster
- health-test
- integration-test
# Repeated values, because we're not allowed to use a variable in a variable
SUBDOMAIN: "${CI_COMMIT_REF_SLUG}.ci"
ADDRESS: "${CI_COMMIT_REF_SLUG}.ci.openappstack.net"
KANIKO_BUILD_IMAGENAME: "openappstack-ci"
default:
  # Every job runs in the project's own CI image (built by kaniko as
  # "openappstack-ci", see KANIKO_BUILD_IMAGENAME above), tagged per branch.
  image: "${CI_REGISTRY_IMAGE}/${KANIKO_BUILD_IMAGENAME}:${CI_COMMIT_REF_SLUG}"
- requirements.txt

# (scrape artifact — git blame metadata "Maarten de Waard / committed"; not part of the CI config)
- echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS";
- ls clusters/${HOSTNAME} || echo "directory clusters/${HOSTNAME} not found"
# Creates the VPS only if an old VPS for this branch is not re-usable
- sh .gitlab/ci_scripts/create_vps.sh
artifacts:
paths:
- clusters
expire_in: 1 month
when: always
only:
changes:
- .gitlab-ci.yml
- test/**/*
- openappstack/**/*
extends: .ssh_setup
# Cache the cluster secrets so the next job can use it too
cache:
paths:
- clusters/$HOSTNAME/**
key: ${CI_COMMIT_REF_SLUG}
stage: setup-cluster
script:
# Copy inventory files to ansible folder for use in install-apps step
- cp clusters/${CI_COMMIT_REF_SLUG}/inventory.yml ansible/
- cp clusters/${CI_COMMIT_REF_SLUG}/group_vars/all/settings.yml ansible/group_vars/all/
- python3 -m openappstack $HOSTNAME install
- ansible/inventory.yml
- ansible/group_vars/all/settings.yml
- openappstack/**/*
# Cache the cluster secrets so the next job can use them
cache:
paths:
- clusters/$HOSTNAME/**
key: ${CI_COMMIT_REF_SLUG}
stage: wait-for-deployments
script:
- cd ansible/
- export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/secrets/kube_config_cluster.yml"
- pytest -v -s -m 'helmreleases' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*' --reruns 120 --reruns-delay 10
only:
changes:
- .gitlab-ci.yml
- ansible/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
- pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
- openappstack/**/*
- pytest -s -m 'certs' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
- openappstack/**/*
# Checks that no Prometheus alerts are firing on the freshly installed cluster.
prometheus-alerts:
  stage: health-test
  variables:
    # Mirrors ADDRESS defined at the top of the file — values are repeated
    # because GitLab CI does not expand a variable inside another variable.
    OAS_DOMAIN: '${CI_COMMIT_REF_SLUG}.ci.openappstack.net'
  # NOTE(review): the 'script:' key was lost in extraction and is restored
  # here. The job may also have carried 'extends: .ssh_setup', a leading
  # 'cd ansible/' (the ../clusters/ path suggests it runs from a subdir) and
  # an 'only:/changes:' section — confirm against the original file.
  script:
    - pytest -s -m 'prometheus' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
# Wait until flux creates the NextCloud HelmRelease.
- ssh root@$ADDRESS '/bin/bash -c "while true; do kubectl get hr -n oas-apps nextcloud; if [ \$? -eq 0 ]; then break; fi; sleep 20; done"'
# Wait until NextCloud is ready.
- ssh root@$ADDRESS '/bin/bash -c "kubectl wait -n oas-apps hr/nextcloud --for condition=Released --timeout=20m"'
# Run the behave tests for NextCloud.
- python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags nextcloud || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags nextcloud
artifacts:
paths:
- test/behave/screenshots/
expire_in: 1 month
when: on_failure
- openappstack/**/*
extends: .ssh_setup
- python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags grafana || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags grafana
artifacts:
paths:
- test/behave/screenshots/
expire_in: 1 month
when: on_failure
- openappstack/**/*
# Removes the CI droplet of a merge-request branch once that MR is merged.
terminate_mr_droplet_after_merge:
  stage: cleanup
  before_script:
    - echo "We leave MR droplets running even when the pipeline is successful \
      to be able to investigate a MR. We need to terminate them when the MR \
      is merged into master."
  # NOTE(review): the 'script:' key, the '|' block-scalar marker and the
  # shell 'then' keyword were lost in extraction and are restored here —
  # confirm against the original file.
  script:
    - |
      # A commit with more than one parent is a merge commit, i.e. an MR
      # was just merged into master.
      if [ "$(git show -s --pretty=%p HEAD | wc -w)" -gt 1 ]; then
        # Merge commit subjects look like: Merge branch 'branch-name' into
        # 'master' — extract the text between the first pair of quotes.
        commit_message="$(git show -s --format=%s)"
        tmp="${commit_message#*\'}"
        merged_branch="${tmp%%\'*}"
        echo "Current HEAD is a merge commit, removing droplet from related merge request branch name '#${merged_branch}'."
        python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^${merged_branch}\.\")"
      else
        echo "Current HEAD is NOT a merge commit, nothing to do."
      fi
  only:
    refs:
      - master
# Removes CI droplets that are older than 5 days; long-lived branches simply
# get a fresh droplet on their next pipeline run.
terminate_old_droplets:
  # NOTE(review): 'stage:' was not visible in the scraped fragment — assumed
  # to be cleanup like the other termination job; confirm against the original.
  stage: cleanup
  # NOTE(review): the 'script:' key and the 'only:/changes:' wrappers were
  # lost in extraction; only the 'openappstack/**/*' entry of the changes
  # list survived — the original list may have had more entries.
  script:
    - echo "Terminate droplets 5 days after creation. Branches that exist longer than 5 days will get a new droplet when CI runs again."
    - python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name('\d+-.*', 5)"
  only:
    changes:
      - openappstack/**/*
# We need one job that runs every time (without any `only:` limitation).
# This works around a Gitlab bug: if no job runs at all due to
# `only`, Gitlab gets confused and doesn't allow you to merge the MR:
# https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html#limitations
gitlab-merge-workaround:
stage: cleanup
script: