include:
  - .gitlab/ci_templates/kaniko.yml
  - .gitlab/ci_templates/ssh_setup.yml

stages:
  - build
  - create-vps
  - setup-cluster
  - wait-for-deployments
  - health-test
  - integration-test
  - cleanup

variables:
  SSH_KEY_ID: "411"
  HOSTNAME: "${CI_COMMIT_REF_SLUG}"
  # Repeated values, because we're not allowed to use a variable in a variable
  SUBDOMAIN: "${CI_COMMIT_REF_SLUG}.ci"
  DOMAIN: "openappstack.net"
  ADDRESS: "${CI_COMMIT_REF_SLUG}.ci.openappstack.net"
  ANSIBLE_HOST_KEY_CHECKING: "False"
  KANIKO_BUILD_IMAGENAME: "openappstack-ci"

default:
  image: "${CI_REGISTRY_IMAGE}/${KANIKO_BUILD_IMAGENAME}:${CI_COMMIT_REF_SLUG}"

ci_test_image:
  stage: build
  only:
    changes:
      - .gitlab-ci.yml
      - Dockerfile
      - requirements.txt
      - .gitlab/ci_templates/kaniko.yml
  extends: .kaniko_build

create-vps:
  stage: create-vps
  script:
    - echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS"
    - ls clusters/${HOSTNAME} || echo "directory clusters/${HOSTNAME} not found"
    # Creates the VPS only if an old VPS for this branch is not re-usable
    - sh .gitlab/ci_scripts/create_vps.sh
  artifacts:
    paths:
      - clusters
    expire_in: 1 month
    when: always
  only:
    changes:
      - .gitlab-ci.yml
      - .gitlab/ci_scripts/*
      - ansible/**/*
      - flux/**/*
      - test/**/*
      - openappstack/**/*
  extends: .ssh_setup
  # Cache the cluster secrets so the next job can use them too
  cache:
    paths:
      - clusters/$HOSTNAME/**
    key: ${CI_COMMIT_REF_SLUG}

setup-openappstack:
  stage: setup-cluster
  script:
    # Copy inventory files to ansible folder for use in install-apps step
    - chmod 700 ansible
    - cp clusters/${CI_COMMIT_REF_SLUG}/inventory.yml ansible/
    - cp clusters/${CI_COMMIT_REF_SLUG}/group_vars/all/settings.yml ansible/group_vars/all/
    # Set up cluster
    - python3 -m openappstack $HOSTNAME install
    # Show versions of installed apps/binaries
    - chmod 700 ansible
    - cd ansible
    - ansible master -m shell -a 'oas-version-info.sh 2>&1'
  artifacts:
    paths:
      - ./clusters
      - ansible/inventory.yml
      - ansible/group_vars/all/settings.yml
    expire_in: 1 month
    when: always
  only:
    changes:
      - .gitlab-ci.yml
      - ansible/**/*
      - flux/**/*
      - test/**/*
      - openappstack/**/*
  extends: .ssh_setup
  # Cache the cluster secrets so the next job can use them
  cache:
    paths:
      - clusters/$HOSTNAME/**
    key: ${CI_COMMIT_REF_SLUG}

test_helmreleases:
  stage: wait-for-deployments
  script:
    - cd ansible/
    - export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/secrets/kube_config_cluster.yml"
    - pytest -v -s -m 'helmreleases' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*' --reruns 120 --reruns-delay 10
  only:
    changes:
      - .gitlab-ci.yml
      - ansible/**/*
      - flux/**/*
      - test/**/*
      - openappstack/**/*
  extends: .ssh_setup

testinfra:
  stage: health-test
  script:
    - cd ansible/
    - pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
  only:
    changes:
      - .gitlab-ci.yml
      - ansible/**/*
      - flux/**/*
      - test/**/*
      - openappstack/**/*
  extends: .ssh_setup

certs:
  stage: health-test
  allow_failure: true
  script:
    - cd ansible/
    - pytest -s -m 'certs' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
  only:
    changes:
      - .gitlab-ci.yml
      - ansible/**/*
      - flux/**/*
      - test/**/*
      - openappstack/**/*
  extends: .ssh_setup

prometheus-alerts:
  stage: health-test
  variables:
    OAS_DOMAIN: '${CI_COMMIT_REF_SLUG}.ci.openappstack.net'
  allow_failure: true
  script:
    - cd test/
    - pytest -s -m 'prometheus' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
  only:
    changes:
      - .gitlab-ci.yml
      - ansible/**/*
      - flux/**/*
      - test/**/*
  extends: .ssh_setup

behave-nextcloud:
  stage: integration-test
  script:
    # Wait until flux creates the NextCloud HelmRelease.
    - ssh root@$ADDRESS '/bin/bash -c "while true; do kubectl get hr -n oas-apps nextcloud; if [ \$? -eq 0 ]; then break; fi; sleep 20; done"'
    # Wait until NextCloud is ready.
    - ssh root@$ADDRESS '/bin/bash -c "kubectl wait -n oas-apps hr/nextcloud --for condition=Released --timeout=20m"'
    # Run the behave tests for NextCloud.
    - python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags nextcloud || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags nextcloud
  artifacts:
    paths:
      - test/behave/screenshots/
    expire_in: 1 month
    when: on_failure
  retry: 2
  only:
    changes:
      - .gitlab-ci.yml
      - ansible/**/*
      - flux/**/*
      - test/**/*
      - openappstack/**/*
  extends: .ssh_setup

behave-grafana:
  stage: integration-test
  script:
    - python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags grafana || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags grafana
  artifacts:
    paths:
      - test/behave/screenshots/
    expire_in: 1 month
    when: on_failure
  only:
    changes:
      - .gitlab-ci.yml
      - ansible/**/*
      - flux/**/*
      - test/**/*
      - openappstack/**/*

terminate_mr_droplet_after_merge:
  stage: cleanup
  before_script:
    - echo "We leave MR droplets running even when the pipeline is successful \
      to be able to investigate an MR. We need to terminate them when the MR \
      is merged into master."
  script: |
    if [ "$(git show -s --pretty=%p HEAD | wc -w)" -gt 1 ]
    then
      commit_message="$(git show -s --format=%s)"
      tmp="${commit_message#*\'}"
      merged_branch="${tmp%%\'*}"
      echo "Current HEAD is a merge commit, removing droplet from related merge request branch name '#${merged_branch}'."
      python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^${merged_branch}\.\")"
    else
      echo "Current HEAD is NOT a merge commit, nothing to do."
    fi
  only:
    refs:
      - master

terminate_old_droplets:
  stage: cleanup
  script:
    - echo "Terminate droplets 5 days after creation. Branches that exist longer than 5 days will get a new droplet when CI runs again."
    - python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name('\d+-.*', 5)"
  only:
    changes:
      - .gitlab-ci.yml
      - ansible/**/*
      - flux/**/*
      - test/**/*
      - openappstack/**/*

# We need one job that runs every time (without any `only:` limitation).
# This works around a Gitlab bug: if no job runs at all due to
# `only`, Gitlab gets confused and doesn't allow you to merge the MR:
# https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html#limitations
gitlab-merge-workaround:
  stage: cleanup
  script:
    - echo "That went well"