...
 
Commits (44)
......@@ -24,7 +24,7 @@ default:
image: "${CI_REGISTRY_IMAGE}/${KANIKO_BUILD_IMAGENAME}:${CI_COMMIT_REF_SLUG}"
ci_test_image:
ci-test-image:
stage: build
only:
changes:
......@@ -94,12 +94,13 @@ setup-openappstack:
- clusters/$HOSTNAME/**
key: ${CI_COMMIT_REF_SLUG}
test_helmreleases:
test-helmreleases:
stage: wait-for-deployments
script:
- cd ansible/
- export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/secrets/kube_config_cluster.yml"
- pytest -v -s -m 'helmreleases' --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' --reruns 120 --reruns-delay 10
- pytest -v -s -m 'apps_running' --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' --reruns 120 --reruns-delay 10
only:
changes:
- .gitlab-ci.yml
......@@ -155,8 +156,6 @@ prometheus-alerts:
behave-nextcloud:
stage: integration-test
script:
# Wait for Nextcloud and ONLYOFFICE pods to be Ready
- ssh root@$FQDN '/bin/bash -c "kubectl wait -n oas-apps pod -l app.kubernetes.io/instance=nc --for condition=Ready --timeout=20m"'
# Run the behave tests for NextCloud.
- python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags nextcloud || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags nextcloud
artifacts:
......@@ -177,8 +176,6 @@ behave-nextcloud:
behave-wordpress:
stage: integration-test
script:
# Wait for wordpress pod to be Ready
- ssh root@$FQDN '/bin/bash -c "kubectl wait -n oas-apps pod -l release=wordpress --for condition=Ready --timeout=20m"'
# Run behave tests against wordpress
- python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags wordpress || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags wordpress
artifacts:
......@@ -199,8 +196,6 @@ behave-wordpress:
behave-rocketchat:
stage: integration-test
script:
# Wait for Rocket.Chat pod to be Ready
- ssh root@$FQDN '/bin/bash -c "kubectl wait -n oas-apps pod -l app.kubernetes.io/instance=rocketchat --for condition=Ready --timeout=20m"'
# Run behave tests against Rocket.Chat
- python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags rocketchat || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags rocketchat
artifacts:
......@@ -221,8 +216,6 @@ behave-rocketchat:
behave-grafana:
stage: integration-test
script:
# Wait for Grafana pod to be Ready.
- ssh root@$FQDN '/bin/bash -c "kubectl wait -n oas pod -l app=grafana --for condition=Ready --timeout=20m"'
# Run behave tests against Grafana
- python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags grafana || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags grafana
artifacts:
......@@ -239,7 +232,7 @@ behave-grafana:
- openappstack/**/*
extends: .ssh_setup
terminate_mr_droplet_after_merge:
terminate-mr-droplet-after-merge:
stage: cleanup
before_script:
- echo "We leave MR droplets running even when the pipeline is successful \
......@@ -260,7 +253,7 @@ terminate_mr_droplet_after_merge:
refs:
- master
terminate_old_droplets:
terminate-old-droplets:
stage: cleanup
script:
- echo "Terminate droplets 5 days after creation. Branches that exist longer than 5 days will get a new droplet when CI runs again."
......
......@@ -11,3 +11,4 @@
* [ ] commit (signed)
* [ ] create signed tag (`git tag -s 0.2.0 -m 'Release 0.2.0'`)
* [ ] Push to MR, including tag
* [ ] Log into readthedocs.org and update documentation for tagged versions
# Changelog
## [Unreleased]
## [0.3.1] - 2020-03-18
No unreleased changes yet.
* Repair rocket chat installation
* Rocket Chat supports e2e encryption by default
* Upgrade Nextcloud chart version
* Single Sign-on related Jobs are now cleaned up after a successful installation
Known issues:
* Same as 0.3.0
## [0.3.0] - 2020-01-20
......
../../../../flux/oas/local-storage.yaml
\ No newline at end of file
---
- name: Install helm-operator
tags:
- flux
# Commented version of below shell command:
# helm upgrade
# # Install a new release if it doesn't yet exist.
# --install
# --repo "https://charts.fluxcd.io"
# --namespace oas
# --version 0.3.0
# --set createCRD=true
# # Reconcile actual helm releases with HelmRelease objects with this
# # interval.
# --set chartsSyncInterval=20m
# # Update HelmRelease objects' status with this interval.
# --set statusUpdateInterval=30s
# # Helm release name
# helm-operator
# # Chart name
# helm-operator
shell: helm upgrade --install --repo "https://charts.fluxcd.io" --namespace oas --version 0.3.0 --set createCRD=true --set chartsSyncInterval=20m --set statusUpdateInterval=30s helm-operator helm-operator
- name: Create Kubernetes secret with local-storage settings
tags:
- config
- flux
- local-storage
vars:
flux:
name: "local-storage"
namespace: "oas"
include_tasks:
file: flux_secret.yml
apply:
tags:
- config
- flux
- local-storage
- name: Create local-storage HelmResource
tags:
- config
- flux
- local-storage
k8s:
state: present
resource_definition: "{{ lookup('file', 'local-storage.yaml') | from_yaml }}"
- name: Install flux
tags:
......@@ -33,26 +80,17 @@
# --set syncGarbageCollection.enabled=true
# # Look for .flux.yaml files for manifest generation.
# --set manifestGeneration=true
# # Set the interval between checks for updates in the git repo to 1 hour.
# --set git.pollInterval=1h
# # Helm release name
# flux
# # Chart name
# flux
shell: helm upgrade --install --repo "https://charts.fluxcd.io" --namespace oas --version 0.16.0 --set git.url="{{ git_url }}" --set git.branch="{{ git_branch }}" --set git.path="{{ git_path }}" --set git.readonly=true --set registry.excludeImage='*' --set sync.state="secret" --set syncGarbageCollection.enabled=true --set manifestGeneration=true flux flux
shell: helm upgrade --install --repo "https://charts.fluxcd.io" --namespace oas --version 0.16.0 --set git.url="{{ git_url }}" --set git.branch="{{ git_branch }}" --set git.path="{{ git_path }}" --set git.readonly=true --set registry.excludeImage='*' --set sync.state="secret" --set syncGarbageCollection.enabled=true --set manifestGeneration=true --set git.pollInterval=1h flux flux
- name: Install helm-operator
- name: Install fluxctl via snap
tags:
- flux
# Commented version of below shell command:
# helm upgrade
# # Install a new release if it doesn't yet exist.
# --install
# --repo "https://charts.fluxcd.io"
# --namespace oas
# --version 0.3.0
# --set createCRD=true
# # Helm release name
# helm-operator
# # Chart name
# helm-operator
shell: helm upgrade --install --repo "https://charts.fluxcd.io" --namespace oas --version 0.3.0 --set createCRD=true helm-operator helm-operator
command: snap install --classic fluxctl
args:
creates: /snap/bin/fluxctl
---
- name: Create Kubernetes secret with local-storage settings
tags:
- config
- flux
- local-storage
vars:
flux:
name: "local-storage"
namespace: "oas"
include_tasks:
file: flux_secret.yml
apply:
tags:
- config
- flux
- local-storage
---
- name: Install flux
- name: Install helm operator, local-storage and flux
import_tasks: flux.yml
- name: Perform tasks necessary for local-storage
import_tasks: local-storage.yml
- name: Tasks pertaining to cert-manager
import_tasks: cert-manager.yml
......
......@@ -14,6 +14,8 @@ extraEnv: |
# create a different admin user, gets skipped.
- name: OVERWRITE_SETTING_Show_Setup_Wizard
value: completed
- name: E2E_Enable
value: "true"
livenessProbe:
initialDelaySeconds: 180
......
replicaCount: 1
consentProviderImage:
<< : &IMAGE_DEFAULTS_SSO { tag: "0.2.3", pullPolicy: "Always" }
<< : &IMAGE_DEFAULTS_SSO { tag: "master", pullPolicy: "Always" }
repository: "open.greenhost.net:4567/openappstack/single-sign-on/consent_provider"
loginProviderImage:
<< : *IMAGE_DEFAULTS_SSO
......@@ -12,7 +12,7 @@ singleSignOnHost: &SSO_HOST "sso.{{ domain }}"
userpanel:
applicationName: &USER_PANEL user-panel
image:
<< : &IMAGE_DEFAULTS_USER_PANEL { tag: "1.2.0", pullPolicy: "Always" }
<< : &IMAGE_DEFAULTS_USER_PANEL { tag: "master", pullPolicy: "Always" }
repository: "open.greenhost.net:4567/openappstack/user-panel/frontend"
ingress:
host: "admin.{{ domain }}"
......
image:
tag: 34-add-openid-connect-plugin-and-configuration
tag: master
initImage:
tag: 34-add-openid-connect-plugin-and-configuration
tag: master
wordpress:
config:
......@@ -12,8 +12,6 @@ wordpress:
pssw: "{{ wordpress_admin_password }}"
site:
alt_path: openappstack-login
# NOTE: Use a theme *slug* here
theme: twentynineteen
# NOTE: Make sure you use underscore and that the localisation is in full caps
locale: en_US
version: 5.3.2
......@@ -37,6 +35,8 @@ openid_connect_settings:
http_request_timeout: "15"
enable_logging: "1"
scope: email profile openid openappstack_roles offline_access
role_mapping_enabled: true
role_key: openappstack_roles
database:
db:
......
......@@ -106,3 +106,89 @@ have developed a [local storage
provisioner](https://open.greenhost.net/openappstack/local-storage) that
automatically provides persistent data on the VPS running OAS to an application
that requests it.
## Automatic updates
OpenAppStack has an auto-update mechanism that performs unattended upgrades to
applications. [Flux](https://fluxcd.io/) is the system running in the cluster
that is responsible for these updates.
Technically, flux is split up in two components: `flux` and `helm-operator`.
`flux` watches a git repository (or subdirectory thereof) for [source
files](https://docs.fluxcd.io/projects/helm-operator/en/latest/references/helmrelease-custom-resource.html)
that prescribe which application versions should be installed, and stores a copy
of those prescriptions inside the cluster as a Kubernetes manifest (of kind
`HelmRelease`).
`helm-operator` watches those in-cluster `HelmRelease` manifests, checks whether
the listed applications are already installed – including correct versions and
other settings – and performs any actions that are necessary to make sure that
the cluster state matches the prescriptions: installing new applications,
upgrading others.
Which git repository is watched by flux, is configurable. For typical
production OpenAppStack deployments, this is set to be
`https://open.greenhost.net/openappstack/openappstack` – the `HelmRelease` files
are stored in the `flux` directory. The OpenAppStack team considers available
upstream updates (say an update to Nextcloud). If the new Nextcloud version
passes our tests, the team changes the corresponding application description
file in the git repository (in this case `flux/nextcloud.yaml`) to reference the
new version. OpenAppStack deployments that are configured to watch this git
repository will see the change, record it in the in-cluster `HelmRelease`
manifest, and have their `helm-operator` perform the Nextcloud upgrade.
### Local development
When developing OpenAppStack, it's nice to be able to change the application
versions for your development cluster only, without affecting production
clusters. One way to do that is to set the `flux_source.repo` and/or
`flux_source.branch` ansible variables to point to another branch of the
`open.greenhost.net/openappstack/openappstack` repository, or to a different
repository.
To make this easier, we included a way to serve up a git repository with
`HelmRelease` manifests from the cluster itself, so you don't need an external
Gitlab or Github project. This feature is disabled by default, and can be
enabled by setting the `local_flux` ansible variable to `true`. If enabled, this
will change several things:
* when you run the OpenAppStack bootstrap ansible playbook from your
workstation, the current contents of the `flux` directory from your
workstation are copied to a directory (`/var/lib/OpenAppStack/local-flux`) on
the OpenAppStack host machine; also, a git commit is created in that directory
on the host machine from these updated contents;
* as part of the OpenAppStack installation, an nginx instance is deployed that
serves up the contents of `/var/lib/OpenAppStack/local-flux` over http;
* `flux` is configured to read the `HelmRelease` files from that git repository,
served by the in-cluster nginx. In particular, the `flux_source` variables are
ignored if `local_flux` is enabled.
The `local-flux` feature is also used by our CI pipeline, in order to be as
similar as possible to a production installation, in particular using `flux` and
`helm-operator` for the installation process, while still being completely
separate from the production applications versions as prescribed by the master
repository at `open.greenhost.net/openappstack/openappstack`.
#### Triggering an update
Both `flux` and `helm-operator` check at regular intervals (currently 1 hour
and 20 minutes, respectively) whether there is an upgrade to perform. If you
don't want to wait for that after making a change, you can trigger an update:
* to let `flux` re-read the `HelmRelease` files from the git repo (be it the
OpenAppStack master one, a `local-flux` one, or yet another one), log in to
the host machine and do
$ fluxctl --k8s-fwd-ns=oas sync
If there are any changes to `HelmRelease` manifests, `helm-operator` is
notified of that through the Kubernetes API and should act on them more or
less instantly (though the actual installation or upgrade could take quite a
while, depending on what needs to be done).
* If, for some reason, the `HelmRelease` manifests are still in sync between git
repository and cluster, but you'd still like to let `helm-operator` check
whether these `HelmRelease`s match what's actually installed, you can force
that by deleting the `helm-operator` pod:
$ kubectl delete pod -n oas -l app=helm-operator
A new `helm-operator` pod should be created automatically, and in our
experience will do a reconciliation run soon.
......@@ -57,9 +57,9 @@ guide][https://openappstack.net/contact.html).
## Getting the installation script
On your **provisioning machine**, clone the OpenAppStack git repository and
checkout the latest tagged version (currently `0.3.0`):
checkout the latest tagged version (currently `0.3.1`):
$ git clone -b 0.3.0 https://open.greenhost.net/openappstack/openappstack.git
$ git clone -b 0.3.1 https://open.greenhost.net/openappstack/openappstack.git
$ cd openappstack
> **NOTE:** Git will display a warning after you use the `git` command
......
......@@ -28,6 +28,9 @@ if the OAS will return valid certificates for the provided services.
The whitebox tests run on the OAS host and check, e.g., whether docker is
installed in the right version, etc.
First, enter the `test` directory in the Git repository on your provisioning
machine.
To run the test against your cluster, first export the `CLUSTER_DIR` environment
variable with the location of your cluster config directory:
......
......@@ -11,7 +11,7 @@ spec:
releaseName: nc
chart:
git: https://open.greenhost.net/openappstack/nextcloud
ref: 7a493d320b7a41b08ca78c1b785365239a23ed62
ref: b5f54e09a0fc29d3c041ecea0d9310c8283d4dae
path: .
valuesFrom:
- secretKeyRef:
......
......@@ -9,14 +9,9 @@ metadata:
spec:
releaseName: rocketchat
chart:
# repository: https://kubernetes-charts.storage.googleapis.com/
# name: rocketchat
# Wait for this version to be available:
# https://github.com/helm/charts/pull/19637
# version: 2.0.1
git: https://github.com/greenhost/charts
ref: 8b80ebdce2e3e384fd5e0bfc79d65c3b089e5b4f
path: stable/rocketchat
repository: https://kubernetes-charts.storage.googleapis.com/
name: rocketchat
version: 2.0.2
valuesFrom:
- secretKeyRef:
name: rocketchat-settings
......
......@@ -9,7 +9,7 @@ spec:
releaseName: wordpress
chart:
git: https://code.greenhost.net/open/wordpress-helm
ref: 858527ebcdc57401939491ca1a4d7bf7823818df
ref: f262679bbe3de3660e8b2d70f2f5b27955bd9472
path: .
valuesFrom:
- secretKeyRef:
......
......@@ -10,7 +10,7 @@ spec:
releaseName: single-sign-on
chart:
git: https://open.greenhost.net/openappstack/single-sign-on
ref: 0.2.3
ref: master
path: ./helmchart/single-sign-on/
valuesFrom:
- secretKeyRef:
......
ansible==2.9.2
ansible==2.9.6
attrs==19.3.0
bcrypt==3.1.7
behave==1.2.6
behave-webdriver==0.3.0
cachetools==4.0.0
certifi==2019.11.28
cffi==1.13.2
cffi==1.14.0
chardet==3.0.4
cryptography==2.8
dictdiffer==0.8.1
google-auth==1.10.0
google-auth==1.11.3
-e git+https://open.greenhost.net/greenhost/cloud-api@c174bc27f7f149786a14b40f782df399cde32f8e#egg=greenhost_cloud
idna==2.8
importlib-metadata==1.3.0
Jinja2==2.10.3
kubernetes==10.0.1
idna==2.9
importlib-metadata==1.5.0
Jinja2==2.11.1
kubernetes==10.1.0
MarkupSafe==1.1.1
more-itertools==8.0.2
more-itertools==8.2.0
-e git+https://open.greenhost.net/openappstack/oas_behave@72c7154545d8cc0084267734469c4dae5ef6b85a#egg=oas_behave
oauthlib==3.1.0
openshift==0.10.1
packaging==19.2
openshift==0.10.3
packaging==20.3
paramiko==2.7.1
parse==1.12.1
parse==1.15.0
parse-type==0.5.2
pluggy==0.13.1
psutil==5.6.7
py==1.8.0
psutil==5.7.0
py==1.8.1
pyasn1==0.4.8
pyasn1-modules==0.2.7
pycparser==2.19
pyasn1-modules==0.2.8
pycparser==2.20
PyNaCl==1.3.0
pyOpenSSL==19.1.0
pyparsing==2.4.5
pytest==5.3.2
pytest-rerunfailures==8.0
pyparsing==2.4.6
pytest==5.4.1
pytest-rerunfailures==9.0
python-dateutil==2.8.1
python-string-utils==0.6.0
python-string-utils==1.0.0
pytz==2019.3
PyYAML==5.2
requests==2.22.0
PyYAML==5.3
requests==2.23.0
requests-oauthlib==1.3.0
rsa==4.0
ruamel.yaml==0.16.5
ruamel.yaml==0.16.10
ruamel.yaml.clib==0.2.0
selenium==3.141.0
six==1.13.0
six==1.14.0
tabulate==0.8.6
testinfra==3.3.0
urllib3==1.25.7
wcwidth==0.1.7
websocket-client==0.56.0
zipp==0.6.0
testinfra==5.0.0
urllib3==1.25.8
wcwidth==0.1.8
websocket-client==0.57.0
zipp==3.1.0
"""
This module contains tests for:
- test_helmreleases: if all the helmreleases in EXPECTED_RELEASES are
deployed successfully
- test_apps_running: if all the applications are running without problems
(i.e., condition = Ready for all the related pods)
"""
import os
import pytest
from kubernetes import client, config
from kubernetes.client.rest import ApiException
import os
# Helm releases that must reach the "DEPLOYED" state, grouped by the
# namespace they are installed into.
EXPECTED_RELEASES = {
    'oas': [
        'cert-manager',
        'ingress',
        'local-storage',
        'monitoring',
        'loki-stack',
        'single-sign-on',
    ],
    'oas-apps': ['nextcloud', 'rocketchat', 'wordpress'],
}

# Pod selectors per application: the namespace the app runs in plus the
# label selector matching the pods that belong to it.
EXPECTED_APP_LABELS = {
    'nextcloud': {
        'namespace': 'oas-apps',
        'label_selector': 'app.kubernetes.io/instance=nc',
    },
    'wordpress': {
        'namespace': 'oas-apps',
        'label_selector': 'release=wordpress',
    },
    'rocketchat': {
        'namespace': 'oas-apps',
        'label_selector': 'app.kubernetes.io/instance=rocketchat',
    },
    'grafana': {
        'namespace': 'oas',
        'label_selector': 'app=grafana',
    },
}
def get_release(name, namespace, api):
def get_release_status(name, namespace, api):
"""Returns release status for release `name` in `namespace`"""
print('Testing %s in namespace %s ...' % (name, namespace), end='')
try:
release = api.get_namespaced_custom_object(
......@@ -16,41 +52,78 @@ def get_release(name, namespace, api):
)
release_status = release['status']['releaseStatus']
print(release_status)
except ApiException as e:
if e.status == 404:
except ApiException as ex:
if ex.status == 404:
release_status = 'not found'
else:
raise
print("**** NOT DEPLOYED, status: %s *****" % release_status)
return(release_status)
return release_status
@pytest.mark.helmreleases
def test_helmreleases(host):
"""Checks if all desired HelmReleases installed by weave flux are in
DEPLOYED state.
def check_all_pods_running(pods):
    """
    Check that every pod in an API result is in the "Running" phase.

    Prints one line per pod with its name and current phase, so the test
    output shows exactly which pod (if any) is not up.

    :param kubernetes.V1Pod[] pods: list of V1Pod elements to check
    :return: True if all pods have ``status.phase == "Running"`` (this
        includes an empty list), False otherwise.
    """
    all_running = True
    for pod in pods:
        print("- {}: {}".format(pod.metadata.name, pod.status.phase))
        if pod.status.phase != "Running":
            # Keep looping so every pod's state still gets printed.
            all_running = False
    return all_running
@pytest.fixture(autouse=True)
def run_around_tests():
"""
Prepare kube config before running a test
"""
cluster_dir = os.environ.get("CLUSTER_DIR")
kubeconfig = os.path.join(cluster_dir, 'secrets',
'kube_config_cluster.yml')
config.load_kube_config(config_file=kubeconfig)
customObjects = client.CustomObjectsApi()
yield
@pytest.mark.helmreleases
def test_helmreleases(host):
    """
    Checks if all desired HelmReleases installed by weave flux are in
    DEPLOYED state.

    Iterates over EXPECTED_RELEASES (namespace -> release names) and
    queries each release's status via the Kubernetes custom-objects API;
    fails if any release is not 'DEPLOYED'.
    """
    custom_objects = client.CustomObjectsApi()
    failed = 0
    print('\n')
    for namespace in EXPECTED_RELEASES:
        for app in EXPECTED_RELEASES[namespace]:
            app_status = get_release_status(app, namespace, custom_objects)
            if app_status != 'DEPLOYED':
                failed += 1
    assert failed == 0, "Error: {} apps not 'DEPLOYED'!".format(failed)
if __name__ == "__main__":
test_helmreleases()
@pytest.mark.apps_running
def test_apps_running(host):
    """
    Checks that the pods belonging to each entry in EXPECTED_APP_LABELS
    are in the "Running" phase, failing if any application has a pod
    that is not running.
    """
    api = client.CoreV1Api()
    failed = 0
    print('\n')
    for app, info in EXPECTED_APP_LABELS.items():
        print("{}: ".format(app))
        pods = api.list_namespaced_pod(
            namespace=info['namespace'],
            label_selector=info['label_selector'],
        )
        if not check_all_pods_running(pods.items):
            failed += 1
        print()
    assert failed == 0, "Error: {} apps not 'Running'!".format(failed)
import re
import json
import pytest
import re
@pytest.mark.prometheus
def test_prometheus_alerts(host):
"""Test prometheus for firing alerts."""
def summarize_alerts(alerts):
"""Print a alert summary."""
print('Total alerts: %s' % len(alerts))
print(json.dumps(alerts, indent=2))
print("Starting prometheus test...")
url = 'http://monitoring-prometheus-oper-prometheus.oas.svc.cluster.local:9090/api/v1/alerts'
alert_json = json.loads(host.check_output('curl ' + url))
status = alert_json["status"]
alerts = alert_json["data"]["alerts"]
real_alerts = []
ignored_alerts = []
for alert in alerts:
# Filter out the ever firing "Dead mans switch" test alert
if ignore_alert(alert):
ignored_alerts.append(alert)
else:
real_alerts.append(alert)
print('\n\n\n========= Ignored ==========')
summarize_alerts(ignored_alerts)
print('\n\n\n========= Firing ==========')
summarize_alerts(real_alerts)
count = len(real_alerts)
assert status == "success", "Failure queriying the prometheus api at" + url
assert count == 0, "Firing alerts: {0}".format(count)
def ignore_alert(alert):
......@@ -92,5 +53,44 @@ def ignore_alert(alert):
return False
@pytest.mark.prometheus
def test_prometheus_alerts(host):
    """
    Query the in-cluster Prometheus alerts API and fail if any alert is
    firing, except for alerts filtered out by ignore_alert().
    """

    def summarize_alerts(alerts):
        """Print an alert summary."""
        print('Total alerts: %s' % len(alerts))
        print(json.dumps(alerts, indent=2))

    print("Starting prometheus test...")
    url = 'http://monitoring-prometheus-oper-prometheus.oas.svc.cluster.local:9090/api/v1/alerts'
    alert_json = json.loads(host.check_output('curl ' + url))
    status = alert_json["status"]
    alerts = alert_json["data"]["alerts"]

    real_alerts = []
    ignored_alerts = []
    for alert in alerts:
        # Filter out the ever firing "Dead mans switch" test alert
        if ignore_alert(alert):
            ignored_alerts.append(alert)
        else:
            real_alerts.append(alert)

    print('\n\n\n========= Ignored ==========')
    summarize_alerts(ignored_alerts)
    print('\n\n\n========= Firing ==========')
    summarize_alerts(real_alerts)

    count = len(real_alerts)
    # Check that the API request itself succeeded before judging alerts.
    assert status == "success", "Failure querying the prometheus api at " + url
    assert count == 0, "Firing alerts: {0}".format(count)
if __name__ == "__main__":
test_prometheus_alerts('')