diff --git a/.gitignore b/.gitignore
index 8682767ec2a5ac50995b767f6e165df96f6cacb0..5594231483d534a96440d143bfbf769e651c3715 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,4 @@
 # Ignore custom files created when following the README.md
-!ansible/group_vars/all/settings.yml.example
 /ansible/group_vars/all/settings*
 !/ansible/inventory.yml.example
 /ansible/inventory*
@@ -11,17 +10,16 @@
 /env
 /venv
 /.direnv
+.envrc
 
 # Ignore files created during CI
 /test/group_vars/all/
 /test/inventory*
-/test/behave/behave.ini
-/test/behave/rerun_failing.features
 /clusters
 /install/installation-kustomization/*.txt
 
 # Ignore files created during tests
-/test/behave/**/screenshots/
+Screenshot-*.png
 
 # Etc
 __pycache__
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b5e073ef85336db80523180ea19702e3722dadd7..039d31bb7ce414a6f8de273a012a849f9443b9b2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -53,6 +53,8 @@ include:
         - Dockerfile
         - ansible/**/*
         - flux/**/*
+        - flux2/**/*
+        - install/**/*
         - test/**/*
         - openappstack/**/*
         - requirements.txt
@@ -92,12 +94,12 @@ include:
         - flux2/apps/$APP/*.yaml
         - flux2/cluster/optional/$APP/*.yaml
         - install/install-${APP}.sh
-        - test/behave/features/$APP.feature
+        - test/taiko/*
     - if: '$TRIGGER_JOBS =~ /enable-nextcloud/'
     - if: '$CI_COMMIT_MESSAGE =~ /TRIGGER_JOBS=.*enable-nextcloud/'
     - if: '$CI_COMMIT_BRANCH == "master"'
 
-.prometheus_stack_rules:
+.kube_prometheus_stack_rules:
   extends:
     - .general_rules
 
@@ -115,7 +117,7 @@ include:
         - flux2/apps/$APP/*.yaml
         - flux2/cluster/optional/$APP/*.yaml
         - install/install-${APP}.sh
-        - test/behave/features/$APP.feature
+        - test/taiko/*
     - if: '$TRIGGER_JOBS =~ /enable-rocketchat/'
     - if: '$CI_COMMIT_MESSAGE =~ /TRIGGER_JOBS=.*enable-rocketchat/'
     - if: '$CI_COMMIT_BRANCH == "master"'
@@ -125,7 +127,7 @@ include:
     - changes:
         - flux2/core/base/$APP/*.yaml
         - install/install-openappstack.sh
-        - test/behave/features/$APP.feature
+        - test/taiko/*
     - if: '$TRIGGER_JOBS =~ /enable-single-sign-on/'
     - if: '$CI_COMMIT_MESSAGE =~ /TRIGGER_JOBS=.*enable-single-sign-on/'
     - if: '$CI_COMMIT_BRANCH == "master"'
@@ -135,8 +137,9 @@ include:
     - changes:
         - flux2/apps/$APP/*.yaml
         - flux2/cluster/optional/$APP/*.yaml
+        - flux2/infrastructure/sources/wordpress.yaml
         - install/install-${APP}.sh
-        - test/behave/features/$APP.feature
+        - test/taiko/*
     - if: '$TRIGGER_JOBS =~ /enable-wordpress/'
     - if: '$CI_COMMIT_MESSAGE =~ /TRIGGER_JOBS=.*enable-wordpress/'
     - if: '$CI_COMMIT_BRANCH == "master"'
@@ -261,7 +264,7 @@ create-vps:
     DOMAIN: "openappstack.net"
   script:
     - *debug_information
-    # Creates a VPS based on a custom CI image for which --install-kubernetes
+    # Creates a VPS based on a custom CI image for which the ansible playbook
     # has already run. See CONTRIBUTING.md#ci-pipeline-image for more info
     - bash .gitlab/ci_scripts/create_vps.sh
     # Make sure .ci.env variables are not lost
@@ -302,10 +305,8 @@ setup-openappstack:
     # Copy inventory files to ansible folder for use in install-apps step
     - chmod 700 ansible
     - cp ${CLUSTER_DIR}/inventory.yml ansible/
-    - cp ${CLUSTER_DIR}/group_vars/all/settings.yml ansible/group_vars/all/
     # Set up cluster
-    # TODO: I set --no-install-openappstack to skip the old installation procedure, should be removed eventually
-    - python3 -m openappstack $HOSTNAME install --install-kubernetes --no-install-openappstack
+    - python3 -m openappstack $HOSTNAME install
     # Customize env file, remove all comments and empty lines
     - sed "s/1.2.3.4/$IP_ADDRESS/; s/example.org/$FQDN/; s/acme_staging=false/acme_staging=true/; s/acme-v02/acme-staging-v02/; /^\s*#.*$/d; /^\s*$/d" install/.flux.env.example >> ${CLUSTER_DIR}/.flux.env
     # Deploy secret/oas-cluster-variables
@@ -335,10 +336,11 @@ setup-openappstack:
   script:
     - *debug_information
     - cd ansible/
-    - export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/secrets/kube_config_cluster.yml"
+    - export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/kube_config_cluster.yml"
     - pytest -v -s -m 'kustomizations' --app="$APP" --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' --reruns 120 --reruns-delay 20
   extends:
     - .ssh_setup
+    - .general_rules
   interruptible: true
 
 base-kustomizations-ready:
@@ -351,7 +353,7 @@ base-kustomizations-ready:
   script:
     - *debug_information
     - cd ansible/
-    - export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/secrets/kube_config_cluster.yml"
+    - export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/kube_config_cluster.yml"
     - pytest -v -s -m 'helmreleases' --app="$APP" --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' --reruns 120 --reruns-delay 20
   extends:
     - .ssh_setup
@@ -370,6 +372,7 @@ base-kustomizations-ready:
     - job: test-dns
   extends:
     - .helm-release
+    - .general_rules
 
 cert-manager-helm-release:
   variables:
@@ -411,7 +414,7 @@ kube-prometheus-stack-helm-release:
     APP: "kube-prometheus-stack"
   extends:
     - .base-helm-release
-    - .prometheus_stack_rules
+    - .kube_prometheus_stack_rules
 
 single-sign-on-helm-release:
   variables:
@@ -430,7 +433,7 @@ single-sign-on-helm-release:
   script:
     - *debug_information
     # Add optional override values we need for the CI pipeline only
-    - '[ -f ./install/ci-overrides/${APP}-values-override.yaml ] && kubectl apply -n oas-apps -f ./install/ci-overrides/${APP}-values-override.yaml'
+    - '[ -f ./install/ci-overrides/oas-${APP}-override.yaml ] && kubectl apply -n oas-apps -f ./install/ci-overrides/oas-${APP}-override.yaml'
     - bash ./install/install-${APP}.sh
   extends:
     - .ssh_setup
@@ -515,7 +518,7 @@ wordpress-helm-release:
   script:
     - *debug_information
     - cd ansible/
-    - export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/secrets/kube_config_cluster.yml"
+    - export KUBECONFIG="${PWD}/../clusters/${HOSTNAME}/kube_config_cluster.yml"
     - pytest -v -s -m 'apps_running' --app="$APP" --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' --reruns 120 --reruns-delay 10
   extends:
     - .ssh_setup
@@ -589,7 +592,7 @@ kube-prometheus-stack-ready:
     - job: setup-openappstack
   extends:
     - .apps-ready
-    - .prometheus_stack_rules
+    - .kube_prometheus_stack_rules
 
 rocketchat-ready:
   variables:
@@ -654,7 +657,7 @@ kube-prometheus-stack-cert:
     - job: setup-openappstack
   extends:
     - .apps-cert
-    - .prometheus_stack_rules
+    - .kube_prometheus_stack_rules
 
 rocketchat-cert:
   variables:
@@ -706,7 +709,7 @@ testinfra:
 kube-prometheus-stack-alerts:
   stage: health-test
   variables:
-    # APP var is used in job specific rules (i.e. .prometheus_stack_rules)
+    # APP var is used in job specific rules (i.e. .kube_prometheus_stack_rules)
     APP: "kube-prometheus-stack"
   allow_failure: true
   script:
@@ -715,7 +718,7 @@ kube-prometheus-stack-alerts:
     - pytest -s -m 'prometheus' --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
   extends:
     - .ssh_setup
-    - .prometheus_stack_rules
+    - .kube_prometheus_stack_rules
   needs:
     - job: kube-prometheus-stack-ready
     - job: setup-openappstack
@@ -725,63 +728,62 @@ kube-prometheus-stack-alerts:
 # Stage: integration-test
 # =======================
 #
-# Runs integration tests for most apps using behave
+# Runs integration tests for most apps using taiko
 
-.behave:
+.taiko:
   stage: integration-test
   script:
     - *debug_information
-    # Run the behave tests for specific app
-    - python3 -m openappstack $HOSTNAME test --behave-headless --behave-ignore-certificate-errors --behave-tags $APP || python3 -m openappstack $HOSTNAME test --behave-headless --behave-ignore-certificate-errors --behave-tags $APP --behave-rerun-failing
+    # Run the taiko tests for specific app
+    - python3 -m openappstack $HOSTNAME test --apps $APP
   retry: 2
   artifacts:
     paths:
-      - test/behave/screenshots/
+      - test/taiko/Screenshot*
     expire_in: 1 month
     when: on_failure
   extends:
     - .ssh_setup
   interruptible: true
 
-kube-prometheus-stack-behave:
+grafana-taiko:
   variables:
-    APP: "kube-prometheus-stack"
+    APP: "grafana"
   needs:
     - job: kube-prometheus-stack-cert
     - job: setup-openappstack
   extends:
-    - .behave
-    - .prometheus_stack_rules
+    - .taiko
+    - .kube_prometheus_stack_rules
 
-nextcloud-behave:
+nextcloud-taiko:
   variables:
     APP: "nextcloud"
   needs:
     - job: nextcloud-cert
     - job: setup-openappstack
   extends:
-    - .behave
+    - .taiko
     - .nextcloud_rules
 
-rocketchat-behave:
+rocketchat-taiko:
   variables:
     APP: "rocketchat"
   needs:
     - job: rocketchat-cert
     - job: setup-openappstack
   extends:
-    - .behave
+    - .taiko
     - .rocketchat_rules
 
-
-wordpress-behave:
+wordpress-taiko:
   variables:
     APP: "wordpress"
   needs:
     - job: wordpress-cert
     - job: setup-openappstack
   extends:
-    - .behave
+    - .taiko
     - .wordpress_rules
 
 
diff --git a/.gitlab/ci_scripts/create_vps.sh b/.gitlab/ci_scripts/create_vps.sh
index 0e8972148008b1bca0ccb1660189323714320935..0a3210fa06a628a92b9cdda0f9e3ee6c74c7130c 100644
--- a/.gitlab/ci_scripts/create_vps.sh
+++ b/.gitlab/ci_scripts/create_vps.sh
@@ -7,15 +7,17 @@ set -ve
 echo "Deleting old machine"
 python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^${HOSTNAME}$\")"
 echo "Creating new machine"
-# Uses a custom disk image. See CONTRIBUTING.md#ci-pipeline-image for more info.
+# Uses a custom disk image built with 386e3614 on 2021-07-08. See
+# CONTRIBUTING.md#ci-pipeline-image for more info.
 python3 -m openappstack $HOSTNAME create \
-  --acme-staging \
-  --local-flux \
-  --prometheus-enable-ingress \
   --create-droplet $DOMAIN \
   --create-hostname $HOSTNAME \
   --ssh-key-id $SSH_KEY_ID \
   --create-domain-records \
   --subdomain $SUBDOMAIN \
-  --disk-image-id '-7473' \
-  --truncate-subdomain
+  --disk-image-id '-8115' \
+  --truncate-subdomain \
+  --docker-mirror-server="${CI_DEPENDENCY_PROXY_SERVER}" \
+  --docker-mirror-endpoint="${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}" \
+  --docker-mirror-username="${CI_RUNNER_PERSONAL_ACCESS_USERNAME}" \
+  --docker-mirror-password="${CI_RUNNER_PERSONAL_ACCESS_TOKEN}"
diff --git a/.gitlab/ci_templates/kaniko.yml b/.gitlab/ci_templates/kaniko.yml
index 93dfd3654795e98931f1a516db2babfa15291ee6..fd5b379183ee25490ad42a28142da615376f7c8a 100644
--- a/.gitlab/ci_templates/kaniko.yml
+++ b/.gitlab/ci_templates/kaniko.yml
@@ -17,7 +17,7 @@
   image:
     # We need a shell to provide the registry credentials, so we need to use the
     # kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
-    name: gcr.io/kaniko-project/executor:debug
+    name: gcr.io/kaniko-project/executor:v1.6.0-debug
     entrypoint: [""]
   script:
     - mkdir -p /kaniko/.docker/
diff --git a/.gitlab/issue_templates/new_app.md b/.gitlab/issue_templates/new_app.md
index 3bdfdbfcd352b1646bbdf83bf361939de2292886..f9bfe0bd6739ca9ea1f2d3e0221b59af0b67241f 100644
--- a/.gitlab/issue_templates/new_app.md
+++ b/.gitlab/issue_templates/new_app.md
@@ -17,7 +17,7 @@
 
 ## Tests
 
-* [ ] Add behave feature (`tests/behave/feature`)
+* [ ] Add taiko test (`tests/taiko`)
 * [ ] Check for successful helmrelease (`test/pytest/test_helmreleases.py`)
 * [ ] Test cert (`test/pytest/test_certs.py`)
 
diff --git a/.gitlab/issue_templates/release.md b/.gitlab/issue_templates/release.md
index cb7a227ab9eef6e76bcccd05587cf58bc19e7c55..57805a24a13fee4b1fdec9ba7e33a94af17b79ce 100644
--- a/.gitlab/issue_templates/release.md
+++ b/.gitlab/issue_templates/release.md
@@ -27,8 +27,6 @@ Create a MR for the new branch with the following changes:
 * [ ] Ensure all dependencies are updated and locked (locking should be done
     only on the release branch):
   * [ ] flux chart versions: `find flux/* -type f -exec yq -y -rS .spec.chart {} \;`
-  * [ ] in `ansible/group_vars/all/settings.yml.example`:
-        `flux_source.branch` should be set to the release branch
 * [ ] Push and make sure the pipeline succeeds for the last commit before tagging. This
       is important because tags should not get retagged !
 * [ ] Create and push signed tag (`git tag -s 0.2.0 -m 'Release 0.2.0'; git push --tags`)
diff --git a/.gitlab/issue_templates/update_all_components.md b/.gitlab/issue_templates/update_all_components.md
index 430f376561ab3a685567112d32487d88f558e25a..d7628fb8e7762cde98cba41b8b7db3a64e108af0 100644
--- a/.gitlab/issue_templates/update_all_components.md
+++ b/.gitlab/issue_templates/update_all_components.md
@@ -1,8 +1,11 @@
 To update all applications, check the following files:
 
-* [ ] Docker image in `Dockerfile`
-* [ ] requirements.txt
-* [ ] mitogen in `ansible/plugins`
+* [ ] Docker base image in `Dockerfile` (`FROM`)
+* [ ] All app versions in `Dockerfile`, especially:
+  * [ ] flux (Make sure the version is the same as in `ansible/group_vars/all/oas.yml`, `flux.version`)
+* [ ] `requirements.in`
+  * [ ] Run `pip install -r requirements-dev.txt && pip-compile`
+* [ ] mitogen version in `ansible/plugins`
 
 In `group_vars/all/oas.yml`:
 
@@ -13,19 +16,6 @@ In `group_vars/all/oas.yml`:
 
 Installed by flux:
 
-* [ ] ./kube-system/metallb.yaml
-* [ ] ./kube-system/local-path-provisioner.yaml
-* [ ] ./oas/grafana.yaml
-* [ ] ./oas/letsencrypt-production.yaml
-* [ ] ./oas/single-sign-on.yaml
-* [ ] ./oas/prometheus.yaml
-* [ ] ./oas/letsencrypt-staging.yaml
-* [ ] ./oas/ingress.yaml
-* [ ] ./oas/loki.yaml
-* [ ] ./oas/promtail.yaml
-* [ ] ./velero/velero.yaml
-* [ ] ./oas-custom/flux-custom.yaml
-* [ ] ./cert-manager/cert-manager.yaml
-* [ ] ./oas-apps/nextcloud.yaml (make sure to also update our nextcloud helm chart)
-* [ ] ./oas-apps/wordpress.yaml (make sure that our wordpress chart installs the newest version)
-* [ ] ./oas-apps/rocketchat.yaml
+* [ ] Run `find flux2 -name '*release*.yaml'` and paste the resulting list here
+* [ ] make sure to also update our [nextcloud helm chart](https://open.greenhost.net/openappstack/nextcloud)
+* [ ] make sure that our wordpress chart installs the newest version
diff --git a/.sourceignore b/.sourceignore
new file mode 100644
index 0000000000000000000000000000000000000000..f39d0b8ce1b8e3144757b2ad3c5cd4b932c04fb5
--- /dev/null
+++ b/.sourceignore
@@ -0,0 +1,9 @@
+# Flux ignore
+# https://toolkit.fluxcd.io/components/source/gitrepositories/#excluding-files
+
+# Exclude all
+/*
+
+# Include manifest and chart directories
+!/flux2/
+!/charts/
diff --git a/Dockerfile b/Dockerfile
index 56a5130d85b3211677c47d3c29b5c5e3ef4f9708..81bbd423db99f2852de9a5f866cc8284cf82c9e5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,12 +1,13 @@
-FROM alpine:3.13.5
+FROM alpine:3.14
 
 LABEL name="OpenAppStack management"
-LABEL version="4.3"
+LABEL version="4.4"
 LABEL vendor1="Greenhost"
 
+ENV TAIKO_SKIP_CHROMIUM_DOWNLOAD=true
+ENV TAIKO_BROWSER_PATH=/usr/bin/chromium-browser
+ENV TAIKO_BROWSER_ARGS=--no-sandbox,--start-maximized,--disable-dev-shm-usage,--ignore-certificate-errors
 
-# Download yq v4 from github until it's packaged in alpine > 3.13
-ADD https://github.com/mikefarah/yq/releases/download/v4.7.0/yq_linux_amd64 /usr/local/bin/yq
 ADD https://github.com/fluxcd/flux2/releases/download/v0.14.2/flux_0.14.2_linux_amd64.tar.gz /tmp/
 # Download kubectl until it's packaged in alpine > 3.14
 ADD https://dl.k8s.io/release/v1.21.0/bin/linux/amd64/kubectl /usr/local/bin/
@@ -14,26 +15,28 @@ COPY ./test/pytest/le-staging-bundle.pem /usr/local/share/ca-certificates/le-sta
 COPY ./requirements.txt /requirements.txt
 RUN \
   apk --no-cache add \
-    bash=5.1.0-r0 \
-    cargo=~1.47.0-r2 \
-    chromium=~86.0.4240.111-r0 \
-    chromium-chromedriver=~86.0.4240.111-r0 \
+    bash=~5.1.4-r0 \
+    cargo=~1.52.0-r0 \
+    chromium=~91.0.4472.101-r0 \
     curl=~7.77.0-r1 \
     # needed for installing pycurl python module
     curl-dev=~7.77.0-r1 \
-    gcc=~10.2.1_pre1-r3 \
-    git=~2.30.2-r0 \
+    gcc=~10.3.1_git20210424-r2 \
+    git=~2.32.0-r0 \
     libffi-dev=3.3-r2 \
     make=~4.3-r0 \
-    musl-dev=~1.2.2-r1 \
-    openssh-client=~8.4_p1-r3 \
-    py3-pip=~20.3.4-r0 \
-    py3-wheel=~0.36.2-r0 \
-    python3-dev=~3.8.10-r0 \
-    rsync=~3.2.3-r1 \
-    rust=~1.47.0-r2 && \
+    musl-dev=~1.2.2-r3 \
+    npm=~7.17.0-r0 \
+    openssh-client=~8.6_p1-r2 \
+    py3-pip=~20.3.4-r1 \
+    python3-dev=~3.9.5-r1 \
+    rsync=~3.2.3-r2 \
+    yq=4.6.3-r1 && \
+  rm -rf /var/cache/* && \
+  mkdir /var/cache/apk && \
   chmod a+x /usr/local/bin/* && \
   update-ca-certificates && \
   pip install --no-cache-dir -r /requirements.txt && \
   ln -s /usr/bin/python3 /usr/bin/python && \
-  tar -xzf /tmp/flux*.tar.gz && mv ./flux /usr/local/bin
+  tar -xzf /tmp/flux*.tar.gz && mv ./flux /usr/local/bin && \
+  npm install -g taiko@1.2.5
diff --git a/ansible/group_vars/all/oas.yml b/ansible/group_vars/all/oas.yml
index 758f08f77772a275f9839d86f9c959135e8fad3c..23b8dd7f6fd4aa9230d208390bfdab4aef118e6e 100644
--- a/ansible/group_vars/all/oas.yml
+++ b/ansible/group_vars/all/oas.yml
@@ -1,59 +1,17 @@
 ---
 # Directory to store generated configuration and cluster state.
 data_directory: "/var/lib/OpenAppStack"
+ip_address: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
 
 # Use python3 on cluster nodes for ansible
 ansible_python_interpreter: "/usr/bin/env python3"
 
-# Application passwords. If you do not change the default values, they get
-# generated and stored in the `clusters/CLUSTERNAME/secrets/` directory.
-# You can also choose your own passwords and fill it in here instead.
-
-# Nextcloud administrator password
-nextcloud_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_admin_password chars=ascii_letters') }}"
-nextcloud_mariadb_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_mariadb_password chars=ascii_letters') }}"
-nextcloud_mariadb_root_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_mariadb_root_password chars=ascii_letters') }}"
-onlyoffice_jwt_secret: "{{ lookup('password', '{{ cluster_dir }}/secrets/onlyoffice_jwt_secret chars=ascii_letters') }}"
-onlyoffice_postgresql_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/onlyoffice_postgresql_password chars=ascii_letters') }}"
-onlyoffice_rabbitmq_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/onlyoffice_rabbitmq_password chars=ascii_letters') }}"
-
-# Rocketchat credentials
-rocketchat_mongodb_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/rocketchat_mongodb_password chars=ascii_letters') }}"
-rocketchat_mongodb_root_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/rocketchat_mongodb_root_password chars=ascii_letters') }}"
-rocketchat_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/rocketchat_admin_password chars=ascii_letters') }}"
-
-# WordPress credentials
-wordpress_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/wordpress_admin_password chars=ascii_letters') }}"
-wordpress_mariadb_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/wordpress_mariadb_password chars=ascii_letters') }}"
-wordpress_mariadb_root_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/wordpress_mariadb_root_password chars=ascii_letters') }}"
-
-# Grafana credentials
-grafana_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/grafana_admin_password chars=ascii_letters') }}"
-
-# Credetnials used to protect the prometheus server. Username is "admin"
-prometheus_basic_auth: "{{ lookup('password', '{{ cluster_dir }}/secrets/prometheus_basic_auth chars=ascii_letters') }}"
-# Credetnials used to protect the alertmanager server. Username is "admin"
-alertmanager_basic_auth: "{{ lookup('password', '{{ cluster_dir }}/secrets/alertmanager_basic_auth chars=ascii_letters') }}"
-
-# Single sign-on passwords
-userpanel_oauth_client_secret: "{{ lookup('password', '{{ cluster_dir }}/secrets/userpanel_oauth_client_secret chars=ascii_letters') }}"
-nextcloud_oauth_client_secret: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_oauth_client_secret chars=ascii_letters') }}"
-grafana_oauth_client_secret: "{{ lookup('password', '{{ cluster_dir }}/secrets/grafana_oauth_client_secret chars=ascii_letters') }}"
-wordpress_oauth_client_secret: "{{ lookup('password', '{{ cluster_dir }}/secrets/wordpress_oauth_client_secret chars=ascii_letters') }}"
-rocketchat_oauth_client_secret: "{{ lookup('password', '{{ cluster_dir }}/secrets/rocketchat_oauth_client_secret chars=ascii_letters') }}"
-userbackend_postgres_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/userbackend_postgres_password chars=ascii_letters') }}"
-userbackend_admin_username: "admin"
-userbackend_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/userbackend_admin_password chars=ascii_letters') }}"
-userbackend_admin_email: "{{ admin_email }}"
-hydra_system_secret: "{{ lookup('password', '{{ cluster_dir }}/secrets/hydra_system_secret chars=ascii_letters') }}"
-
 # Application versions
-
 flux:
   version: 0.14.2
 
 k3s:
-  version: 'v1.20.4+k3s1'
+  version: 'v1.21.2+k3s1'
   # args to start the k3s server with
   # https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/
   # kubelet arguments can be passed with `--kubelet-arg`
diff --git a/ansible/group_vars/all/settings.yml.example b/ansible/group_vars/all/settings.yml.example
deleted file mode 100644
index c99ace1707a940e87fe8526f38502413ef4122b1..0000000000000000000000000000000000000000
--- a/ansible/group_vars/all/settings.yml.example
+++ /dev/null
@@ -1,103 +0,0 @@
-# External ip address of the cluster.
-ip_address: "203.0.113.6"
-# Main domain name of the cluster services.
-domain: "example.com"
-# Email address of someone administering the cluster.
-admin_email: "admin@example.com"
-# If this is `true` TLS certificates will be requested at the Let's Encrypt
-# staging server. If this is `false`, you use Let's Encrypt's production server.
-# Note that LE's production server has stricter rate limits, so set this to
-# `true` when you are testing something.
-# Important: Don't quote this variable !
-acme_staging: false
-
-outgoing_mail:
-  enabled: false
-  fromAddress: ""
-  smtp:
-    user: ""
-    password: ""
-    host: ""
-    ssl: false
-    port: ""
-    # Authentication type can be one of "PLAIN, NONE, LOGIN"
-    authtype: "LOGIN"
-
-backup:
-  s3:
-    # Disabled by default. To enable, change to `true` and configure the
-    # settings below. You'll also want to add "velero" to the enabled
-    # applications a bit further in this file.
-    # Finally, you'll also need to provide access credentials as
-    # secrets; see the documentation:
-    # https://docs.openappstack.net/en/latest/installation_instructions.html#step-2-optional-cluster-backups-using-velero
-    enabled: false
-    # URL of S3 service. Please use the principal domain name here, without the
-    # bucket name.
-    url: "https://store.greenhost.net"
-    # Region of S3 service that's used for backups.
-    # For some on-premise providers this may be irrelevant, but the S3
-    # apparently requires it at some point.
-    region: "ceph"
-    # Name of the S3 bucket that backups will be stored in.
-    # This has to exist already: Velero will not create it for you.
-    bucket: "openappstack-backup"
-    # Prefix that's added to backup filenames.
-    prefix: "test-instance"
-
-# Additional custom flux installation, needs to be enabled under `enabled_applications` below.
-# see https://docs.openappstack.net/en/latest/customization.html for details
-# and https://github.com/fluxcd/flux/tree/master/chart/flux#configuration for
-# possibe values.
-#
-# i.e.:
-#
-# flux-custom_extra_values:
-#   git:
-#     repo: "https://github.com/me/my_flux_config"
-#     branch: "master"
-#   …
-
-# A whitelist of applications that will be enabled.
-enabled_applications:
-  # System components, necessary for the system to function.
-  - 'cert-manager'
-  - 'letsencrypt-production'
-  - 'letsencrypt-staging'
-  - 'metallb'
-  - 'ingress'
-  - 'local-path-provisioner'
-  # - 'single-sign-on'
-  #
-  # Monitoring components (auto-enabled by GitLab CI)
-  # - 'prometheus-stack'
-  # - 'loki'
-  # - 'promtail'
-  # - 'eventrouter'
-  #
-  # The backup system Velero is disabled by default, see settings under `backup` above.
-  # - 'velero'
-  #
-  # Additional custom flux installation, disabled by default.
-  # see https://docs.openappstack.net/en/latest/customization.html for details
-  # - 'flux-custom'
-  #
-  # Applications
-  # - 'nextcloud'
-  # - 'rocketchat'
-  # - 'wordpress'
-
-
-# Use `APPNAME_extra_values` to provide additional, custom
-# helm chart values. Look for the correct APPNAME in above
-# `enabled_applications` lists.  We use Ansible's `combine` filter
-# (https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#combining-hashes-dictionaries)
-# to combine the values with the default values in
-# `ansible/roles/apps/templates/settings/APPNAME.yaml`.
-# Values entered here take precedence over our defaults.
-#
-# I.e.:
-#
-#   prometheus_extra_values:
-#     extraScrapeConfigs:
-#       …
diff --git a/ansible/install-kubernetes.yml b/ansible/install-kubernetes.yml
index 462b69929141538f62b4b3e577f616cf456a16d0..83a15a0a5c16aba4d53562cc157e2d6c47c7eb29 100644
--- a/ansible/install-kubernetes.yml
+++ b/ansible/install-kubernetes.yml
@@ -9,12 +9,6 @@
 
 - name: Pre-configure hosts
   hosts: all
-  # We use `mitogen_linear` as default strategy. However,
-  # mitogen fails installing the `openshift` python module as requirement
-  # for the `k8s` ansible resource, and using `k8s` in the same context.
-  # That's why we use the standard `linear` ansible strategy for this role.
-  # See https://open.greenhost.net/openappstack/openappstack/issues/102
-  strategy: linear
   roles:
     - role: pre-configure
       tags: ['pre-configure']
diff --git a/ansible/install-openappstack.yml b/ansible/install-openappstack.yml
deleted file mode 100644
index 5589966d9530e39181fa64c9b72bc00e7a882fa0..0000000000000000000000000000000000000000
--- a/ansible/install-openappstack.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-
-- name: Run compatibility checks
-  hosts: all
-  gather_facts: false
-  pre_tasks:
-    - import_role:
-        name: compatibility-checks
-    - import_role:
-        name: kubernetes-checks
-
-- name: Install OpenAppStack
-  hosts: master
-  tasks:
-    - import_role:
-        name: apps
-      tags: ['apps']
diff --git a/ansible/roles/apps/files/ingress_hr.yaml b/ansible/roles/apps/files/ingress_hr.yaml
deleted file mode 120000
index fa3810d6c3f82f8ee433c91738e4c8848abd7894..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/files/ingress_hr.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../flux/oas/ingress_hr.yaml
\ No newline at end of file
diff --git a/ansible/roles/apps/files/local-path-provisioner_hr.yaml b/ansible/roles/apps/files/local-path-provisioner_hr.yaml
deleted file mode 120000
index dc817a880ae5655be3058e42e0d0df9c539b0bda..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/files/local-path-provisioner_hr.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../flux/kube-system/local-path-provisioner_hr.yaml
\ No newline at end of file
diff --git a/ansible/roles/apps/tasks/cert-manager.yml b/ansible/roles/apps/tasks/cert-manager.yml
deleted file mode 100644
index e21a17d29b27a11596541117c5fa581889ee6997..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/cert-manager.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: Create Kubernetes secret with cert-manager settings
-  tags:
-    - config
-    - flux
-    - cert-manager
-  vars:
-    flux_secret:
-      name: "cert-manager"
-      namespace: "cert-manager"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - cert-manager
diff --git a/ansible/roles/apps/tasks/core.yml b/ansible/roles/apps/tasks/core.yml
deleted file mode 100644
index a9153f6b0530a27fde70d04789d04cf8bf67f7fc..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/core.yml
+++ /dev/null
@@ -1,178 +0,0 @@
----
-- name: Create OAS namespaces
-  tags:
-    - kubernetes
-    - namespace
-    - flux
-  k8s:
-    name: '{{ item }}'
-    api_version: v1
-    kind: Namespace
-    state: present
-  with_items:
-    - 'oas'
-    - 'oas-apps'
-    - 'cert-manager'
-    - 'velero'
-    - 'oas-custom'
-
-- name: Install helm-operator
-  tags:
-    - flux
-  vars:
-    extra_opts: "{{ helm_operator.extra_opts | default('') }}"
-  # Commented version of below shell command:
-  # helm upgrade
-  #   # Install a new release if it doesn't yet exist.
-  #   --install
-  #   --repo "https://charts.fluxcd.io"
-  #   --namespace oas
-  #   --version 1.0.1
-  #   --set helm.versions=v3
-  #   # Helm 3 doesn't have the stable repository enabled by default.
-  #   --set configureRepositories.enable=true
-  #   --set configureRepositories.repositories[0].name=stable
-  #   --set configureRepositories.repositories[0].url=https://kubernetes-charts.storage.googleapis.com
-  #   --set configureRepositories.repositories[1].name=bitnami
-  #   --set configureRepositories.repositories[1].url=https://charts.bitnami.com/bitnami
-  #   # Reconcile actual helm releases with HelmRelease objects with this
-  #   # interval.
-  #   --set chartsSyncInterval=20m
-  #   # Update HelmRelease objects' status with this interval.
-  #   --set statusUpdateInterval=30s
-  #   # Set resource limits so helm-operator can't take over the whole machine
-  #   --set resources.limits.cpu=1200m
-  #   --set resources.limits.memory=2Gi
-  #   # Delay the liveness and readiness probe a bit to prevent restarts
-  #   --set livenessProbe.initialDelaySeconds=10
-  #   --set readinessProbe.initialDelaySeconds=10
-  #   # Helm release name
-  #   helm-operator
-  #   # Chart name
-  #   helm-operator
-  shell: |
-    helm upgrade \
-    --install \
-    --repo "https://charts.fluxcd.io" \
-    --namespace oas \
-    --version {{ helm_operator.version }} \
-    --set helm.versions=v3 \
-    --set configureRepositories.enable=true \
-    --set configureRepositories.repositories[0].name=stable \
-    --set configureRepositories.repositories[0].url=https://charts.helm.sh/stable \
-    --set configureRepositories.repositories[1].name=bitnami \
-    --set configureRepositories.repositories[1].url=https://charts.bitnami.com/bitnami \
-    --set configureRepositories.repositories[2].name=nextcloud \
-    --set configureRepositories.repositories[2].url=https://nextcloud.github.io/helm \
-    --set chartsSyncInterval=20m \
-    --set statusUpdateInterval=30s \
-    --set resources.requests.cpu=500m \
-    --set resources.requests.memory=2Gi \
-    --set resources.limits.cpu=1200m \
-    --set resources.limits.memory=2Gi \
-    --set livenessProbe.initialDelaySeconds=10 \
-    --set readinessProbe.initialDelaySeconds=10 \
-    {{ extra_opts }} \
-    helm-operator \
-    helm-operator
-
-- name: Create Kubernetes secret with local-path-provisioner settings
-  tags:
-    - config
-    - flux
-    - local-path-provisioner
-  vars:
-    flux_secret:
-      name: "local-path-provisioner"
-      namespace: "kube-system"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - local-path-provisioner
-  when: "'local-path-provisioner' in enabled_applications"
-
-# We have to install local-path-provisioner before other charts, otherwise the PVCs
-# created by those charts will not have the right default storageclass assigned
-# to them.
-# It will still be managed by flux afterwards.
-- name: Create local-path-provisioner HelmResource
-  tags:
-    - config
-    - flux
-    - local-path-provisioner
-  k8s:
-    state: present
-    resource_definition: "{{ lookup('file', 'local-path-provisioner_hr.yaml') | from_yaml }}"
-  when: "'local-path-provisioner' in enabled_applications"
-
-- name: Install flux
-  tags:
-    - flux
-  vars:
-    # The first url below points to the "local-flux" nginx pod that is running
-    # inside the cluster, and is serving the git repo with HelmRelease files
-    # over http.
-    git_url: "{{ 'http://local-flux.oas.svc.cluster.local/.git' if flux.local_flux else flux.repo }}"
-    git_branch: "{{ 'master' if flux.local_flux else flux.branch }}"
-    git_path: "{{ '.' if flux.local_flux else 'flux' }}"
-    extra_opts: "{{ flux.extra_opts | default('') }}"
-  # Commented version of below shell command:
-  # helm upgrade
-  #   # Install a new release if it doesn't yet exist.
-  #   --install
-  #   --repo "https://charts.fluxcd.io"
-  #   --namespace oas
-  #   --version 1.3.0
-  #   # The git repo that flux listens to for changes.
-  #   --set git.url="{{ git_url }}"
-  #   # The branch of the git repo that flux listens to for changes.
-  #   --set git.branch="{{ git_branch }}"
-  #   # The directory within the git repo that flux listens to for changes.
-  #   --set git.path="{{ git_path }}"
-  #   --set git.readonly=true
-  #   # Do not do follow updates of upstream docker images automatically.
-  #   --set registry.excludeImage='*'
-  #   # Necessary for read-only mode.
-  #   --set sync.state="secret"
-  #   # Delete resources originally created by Flux when their manifests
-  #   # are removed from the git repo.
-  #   --set syncGarbageCollection.enabled=true
-  #   # Look for .flux.yaml files for manifest generation.
-  #   --set manifestGeneration=true
-  #   # Set the interval between checks for updates in the git repo to 1 hour.
-  #   --set git.pollInterval=1h
-  #   # Resource requests are already auto-defined. Set some limits:
-  #   --set resources.limits.cpu=50m
-  #   --set resources.limits.memory=600Mi
-  #   # Helm release name
-  #   flux
-  #   # Chart name
-  #   flux
-  shell: |
-    helm upgrade --install \
-    --repo "https://charts.fluxcd.io" \
-    --namespace oas \
-    --version "{{ flux.version }}" \
-    --set git.url="{{ git_url }}" \
-    --set git.branch="{{ git_branch }}" \
-    --set git.path="{{ git_path }}" \
-    --set git.readonly=true \
-    --set registry.excludeImage='*' \
-    --set sync.state="secret" \
-    --set syncGarbageCollection.enabled=true \
-    --set manifestGeneration=true \
-    --set git.pollInterval=1h \
-    --set resources.requests.cpu=300m \
-    --set resources.limits.cpu=400m \
-    --set resources.limits.memory=600Mi \
-    {{ extra_opts }} flux flux
-
-- name: Install fluxctl via snap
-  tags:
-    - flux
-  command: snap install --classic fluxctl
-  args:
-    creates: /snap/bin/fluxctl
diff --git a/ansible/roles/apps/tasks/eventrouter.yml b/ansible/roles/apps/tasks/eventrouter.yml
deleted file mode 100644
index 89b351e50aa291fb2ebb041a04abce482ca26664..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/eventrouter.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-- name: Create Kubernetes secret with eventrouter settings
-  tags:
-    - config
-    - flux
-    - eventrouter
-  vars:
-    flux_secret:
-      name: "eventrouter"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - eventrouter
diff --git a/ansible/roles/apps/tasks/flux-custom.yml b/ansible/roles/apps/tasks/flux-custom.yml
deleted file mode 100644
index cb71ab9c5e2c704f1cd81b2fdd51bf93da857f21..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/flux-custom.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: Create Kubernetes secret with flux-custom settings
-  tags:
-    - config
-    - flux
-    - flux-custom
-  vars:
-    flux_secret:
-      name: "flux-custom"
-      namespace: "oas-custom"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - flux-custom
diff --git a/ansible/roles/apps/tasks/flux_secret.yml b/ansible/roles/apps/tasks/flux_secret.yml
deleted file mode 100644
index 89bbe26b737e2549610fe01ebc647265c38423d9..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/flux_secret.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Create Kubernetes secret with app settings
-  tags:
-    - config
-    - flux
-    - secret
-  vars:
-    # Merge extra values from i.e. `prometheus_extra_values`
-    extra_values: "{{ vars[flux_secret.name + '_extra_values'] | default({}) }}"
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "{{ flux_secret.namespace | default('oas-apps') }}"
-        name: "{{ flux_secret.name }}-settings"
-      data:
-        values.yaml: "{{ lookup('template','settings/{{ flux_secret.name }}.yaml') | from_yaml | combine(extra_values, recursive=True) | to_nice_yaml(indent=2) | b64encode }}"
-        enabled: "{{ (flux_secret.name in enabled_applications) | ternary('true', 'false') | b64encode }}"
diff --git a/ansible/roles/apps/tasks/letsencrypt.yml b/ansible/roles/apps/tasks/letsencrypt.yml
deleted file mode 100644
index e08880f2ac06b505e284cf74948c7ab781c5f3ab..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/letsencrypt.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-
-- name: Create Kubernetes secret with settings for letsencrypt issuers
-  tags:
-    - config
-    - flux
-    - letsencrypt
-  vars:
-    flux_secret:
-      name: "letsencrypt-{{ item }}"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - letsencrypt
-  with_items:
-    - "production"
-    - "staging"
diff --git a/ansible/roles/apps/tasks/loki.yml b/ansible/roles/apps/tasks/loki.yml
deleted file mode 100644
index eaac7c223e97a65e11c73f1b6e901a12b4e56155..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/loki.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-- name: Create Kubernetes secret with loki settings
-  tags:
-    - config
-    - flux
-    - loki
-  vars:
-    flux_secret:
-      name: "loki"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - loki
diff --git a/ansible/roles/apps/tasks/main.yml b/ansible/roles/apps/tasks/main.yml
deleted file mode 100644
index 3d0b0eff97b18d3cbd09acf12445211079ddc6fb..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/main.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-
-- name: Install namespaces, helm operator, local-path-provisioner and flux
-  import_tasks: core.yml
-
-- name: Tasks pertaining to cert-manager
-  import_tasks: cert-manager.yml
-
-- name: Tasks pertaining to letsencrypt
-  import_tasks: letsencrypt.yml
-
-- name: Tasks pertaining to prometheus and grafana
-  import_tasks: prometheus-stack.yml
-
-- name: Tasks pertaining to loki
-  import_tasks: loki.yml
-
-- name: Tasks pertaining to promtail
-  import_tasks: promtail.yml
-
-- name: Tasks pertaining to eventrouter
-  import_tasks: eventrouter.yml
-
-- name: Tasks pertaining to Single sign-on
-  import_tasks: single-sign-on.yml
-
-- name: Tasks pertaining to Rocket.chat
-  import_tasks: rocketchat.yml
-
-- name: Tasks pertaining to NextCloud
-  import_tasks: nextcloud.yml
-
-- name: Tasks pertaining to WordPress
-  import_tasks: wordpress.yml
-
-- name: Tasks pertaining to velero
-  import_tasks: velero.yml
-  when: backup.s3.enabled
-
-- name: Tasks pertaining to flux-custom
-  import_tasks: flux-custom.yml
-
-- name: Tasks pertaining to metallb
-  import_tasks: metallb.yml
diff --git a/ansible/roles/apps/tasks/metallb.yml b/ansible/roles/apps/tasks/metallb.yml
deleted file mode 100644
index 4aaa86aa94894b310979a77ca2fa4f2408af7bde..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/metallb.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: Create metallb secret with metallb settings
-  tags:
-    - config
-    - flux
-    - metallb
-  vars:
-    flux_secret:
-      name: "metallb"
-      namespace: "kube-system"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - metallb
diff --git a/ansible/roles/apps/tasks/nextcloud.yml b/ansible/roles/apps/tasks/nextcloud.yml
deleted file mode 100644
index 387a5781f1ebfd6c500999571f8b963d49b2f2ee..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/nextcloud.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: Create Kubernetes secret with nextcloud settings
-  tags:
-    - config
-    - flux
-    - nextcloud
-  vars:
-    flux_secret:
-      name: "nextcloud"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - nextcloud
-
-- name: Create persistent volumes for nextcloud data and metadata
-  tags:
-    - config
-    - nextcloud
-    - storage
-  vars:
-    pvc:
-      name: "nextcloud-{{ item.name }}"
-      namespace: "oas-apps"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - nextcloud
-        - storage
-  with_items:
-    - name: "files"
-      size: 2Gi
-    - name: "mariadb"
-      size: 512Mi
diff --git a/ansible/roles/apps/tasks/prometheus-stack.yml b/ansible/roles/apps/tasks/prometheus-stack.yml
deleted file mode 100644
index 7941f373353210bbf95c8ad262bbd422096cef72..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/prometheus-stack.yml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- name: Create prometheus auth secret for basic auth
-  tags:
-    - prometheus-stack
-    - config
-    - secret
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas"
-        name: "prometheus-basic-auth"
-      data:
-        auth: "{{ ('admin:' + (prometheus_basic_auth | password_hash('apr_md5_crypt')) + '\n')  | b64encode }}"
-
-- name: Create alertmanager auth secret for basic auth
-  tags:
-    - prometheus-stack
-    - config
-    - secret
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas"
-        name: "alertmanager-basic-auth"
-      data:
-        auth: "{{ ('admin:' + (alertmanager_basic_auth | password_hash('apr_md5_crypt')) + '\n')  | b64encode }}"
-
-- name: Create Kubernetes secret with prometheus settings
-  tags:
-    - config
-    - flux
-    - monitoring
-    - prometheus-stack
-  vars:
-    flux_secret:
-      name: "prometheus-stack"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - monitoring
-        - prometheus-stack
-
-- name: Create prometheus-related persistent volumes
-  tags:
-    - config
-    - monitoring
-    - prometheus-stack
-  vars:
-    pvc:
-      name: "{{ item.name }}"
-      namespace: "oas"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - monitoring
-        - prometheus-stack
-  loop:
-    - name: "alertmanager"
-      size: "2Gi"
-    - name: "grafana"
-      size: "2Gi"
diff --git a/ansible/roles/apps/tasks/promtail.yml b/ansible/roles/apps/tasks/promtail.yml
deleted file mode 100644
index 7e2bf955eeaee1a9aeb3b5b7d143affb22fe1b43..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/promtail.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-- name: Create Kubernetes secret with promtail settings
-  tags:
-    - config
-    - flux
-    - promtail
-  vars:
-    flux_secret:
-      name: "promtail"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - promtail
diff --git a/ansible/roles/apps/tasks/pvc.yml b/ansible/roles/apps/tasks/pvc.yml
deleted file mode 100644
index e79c4f11b33aa0f8d6a403a3761de5f3c26f9512..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/pvc.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Create Persistent Volume Claim for application
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: PersistentVolumeClaim
-      metadata:
-        namespace: "{{ pvc.namespace | default('oas-apps') }}"
-        name: "{{ pvc.name }}"
-      spec:
-        accessModes:
-          - "{{ pvc.accessMode | default('ReadWriteOnce') }}"
-        volumeMode: Filesystem
-        resources:
-          requests:
-            storage: "{{ pvc.size }}"
-        storageClassName: "{{ pvc.storageClass | default('local-path') }}"
diff --git a/ansible/roles/apps/tasks/rocketchat.yml b/ansible/roles/apps/tasks/rocketchat.yml
deleted file mode 100644
index 875e52872925771aca358e54e446d5d19678e4a3..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/rocketchat.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-
-- name: Create Kubernetes secret with Rocket.Chat settings
-  tags:
-    - config
-    - flux
-    - rocketchat
-  vars:
-    flux_secret:
-      name: "rocketchat"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - rocketchat
-
-- name: Create persistent volumes for rocketchat data
-  tags:
-    - config
-    - rocketchat
-    - storage
-  vars:
-    pvc:
-      name: "rocketchat-{{ item.name }}"
-      namespace: "oas-apps"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - rocketchat
-        - storage
-  with_items:
-    - name: "data"
-      size: 1Gi
diff --git a/ansible/roles/apps/tasks/single-sign-on.yml b/ansible/roles/apps/tasks/single-sign-on.yml
deleted file mode 100644
index 04a0f5c3df519b8913a4cf1630bbff06ce99eb52..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/single-sign-on.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-
-- name: Create Kubernetes secret with single-sign-on settings
-  tags:
-    - config
-    - flux
-    - single-sign-on
-  vars:
-    flux_secret:
-      name: "single-sign-on"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - single-sign-on
-
-- name: Create persistent volumes for single-sign-on userbackend data
-  tags:
-    - config
-    - single-sign-on
-    - storage
-  vars:
-    pvc:
-      name: "single-sign-on-{{ item.name }}"
-      namespace: "oas"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - single-sign-on
-        - storage
-  with_items:
-    - name: "userbackend"
-      size: 1Gi
diff --git a/ansible/roles/apps/tasks/velero.yml b/ansible/roles/apps/tasks/velero.yml
deleted file mode 100644
index b8b8ddc8f45d78b2a3a1abe70cb812f9180a91ee..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/velero.yml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-
-- name: Create Kubernetes secret with velero settings
-  tags:
-    - config
-    - flux
-    - velero
-  vars:
-    flux_secret:
-      name: "velero"
-      namespace: "velero"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - velero
-
-- name: Create Kubernetes secret with velero S3 credentials
-  tags:
-    - config
-    - secret
-    - velero
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "velero"
-        name: "credentials"
-      data:
-        cloud: "{{ lookup('template','s3-credentials') | b64encode }}"
-
-- name: Get current velero CLI version
-  tags:
-    - velero
-  shell: "velero version | head -n 2 | tail -n 1 | cut -d' ' -f 2 | cut -d'v' -f 2"
-  failed_when: false
-  register: velero_version
-  changed_when: false
-
-- name: Show current velero CLI version
-  tags:
-    - velero
-    - debug
-  debug:
-    msg: 'Current velero version is: {{ velero_version.stdout }}'
-
-- name: Download velero CLI
-  tags:
-    - velero
-  get_url:
-    url: "https://github.com/vmware-tanzu/velero/releases/download/v{{ velero.version }}/velero-v{{ velero.version }}-linux-amd64.tar.gz"
-    checksum: "{{ velero.checksum }}"
-    dest: "/tmp/velero-v{{ velero.version }}.tar.gz"
-  when: velero_version.stdout != velero.version
-
-- name: Unpack velero CLI
-  tags:
-    - velero
-  unarchive:
-    remote_src: true
-    src: "/tmp/velero-v{{ velero.version }}.tar.gz"
-    dest: "/tmp/"
-  when: velero_version.stdout != velero.version
-
-- name: Install velero CLI
-  tags:
-    - velero
-  copy:
-    remote_src: true
-    src: "/tmp/velero-v{{ velero.version }}-linux-amd64/velero"
-    dest: "/usr/local/bin/velero"
-    owner: root
-    group: root
-    mode: "0755"
-  become: true
-  when: velero_version.stdout != velero.version
diff --git a/ansible/roles/apps/tasks/wordpress.yml b/ansible/roles/apps/tasks/wordpress.yml
deleted file mode 100644
index ef56ad69ead930d9f0a15031c7f2b544b4f283ba..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/wordpress.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Create Kubernetes secret with wordpress settings
-  tags:
-    - config
-    - flux
-    - wordpress
-  vars:
-    flux_secret:
-      name: "wordpress"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - wordpress
-
-
-- name: Create persistent volumes for wordpress data and metadata
-  tags:
-    - config
-    - wordpress
-    - storage
-  vars:
-    pvc:
-      name: "wordpress-{{ item.name }}"
-      namespace: "oas-apps"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - wordpress
-        - storage
-  with_items:
-    - name: "files"
-      size: 2Gi
-    - name: "mariadb"
-      size: 512Mi
diff --git a/ansible/roles/apps/templates/s3-credentials b/ansible/roles/apps/templates/s3-credentials
deleted file mode 100644
index 76827ff1217ec20ba665042bc005cf2b2d8be509..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/s3-credentials
+++ /dev/null
@@ -1,3 +0,0 @@
-[default]
-aws_access_key_id={{ lookup('file', '{{ cluster_dir }}/secrets/s3_access_key') }}
-aws_secret_access_key={{ lookup('file', '{{ cluster_dir }}/secrets/s3_secret_key') }}
diff --git a/ansible/roles/apps/templates/settings/cert-manager.yaml b/ansible/roles/apps/templates/settings/cert-manager.yaml
deleted file mode 100644
index b66a9e110e70cfc9a278ccd1fc1e0d3c11056b15..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/cert-manager.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-ingressShim:
-  {% if acme_staging %}
-  defaultIssuerName: "letsencrypt-staging"
-  {% else %}
-  defaultIssuerName: "letsencrypt-production"
-  {% endif %}
-  defaultIssuerKind: ClusterIssuer
-resources:
-  requests:
-    cpu: 20m
-    memory: 130Mi
-  limits:
-    cpu: 40m
-    memory: 260Mi
-cainjector:
-  resources:
-    requests:
-      cpu: 20m
-      memory: 140Mi
-    limits:
-      cpu: 40m
-      memory: 280Mi
-webhook:
-  resources:
-    requests:
-      cpu: 10m
-      memory: 23Mi
-    limits:
-      cpu: 20m
-      memory: 46Mi
-installCRDs: true
diff --git a/ansible/roles/apps/templates/settings/eventrouter.yaml b/ansible/roles/apps/templates/settings/eventrouter.yaml
deleted file mode 100644
index 8be599e67e9a8b005f6e7397d821783f55210b0d..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/eventrouter.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# https://github.com/helm/charts/tree/master/stable/eventrouter
-sink: stdout
-resources:
-  limits:
-    memory: 200Mi
-    cpu: 200m
-  requests:
-    memory: 100Mi
-    cpu: 100m
diff --git a/ansible/roles/apps/templates/settings/flux-custom.yaml b/ansible/roles/apps/templates/settings/flux-custom.yaml
deleted file mode 100644
index abb9d95c4d6084fca62c6b158060b4adae88bef7..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/flux-custom.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Since flux-custom needs to get configured entrirely by the user, so we
-# don't provide any defaults. Please refer to
-# https://docs.openappstack.net/en/latest/customization.html for how to
-# configure your custom flux installation.
-#
-# Since ansible can't combine() an empty dictionary we provide one dummy
-# default value here.
-
-git:
-  branch: master
diff --git a/ansible/roles/apps/templates/settings/letsencrypt-production.yaml b/ansible/roles/apps/templates/settings/letsencrypt-production.yaml
deleted file mode 100644
index bc749009c570592ab4111d2f45ae1ad7f14915a3..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/letsencrypt-production.yaml
+++ /dev/null
@@ -1 +0,0 @@
-email: "{{ admin_email }}"
diff --git a/ansible/roles/apps/templates/settings/letsencrypt-staging.yaml b/ansible/roles/apps/templates/settings/letsencrypt-staging.yaml
deleted file mode 100644
index bc749009c570592ab4111d2f45ae1ad7f14915a3..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/letsencrypt-staging.yaml
+++ /dev/null
@@ -1 +0,0 @@
-email: "{{ admin_email }}"
diff --git a/ansible/roles/apps/templates/settings/local-path-provisioner.yaml b/ansible/roles/apps/templates/settings/local-path-provisioner.yaml
deleted file mode 100644
index 25db460906c68c3da97cee027c9986b0cb7014b7..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/local-path-provisioner.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-nodePathMap:
-  - node: DEFAULT_PATH_FOR_NON_LISTED_NODES
-    paths:
-      - "/var/lib/OpenAppStack/local-storage"
-storageClass:
-  defaultClass: true
-# We temporarily use our own build in order to use local volumes instead of
-# hostPath.
-image:
-  repository: "open.greenhost.net:4567/openappstack/openappstack/local-path-provisioner"
-  tag: "52f994f-amd64"
-
-
-resources:
-  requests:
-    cpu: 200m
-    memory: 20Mi
-  limits:
-    cpu: 400m
-    memory: 40Mi
diff --git a/ansible/roles/apps/templates/settings/loki.yaml b/ansible/roles/apps/templates/settings/loki.yaml
deleted file mode 100644
index 0983a57b9b45f3b0ef26993aac625840ef77a61d..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/loki.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-enabled: true
-resources:
-  limits:
-    cpu: 400m
-    memory: 180Mi
-  requests:
-    cpu: 200m
-    memory: 90Mi
diff --git a/ansible/roles/apps/templates/settings/metallb.yaml b/ansible/roles/apps/templates/settings/metallb.yaml
deleted file mode 100644
index 095145a2aa6a990a05a9fd3053975ba77691d9ad..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/metallb.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-# https://artifacthub.io/packages/helm/bitnami/metallb#example-layer-2-configuration
-configInline:
-  address-pools:
-    - name: default
-      protocol: layer2
-      addresses:
-        - "{{ ip_address }}/32"
diff --git a/ansible/roles/apps/templates/settings/nextcloud.yaml b/ansible/roles/apps/templates/settings/nextcloud.yaml
deleted file mode 100644
index 4b5138520b10055faaed701afcccda3dfe617d01..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/nextcloud.yaml
+++ /dev/null
@@ -1,180 +0,0 @@
-#jinja2: lstrip_blocks: "True"
-nextcloud:
-  nextcloud:
-    host: "files.{{ domain }}"
-    password: "{{ nextcloud_password }}"
-    mail:
-      enabled: {{ outgoing_mail.enabled  | default("false", true) }}
-      {% if outgoing_mail.enabled %}
-      {% set outgoing_mail_domain = outgoing_mail.fromAddress.split('@')[-1] %}
-      {% set outgoing_mail_from = outgoing_mail.fromAddress.split('@')[0] %}
-      fromAddress: "{{ outgoing_mail_from }}"
-      domain: "{{ outgoing_mail_domain }}"
-      smtp:
-        host: "{{ outgoing_mail.smtp.host }}"
-        {% if outgoing_mail.smtp.ssl %}
-        secure: "tls"
-        {% else %}
-        secure: ""
-        {% endif %}
-        port: "{{ outgoing_mail.smtp.port }}"
-        name: "{{ outgoing_mail.smtp.user }}"
-        password: "{{ outgoing_mail.smtp.password }}"
-        authtype: "{{ outgoing_mail.smtp.authtype }}"
-      {% endif %}
-  cronjob:
-    # Set curl to accept insecure connections when acme staging is used
-    curlInsecure: "{{ acme_staging }}"
-
-  ingress:
-    enabled: true
-    annotations:
-      # Tell cert-manager to automatically get a TLS certificate
-      kubernetes.io/tls-acme: "true"
-      # Set max body size high to allow big NextCloud uploads
-      nginx.ingress.kubernetes.io/proxy-body-size: 1G
-      nginx.ingress.kubernetes.io/server-snippet: |-
-        server_tokens off;
-        proxy_hide_header X-Powered-By;
-        rewrite ^/.well-known/webfinger /public.php?service=webfinger last;
-        rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
-        rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json;
-        location = /.well-known/carddav {
-          return 301 $scheme://$host/remote.php/dav;
-        }
-        location = /.well-known/caldav {
-          return 301 $scheme://$host/remote.php/dav;
-        }
-        location = /robots.txt {
-          allow all;
-          log_not_found off;
-          access_log off;
-        }
-    hosts:
-      - "files.{{ domain }}"
-    tls:
-      - hosts:
-          - "files.{{ domain }}"
-        secretName: oas-nextcloud-files
-
-  # Use 2 GB of storage for NC storage (maybe make configurable later?)
-  persistence:
-    enabled: true
-    existingClaim: "nextcloud-files"
-
-  podAnnotations:
-    # Let the backup system include nextcloud data.
-    backup.velero.io/backup-volumes: "nextcloud-data"
-
-  # Explicitly disable use of internal database
-  internalDatabase:
-    enabled: false
-
-  livenessProbe:
-    initialDelaySeconds: 300
-    failureThreshold: 20
-  readinessProbe:
-    initialDelaySeconds: 300
-
-  resources:
-    limits:
-      cpu: 500m
-      memory: 512Mi
-    requests:
-      cpu: 200m
-      memory: 256Mi
-
-  # Enable and configure MariaDB chart
-  mariadb:
-    db:
-      password: "{{ nextcloud_mariadb_password }}"
-    enabled: true
-    master:
-      annotations:
-        # Let the backup system include nextcloud database data.
-        backup.velero.io/backup-volumes: "data"
-      persistence:
-        ## Enable PostgreSQL persistence using Persistent Volume Claims.
-        enabled: true
-        existingClaim: "nextcloud-mariadb"
-      resources:
-        limits:
-          cpu: 200m
-          memory: 512Mi
-        requests:
-          cpu: 100m
-          memory: 256Mi
-    replication:
-      enabled: false
-    rootUser:
-      password: "{{ nextcloud_mariadb_root_password }}"
-
-setupApps:
-  backoffLimit: 20
-
-onlyoffice:
-  resources:
-    limits:
-      cpu: 800m
-      memory: 2Gi
-    requests:
-      cpu: 200m
-      memory: 1Gi
-  server_name: "office.{{ domain }}"
-  ingress:
-    enabled: true
-    annotations:
-      # Tell cert-manager to automatically get a TLS certificate
-      kubernetes.io/tls-acme: "true"
-    paths:
-      - "/"
-    hosts:
-      - "office.{{ domain }}"
-    tls:
-      - hosts:
-          - "office.{{ domain }}"
-        secretName: oas-nextcloud-office
-  jwtSecret: "{{ onlyoffice_jwt_secret }}"
-
-postgresql:
-  postgresqlPassword: "{{ onlyoffice_postgresql_password }}"
-  resources:
-    limits:
-      cpu: 100m
-      memory: 64Mi
-    requests:
-      cpu: 20m
-      memory: 32Mi
-
-rabbitmq:
-  rabbitmq:
-    password: "{{ onlyoffice_rabbitmq_password }}"
-  persistence:
-    enabled: false
-  resources:
-    limits:
-      cpu: 300m
-      memory: 256Mi
-    requests:
-      cpu: 100m
-      memory: 128Mi
-
-redis:
-  cluster:
-    enabled: false
-  master:
-    persistence:
-      enabled: false
-    resources:
-      limits:
-        cpu: 100m
-        memory: 64Mi
-      requests:
-        cpu: 50m
-        memory: 32Mi
-
-sociallogin:
-  server_name: "sso.{{ domain }}"
-  client_id: nextcloud
-  client_secret: "{{ nextcloud_oauth_client_secret }}"
-  groups_claim: "openappstack_roles"
diff --git a/ansible/roles/apps/templates/settings/prometheus-stack.yaml b/ansible/roles/apps/templates/settings/prometheus-stack.yaml
deleted file mode 100644
index b73b1bc41a8a700d71ca7c01b49a39e083096b07..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/prometheus-stack.yaml
+++ /dev/null
@@ -1,269 +0,0 @@
-#jinja2:lstrip_blocks:"True"
-# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml
-
-# From: https://github.com/cablespaghetti/k3s-monitoring/blob/master/kube-prometheus-stack-values.yaml
-# Disable etcd monitoring. See https://github.com/cablespaghetti/k3s-monitoring/issues/4
-kubeEtcd:
-  enabled: false
-
-# Disable kube-controller-manager and kube-scheduler monitoring. See https://github.com/cablespaghetti/k3s-monitoring/issues/2
-kubeControllerManager:
-  enabled: false
-kubeScheduler:
-  enabled: false
-
-alertmanager:
-  persistentVolume:
-    existingClaim: "alertmanager"
-  ingress:
-    enabled: true
-    annotations:
-      nginx.ingress.kubernetes.io/auth-type: basic
-      nginx.ingress.kubernetes.io/auth-secret: alertmanager-basic-auth
-      nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required'
-      kubernetes.io/tls-acme: "true"
-    pathType: ImplementationSpecific
-    hosts:
-      - "alertmanager.{{ domain }}"
-    tls:
-      - secretName: alertmanager-tls
-        hosts:
-          - "alertmanager.{{ domain }}"
-  config:
-    {% if outgoing_mail.enabled %}
-    global:
-      smtp_from: "{{ outgoing_mail.fromAddress }}"
-      smtp_smarthost: "{{ outgoing_mail.smtp.host }}:{{ outgoing_mail.smtp.port }}"
-      smtp_auth_username: "{{ outgoing_mail.smtp.user }}"
-      smtp_auth_password: "{{ outgoing_mail.smtp.password }}"
-    {% endif %}
-    route:
-      group_by: ['job']
-      group_wait: 30s
-      group_interval: 5m
-      repeat_interval: 1h
-      {% if outgoing_mail.enabled %}
-      receiver: email
-      {% else %}
-      receiver: 'null'
-      {% endif %}
-      routes:
-      - match:
-          # This is an alert meant to ensure that the entire alerting pipeline is functional.
-          # This alert is always firing, therefore it should always be firing in Alertmanager
-          # and always fire against a receiver. There are integrations with various notification
-          # mechanisms that send a notification when this alert is not firing. For example the
-          # "DeadMansSnitch" integration in PagerDuty.
-          alertname: Watchdog
-        receiver: 'null'
-
-    receivers:
-    - name: 'null'
-    {% if outgoing_mail.enabled %}
-    - name: email
-      email_configs:
-      - send_resolved: true
-        to: {{ admin_email }}
-    {% endif %}
-
-    # Inhibition rules allow to mute a set of alerts given that another alert is firing.
-    # We use this to mute any warning-level notifications if the same alert is already critical.
-    inhibit_rules:
-    - source_match:
-        severity: 'critical'
-      target_match:
-        severity: 'warning'
-      # Apply inhibition if the alertname is the same.
-      equal: ['alertname', 'namespace']
-
-  alertmanagerSpec:
-#    replicas: 3
-#    podAntiAffinity: "soft"
-    storage:
-      volumeClaimTemplate:
-        spec:
-          accessModes: ["ReadWriteOnce"]
-          resources:
-            requests:
-              storage: 1Gi
-#    resources:
-#      limits:
-#        cpu: 500m
-#        memory: 64Mi
-#      requests:
-#        cpu: 25m
-#        memory: 32Mi
-#    priorityClassName: high-priority
-
-
-prometheus:
-  # https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus
-  prometheusSpec:
-    scrapeInterval: "3m"
-    evaluationInterval: "3m"
-    retention: "30d"
-
-#    replicas: 2
-#    podAntiAffinity: "hard"
-    storageSpec:
-      volumeClaimTemplate:
-        spec:
-          accessModes: ["ReadWriteOnce"]
-          resources:
-            requests:
-              storage: 10Gi
-
-    resources:
-      limits:
-        cpu: 200m
-        memory: 1Gi
-      requests:
-        cpu: 100m
-        memory: 512Mi
-
-  ingress:
-    enabled: true
-    annotations:
-      nginx.ingress.kubernetes.io/auth-type: basic
-      nginx.ingress.kubernetes.io/auth-secret: prometheus-basic-auth
-      nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required'
-      kubernetes.io/tls-acme: "true"
-    pathType: ImplementationSpecific
-    hosts:
-      - "prometheus.{{ domain }}"
-    tls:
-      - secretName: prometheus-tls
-        hosts:
-          - "prometheus.{{ domain }}"
-
-#
-#  service:
-#    sessionAffinity: "ClientIP"
-#
-
-grafana:
-  # https://github.com/grafana/helm-charts/tree/main/charts/grafana
-  adminPassword: "{{ grafana_admin_password }}"
-  grafana.ini:
-    server:
-      root_url: "https://grafana.{{ domain }}"
-    auth.generic_oauth:
-      name: OpenAppStack
-      enabled: true
-      client_id: grafana
-      client_secret: "{{ grafana_oauth_client_secret }}"
-      scopes: "openid profile email openappstack_roles"
-      auth_url: "https://sso.{{ domain }}/oauth2/auth"
-      token_url: "https://sso.{{ domain }}/oauth2/token"
-      api_url: "https://sso.{{ domain }}/userinfo"
-      role_attribute_path: contains(openappstack_roles[*], 'admin') && 'Admin' || 'Editor'
-  ingress:
-    enabled: true
-    annotations:
-      kubernetes.io/tls-acme: "true"
-    pathType: ImplementationSpecific
-    hosts:
-      - "grafana.{{ domain }}"
-    tls:
-      - secretName: grafana-tls
-        hosts:
-          - "grafana.{{ domain }}"
-  persistence:
-    enabled: true
-    existingClaim: "grafana"
-  podAnnotations:
-    backup.velero.io/backup-volumes: "storage"
-
-  # This allows us to pick up the Loki datasource
-  # sidecar:
-  #   datasources:
-  #     enabled: true
-  #     label: grafana_datasource
-  #   # Make a configmap with the label `grafana_dashboard` to add dashboards to
-  #   # Grafana.
-  #   dashboards:
-  #     enabled: true
-  #     lablel: grafana_dashboard
-
-  # dashboardProviders:
-  #   dashboardproviders.yaml:
-  #     apiVersion: 1
-  #     providers:
-  #     - name: 'default'
-  #       orgId: 1
-  #       folder: ''
-  #       type: file
-  #       disableDeletion: false
-  #       editable: true
-  #       options:
-  #         path: /var/lib/grafana/dashboards
-  # dashboards:
-  #   default:
-  #     kube-dash:
-  #       gnetId: 11074
-  #       revision: 2
-  #       datasource: Prometheus
-  #     loki-dash:
-  #       gnetId: 10880
-  #       revision: 1
-  #       datasource: Loki
-
-  # datasources:
-  #  datasources.yaml:
-  #    apiVersion: 1
-  #    datasources:
-  #    - name: Prometheus
-  #      type: prometheus
-  #      url: http://prometheus-server
-  #      access: proxy
-  #      isDefault: true
-
-  plugins:
-    - grafana-piechart-panel
-
-  resources:
-    limits:
-      cpu: 200m
-      memory: 256Mi
-    requests:
-      cpu: 100m
-      memory: 128Mi
-#
-#  sidecar:
-#    resources:
-#      limits:
-#        cpu: 100m
-#        memory: 128Mi
-#      requests:
-#        cpu: 5m
-#        memory: 64Mi
-
-prometheusOperator:
-  resources:
-    limits:
-      cpu: 200m
-      memory: 256Mi
-    requests:
-      cpu: 100m
-      memory: 128Mi
-#  priorityClassName: high-priority
-
-prometheus-node-exporter:
-  resources:
-    limits:
-      cpu: 200m
-      memory: 32Mi
-    requests:
-      cpu: 100m
-      memory: 16Mi
-#  priorityClassName: high-priority
-
-kube-state-metrics:
-  resources:
-    limits:
-      cpu: 200m
-      memory: 64Mi
-    requests:
-      cpu: 100m
-      memory: 32Mi
-#  priorityClassName: high-priority
diff --git a/ansible/roles/apps/templates/settings/promtail.yaml b/ansible/roles/apps/templates/settings/promtail.yaml
deleted file mode 100644
index fcba214385e9fa052b2abb8897c391a74d132149..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/promtail.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-enabled: true
-initContainer:
-  enabled: true
-  fsInotifyMaxUserInstances: 512
-resources:
-  limits:
-    cpu: 400m
-    memory: 256Mi
-  requests:
-    cpu: 300m
-    memory: 128Mi
-config:
-  lokiAddress: http://loki:3100/loki/api/v1/push
-  # https://github.com/grafana/helm-charts/blob/main/charts/promtail/values.yaml#L217
-  snippets:
-    # https://grafana.com/docs/loki/latest/clients/promtail/pipelines/
-    pipelineStages:
-      - cri: {}
-      - match:
-          selector: '{app="eventrouter"}'
-          stages:
-            - json:
-                expressions:
-                  event_verb: verb
-                  event_kind: event.involvedObject.kind
-                  event_reason: event.reason
-                  event_namespace: event.involvedObject.namespace
-                  event_name: event.metadata.name
-                  event_source_host: event.source.host
-                  event_source_component: event.source.component
-            - labels:
-                event_verb:
-                event_kind:
-                event_reason:
-                event_namespace:
-                event_name:
-                event_source_host:
-                event_source_component:
diff --git a/ansible/roles/apps/templates/settings/rocketchat.yaml b/ansible/roles/apps/templates/settings/rocketchat.yaml
deleted file mode 100644
index 01c5f67a50355212740f0a5704474ce8d411550d..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/rocketchat.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-# Hostname for Rocket.chat
-host: "chat.{{ domain }}"
-
-# Extra environment variables for Rocket.Chat. Used with tpl function, so this
-# needs to be a string
-extraEnv: |
-  - name: ADMIN_USERNAME
-    value: admin
-  - name: ADMIN_PASS
-    value: "{{ rocketchat_admin_password }}"
-  - name: ADMIN_EMAIL
-    value: "{{ admin_email }}"
-    # Set setup wizard to completed. The setup wizard, that allows you to
-    # create a different admin user, gets skipped.
-  - name: OVERWRITE_SETTING_Show_Setup_Wizard
-    value: completed
-  - name: E2E_Enable
-    value: "true"
-  - name: Accounts_RegistrationForm
-    value: Disabled
-  - name: Accounts_RegistrationForm_LinkReplacementText
-    value: "Create a new account at admin.{{ domain }} to add users"
-  # Custom OAuth rules:
-  - name: Accounts_OAuth_Custom_Openappstack
-    value: "true"
-  - name: Accounts_OAuth_Custom_Openappstack_url
-    value: https://sso.{{ domain }}
-  - name: Accounts_OAuth_Custom_Openappstack_token_path
-    value: /oauth2/token
-  - name: Accounts_OAuth_Custom_Openappstack_token_sent_via
-    value: payload
-  - name: Accounts_OAuth_Custom_Openappstack_identity_token_sent_via
-    value: payload
-  - name: Accounts_OAuth_Custom_Openappstack_identity_path
-    value: /userinfo
-  - name: Accounts_OAuth_Custom_Openappstack_authorize_path
-    value: /oauth2/auth
-  - name: Accounts_OAuth_Custom_Openappstack_scope
-    value: openid profile openappstack_roles email
-  - name: Accounts_OAuth_Custom_Openappstack_id
-    value: rocketchat
-  - name: Accounts_OAuth_Custom_Openappstack_secret
-    value: {{ rocketchat_oauth_client_secret }}
-  - name: Accounts_OAuth_Custom_Openappstack_login_style
-    value: redirect
-  - name: Accounts_OAuth_Custom_Openappstack_button_label_text
-    value: Login via OpenAppStack
-  - name: Accounts_OAuth_Custom_Openappstack_button_label_color
-    value: "#FFFFFF"
-  - name: Accounts_OAuth_Custom_Openappstack_button_color
-    value: "#1d74f5"
-  - name: Accounts_OAuth_Custom_Openappstack_username_field
-    value: preferred_username
-  - name: Accounts_OAuth_Custom_Openappstack_name_field
-    value: preferred_username
-  - name: Accounts_OAuth_Custom_Openappstack_roles_claim
-    value: openappstack_roles
-  - name: Accounts_OAuth_Custom_Openappstack_merge_roles
-    value: "true"
-  - name: Accounts_OAuth_Custom_Openappstack_merge_users
-    value: "true"
-  - name: Accounts_OAuth_Custom_Openappstack_show_button
-    value: "true"
-
-
-livenessProbe:
-  initialDelaySeconds: 180
-  failureThreshold: 20
-readinessProbe:
-  initialDelaySeconds: 60
-
-ingress:
-  enabled: true
-  annotations:
-    # Tell cert-manager to automatically get a TLS certificate
-    kubernetes.io/tls-acme: "true"
-  tls:
-    - hosts:
-        - "chat.{{ domain }}"
-      secretName: oas-rocketchat
-
-persistence:
-  enabled: true
-  size: 1Gi
-  existingClaim: "rocketchat-data"
-
-podAnnotations:
-  # Let the backup system include rocketchat data.
-  backup.velero.io/backup-volumes: "rocket-data"
-
-resources:
-  limits:
-    cpu: 400m
-    memory: 1024Mi
-  requests:
-    cpu: 100m
-    memory: 768Mi
-
-mongodb:
-  mongodbRootPassword: "{{ rocketchat_mongodb_root_password }}"
-  mongodbPassword: "{{ rocketchat_mongodb_password }}"
-  podAnnotations:
-    # Let the backup system include rocketchat data stored in mongodb.
-    backup.velero.io/backup-volumes: "datadir"
-  persistence:
-    enabled: true
-    size: 2Gi
-  resources:
-    limits:
-      cpu: 600m
-      memory: 1024Mi
-    requests:
-      cpu: 300m
-      memory: 768Mi
-
-image:
-  tag: 3.13.0
-  pullPolicy: IfNotPresent
diff --git a/ansible/roles/apps/templates/settings/velero.yaml b/ansible/roles/apps/templates/settings/velero.yaml
deleted file mode 100644
index 8c5483d769fc6fa1f5559a5212f9151fdf7f16c8..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/velero.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-# Init containers to add to the Velero deployment's pod spec. At least one plugin provider image is required.
-initContainers:
-  - name: velero-plugin-for-aws
-    image: velero/velero-plugin-for-aws:v1.1.0
-    imagePullPolicy: IfNotPresent
-    volumeMounts:
-      - mountPath: /target
-        name: plugins
-
-# Settings for Velero's prometheus metrics. Enabled by default.
-metrics:
-  enabled: true
-  scrapeInterval: 30s
-
-  # Pod annotations for Prometheus
-  podAnnotations:
-    prometheus.io/scrape: "true"
-    prometheus.io/port: "8085"
-    prometheus.io/path: "/metrics"
-
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {}
-
-# Install CRDs as a templates. Enabled by default.
-installCRDs: true
-
-##
-## Parameters for the `default` BackupStorageLocation and VolumeSnapshotLocation,
-## and additional server settings.
-##
-configuration:
-  # Cloud provider being used (e.g. aws, azure, gcp).
-  # We don't use aws, but ceph which is S3-compatible.
-  provider: aws
-
-  # Parameters for the `default` BackupStorageLocation. See
-  # https://velero.io/docs/v1.0.0/api-types/backupstoragelocation/
-  backupStorageLocation:
-    # Cloud provider where backups should be stored. Usually should
-    # match `configuration.provider`. Required.
-    # The name "default" seems to be special: backups that don't have a
-    # location specified will use this one.
-    name: default
-    # Provider for the backup storage location. If omitted
-    # `configuration.provider` will be used instead.
-    # provider:
-    # Bucket to store backups in. Required.
-    bucket: {{ backup.s3.bucket }}
-    # Prefix within bucket under which to store backups. Optional.
-    prefix: {{ backup.s3.prefix }}
-    # Additional provider-specific configuration. See link above
-    # for details of required/optional fields for your provider.
-    config:
-      s3ForcePathStyle: true
-      s3Url: {{ backup.s3.url }}
-      region: {{ backup.s3.region }}
-
-rbac:
-  # Whether to create the Velero role and role binding to give all permissions to the namespace to Velero.
-  create: true
-  # Whether to create the cluster role binding to give administrator permissions to Velero
-  clusterAdministrator: true
-
-# Information about the Kubernetes service account Velero uses.
-serviceAccount:
-  server:
-    create: true
-    name:
-    annotations:
-
-# Info about the secret to be used by the Velero deployment, which
-# should contain credentials for the cloud provider IAM account you've
-# set up for Velero.
-credentials:
-  # Name of a pre-existing secret (if any) in the Velero namespace
-  # that should be used to get IAM account credentials. Optional.
-  {% if backup.s3.enabled %}
-  existingSecret: credentials
-  {% else %}
-  useSecret: false
-  {% endif %}
-
-# Whether to create backupstoragelocation crd, if false => do not create a default backup location
-backupsEnabled: {{ backup.s3.enabled }}
-# Whether to create volumesnapshotlocation crd, if false => disable snapshot feature
-snapshotsEnabled: false
-
-# Whether to deploy the restic daemonset.
-deployRestic: true
-
-restic:
-  podVolumePath: /var/lib/kubelet/pods
-  privileged: true
-
-# Backup schedules to create.
-{% if backup.s3.enabled %}
-schedules:
-  # This is just a name, can be anything.
-  nightly:
-    # Every night at 3:30.
-    schedule: "30 3 * * *"
-    template:
-      # Backups are stored for 60 days (1440 hours).
-      ttl: "1440h"
-      includedNamespaces:
-      # We include all namespaces.
-      - '*'
-{% else %}
-# There is no point in setting a backup schedule as we haven't configured a
-# storage location.
-schedules: {}
-{% endif %}
-
-configMaps: {}
diff --git a/ansible/roles/apps/templates/settings/wordpress.yaml b/ansible/roles/apps/templates/settings/wordpress.yaml
deleted file mode 100644
index 5fa27d45d4c4b7102f65dadb4df45e1839e54333..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/wordpress.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-wordpress:
-  config:
-    db:
-      prefix: wp_
-    adm:
-      usid: admin
-      pssw: "{{ wordpress_admin_password }}"
-  site:
-    # NOTE: Make sure you use underscore and that the localisation is in full caps
-    locale: en_US
-    url: "https://www.{{ domain }}"
-    title: "OpenAppStack website"
-
-persistence:
-  existingClaim: wordpress-files
-podAnnotations:
-  backup.velero.io/backup-volumes: "wordpress-wp-uploads"
-
-openid_connect_settings:
-  enabled: true
-  client_secret: {{ wordpress_oauth_client_secret }}
-  endpoint_login: https://sso.{{ domain }}/oauth2/auth
-  endpoint_userinfo: https://sso.{{ domain }}/userinfo
-  endpoint_token: https://sso.{{ domain }}/oauth2/token
-  endpoint_end_session: ""
-  # After our SSO supports it, we should set this as the logout URL
-  # https://open.greenhost.net/openappstack/single-sign-on/issues/28
-  # endpoint_end_session: https://sso.{{ domain }}/oauth2/sessions/logout
-  no_sslverify: "0"
-  http_request_timeout: "15"
-  enable_logging: "1"
-  scope: email profile openid openappstack_roles offline_access
-  role_mapping_enabled: true
-  role_key: openappstack_roles
-
-database:
-  db:
-    user: wordpress
-    password: "{{ wordpress_mariadb_password }}"
-  rootUser:
-    password: "{{ wordpress_mariadb_root_password }}"
-  master:
-    persistence:
-      ## Enable MariaDB persistence using Persistent Volume Claims.
-      enabled: true
-      existingClaim: "wordpress-mariadb"
-    annotations:
-      # Let the backup system include nextcloud database data.
-      backup.velero.io/backup-volumes: "data"
-    resources:
-      limits:
-        cpu: 200m
-        memory: 512Mi
-      requests:
-        cpu: 100m
-        memory: 256Mi
-  replication:
-    enabled: false
-
-# It's advisable to set resource limits to prevent your K8s cluster from
-# crashing
-resources:
-  limits:
-    cpu: 500m
-    memory: 256Mi
-  requests:
-    cpu: 100m
-    memory: 128Mi
-
-ingress:
-  enabled: true
-  annotations:
-    kubernetes.io/tls-acme: "true"
-  path: /
-  hosts:
-    - "www.{{ domain }}"
-    - "{{ domain }}"
-  tls:
-    - hosts:
-        - "www.{{ domain }}"
-        - "{{ domain }}"
-      secretName: oas-wordpress
diff --git a/ansible/roles/compatibility-checks/tasks/main.yml b/ansible/roles/compatibility-checks/tasks/main.yml
index a5e01ef1c6e6601503cd14f17c4fbc04bed9a627..1ca666a49663202d1856020e01ef0b3db157221c 100644
--- a/ansible/roles/compatibility-checks/tasks/main.yml
+++ b/ansible/roles/compatibility-checks/tasks/main.yml
@@ -11,17 +11,3 @@
     that: "cluster_dir is defined"
     msg: >
       "Please define the variable `cluster_dir`."
-
-- name: Check if all variables from settings.yml.example are set in your settings.yml file
-  assert:
-    that: "{{ item }} is defined"
-    msg: >
-      "Please define the variable `{{ item }}`."
-  with_items:
-    - ip_address
-    - domain
-    - admin_email
-    - acme_staging
-    - backup
-    - enabled_applications
-    - outgoing_mail
diff --git a/ansible/roles/kubernetes-checks/tasks/main.yml b/ansible/roles/kubernetes-checks/tasks/main.yml
deleted file mode 100644
index 4eebcb4421c152fe3d69b92cc0bc1ce69aecbfbb..0000000000000000000000000000000000000000
--- a/ansible/roles/kubernetes-checks/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Make sure kubectl has connection to server
-  tags:
-    - kubectl
-    - plugin
-    - krew
-  shell: kubectl version
-  failed_when: false
-  register: kubectl_server_version
-  changed_when: false
-
-- name: Fail if kubectl is not configured correctly
-  fail:
-    msg: Kubectl has no connection to server or is not installed. Install kubectl on the server or run install-kubernetes.yml playbook
-  when: kubectl_server_version.rc != 0
-
-- name: Make sure openshift is installed
-  tags:
-    - kubectl
-    - plugin
-    - krew
-  shell: pip3 freeze | grep openshift
-  failed_when: false
-  register: openshift_version
-  changed_when: false
-
-- name: Fail if openshift is not installed
-  fail:
-    msg: Openshift should be installed. Run `pip3 install openshift` on the server or run install-kubernetes.yml playbook
-  when: openshift_version.rc != 0
diff --git a/ansible/roles/pre-configure/tasks/main.yml b/ansible/roles/pre-configure/tasks/main.yml
index cb7b37cd41321f587f74cc2bcdb3824379296599..a18363bbd9de7db57810c1a81c0678a9ac42ec0e 100644
--- a/ansible/roles/pre-configure/tasks/main.yml
+++ b/ansible/roles/pre-configure/tasks/main.yml
@@ -17,6 +17,7 @@
       - curl
       - dnsutils
       - git
+      - haveged
       - nftables
       - rsync
       - snapd
@@ -46,16 +47,6 @@
   with_items:
     - python3-pip
 
-- name: Install python packages via pip3
-  tags:
-    - package
-    - pip
-  pip:
-    name:
-      # The openshift python package is needed for ansible k8s resource.
-      - openshift
-    executable: /usr/bin/pip3
-
 - name: Install kubectl snap
   # kubectl needs to get installed as "classic" snap
   command: snap install --classic kubectl
@@ -103,3 +94,17 @@
 
 - name: Configure firewall
   import_tasks: firewall.yml
+
+- name: debug docker_mirror data
+  debug:
+    msg: Docker mirror is "{{ docker_mirror }}"
+
+- name: Write docker registries.yaml
+  tags:
+    - k3s
+    - docker
+  template:
+    src: registries.yaml
+    dest: /etc/rancher/k3s/registries.yaml
+    mode: '0740'
+  when: docker_mirror.enabled
diff --git a/ansible/roles/pre-configure/templates/registries.yaml b/ansible/roles/pre-configure/templates/registries.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c5bb1520b6a126e31f171832028daca6b9594893
--- /dev/null
+++ b/ansible/roles/pre-configure/templates/registries.yaml
@@ -0,0 +1,14 @@
+---
+mirrors:
+  docker.io:
+    endpoint:
+      - "{{ docker_mirror.server }}"
+    rewrite:
+      # We need to remove the `server` part from the `endpoint` part, so what we
+      # end up with is a path like ""/openappstack/dependency_proxy/containers/$1"
+      "^(.*)$": "{{ docker_mirror.endpoint | regex_replace(docker_mirror.server, '') }}/$1"
+configs:
+  "{{ docker_mirror.server }}":
+    auth:
+      username: "{{ docker_mirror.username }}"
+      password: "{{ docker_mirror.password }}"
diff --git a/ansible/roles/setup-kubernetes/tasks/k3s.yml b/ansible/roles/setup-kubernetes/tasks/k3s.yml
index 9886f2ee3df086d94313819f8573a137d2ca4a94..58b5de4e0d24ab1210c9664b886cc3a95a7c1a41 100644
--- a/ansible/roles/setup-kubernetes/tasks/k3s.yml
+++ b/ansible/roles/setup-kubernetes/tasks/k3s.yml
@@ -46,4 +46,4 @@
     flat: yes
   loop:
     - src: "/etc/rancher/k3s/k3s.yaml"
-      dest: "{{ cluster_dir }}/secrets/kube_config_cluster.yml"
+      dest: "{{ cluster_dir }}/kube_config_cluster.yml"
diff --git a/docs/advanced_installation.rst b/docs/advanced_installation.rst
index 71a9c3d2c8144c24f90e096d74f9a373f814b8ef..90c90b7398a8d5ccee0088c4dc9b6ebd895753bb 100644
--- a/docs/advanced_installation.rst
+++ b/docs/advanced_installation.rst
@@ -143,7 +143,7 @@ Example: Customize Nextcloud to work with staging certificates
 
 Our CI pipeline works with staging certificates from Let's Encrypt, for that
 reason we need to allow insecure connections for the integration with
-ONLYOFFICE. You can find the file at ``install/ci-overrides/nextcloud-values-override.yaml``.
+ONLYOFFICE. You can find the file at ``install/ci-overrides/oas-nextcloud-override.yaml``.
 
 To apply it, run the following commands:
 
@@ -158,7 +158,7 @@ To apply it, run the following commands:
    # same namespace as your helmrelease with the '-n' argument
    kubectl apply \
      -n oas-apps \
-     -f ./install/ci-overrides/nextcloud-values-override.yaml
+     -f ./install/ci-overrides/oas-nextcloud-override.yaml
 
 Adding custom apps to the cluster
 =================================
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index af2fe142ea49ee1bf2bc5c0ebb8262e6201f0dd8..b553a23559f4baf37a39a42e8bd5782b4f7e60d5 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -17,7 +17,7 @@ To get an overall status of your cluster you can run the tests from the
 command line.
 
 There are two types of tests: [testinfra](https://testinfra.readthedocs.io/en/latest/)
-tests, and [behave](https://behave.readthedocs.io/en/latest/) tests.
+tests, and [taiko](https://taiko.dev) tests.
 
 ### Testinfra tests
 
@@ -64,37 +64,32 @@ Test a specific application:
   not verified. Therefore we need to force plain ssh:// with either
   `connection=ssh` or `--hosts=ssh://…`
 
-### Behave tests
+### Taiko tests
 
-Behave tests run in a browser and test if all the interfaces are up
+Taiko tests run in a browser and test if all the interfaces are up
 and running and correctly connected to each other. They are integrated in the
 `openappstack` CLI command suite.
 
 #### Prerequisites
 
-By default the behave tests use the Chromium browser and Chromium webdriver. If
-you want/need to use the Firefox webdriver please refer to the manual behave
-test instructions below, under [Advanced Usage](#advanced-usage).
+Install [taiko](https://taiko.dev):
 
-Install [Chromedriver](https://sites.google.com/a/chromium.org/chromedriver/),
-i.e. for Debian/Ubuntu use:
-
-    apt install chromium-chromedriver
+    npm install -g taiko
 
 #### Usage
 
-To run all behave tests, run the following command in this repository:
+To run all taiko tests, run the following command in this repository:
 
     python -m openappstack CLUSTERNAME test
 
-In the future, this command will run all tests, but now only *behave* is
+In the future, this command will run all tests, but now only *taiko* is
 implemented. To learn more about the `test` subcommand, run:
 
     python -m openappstack CLUSTERNAME test --help
 
-You can also only run a behave test for a specific application, i.e.:
+You can also only run a taiko test for a specific application, i.e.:
 
-    python -m openappstack CLUSTERNAME test --behave-tags nextcloud
+    python -m openappstack CLUSTERNAME test --taiko-tags nextcloud
 
 ### Advanced usage
 
@@ -133,42 +128,22 @@ then:
 
     gitlab-runner exec docker --env CI_REGISTRY_IMAGE="$CI_REGISTRY_IMAGE" --env SSH_PRIVATE_KEY="$SSH_PRIVATE_KEY" --env COSMOS_API_TOKEN="$COSMOS_API_TOKEN" bootstrap
 
-#### Behave tests
-
-##### Using Firefox instead of Chromium
-
-If you want to use Firefox instead of Chromium, you need to install the gecko
-driver
-
-    apt install firefox-geckodriver
-
-Now you only need to add `-D browser=firefox` to the behave command line
-options, so run:
+#### Taiko tests
 
-    python -m openappstack CLUSTER_NAME test --behave-param='-D browser=firefox'
+##### Using Taiko without the OpenAppStack CLI
 
-##### Using behave without the OpenAppStack CLI
-
-Go to the `test/behave` directory and run:
+Go to the `test/taiko` directory and run:
 
 For nextcloud & onlyoffice tests:
 
-    behave -D nextcloud.url=https://files.example.openappstack.net \
-           -D nextcloud.password="$(cat ../../clusters/YOUR_CLUSTERNAME/secrets/nextcloud_admin_password)" \
-           -t nextcloud
-
-You can replace `nextcloud` with `grafana` or `rocketchat` to test the other
-applications.
-
-#### Run behave tests in openappstack-ci docker image
-
-    docker run --rm -it open.greenhost.net:4567/openappstack/openappstack/openappstack-ci sh
+    export DOMAIN='oas.example.net'
+    export SSO_USERNAME='user1'
+    export SSO_USER_PW='...'
+    export TAIKO_TESTS='nextcloud'
+    taiko --observe taiko-tests.js
 
-      apk --no-cache add git
-      git clone https://open.greenhost.net/openappstack/openappstack.git
-      cd openappstack/test/behave
-      behave -D nextcloud.url=https://files.ci-20410.ci.openappstack.net \
-       -D nextcloud.admin.password=…
+You can replace `nextcloud` with `grafana` or `wordpress` to test the other
+applications, or with `all` to test all applications.
 
 ## SSH access
 
@@ -284,7 +259,8 @@ Lets take it as an example how to debug the root cause.
 
 If ever things fail beyond possible recovery, here's how to completely purge an OAS installation in order to start from scratch:
 
-    cluster$ apt purge docker-ce-cli containerd.io
-    cluster$ mount | egrep '^(.*kubelet|nsfs.*docker)' | cut -d' ' -f 3 | xargs umount
-    cluster$ rm -rf /var/lib/docker /var/lib/OpenAppStack /etc/kubernetes /var/lib/etcd /var/lib/rancher /var/lib/kubelet /var/log/OpenAppStack /var/log/containers /var/log/pods
+    cluster$ /usr/local/bin/k3s-killall.sh
+    cluster$ systemctl disable k3s
+    cluster$ mount | egrep '(kubelet|nsfs|k3s)' | cut -d' ' -f 3 | xargs --no-run-if-empty -n 1 umount
+    cluster$ rm -rf /var/lib/{rancher,OpenAppStack,kubelet,cni,docker,etcd} /etc/{kubernetes,rancher} /var/log/{OpenAppStack,containers,pods} /tmp/k3s /etc/systemd/system/k3s.service
     cluster$ systemctl reboot
diff --git a/flux/.flux.yaml b/flux/.flux.yaml
deleted file mode 100644
index c4122e1e9cafbb74af5a5bfda1a72f089556f791..0000000000000000000000000000000000000000
--- a/flux/.flux.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: 1
-commandUpdated:
-  generators:
-  # Find all yaml files (recursively).
-  # Filename convention: appname_optional_additional_identifiers_resourcetype.yaml
-  #   i.e. oas/prometheus_alerts_custom_cm.yaml (configmap)
-  # Ignore ones with filename starting with a dot.
-  # For each file, check if the corresponding app settings configmap has
-  # disabled the app by having a key "enabled" with value "false", and skip
-  # it in that case.
-  # Otherwise, include it.
-  - command: >
-      for namespace in *; do
-        for location in $(find "$namespace" -type f \( -name '*.yaml' -o -name '*.yml' \) -a ! -name '.*'); do
-          filename=$(basename $location);
-          name="${filename%%_*}";
-          enabled=$(kubectl get secret -n "$namespace" "${name}-settings" -o jsonpath="{.data.enabled}" | base64 -d);
-          if [ "$enabled" == "true" ]; then
-            echo '---';
-            cat $location;
-          fi;
-        done;
-      done
-  updaters: []
diff --git a/flux/cert-manager/cert-manager_hr.yaml b/flux/cert-manager/cert-manager_hr.yaml
deleted file mode 100644
index 0b1de9e3989386ee150635fe041f43f7feea820a..0000000000000000000000000000000000000000
--- a/flux/cert-manager/cert-manager_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: cert-manager
-  namespace: cert-manager
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: cert-manager
-  chart:
-    # https://artifacthub.io/packages/helm/cert-manager/cert-manager
-    repository: https://charts.jetstack.io
-    name: cert-manager
-    version: 1.3.1
-  valuesFrom:
-    - secretKeyRef:
-        name: cert-manager-settings
-        key: values.yaml
diff --git a/flux/kube-system/local-path-provisioner_hr.yaml b/flux/kube-system/local-path-provisioner_hr.yaml
deleted file mode 100644
index 7d83e861ef97a82515e4b67989739ebe68f7fc7c..0000000000000000000000000000000000000000
--- a/flux/kube-system/local-path-provisioner_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: local-path-provisioner
-  namespace: kube-system
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: local-path-provisioner
-  chart:
-    git: https://github.com/rancher/local-path-provisioner
-    ref: v0.0.14
-    path: deploy/chart
-  valuesFrom:
-    - secretKeyRef:
-        name: local-path-provisioner-settings
-        key: values.yaml
-  timeout: 120
diff --git a/flux/kube-system/metallb_hr.yaml b/flux/kube-system/metallb_hr.yaml
deleted file mode 100644
index 90156f8f34986af4ac97b1fd7ecfacf5f56321ae..0000000000000000000000000000000000000000
--- a/flux/kube-system/metallb_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: metallb
-  namespace: kube-system
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: metallb
-  chart:
-    repository: https://charts.bitnami.com/bitnami
-    name: metallb
-    version: 0.1.23
-  valuesFrom:
-    - secretKeyRef:
-        name: metallb-settings
-        key: values.yaml
-  timeout: 120
diff --git a/flux/oas-apps/nextcloud_hr.yaml b/flux/oas-apps/nextcloud_hr.yaml
deleted file mode 100644
index cacb5a9a11297a55c32187a528ca5158314d35a9..0000000000000000000000000000000000000000
--- a/flux/oas-apps/nextcloud_hr.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: nextcloud
-  namespace: oas-apps
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  # Calling the release "nextcloud" runs into a bug in the helm chart.
-  # See https://open.greenhost.net/openappstack/nextcloud/issues/3 for details.
-  releaseName: nc
-  chart:
-    git: https://open.greenhost.net/openappstack/nextcloud
-    ref: 0.2.7
-    path: .
-  valuesFrom:
-    - secretKeyRef:
-        name: nextcloud-settings
-        key: values.yaml
-  timeout: 1800
-  wait: true
diff --git a/flux/oas-apps/rocketchat_hr.yaml b/flux/oas-apps/rocketchat_hr.yaml
deleted file mode 100644
index 00dc2a1cbb475f2ffcc24b379b6d8512bf1fcaf8..0000000000000000000000000000000000000000
--- a/flux/oas-apps/rocketchat_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: rocketchat
-  namespace: oas-apps
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: rocketchat
-  chart:
-    repository: https://charts.helm.sh/stable
-    name: rocketchat
-    version: 2.0.10
-  valuesFrom:
-    - secretKeyRef:
-        name: rocketchat-settings
-        key: values.yaml
-  timeout: 300
diff --git a/flux/oas-apps/wordpress_hr.yaml b/flux/oas-apps/wordpress_hr.yaml
deleted file mode 100644
index 3bb81645a8a64677c75f97eca544154072fa2911..0000000000000000000000000000000000000000
--- a/flux/oas-apps/wordpress_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: wordpress
-  namespace: oas-apps
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: wordpress
-  chart:
-    git: https://open.greenhost.net/openappstack/wordpress-helm
-    ref: 0.1.4
-    path: .
-  valuesFrom:
-    - secretKeyRef:
-        name: wordpress-settings
-        key: values.yaml
-  timeout: 1800
diff --git a/flux/oas-custom/flux-custom_hr.yaml b/flux/oas-custom/flux-custom_hr.yaml
deleted file mode 100644
index b0bc9f12fdbd21a6005ab496574230c43b9d35a5..0000000000000000000000000000000000000000
--- a/flux/oas-custom/flux-custom_hr.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: flux-custom
-  namespace: oas-custom
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: flux-custom
-  chart:
-    name: flux
-    repository: https://charts.fluxcd.io
-    version: 1.6.0
-  valuesFrom:
-  - secretKeyRef:
-      name: flux-custom-settings
-      key: values.yaml
diff --git a/flux/oas/eventrouter_hr.yaml b/flux/oas/eventrouter_hr.yaml
deleted file mode 100644
index 59eb956223b52c6ede2ab6378ffc6c3534275ee7..0000000000000000000000000000000000000000
--- a/flux/oas/eventrouter_hr.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: eventrouter
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: eventrouter
-  chart:
-    repository: https://charts.helm.sh/stable
-    name: eventrouter
-    version: 0.3.2
-  valuesFrom:
-  - secretKeyRef:
-      name: eventrouter-settings
-      key: values.yaml
diff --git a/flux/oas/ingress_hr.yaml b/flux/oas/ingress_hr.yaml
deleted file mode 100644
index c2167f74d682a0c0d149cf868e688f3c04c89af3..0000000000000000000000000000000000000000
--- a/flux/oas/ingress_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: ingress
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: ingress
-  chart:
-    # https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx
-    repository: https://kubernetes.github.io/ingress-nginx
-    name: ingress-nginx
-    version: 3.29.0
-  valuesFrom:
-    - secretKeyRef:
-        name: ingress-settings
-        key: values.yaml
diff --git a/flux/oas/letsencrypt-production_hr.yaml b/flux/oas/letsencrypt-production_hr.yaml
deleted file mode 100644
index a25ad1f276aafed91cb612c7ef9aac0275a76381..0000000000000000000000000000000000000000
--- a/flux/oas/letsencrypt-production_hr.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: letsencrypt-production
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: letsencrypt-production
-  chart:
-    git: https://open.greenhost.net/openappstack/letsencrypt-issuer
-    ref: 346ce53321fe6880dd76e7cff7e2a71e57f667d8
-    path: .
-  valuesFrom:
-    - secretKeyRef:
-        name: letsencrypt-production-settings
-        key: values.yaml
diff --git a/flux/oas/letsencrypt-staging_hr.yaml b/flux/oas/letsencrypt-staging_hr.yaml
deleted file mode 100644
index 96522de904e22a6717d511be0bdea5f8c4aabb55..0000000000000000000000000000000000000000
--- a/flux/oas/letsencrypt-staging_hr.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: letsencrypt-staging
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: letsencrypt-staging
-  chart:
-    git: https://open.greenhost.net/openappstack/letsencrypt-issuer
-    ref: 346ce53321fe6880dd76e7cff7e2a71e57f667d8
-    path: .
-  valuesFrom:
-    - secretKeyRef:
-        name: letsencrypt-staging-settings
-        key: values.yaml
-  values:
-    issuer:
-      name: letsencrypt-staging
-      server: "https://acme-staging-v02.api.letsencrypt.org/directory"
diff --git a/flux/oas/loki_cm.yaml b/flux/oas/loki_cm.yaml
deleted file mode 100644
index 6fa99c26a58208f3f58cf4b68217ff0568b348d3..0000000000000000000000000000000000000000
--- a/flux/oas/loki_cm.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  labels:
-    app: loki
-    grafana_datasource: "1"
-    release: loki
-  name: loki-datasource
-  namespace: oas
-data:
-  loki-stack-datasource.yaml: |-
-    apiVersion: 1
-    datasources:
-    - name: Loki
-      type: loki
-      access: proxy
-      url: http://loki:3100
-      version: 1
diff --git a/flux/oas/loki_hr.yaml b/flux/oas/loki_hr.yaml
deleted file mode 100644
index b973dcf8b80592d8910a2f320cffd0fa21d9eea7..0000000000000000000000000000000000000000
--- a/flux/oas/loki_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: loki
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: loki
-  chart:
-    # https://github.com/grafana/helm-charts/tree/main/charts/loki
-    repository: https://grafana.github.io/helm-charts
-    name: loki
-    version: 2.5.0
-  valuesFrom:
-  - secretKeyRef:
-      name: loki-settings
-      key: values.yaml
diff --git a/flux/oas/prometheus-stack_hr.yaml b/flux/oas/prometheus-stack_hr.yaml
deleted file mode 100644
index 45a9a76c1009128acd378ce687e913e43162e3ce..0000000000000000000000000000000000000000
--- a/flux/oas/prometheus-stack_hr.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: prometheus-stack
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: prometheus-stack
-  # https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/Chart.yaml
-  # https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack
-  chart:
-    repository: https://prometheus-community.github.io/helm-charts
-    name: kube-prometheus-stack
-    version: 15.4.2
-  valuesFrom:
-    - secretKeyRef:
-        name: prometheus-stack-settings
-        key: values.yaml
-  timeout: 300
diff --git a/flux/oas/promtail_hr.yaml b/flux/oas/promtail_hr.yaml
deleted file mode 100644
index 2fb7a68f25be8cf82005568033f89582a73aadf4..0000000000000000000000000000000000000000
--- a/flux/oas/promtail_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: promtail
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: promtail
-  chart:
-    # https://github.com/grafana/helm-charts/tree/main/charts/promtail
-    repository: https://grafana.github.io/helm-charts
-    name: promtail
-    version: 3.5.1
-  valuesFrom:
-  - secretKeyRef:
-      name: promtail-settings
-      key: values.yaml
diff --git a/flux/oas/single-sign-on_hr.yaml b/flux/oas/single-sign-on_hr.yaml
deleted file mode 100644
index c5b12df9d5fb896c99f63c326b1bf6690037c1e8..0000000000000000000000000000000000000000
--- a/flux/oas/single-sign-on_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: single-sign-on
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: single-sign-on
-  chart:
-    git: https://open.greenhost.net/openappstack/single-sign-on
-    ref: master
-    path: ./helmchart/single-sign-on/
-  valuesFrom:
-    - secretKeyRef:
-        name: single-sign-on-settings
-        key: values.yaml
-  timeout: 1800
diff --git a/flux/velero/velero_hr.yaml b/flux/velero/velero_hr.yaml
deleted file mode 100644
index bda7108f6990ae65d5a33af05245eade4fc7c579..0000000000000000000000000000000000000000
--- a/flux/velero/velero_hr.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: velero
-  namespace: velero
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: velero
-  chart:
-    repository: https://vmware-tanzu.github.io/helm-charts
-    name: velero
-    version: 2.11.0
-  valuesFrom:
-    - secretKeyRef:
-        name: velero-settings
-        key: values.yaml
diff --git a/flux2/apps/monitoring/eventrouter-release.yaml b/flux2/apps/monitoring/eventrouter-release.yaml
index 6ace3f9b9d9b03dddbd95e1ce3c1282f859b929f..edea351bfbace25f5b11c321f62cb769d5a4aca7 100644
--- a/flux2/apps/monitoring/eventrouter-release.yaml
+++ b/flux2/apps/monitoring/eventrouter-release.yaml
@@ -8,13 +8,14 @@ spec:
   releaseName: eventrouter
   chart:
     spec:
+      # https://github.com/helm/charts/tree/master/stable/eventrouter
       chart: eventrouter
       version: 0.3.2
       sourceRef:
         kind: HelmRepository
         name: helm-stable
         namespace: flux-system
-  interval: 40m
+  interval: 1h
   values:
     sink: stdout
     resources:
@@ -24,4 +25,11 @@ spec:
       requests:
         memory: 100Mi
         cpu: 100m
-    
\ No newline at end of file
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-eventrouter-override
+      optional: true
+    - kind: Secret
+      name: oas-eventrouter-override
+      optional: true
diff --git a/flux2/apps/monitoring/kube-prometheus-stack-release.yaml b/flux2/apps/monitoring/kube-prometheus-stack-release.yaml
index a9b592c894d3cc03f8617d04a488506e410514cb..cb5781e719c14fce9fe88c06553cfa664995bcf5 100644
--- a/flux2/apps/monitoring/kube-prometheus-stack-release.yaml
+++ b/flux2/apps/monitoring/kube-prometheus-stack-release.yaml
@@ -9,12 +9,12 @@ spec:
   chart:
     spec:
       chart: kube-prometheus-stack
-      version: 15.4.2
+      version: 16.12.0
       sourceRef:
         kind: HelmRepository
         name: prometheus-community
         namespace: flux-system
-  interval: 40m
+  interval: 1h
   install:
     timeout: 10m
   values:
@@ -31,6 +31,7 @@ spec:
     kubeScheduler:
       enabled: false
 
+    # https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml#L115
     alertmanager:
       ingress:
         enabled: true
@@ -276,3 +277,11 @@ spec:
           cpu: 100m
           memory: 64Mi
     #  priorityClassName: high-priority
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-kube-prometheus-stack-override
+      optional: true
+    - kind: Secret
+      name: oas-kube-prometheus-stack-override
+      optional: true
diff --git a/flux2/apps/monitoring/loki-release.yaml b/flux2/apps/monitoring/loki-release.yaml
index df4409e1097c828992d3718fd05247cae9907b7c..3280c961f2e143117d89c52e82178fc51ace8282 100644
--- a/flux2/apps/monitoring/loki-release.yaml
+++ b/flux2/apps/monitoring/loki-release.yaml
@@ -8,21 +8,23 @@ spec:
   releaseName: loki
   chart:
     spec:
+      # https://artifacthub.io/packages/helm/grafana/loki
       chart: loki
-      version: 2.5.0
+      version: 2.5.2
       sourceRef:
         kind: HelmRepository
         name: grafana
         namespace: flux-system
-  interval: 40m
+  interval: 1h
   values:
+    # https://github.com/grafana/helm-charts/blob/main/charts/loki/values.yaml
     resources:
       limits:
         cpu: 800m
-        memory: 180Mi
+        memory: 360Mi
       requests:
         cpu: 400m
-        memory: 90Mi
+        memory: 180Mi
     persistence:
       enabled: true
       accessModes:
@@ -30,7 +32,14 @@ spec:
       size: 10Gi
       annotations: {}
       # existingClaim:
+    # https://github.com/grafana/helm-charts/blob/main/charts/loki/values.yaml#L46
     config:
+      # https://github.com/grafana/helm-charts/blob/main/charts/loki/values.yaml#L48
+      ingester:
+        chunk_idle_period: 30m
+        chunk_block_size: 1048576
+        chunk_retain_period: 15m
+        max_transfer_retries: 10
       # https://github.com/grafana/loki/blob/main/cmd/loki/loki-local-config.yaml
       # https://grafana.com/docs/loki/latest/operations/storage/retention
       schema_config:
@@ -61,3 +70,11 @@ spec:
       table_manager:
         retention_deletes_enabled: true
         retention_period: 672h  # 28 days
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-loki-override
+      optional: true
+    - kind: Secret
+      name: oas-loki-override
+      optional: true
diff --git a/flux2/apps/monitoring/promtail-release.yaml b/flux2/apps/monitoring/promtail-release.yaml
index f5eacf743a2ab7e225d146ec5c14366c7f5ef642..227197fb6671d4bc5c072381a65d7cd89226e340 100644
--- a/flux2/apps/monitoring/promtail-release.yaml
+++ b/flux2/apps/monitoring/promtail-release.yaml
@@ -8,13 +8,14 @@ spec:
   releaseName: promtail
   chart:
     spec:
+      # https://artifacthub.io/packages/helm/grafana/promtail
       chart: promtail
-      version: 3.5.1
+      version: 3.6.0
       sourceRef:
         kind: HelmRepository
         name: grafana
         namespace: flux-system
-  interval: 40m
+  interval: 1h
   values:
     initContainer:
       enabled: true
@@ -53,3 +54,11 @@ spec:
                     event_name:
                     event_source_host:
                     event_source_component:
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-promtail-override
+      optional: true
+    - kind: Secret
+      name: oas-promtail-override
+      optional: true
diff --git a/flux2/apps/nextcloud/release.yaml b/flux2/apps/nextcloud/release.yaml
index b744ea7a5d4e927880b2a30dc17c90eea5d6ffca..d344d95c735caa17cc9c3a3ab7fb37dda5fe2bc4 100644
--- a/flux2/apps/nextcloud/release.yaml
+++ b/flux2/apps/nextcloud/release.yaml
@@ -14,7 +14,7 @@ spec:
         kind: GitRepository
         name: nextcloud
         namespace: flux-system
-  interval: 40m
+  interval: 1h
   install:
     timeout: 30m
   values:
@@ -86,7 +86,7 @@ spec:
 
       resources:
         limits:
-          cpu: 500m
+          cpu: 700m
           memory: 512Mi
         requests:
           cpu: 200m
@@ -148,10 +148,10 @@ spec:
       postgresqlPassword: "${onlyoffice_postgresql_password}"
       resources:
         limits:
-          cpu: 200m
+          cpu: 400m
           memory: 256Mi
         requests:
-          cpu: 100m
+          cpu: 200m
           memory: 128Mi
 
     rabbitmq:
@@ -189,7 +189,11 @@ spec:
       client_id: nextcloud
       client_secret: "${nextcloud_oauth_client_secret}"
       groups_claim: "openappstack_roles"
+  # Allow custom values either by configMap or by secret
   valuesFrom:
     - kind: ConfigMap
-      name: nextcloud-values-override
+      name: oas-nextcloud-override
+      optional: true
+    - kind: Secret
+      name: oas-nextcloud-override
       optional: true
diff --git a/flux2/apps/rocketchat/release.yaml b/flux2/apps/rocketchat/release.yaml
index 881d815d535a437a501a09dafb24a9ca46fe591b..2b66ad935da13e9eed4ee88a054ac3bae779889b 100644
--- a/flux2/apps/rocketchat/release.yaml
+++ b/flux2/apps/rocketchat/release.yaml
@@ -14,7 +14,7 @@ spec:
         kind: HelmRepository
         name: helm-stable
         namespace: flux-system
-  interval: 20m
+  interval: 1h
   install:
     timeout: 15m
   values:
@@ -139,3 +139,11 @@ spec:
     image:
       tag: 3.15.0
       pullPolicy: IfNotPresent
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-rocketchat-override
+      optional: true
+    - kind: Secret
+      name: oas-rocketchat-override
+      optional: true
diff --git a/flux2/apps/velero/release.yaml b/flux2/apps/velero/release.yaml
index 3cbdba99964b8040cc731368aa4044b728ee668e..3b8dacccad44fa2c2a608f48cf023519605034df 100644
--- a/flux2/apps/velero/release.yaml
+++ b/flux2/apps/velero/release.yaml
@@ -9,12 +9,12 @@ spec:
   chart:
     spec:
       chart: velero
-      version: 2.11.0
+      version: 2.23.1
       sourceRef:
         kind: HelmRepository
         name: vmware-tanzu
         namespace: flux-system
-  interval: 40m
+  interval: 1h
   values:
     # Init containers to add to the Velero deployment's pod spec. At least one
     # plugin provider image is required.
@@ -123,5 +123,12 @@ spec:
           includedNamespaces:
             # We include all namespaces.
             - '*'
-
     configMaps: {}
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-velero-override
+      optional: true
+    - kind: Secret
+      name: oas-velero-override
+      optional: true
diff --git a/flux2/apps/wordpress/release.yaml b/flux2/apps/wordpress/release.yaml
index 61fc9aafc238d7d26e56161d7a8fb8770454fb46..a6779291e9586be7208c12c17a262d35d535d958 100644
--- a/flux2/apps/wordpress/release.yaml
+++ b/flux2/apps/wordpress/release.yaml
@@ -14,7 +14,7 @@ spec:
         kind: GitRepository
         name: wordpress
         namespace: flux-system
-  interval: 40m
+  interval: 1h
   install:
     timeout: 30m
   values:
@@ -30,12 +30,12 @@ spec:
         locale: en_US
         url: "https://www.${domain}"
         title: "OpenAppStack website"
-    
+
     persistence:
       existingClaim: wordpress-files
     podAnnotations:
       backup.velero.io/backup-volumes: "wordpress-wp-uploads"
-    
+
     openid_connect_settings:
       enabled: true
       client_secret: ${wordpress_oauth_client_secret}
@@ -52,7 +52,7 @@ spec:
       scope: email profile openid openappstack_roles offline_access
       role_mapping_enabled: true
       role_key: openappstack_roles
-    
+
     database:
       db:
         user: wordpress
@@ -76,7 +76,7 @@ spec:
             memory: 256Mi
       replication:
         enabled: false
-    
+
     # It's advisable to set resource limits to prevent your K8s cluster from
     # crashing
     resources:
@@ -86,7 +86,7 @@ spec:
       requests:
         cpu: 100m
         memory: 128Mi
-    
+
     ingress:
       enabled: true
       annotations:
@@ -100,4 +100,11 @@ spec:
             - "www.${domain}"
             - "${domain}"
           secretName: oas-wordpress
-    
\ No newline at end of file
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-wordpress-override
+      optional: true
+    - kind: Secret
+      name: oas-wordpress-override
+      optional: true
diff --git a/flux2/cluster/base/core.yaml b/flux2/cluster/base/core.yaml
index 830a2664458de5cd3a068e58bf7634b19c73acb1..1ab36950217f0841f2b89a5c8a4015d937eaa154 100644
--- a/flux2/cluster/base/core.yaml
+++ b/flux2/cluster/base/core.yaml
@@ -1,3 +1,4 @@
+---
 apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
 kind: Kustomization
 metadata:
@@ -6,7 +7,7 @@ metadata:
 spec:
   dependsOn:
     - name: infrastructure
-  interval: 1m0s
+  interval: 1h
   sourceRef:
     kind: GitRepository
     name: openappstack
@@ -21,3 +22,16 @@ spec:
         name: oas-oauth-variables
       - kind: Secret
         name: oas-cluster-variables
+  healthChecks:
+    - apiVersion: helm.toolkit.fluxcd.io/v1beta1
+      kind: HelmRelease
+      name: metallb
+      namespace: kube-system
+    - apiVersion: helm.toolkit.fluxcd.io/v1beta1
+      kind: HelmRelease
+      name: nginx
+      namespace: oas
+    - apiVersion: helm.toolkit.fluxcd.io/v1beta1
+      kind: HelmRelease
+      name: single-sign-on
+      namespace: oas
diff --git a/flux2/cluster/base/infrastructure.yaml b/flux2/cluster/base/infrastructure.yaml
index aa1dbd4933c103100b2187960a5ca3b1bf3e3fa8..5de7ecd6ee7b75e5f19cb89006e4e13bf118bf08 100644
--- a/flux2/cluster/base/infrastructure.yaml
+++ b/flux2/cluster/base/infrastructure.yaml
@@ -5,10 +5,23 @@ metadata:
   name: infrastructure
   namespace: flux-system
 spec:
-  interval: 10m
+  interval: 1h
   sourceRef:
     kind: GitRepository
     name: openappstack
   path: ./flux2/infrastructure
   prune: true
   validation: client
+  healthChecks:
+    - apiVersion: helm.toolkit.fluxcd.io/v1beta1
+      kind: HelmRelease
+      name: cert-manager
+      namespace: cert-manager
+    - apiVersion: helm.toolkit.fluxcd.io/v1beta1
+      kind: HelmRelease
+      name: local-path-provisioner
+      namespace: kube-system
+    - apiVersion: helm.toolkit.fluxcd.io/v1beta1
+      kind: HelmRelease
+      name: oas-secrets
+      namespace: flux-system
diff --git a/flux2/cluster/base/monitoring.yaml b/flux2/cluster/base/monitoring.yaml
index 930ab2d2fc7bdad3fcf026959160530803896ffc..b8cf3c73dab87926f1e24fa07289c2094732fdc2 100644
--- a/flux2/cluster/base/monitoring.yaml
+++ b/flux2/cluster/base/monitoring.yaml
@@ -5,7 +5,7 @@ metadata:
   name: monitoring
   namespace: flux-system
 spec:
-  interval: 45m
+  interval: 1h
   dependsOn:
     - name: core
     - name: infrastructure
diff --git a/flux2/cluster/optional/nextcloud/nextcloud.yaml b/flux2/cluster/optional/nextcloud/nextcloud.yaml
index ed6a58caaebfdd2cdf7b6076cda84a585c0387b0..e09fbd14abbac271ed0ec945566e272a06eeaa38 100644
--- a/flux2/cluster/optional/nextcloud/nextcloud.yaml
+++ b/flux2/cluster/optional/nextcloud/nextcloud.yaml
@@ -5,7 +5,7 @@ metadata:
   name: nextcloud
   namespace: flux-system
 spec:
-  interval: 45m
+  interval: 1h
   dependsOn:
     - name: core
     - name: infrastructure
diff --git a/flux2/cluster/optional/rocketchat/rocketchat.yaml b/flux2/cluster/optional/rocketchat/rocketchat.yaml
index 080cbd5544595733d8942cf39481628c117d487f..c3cd0c2d68c2443921be4d9f83d7e4b2b20d0dd9 100644
--- a/flux2/cluster/optional/rocketchat/rocketchat.yaml
+++ b/flux2/cluster/optional/rocketchat/rocketchat.yaml
@@ -5,7 +5,7 @@ metadata:
   name: rocketchat
   namespace: flux-system
 spec:
-  interval: 10m0s
+  interval: 1h
   dependsOn:
     - name: core
     - name: infrastructure
diff --git a/flux2/cluster/optional/velero/velero.yaml b/flux2/cluster/optional/velero/velero.yaml
index c89ad010bbfabe2718ae9b583ef1044871696cf1..ac446403455e881b5c0a447d4c75de06a8801162 100644
--- a/flux2/cluster/optional/velero/velero.yaml
+++ b/flux2/cluster/optional/velero/velero.yaml
@@ -5,7 +5,7 @@ metadata:
   name: velero
   namespace: flux-system
 spec:
-  interval: 45m
+  interval: 1h
   dependsOn:
     - name: core
     - name: infrastructure
diff --git a/flux2/cluster/optional/wordpress/wordpress.yaml b/flux2/cluster/optional/wordpress/wordpress.yaml
index 6fc4bc4df9e1f703057f6c3e4e8dc335756d0beb..f6017a0999fb19a4930bbe6d98b921121ee7140e 100644
--- a/flux2/cluster/optional/wordpress/wordpress.yaml
+++ b/flux2/cluster/optional/wordpress/wordpress.yaml
@@ -5,7 +5,7 @@ metadata:
   name: wordpress
   namespace: flux-system
 spec:
-  interval: 45m
+  interval: 1h
   dependsOn:
     - name: core
     - name: infrastructure
diff --git a/flux2/cluster/test/all-optional.yaml b/flux2/cluster/test/all-optional.yaml
deleted file mode 100644
index d24d824c5e5d9d005ec36aea25fc327b2f01ba3b..0000000000000000000000000000000000000000
--- a/flux2/cluster/test/all-optional.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: all-optional
-  namespace: flux-system
-spec:
-  dependsOn:
-    - name: base
-  interval: 10m0s
-  sourceRef:
-    kind: GitRepository
-    name: openappstack
-  path: ./flux2/cluster/optional
-  prune: true
-  validation: client
diff --git a/flux2/cluster/test/base.yaml b/flux2/cluster/test/base.yaml
deleted file mode 100644
index e185ff1e93474281b69c85200026cf399836baf0..0000000000000000000000000000000000000000
--- a/flux2/cluster/test/base.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: base
-  namespace: flux-system
-spec:
-  interval: 10m0s
-  sourceRef:
-    kind: GitRepository
-    name: openappstack
-  path: ./flux2/cluster/base
-  prune: true
-  validation: client
diff --git a/flux2/core/base/metallb/release.yaml b/flux2/core/base/metallb/release.yaml
index 8127c48739e6e7a9938002d1d21adf09efc9b436..d190242dbde3a73436f936a5a44d08aa5db9a5f3 100644
--- a/flux2/core/base/metallb/release.yaml
+++ b/flux2/core/base/metallb/release.yaml
@@ -8,13 +8,14 @@ spec:
   releaseName: metallb
   chart:
     spec:
+      # https://artifacthub.io/packages/helm/bitnami/metallb
       chart: metallb
-      version: 0.1.23
+      version: 2.4.3
       sourceRef:
         kind: HelmRepository
         name: bitnami
         namespace: flux-system
-  interval: 40m
+  interval: 1h
   install:
     timeout: 2m
   values:
@@ -25,3 +26,11 @@ spec:
           protocol: layer2
           addresses:
             - "${ip_address}/32"
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-metallb-override
+      optional: true
+    - kind: Secret
+      name: oas-metallb-override
+      optional: true
diff --git a/flux2/infrastructure/nginx/kustomization.yaml b/flux2/core/base/nginx/kustomization.yaml
similarity index 100%
rename from flux2/infrastructure/nginx/kustomization.yaml
rename to flux2/core/base/nginx/kustomization.yaml
diff --git a/flux2/infrastructure/nginx/release.yaml b/flux2/core/base/nginx/release.yaml
similarity index 75%
rename from flux2/infrastructure/nginx/release.yaml
rename to flux2/core/base/nginx/release.yaml
index 1c2514019e3559c9446c0a975129af32dd5706fb..7647c14e912a5e75d9a4f47425289e97c8f46e58 100644
--- a/flux2/infrastructure/nginx/release.yaml
+++ b/flux2/core/base/nginx/release.yaml
@@ -7,16 +7,21 @@ spec:
   releaseName: ingress-nginx
   chart:
     spec:
+      # https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx
       chart: ingress-nginx
       sourceRef:
         kind: HelmRepository
         name: ingress-nginx
         namespace: flux-system
-      version: "3.31.0"
+      version: "3.34.0"
   interval: 1h0m0s
   install:
     remediation:
       retries: 3
+    timeout: 9m
+  dependsOn:
+    - name: metallb
+      namespace: kube-system
   values:
     controller:
       image:
@@ -40,3 +45,11 @@ spec:
         requests:
           cpu: 100m
           memory: 64Mi
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-nginx-override
+      optional: true
+    - kind: Secret
+      name: oas-nginx-override
+      optional: true
diff --git a/flux2/core/base/single-sign-on/release.yaml b/flux2/core/base/single-sign-on/release.yaml
index 4215607f1db19571a67d8a69cab7b7893941fd4b..fda4dbbb515726d684575e1b74a91192d6b3b5b1 100644
--- a/flux2/core/base/single-sign-on/release.yaml
+++ b/flux2/core/base/single-sign-on/release.yaml
@@ -41,7 +41,7 @@ spec:
           description: "Grafana allows you to query, visualize, alert on and understand metrics generated by OpenAppStack. It can be used to create explore and share dashboards."
       username: "${userbackend_admin_username}"
       password: "${userbackend_admin_password}"
-      email: "${userbackend_admin_email}"
+      email: "${admin_email}"
       postgres:
         password: "${userbackend_postgres_password}"
       persistence:
@@ -147,3 +147,11 @@ spec:
         - "authorization_code"
         - "refresh_token"
         - "client_credentials"
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-single-sign-on-override
+      optional: true
+    - kind: Secret
+      name: oas-single-sign-on-override
+      optional: true
diff --git a/flux2/infrastructure/cert-manager/release.yaml b/flux2/infrastructure/cert-manager/release.yaml
index ca524e844733ec1f57e12e02f97dd67657c637ef..18c68f6b40771e19ec1989597c69046c30c68a34 100644
--- a/flux2/infrastructure/cert-manager/release.yaml
+++ b/flux2/infrastructure/cert-manager/release.yaml
@@ -12,7 +12,7 @@ spec:
         kind: HelmRepository
         name: jetstack
         namespace: flux-system
-      version: 1.3.1
+      version: 1.4.0
   interval: 1h0m0s
   install:
     remediation:
@@ -26,15 +26,15 @@ spec:
         cpu: 100m
         memory: 256Mi
       limits:
-        cpu: 100m
+        cpu: 200m
         memory: 512Mi
     cainjector:
       resources:
         requests:
-          cpu: 100m
+          cpu: 200m
           memory: 384Mi
         limits:
-          cpu: 100m
+          cpu: 400m
           memory: 768Mi
     webhook:
       resources:
@@ -42,6 +42,14 @@ spec:
           cpu: 100m
           memory: 40Mi
         limits:
-          cpu: 100m
+          cpu: 200m
           memory: 80Mi
     installCRDs: true
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-cert-manager-override
+      optional: true
+    - kind: Secret
+      name: oas-cert-manager-override
+      optional: true
diff --git a/flux2/infrastructure/local-path-provisioner/release.yaml b/flux2/infrastructure/local-path-provisioner/release.yaml
index bd8afc1b7ccc3f77c8c2402a9772c4dc4d662eba..094b4a36635f6a37a1bb113ce996a07155d6f9ee 100644
--- a/flux2/infrastructure/local-path-provisioner/release.yaml
+++ b/flux2/infrastructure/local-path-provisioner/release.yaml
@@ -34,4 +34,12 @@ spec:
         memory: 20Mi
       limits:
         cpu: 400m
-        memory: 40Mi
\ No newline at end of file
+        memory: 40Mi
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-local-path-provisioner-override
+      optional: true
+    - kind: Secret
+      name: oas-local-path-provisioner-override
+      optional: true
diff --git a/flux2/infrastructure/secrets/release.yaml b/flux2/infrastructure/secrets/release.yaml
index a64f39be980d5fdf09c5be8bfd00c8d2cf35ecfc..4073c1768bdba6a4e64880fbe108d1e15e4dd62d 100644
--- a/flux2/infrastructure/secrets/release.yaml
+++ b/flux2/infrastructure/secrets/release.yaml
@@ -14,3 +14,11 @@ spec:
         name: openappstack
         namespace: flux-system
   interval: 1h0m0s
+  # Allow custom values either by configMap or by secret
+  valuesFrom:
+    - kind: ConfigMap
+      name: oas-secrets-override
+      optional: true
+    - kind: Secret
+      name: oas-secrets-override
+      optional: true
diff --git a/flux2/infrastructure/sources/bitnami.yaml b/flux2/infrastructure/sources/bitnami.yaml
index fc43978de6dbb4ea739a7490cc1a7de81f1cca86..87a14bf7e6a05564f1cd06dc26a4bdb547b30280 100644
--- a/flux2/infrastructure/sources/bitnami.yaml
+++ b/flux2/infrastructure/sources/bitnami.yaml
@@ -4,5 +4,5 @@ kind: HelmRepository
 metadata:
   name: bitnami
 spec:
-  interval: 5m
-  url: https://charts.bitnami.com/bitnami
\ No newline at end of file
+  interval: 1h
+  url: https://charts.bitnami.com/bitnami
diff --git a/flux2/infrastructure/sources/grafana.yaml b/flux2/infrastructure/sources/grafana.yaml
index a31873aba87b3c186a5a9037a6d3d011f1161c0e..642e4ddf451deee6b66635f66da7d89962c61447 100644
--- a/flux2/infrastructure/sources/grafana.yaml
+++ b/flux2/infrastructure/sources/grafana.yaml
@@ -4,5 +4,5 @@ kind: HelmRepository
 metadata:
   name: grafana
 spec:
-  interval: 5m
-  url: https://grafana.github.io/helm-charts
\ No newline at end of file
+  interval: 1h
+  url: https://grafana.github.io/helm-charts
diff --git a/flux2/infrastructure/sources/helm-stable.yaml b/flux2/infrastructure/sources/helm-stable.yaml
index 937aa31399f4c3d65d00089cc400b88e8e8fbd1b..aa6f80a825b49c4b004e9d60c6e283abc9199ddd 100644
--- a/flux2/infrastructure/sources/helm-stable.yaml
+++ b/flux2/infrastructure/sources/helm-stable.yaml
@@ -4,5 +4,5 @@ kind: HelmRepository
 metadata:
   name: helm-stable
 spec:
-  interval: 5m
+  interval: 1h
   url: https://charts.helm.sh/stable
diff --git a/flux2/infrastructure/sources/prometheus-community.yaml b/flux2/infrastructure/sources/prometheus-community.yaml
index 101e68de30857df7049511c9a9d7611fcb875e54..76348e942314d3e72a6368c261fe7f04105abf2e 100644
--- a/flux2/infrastructure/sources/prometheus-community.yaml
+++ b/flux2/infrastructure/sources/prometheus-community.yaml
@@ -4,5 +4,5 @@ kind: HelmRepository
 metadata:
   name: prometheus-community
 spec:
-  interval: 5m
-  url: https://prometheus-community.github.io/helm-charts
\ No newline at end of file
+  interval: 1h
+  url: https://prometheus-community.github.io/helm-charts
diff --git a/flux2/infrastructure/sources/single-sign-on.yaml b/flux2/infrastructure/sources/single-sign-on.yaml
index c9f9ac39895fea18dadf76d262b8e84ddec0f1a0..790c57a0de484bf876a3be74e3870b49e3ea103c 100644
--- a/flux2/infrastructure/sources/single-sign-on.yaml
+++ b/flux2/infrastructure/sources/single-sign-on.yaml
@@ -13,4 +13,4 @@ spec:
   # For all available options, see:
   # https://toolkit.fluxcd.io/components/source/api/#source.toolkit.fluxcd.io/v1beta1.GitRepositoryRef
   ref:
-    branch: master
+    tag: 0.2.9
diff --git a/flux2/infrastructure/sources/vmware-tanzu.yaml b/flux2/infrastructure/sources/vmware-tanzu.yaml
index b3d1aeced724829690dc00736215f433ef30e304..e2dd4fe6924d40a77a27741f9792bffc60cd03b2 100644
--- a/flux2/infrastructure/sources/vmware-tanzu.yaml
+++ b/flux2/infrastructure/sources/vmware-tanzu.yaml
@@ -4,5 +4,5 @@ kind: HelmRepository
 metadata:
   name: vmware-tanzu
 spec:
-  interval: 5m
-  url: https://vmware-tanzu.github.io/helm-charts
\ No newline at end of file
+  interval: 1h
+  url: https://vmware-tanzu.github.io/helm-charts
diff --git a/flux2/infrastructure/sources/wordpress.yaml b/flux2/infrastructure/sources/wordpress.yaml
index 3d103cbfaecbcd2f4f226e859b5660a8b888be21..8a9af6007501e5ff79d022257736891a088081c7 100644
--- a/flux2/infrastructure/sources/wordpress.yaml
+++ b/flux2/infrastructure/sources/wordpress.yaml
@@ -13,5 +13,4 @@ spec:
   # For all available options, see:
   # https://toolkit.fluxcd.io/components/source/api/#source.toolkit.fluxcd.io/v1beta1.GitRepositoryRef
   ref:
-    # tag: 0.2.0
-    branch: 82-move-requirements-to-chart-use-uris-for-remote-repositories
+    tag: 0.2.0
diff --git a/install/ci-overrides/nextcloud-values-override.yaml b/install/ci-overrides/oas-nextcloud-override.yaml
similarity index 91%
rename from install/ci-overrides/nextcloud-values-override.yaml
rename to install/ci-overrides/oas-nextcloud-override.yaml
index 2b63c4c226e27481567029de8e385910014de923..f81c0f172aa2bbf85f89fef49dc2490878893680 100644
--- a/install/ci-overrides/nextcloud-values-override.yaml
+++ b/install/ci-overrides/oas-nextcloud-override.yaml
@@ -2,7 +2,7 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: nextcloud-values-override
+  name: oas-nextcloud-override
 data:
   values.yaml: |
     # By overriding these values, Nextcloud and ONLYOFFICE will work on ACME
diff --git a/install/install-nextcloud.sh b/install/install-nextcloud.sh
index 33553aeb3f727bfc33cef315425df4424212bde3..1f9998817e88e506abb2e46103defd012be5830b 100755
--- a/install/install-nextcloud.sh
+++ b/install/install-nextcloud.sh
@@ -10,4 +10,4 @@ flux create kustomization add-nextcloud \
   --source=GitRepository/openappstack \
   --path="./flux2/cluster/optional/nextcloud" \
   --prune=true \
-  --interval=10m
+  --interval=1h
diff --git a/install/install-openappstack.sh b/install/install-openappstack.sh
index 0b0efe56a6deb9dcff08900589bcef5854597331..4c90eae1cd90500aa306e780dc5b2142ddf966b4 100755
--- a/install/install-openappstack.sh
+++ b/install/install-openappstack.sh
@@ -7,13 +7,19 @@ flux install \
   --watch-all-namespaces=true \
   --namespace=flux-system
 
+# get current git branch name
+branch=${CI_COMMIT_REF_NAME:-}
+[ -z "$branch" ] && branch=$(git rev-parse --abbrev-ref HEAD)
+
+echo "Tracking branch $branch for https://open.greenhost.net/openappstack/openappstack flux repo"
+
 flux create source git openappstack \
   --url=https://open.greenhost.net/openappstack/openappstack \
-  --branch=master \
+  --branch=$branch \
   --interval=1m
 
 flux create kustomization openappstack \
   --source=GitRepository/openappstack \
   --path="./flux2/cluster/base" \
   --prune=true \
-  --interval=10m
+  --interval=1h
diff --git a/install/install-rocketchat.sh b/install/install-rocketchat.sh
index a0364c459d12cea53eda938188c1358e0ae62e64..bbf145ead3293e5e38081211e2c5f9c49975ae18 100755
--- a/install/install-rocketchat.sh
+++ b/install/install-rocketchat.sh
@@ -10,4 +10,4 @@ flux create kustomization add-rocketchat \
   --source=GitRepository/openappstack \
   --path="./flux2/cluster/optional/rocketchat" \
   --prune=true \
-  --interval=10m
+  --interval=1h
diff --git a/install/install-velero.sh b/install/install-velero.sh
index 3b85c1f773d7e6e127e70cc851432ffa516e5388..ade3b6f1ec0df645f9365fbb11f9b7fe03a918df 100755
--- a/install/install-velero.sh
+++ b/install/install-velero.sh
@@ -10,4 +10,4 @@ flux create kustomization add-velero \
   --source=GitRepository/openappstack \
   --path="./flux2/cluster/optional/velero" \
   --prune=true \
-  --interval=10m
+  --interval=1h
diff --git a/install/install-wordpress.sh b/install/install-wordpress.sh
index 2db41fdd637caf22844afca9d5355b5f07c9328a..b734bf6006ea186cd15f9bcb99177e576fd6fc68 100755
--- a/install/install-wordpress.sh
+++ b/install/install-wordpress.sh
@@ -10,4 +10,4 @@ flux create kustomization add-wordpress \
   --source=GitRepository/openappstack \
   --path="./flux2/cluster/optional/wordpress" \
   --prune=true \
-  --interval=10m
+  --interval=1h
diff --git a/openappstack/__main__.py b/openappstack/__main__.py
index e573294675c98108f60d5d9fb9899b9798a2d082..8d17fb5f6c0f2a72efdaa80eca768ae12f5810cf 100755
--- a/openappstack/__main__.py
+++ b/openappstack/__main__.py
@@ -23,12 +23,10 @@ import logging
 from math import floor
 import os
 import sys
+from subprocess import Popen, PIPE
 import greenhost_cloud
-from behave.__main__ import main as behave_main
 from openappstack import name, cluster, ansible
 
-ALL_TESTS = ['behave']
-
 # We're limiting to 50, because we use subdomains, the current longest of which
 # is 7 characters (`office.`). Max CN length is 63 characters, so we have some
 # wiggle room for longer subdomains in the future.
@@ -77,10 +75,20 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
         type=str,
         help='hostname of the machine. If not provided for a new machine, the '
              'cluster name is used.')
+
     create_parser.add_argument(
-        '--prometheus-enable-ingress',
-        action='store_true',
-        help=("Use this if you want to access OpenAppStack's prometheus api from outside"))
+        '--docker-mirror-server',
+        help=("Server name for a docker mirror"))
+    create_parser.add_argument(
+        '--docker-mirror-endpoint',
+        help=("Full endpoint for a docker mirror"))
+    create_parser.add_argument(
+        '--docker-mirror-username',
+        help=("Username for `docker login` to docker mirror"))
+    create_parser.add_argument(
+        '--docker-mirror-password',
+        help=("Password for `docker login` to docker mirror"))
+
 
     group = create_parser.add_mutually_exclusive_group(required=True)
 
@@ -135,18 +143,6 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
         help=('Truncate subdomain so subdomain and domain are shorter than '
               '{} characters.'.format(MAX_DOMAIN_LENGTH)))
 
-    droplet_creation_group.add_argument(
-        '--acme-staging',
-        action='store_true',
-        help=("Use this for development clusters. Uses Let's Encrypt's "
-              'staging environment'))
-
-    droplet_creation_group.add_argument(
-        '--local-flux',
-        action='store_true',
-        help=("Use this for development clusters. Uses an in-cluster "
-              'auto-update feed'))
-
     droplet_creation_group.add_argument(
         '--disk-image-id',
         help=("Custom disk image ID. Use negative value for a custom template "
@@ -178,20 +174,7 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
         metavar=['PARAM[=VALUE]'],
         action='append',
         nargs=1,
-        help='forward ansible parameters to the ansible-playbook call. If '
-          '--install-kubernetes is supplied, parameters are passed to both '
-          'ansible playbooks')
-
-    install_parser.add_argument(
-        '--install-kubernetes',
-        action='store_true',
-        help="Installs k3s on your VPS before installing OpenAppStack")
-
-    install_parser.add_argument(
-        '--no-install-openappstack',
-        action='store_true',
-        help=("Skip openappstack installation. This is useful if you only "
-              "want a kubernetes cluster."))
+        help='forward ansible parameters to the ansible-playbook call.')
 
     test_parser = subparsers.add_parser(
         'test',
@@ -200,41 +183,18 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
     test_parser.set_defaults(func=test)
 
     test_parser.add_argument(
-        '--run-test',
-        action='append',
-        help=('Run only a specific kind of test. If not provided, all tests '
-              'are run. The currently available tests are: ') \
-            + ','.join(ALL_TESTS))
-
-    test_parser.add_argument(
-        '--behave-rerun-failing',
+        '--observe',
         action='store_true',
-        help=('Run behave with @rerun_failing.features'))
+        help=('Enables headful mode and runs script with 3000ms delay.'))
     test_parser.add_argument(
-        '--behave-tags',
-        nargs='+',
-        default=[],
-        help=('Only run behave tests with these tags'))
-    test_parser.add_argument(
-        '--behave-headless',
-        action='store_true',
-        help=('Run behave in headless mode'))
-    test_parser.add_argument(
-        '--behave-ignore-certificate-errors',
-        action='store_true',
-        help=('Ignore certificate errors, needed if you use i.e. the Letsencrypt ACME staging CA'))
-    test_parser.add_argument(
-        '--behave-param',
-        metavar=['PARAM[=VALUE]'],
-        action='append',
-        nargs=1,
-        default=[],
-        help=('Pass additional behave options (like "-D browser=firefox"'))
+        '--apps',
+        default='all',
+        help=('Which apps to test (e.g. "nextcloud,wordpress"). Defaults to '
+              '"all".'))
 
     info_parser = subparsers.add_parser(
         'info',
         help=("Show information about a cluster"))
-
     info_parser.set_defaults(func=info)
     info_parser.add_argument(
         '--ip-address',
@@ -242,6 +202,11 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
         help=('Only output the IP address of the machine')
     )
 
+    secrets_parser = subparsers.add_parser(
+        'secrets',
+        help=("Show OAS cluster secrets"))
+    secrets_parser.set_defaults(func=secrets)
+
     args = parser.parse_args()
     loglevel = logging.DEBUG if args.verbose else logging.INFO
     init_logging(log, loglevel)
@@ -278,6 +243,16 @@ def info(clus, args):
     clus.print_info(args)
 
 
+def secrets(clus, args):
+    """
+    Dumps all OAS cluster secrets to stdout
+
+    :param cluster.Cluster clus: cluster to show secrets for
+    """
+    clus.load_data()
+    clus.dump_secrets()
+
+
 def create(clus, args):  # pylint: disable=too-many-branches
     """
     Parses arguments for the 'create' subcommand
@@ -326,9 +301,6 @@ def create(clus, args):  # pylint: disable=too-many-branches
     if args.disk_image_id:
         clus.disk_image_id = args.disk_image_id
 
-    # Set acme_staging to False so we use Let's Encrypt's live environment
-    if args.acme_staging:
-        clus.acme_staging = True
     if args.create_droplet:
         clus.create_droplet(ssh_key_id=args.ssh_key_id, hostname=args.create_hostname)
         if args.verbose:
@@ -338,14 +310,26 @@ def create(clus, args):  # pylint: disable=too-many-branches
     elif args.droplet_id:
         clus.set_info_by_droplet_id(args.droplet_id)
     elif args.ip_address:
-        if not args.create_hostname:
-            log.error('--create-hostname required when using --ip-address')
-            sys.exit(2)
-        clus.set_info_by_ip_and_hostname(args.ip_address, args.create_hostname)
+        if args.create_hostname:
+            create_hostname = args.create_hostname
+        else:
+            log.info('No --create-hostname provided, using cluster name "%s"',
+                     args.cluster_name)
+            create_hostname = args.cluster_name
+        clus.set_info_by_ip_and_hostname(args.ip_address, create_hostname)
     elif args.droplet_hostname:
         clus.set_info_by_hostname(args.droplet_hostname)
 
-    # Write inventory.yml and settings.yml files
+    if args.docker_mirror_server:
+        clus.docker_mirror_server = args.docker_mirror_server
+    if args.docker_mirror_endpoint:
+        clus.docker_mirror_endpoint = args.docker_mirror_endpoint
+    if args.docker_mirror_username:
+        clus.docker_mirror_username = args.docker_mirror_username
+    if args.docker_mirror_password:
+        clus.docker_mirror_password = args.docker_mirror_password
+
+    # Write inventory.yml
     clus.write_cluster_files()
 
     if args.create_domain_records:
@@ -364,67 +348,94 @@ def install(clus, args):
     """
     clus.load_data()
 
-    if args.install_kubernetes:
-        ansible.run_ansible(
-            clus,
-            os.path.join(ansible.ANSIBLE_PATH, 'install-kubernetes.yml'),
-            args.ansible_param)
-
-    if not args.no_install_openappstack:
-        ansible.run_ansible(
-            clus,
-            os.path.join(ansible.ANSIBLE_PATH, 'install-openappstack.yml'),
-            args.ansible_param)
+    ansible.run_ansible(
+        clus,
+        os.path.join(ansible.ANSIBLE_PATH, 'install-kubernetes.yml'),
+        args.ansible_param)
 
 
 def test(clus, args):
     """
-    Runs behave or testinfra test. Overwrites behave_path/behave.ini!
+    Runs taiko tests.
 
     :param cluster.Cluster clus: Cluster object to run tests on
     :param argparse.Namespace args: Command line arguments
     """
 
-    # At the moment we only have one type if test, but more tests will be added
-    # to this in the future. If args.run_test is empty, we run all the tests
-    run_test = args.run_test
-    if not run_test:
-        run_test = ALL_TESTS
-
-    if 'behave' in run_test:
-        behave_path = os.path.join(os.path.dirname(__file__), '..', 'test',
-                                   'behave')
-        # Run from the behave directory so behave automatically loads all the
-        # necessary files
-        os.chdir(behave_path)
-        clus.load_data()
-        behave_ini = os.path.join(behave_path, 'behave.ini')
-        clus.write_behave_config(behave_ini)
-        command = []
-        if args.behave_rerun_failing:
-            command.append('@rerun_failing.features')
-        if args.behave_headless:
-            command.append('-D headless=True')
-        if args.behave_ignore_certificate_errors:
-            command.append('-D ignore_certificate_errors=True')
-        for tag in args.behave_tags:
-            log.info(command)
-            command.append('-t {tag}'.format(tag=tag))
-        if args.behave_param:
-            for param in args.behave_param:
-                if len(param) > 1:
-                    log.warning('More than 1 parameter. Ignoring the rest! '
-                                'Use --behave-param several times to supply '
-                                'more than 1 parameter')
-                param = param[0]
-                command.append(param)
-        log.info('Running behave command %s', command)
-        return_code = behave_main(command)
-
-        # Remove behave.ini so we don't leave secrets hanging around.
-        os.remove(behave_ini)
-
-        sys.exit(return_code)
+    taiko_path = os.path.join(os.path.dirname(__file__), '..', 'test', 'taiko')
+    # Run from the taiko directory so taiko automatically loads all the
+    # necessary files
+    os.chdir(taiko_path)
+    clus.load_data()
+    command = ['taiko']
+    if args.observe:
+        command.append('--observe')
+    command.append('apps.js')
+
+    # Set env vars
+
+    # SSO tests currently only work with valid letsencrypt production certs.
+    # Therefore we disable SSO tests for now, until
+    # https://open.greenhost.net/openappstack/single-sign-on/-/issues/62
+    # is fixed.
+    #
+    #  sso_username = clus.get_password_from_kubernetes(
+    #      'oas-single-sign-on-variables',
+    #      'userbackend_admin_username',
+    #      'flux-system'
+    #  )
+    #  sso_password = clus.get_password_from_kubernetes(
+    #      'oas-single-sign-on-variables',
+    #      'userbackend_admin_password',
+    #      'flux-system'
+    #  )
+    #  os.environ["SSO_USERNAME"] = sso_username
+    #  os.environ["SSO_PASSWORD"] = sso_password
+
+    if "nextcloud" in args.apps or args.apps=='all':
+        nextcloud_password = clus.get_password_from_kubernetes(
+            'oas-nextcloud-variables',
+            'nextcloud_password',
+            'flux-system'
+        )
+        os.environ["NEXTCLOUD_PASSWORD"] = nextcloud_password
+
+    if "rocketchat" in args.apps or args.apps=='all':
+        rocketchat_password = clus.get_password_from_kubernetes(
+            'oas-rocketchat-variables',
+            'rocketchat_admin_password',
+            'flux-system'
+        )
+        os.environ["ROCKETCHAT_PASSWORD"] = rocketchat_password
+
+    if "wordpress" in args.apps or args.apps=='all':
+        wordpress_password = clus.get_password_from_kubernetes(
+            'oas-wordpress-variables',
+            'wordpress_admin_password',
+            'flux-system'
+        )
+        os.environ["WORDPRESS_PASSWORD"] = wordpress_password
+
+    if "grafana" in args.apps or args.apps=='all':
+        grafana_password = clus.get_password_from_kubernetes(
+            'oas-kube-prometheus-stack-variables',
+            'grafana_admin_password',
+            'flux-system'
+        )
+        os.environ["GRAFANA_PASSWORD"] = grafana_password
+
+    os.environ["TAIKO_TESTS"] = args.apps
+    os.environ['DOMAIN'] = clus.domain
+
+    log.info('Running taiko command %s', command)
+
+    with Popen(command, stdout=PIPE, bufsize=1,
+               universal_newlines=True) as taiko:
+        for line in taiko.stdout:
+            print(line, end='')
+
+    return_code = (taiko.returncode)
+    sys.exit(return_code)
 
 def create_domain_records(domain, droplet_ip, subdomain=None):
     """
diff --git a/openappstack/ansible.py b/openappstack/ansible.py
index 01702e509452b3c8743ef650eb7c326a29e5d85f..e9797f7d9b8af7784f0c41fc6c440cd8ddc6d671 100644
--- a/openappstack/ansible.py
+++ b/openappstack/ansible.py
@@ -38,7 +38,7 @@ def run_ansible(clus, playbook, ansible_params=None):
         ['-e', 'cluster_dir=' + clus.cluster_dir]
 
     ansible_playbook_command += \
-        ['-i', clus.inventory_file, '-e', '@' + clus.settings_file, playbook]
+        ['-i', clus.inventory_file, playbook]
 
     log.info('Running "%s" in ansible directory "%s"',
              ansible_playbook_command,
@@ -75,6 +75,22 @@ def create_inventory(cluster):
 
     inventory['all']['hosts'][cluster.hostname]['ansible_host'] = \
         cluster.ip_address
+    inventory['all']['hosts'][cluster.hostname]['domain'] = \
+        cluster.domain
+
+    if cluster.docker_mirror_endpoint \
+            and cluster.docker_mirror_server \
+            and cluster.docker_mirror_username \
+            and cluster.docker_mirror_password:
+        docker_mirror = {}
+        docker_mirror['enabled'] = True
+        docker_mirror['endpoint'] = cluster.docker_mirror_endpoint
+        docker_mirror['username'] = cluster.docker_mirror_username
+        docker_mirror['password'] = cluster.docker_mirror_password
+        docker_mirror['server'] = cluster.docker_mirror_server
+        inventory['all']['hosts'][cluster.hostname]['docker_mirror'] = \
+            docker_mirror
+
     inventory['all']['children']['master']['hosts'] = cluster.hostname
     inventory['all']['children']['worker']['hosts'] = cluster.hostname
 
diff --git a/openappstack/cluster.py b/openappstack/cluster.py
index 4106a1e1323062383b74331a75f5880b303a172e..858829af8ac8a36c60b25ef935f50e431f12787c 100644
--- a/openappstack/cluster.py
+++ b/openappstack/cluster.py
@@ -1,7 +1,6 @@
 """Contains code for managing the files related to an OpenAppStack cluster."""
 
 import base64
-import configparser
 import logging
 import os
 import sys
@@ -54,40 +53,39 @@ class Cluster:
         self.ip_address = None
         self.hostname = None
         self.domain = None
-        # By default, use Let's Encrypt's live environment
-        self.acme_staging = False
         # Set this to False if the data needs to be (re)loaded from file
         self.data_loaded = False
-        # Load data from inventory.yml and settings.yml
+        # Load data from inventory.yml
         if load_data:
             self.load_data()
         # Can be used to use a custom disk image.
         self.disk_image_id = DEFAULT_IMAGE
+        self.docker_mirror_server = None
+        self.docker_mirror_endpoint = None
+        self.docker_mirror_username = None
+        self.docker_mirror_password = None
 
     def load_data(self):
         """
-        Loads cluster data from inventory.yml and settings.yml files
+        Loads cluster data from the inventory.yml file
 
         Set self.data_loaded to False if this function should re-read data
         from file.
         """
         if not self.data_loaded:
-            with open(self.settings_file, 'r') as stream:
-                settings = yaml.safe_load(stream)
-                self.ip_address = settings['ip_address']
-                self.domain = settings['domain']
-
-            log.debug("""Read data from settings.yml:
-                ip address: %s
-                domain: %s""", self.ip_address, self.domain)
-
             with open(self.inventory_file, 'r') as stream:
                 inventory = yaml.safe_load(stream)
+                print(inventory)
                 # Work with the master node from the inventory
                 self.hostname = inventory['all']['children']['master']['hosts']
-
-            log.debug(
-                'Read data from inventory.yml:\n\thostname: %s', self.hostname)
+                self.domain = \
+                        inventory['all']['hosts'][self.hostname]['domain']
+                self.ip_address = \
+                        inventory['all']['hosts'][self.hostname]['ansible_host']
+            log.debug("""Read data from inventory:
+                ip address: %s
+                domain: %s
+                hostname: %s""", self.ip_address, self.domain, self.hostname)
         else:
             log.debug('Not loading cluster data from file. Set '
                       'Cluster.data_loaded to False if you want a reload.')
@@ -155,51 +153,15 @@ class Cluster:
         self.hostname = hostname
 
     def write_cluster_files(self):
-        """Creates an inventory.yml and settings.yml file for the cluster"""
+        """Creates an inventory.yml and dotenv file for the cluster"""
         self.make_cluster_directories()
         ansible.create_inventory(self)
-
-        # Create settings
-        with open(os.path.join(ansible.ANSIBLE_PATH, 'group_vars',
-                               'all', 'settings.yml.example'),
-                  'r') as stream:
-            settings = yaml.safe_load(stream)
-
-        settings['ip_address'] = self.ip_address
-        settings['domain'] = self.domain
-        settings['admin_email'] = 'admin@{0}'.format(self.domain)
-        settings['cluster_dir'] = self.cluster_dir
-
-        # Configure apps to handle invalid certs i.e. from
-        # Letsencrypt staging API
-        settings['acme_staging'] = self.acme_staging
-        nextcloud_extra_values = """
-          onlyoffice:
-            unauthorizedStorage: true
-            httpsHstsEnabled: false
-        """
-        if self.acme_staging:
-            settings['nextcloud_extra_values'] = \
-                yaml.load(nextcloud_extra_values)
-
-        file_contents = yaml.safe_dump(settings, default_flow_style=False)
-        log.debug(file_contents)
-
-        # Create CLUSTER_DIR/group_vars/all/ if non-existent
-        vars_dir = os.path.dirname(self.settings_file)
-        if not os.path.exists(vars_dir):
-            os.makedirs(vars_dir)
-
-        with open(self.settings_file, 'w') as stream:
-            stream.write(file_contents)
-            log.info("Created %s", self.settings_file)
-
         dotenv_file = """CLUSTER_NAME={name}
 CLUSTER_DIR={cluster_dir}
 IP_ADDRESS={ip_address}
 HOSTNAME={hostname}
 FQDN={domain}
-KUBECONFIG={secret_dir}/kube_config_cluster.yml
+KUBECONFIG={cluster_dir}/kube_config_cluster.yml
 """
 
         with open(self.dotenv_file, 'w') as stream:
@@ -209,7 +171,6 @@ KUBECONFIG={secret_dir}/kube_config_cluster.yml
                 ip_address=self.ip_address,
                 hostname=self.hostname,
                 domain=self.domain,
-                secret_dir=self.secret_dir
             ))
             log.info("Created %s", self.dotenv_file)
 
@@ -220,100 +181,57 @@ KUBECONFIG={secret_dir}/kube_config_cluster.yml
     def make_cluster_directories(self):
         """Make sure the cluster's file directory exists"""
         os.makedirs(self.cluster_dir, exist_ok=True)
-        os.makedirs(self.secret_dir, exist_ok=True)
 
     @property
     def inventory_file(self):
         """Path to the ansible inventory.yml for this cluster"""
         return os.path.join(self.cluster_dir, 'inventory.yml')
 
-    @property
-    def settings_file(self):
-        """Path to the ansible settings.yml for this cluster"""
-        return os.path.join(self.cluster_dir, 'group_vars', 'all',
-                            'settings.yml')
-
     @property
     def dotenv_file(self):
         """Path to the .cluster.env file with relevant environment variables"""
         return os.path.join(self.cluster_dir, '.cluster.env')
 
 
-    @property
-    def secret_dir(self):
-        """Path where all the passwords for cluster admins are saved"""
-        return os.path.join(self.cluster_dir, 'secrets')
-
-    def write_behave_config(self, config_path):
+    def dump_secrets(self):
         """
-        Write behave config file for the cluster.
-
-        :param str config_path: Configuration is written to config_path (e.g.
-            /home/you/openappstack/test/behave.ini). If config_path already
-            exists, the program is aborted.
+        Shows all OAS cluster secrets.
         """
-        if os.path.isfile(config_path):
-            log.error('%s file already exists, not overwriting '
-                      'file! Remove the file if you want to run behave. '
-                      'Program will exit now', config_path)
-            sys.exit(2)
-
-        grafana_admin_password = self.get_password_from_kubernetes(
-            'kube-prometheus-stack-grafana',
-            'admin-password',
-            'oas')
-
-        rocketchat_admin_password = self.get_password_from_kubernetes(
-            'oas-rocketchat-variables',
-            'rocketchat_admin_password',
-            'flux-system')
-
-        nextcloud_admin_password = self.get_password_from_kubernetes(
-            'nc-nextcloud',
-            'nextcloud-password',
-            'oas-apps')
-
-        wordpress_admin_password = self.get_password_from_kubernetes(
-            'oas-wordpress-variables',
-            'wordpress_admin_password',
-            'flux-system')
-
-        behave_config = configparser.ConfigParser()
-        behave_config['behave'] = {}
-        behave_config['behave']['format'] = 'rerun'
-        behave_config['behave']['outfiles'] = 'rerun_failing.features'
-        behave_config['behave']['show_skipped'] = 'false'
-
-        behave_config['behave.userdata'] = {}
-
-        behave_config['behave.userdata']['nextcloud.url'] = \
-            'https://files.{}'.format(self.domain)
-        behave_config['behave.userdata']['nextcloud.username'] = 'admin'
-        behave_config['behave.userdata']['nextcloud.password'] = \
-            nextcloud_admin_password
-        behave_config['behave.userdata']['onlyoffice.url'] = \
-            'https://office.{}/welcome'.format(self.domain)
-
-        behave_config['behave.userdata']['rocketchat.url'] = \
-            'https://chat.{}'.format(self.domain)
-        behave_config['behave.userdata']['rocketchat.username'] = 'admin'
-        behave_config['behave.userdata']['rocketchat.password'] = \
-            rocketchat_admin_password
-
-        behave_config['behave.userdata']['wordpress.url'] = \
-            'https://www.{}/wp-login.php'.format(self.domain)
-        behave_config['behave.userdata']['wordpress.username'] = 'admin'
-        behave_config['behave.userdata']['wordpress.password'] = \
-            wordpress_admin_password
-
-        behave_config['behave.userdata']['grafana.url'] = \
-            'https://grafana.{}'.format(self.domain)
-        behave_config['behave.userdata']['grafana.username'] = 'admin'
-        behave_config['behave.userdata']['grafana.password'] = \
-            grafana_admin_password
-
-        with open(config_path, 'w') as config_file:
-            behave_config.write(config_file)
+        all_secrets = {
+            'flux-system': {
+                'oas-kube-prometheus-stack-variables': ['grafana_admin_password'],
+                'oas-nextcloud-variables': [
+                    'nextcloud_mariadb_password',
+                    'nextcloud_mariadb_root_password',
+                    'nextcloud_password',
+                    'onlyoffice_jwt_secret',
+                    'onlyoffice_postgresql_password',
+                    'onlyoffice_rabbitmq_password'],
+                'oas-rocketchat-variables': [
+                    'rocketchat_admin_password',
+                    'mongodb_root_password',
+                    'mongodb_password'],
+                'oas-single-sign-on-variables': [
+                    'userbackend_admin_username',
+                    'userbackend_admin_password',
+                    'userbackend_postgres_password',
+                    'hydra_system_secret'],
+                'oas-wordpress-variables': [
+                    'wordpress_admin_password',
+                    'wordpress_mariadb_password',
+                    'wordpress_mariadb_root_password']
+            }
+
+        }
+
+        for sec in all_secrets.values():
+            for app, app_secrets in sec.items():
+                for app_secret in app_secrets:
+                    secret = self.get_password_from_kubernetes(
+                        app,
+                        app_secret,
+                        'flux-system')
+                    print(app_secret + '=' + secret)
 
     def get_password_from_kubernetes(self, secret, key, namespace):
         """
@@ -325,19 +243,19 @@ KUBECONFIG={secret_dir}/kube_config_cluster.yml
             encoded password
         :param string namespace: The namespace the secret is in
         """
-        kubeconfig = os.path.join(self.secret_dir, 'kube_config_cluster.yml')
+        kubeconfig = os.path.join(self.cluster_dir, 'kube_config_cluster.yml')
         config.load_kube_config(config_file=kubeconfig)
         api = client.CoreV1Api()
         try:
             secret_data = api.read_namespaced_secret(secret, namespace)
-        except client.exceptions.ApiException:
+        except client.rest.ApiException:
             print(f"Secret {secret} not found in namespace '{namespace}'")
             return "password not found"
         try:
             password = secret_data.data[key]
         except KeyError:
             print(f"Could not get password from secret '{secret}' in namespace"
-                   " '{namespace}' with key '{key}'")
+                  " '{namespace}' with key '{key}'")
             return "password not found"
 
         return base64.b64decode(password).decode('utf-8')
@@ -360,18 +278,16 @@ KUBECONFIG={secret_dir}/kube_config_cluster.yml
 
     Configuration:
       - Inventory file: {inventory_file}
-      - Settings file: {settings_file}
 
     Kubectl:
 
     To use kubectl with this cluster, copy-paste this in your terminal:
 
-    export KUBECONFIG={secret_dir}/kube_config_cluster.yml"""
+    export KUBECONFIG={cluster_dir}/kube_config_cluster.yml"""
             print(info_string.format(
                 name=self.name,
                 ip_address=self.ip_address,
                 hostname=self.hostname,
                 domain=self.domain,
                 inventory_file=self.inventory_file,
-                settings_file=self.settings_file,
-                secret_dir=self.secret_dir))
+                cluster_dir=self.cluster_dir))
diff --git a/requirements.in b/requirements.in
index ce21c81430aa32ef88eecb355f0ec37f9fccf6e7..9520c07b26e88af094939e9f1194c0a8a3fc8eb8 100644
--- a/requirements.in
+++ b/requirements.in
@@ -9,8 +9,6 @@
 ansible>=2.9.10,<2.10
 # needed for test_dns.py
 dnspython>=2.1.0
-# Needed for ansible k8s resource
-openshift>=0.12.0
 # Needed for testinfra using the ansible module
 paramiko>=2.7.0
 psutil>=5.5.0
@@ -22,5 +20,4 @@ tld>=0.12.5
 setuptools>=40.6.2
 wheel>=0.33.1
 -e git+https://open.greenhost.net/greenhost/cloud-api#egg=greenhost_cloud
--e git+https://open.greenhost.net/openappstack/oas_behave@c05009a#egg=oas_behave
 passlib>=1.7.2
diff --git a/requirements.txt b/requirements.txt
index d3bdb09e1901a6c433812a481a8ddce1d9a890e7..37719e0d57399159432edebb2e9f33c96a26c9c1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,18 +6,12 @@
 #
 -e git+https://open.greenhost.net/greenhost/cloud-api#egg=greenhost_cloud
     # via -r requirements.in
--e git+https://open.greenhost.net/openappstack/oas_behave@c05009a#egg=oas_behave
-    # via -r requirements.in
 ansible==2.9.18
     # via -r requirements.in
 attrs==20.3.0
     # via pytest
 bcrypt==3.2.0
     # via paramiko
-behave-webdriver==0.3.0
-    # via oas-behave
-behave==1.2.6
-    # via behave-webdriver
 cachetools==4.2.1
     # via google-auth
 certifi==2020.12.5
@@ -60,12 +54,6 @@ packaging==20.9
     # via pytest
 paramiko==2.7.2
     # via -r requirements.in
-parse-type==0.5.2
-    # via behave
-parse==1.19.0
-    # via
-    #   behave
-    #   parse-type
 passlib==1.7.4
     # via -r requirements.in
 pluggy==0.13.1
@@ -119,16 +107,12 @@ ruamel.yaml.clib==0.2.2
     # via ruamel.yaml
 ruamel.yaml==0.16.13
     # via openshift
-selenium==3.141.0
-    # via behave-webdriver
 six==1.15.0
     # via
     #   bcrypt
-    #   behave
     #   google-auth
     #   kubernetes
     #   openshift
-    #   parse-type
     #   pynacl
     #   pyopenssl
     #   python-dateutil
@@ -145,7 +129,6 @@ urllib3==1.26.4
     # via
     #   kubernetes
     #   requests
-    #   selenium
 websocket-client==0.58.0
     # via kubernetes
 wheel==0.36.2
diff --git a/test/behave/features/environment.py b/test/behave/features/environment.py
deleted file mode 100644
index 2cdc9f865c58d2f652a072702a4ce6547ff77ab2..0000000000000000000000000000000000000000
--- a/test/behave/features/environment.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from oas_behave.environment import *
-
-
-def before_tag(context, tag):
-    """Define steps run before each tag."""
-
-    userdata = context.config.userdata
-    if tag == 'kube-prometheus-stack':
-        context.grafana = get_values(userdata, 'grafana')
-
-    if tag == 'nextcloud':
-        context.nextcloud = get_values(userdata, 'nextcloud')
-
-    if tag == 'rocketchat':
-        context.rocketchat = get_values(userdata, 'rocketchat')
-
-    if tag == 'wordpress':
-        context.wordpress = get_values(userdata, 'wordpress')
diff --git a/test/behave/features/kube-prometheus-stack.feature b/test/behave/features/kube-prometheus-stack.feature
deleted file mode 100644
index 52cca2246f35031a5dc4437032b6aed5e5e37cdd..0000000000000000000000000000000000000000
--- a/test/behave/features/kube-prometheus-stack.feature
+++ /dev/null
@@ -1,26 +0,0 @@
-@kube-prometheus-stack
-Feature: Test grafana admin login
-  As an OAS admin
-  I want to be able to login to grafana as the user admin
-  And I want to be able to look at the logs
-
-Scenario: Open grafana
-  Given I have closed all but the first window
-  And I open the grafana URL
-  Then I wait on element "//input[@name='user']" for 25000ms to be visible
-  And I expect that the title is "Grafana"
-  And I expect that element "//input[@name='password']" is visible
-  And I expect that the path is "/login"
-
-Scenario: Login to grafana
-  Given the element "//input[@name='user']" is visible
-  When I enter the "grafana" "username" in the inputfield "//input[@name='user']"
-  And I enter the "grafana" "password" in the inputfield "//input[@name='password']"
-  And I click on the button "//button[@aria-label='Login button']"
-  Then I wait on element "sidemenu.sidemenu" for 60000ms to be visible
-  And I expect that the path is "/"
-
-Scenario: As an admin I want to look at the helm-controller logs
-  When I open the grafana explore helm-controller URL
-  Then I wait on element ".graph-panel" for 25000ms to be visible
-  And I expect that element ".datapoints-warning" does not exist
diff --git a/test/behave/features/nextcloud.feature b/test/behave/features/nextcloud.feature
deleted file mode 100644
index eeb638db1624a598ef9dcb46e83ea9f4267d79e2..0000000000000000000000000000000000000000
--- a/test/behave/features/nextcloud.feature
+++ /dev/null
@@ -1,54 +0,0 @@
-@nextcloud
-Feature: Test nextcloud admin login
-  As an OAS admin
-  I want to be able to login to nextcloud as the user admin
-  And I want to be able to open a document in OnlyOffice
-
-Scenario: Test OnlyOffice welcome screen
-  Given I have closed all but the first window
-  And I open the onlyoffice URL
-  Then I wait on element "#status-ok-icon" for 1000ms to exist
-
-Scenario: Open nextcloud
-  When I open the nextcloud URL
-  Then I wait on element "input#user" for 25000ms to be visible
-  And  I expect that element "input#user" is visible
-
-Scenario: Login to nextcloud
-  Given the element "input#user" is visible
-  When I enter the "nextcloud" "username" in the inputfield "input#user"
-  And I enter the "nextcloud" "password" in the inputfield "input#password"
-  And I click on the button "input#submit-form"
-  Then I expect that element ".logo" does exist
-  And I expect that cookie "nc_session_id" exists
-  And I expect that cookie "nc_token" exists
-  And I expect that cookie "nc_username" exists
-
-Scenario: Close welcome to nextcloud modal
-  When I close the nextcloud welcome wizard if it exists
-  Then I wait on element "#firstrunwizard" for 1000ms to not exist
-  And I wait on element "[aria-label='Files']" for 5000ms to be visible
-
-Scenario: Create a new document in OnlyOffice
-  When I click on the element "[aria-label='Files']"
-  # Unfortunaty there's no check if the element that's already visible
-  # is also clickable.
-  And I wait on element "[class='button new']" to be clickable
-  And I click on the button "[class='button new']"
-  And I click on the element "[data-action='onlyofficeDocx']"
-  And I add a random string to the inputfield ".filenameform input[type=text]"
-  And I click on the element "input.icon-confirm"
-  And I focus the last opened tab
-  Then I expect a new tab has been opened
-  And I expect that element "div.toast-error" does not exist
-
-Scenario: Assert the bold button is not activated
-  When I wait for the iframe named "frameEditor" and switch to it
-  Then I wait on element "div.asc-loadmask" for 20000ms to be visible
-  And I wait on element "div.asc-loadmask" for 20000ms to not exist
-  And I wait on element "[id='id-toolbar-btn-bold']" for 20000ms to be visible
-  And I expect that element "[id='id-toolbar-btn-bold']" does not have the class "active"
-
-Scenario: Active the bold button
-  When I click on the element "[id='id-toolbar-btn-bold']"
-  Then I expect that element "[id='id-toolbar-btn-bold']" has the class "active"
diff --git a/test/behave/features/rocketchat.feature b/test/behave/features/rocketchat.feature
deleted file mode 100644
index 0351712775c052b814567dbd180a01b364e0d19b..0000000000000000000000000000000000000000
--- a/test/behave/features/rocketchat.feature
+++ /dev/null
@@ -1,22 +0,0 @@
-@rocketchat
-Feature: Test rocketchat admin login
-  As an OAS admin
-  I want to be able to login to rocketchat as the user admin
-
-Scenario: Open rocketchat
-  Given I have closed all but the first window
-  And I open the rocketchat URL
-  Then I wait on element "//input[@name='emailOrUsername']" for 25000ms to be visible
-  And I expect that element "#pass" is visible
-
-Scenario: See SSO button
-  When I open the rocketchat URL
-  Then I wait on element "//input[@name='emailOrUsername']" for 25000ms to be visible
-  Then I wait on element "//button[@title='Openappstack']" for 5000ms to be visible
-
-Scenario: Login to rocketchat
-  Given the element "//input[@name='emailOrUsername']" is visible
-  When I enter the "rocketchat" "username" in the inputfield "//input[@name='emailOrUsername']"
-  And I enter the "rocketchat" "password" in the inputfield "#pass"
-  And I click on the button ".login"
-  Then I wait on element ".rooms-list" for 25000ms to be visible
diff --git a/test/behave/features/steps/steps.py b/test/behave/features/steps/steps.py
deleted file mode 100644
index b4d93bb6c4bae413093952f24d22a8565382248d..0000000000000000000000000000000000000000
--- a/test/behave/features/steps/steps.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from oas_behave.common_steps import *
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-
-@when(u'I open the rocketchat URL')
-@given(u'I open the rocketchat URL')
-def step_impl(context):
-    """Open rocketchat URL."""
-    context.behave_driver.get(context.rocketchat['url'])
-
-@when(u'I open the wordpress URL')
-@given(u'I open the wordpress URL')
-def step_impl(context):
-    """Open wordpress URL."""
-    context.behave_driver.get(context.wordpress['url'])
-
-@when(u'I open the grafana explore helm-controller URL')
-@given(u'I open the grafana explore helm-controller URL')
-def step_impl(context):
-    """Open wordpress URL."""
-    helm_controller_url = str(context.grafana["url"]) + '/explore?orgId=1&left=["now-1h","now","Loki",{"expr":"{app=\\\"helm-controller\\\"}"}]'
-    print(helm_controller_url)
-    context.behave_driver.get(helm_controller_url)
-
-@when(u'I wait on element "{element}" to be clickable')
-@given(u'I wait on element "{element}" to be clickable')
-def step_impl(context, element):
-    """Wait for element ro be clickable."""
-    wait = WebDriverWait(context.behave_driver, 30)
-    wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "button.new")))
-
-@when(u'I wait for the iframe named "{name}" and switch to it')
-@given(u'I wait for the iframe named "{name}" and switch to it')
-def step_impl(context, name):
-    """Wait for the iframe with given name and switch to it."""
-    wait = WebDriverWait(context.behave_driver, 30)
-    wait.until(EC.frame_to_be_available_and_switch_to_it((By.NAME, name)))
diff --git a/test/behave/features/wordpress.feature b/test/behave/features/wordpress.feature
deleted file mode 100644
index 076173a750b3c0df177e435a42c0d8de7ea69c5e..0000000000000000000000000000000000000000
--- a/test/behave/features/wordpress.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-@wordpress
-Feature: Test WordPress admin login
-  As an OAS admin
-  I want to be able to login to WordPress as the user admin
-
-Scenario: Open WordPress
-  Given I have closed all but the first window
-  And I open the wordpress URL
-  Then I wait on element "#user_login" for 25000ms to be visible
-  And I expect that element "#user_pass" is visible
-
-Scenario: Login to WordPress
-  Given the element "#user_login" is visible
-  When I enter the "wordpress" "username" in the inputfield "#user_login"
-  And I enter the "wordpress" "password" in the inputfield "#user_pass"
-  And I click on the button "#wp-submit"
-  Then I wait on element "#wpwrap" for 25000ms to be visible
diff --git a/test/pytest/test_app_deployments.py b/test/pytest/test_app_deployments.py
index 81a085e9553612cff5ec4c24628df3fbca2e0e9e..4e3c258362bd989b5c842ba1679e4f91405f3bff 100644
--- a/test/pytest/test_app_deployments.py
+++ b/test/pytest/test_app_deployments.py
@@ -140,8 +140,7 @@ def run_around_tests():
     Prepare kube config before running a test
     """
     cluster_dir = os.environ.get("CLUSTER_DIR")
-    kubeconfig = os.path.join(cluster_dir, 'secrets',
-                              'kube_config_cluster.yml')
+    kubeconfig = os.path.join(cluster_dir, 'kube_config_cluster.yml')
     config.load_kube_config(config_file=kubeconfig)
     yield
 
diff --git a/test/pytest/test_certs.py b/test/pytest/test_certs.py
index 2b40c76787a37611288597e42bdd69701dfbd449..b8b67ec8ee1130a882daeec401d562f95a6fc170 100755
--- a/test/pytest/test_certs.py
+++ b/test/pytest/test_certs.py
@@ -114,15 +114,14 @@ def test_cert_validation(host, app): # pylint: disable=too-many-statements
     print('\n')
     for app_name in apps:
         print(f"{app_name}: ")
-        # Use FQDN env var if set, otherwise use domain var from
-        # settings.yml.
+        # Use FQDN env var if set, else use hostname from ansible inventory
         domain = os.environ.get("FQDN")
         if domain:
             print("Using domain %s from FQDN environment variable." % domain)
         else:
             ansible_vars = host.ansible.get_variables()
             domain = ansible_vars["domain"]
-            print("Using domain %s from ansible settings.yml." % domain)
+            print("Using domain %s from ansible inventory." % domain)
 
         add_custom_cert_authorities(certifi.where(),
                                     ['pytest/le-staging-bundle.pem'])
diff --git a/test/pytest/test_dns.py b/test/pytest/test_dns.py
index fd202f4878e038d9f3b2ab823fd008cdbb146e4a..6de14666279c800879a674c24c85b6b30ae390d4 100755
--- a/test/pytest/test_dns.py
+++ b/test/pytest/test_dns.py
@@ -26,10 +26,8 @@ def test_dns(host):
 
     # Get ansible domain
     ansible_vars = host.ansible.get_variables()
-    assert 'domain' in ansible_vars
-    assert 'ip_address' in ansible_vars
     domain = ansible_vars['domain']
-    expected_ip = ansible_vars['ip_address']
+    expected_ip = ansible_vars['ansible_host']
 
     # Get FLD and authorative nameservers for domain
     fld = tld.get_fld(domain, fix_protocol=True)
diff --git a/test/taiko/.eslintrc.yml b/test/taiko/.eslintrc.yml
new file mode 100644
index 0000000000000000000000000000000000000000..16c7b9d511b0afb039336b99d76606ea21d657cc
--- /dev/null
+++ b/test/taiko/.eslintrc.yml
@@ -0,0 +1,9 @@
+env:
+  browser: true
+  es2021: true
+extends:
+  - standard
+parserOptions:
+  ecmaVersion: 12
+  sourceType: module
+rules: {}
diff --git a/test/taiko/apps.js b/test/taiko/apps.js
new file mode 100644
index 0000000000000000000000000000000000000000..c4984fa18b105933563b58f6cbf073a440824b2e
--- /dev/null
+++ b/test/taiko/apps.js
@@ -0,0 +1,132 @@
+// Tests if logging into all apps works using the admin user without SSO
+// For SSO login tests see ./single-sign-on
+const { openBrowser, goto, textBox, into, write, click, toRightOf, above, below, link, press, image, waitFor, closeBrowser, screenshot, setConfig, $, evaluate, closeTab } = require('taiko');
+const assert = require('assert');
+
+(async () => {
+  try {
+    const taikoTests = process.env.TAIKO_TESTS || 'all'
+    const domain = process.env.DOMAIN
+    const globalTimeout = 60000
+
+    // https://docs.taiko.dev/api/setconfig/
+    // setConfig( { observeTime: 1000});
+    setConfig( { observeTime: 0, navigationTimeout: globalTimeout });
+
+    console.log('Executing these tests: ' + taikoTests)
+    await openBrowser()
+
+    // Nextcloud and Onlyoffice
+    if (taikoTests.includes('nextcloud') || taikoTests === 'all') {
+      const nextcloudUrl = 'https://files.' + domain
+      const onlyofficeUrl = 'https://office.' + domain
+      const nextcloudUsername = process.env.NEXTCLOUD_USERNAME || 'admin'
+      const nextcloudPassword = process.env.NEXTCLOUD_PASSWORD
+      console.log('• Nextcloud and Onlyoffice')
+
+      await goto(onlyofficeUrl + '/welcome')
+      await waitFor('Document Server is running')
+
+      await goto(nextcloudUrl)
+      await write(nextcloudUsername, into(textBox('Username')))
+      await write(nextcloudPassword, into(textBox('Password')))
+
+      await click('Log in')
+
+      // Close potential nextcloud first run wizard modal
+      // https://github.com/nextcloud/firstrunwizard/issues/488
+      // Unfortunately, we need to sleep a while since I haven't found a
+      // good way that closes the modal *if* it pops up, since these
+      // tests should also work on subsequent logins.
+      await waitFor(5000)
+      await press('Escape')
+
+      // Open document and type some text
+      await click(link(), above('Add notes'))
+      await click('Document')
+      await press('Enter')
+
+      let italicButtonId = '#id-toolbar-btn-italic'
+      await waitFor($(italicButtonId), globalTimeout)
+
+      // Activate italic button
+      let buttonStateBefore = await evaluate($(italicButtonId), (elem) => {return elem.getAttribute('class')})
+      await assert.ok(!buttonStateBefore.includes('active'))
+      await click($(italicButtonId))
+      let buttonStateAfter = await evaluate($(italicButtonId), (elem) => {return elem.getAttribute('class')})
+      await assert.ok(buttonStateAfter.includes('active'))
+
+      await press(['H', 'i', ' ', 'f', 'r', 'o', 'm', ' ', 't', 'a', 'i', 'k', 'o', '!', 'Enter'])
+
+      // Deactivate italic finally
+      await click($(italicButtonId))
+      await closeTab()
+    }
+
+    // Rocketchat
+    if (taikoTests.includes('rocketchat') || taikoTests === 'all') {
+      const rocketchatUrl = 'https://chat.' + domain
+      const rocketchatUsername = process.env.ROCKETCHAT_USERNAME || 'admin'
+      const rocketchatPassword = process.env.ROCKETCHAT_PASSWORD
+
+      console.log('• Rocketchat')
+      await goto(rocketchatUrl)
+      await write(rocketchatUsername, into(textBox('Username')))
+      await write(rocketchatPassword, into(textBox('Password')))
+      await click('Login')
+      await waitFor('Welcome to Rocket.Chat!')
+    }
+
+    // Wordpress
+    if (taikoTests.includes('wordpress') || taikoTests === 'all') {
+      const wordpressUrl = 'https://www.' + domain
+      const wordpressUsername = process.env.WORDPRESS_USERNAME || 'admin'
+      const wordpressPassword = process.env.WORDPRESS_PASSWORD
+
+      console.log('• Wordpress')
+      await goto(wordpressUrl)
+      await click('Log in')
+      await write(wordpressUsername, into(textBox('Username')))
+      await write(wordpressPassword, into(textBox('Password')))
+      await click('Log in')
+      await assert.ok(await link('Dashboard').exists())
+    }
+
+    // Grafana
+    if (taikoTests.includes('grafana') || taikoTests === 'all') {
+      const grafanaUrl = 'https://grafana.' + domain
+      const grafanaUsername = process.env.GRAFANA_USERNAME || 'admin'
+      const grafanaPassword = process.env.GRAFANA_PASSWORD
+
+      console.log('• Grafana')
+      await goto(grafanaUrl)
+      await write(grafanaUsername, into(textBox('Username')))
+      await write(grafanaPassword, into(textBox('Password')))
+      await click('Log in')
+
+      // Node exporter dashboard
+      // Couldn't select "Manage dashboards" from the sidebar menu easily,
+      // so we just go there
+      await goto(grafanaUrl + '/dashboards')
+      await click(link('Nodes'))
+      await waitFor('CPU Usage')
+
+      // Explore Loki log messages
+      await goto(grafanaUrl + '/explore')
+      await click(image(toRightOf('Explore')))
+      await click('Loki')
+      await click('Log browser')
+      await click('app')
+      await click('cert-manager')
+      await click('grafana')
+      await click('Show logs')
+    }
+
+  } catch (error) {
+    await screenshot()
+    console.error(error)
+    process.exitCode = 1
+  } finally {
+    await closeBrowser()
+  }
+})()
diff --git a/test/taiko/single-sign-on.js b/test/taiko/single-sign-on.js
new file mode 100644
index 0000000000000000000000000000000000000000..8dc60bc76ef3227ad95521fccb8f3fd345f4d283
--- /dev/null
+++ b/test/taiko/single-sign-on.js
@@ -0,0 +1,91 @@
+// Tests if logging into all apps works using SSO
+// Unfortunately we still can't run this test in CI because we haven't found
+// a way to use SSO with LE staging certs.
+// See https://open.greenhost.net/openappstack/single-sign-on/-/issues/62
+
+const { openBrowser, goto, textBox, into, write, click, toRightOf, below, link, press, image, waitFor, closeBrowser, screenshot, setConfig, text } = require('taiko');
+const assert = require('assert');
+
+(async () => {
+  try {
+    const taikoTests = process.env.TAIKO_TESTS || 'all'
+    const username = process.env.SSO_USERNAME
+    const pw = process.env.SSO_USER_PW
+    const domain = process.env.DOMAIN
+    const adminpanelUrl = 'https://admin.' + domain
+    const grafanaUrl = 'https://grafana.' + domain
+    const globalTimeout = 60000
+
+    // https://docs.taiko.dev/api/setconfig/
+    // setConfig( { observeTime: 1000});
+    setConfig( { observeTime: 0, navigationTimeout: globalTimeout });
+
+    console.log('Executing these tests: ' + taikoTests)
+    console.log('• Login to admin panel')
+    await openBrowser()
+
+    await goto(adminpanelUrl)
+    await click('Login')
+    await click('Login with OAS')
+    await write(username, into(textBox('Username')))
+    await write(pw, into(textBox('Password')))
+    await click('Remember me')
+    await click('Sign in')
+
+    // Nextcloud
+    if (taikoTests.includes('nextcloud') || taikoTests === 'all') {
+      console.log('• Nextcloud and Onlyoffice')
+      await click(link(below('nextcloud')))
+      await click('Log in with OpenAppStack')
+      await click('Continue with ' + username)
+
+      // Close potential nextcloud first run wizard modal
+      // https://github.com/nextcloud/firstrunwizard/issues/488
+      // Unfortunately, we need to sleep a while since I haven't found a
+      // good way that closes the modal *if* it pops up, since these
+      // tests should also work on subsequent logins.
+      await waitFor(5000)
+      await press('Escape')
+
+      await assert.ok(await text('Add notes, lists or links …').exists());
+    }
+
+    // Rocketchat
+    if (taikoTests.includes('rocketchat') || taikoTests === 'all') {
+      // Navigate to rocketchat
+      // Breaks because of 2fa challenge which is not received
+      // by email
+      // https://open.greenhost.net/openappstack/openappstack/-/issues/819
+      // await click(link(below('rocketchat')));
+      // await click('Login via OpenAppStack');
+      // await click('Continue with ' + username)
+    }
+
+    // Wordpress
+    if (taikoTests.includes('wordpress') || taikoTests === 'all') {
+      console.log('• Wordpress')
+      await goto(adminpanelUrl)
+      await click(link(below('wordpress')))
+      await click('Log in')
+      await click('Login with OpenID Connect')
+      await click('Continue with ' + username)
+      await assert.ok(await link('Dashboard').exists())
+    }
+
+    // Grafana
+    if (taikoTests.includes('grafana') || taikoTests === 'all') {
+      console.log('• Grafana')
+      await goto(adminpanelUrl)
+      await click(link(below('grafana')))
+      await click('Sign in with OpenAppStack')
+      await click('Continue with ' + username)
+      await assert.ok(await text('Welcome to Grafana').exists());
+    }
+  } catch (error) {
+    await screenshot()
+    console.error(error)
+    process.exitCode = 1
+  } finally {
+    await closeBrowser()
+  }
+})()