From 19d94baa5370d39e96587154a080b8f4fa4d8fd1 Mon Sep 17 00:00:00 2001
From: Maarten de Waard <maarten@greenhost.nl>
Date: Wed, 7 Jul 2021 13:55:26 +0200
Subject: [PATCH] remove the first chunk of old stuff

---
 ansible/group_vars/all/settings.yml.example   |  99 -------
 ansible/install-kubernetes.yml                |   6 -
 ansible/install-openappstack.yml              |  17 --
 ansible/roles/apps/files/ingress_hr.yaml      |   1 -
 .../apps/files/local-path-provisioner_hr.yaml |   1 -
 ansible/roles/apps/tasks/cert-manager.yml     |  18 --
 ansible/roles/apps/tasks/core.yml             | 178 ------------
 ansible/roles/apps/tasks/eventrouter.yml      |  16 --
 ansible/roles/apps/tasks/flux-custom.yml      |  18 --
 ansible/roles/apps/tasks/flux_secret.yml      |  20 --
 .../apps/tasks/kube-prometheus-stack.yml      |  74 -----
 ansible/roles/apps/tasks/letsencrypt.yml      |  21 --
 ansible/roles/apps/tasks/loki.yml             |  16 --
 ansible/roles/apps/tasks/main.yml             |  44 ---
 ansible/roles/apps/tasks/metallb.yml          |  18 --
 ansible/roles/apps/tasks/nextcloud.yml        |  39 ---
 ansible/roles/apps/tasks/promtail.yml         |  16 --
 ansible/roles/apps/tasks/pvc.yml              |  18 --
 ansible/roles/apps/tasks/rocketchat.yml       |  38 ---
 ansible/roles/apps/tasks/single-sign-on.yml   |  39 ---
 ansible/roles/apps/tasks/velero.yml           |  80 ------
 ansible/roles/apps/tasks/wordpress.yml        |  40 ---
 ansible/roles/apps/templates/s3-credentials   |   3 -
 .../apps/templates/settings/cert-manager.yaml |  32 ---
 .../apps/templates/settings/eventrouter.yaml  |  10 -
 .../apps/templates/settings/flux-custom.yaml  |  11 -
 .../settings/kube-prometheus-stack.yaml       | 269 ------------------
 .../settings/letsencrypt-production.yaml      |   1 -
 .../settings/letsencrypt-staging.yaml         |   1 -
 .../settings/local-path-provisioner.yaml      |  20 --
 .../roles/apps/templates/settings/loki.yaml   |   9 -
 .../apps/templates/settings/metallb.yaml      |   7 -
 .../apps/templates/settings/nextcloud.yaml    | 180 ------------
 .../apps/templates/settings/promtail.yaml     |  39 ---
 .../apps/templates/settings/rocketchat.yaml   | 118 --------
 .../roles/apps/templates/settings/velero.yaml | 115 --------
 .../apps/templates/settings/wordpress.yaml    |  83 ------
 .../roles/kubernetes-checks/tasks/main.yml    |  30 --
 ansible/roles/pre-configure/tasks/main.yml    |  10 -
 flux/.flux.yaml                               |  24 --
 flux/cert-manager/cert-manager_hr.yaml        |  19 --
 .../local-path-provisioner_hr.yaml            |  19 --
 flux/kube-system/metallb_hr.yaml              |  19 --
 flux/oas-apps/nextcloud_hr.yaml               |  22 --
 flux/oas-apps/rocketchat_hr.yaml              |  19 --
 flux/oas-apps/wordpress_hr.yaml               |  19 --
 flux/oas-custom/flux-custom_hr.yaml           |  18 --
 flux/oas/eventrouter_hr.yaml                  |  18 --
 flux/oas/ingress_hr.yaml                      |  19 --
 flux/oas/kube-prometheus-stack_hr.yaml        |  21 --
 flux/oas/letsencrypt-production_hr.yaml       |  18 --
 flux/oas/letsencrypt-staging_hr.yaml          |  22 --
 flux/oas/loki_cm.yaml                         |  18 --
 flux/oas/loki_hr.yaml                         |  19 --
 flux/oas/promtail_hr.yaml                     |  19 --
 flux/oas/single-sign-on_hr.yaml               |  19 --
 flux/velero/velero_hr.yaml                    |  17 --
 .../cluster/optional/nextcloud/nextcloud.yaml |  30 --
 .../optional/rocketchat/rocketchat.yaml       |  30 --
 flux2/cluster/optional/velero/velero.yaml     |  26 --
 .../cluster/optional/wordpress/wordpress.yaml |  25 --
 flux2/cluster/test/all-optional.yaml          |  16 --
 flux2/cluster/test/base.yaml                  |  14 -
 openappstack/cluster.py                       |  15 -
 requirements.in                               |   2 -
 65 files changed, 2262 deletions(-)
 delete mode 100644 ansible/install-openappstack.yml
 delete mode 120000 ansible/roles/apps/files/ingress_hr.yaml
 delete mode 120000 ansible/roles/apps/files/local-path-provisioner_hr.yaml
 delete mode 100644 ansible/roles/apps/tasks/cert-manager.yml
 delete mode 100644 ansible/roles/apps/tasks/core.yml
 delete mode 100644 ansible/roles/apps/tasks/eventrouter.yml
 delete mode 100644 ansible/roles/apps/tasks/flux-custom.yml
 delete mode 100644 ansible/roles/apps/tasks/flux_secret.yml
 delete mode 100644 ansible/roles/apps/tasks/kube-prometheus-stack.yml
 delete mode 100644 ansible/roles/apps/tasks/letsencrypt.yml
 delete mode 100644 ansible/roles/apps/tasks/loki.yml
 delete mode 100644 ansible/roles/apps/tasks/main.yml
 delete mode 100644 ansible/roles/apps/tasks/metallb.yml
 delete mode 100644 ansible/roles/apps/tasks/nextcloud.yml
 delete mode 100644 ansible/roles/apps/tasks/promtail.yml
 delete mode 100644 ansible/roles/apps/tasks/pvc.yml
 delete mode 100644 ansible/roles/apps/tasks/rocketchat.yml
 delete mode 100644 ansible/roles/apps/tasks/single-sign-on.yml
 delete mode 100644 ansible/roles/apps/tasks/velero.yml
 delete mode 100644 ansible/roles/apps/tasks/wordpress.yml
 delete mode 100644 ansible/roles/apps/templates/s3-credentials
 delete mode 100644 ansible/roles/apps/templates/settings/cert-manager.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/eventrouter.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/flux-custom.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/kube-prometheus-stack.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/letsencrypt-production.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/letsencrypt-staging.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/local-path-provisioner.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/loki.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/metallb.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/nextcloud.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/promtail.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/rocketchat.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/velero.yaml
 delete mode 100644 ansible/roles/apps/templates/settings/wordpress.yaml
 delete mode 100644 ansible/roles/kubernetes-checks/tasks/main.yml
 delete mode 100644 flux/.flux.yaml
 delete mode 100644 flux/cert-manager/cert-manager_hr.yaml
 delete mode 100644 flux/kube-system/local-path-provisioner_hr.yaml
 delete mode 100644 flux/kube-system/metallb_hr.yaml
 delete mode 100644 flux/oas-apps/nextcloud_hr.yaml
 delete mode 100644 flux/oas-apps/rocketchat_hr.yaml
 delete mode 100644 flux/oas-apps/wordpress_hr.yaml
 delete mode 100644 flux/oas-custom/flux-custom_hr.yaml
 delete mode 100644 flux/oas/eventrouter_hr.yaml
 delete mode 100644 flux/oas/ingress_hr.yaml
 delete mode 100644 flux/oas/kube-prometheus-stack_hr.yaml
 delete mode 100644 flux/oas/letsencrypt-production_hr.yaml
 delete mode 100644 flux/oas/letsencrypt-staging_hr.yaml
 delete mode 100644 flux/oas/loki_cm.yaml
 delete mode 100644 flux/oas/loki_hr.yaml
 delete mode 100644 flux/oas/promtail_hr.yaml
 delete mode 100644 flux/oas/single-sign-on_hr.yaml
 delete mode 100644 flux/velero/velero_hr.yaml
 delete mode 100644 flux2/cluster/optional/nextcloud/nextcloud.yaml
 delete mode 100644 flux2/cluster/optional/rocketchat/rocketchat.yaml
 delete mode 100644 flux2/cluster/optional/velero/velero.yaml
 delete mode 100644 flux2/cluster/optional/wordpress/wordpress.yaml
 delete mode 100644 flux2/cluster/test/all-optional.yaml
 delete mode 100644 flux2/cluster/test/base.yaml

diff --git a/ansible/group_vars/all/settings.yml.example b/ansible/group_vars/all/settings.yml.example
index 862e90c0b..931b7f033 100644
--- a/ansible/group_vars/all/settings.yml.example
+++ b/ansible/group_vars/all/settings.yml.example
@@ -2,14 +2,6 @@
 ip_address: "203.0.113.6"
 # Main domain name of the cluster services.
 domain: "example.com"
-# Email address of someone administering the cluster.
-admin_email: "admin@example.com"
-# If this is `true`, TLS certificates will be requested from the Let's Encrypt
-# staging server. If this is `false`, Let's Encrypt's production server is used.
-# Note that LE's production server has stricter rate limits, so set this to
-# `true` when you are testing something.
-# Important: don't quote this variable!
-acme_staging: false
 
 # It is possible to add a docker mirror that serves images from docker.io.
 docker_mirror:
@@ -17,94 +9,3 @@ docker_mirror:
   # endpoint: URL of docker mirror
   # username: username for auth
   # password: password for auth
-
-outgoing_mail:
-  enabled: false
-  fromAddress: ""
-  smtp:
-    user: ""
-    password: ""
-    host: ""
-    ssl: false
-    port: ""
-    # Authentication type can be one of "PLAIN, NONE, LOGIN"
-    authtype: "LOGIN"
-
-backup:
-  s3:
-    # Disabled by default. To enable, change to `true` and configure the
-    # settings below. You'll also want to add "velero" to the enabled
-    # applications a bit further down in this file.
-    # Finally, you'll also need to provide access credentials as
-    # secrets; see the documentation:
-    # https://docs.openappstack.net/en/latest/installation_instructions.html#step-2-optional-cluster-backups-using-velero
-    enabled: false
-    # URL of S3 service. Please use the principal domain name here, without the
-    # bucket name.
-    url: "https://store.greenhost.net"
-    # Region of S3 service that's used for backups.
-    # For some on-premise providers this may be irrelevant, but the S3 API
-    # apparently requires it at some point.
-    region: "ceph"
-    # Name of the S3 bucket that backups will be stored in.
-    # This has to exist already: Velero will not create it for you.
-    bucket: "openappstack-backup"
-    # Prefix that's added to backup filenames.
-    prefix: "test-instance"
-
-# Additional custom flux installation; needs to be enabled under `enabled_applications` below.
-# See https://docs.openappstack.net/en/latest/customization.html for details
-# and https://github.com/fluxcd/flux/tree/master/chart/flux#configuration for
-# possible values.
-#
-# e.g.:
-#
-# flux-custom_extra_values:
-#   git:
-#     repo: "https://github.com/me/my_flux_config"
-#     branch: "master"
-#   …
-
-# A whitelist of applications that will be enabled.
-enabled_applications:
-  # System components, necessary for the system to function.
-  - 'cert-manager'
-  - 'letsencrypt-production'
-  - 'letsencrypt-staging'
-  - 'metallb'
-  - 'ingress'
-  - 'local-path-provisioner'
-  # - 'single-sign-on'
-  #
-  # Monitoring components (auto-enabled by GitLab CI)
-  # - 'kube-prometheus-stack'
-  # - 'loki'
-  # - 'promtail'
-  # - 'eventrouter'
-  #
-  # The backup system Velero is disabled by default, see settings under `backup` above.
-  # - 'velero'
-  #
-  # Additional custom flux installation, disabled by default.
-  # see https://docs.openappstack.net/en/latest/customization.html for details
-  # - 'flux-custom'
-  #
-  # Applications
-  # - 'nextcloud'
-  # - 'rocketchat'
-  # - 'wordpress'
-
-
-# Use `APPNAME_extra_values` to provide additional, custom
-# helm chart values. Look for the correct APPNAME in the
-# `enabled_applications` list above. We use Ansible's `combine` filter
-# (https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#combining-hashes-dictionaries)
-# to combine the values with the default values in
-# `ansible/roles/apps/templates/settings/APPNAME.yaml`.
-# Values entered here take precedence over our defaults.
-#
-# E.g.:
-#
-#   prometheus_extra_values:
-#     extraScrapeConfigs:
-#       …
diff --git a/ansible/install-kubernetes.yml b/ansible/install-kubernetes.yml
index 462b69929..83a15a0a5 100644
--- a/ansible/install-kubernetes.yml
+++ b/ansible/install-kubernetes.yml
@@ -9,12 +9,6 @@
 
 - name: Pre-configure hosts
   hosts: all
-  # We use `mitogen_linear` as the default strategy. However, mitogen fails
-  # when installing the `openshift` python module (a requirement for the
-  # `k8s` ansible module) and then using `k8s` in the same context.
-  # That's why we use the standard `linear` ansible strategy for this role.
-  # See https://open.greenhost.net/openappstack/openappstack/issues/102
-  strategy: linear
   roles:
     - role: pre-configure
       tags: ['pre-configure']
diff --git a/ansible/install-openappstack.yml b/ansible/install-openappstack.yml
deleted file mode 100644
index 5589966d9..000000000
--- a/ansible/install-openappstack.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-
-- name: Run compatibility checks
-  hosts: all
-  gather_facts: false
-  pre_tasks:
-    - import_role:
-        name: compatibility-checks
-    - import_role:
-        name: kubernetes-checks
-
-- name: Install OpenAppStack
-  hosts: master
-  tasks:
-    - import_role:
-        name: apps
-      tags: ['apps']
diff --git a/ansible/roles/apps/files/ingress_hr.yaml b/ansible/roles/apps/files/ingress_hr.yaml
deleted file mode 120000
index fa3810d6c..000000000
--- a/ansible/roles/apps/files/ingress_hr.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../flux/oas/ingress_hr.yaml
\ No newline at end of file
diff --git a/ansible/roles/apps/files/local-path-provisioner_hr.yaml b/ansible/roles/apps/files/local-path-provisioner_hr.yaml
deleted file mode 120000
index dc817a880..000000000
--- a/ansible/roles/apps/files/local-path-provisioner_hr.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../flux/kube-system/local-path-provisioner_hr.yaml
\ No newline at end of file
diff --git a/ansible/roles/apps/tasks/cert-manager.yml b/ansible/roles/apps/tasks/cert-manager.yml
deleted file mode 100644
index e21a17d29..000000000
--- a/ansible/roles/apps/tasks/cert-manager.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: Create Kubernetes secret with cert-manager settings
-  tags:
-    - config
-    - flux
-    - cert-manager
-  vars:
-    flux_secret:
-      name: "cert-manager"
-      namespace: "cert-manager"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - cert-manager
diff --git a/ansible/roles/apps/tasks/core.yml b/ansible/roles/apps/tasks/core.yml
deleted file mode 100644
index a9153f6b0..000000000
--- a/ansible/roles/apps/tasks/core.yml
+++ /dev/null
@@ -1,178 +0,0 @@
----
-- name: Create OAS namespaces
-  tags:
-    - kubernetes
-    - namespace
-    - flux
-  k8s:
-    name: '{{ item }}'
-    api_version: v1
-    kind: Namespace
-    state: present
-  with_items:
-    - 'oas'
-    - 'oas-apps'
-    - 'cert-manager'
-    - 'velero'
-    - 'oas-custom'
-
-- name: Install helm-operator
-  tags:
-    - flux
-  vars:
-    extra_opts: "{{ helm_operator.extra_opts | default('') }}"
-  # Commented version of the shell command below:
-  # helm upgrade
-  #   # Install a new release if it doesn't yet exist.
-  #   --install
-  #   --repo "https://charts.fluxcd.io"
-  #   --namespace oas
-  #   --version 1.0.1
-  #   --set helm.versions=v3
-  #   # Helm 3 doesn't have the stable repository enabled by default.
-  #   --set configureRepositories.enable=true
-  #   --set configureRepositories.repositories[0].name=stable
-  #   --set configureRepositories.repositories[0].url=https://kubernetes-charts.storage.googleapis.com
-  #   --set configureRepositories.repositories[1].name=bitnami
-  #   --set configureRepositories.repositories[1].url=https://charts.bitnami.com/bitnami
-  #   # Reconcile actual helm releases with HelmRelease objects with this
-  #   # interval.
-  #   --set chartsSyncInterval=20m
-  #   # Update HelmRelease objects' status with this interval.
-  #   --set statusUpdateInterval=30s
-  #   # Set resource limits so helm-operator can't take over the whole machine
-  #   --set resources.limits.cpu=1200m
-  #   --set resources.limits.memory=2Gi
-  #   # Delay the liveness and readiness probe a bit to prevent restarts
-  #   --set livenessProbe.initialDelaySeconds=10
-  #   --set readinessProbe.initialDelaySeconds=10
-  #   # Helm release name
-  #   helm-operator
-  #   # Chart name
-  #   helm-operator
-  shell: |
-    helm upgrade \
-    --install \
-    --repo "https://charts.fluxcd.io" \
-    --namespace oas \
-    --version {{ helm_operator.version }} \
-    --set helm.versions=v3 \
-    --set configureRepositories.enable=true \
-    --set configureRepositories.repositories[0].name=stable \
-    --set configureRepositories.repositories[0].url=https://charts.helm.sh/stable \
-    --set configureRepositories.repositories[1].name=bitnami \
-    --set configureRepositories.repositories[1].url=https://charts.bitnami.com/bitnami \
-    --set configureRepositories.repositories[2].name=nextcloud \
-    --set configureRepositories.repositories[2].url=https://nextcloud.github.io/helm \
-    --set chartsSyncInterval=20m \
-    --set statusUpdateInterval=30s \
-    --set resources.requests.cpu=500m \
-    --set resources.requests.memory=2Gi \
-    --set resources.limits.cpu=1200m \
-    --set resources.limits.memory=2Gi \
-    --set livenessProbe.initialDelaySeconds=10 \
-    --set readinessProbe.initialDelaySeconds=10 \
-    {{ extra_opts }} \
-    helm-operator \
-    helm-operator
-
-- name: Create Kubernetes secret with local-path-provisioner settings
-  tags:
-    - config
-    - flux
-    - local-path-provisioner
-  vars:
-    flux_secret:
-      name: "local-path-provisioner"
-      namespace: "kube-system"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - local-path-provisioner
-  when: "'local-path-provisioner' in enabled_applications"
-
-# We have to install local-path-provisioner before other charts, otherwise the PVCs
-# created by those charts will not have the right default storageclass assigned
-# to them.
-# It will still be managed by flux afterwards.
-- name: Create local-path-provisioner HelmResource
-  tags:
-    - config
-    - flux
-    - local-path-provisioner
-  k8s:
-    state: present
-    resource_definition: "{{ lookup('file', 'local-path-provisioner_hr.yaml') | from_yaml }}"
-  when: "'local-path-provisioner' in enabled_applications"
-
-- name: Install flux
-  tags:
-    - flux
-  vars:
-    # The first url below points to the "local-flux" nginx pod that is running
-    # inside the cluster, and is serving the git repo with HelmRelease files
-    # over http.
-    git_url: "{{ 'http://local-flux.oas.svc.cluster.local/.git' if flux.local_flux else flux.repo }}"
-    git_branch: "{{ 'master' if flux.local_flux else flux.branch }}"
-    git_path: "{{ '.' if flux.local_flux else 'flux' }}"
-    extra_opts: "{{ flux.extra_opts | default('') }}"
-  # Commented version of the shell command below:
-  # helm upgrade
-  #   # Install a new release if it doesn't yet exist.
-  #   --install
-  #   --repo "https://charts.fluxcd.io"
-  #   --namespace oas
-  #   --version 1.3.0
-  #   # The git repo that flux listens to for changes.
-  #   --set git.url="{{ git_url }}"
-  #   # The branch of the git repo that flux listens to for changes.
-  #   --set git.branch="{{ git_branch }}"
-  #   # The directory within the git repo that flux listens to for changes.
-  #   --set git.path="{{ git_path }}"
-  #   --set git.readonly=true
-  #   # Do not follow updates of upstream docker images automatically.
-  #   --set registry.excludeImage='*'
-  #   # Necessary for read-only mode.
-  #   --set sync.state="secret"
-  #   # Delete resources originally created by Flux when their manifests
-  #   # are removed from the git repo.
-  #   --set syncGarbageCollection.enabled=true
-  #   # Look for .flux.yaml files for manifest generation.
-  #   --set manifestGeneration=true
-  #   # Set the interval between checks for updates in the git repo to 1 hour.
-  #   --set git.pollInterval=1h
-  #   # Resource requests are already auto-defined. Set some limits:
-  #   --set resources.limits.cpu=50m
-  #   --set resources.limits.memory=600Mi
-  #   # Helm release name
-  #   flux
-  #   # Chart name
-  #   flux
-  shell: |
-    helm upgrade --install \
-    --repo "https://charts.fluxcd.io" \
-    --namespace oas \
-    --version "{{ flux.version }}" \
-    --set git.url="{{ git_url }}" \
-    --set git.branch="{{ git_branch }}" \
-    --set git.path="{{ git_path }}" \
-    --set git.readonly=true \
-    --set registry.excludeImage='*' \
-    --set sync.state="secret" \
-    --set syncGarbageCollection.enabled=true \
-    --set manifestGeneration=true \
-    --set git.pollInterval=1h \
-    --set resources.requests.cpu=300m \
-    --set resources.limits.cpu=400m \
-    --set resources.limits.memory=600Mi \
-    {{ extra_opts }} flux flux
-
-- name: Install fluxctl via snap
-  tags:
-    - flux
-  command: snap install --classic fluxctl
-  args:
-    creates: /snap/bin/fluxctl
diff --git a/ansible/roles/apps/tasks/eventrouter.yml b/ansible/roles/apps/tasks/eventrouter.yml
deleted file mode 100644
index 89b351e50..000000000
--- a/ansible/roles/apps/tasks/eventrouter.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-- name: Create Kubernetes secret with eventrouter settings
-  tags:
-    - config
-    - flux
-    - eventrouter
-  vars:
-    flux_secret:
-      name: "eventrouter"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - eventrouter
diff --git a/ansible/roles/apps/tasks/flux-custom.yml b/ansible/roles/apps/tasks/flux-custom.yml
deleted file mode 100644
index cb71ab9c5..000000000
--- a/ansible/roles/apps/tasks/flux-custom.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: Create Kubernetes secret with flux-custom settings
-  tags:
-    - config
-    - flux
-    - flux-custom
-  vars:
-    flux_secret:
-      name: "flux-custom"
-      namespace: "oas-custom"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - flux-custom
diff --git a/ansible/roles/apps/tasks/flux_secret.yml b/ansible/roles/apps/tasks/flux_secret.yml
deleted file mode 100644
index 89bbe26b7..000000000
--- a/ansible/roles/apps/tasks/flux_secret.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Create Kubernetes secret with app settings
-  tags:
-    - config
-    - flux
-    - secret
-  vars:
-    # Merge extra values from e.g. `prometheus_extra_values`
-    extra_values: "{{ vars[flux_secret.name + '_extra_values'] | default({}) }}"
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "{{ flux_secret.namespace | default('oas-apps') }}"
-        name: "{{ flux_secret.name }}-settings"
-      data:
-        values.yaml: "{{ lookup('template','settings/{{ flux_secret.name }}.yaml') | from_yaml | combine(extra_values, recursive=True) | to_nice_yaml(indent=2) | b64encode }}"
-        enabled: "{{ (flux_secret.name in enabled_applications) | ternary('true', 'false') | b64encode }}"
diff --git a/ansible/roles/apps/tasks/kube-prometheus-stack.yml b/ansible/roles/apps/tasks/kube-prometheus-stack.yml
deleted file mode 100644
index 02c444309..000000000
--- a/ansible/roles/apps/tasks/kube-prometheus-stack.yml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- name: Create prometheus auth secret for basic auth
-  tags:
-    - kube-prometheus-stack
-    - config
-    - secret
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas"
-        name: "prometheus-basic-auth"
-      data:
-        auth: "{{ ('admin:' + (prometheus_basic_auth | password_hash('apr_md5_crypt')) + '\n')  | b64encode }}"
-
-- name: Create alertmanager auth secret for basic auth
-  tags:
-    - kube-prometheus-stack
-    - config
-    - secret
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas"
-        name: "alertmanager-basic-auth"
-      data:
-        auth: "{{ ('admin:' + (alertmanager_basic_auth | password_hash('apr_md5_crypt')) + '\n')  | b64encode }}"
-
-- name: Create Kubernetes secret with prometheus settings
-  tags:
-    - config
-    - flux
-    - monitoring
-    - kube-prometheus-stack
-  vars:
-    flux_secret:
-      name: "kube-prometheus-stack"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - monitoring
-        - kube-prometheus-stack
-
-- name: Create prometheus-related persistent volumes
-  tags:
-    - config
-    - monitoring
-    - kube-prometheus-stack
-  vars:
-    pvc:
-      name: "{{ item.name }}"
-      namespace: "oas"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - monitoring
-        - kube-prometheus-stack
-  loop:
-    - name: "alertmanager"
-      size: "2Gi"
-    - name: "grafana"
-      size: "2Gi"
diff --git a/ansible/roles/apps/tasks/letsencrypt.yml b/ansible/roles/apps/tasks/letsencrypt.yml
deleted file mode 100644
index e08880f2a..000000000
--- a/ansible/roles/apps/tasks/letsencrypt.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-
-- name: Create Kubernetes secret with settings for letsencrypt issuers
-  tags:
-    - config
-    - flux
-    - letsencrypt
-  vars:
-    flux_secret:
-      name: "letsencrypt-{{ item }}"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - letsencrypt
-  with_items:
-    - "production"
-    - "staging"
diff --git a/ansible/roles/apps/tasks/loki.yml b/ansible/roles/apps/tasks/loki.yml
deleted file mode 100644
index eaac7c223..000000000
--- a/ansible/roles/apps/tasks/loki.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-- name: Create Kubernetes secret with loki settings
-  tags:
-    - config
-    - flux
-    - loki
-  vars:
-    flux_secret:
-      name: "loki"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - loki
diff --git a/ansible/roles/apps/tasks/main.yml b/ansible/roles/apps/tasks/main.yml
deleted file mode 100644
index 9405d1079..000000000
--- a/ansible/roles/apps/tasks/main.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-
-- name: Install namespaces, helm operator, local-path-provisioner and flux
-  import_tasks: core.yml
-
-- name: Tasks pertaining to cert-manager
-  import_tasks: cert-manager.yml
-
-- name: Tasks pertaining to letsencrypt
-  import_tasks: letsencrypt.yml
-
-- name: Tasks pertaining to prometheus and grafana
-  import_tasks: kube-prometheus-stack.yml
-
-- name: Tasks pertaining to loki
-  import_tasks: loki.yml
-
-- name: Tasks pertaining to promtail
-  import_tasks: promtail.yml
-
-- name: Tasks pertaining to eventrouter
-  import_tasks: eventrouter.yml
-
-- name: Tasks pertaining to Single sign-on
-  import_tasks: single-sign-on.yml
-
-- name: Tasks pertaining to Rocket.chat
-  import_tasks: rocketchat.yml
-
-- name: Tasks pertaining to NextCloud
-  import_tasks: nextcloud.yml
-
-- name: Tasks pertaining to WordPress
-  import_tasks: wordpress.yml
-
-- name: Tasks pertaining to velero
-  import_tasks: velero.yml
-  when: backup.s3.enabled
-
-- name: Tasks pertaining to flux-custom
-  import_tasks: flux-custom.yml
-
-- name: Tasks pertaining to metallb
-  import_tasks: metallb.yml
diff --git a/ansible/roles/apps/tasks/metallb.yml b/ansible/roles/apps/tasks/metallb.yml
deleted file mode 100644
index 4aaa86aa9..000000000
--- a/ansible/roles/apps/tasks/metallb.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: Create metallb secret with metallb settings
-  tags:
-    - config
-    - flux
-    - metallb
-  vars:
-    flux_secret:
-      name: "metallb"
-      namespace: "kube-system"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - metallb
diff --git a/ansible/roles/apps/tasks/nextcloud.yml b/ansible/roles/apps/tasks/nextcloud.yml
deleted file mode 100644
index 387a5781f..000000000
--- a/ansible/roles/apps/tasks/nextcloud.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: Create Kubernetes secret with nextcloud settings
-  tags:
-    - config
-    - flux
-    - nextcloud
-  vars:
-    flux_secret:
-      name: "nextcloud"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - nextcloud
-
-- name: Create persistent volumes for nextcloud data and metadata
-  tags:
-    - config
-    - nextcloud
-    - storage
-  vars:
-    pvc:
-      name: "nextcloud-{{ item.name }}"
-      namespace: "oas-apps"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - nextcloud
-        - storage
-  with_items:
-    - name: "files"
-      size: 2Gi
-    - name: "mariadb"
-      size: 512Mi
diff --git a/ansible/roles/apps/tasks/promtail.yml b/ansible/roles/apps/tasks/promtail.yml
deleted file mode 100644
index 7e2bf955e..000000000
--- a/ansible/roles/apps/tasks/promtail.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-- name: Create Kubernetes secret with promtail settings
-  tags:
-    - config
-    - flux
-    - promtail
-  vars:
-    flux_secret:
-      name: "promtail"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - promtail
diff --git a/ansible/roles/apps/tasks/pvc.yml b/ansible/roles/apps/tasks/pvc.yml
deleted file mode 100644
index e79c4f11b..000000000
--- a/ansible/roles/apps/tasks/pvc.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Create Persistent Volume Claim for application
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: PersistentVolumeClaim
-      metadata:
-        namespace: "{{ pvc.namespace | default('oas-apps') }}"
-        name: "{{ pvc.name }}"
-      spec:
-        accessModes:
-          - "{{ pvc.accessMode | default('ReadWriteOnce') }}"
-        volumeMode: Filesystem
-        resources:
-          requests:
-            storage: "{{ pvc.size }}"
-        storageClassName: "{{ pvc.storageClass | default('local-path') }}"
diff --git a/ansible/roles/apps/tasks/rocketchat.yml b/ansible/roles/apps/tasks/rocketchat.yml
deleted file mode 100644
index 875e52872..000000000
--- a/ansible/roles/apps/tasks/rocketchat.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-
-- name: Create Kubernetes secret with Rocket.Chat settings
-  tags:
-    - config
-    - flux
-    - rocketchat
-  vars:
-    flux_secret:
-      name: "rocketchat"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - rocketchat
-
-- name: Create persistent volumes for rocketchat data
-  tags:
-    - config
-    - rocketchat
-    - storage
-  vars:
-    pvc:
-      name: "rocketchat-{{ item.name }}"
-      namespace: "oas-apps"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - rocketchat
-        - storage
-  with_items:
-    - name: "data"
-      size: 1Gi
diff --git a/ansible/roles/apps/tasks/single-sign-on.yml b/ansible/roles/apps/tasks/single-sign-on.yml
deleted file mode 100644
index 04a0f5c3d..000000000
--- a/ansible/roles/apps/tasks/single-sign-on.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-
-- name: Create Kubernetes secret with single-sign-on settings
-  tags:
-    - config
-    - flux
-    - single-sign-on
-  vars:
-    flux_secret:
-      name: "single-sign-on"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - single-sign-on
-
-- name: Create persistent volumes for single-sign-on userbackend data
-  tags:
-    - config
-    - single-sign-on
-    - storage
-  vars:
-    pvc:
-      name: "single-sign-on-{{ item.name }}"
-      namespace: "oas"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - single-sign-on
-        - storage
-  with_items:
-    - name: "userbackend"
-      size: 1Gi
diff --git a/ansible/roles/apps/tasks/velero.yml b/ansible/roles/apps/tasks/velero.yml
deleted file mode 100644
index b8b8ddc8f..000000000
--- a/ansible/roles/apps/tasks/velero.yml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-
-- name: Create Kubernetes secret with velero settings
-  tags:
-    - config
-    - flux
-    - velero
-  vars:
-    flux_secret:
-      name: "velero"
-      namespace: "velero"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - velero
-
-- name: Create Kubernetes secret with velero S3 credentials
-  tags:
-    - config
-    - secret
-    - velero
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "velero"
-        name: "credentials"
-      data:
-        cloud: "{{ lookup('template','s3-credentials') | b64encode }}"
-
-- name: Get current velero CLI version
-  tags:
-    - velero
-  shell: "velero version | head -n 2 | tail -n 1 | cut -d' ' -f 2 | cut -d'v' -f 2"
-  failed_when: false
-  register: velero_version
-  changed_when: false
-
-- name: Show current velero CLI version
-  tags:
-    - velero
-    - debug
-  debug:
-    msg: 'Current velero version is: {{ velero_version.stdout }}'
-
-- name: Download velero CLI
-  tags:
-    - velero
-  get_url:
-    url: "https://github.com/vmware-tanzu/velero/releases/download/v{{ velero.version }}/velero-v{{ velero.version }}-linux-amd64.tar.gz"
-    checksum: "{{ velero.checksum }}"
-    dest: "/tmp/velero-v{{ velero.version }}.tar.gz"
-  when: velero_version.stdout != velero.version
-
-- name: Unpack velero CLI
-  tags:
-    - velero
-  unarchive:
-    remote_src: true
-    src: "/tmp/velero-v{{ velero.version }}.tar.gz"
-    dest: "/tmp/"
-  when: velero_version.stdout != velero.version
-
-- name: Install velero CLI
-  tags:
-    - velero
-  copy:
-    remote_src: true
-    src: "/tmp/velero-v{{ velero.version }}-linux-amd64/velero"
-    dest: "/usr/local/bin/velero"
-    owner: root
-    group: root
-    mode: "0755"
-  become: true
-  when: velero_version.stdout != velero.version
diff --git a/ansible/roles/apps/tasks/wordpress.yml b/ansible/roles/apps/tasks/wordpress.yml
deleted file mode 100644
index ef56ad69e..000000000
--- a/ansible/roles/apps/tasks/wordpress.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Create Kubernetes secret with wordpress settings
-  tags:
-    - config
-    - flux
-    - wordpress
-  vars:
-    flux_secret:
-      name: "wordpress"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - wordpress
-
-
-- name: Create persistent volumes for wordpress data and metadata
-  tags:
-    - config
-    - wordpress
-    - storage
-  vars:
-    pvc:
-      name: "wordpress-{{ item.name }}"
-      namespace: "oas-apps"
-      size: "{{ item.size }}"
-  include_tasks:
-    file: pvc.yml
-    apply:
-      tags:
-        - config
-        - wordpress
-        - storage
-  with_items:
-    - name: "files"
-      size: 2Gi
-    - name: "mariadb"
-      size: 512Mi
diff --git a/ansible/roles/apps/templates/s3-credentials b/ansible/roles/apps/templates/s3-credentials
deleted file mode 100644
index 76827ff12..000000000
--- a/ansible/roles/apps/templates/s3-credentials
+++ /dev/null
@@ -1,3 +0,0 @@
-[default]
-aws_access_key_id={{ lookup('file', '{{ cluster_dir }}/secrets/s3_access_key') }}
-aws_secret_access_key={{ lookup('file', '{{ cluster_dir }}/secrets/s3_secret_key') }}
diff --git a/ansible/roles/apps/templates/settings/cert-manager.yaml b/ansible/roles/apps/templates/settings/cert-manager.yaml
deleted file mode 100644
index b66a9e110..000000000
--- a/ansible/roles/apps/templates/settings/cert-manager.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-ingressShim:
-  {% if acme_staging %}
-  defaultIssuerName: "letsencrypt-staging"
-  {% else %}
-  defaultIssuerName: "letsencrypt-production"
-  {% endif %}
-  defaultIssuerKind: ClusterIssuer
-resources:
-  requests:
-    cpu: 20m
-    memory: 130Mi
-  limits:
-    cpu: 40m
-    memory: 260Mi
-cainjector:
-  resources:
-    requests:
-      cpu: 20m
-      memory: 140Mi
-    limits:
-      cpu: 40m
-      memory: 280Mi
-webhook:
-  resources:
-    requests:
-      cpu: 10m
-      memory: 23Mi
-    limits:
-      cpu: 20m
-      memory: 46Mi
-installCRDs: true
diff --git a/ansible/roles/apps/templates/settings/eventrouter.yaml b/ansible/roles/apps/templates/settings/eventrouter.yaml
deleted file mode 100644
index 8be599e67..000000000
--- a/ansible/roles/apps/templates/settings/eventrouter.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# https://github.com/helm/charts/tree/master/stable/eventrouter
-sink: stdout
-resources:
-  limits:
-    memory: 200Mi
-    cpu: 200m
-  requests:
-    memory: 100Mi
-    cpu: 100m
diff --git a/ansible/roles/apps/templates/settings/flux-custom.yaml b/ansible/roles/apps/templates/settings/flux-custom.yaml
deleted file mode 100644
index abb9d95c4..000000000
--- a/ansible/roles/apps/templates/settings/flux-custom.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Since flux-custom needs to be configured entirely by the user, we don't
-# provide any defaults. Please refer to
-# https://docs.openappstack.net/en/latest/customization.html for how to
-# configure your custom flux installation.
-#
-# Since ansible can't combine() an empty dictionary, we provide one dummy
-# default value here.
-
-git:
-  branch: master
diff --git a/ansible/roles/apps/templates/settings/kube-prometheus-stack.yaml b/ansible/roles/apps/templates/settings/kube-prometheus-stack.yaml
deleted file mode 100644
index b73b1bc41..000000000
--- a/ansible/roles/apps/templates/settings/kube-prometheus-stack.yaml
+++ /dev/null
@@ -1,269 +0,0 @@
-#jinja2:lstrip_blocks:"True"
-# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml
-
-# From: https://github.com/cablespaghetti/k3s-monitoring/blob/master/kube-prometheus-stack-values.yaml
-# Disable etcd monitoring. See https://github.com/cablespaghetti/k3s-monitoring/issues/4
-kubeEtcd:
-  enabled: false
-
-# Disable kube-controller-manager and kube-scheduler monitoring. See https://github.com/cablespaghetti/k3s-monitoring/issues/2
-kubeControllerManager:
-  enabled: false
-kubeScheduler:
-  enabled: false
-
-alertmanager:
-  persistentVolume:
-    existingClaim: "alertmanager"
-  ingress:
-    enabled: true
-    annotations:
-      nginx.ingress.kubernetes.io/auth-type: basic
-      nginx.ingress.kubernetes.io/auth-secret: alertmanager-basic-auth
-      nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required'
-      kubernetes.io/tls-acme: "true"
-    pathType: ImplementationSpecific
-    hosts:
-      - "alertmanager.{{ domain }}"
-    tls:
-      - secretName: alertmanager-tls
-        hosts:
-          - "alertmanager.{{ domain }}"
-  config:
-    {% if outgoing_mail.enabled %}
-    global:
-      smtp_from: "{{ outgoing_mail.fromAddress }}"
-      smtp_smarthost: "{{ outgoing_mail.smtp.host }}:{{ outgoing_mail.smtp.port }}"
-      smtp_auth_username: "{{ outgoing_mail.smtp.user }}"
-      smtp_auth_password: "{{ outgoing_mail.smtp.password }}"
-    {% endif %}
-    route:
-      group_by: ['job']
-      group_wait: 30s
-      group_interval: 5m
-      repeat_interval: 1h
-      {% if outgoing_mail.enabled %}
-      receiver: email
-      {% else %}
-      receiver: 'null'
-      {% endif %}
-      routes:
-      - match:
-          # This is an alert meant to ensure that the entire alerting pipeline is functional.
-          # This alert is always firing, therefore it should always be firing in Alertmanager
-          # and always fire against a receiver. There are integrations with various notification
-          # mechanisms that send a notification when this alert is not firing. For example the
-          # "DeadMansSnitch" integration in PagerDuty.
-          alertname: Watchdog
-        receiver: 'null'
-
-    receivers:
-    - name: 'null'
-    {% if outgoing_mail.enabled %}
-    - name: email
-      email_configs:
-      - send_resolved: true
-        to: {{ admin_email }}
-    {% endif %}
-
-    # Inhibition rules allow muting a set of alerts when another alert is firing.
-    # We use this to mute any warning-level notifications if the same alert is already critical.
-    inhibit_rules:
-    - source_match:
-        severity: 'critical'
-      target_match:
-        severity: 'warning'
-      # Apply inhibition if the alertname and namespace are the same.
-      equal: ['alertname', 'namespace']
-
-  alertmanagerSpec:
-#    replicas: 3
-#    podAntiAffinity: "soft"
-    storage:
-      volumeClaimTemplate:
-        spec:
-          accessModes: ["ReadWriteOnce"]
-          resources:
-            requests:
-              storage: 1Gi
-#    resources:
-#      limits:
-#        cpu: 500m
-#        memory: 64Mi
-#      requests:
-#        cpu: 25m
-#        memory: 32Mi
-#    priorityClassName: high-priority
-
-
-prometheus:
-  # https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus
-  prometheusSpec:
-    scrapeInterval: "3m"
-    evaluationInterval: "3m"
-    retention: "30d"
-
-#    replicas: 2
-#    podAntiAffinity: "hard"
-    storageSpec:
-      volumeClaimTemplate:
-        spec:
-          accessModes: ["ReadWriteOnce"]
-          resources:
-            requests:
-              storage: 10Gi
-
-    resources:
-      limits:
-        cpu: 200m
-        memory: 1Gi
-      requests:
-        cpu: 100m
-        memory: 512Mi
-
-  ingress:
-    enabled: true
-    annotations:
-      nginx.ingress.kubernetes.io/auth-type: basic
-      nginx.ingress.kubernetes.io/auth-secret: prometheus-basic-auth
-      nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required'
-      kubernetes.io/tls-acme: "true"
-    pathType: ImplementationSpecific
-    hosts:
-      - "prometheus.{{ domain }}"
-    tls:
-      - secretName: prometheus-tls
-        hosts:
-          - "prometheus.{{ domain }}"
-
-#
-#  service:
-#    sessionAffinity: "ClientIP"
-#
-
-grafana:
-  # https://github.com/grafana/helm-charts/tree/main/charts/grafana
-  adminPassword: "{{ grafana_admin_password }}"
-  grafana.ini:
-    server:
-      root_url: "https://grafana.{{ domain }}"
-    auth.generic_oauth:
-      name: OpenAppStack
-      enabled: true
-      client_id: grafana
-      client_secret: "{{ grafana_oauth_client_secret }}"
-      scopes: "openid profile email openappstack_roles"
-      auth_url: "https://sso.{{ domain }}/oauth2/auth"
-      token_url: "https://sso.{{ domain }}/oauth2/token"
-      api_url: "https://sso.{{ domain }}/userinfo"
-      role_attribute_path: contains(openappstack_roles[*], 'admin') && 'Admin' || 'Editor'
-  ingress:
-    enabled: true
-    annotations:
-      kubernetes.io/tls-acme: "true"
-    pathType: ImplementationSpecific
-    hosts:
-      - "grafana.{{ domain }}"
-    tls:
-      - secretName: grafana-tls
-        hosts:
-          - "grafana.{{ domain }}"
-  persistence:
-    enabled: true
-    existingClaim: "grafana"
-  podAnnotations:
-    backup.velero.io/backup-volumes: "storage"
-
-  # This allows us to pick up the Loki datasource
-  # sidecar:
-  #   datasources:
-  #     enabled: true
-  #     label: grafana_datasource
-  #   # Make a configmap with the label `grafana_dashboard` to add dashboards to
-  #   # Grafana.
-  #   dashboards:
-  #     enabled: true
-  #     label: grafana_dashboard
-
-  # dashboardProviders:
-  #   dashboardproviders.yaml:
-  #     apiVersion: 1
-  #     providers:
-  #     - name: 'default'
-  #       orgId: 1
-  #       folder: ''
-  #       type: file
-  #       disableDeletion: false
-  #       editable: true
-  #       options:
-  #         path: /var/lib/grafana/dashboards
-  # dashboards:
-  #   default:
-  #     kube-dash:
-  #       gnetId: 11074
-  #       revision: 2
-  #       datasource: Prometheus
-  #     loki-dash:
-  #       gnetId: 10880
-  #       revision: 1
-  #       datasource: Loki
-
-  # datasources:
-  #  datasources.yaml:
-  #    apiVersion: 1
-  #    datasources:
-  #    - name: Prometheus
-  #      type: prometheus
-  #      url: http://prometheus-server
-  #      access: proxy
-  #      isDefault: true
-
-  plugins:
-    - grafana-piechart-panel
-
-  resources:
-    limits:
-      cpu: 200m
-      memory: 256Mi
-    requests:
-      cpu: 100m
-      memory: 128Mi
-#
-#  sidecar:
-#    resources:
-#      limits:
-#        cpu: 100m
-#        memory: 128Mi
-#      requests:
-#        cpu: 5m
-#        memory: 64Mi
-
-prometheusOperator:
-  resources:
-    limits:
-      cpu: 200m
-      memory: 256Mi
-    requests:
-      cpu: 100m
-      memory: 128Mi
-#  priorityClassName: high-priority
-
-prometheus-node-exporter:
-  resources:
-    limits:
-      cpu: 200m
-      memory: 32Mi
-    requests:
-      cpu: 100m
-      memory: 16Mi
-#  priorityClassName: high-priority
-
-kube-state-metrics:
-  resources:
-    limits:
-      cpu: 200m
-      memory: 64Mi
-    requests:
-      cpu: 100m
-      memory: 32Mi
-#  priorityClassName: high-priority
diff --git a/ansible/roles/apps/templates/settings/letsencrypt-production.yaml b/ansible/roles/apps/templates/settings/letsencrypt-production.yaml
deleted file mode 100644
index bc749009c..000000000
--- a/ansible/roles/apps/templates/settings/letsencrypt-production.yaml
+++ /dev/null
@@ -1 +0,0 @@
-email: "{{ admin_email }}"
diff --git a/ansible/roles/apps/templates/settings/letsencrypt-staging.yaml b/ansible/roles/apps/templates/settings/letsencrypt-staging.yaml
deleted file mode 100644
index bc749009c..000000000
--- a/ansible/roles/apps/templates/settings/letsencrypt-staging.yaml
+++ /dev/null
@@ -1 +0,0 @@
-email: "{{ admin_email }}"
diff --git a/ansible/roles/apps/templates/settings/local-path-provisioner.yaml b/ansible/roles/apps/templates/settings/local-path-provisioner.yaml
deleted file mode 100644
index 25db46090..000000000
--- a/ansible/roles/apps/templates/settings/local-path-provisioner.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-nodePathMap:
-  - node: DEFAULT_PATH_FOR_NON_LISTED_NODES
-    paths:
-      - "/var/lib/OpenAppStack/local-storage"
-storageClass:
-  defaultClass: true
-# We temporarily use our own build in order to use local volumes instead of
-# hostPath.
-image:
-  repository: "open.greenhost.net:4567/openappstack/openappstack/local-path-provisioner"
-  tag: "52f994f-amd64"
-
-
-resources:
-  requests:
-    cpu: 200m
-    memory: 20Mi
-  limits:
-    cpu: 400m
-    memory: 40Mi
diff --git a/ansible/roles/apps/templates/settings/loki.yaml b/ansible/roles/apps/templates/settings/loki.yaml
deleted file mode 100644
index 0983a57b9..000000000
--- a/ansible/roles/apps/templates/settings/loki.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-enabled: true
-resources:
-  limits:
-    cpu: 400m
-    memory: 180Mi
-  requests:
-    cpu: 200m
-    memory: 90Mi
diff --git a/ansible/roles/apps/templates/settings/metallb.yaml b/ansible/roles/apps/templates/settings/metallb.yaml
deleted file mode 100644
index 095145a2a..000000000
--- a/ansible/roles/apps/templates/settings/metallb.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-# https://artifacthub.io/packages/helm/bitnami/metallb#example-layer-2-configuration
-configInline:
-  address-pools:
-    - name: default
-      protocol: layer2
-      addresses:
-        - "{{ ip_address }}/32"
diff --git a/ansible/roles/apps/templates/settings/nextcloud.yaml b/ansible/roles/apps/templates/settings/nextcloud.yaml
deleted file mode 100644
index 4b5138520..000000000
--- a/ansible/roles/apps/templates/settings/nextcloud.yaml
+++ /dev/null
@@ -1,180 +0,0 @@
-#jinja2: lstrip_blocks: "True"
-nextcloud:
-  nextcloud:
-    host: "files.{{ domain }}"
-    password: "{{ nextcloud_password }}"
-    mail:
-      enabled: {{ outgoing_mail.enabled  | default("false", true) }}
-      {% if outgoing_mail.enabled %}
-      {% set outgoing_mail_domain = outgoing_mail.fromAddress.split('@')[-1] %}
-      {% set outgoing_mail_from = outgoing_mail.fromAddress.split('@')[0] %}
-      fromAddress: "{{ outgoing_mail_from }}"
-      domain: "{{ outgoing_mail_domain }}"
-      smtp:
-        host: "{{ outgoing_mail.smtp.host }}"
-        {% if outgoing_mail.smtp.ssl %}
-        secure: "tls"
-        {% else %}
-        secure: ""
-        {% endif %}
-        port: "{{ outgoing_mail.smtp.port }}"
-        name: "{{ outgoing_mail.smtp.user }}"
-        password: "{{ outgoing_mail.smtp.password }}"
-        authtype: "{{ outgoing_mail.smtp.authtype }}"
-      {% endif %}
-  cronjob:
-    # Set curl to accept insecure connections when acme staging is used
-    curlInsecure: "{{ acme_staging }}"
-
-  ingress:
-    enabled: true
-    annotations:
-      # Tell cert-manager to automatically get a TLS certificate
-      kubernetes.io/tls-acme: "true"
-      # Set max body size high to allow big NextCloud uploads
-      nginx.ingress.kubernetes.io/proxy-body-size: 1G
-      nginx.ingress.kubernetes.io/server-snippet: |-
-        server_tokens off;
-        proxy_hide_header X-Powered-By;
-        rewrite ^/.well-known/webfinger /public.php?service=webfinger last;
-        rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
-        rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json;
-        location = /.well-known/carddav {
-          return 301 $scheme://$host/remote.php/dav;
-        }
-        location = /.well-known/caldav {
-          return 301 $scheme://$host/remote.php/dav;
-        }
-        location = /robots.txt {
-          allow all;
-          log_not_found off;
-          access_log off;
-        }
-    hosts:
-      - "files.{{ domain }}"
-    tls:
-      - hosts:
-          - "files.{{ domain }}"
-        secretName: oas-nextcloud-files
-
-  # Use 2 GB for Nextcloud storage (maybe make this configurable later?)
-  persistence:
-    enabled: true
-    existingClaim: "nextcloud-files"
-
-  podAnnotations:
-    # Let the backup system include nextcloud data.
-    backup.velero.io/backup-volumes: "nextcloud-data"
-
-  # Explicitly disable use of internal database
-  internalDatabase:
-    enabled: false
-
-  livenessProbe:
-    initialDelaySeconds: 300
-    failureThreshold: 20
-  readinessProbe:
-    initialDelaySeconds: 300
-
-  resources:
-    limits:
-      cpu: 500m
-      memory: 512Mi
-    requests:
-      cpu: 200m
-      memory: 256Mi
-
-  # Enable and configure MariaDB chart
-  mariadb:
-    db:
-      password: "{{ nextcloud_mariadb_password }}"
-    enabled: true
-    master:
-      annotations:
-        # Let the backup system include nextcloud database data.
-        backup.velero.io/backup-volumes: "data"
-      persistence:
-        ## Enable MariaDB persistence using Persistent Volume Claims.
-        enabled: true
-        existingClaim: "nextcloud-mariadb"
-      resources:
-        limits:
-          cpu: 200m
-          memory: 512Mi
-        requests:
-          cpu: 100m
-          memory: 256Mi
-    replication:
-      enabled: false
-    rootUser:
-      password: "{{ nextcloud_mariadb_root_password }}"
-
-setupApps:
-  backoffLimit: 20
-
-onlyoffice:
-  resources:
-    limits:
-      cpu: 800m
-      memory: 2Gi
-    requests:
-      cpu: 200m
-      memory: 1Gi
-  server_name: "office.{{ domain }}"
-  ingress:
-    enabled: true
-    annotations:
-      # Tell cert-manager to automatically get a TLS certificate
-      kubernetes.io/tls-acme: "true"
-    paths:
-      - "/"
-    hosts:
-      - "office.{{ domain }}"
-    tls:
-      - hosts:
-          - "office.{{ domain }}"
-        secretName: oas-nextcloud-office
-  jwtSecret: "{{ onlyoffice_jwt_secret }}"
-
-postgresql:
-  postgresqlPassword: "{{ onlyoffice_postgresql_password }}"
-  resources:
-    limits:
-      cpu: 100m
-      memory: 64Mi
-    requests:
-      cpu: 20m
-      memory: 32Mi
-
-rabbitmq:
-  rabbitmq:
-    password: "{{ onlyoffice_rabbitmq_password }}"
-  persistence:
-    enabled: false
-  resources:
-    limits:
-      cpu: 300m
-      memory: 256Mi
-    requests:
-      cpu: 100m
-      memory: 128Mi
-
-redis:
-  cluster:
-    enabled: false
-  master:
-    persistence:
-      enabled: false
-    resources:
-      limits:
-        cpu: 100m
-        memory: 64Mi
-      requests:
-        cpu: 50m
-        memory: 32Mi
-
-sociallogin:
-  server_name: "sso.{{ domain }}"
-  client_id: nextcloud
-  client_secret: "{{ nextcloud_oauth_client_secret }}"
-  groups_claim: "openappstack_roles"
diff --git a/ansible/roles/apps/templates/settings/promtail.yaml b/ansible/roles/apps/templates/settings/promtail.yaml
deleted file mode 100644
index fcba21438..000000000
--- a/ansible/roles/apps/templates/settings/promtail.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-enabled: true
-initContainer:
-  enabled: true
-  fsInotifyMaxUserInstances: 512
-resources:
-  limits:
-    cpu: 400m
-    memory: 256Mi
-  requests:
-    cpu: 300m
-    memory: 128Mi
-config:
-  lokiAddress: http://loki:3100/loki/api/v1/push
-  # https://github.com/grafana/helm-charts/blob/main/charts/promtail/values.yaml#L217
-  snippets:
-    # https://grafana.com/docs/loki/latest/clients/promtail/pipelines/
-    pipelineStages:
-      - cri: {}
-      - match:
-          selector: '{app="eventrouter"}'
-          stages:
-            - json:
-                expressions:
-                  event_verb: verb
-                  event_kind: event.involvedObject.kind
-                  event_reason: event.reason
-                  event_namespace: event.involvedObject.namespace
-                  event_name: event.metadata.name
-                  event_source_host: event.source.host
-                  event_source_component: event.source.component
-            - labels:
-                event_verb:
-                event_kind:
-                event_reason:
-                event_namespace:
-                event_name:
-                event_source_host:
-                event_source_component:
diff --git a/ansible/roles/apps/templates/settings/rocketchat.yaml b/ansible/roles/apps/templates/settings/rocketchat.yaml
deleted file mode 100644
index 01c5f67a5..000000000
--- a/ansible/roles/apps/templates/settings/rocketchat.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-# Hostname for Rocket.chat
-host: "chat.{{ domain }}"
-
-# Extra environment variables for Rocket.Chat. Used with the tpl function, so
-# this needs to be a string.
-extraEnv: |
-  - name: ADMIN_USERNAME
-    value: admin
-  - name: ADMIN_PASS
-    value: "{{ rocketchat_admin_password }}"
-  - name: ADMIN_EMAIL
-    value: "{{ admin_email }}"
-    # Set the setup wizard to completed. The setup wizard, which allows you to
-    # create a different admin user, gets skipped.
-  - name: OVERWRITE_SETTING_Show_Setup_Wizard
-    value: completed
-  - name: E2E_Enable
-    value: "true"
-  - name: Accounts_RegistrationForm
-    value: Disabled
-  - name: Accounts_RegistrationForm_LinkReplacementText
-    value: "Create a new account at admin.{{ domain }} to add users"
-  # Custom OAuth rules:
-  - name: Accounts_OAuth_Custom_Openappstack
-    value: "true"
-  - name: Accounts_OAuth_Custom_Openappstack_url
-    value: https://sso.{{ domain }}
-  - name: Accounts_OAuth_Custom_Openappstack_token_path
-    value: /oauth2/token
-  - name: Accounts_OAuth_Custom_Openappstack_token_sent_via
-    value: payload
-  - name: Accounts_OAuth_Custom_Openappstack_identity_token_sent_via
-    value: payload
-  - name: Accounts_OAuth_Custom_Openappstack_identity_path
-    value: /userinfo
-  - name: Accounts_OAuth_Custom_Openappstack_authorize_path
-    value: /oauth2/auth
-  - name: Accounts_OAuth_Custom_Openappstack_scope
-    value: openid profile openappstack_roles email
-  - name: Accounts_OAuth_Custom_Openappstack_id
-    value: rocketchat
-  - name: Accounts_OAuth_Custom_Openappstack_secret
-    value: {{ rocketchat_oauth_client_secret }}
-  - name: Accounts_OAuth_Custom_Openappstack_login_style
-    value: redirect
-  - name: Accounts_OAuth_Custom_Openappstack_button_label_text
-    value: Login via OpenAppStack
-  - name: Accounts_OAuth_Custom_Openappstack_button_label_color
-    value: "#FFFFFF"
-  - name: Accounts_OAuth_Custom_Openappstack_button_color
-    value: "#1d74f5"
-  - name: Accounts_OAuth_Custom_Openappstack_username_field
-    value: preferred_username
-  - name: Accounts_OAuth_Custom_Openappstack_name_field
-    value: preferred_username
-  - name: Accounts_OAuth_Custom_Openappstack_roles_claim
-    value: openappstack_roles
-  - name: Accounts_OAuth_Custom_Openappstack_merge_roles
-    value: "true"
-  - name: Accounts_OAuth_Custom_Openappstack_merge_users
-    value: "true"
-  - name: Accounts_OAuth_Custom_Openappstack_show_button
-    value: "true"
-
-
-livenessProbe:
-  initialDelaySeconds: 180
-  failureThreshold: 20
-readinessProbe:
-  initialDelaySeconds: 60
-
-ingress:
-  enabled: true
-  annotations:
-    # Tell cert-manager to automatically get a TLS certificate
-    kubernetes.io/tls-acme: "true"
-  tls:
-    - hosts:
-        - "chat.{{ domain }}"
-      secretName: oas-rocketchat
-
-persistence:
-  enabled: true
-  size: 1Gi
-  existingClaim: "rocketchat-data"
-
-podAnnotations:
-  # Let the backup system include rocketchat data.
-  backup.velero.io/backup-volumes: "rocket-data"
-
-resources:
-  limits:
-    cpu: 400m
-    memory: 1024Mi
-  requests:
-    cpu: 100m
-    memory: 768Mi
-
-mongodb:
-  mongodbRootPassword: "{{ rocketchat_mongodb_root_password }}"
-  mongodbPassword: "{{ rocketchat_mongodb_password }}"
-  podAnnotations:
-    # Let the backup system include rocketchat data stored in mongodb.
-    backup.velero.io/backup-volumes: "datadir"
-  persistence:
-    enabled: true
-    size: 2Gi
-  resources:
-    limits:
-      cpu: 600m
-      memory: 1024Mi
-    requests:
-      cpu: 300m
-      memory: 768Mi
-
-image:
-  tag: 3.13.0
-  pullPolicy: IfNotPresent
diff --git a/ansible/roles/apps/templates/settings/velero.yaml b/ansible/roles/apps/templates/settings/velero.yaml
deleted file mode 100644
index 8c5483d76..000000000
--- a/ansible/roles/apps/templates/settings/velero.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-# Init containers to add to the Velero deployment's pod spec. At least one plugin provider image is required.
-initContainers:
-  - name: velero-plugin-for-aws
-    image: velero/velero-plugin-for-aws:v1.1.0
-    imagePullPolicy: IfNotPresent
-    volumeMounts:
-      - mountPath: /target
-        name: plugins
-
-# Settings for Velero's prometheus metrics. Enabled by default.
-metrics:
-  enabled: true
-  scrapeInterval: 30s
-
-  # Pod annotations for Prometheus
-  podAnnotations:
-    prometheus.io/scrape: "true"
-    prometheus.io/port: "8085"
-    prometheus.io/path: "/metrics"
-
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {}
-
-# Install CRDs as templates. Enabled by default.
-installCRDs: true
-
-##
-## Parameters for the `default` BackupStorageLocation and VolumeSnapshotLocation,
-## and additional server settings.
-##
-configuration:
-  # Cloud provider being used (e.g. aws, azure, gcp).
-  # We don't use AWS but Ceph, which is S3-compatible.
-  provider: aws
-
-  # Parameters for the `default` BackupStorageLocation. See
-  # https://velero.io/docs/v1.0.0/api-types/backupstoragelocation/
-  backupStorageLocation:
-    # Cloud provider where backups should be stored. Usually should
-    # match `configuration.provider`. Required.
-    # The name "default" seems to be special: backups that don't have a
-    # location specified will use this one.
-    name: default
-    # Provider for the backup storage location. If omitted
-    # `configuration.provider` will be used instead.
-    # provider:
-    # Bucket to store backups in. Required.
-    bucket: {{ backup.s3.bucket }}
-    # Prefix within bucket under which to store backups. Optional.
-    prefix: {{ backup.s3.prefix }}
-    # Additional provider-specific configuration. See link above
-    # for details of required/optional fields for your provider.
-    config:
-      s3ForcePathStyle: true
-      s3Url: {{ backup.s3.url }}
-      region: {{ backup.s3.region }}
-
-rbac:
-  # Whether to create the Velero role and role binding that give Velero all permissions in its namespace.
-  create: true
-  # Whether to create the cluster role binding to give administrator permissions to Velero
-  clusterAdministrator: true
-
-# Information about the Kubernetes service account Velero uses.
-serviceAccount:
-  server:
-    create: true
-    name:
-    annotations:
-
-# Info about the secret to be used by the Velero deployment, which
-# should contain credentials for the cloud provider IAM account you've
-# set up for Velero.
-credentials:
-  # Name of a pre-existing secret (if any) in the Velero namespace
-  # that should be used to get IAM account credentials. Optional.
-  {% if backup.s3.enabled %}
-  existingSecret: credentials
-  {% else %}
-  useSecret: false
-  {% endif %}
-
-# Whether to create the backupstoragelocation CRD; if false, no default backup location is created.
-backupsEnabled: {{ backup.s3.enabled }}
-# Whether to create the volumesnapshotlocation CRD; if false, the snapshot feature is disabled.
-snapshotsEnabled: false
-
-# Whether to deploy the restic daemonset.
-deployRestic: true
-
-restic:
-  podVolumePath: /var/lib/kubelet/pods
-  privileged: true
-
-# Backup schedules to create.
-{% if backup.s3.enabled %}
-schedules:
-  # This is just a name, can be anything.
-  nightly:
-    # Every night at 3:30.
-    schedule: "30 3 * * *"
-    template:
-      # Backups are stored for 60 days (1440 hours).
-      ttl: "1440h"
-      includedNamespaces:
-      # We include all namespaces.
-      - '*'
-{% else %}
-# There is no point in setting a backup schedule as we haven't configured a
-# storage location.
-schedules: {}
-{% endif %}
-
-configMaps: {}
diff --git a/ansible/roles/apps/templates/settings/wordpress.yaml b/ansible/roles/apps/templates/settings/wordpress.yaml
deleted file mode 100644
index 5fa27d45d..000000000
--- a/ansible/roles/apps/templates/settings/wordpress.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-wordpress:
-  config:
-    db:
-      prefix: wp_
-    adm:
-      usid: admin
-      pssw: "{{ wordpress_admin_password }}"
-  site:
-    # NOTE: Make sure you use an underscore and that the country code is in full caps, e.g. en_US.
-    locale: en_US
-    url: "https://www.{{ domain }}"
-    title: "OpenAppStack website"
-
-persistence:
-  existingClaim: wordpress-files
-podAnnotations:
-  backup.velero.io/backup-volumes: "wordpress-wp-uploads"
-
-openid_connect_settings:
-  enabled: true
-  client_secret: {{ wordpress_oauth_client_secret }}
-  endpoint_login: https://sso.{{ domain }}/oauth2/auth
-  endpoint_userinfo: https://sso.{{ domain }}/userinfo
-  endpoint_token: https://sso.{{ domain }}/oauth2/token
-  endpoint_end_session: ""
-  # After our SSO supports it, we should set this as the logout URL
-  # https://open.greenhost.net/openappstack/single-sign-on/issues/28
-  # endpoint_end_session: https://sso.{{ domain }}/oauth2/sessions/logout
-  no_sslverify: "0"
-  http_request_timeout: "15"
-  enable_logging: "1"
-  scope: email profile openid openappstack_roles offline_access
-  role_mapping_enabled: true
-  role_key: openappstack_roles
-
-database:
-  db:
-    user: wordpress
-    password: "{{ wordpress_mariadb_password }}"
-  rootUser:
-    password: "{{ wordpress_mariadb_root_password }}"
-  master:
-    persistence:
-      ## Enable MariaDB persistence using Persistent Volume Claims.
-      enabled: true
-      existingClaim: "wordpress-mariadb"
-    annotations:
-      # Let the backup system include the wordpress database data.
-      backup.velero.io/backup-volumes: "data"
-    resources:
-      limits:
-        cpu: 200m
-        memory: 512Mi
-      requests:
-        cpu: 100m
-        memory: 256Mi
-  replication:
-    enabled: false
-
-# It's advisable to set resource limits to prevent your K8s cluster from
-# crashing
-resources:
-  limits:
-    cpu: 500m
-    memory: 256Mi
-  requests:
-    cpu: 100m
-    memory: 128Mi
-
-ingress:
-  enabled: true
-  annotations:
-    kubernetes.io/tls-acme: "true"
-  path: /
-  hosts:
-    - "www.{{ domain }}"
-    - "{{ domain }}"
-  tls:
-    - hosts:
-        - "www.{{ domain }}"
-        - "{{ domain }}"
-      secretName: oas-wordpress
diff --git a/ansible/roles/kubernetes-checks/tasks/main.yml b/ansible/roles/kubernetes-checks/tasks/main.yml
deleted file mode 100644
index 4eebcb442..000000000
--- a/ansible/roles/kubernetes-checks/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Make sure kubectl has connection to server
-  tags:
-    - kubectl
-    - plugin
-    - krew
-  shell: kubectl version
-  failed_when: false
-  register: kubectl_server_version
-  changed_when: false
-
-- name: Fail if kubectl is not configured correctly
-  fail:
-    msg: Kubectl has no connection to the server or is not installed. Install kubectl on the server or run the install-kubernetes.yml playbook.
-  when: kubectl_server_version.rc != 0
-
-- name: Make sure openshift is installed
-  tags:
-    - kubectl
-    - plugin
-    - krew
-  shell: pip3 freeze | grep openshift
-  failed_when: false
-  register: openshift_version
-  changed_when: false
-
-- name: Fail if openshift is not installed
-  fail:
-    msg: Openshift should be installed. Run `pip3 install openshift` on the server or run the install-kubernetes.yml playbook.
-  when: openshift_version.rc != 0
diff --git a/ansible/roles/pre-configure/tasks/main.yml b/ansible/roles/pre-configure/tasks/main.yml
index 2ab83ed34..a18363bbd 100644
--- a/ansible/roles/pre-configure/tasks/main.yml
+++ b/ansible/roles/pre-configure/tasks/main.yml
@@ -47,16 +47,6 @@
   with_items:
     - python3-pip
 
-- name: Install python packages via pip3
-  tags:
-    - package
-    - pip
-  pip:
-    name:
-      # The openshift Python package is needed for the Ansible k8s module.
-      - openshift
-    executable: /usr/bin/pip3
-
 - name: Install kubectl snap
   # kubectl needs to get installed as "classic" snap
   command: snap install --classic kubectl
diff --git a/flux/.flux.yaml b/flux/.flux.yaml
deleted file mode 100644
index c4122e1e9..000000000
--- a/flux/.flux.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: 1
-commandUpdated:
-  generators:
-  # Find all yaml files (recursively).
-  # Filename convention: appname_optional_additional_identifiers_resourcetype.yaml
-  #   e.g. oas/prometheus_alerts_custom_cm.yaml (configmap)
-  # Ignore files whose names start with a dot.
-  # For each file, check whether the corresponding app settings secret has
-  # disabled the app by having a key "enabled" with value "false", and skip
-  # the file in that case.
-  # Otherwise, include it.
-  - command: >
-      for namespace in *; do
-        for location in $(find "$namespace" -type f \( -name '*.yaml' -o -name '*.yml' \) -a ! -name '.*'); do
-          filename=$(basename $location);
-          name="${filename%%_*}";
-          enabled=$(kubectl get secret -n "$namespace" "${name}-settings" -o jsonpath="{.data.enabled}" | base64 -d);
-          if [ "$enabled" == "true" ]; then
-            echo '---';
-            cat $location;
-          fi;
-        done;
-      done
-  updaters: []
diff --git a/flux/cert-manager/cert-manager_hr.yaml b/flux/cert-manager/cert-manager_hr.yaml
deleted file mode 100644
index 0b1de9e39..000000000
--- a/flux/cert-manager/cert-manager_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: cert-manager
-  namespace: cert-manager
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: cert-manager
-  chart:
-    # https://artifacthub.io/packages/helm/cert-manager/cert-manager
-    repository: https://charts.jetstack.io
-    name: cert-manager
-    version: 1.3.1
-  valuesFrom:
-    - secretKeyRef:
-        name: cert-manager-settings
-        key: values.yaml
diff --git a/flux/kube-system/local-path-provisioner_hr.yaml b/flux/kube-system/local-path-provisioner_hr.yaml
deleted file mode 100644
index 7d83e861e..000000000
--- a/flux/kube-system/local-path-provisioner_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: local-path-provisioner
-  namespace: kube-system
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: local-path-provisioner
-  chart:
-    git: https://github.com/rancher/local-path-provisioner
-    ref: v0.0.14
-    path: deploy/chart
-  valuesFrom:
-    - secretKeyRef:
-        name: local-path-provisioner-settings
-        key: values.yaml
-  timeout: 120
diff --git a/flux/kube-system/metallb_hr.yaml b/flux/kube-system/metallb_hr.yaml
deleted file mode 100644
index 90156f8f3..000000000
--- a/flux/kube-system/metallb_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: metallb
-  namespace: kube-system
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: metallb
-  chart:
-    repository: https://charts.bitnami.com/bitnami
-    name: metallb
-    version: 0.1.23
-  valuesFrom:
-    - secretKeyRef:
-        name: metallb-settings
-        key: values.yaml
-  timeout: 120
diff --git a/flux/oas-apps/nextcloud_hr.yaml b/flux/oas-apps/nextcloud_hr.yaml
deleted file mode 100644
index cacb5a9a1..000000000
--- a/flux/oas-apps/nextcloud_hr.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: nextcloud
-  namespace: oas-apps
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  # Calling the release "nextcloud" runs into a bug in the helm chart.
-  # See https://open.greenhost.net/openappstack/nextcloud/issues/3 for details.
-  releaseName: nc
-  chart:
-    git: https://open.greenhost.net/openappstack/nextcloud
-    ref: 0.2.7
-    path: .
-  valuesFrom:
-    - secretKeyRef:
-        name: nextcloud-settings
-        key: values.yaml
-  timeout: 1800
-  wait: true
diff --git a/flux/oas-apps/rocketchat_hr.yaml b/flux/oas-apps/rocketchat_hr.yaml
deleted file mode 100644
index 00dc2a1cb..000000000
--- a/flux/oas-apps/rocketchat_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: rocketchat
-  namespace: oas-apps
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: rocketchat
-  chart:
-    repository: https://charts.helm.sh/stable
-    name: rocketchat
-    version: 2.0.10
-  valuesFrom:
-    - secretKeyRef:
-        name: rocketchat-settings
-        key: values.yaml
-  timeout: 300
diff --git a/flux/oas-apps/wordpress_hr.yaml b/flux/oas-apps/wordpress_hr.yaml
deleted file mode 100644
index 3bb81645a..000000000
--- a/flux/oas-apps/wordpress_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: wordpress
-  namespace: oas-apps
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: wordpress
-  chart:
-    git: https://open.greenhost.net/openappstack/wordpress-helm
-    ref: 0.1.4
-    path: .
-  valuesFrom:
-    - secretKeyRef:
-        name: wordpress-settings
-        key: values.yaml
-  timeout: 1800
diff --git a/flux/oas-custom/flux-custom_hr.yaml b/flux/oas-custom/flux-custom_hr.yaml
deleted file mode 100644
index b0bc9f12f..000000000
--- a/flux/oas-custom/flux-custom_hr.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: flux-custom
-  namespace: oas-custom
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: flux-custom
-  chart:
-    name: flux
-    repository: https://charts.fluxcd.io
-    version: 1.6.0
-  valuesFrom:
-  - secretKeyRef:
-      name: flux-custom-settings
-      key: values.yaml
diff --git a/flux/oas/eventrouter_hr.yaml b/flux/oas/eventrouter_hr.yaml
deleted file mode 100644
index 59eb95622..000000000
--- a/flux/oas/eventrouter_hr.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: eventrouter
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: eventrouter
-  chart:
-    repository: https://charts.helm.sh/stable
-    name: eventrouter
-    version: 0.3.2
-  valuesFrom:
-  - secretKeyRef:
-      name: eventrouter-settings
-      key: values.yaml
diff --git a/flux/oas/ingress_hr.yaml b/flux/oas/ingress_hr.yaml
deleted file mode 100644
index c2167f74d..000000000
--- a/flux/oas/ingress_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: ingress
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: ingress
-  chart:
-    # https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx
-    repository: https://kubernetes.github.io/ingress-nginx
-    name: ingress-nginx
-    version: 3.29.0
-  valuesFrom:
-    - secretKeyRef:
-        name: ingress-settings
-        key: values.yaml
diff --git a/flux/oas/kube-prometheus-stack_hr.yaml b/flux/oas/kube-prometheus-stack_hr.yaml
deleted file mode 100644
index 45a9a76c1..000000000
--- a/flux/oas/kube-prometheus-stack_hr.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: prometheus-stack
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: prometheus-stack
-  # https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/Chart.yaml
-  # https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack
-  chart:
-    repository: https://prometheus-community.github.io/helm-charts
-    name: kube-prometheus-stack
-    version: 15.4.2
-  valuesFrom:
-    - secretKeyRef:
-        name: prometheus-stack-settings
-        key: values.yaml
-  timeout: 300
diff --git a/flux/oas/letsencrypt-production_hr.yaml b/flux/oas/letsencrypt-production_hr.yaml
deleted file mode 100644
index a25ad1f27..000000000
--- a/flux/oas/letsencrypt-production_hr.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: letsencrypt-production
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: letsencrypt-production
-  chart:
-    git: https://open.greenhost.net/openappstack/letsencrypt-issuer
-    ref: 346ce53321fe6880dd76e7cff7e2a71e57f667d8
-    path: .
-  valuesFrom:
-    - secretKeyRef:
-        name: letsencrypt-production-settings
-        key: values.yaml
diff --git a/flux/oas/letsencrypt-staging_hr.yaml b/flux/oas/letsencrypt-staging_hr.yaml
deleted file mode 100644
index 96522de90..000000000
--- a/flux/oas/letsencrypt-staging_hr.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: letsencrypt-staging
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: letsencrypt-staging
-  chart:
-    git: https://open.greenhost.net/openappstack/letsencrypt-issuer
-    ref: 346ce53321fe6880dd76e7cff7e2a71e57f667d8
-    path: .
-  valuesFrom:
-    - secretKeyRef:
-        name: letsencrypt-staging-settings
-        key: values.yaml
-  values:
-    issuer:
-      name: letsencrypt-staging
-      server: "https://acme-staging-v02.api.letsencrypt.org/directory"
diff --git a/flux/oas/loki_cm.yaml b/flux/oas/loki_cm.yaml
deleted file mode 100644
index 6fa99c26a..000000000
--- a/flux/oas/loki_cm.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  labels:
-    app: loki
-    grafana_datasource: "1"
-    release: loki
-  name: loki-datasource
-  namespace: oas
-data:
-  loki-stack-datasource.yaml: |-
-    apiVersion: 1
-    datasources:
-    - name: Loki
-      type: loki
-      access: proxy
-      url: http://loki:3100
-      version: 1
diff --git a/flux/oas/loki_hr.yaml b/flux/oas/loki_hr.yaml
deleted file mode 100644
index b973dcf8b..000000000
--- a/flux/oas/loki_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: loki
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: loki
-  chart:
-    # https://github.com/grafana/helm-charts/tree/main/charts/loki
-    repository: https://grafana.github.io/helm-charts
-    name: loki
-    version: 2.5.0
-  valuesFrom:
-  - secretKeyRef:
-      name: loki-settings
-      key: values.yaml
diff --git a/flux/oas/promtail_hr.yaml b/flux/oas/promtail_hr.yaml
deleted file mode 100644
index 2fb7a68f2..000000000
--- a/flux/oas/promtail_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: promtail
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: promtail
-  chart:
-    # https://github.com/grafana/helm-charts/tree/main/charts/promtail
-    repository: https://grafana.github.io/helm-charts
-    name: promtail
-    version: 3.5.1
-  valuesFrom:
-  - secretKeyRef:
-      name: promtail-settings
-      key: values.yaml
diff --git a/flux/oas/single-sign-on_hr.yaml b/flux/oas/single-sign-on_hr.yaml
deleted file mode 100644
index c5b12df9d..000000000
--- a/flux/oas/single-sign-on_hr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: single-sign-on
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: single-sign-on
-  chart:
-    git: https://open.greenhost.net/openappstack/single-sign-on
-    ref: master
-    path: ./helmchart/single-sign-on/
-  valuesFrom:
-    - secretKeyRef:
-        name: single-sign-on-settings
-        key: values.yaml
-  timeout: 1800
diff --git a/flux/velero/velero_hr.yaml b/flux/velero/velero_hr.yaml
deleted file mode 100644
index bda7108f6..000000000
--- a/flux/velero/velero_hr.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: velero
-  namespace: velero
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: velero
-  chart:
-    repository: https://vmware-tanzu.github.io/helm-charts
-    name: velero
-    version: 2.11.0
-  valuesFrom:
-    - secretKeyRef:
-        name: velero-settings
-        key: values.yaml
diff --git a/flux2/cluster/optional/nextcloud/nextcloud.yaml b/flux2/cluster/optional/nextcloud/nextcloud.yaml
deleted file mode 100644
index e09fbd14a..000000000
--- a/flux2/cluster/optional/nextcloud/nextcloud.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: nextcloud
-  namespace: flux-system
-spec:
-  interval: 1h
-  dependsOn:
-    - name: core
-    - name: infrastructure
-  sourceRef:
-    kind: GitRepository
-    name: openappstack
-  path: ./flux2/apps/nextcloud
-  prune: true
-  validation: client
-  # healthChecks:
-  #   - apiVersion: helm.toolkit.fluxcd.io/v1beta1
-  #     kind: HelmRelease
-  #     name: podinfo
-  #     namespace: podinfo
-  postBuild:
-    substituteFrom:
-      - kind: Secret
-        name: oas-nextcloud-variables
-      - kind: Secret
-        name: oas-oauth-variables
-      - kind: Secret
-        name: oas-cluster-variables
diff --git a/flux2/cluster/optional/rocketchat/rocketchat.yaml b/flux2/cluster/optional/rocketchat/rocketchat.yaml
deleted file mode 100644
index c3cd0c2d6..000000000
--- a/flux2/cluster/optional/rocketchat/rocketchat.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: rocketchat
-  namespace: flux-system
-spec:
-  interval: 1h
-  dependsOn:
-    - name: core
-    - name: infrastructure
-  sourceRef:
-    kind: GitRepository
-    name: openappstack
-  path: ./flux2/apps/rocketchat
-  prune: true
-  validation: client
-  # healthChecks:
-  #   - apiVersion: helm.toolkit.fluxcd.io/v1beta1
-  #     kind: HelmRelease
-  #     name: podinfo
-  #     namespace: podinfo
-  postBuild:
-    substituteFrom:
-      - kind: Secret
-        name: oas-rocketchat-variables
-      - kind: Secret
-        name: oas-oauth-variables
-      - kind: Secret
-        name: oas-cluster-variables
diff --git a/flux2/cluster/optional/velero/velero.yaml b/flux2/cluster/optional/velero/velero.yaml
deleted file mode 100644
index ac4464034..000000000
--- a/flux2/cluster/optional/velero/velero.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: velero
-  namespace: flux-system
-spec:
-  interval: 1h
-  dependsOn:
-    - name: core
-    - name: infrastructure
-  sourceRef:
-    kind: GitRepository
-    name: openappstack
-  path: ./flux2/apps/velero
-  prune: true
-  validation: client
-  # healthChecks:
-  #   - apiVersion: helm.toolkit.fluxcd.io/v1beta1
-  #     kind: HelmRelease
-  #     name: podinfo
-  #     namespace: podinfo
-  postBuild:
-    substituteFrom:
-      - kind: Secret
-        name: oas-cluster-variables
diff --git a/flux2/cluster/optional/wordpress/wordpress.yaml b/flux2/cluster/optional/wordpress/wordpress.yaml
deleted file mode 100644
index f6017a099..000000000
--- a/flux2/cluster/optional/wordpress/wordpress.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: wordpress
-  namespace: flux-system
-spec:
-  interval: 1h
-  dependsOn:
-    - name: core
-    - name: infrastructure
-  sourceRef:
-    kind: GitRepository
-    name: openappstack
-  path: ./flux2/apps/wordpress
-  prune: true
-  validation: client
-  postBuild:
-    substituteFrom:
-      - kind: Secret
-        name: oas-wordpress-variables
-      - kind: Secret
-        name: oas-oauth-variables
-      - kind: Secret
-        name: oas-cluster-variables
diff --git a/flux2/cluster/test/all-optional.yaml b/flux2/cluster/test/all-optional.yaml
deleted file mode 100644
index 144b75ec3..000000000
--- a/flux2/cluster/test/all-optional.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: all-optional
-  namespace: flux-system
-spec:
-  dependsOn:
-    - name: base
-  interval: 1h
-  sourceRef:
-    kind: GitRepository
-    name: openappstack
-  path: ./flux2/cluster/optional
-  prune: true
-  validation: client
diff --git a/flux2/cluster/test/base.yaml b/flux2/cluster/test/base.yaml
deleted file mode 100644
index 8daf42ce5..000000000
--- a/flux2/cluster/test/base.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: base
-  namespace: flux-system
-spec:
-  interval: 1h
-  sourceRef:
-    kind: GitRepository
-    name: openappstack
-  path: ./flux2/cluster/base
-  prune: true
-  validation: client
diff --git a/openappstack/cluster.py b/openappstack/cluster.py
index 71425bea8..a226c3b6d 100644
--- a/openappstack/cluster.py
+++ b/openappstack/cluster.py
@@ -54,8 +54,6 @@ class Cluster:
         self.ip_address = None
         self.hostname = None
         self.domain = None
-        # By default, use Let's Encrypt's live environment
-        self.acme_staging = False
         # Set this to False if the data needs to be (re)loaded from file
         self.data_loaded = False
         # Load data from inventory.yml and settings.yml
@@ -171,7 +169,6 @@ class Cluster:
 
         settings['ip_address'] = self.ip_address
         settings['domain'] = self.domain
-        settings['admin_email'] = 'admin@{0}'.format(self.domain)
         settings['cluster_dir'] = self.cluster_dir
         if self.docker_mirror_endpoint \
                 and self.docker_mirror_server \
@@ -183,18 +180,6 @@ class Cluster:
             settings['docker_mirror']['password'] = self.docker_mirror_password
             settings['docker_mirror']['server'] = self.docker_mirror_server
 
-        # Configure apps to handle invalid certs, e.g. from the
-        # Let's Encrypt staging API
-        settings['acme_staging'] = self.acme_staging
-        nextcloud_extra_values = """
-          onlyoffice:
-            unauthorizedStorage: true
-            httpsHstsEnabled: false
-        """
-        if self.acme_staging:
-            settings['nextcloud_extra_values'] = \
-                yaml.load(nextcloud_extra_values)
-
         file_contents = yaml.safe_dump(settings, default_flow_style=False)
         log.debug(file_contents)
 
diff --git a/requirements.in b/requirements.in
index 387e00d4d..9520c07b2 100644
--- a/requirements.in
+++ b/requirements.in
@@ -9,8 +9,6 @@
 ansible>=2.9.10,<2.10
 # needed for test_dns.py
 dnspython>=2.1.0
-# Needed for the Ansible k8s module
-openshift>=0.12.0
 # Needed for testinfra using the ansible module
 paramiko>=2.7.0
 psutil>=5.5.0
-- 
GitLab