diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 4e69161fa503a01552855408ba55852faff44010..2cf1a89bee0dd544a829489b686a0088207e77bc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -12,11 +12,11 @@ stages:
   - cleanup
 variables:
   SSH_KEY_ID: "411"
-  HOSTNAME: "ci-${CI_COMMIT_REF_SLUG}"
+  HOSTNAME: "${CI_COMMIT_REF_SLUG}"
   # Repeated values, because we're not allowed to use a variable in a variable
-  SUBDOMAIN: "ci-${CI_COMMIT_REF_SLUG}.ci"
+  SUBDOMAIN: "${CI_COMMIT_REF_SLUG}.ci"
   DOMAIN: "openappstack.net"
-  ADDRESS: "ci-${CI_COMMIT_REF_SLUG}.ci.openappstack.net"
+  ADDRESS: "${CI_COMMIT_REF_SLUG}.ci.openappstack.net"
   ANSIBLE_HOST_KEY_CHECKING: "False"
   KANIKO_BUILD_IMAGENAME: "openappstack-ci"
 
@@ -66,8 +66,8 @@ setup-kubernetes:
   script:
     # Copy inventory files to ansible folder for use in install-apps step
     - chmod 700 ansible
-    - cp clusters/ci-${CI_COMMIT_REF_SLUG}/inventory.yml ansible/
-    - cp clusters/ci-${CI_COMMIT_REF_SLUG}/settings.yml ansible/group_vars/all/
+    - cp clusters/${CI_COMMIT_REF_SLUG}/inventory.yml ansible/
+    - cp clusters/${CI_COMMIT_REF_SLUG}/settings.yml ansible/group_vars/all/
     # Set up cluster
     - python3 -m openappstack $HOSTNAME install --ansible-param='--skip-tags=helmfile'
   artifacts:
@@ -162,7 +162,7 @@ certs:
 prometheus-alerts:
   stage: health-test
   variables:
-    OAS_DOMAIN: 'ci-${CI_COMMIT_REF_SLUG}.ci.openappstack.net'
+    OAS_DOMAIN: '${CI_COMMIT_REF_SLUG}.ci.openappstack.net'
   allow_failure: true
   script:
     - cd test/
@@ -229,7 +229,7 @@ terminate_mr_droplet_after_merge:
       tmp="${commit_message#*\'}"
       merged_branch="${tmp%%\'*}"
       echo "Current HEAD is a merge commit, removing droplet from related merge request branch name '#${merged_branch}'."
-      python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^ci-${merged_branch}\.\")"
+      python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^${merged_branch}\.\")"
     else
       echo "Current HEAD is NOT a merge commit, nothing to do."
     fi
@@ -241,7 +241,7 @@ terminate_old_droplets:
   stage: cleanup
   script:
     - echo "Terminate droplets 5 days after creation. Branches that exist longer than 5 days will get a new droplet when CI runs again."
-    - python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^ci-\", 5)"
+    - python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name('\d+-.*', 5)"
   only:
     changes:
       - .gitlab-ci.yml
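
Note on the cleanup change above: the pattern passed to `terminate_droplets_by_name` moves from the old `^ci-` prefix to `\d+-.*`, matching droplet names that start with digits, which is what `CI_COMMIT_REF_SLUG` typically produces for merge-request branches named after an issue number (an assumption about the branch naming convention). A minimal sketch of what the new pattern matches, using only the standard `re` module and hypothetical droplet names:

    import re

    # Hypothetical droplet names; the new CI naming drops the "ci-" prefix
    # and uses CI_COMMIT_REF_SLUG directly, e.g. "123-some-branch".
    names = ["123-fix-flux-secret", "ci-123-fix-flux-secret", "production"]

    pattern = r"\d+-.*"  # the pattern passed to terminate_droplets_by_name

    for name in names:
        # re.match anchors at the start of the string, so an explicit "^"
        # is not needed for the new pattern.
        print(name, bool(re.match(pattern, name)))
    # 123-fix-flux-secret True
    # ci-123-fix-flux-secret False
    # production False
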
diff --git a/ansible/group_vars/all/oas.yml b/ansible/group_vars/all/oas.yml
index 0f3808e827afa014ef6632aa5b28cba8fb605bae..77ee8c694a7b2bb622d099c784e7f25f9f99f014 100644
--- a/ansible/group_vars/all/oas.yml
+++ b/ansible/group_vars/all/oas.yml
@@ -57,3 +57,8 @@ cert_manager:
 
 prometheus:
   crd_version: 'v0.34.0'
+
+# Let the auto-update mechanism (flux) follow a cluster-local git repo,
+# not one hosted on open.greenhost.net.
+# Currently needed in order to deploy OAS applications.
+local_flux: true
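
For reference, a minimal sketch of how a playbook could act on the new `local_flux` default. This is illustrative only: the `local-flux` role exists in this repository, but the host group name and gating it exactly like this are assumptions, not necessarily how the playbooks wire it up.

    # Hypothetical playbook snippet: only set up the cluster-local git repo
    # for flux when local_flux is enabled in group_vars or settings.yml.
    - hosts: cluster
      roles:
        - role: local-flux
          when: local_flux | default(false) | bool
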
diff --git a/ansible/roles/apps/tasks/cert-manager.yml b/ansible/roles/apps/tasks/cert-manager.yml
index 6ad32349443ae3d59ff40a76d2690f54395cd054..73757a6fcdbd05d161cb87b614b6d9ecbec6ed1d 100644
--- a/ansible/roles/apps/tasks/cert-manager.yml
+++ b/ansible/roles/apps/tasks/cert-manager.yml
@@ -50,13 +50,8 @@
     - config
     - flux
     - cert-manager
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas"
-        name: "cert-manager-settings"
-      data:
-        values.yaml: "{{ lookup('template','cert-manager-values.yaml') | b64encode }}"
+  vars:
+    flux:
+      name: "cert-manager-settings"
+      namespace: "oas"
+  include_tasks: flux_secret.yml
diff --git a/ansible/roles/apps/tasks/flux_secret.yml b/ansible/roles/apps/tasks/flux_secret.yml
new file mode 100644
index 0000000000000000000000000000000000000000..23cac6b5b1c7b9098bf4480c4f7163f157def45a
--- /dev/null
+++ b/ansible/roles/apps/tasks/flux_secret.yml
@@ -0,0 +1,16 @@
+---
+- name: Create Kubernetes secret with app settings
+  tags:
+    - config
+    - flux
+    - secret
+  k8s:
+    state: present
+    definition:
+      api_version: v1
+      kind: Secret
+      metadata:
+        namespace: "{{ flux.namespace | default('oas-apps') }}"
+        name: "{{ flux.name }}"
+      data:
+        values.yaml: "{{ lookup('template','{{ flux.name }}.yaml') | b64encode }}"
diff --git a/ansible/roles/apps/tasks/init.yml b/ansible/roles/apps/tasks/init.yml
index a1cd66afc8b512e55566f1be081f71679bb6cf17..218fc4780ce6e79c1945a5816cc1b1b38b4be33e 100644
--- a/ansible/roles/apps/tasks/init.yml
+++ b/ansible/roles/apps/tasks/init.yml
@@ -15,6 +15,7 @@
     src: '../../helmfiles'
     dest: '{{ data_directory }}/source'
     delete: true
+    use_ssh_args: true
   become: true
 
 - name: Create value overrides directory
diff --git a/ansible/roles/apps/tasks/local-storage.yml b/ansible/roles/apps/tasks/local-storage.yml
index 8b01f5bf0a5d8ac7f6d797325c917163f30b6b6c..99b67ed82409459696fe30c40b0cbb394619de73 100644
--- a/ansible/roles/apps/tasks/local-storage.yml
+++ b/ansible/roles/apps/tasks/local-storage.yml
@@ -1,17 +1,11 @@
 ---
-
-- name: Create Kubernetes secret with local-storage values
+- name: Create Kubernetes secret with local-storage settings
   tags:
     - config
     - flux
     - local-storage
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas"
-        name: "local-storage-settings"
-      data:
-        values.yaml: "{{ lookup('template','local-storage-values.yaml') | b64encode }}"
+  vars:
+    flux:
+      name: "local-storage-settings"
+      namespace: "oas"
+  include_tasks: flux_secret.yml
diff --git a/ansible/roles/apps/tasks/nextcloud.yml b/ansible/roles/apps/tasks/nextcloud.yml
index af3bdc5788ede5c437acb4f1ec05cac436020ab9..3ecf26ee46f15f530c140e07bd8bfc9c8e300d47 100644
--- a/ansible/roles/apps/tasks/nextcloud.yml
+++ b/ansible/roles/apps/tasks/nextcloud.yml
@@ -1,17 +1,11 @@
 ---
 
-- name: Create Kubernetes secret with NextCloud settings
+- name: Create Kubernetes secret with nextcloud settings
   tags:
     - config
     - flux
     - nextcloud
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas-apps"
-        name: "nextcloud-settings"
-      data:
-        values.yaml: "{{ lookup('template','nextcloud-values.yaml') | b64encode }}"
+  vars:
+    flux:
+      name: "nextcloud-settings"
+  include_tasks: flux_secret.yml
diff --git a/ansible/roles/apps/tasks/nginx.yml b/ansible/roles/apps/tasks/nginx.yml
index a84dd71746cb6727dac73ae062baf57b25d7dad8..4e4f75bc6cbd626e618775457256dc88d4828192 100644
--- a/ansible/roles/apps/tasks/nginx.yml
+++ b/ansible/roles/apps/tasks/nginx.yml
@@ -5,13 +5,8 @@
     - config
     - flux
     - nginx
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas"
-        name: "ingress-settings"
-      data:
-        values.yaml: "{{ lookup('template','ingress-values.yaml') | b64encode }}"
+  vars:
+    flux:
+      name: "ingress-settings"
+      namespace: "oas"
+  include_tasks: flux_secret.yml
diff --git a/ansible/roles/apps/tasks/prometheus.yml b/ansible/roles/apps/tasks/prometheus.yml
index 9866fa72611a751a0552d8d0d13f2d08db00ef97..2e2ef3987bb436803d0130df918b002116003c32 100644
--- a/ansible/roles/apps/tasks/prometheus.yml
+++ b/ansible/roles/apps/tasks/prometheus.yml
@@ -37,13 +37,8 @@
     - flux
     - monitoring
     - prometheus
-  k8s:
-    state: present
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: "oas"
-        name: "monitoring-settings"
-      data:
-        values.yaml: "{{ lookup('template','monitoring-values.yaml') | b64encode }}"
+  vars:
+    flux:
+      name: "monitoring-settings"
+      namespace: "oas"
+  include_tasks: flux_secret.yml
diff --git a/ansible/roles/apps/templates/cert-manager-values.yaml b/ansible/roles/apps/templates/cert-manager-settings.yaml
similarity index 100%
rename from ansible/roles/apps/templates/cert-manager-values.yaml
rename to ansible/roles/apps/templates/cert-manager-settings.yaml
diff --git a/ansible/roles/apps/templates/ingress-values.yaml b/ansible/roles/apps/templates/ingress-settings.yaml
similarity index 100%
rename from ansible/roles/apps/templates/ingress-values.yaml
rename to ansible/roles/apps/templates/ingress-settings.yaml
diff --git a/ansible/roles/apps/templates/local-storage-values.yaml b/ansible/roles/apps/templates/local-storage-settings.yaml
similarity index 100%
rename from ansible/roles/apps/templates/local-storage-values.yaml
rename to ansible/roles/apps/templates/local-storage-settings.yaml
diff --git a/ansible/roles/apps/templates/monitoring-values.yaml b/ansible/roles/apps/templates/monitoring-settings.yaml
similarity index 100%
rename from ansible/roles/apps/templates/monitoring-values.yaml
rename to ansible/roles/apps/templates/monitoring-settings.yaml
diff --git a/ansible/roles/apps/templates/nextcloud-values.yaml b/ansible/roles/apps/templates/nextcloud-settings.yaml
similarity index 98%
rename from ansible/roles/apps/templates/nextcloud-values.yaml
rename to ansible/roles/apps/templates/nextcloud-settings.yaml
index f8de162654e14adfde8423b96163983717934407..204b48ace2525faa65ca805ef00e61c0dd7a8373 100644
--- a/ansible/roles/apps/templates/nextcloud-values.yaml
+++ b/ansible/roles/apps/templates/nextcloud-settings.yaml
@@ -49,6 +49,9 @@ nextcloud:
   readinessProbe:
     initialDelaySeconds: 120
 
+setupApps:
+  backoffLimit: 20
+
 # Enable and configure MariaDB chart
 mariadb:
   db:
diff --git a/ansible/roles/local-flux/defaults/main.yml b/ansible/roles/local-flux/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..066d0e6268aba83bd545c43b4c7aeee43af203cd
--- /dev/null
+++ b/ansible/roles/local-flux/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+repo: "/var/lib/OpenAppStack/local-flux"
diff --git a/ansible/roles/local-flux/tasks/main.yml b/ansible/roles/local-flux/tasks/main.yml
index 4bf719ff6a91e77be5a29e5a8d5616593fb10a93..158b5b747e09410b92fe3b4a5e1cf13cc760607d 100644
--- a/ansible/roles/local-flux/tasks/main.yml
+++ b/ansible/roles/local-flux/tasks/main.yml
@@ -1,72 +1,67 @@
 ---
 
-- block:
-  
-  - name: Copy HelmRelease files to server's local flux repo
-    tags:
-      - flux
-    copy:
-      src: "../../../flux/"
-      dest: "{{ repo }}"
-    register: helmreleases
-    become: true
+- name: Copy HelmRelease files to server's local flux repo
+  tags:
+    - flux
+  copy:
+    src: "../../../flux/"
+    dest: "{{ repo }}"
+  register: helmreleases
+  become: true
 
-  - name: Create local flux repo
-    tags:
-      - flux
-    command: git init "{{ repo }}"
-    args:
-      creates: "{{ repo }}/.git"
-    become: true
+- name: Create local flux repo
+  tags:
+    - flux
+  command: git init "{{ repo }}"
+  args:
+    creates: "{{ repo }}/.git"
+  become: true
 
-  - name: Enable post-update hook in local flux repo
-    tags:
-      - flux
-    copy:
-      src: "git-hook.sh"
-      dest: "{{ repo }}/.git/hooks/{{ item }}"
-      mode: "0755"
-    with_items:
-      - "post-update"
-      - "post-commit"
-    become: true
+- name: Enable post-update hook in local flux repo
+  tags:
+    - flux
+  copy:
+    src: "git-hook.sh"
+    dest: "{{ repo }}/.git/hooks/{{ item }}"
+    mode: "0755"
+  with_items:
+    - "post-update"
+    - "post-commit"
+  become: true
 
-  - name: Add HelmRelease files to local flux commit
-    tags:
-      - flux
-    shell: git add . && git -c "user.name=OpenAppStack automation" -c "user.email=tech@openappstack.net" commit --allow-empty --author="OpenAppStack automation <>" -m "Local flux via ansible"
-    args:
-      chdir: "{{ repo }}"
-    when: helmreleases.changed
-    become: true
+- name: Add HelmRelease files to local flux commit
+  tags:
+    - flux
+  shell: git add . && git -c "user.name=OpenAppStack automation" -c "user.email=tech@openappstack.net" commit --allow-empty --author="OpenAppStack automation <>" -m "Local flux via ansible"
+  args:
+    chdir: "{{ repo }}"
+  when: helmreleases.changed
+  become: true
 
-  - name: Create local-flux helm chart directory
-    tags:
-      - flux
-    file:
-      path: "/var/lib/OpenAppStack/source/local-flux/templates"
-      state: directory
-    become: true
+- name: Create local-flux helm chart directory
+  tags:
+    - flux
+  file:
+    path: "/var/lib/OpenAppStack/source/local-flux/templates"
+    state: directory
+  become: true
 
-  - name: Copy local-flux helm chart to server
-    tags:
-      - flux
-    copy:
-      src: "{{ item.file }}"
-      dest: "/var/lib/OpenAppStack/source/local-flux/{{ item.subdir }}/{{ item.file }}"
-    with_items:
-      - file: "nginx.yaml"
-        subdir: "templates"
-      - file: "values.yaml"
-        subdir: "."
-      - file: "Chart.yaml"
-        subdir: "."
-    become: true
+- name: Copy local-flux helm chart to server
+  tags:
+    - flux
+  copy:
+    src: "{{ item.file }}"
+    dest: "/var/lib/OpenAppStack/source/local-flux/{{ item.subdir }}/{{ item.file }}"
+  with_items:
+    - file: "nginx.yaml"
+      subdir: "templates"
+    - file: "values.yaml"
+      subdir: "."
+    - file: "Chart.yaml"
+      subdir: "."
+  become: true
 
-  - name: Install local-flux helm chart
-    tags:
-      - flux
-    shell: helm upgrade --install --namespace=oas local-flux /var/lib/OpenAppStack/source/local-flux
-  
-  vars:
-    repo: "/var/lib/OpenAppStack/local-flux"
+- name: Install local-flux helm chart
+  tags:
+    - flux
+  shell: helm upgrade --install --namespace=oas local-flux /var/lib/OpenAppStack/source/local-flux
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 45bad692c4474b6fa6eac48e6020cf3c3c783b2f..16dd305002f56e59f61606e5c0f56ae68cab0786 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -2,6 +2,14 @@
 
 Note: `cluster$` indicates that the commands should be run as root on your OAS cluster.
 
+## Upgrading
+
+If you encounter problems when upgrading your cluster, first make sure that
+any new values from `ansible/group_vars/all/settings.yml.example` have been
+merged into your `clusters/YOUR_CLUSTERNAME/settings.yml`, then rerun the
+installation script.
+
+
 ## HTTPS Certificates
 
 OAS uses [cert-manager](http://docs.cert-manager.io/en/latest/) to automatically
@@ -34,4 +42,5 @@ If ever things fail beyond possible recovery, here's how to completely purge an
 
     cluster$ apt purge docker-ce-cli containerd.io
     cluster$ mount | egrep '^(tmpfs.*kubelet|nsfs.*docker)' | cut -d' ' -f 3 | xargs umount
+    cluster$ systemctl reboot
     cluster$ rm -rf /var/lib/docker /var/lib/OpenAppStack /etc/kubernetes /var/lib/etcd /var/lib/rancher /var/lib/kubelet /var/log/OpenAppStack /var/log/containers /var/log/pods
diff --git a/flux/nextcloud.yaml b/flux/nextcloud.yaml
index 5a1dda025fd83de66d7a4d5b0886ba592e5e3c3a..cfbe1224da03e1b40b75319f644aec16f17c7f35 100644
--- a/flux/nextcloud.yaml
+++ b/flux/nextcloud.yaml
@@ -12,7 +12,7 @@ spec:
   releaseName: nc
   chart:
     git: https://open.greenhost.net/openappstack/nextcloud
-    ref: 8f79b8ea86566d3ebbbb6103c52307e3ad9d1f28
+    ref: 5637eeec2fff4362a80611d7409ce3f0c090586c
     path: .
   valuesFrom:
     - secretKeyRef:
diff --git a/openappstack/__main__.py b/openappstack/__main__.py
index 42284ab1d75ccad545a5ecec9dc1ef320a65065a..720549f936fab601f8dce0dd9ee30eef58ac50bf 100755
--- a/openappstack/__main__.py
+++ b/openappstack/__main__.py
@@ -22,8 +22,9 @@ import argparse
 import logging
 import os
 import sys
+import greenhost_cloud
 from behave.__main__ import main as behave_main
-from openappstack import name, cluster, cosmos, ansible
+from openappstack import name, cluster, ansible
 
 ALL_TESTS = ['behave']
 
@@ -190,9 +191,9 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
     loglevel = logging.DEBUG if args.verbose else logging.INFO
     init_logging(log, loglevel)
 
-    # Setup logging for cosmos module
-    log_cosmos = logging.getLogger('cosmos')
-    init_logging(log_cosmos, loglevel)
+    # Setup logging for greenhost_cloud module
+    log_greenhost_cloud = logging.getLogger('greenhost_cloud')
+    init_logging(log_greenhost_cloud, loglevel)
 
     log.debug("Parsed arguments: %s", str(args))
 
@@ -204,7 +205,7 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
     if args.terminate_droplet:
         # In case none of the subparser's functions have been called, load data
         clus.load_data()
-        cosmos.terminate_droplets_by_name('^{}$'.format(clus.hostname))
+        greenhost_cloud.terminate_droplets_by_name('^{}$'.format(clus.hostname))
 
     if not hasattr(args, 'func') and not args.terminate_droplet:
         parser.print_help()
@@ -249,9 +250,9 @@ def create(clus, args):  # pylint: disable=too-many-branches
     if args.create_droplet:
         clus.create_droplet(ssh_key_id=args.ssh_key_id, hostname=args.create_hostname)
         if args.verbose:
-            cosmos.list_droplets()
+            greenhost_cloud.list_droplets()
         # Wait for ssh
-        cosmos.wait_for_ssh(clus.ip_address)
+        greenhost_cloud.wait_for_ssh(clus.ip_address)
     elif args.droplet_id:
         clus.set_info_by_droplet_id(args.droplet_id)
     elif args.ip_address:
@@ -269,7 +270,7 @@ def create(clus, args):  # pylint: disable=too-many-branches
         create_domain_records(
             args.domain, clus.ip_address, subdomain=args.subdomain)
         if args.verbose:
-            cosmos.list_domain_records(args.domain)
+            greenhost_cloud.list_domain_records(args.domain)
 
 
 def install(clus, args):
@@ -340,16 +341,16 @@ def create_domain_records(domain, droplet_ip, subdomain=None):
     if subdomain is None:
         subdomain_arg = "@"
 
-    domain_record = cosmos.create_domain_record(
+    domain_record = greenhost_cloud.create_domain_record(
         domain=domain, name=subdomain_arg,
-        data=droplet_ip, record_type='A', update=True)
+        data=droplet_ip, record_type='A', update=True, ttl=60)
     log.info('Domain record: %s', domain_record)
 
     subdomain_arg = '*'
     if subdomain is not None:
         subdomain_arg += '.' + subdomain
 
-    domain_record = cosmos.create_domain_record(
+    domain_record = greenhost_cloud.create_domain_record(
         domain=domain, name=subdomain_arg,
         data=subdomain, record_type='CNAME', update=True)
     log.info('Domain record: %s', domain_record)
diff --git a/openappstack/cluster.py b/openappstack/cluster.py
index a3927bd8743b01527ccdd53e3db227075569c879..12880f030b24284eb12b98f80b554ec410356ca9 100644
--- a/openappstack/cluster.py
+++ b/openappstack/cluster.py
@@ -7,7 +7,8 @@ import random
 import string
 import sys
 import yaml
-from openappstack import ansible, cosmos
+import greenhost_cloud
+from openappstack import ansible
 
 CLUSTER_PATH = os.path.join(os.getcwd(), 'clusters')
 
@@ -41,9 +42,6 @@ class Cluster:
         self.domain = None
         # By default, use Let's Encrypt's live environment
         self.acme_staging = False
-        # Let the auto-update mechanism (flux) follow a cluster-local git repo,
-        # not one hosted on open.greenhost.net.
-        self.local_flux = True
         # Set this to False if the data needs to be (re)loaded from file
         self.data_loaded = False
         # Load data from inventory.yml and settings.yml
@@ -92,7 +90,7 @@ class Cluster:
             # Use random generated ID in case we're not running in
             # gitlab CI and there's no CI_PIPELINE_ID env var
             hostname = self.name
-        droplet = cosmos.create_droplet(
+        droplet = greenhost_cloud.create_droplet(
             name=hostname,
             ssh_key_id=ssh_key_id,
             region=DEFAULT_REGION,
@@ -101,7 +99,7 @@ class Cluster:
             image=DEFAULT_IMAGE)
         droplet_id = droplet['droplet']['id']
         log.info('Created droplet id: %s', droplet_id)
-        cosmos.wait_for_state(droplet_id, 'running')
+        greenhost_cloud.wait_for_state(droplet_id, 'running')
         self.set_info_by_droplet_id(droplet_id)
 
     def set_info_by_droplet_id(self, droplet_id):
@@ -110,7 +108,7 @@ class Cluster:
 
         :param int droplet_id: Droplet ID at Greenhost
         """
-        droplet = cosmos.get_droplet(droplet_id)
+        droplet = greenhost_cloud.get_droplet(droplet_id)
         self.ip_address = droplet['networks']['v4'][0]['ip_address']
         self.hostname = droplet['name']
 
@@ -120,7 +118,7 @@ class Cluster:
         with the Cosmos API
         """
         hostname = r"^{}$".format(hostname)
-        droplets = cosmos.get_droplets_by_name(hostname)
+        droplets = greenhost_cloud.get_droplets_by_name(hostname)
         if droplets == []:
             log.error("Droplet with hostname %s not found", hostname)
             sys.exit(3)
@@ -150,7 +148,6 @@ class Cluster:
         settings['domain'] = self.domain
         settings['admin_email'] = 'admin@{0}'.format(self.domain)
         settings['acme_staging'] = self.acme_staging
-        settings['local_flux'] = self.local_flux
         settings['cluster_dir'] = self.cluster_dir
 
         file_contents = yaml.safe_dump(settings, default_flow_style=False)
diff --git a/openappstack/cosmos.py b/openappstack/cosmos.py
deleted file mode 100755
index 1d2bd36cc6e5db125bc6f3bdab1903edcaf34ebf..0000000000000000000000000000000000000000
--- a/openappstack/cosmos.py
+++ /dev/null
@@ -1,338 +0,0 @@
-#!/usr/bin/env python3
-"""Python module with helper functions to use the cosmos API."""
-
-import json
-from datetime import datetime
-from datetime import timedelta
-import logging
-import os
-import re
-import socket
-from time import sleep
-
-import requests
-from tabulate import tabulate
-from pytz import timezone
-
-
-# Helper functions
-def request_api(resource: str, request_type: str = 'GET',
-                data: str = ''):
-    """Query the cosmos API."""
-    if 'COSMOS_API_TOKEN' in os.environ:
-        api_token = os.environ['COSMOS_API_TOKEN']
-    else:
-        raise ValueError('Please export the COSMOS_API_TOKEN '
-                         'environment variable.')
-
-    headers = {'Content-Type': 'application/json',
-               'Authorization': 'Bearer {0}'.format(api_token)}
-    api_url_base = 'https://service.greenhost.net/api/v2'
-    api_url = '{0}/{1}'.format(api_url_base, resource)
-
-    if request_type == 'GET':
-        response = requests.get(api_url, headers=headers)
-    elif request_type == 'DELETE':
-        response = requests.delete(api_url, headers=headers)
-    elif request_type == 'POST':
-        response = requests.post(
-            api_url, headers=headers, data=json.dumps(data))
-    elif request_type == 'PUT':
-        response = requests.put(
-            api_url, headers=headers, data=json.dumps(data))
-    else:
-        raise ValueError('Specify one of GET/DELETE/POST/PUT as request_type.')
-
-    log.debug('Request: %s, %s, data: %s',
-              response.url, request_type, data)
-    log.debug('Response code: %s', response.status_code)
-
-    status_code_ok = [200, 201, 202, 204]
-    if response.status_code in status_code_ok:
-        if response.content:
-            log.debug('Response: %s\n', response.json())
-            return json.loads(response.content.decode('utf-8'))
-        return None
-    raise requests.HTTPError('WARNING: Got response code ',
-                             response.status_code, response.text)
-
-
-# API calls
-def create_domain_record(domain: str, name: str, data: str,
-                         record_type: str = 'A', update: bool = False):
-    """Create domain record.
-
-    If 'update' is set to True, the record will be updated if it exists.
-    """
-    log.info('Creating domain record')
-
-    record = {
-        'name': name,
-        'data': data,
-        'type': record_type
-    }
-    # Check if record exists
-    existing_record = get_domain_record_by_name(domain=domain, name=name,
-                                                record_type=record_type)
-    if existing_record:
-        if update:
-            log.info('Domain record exists - Updating the record.')
-            response = request_api(
-                'domains/%s/records/%s' % (domain, existing_record['id']),
-                'PUT', record)
-        else:
-            raise ValueError('Domain record exists - Doing nothing,'
-                             'please use "update=True" to update existing'
-                             'records.')
-    else:
-        log.info('Creating new record.')
-        response = request_api('domains/%s/records/' % domain, 'POST', record)
-
-    return response['domain_record']
-
-
-def create_droplet(name: str, ssh_key_id: int, region: str = 'ams1',  # pylint: disable=too-many-arguments
-                   size: int = 2048, disk: int = 20, image: int = 18):
-    """Create a droplet.
-
-    Required values:
-      - name (str): Name of the droplet
-      - ssh_key_id (int): ssh key id to add
-
-    Optional values with their default values:
-      - image (str): 18 (Ubuntu 18.04 x64)
-      - region (str): 'ams1' (Amsterdam 1)
-      - size (int): 2048 (2GB RAM)
-      - disk (int): 20 (20GB disk space)
-    """
-    log.info('Creating droplet')
-
-    data = {
-        "name": name,
-        "region": region,
-        "size": size,
-        "disk": disk,
-        "image": image,
-        "ssh_keys": ssh_key_id
-    }
-    response = request_api('droplet', 'POST', data)
-    return response
-
-
-def delete_domain_record(domain: str, record_id: int):
-    """Delete a domain record."""
-    log.info('Deleting domain record %s', record_id)
-    response = request_api('domains/{0}/records/{1}'.format(domain, record_id),
-                           'DELETE')
-    return response
-
-
-def delete_domain_records_by_name(domain: str, name_regex: str):
-    r"""Delete all domain records in a given domain matching a regex.
-
-    Examples:
-      delete_domain_records_by_name('openappstack.net', '^\*.ci-')
-      delete_domain_records_by_name('openappstack.net', '^ci-')
-
-    """
-    all_records = get_domain_records_by_name(domain, name_regex)
-    for record in all_records:
-        delete_domain_record(domain, record['id'])
-
-
-def delete_droplet(droplet_id: int):
-    """Delete a droplet. Droplet needs to be stopped first."""
-    log.info('Deleting %s', droplet_id)
-    response = request_api('droplets/{0}'.format(droplet_id), 'DELETE')
-    return response
-
-
-def get_domain_record(domain: str, droplet_id: int):
-    """Get details for given domain record."""
-    response = request_api('domains/{0}/records/{1}'.format(domain, droplet_id))
-    return response['domain_record']
-
-
-def get_domain_records(domain: str):
-    """Get domain records for given domain."""
-    response = request_api('domains/{0}/records'.format(domain))
-    return response['domain_records']
-
-
-def get_domain_record_by_name(domain: str, name: str,
-                              record_type: str = 'A'):
-    """
-    Get domain record for given name and type.
-
-    Example:
-    get_domain_record_by_name(domain='openappstack.net', name='varac-oas')
-    """
-    records = get_domain_records(domain=domain)
-    matching = None
-    for record in records:
-        if record['name'] == name and record['type'] == record_type:
-            matching = record
-            break
-    if not matching:
-        log.info('No domain record found.')
-
-    return matching
-
-
-def get_domain_records_by_name(domain: str, name_regex: str):
-    r"""
-    Get all information about domain records matching a regex in their names.
-
-    Example:
-        get_domain_records_by_name(name_regex='^ci\d+')
-    """
-    all_records = get_domain_records(domain)
-    matching = [record for record in all_records
-                if re.match(name_regex, record['name'])]
-    return matching
-
-
-def get_droplets():
-    """Get all information about all droplets."""
-    response = request_api('droplets')
-    return response['droplets']
-
-
-def get_droplets_by_name(name_regex: str):
-    r"""
-    Get all information about droplets matching a regex in their names.
-
-    Example:
-        get_droplets_by_name(name_regex='^ci\d+')
-    """
-    all_droplets = get_droplets()
-    log.debug(all_droplets)
-    matching = [droplet for droplet in all_droplets
-                if re.match(name_regex, droplet['name'])]
-    return matching
-
-
-def get_droplet(droplet_id: int):
-    """Get information about specified droplet."""
-    response = request_api('droplets/{0}'.format(droplet_id))
-    return response['droplet']
-
-
-def list_domain_records(domain: str):
-    """List domain records for given domain."""
-    records = get_domain_records(domain)
-
-    log.debug(json.dumps(records, sort_keys=True, indent=2))
-
-    table_records = [
-        [
-            record['id'], record['name'], record['type'], record['data']
-        ] for record in records
-    ]
-    log.info(tabulate(table_records,
-                      headers=['ID', 'Name', 'Type', 'Data']))
-
-
-def list_droplets():
-    """List all droplets by their ID, Name, IP and state."""
-    droplets = get_droplets()
-
-    log.debug(json.dumps(droplets, sort_keys=True, indent=2))
-
-    table_droplets = [
-        [
-            droplet['id'],
-            droplet['name'],
-            ', '.join([x['ip_address'] for x in droplet['networks']['v4']]),
-            droplet['status']
-        ]
-        for droplet in droplets]
-
-    log.info(tabulate(table_droplets,
-                      headers=['ID', 'Name', 'IPv4', 'Status']))
-
-
-def shutdown_droplet(droplet_id: int):
-    """Shut down specified droplet (through a power_off call)."""
-    log.info('Shutting down %s', droplet_id)
-    data = {"type": "power_off"}
-    response = request_api('droplets/{0}/actions'.format(droplet_id), 'POST', data)
-    return response
-
-
-def status_droplet(droplet_id: int):
-    """Get status of specified droplet."""
-    response = get_droplet(droplet_id)
-    return response['status']
-
-
-def terminate_droplet(droplet_id: int):
-    """Terminate a droplet by powering it down and deleting it."""
-    shutdown_droplet(droplet_id)
-    wait_for_state(droplet_id, 'stopped')
-    delete_droplet(droplet_id)
-
-
-def terminate_droplets_by_name(name_regex: str, ndays: int = 0,
-                               domain: str = 'openappstack.net'):
-    r"""
-    Terminate droplets matching a regex and for x days older than current day.
-
-    Droplets defined on the env variable NO_TERMINATE_DROPLETS will not be
-    delated
-
-    Example how to terminate all CI instances:
-        terminate_old_droplets(name_regex='^ci\d+', ndays=5)
-      will match i.e 'ci1234' , 'ci1', with a creation time older than 5 days
-    """
-    threshold_time = (datetime.now(tz=timezone('Europe/Stockholm')) -
-                      timedelta(days=ndays)).\
-        strftime("%Y-%m-%dT%H:%M:%S+00:00")
-    all_droplets = get_droplets()
-
-    noterminate_droplets = []
-    if 'NO_TERMINATE_DROPLETS' in os.environ:
-        noterminate_droplets = os.environ['NO_TERMINATE_DROPLETS'].split(',')
-
-    for droplet in all_droplets:
-        if droplet['name'] not in noterminate_droplets:
-            if re.match(name_regex, droplet['name']):
-                if droplet['created_at'] < threshold_time:
-                    delete_domain_records_by_name(
-                        domain, r'^\*.'+droplet['name'])
-                    delete_domain_records_by_name(domain, '^'+droplet['name'])
-                    terminate_droplet(droplet['id'])
-
-
-def wait_for_ssh(droplet_ip: str):
-    """Wait for ssh to be reachable on port 22."""
-    log.info('Waiting for ssh to become available on ip %s', droplet_ip)
-
-    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
-    while sock.connect_ex((droplet_ip, 22)) != 0:
-        sleep(1)
-
-    log.info('SSH became available on ip %s', droplet_ip)
-
-
-def wait_for_state(droplet_id: int, state):
-    """Wait for a droplet to reach a certain state."""
-    log.info('Waiting for droplet %s to reach %s state...', droplet_id, state)
-    status = status_droplet(droplet_id)
-    log.debug(status)
-
-    while status != state:
-        sleep(1)
-        status = status_droplet(droplet_id)
-
-
-# When called from from ipython, setup
-# logging to console
-try:
-    __IPYTHON__  # pylint: disable=pointless-statement
-    log = logging.getLogger()  # pylint: disable=invalid-name
-    log.addHandler(logging.StreamHandler())
-    log.setLevel(logging.INFO)
-except NameError:
-    log = logging.getLogger(__name__)  # pylint: disable=invalid-name