diff --git a/.gitignore b/.gitignore
index 519ccbc11536d0325eff0fbb31639c1056560db1..88b8e1aba3119de64f1955aab1d6a8182ef66bf7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,6 @@
 
 # Ignore files created by ansible-playbook
 *.retry
-/ansible/cluster_data/
 
 # Virtualenvs for the project
 /env
@@ -18,7 +17,7 @@
 /test/inventory*
 /test/behave/behave.ini
 /test/behave/rerun_failing.features
-/test/cluster_data/
+/clusters
 
 # Ignore files created during tests
 /test/behave/**/screenshots/
@@ -26,6 +25,7 @@
 # Etc
 __pycache__
 *.swp
+*.pyc
 
 # Documentation files
 /docs/_build
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 87d04b264a4ee05e6ed248f7d2a4e3620f369f55..da92db2ce5d9b9a75e0c8bed77289cf767de3064 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -8,15 +8,30 @@ stages:
   - integration-test
   - cleanup
 image: "${CI_REGISTRY_IMAGE}/openappstack-ci:${CI_COMMIT_REF_NAME}"
+variables:
+  SSH_KEY_ID: "411"
+  HOSTNAME: "ci-${CI_PIPELINE_ID}"
+  # Repeated values, because we're not allowed to use a variable in a variable
+  SUBDOMAIN: "ci-${CI_PIPELINE_ID}.ci"
+  DOMAIN: "openappstack.net"
+  ADDRESS: "ci-${CI_PIPELINE_ID}.ci.openappstack.net"
+  ANSIBLE_HOST_KEY_CHECKING: "False"
+
 ci_test_image:
   variables:
     KANIKO_CONTEXT: "test"
     KANIKO_IMAGENAME: "openappstack-ci"
+  stage: build
+  image:
+    # We need a shell to provide the registry credentials, so we need to use the
+    # kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
+    name: gcr.io/kaniko-project/executor:debug
+    entrypoint: [""]
   only:
     changes:
       - .gitlab-ci.yml
-      - test/Dockerfile
-      - test/requirements.txt
+      - Dockerfile
+      - requirements.txt
   extends: .kaniko_build
 
 bootstrap:
@@ -26,16 +41,15 @@ bootstrap:
   script:
     # Ensure test/ is not world-writable otherwise ansible-playbook refuses to run, see
     # https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
-    - chmod 755 test/
-    - cd test/
+    - chmod 755 ansible/
     - eval $(ssh-agent -s)
     - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - ANSIBLE_HOST_KEY_CHECKING=False python3 -u ./ci-bootstrap.py --create-droplet --create-domain-records --run-ansible --ansible-param skip-tags=helmfile
+    - echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS"
+    - python3 -m openappstack $HOSTNAME create --create-droplet $DOMAIN --hostname $HOSTNAME --ssh-key-id $SSH_KEY_ID --create-domain-records --subdomain $SUBDOMAIN
+    - python3 -m openappstack $HOSTNAME install --ansible-param='--skip-tags=helmfile'
   artifacts:
     paths:
-    - test/cluster_data/rke.log
-    - test/inventory.yml
-    - test/group_vars/all/settings.yml
+    - ./clusters
     expire_in: 1 month
     when: always
   only:
@@ -44,6 +58,8 @@ bootstrap:
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+
 install:
   stage: install-apps
   variables:
@@ -51,16 +67,15 @@ install:
   script:
     # Ensure test/ is not world-writable otherwise ansible-playbook refuses to run, see
     # https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
-    - chmod 755 test/
-    - cd test/
+    - chmod 755 ansible/
     - eval $(ssh-agent -s)
     - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - python3 -u ./ci-bootstrap.py --use-existing-inventory --run-ansible --ansible-param tags=helmfile --write-behave-config
+    - python3 -m openappstack $HOSTNAME install --ansible-param='--tags=helmfile'
     # Show versions of installed apps/binaries
     - ansible master -m shell -a 'oas-version-info.sh 2>&1'
   artifacts:
     paths:
-    - test/behave/behave.ini
+    - ./clusters
     expire_in: 1 month
   only:
     changes:
@@ -68,44 +83,47 @@ install:
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+
 testinfra:
   stage: health-test
   script:
+    - mkdir -p ~/.ssh
     - eval $(ssh-agent -s)
     - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - cd test/
-    - py.test -v -m 'testinfra' --connection=ansible --ansible-inventory=./inventory.yml --hosts='ansible://*'
+    - echo -e 'Host *\n  stricthostkeychecking no' > ~/.ssh/config
+    - cd ansible/
+    - pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=../clusters/$HOSTNAME/inventory.yml --hosts='ansible://*'
   only:
     changes:
       - .gitlab-ci.yml
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+
 certs:
   stage: health-test
-  variables:
-    OAS_DOMAIN: 'ci-${CI_PIPELINE_ID}.ci.openappstack.net'
   allow_failure: true
   script:
+    - mkdir -p ~/.ssh
     - eval $(ssh-agent -s)
     - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - cd test/
-    - py.test -s -m 'certs' --connection=ansible --ansible-inventory=./inventory.yml --hosts='ansible://*'
+    - echo -e 'Host *\n  stricthostkeychecking no' > ~/.ssh/config
+    - cd ansible/
+    - pytest -s -m 'certs' --connection=ansible --ansible-inventory=../clusters/$HOSTNAME/inventory.yml --hosts='ansible://*'
   only:
     changes:
       - .gitlab-ci.yml
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+
 behave-nextcloud:
   stage: integration-test
   script:
-    # Debug failing CI caches which happened in the past
-    - find . -name behave.ini
-    - ls -al test/behave
-    - grep -v 'password' test/behave/behave.ini
-    - cd test/behave/
-    - behave -D headless=True -t nextcloud || behave -D headless=True @rerun_failing.features -t nextcloud
+    - python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags nextcloud || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags nextcloud
   artifacts:
     paths:
     - test/behave/screenshots/
@@ -118,15 +136,12 @@ behave-nextcloud:
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+
 behave-grafana:
   stage: integration-test
   script:
-    # Debug failing CI caches which happened in the past
-    - find . -name behave.ini
-    - ls -al test/behave
-    - grep -v 'password' test/behave/behave.ini
-    - cd test/behave/
-    - behave -D headless=True -t grafana || behave -D headless=True @rerun_failing.features -t grafana
+    - python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags grafana || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags grafana
   artifacts:
     paths:
     - test/behave/screenshots/
@@ -138,24 +153,27 @@ behave-grafana:
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+
 terminate:
   stage: cleanup
   script:
     # Remove droplet after successful tests
-    - cd test/
-    - echo "$CI_COMMIT_MESSAGE" | grep '!ci_dont_terminate' && echo 'Termination of droplet disabled in commit message.' || python3 -u ./ci-bootstrap.py --use-existing-inventory --terminate
+    - echo "$CI_COMMIT_MESSAGE" | grep '!ci_dont_terminate' && echo 'Termination of droplet disabled in commit message.' || python3 -m openappstack $HOSTNAME --terminate-droplet
     # Remove droplet older than 2 days
-    - python3 -c "import cosmos; cosmos.terminate_droplets_by_name(\"^ci-\", 2)"
+    - python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^ci-\", 2)"
   only:
     changes:
       - .gitlab-ci.yml
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+
 # This trivial job works around a Gitlab bug: if no job runs at all due to
 # `only`, Gitlab gets confused and doesn't allow you to merge the MR:
 # https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html#limitations
 gitlab-merge-workaround:
   stage: cleanup
   script:
-    - echo "That went well."
+    - echo "That went well"
diff --git a/test/Dockerfile b/Dockerfile
similarity index 89%
rename from test/Dockerfile
rename to Dockerfile
index db990ab4fafa440a51721ec63236f37712cdccda..5c818f390215af99c1abdef9c66fc0a563bbfbc0 100644
--- a/test/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
 FROM alpine:3.9
 
-LABEL name="Openappstack bootstrap CI test image"
+LABEL name="OpenAppStack management"
 LABEL version="4.0"
 LABEL vendor1="Greenhost"
 
diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml
index 20f38d0792e4765d9a204ee4554d8116b17dc69b..6d10434b131fe62383bd8f2cd89128a5b1cb4561 100644
--- a/ansible/bootstrap.yml
+++ b/ansible/bootstrap.yml
@@ -16,6 +16,12 @@
         msg: >
           "Release name ({{ release_name }}) should start with lower case."
 
+    - name: cluster_dir variable is needed
+      assert:
+        that: "cluster_dir is defined"
+        msg: >
+          "Please define the variable `cluster_dir`."
+
 - name: Configure all hosts
   hosts: all
   # We use `mitogen_linear` as default strategy. However,
@@ -47,3 +53,5 @@
       tags: ['setup']
     - role: apps
       tags: ['apps']
+    - role: finalize
+      tags: ['finalize']
diff --git a/ansible/group_vars/all/oas.yml b/ansible/group_vars/all/oas.yml
index 9acf99a0c6b702db749f178f9939b16aa0ee3ba1..4f6f0a416cbf01736cc7ae21731521fbbaf73dbf 100644
--- a/ansible/group_vars/all/oas.yml
+++ b/ansible/group_vars/all/oas.yml
@@ -8,13 +8,18 @@ log_directory: "/var/log/OpenAppStack"
 # Use python3 on cluster nodes for ansible
 ansible_python_interpreter: "/usr/bin/env python3"
 
-# Nextcloud administrator password. If you do not change this value, it gets
-# generated and stored in `{{ secret_directory }}/nextcloud_admin_password`.
-# You can also choose your own password and fill it in here instead.
-nextcloud_password: "{{ lookup('password', '{{ secret_directory }}/nextcloud_admin_password chars=ascii_letters') }}"
-nextcloud_mariadb_password: "{{ lookup('password', '{{ secret_directory }}/nextcloud_mariadb_password chars=ascii_letters') }}"
-nextcloud_mariadb_root_password: "{{ lookup('password', '{{ secret_directory }}/nextcloud_mariadb_root_password chars=ascii_letters') }}"
-grafana_admin_password: "{{ lookup('password', '{{ secret_directory }}/grafana_admin_password chars=ascii_letters') }}"
+# Application passwords. If you do not change the default values, they get
+# generated and stored in the `clusters/CLUSTERNAME/secrets/` directory.
+# You can also choose your own passwords and fill it in here instead.
+
+# Nextcloud administrator password
+nextcloud_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_admin_password chars=ascii_letters') }}"
+# Nextcloud mariadb password for nextcloud db
+nextcloud_mariadb_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_mariadb_password chars=ascii_letters') }}"
+# Nextcloud mariadb root password
+nextcloud_mariadb_root_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_mariadb_root_password chars=ascii_letters') }}"
+# Grafana administrator password
+grafana_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/grafana_admin_password chars=ascii_letters') }}"
 
 # Kubernetes version
 kubernetes_version: "v1.14.3-rancher1-1"
diff --git a/ansible/group_vars/all/settings.yml.example b/ansible/group_vars/all/settings.yml.example
index 6e84e7b5fa8624a4921a0a04b6ae86194c960b49..b5e08491e872f38a286ed35c61ddf4b7b4f4c0fe 100644
--- a/ansible/group_vars/all/settings.yml.example
+++ b/ansible/group_vars/all/settings.yml.example
@@ -12,8 +12,6 @@ release_name: "test"
 # `true` when you are testing something.
 # Important: Don't quote this variable !
 acme_staging: false
-# Directory where we save all the passwords etc.
-secret_directory: "./cluster_data/secrets"
 # Which apps to install from the helmfile.d/ dir
 helmfiles:
   - 00-storage
diff --git a/ansible/pytest b/ansible/pytest
new file mode 120000
index 0000000000000000000000000000000000000000..f8833a455ace67ab8a260440fb7acc93555f64a4
--- /dev/null
+++ b/ansible/pytest
@@ -0,0 +1 @@
+../test/pytest
\ No newline at end of file
diff --git a/ansible/pytest.ini b/ansible/pytest.ini
new file mode 120000
index 0000000000000000000000000000000000000000..26fb6a77a75156deb6ac6dbe7d9300bf257ffd43
--- /dev/null
+++ b/ansible/pytest.ini
@@ -0,0 +1 @@
+../test/pytest.ini
\ No newline at end of file
diff --git a/ansible/roles/apps/tasks/helmfiles.yml b/ansible/roles/apps/tasks/helmfiles.yml
index 08468d992598f5a24c779e06a2d9b6e207df4640..1097135203bd4703f8c56cf8196e7f52880adf77 100644
--- a/ansible/roles/apps/tasks/helmfiles.yml
+++ b/ansible/roles/apps/tasks/helmfiles.yml
@@ -23,6 +23,6 @@
     - NEXTCLOUD_MARIADB_PASSWORD: "{{ nextcloud_mariadb_password }}"
     - NEXTCLOUD_MARIADB_ROOT_PASSWORD: "{{ nextcloud_mariadb_root_password }}"
     - GRAFANA_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
-  command: "/usr/local/bin/helmfile -b /snap/bin/helm -e oas -f {{ data_directory }}/source/helmfiles/helmfile.d/{{ item }}.yaml apply --suppress-secrets"
+  shell: /usr/local/bin/helmfile -b /snap/bin/helm -e oas -f {{ data_directory }}/source/helmfiles/helmfile.d/{{ item }}.yaml apply --suppress-secrets | sed 's/\x1B\[[0-9;]*[JKmsu]//g' >> {{ log_directory }}/helmfile.log
   with_items: "{{ helmfiles }}"
   when: item is not search("cert-manager")
diff --git a/ansible/roles/finalize/tasks/main.yml b/ansible/roles/finalize/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7c1b9f9ca7461396b49d4e5e76344689bed3efac
--- /dev/null
+++ b/ansible/roles/finalize/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- import_tasks: sync_cluster_data.yml
diff --git a/ansible/roles/finalize/tasks/sync_cluster_data.yml b/ansible/roles/finalize/tasks/sync_cluster_data.yml
new file mode 100644
index 0000000000000000000000000000000000000000..523b38a933ca02771beb4088786bff8313646454
--- /dev/null
+++ b/ansible/roles/finalize/tasks/sync_cluster_data.yml
@@ -0,0 +1,20 @@
+---
+- name: Copy cluster information to local folder
+  tags:
+    - fetch
+    - rke
+    - kubectl
+    - helmfile
+  fetch:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    flat: yes
+  loop:
+    - src: "{{ data_directory }}/rke/kube_config_cluster.yml"
+      dest: "{{ cluster_dir }}/secrets/kube_config_cluster.yml"
+    - src: "{{ log_directory }}/rke.log"
+      dest: "{{ cluster_dir }}/cluster_data/rke.log"
+    - src: "{{ data_directory }}/rke/cluster.yml"
+      dest: "{{ cluster_dir }}/cluster_data/rke_cluster.yml"
+    - src: "{{ log_directory }}/helmfile.log"
+      dest: "{{ cluster_dir }}/cluster_data/helmfile.log"
diff --git a/ansible/roles/setup/tasks/rke.yml b/ansible/roles/setup/tasks/rke.yml
index 0f3708e40c093392678271fab796873138cc6138..8ea84379465b7c5f6d5518332d3df94933d21613 100644
--- a/ansible/roles/setup/tasks/rke.yml
+++ b/ansible/roles/setup/tasks/rke.yml
@@ -29,20 +29,3 @@
     state: link
     src: "{{ data_directory }}/rke/kube_config_cluster.yml"
     dest: /root/.kube/config
-
-- name: Copy cluster information to local folder
-  tags:
-    - fetch
-    - rke
-    - kubectl
-  fetch:
-    src: "{{ item.src }}"
-    dest: "{{ item.dest }}"
-    flat: yes
-  loop:
-    - src: "{{ data_directory }}/rke/kube_config_cluster.yml"
-      dest: "{{ secret_directory }}/kube_config_cluster.yml"
-    - src: "{{ log_directory }}/rke.log"
-      dest: cluster_data/rke.log
-    - src: "{{ data_directory }}/rke/cluster.yml"
-      dest: cluster_data/rke_cluster.yml
diff --git a/openappstack/__init__.py b/openappstack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a85c322e6af4271383833a7a63dc8bf43c760de4
--- /dev/null
+++ b/openappstack/__init__.py
@@ -0,0 +1 @@
+name='openappstack'
diff --git a/openappstack/__main__.py b/openappstack/__main__.py
new file mode 100755
index 0000000000000000000000000000000000000000..887b1298d6d6a611e903af1cd0b2ade5646c51bc
--- /dev/null
+++ b/openappstack/__main__.py
@@ -0,0 +1,360 @@
+#!/usr/bin/env python3
+r"""
+Used by CI to bootstrap a new cluster and run tests.
+
+Env vars needed:
+- COSMOS_API_TOKEN
+
+Env vars optional:
+- NO_TERMINATE_DROPLETS: list of droplet ids that should not be
+  removed when deleting droplets.
+
+Install requirements:
+
+- On Alpine, install with:
+
+    apk --no-cache add python3-dev build-base libffi-dev linux-headers \
+      openssl-dev openssh-client
+    pip3 install -r requirements.txt
+"""
+
+import argparse
+import logging
+import os
+import sys
+from behave.__main__ import main as behave_main
+from openappstack import name, cluster, cosmos, ansible
+
+ALL_TESTS = ['behave']
+
+
+def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
+    """
+    Parse arguments and start program. Depending on the arguments parsed,
+    one of three functions is called:
+
+    - create, responsible for setting up VPSs and their local configuration
+      files
+    - install, responsible for setting up Kubernetes and OpenAppStack on those
+      VPSs
+    - test, responsible for testing if all the setup steps worked.
+
+    Run python3 -m openappstack --help for more information.
+    """
+    # Parse command line arguments
+    parser = argparse.ArgumentParser(
+        prog=name,
+        description='Run bootstrap script '
+        'to deploy Openappstack to a given node.')
+
+    parser.add_argument(
+        'cluster_name',
+        metavar='CLUSTER_NAME',
+        type=str,
+        help='Name of the cluster you want to use openappstack with')
+
+    subparsers = parser.add_subparsers(help='Available subcommands')
+
+    create_parser = subparsers.add_parser('create', help='Creates a new cluster')
+    create_parser.set_defaults(func=create)
+
+    create_parser.add_argument(
+        'domain',
+        metavar='DOMAIN_NAME',
+        help='Domain name to run OpenAppStack under')
+
+    group = create_parser.add_mutually_exclusive_group(required=True)
+
+    group.add_argument(
+        '--create-droplet',
+        action='store_true',
+        help='Create droplet automatically')
+    group.add_argument(
+        '--droplet-id',
+        metavar='ID',
+        type=int,
+        help='ID of existing Greenhost VPS to use')
+
+    droplet_creation_group = create_parser.add_argument_group(
+        'droplet creation',
+        'When using --create-droplet, you need to provide:')
+    droplet_creation_group.add_argument(
+        '--hostname',
+        type=str,
+        help='hostname of the new machine. If not provided, a hostname is generated')
+
+    droplet_creation_group.add_argument(
+        '--ssh-key-id',
+        metavar='ID',
+        type=int,
+        help='Greenhost-specific database ID of ssh key to deploy with')
+
+    droplet_creation_group.add_argument(
+        '--create-domain-records',
+        action='store_true',
+        help=('Creates DNS entries at Greenhost pointing the subdomain and '
+              'domain to the cluster.'))
+
+    droplet_creation_group.add_argument(
+        '--subdomain',
+        type=str,
+        metavar='SUBDOMAIN',
+        help=('Use a custom subdomain for the generated domain records. '
+              'Defaults to no subdomain'))
+
+    droplet_creation_group.add_argument(
+        '--acme-live-environment',
+        action='store_true',
+        help=("Use this for production clusters. Uses live Let's Encrypt "
+              'environment instead of staging'))
+
+    install_parser = subparsers.add_parser(
+        'install',
+        help=("Use this to run the ansible playbook that sets up your VPS to run "
+              "OpenAppStack. The ansible-playbook process will run in the "
+              "'{ansible_path}' directory, so do not use relative paths with these "
+              "arguments.").format(ansible_path=ansible.ANSIBLE_PATH))
+    install_parser.set_defaults(func=install)
+
+    parser.add_argument(
+        '--terminate-droplet',
+        action='store_true',
+        help=('Shutdown AND DELETE droplet identified by VPS identifier after '
+              'finishing'))
+
+    parser.add_argument(
+        '-v',
+        '--verbose',
+        action='store_true',
+        help='Be more verbose')
+
+    # Ansible related
+    install_parser.add_argument(
+        '--ansible-param',
+        metavar=['PARAM[=VALUE]'],
+        action='append',
+        nargs=1,
+        help='forward ansible parameters to the ansible-playbook call')
+
+    test_parser = subparsers.add_parser(
+        'test',
+        help=("Write test configuration and run tests on your cluster"))
+
+    test_parser.set_defaults(func=test)
+
+    test_parser.add_argument(
+        '--run-test',
+        action='append',
+        help=('Run only a specific kind of test. If not provided, all tests '
+              'are run. The currently available tests are: ') \
+            + ','.join(ALL_TESTS))
+
+    test_parser.add_argument(
+        '--behave-rerun-failing',
+        action='store_true',
+        help=('Run behave with @rerun_failing.features'))
+    test_parser.add_argument(
+        '--behave-tags',
+        nargs='+',
+        default=[],
+        help=('Only run behave tests with these tags'))
+    test_parser.add_argument(
+        '--behave-headless',
+        action='store_true',
+        help=('Run behave in headless mode'))
+
+    info_parser = subparsers.add_parser(
+        'info',
+        help=("Show information about a cluster"))
+
+    info_parser.set_defaults(func=info)
+
+    args = parser.parse_args()
+    loglevel = logging.DEBUG if args.verbose else logging.INFO
+    init_logging(log, loglevel)
+
+    # Setup logging for cosmos module
+    log_cosmos = logging.getLogger('cosmos')
+    init_logging(log_cosmos, loglevel)
+
+    log.debug("Parsed arguments: %s", str(args))
+
+    clus = cluster.Cluster(args.cluster_name)
+
+    if hasattr(args, 'func'):
+        args.func(clus, args)
+
+    if args.terminate_droplet:
+        # In case none of the subparser's functions have been called, load data
+        clus.load_data()
+        cosmos.terminate_droplets_by_name('^{}$'.format(clus.hostname))
+
+    if not hasattr(args, 'func') and not args.terminate_droplet:
+        parser.print_help()
+        sys.exit(1)
+
+def info(clus, _args):
+    """
+    Shows cluster information and then exits
+
+    :param cluster.Cluster clus: cluster to show information about
+    :param argparse.Namespace _args: ignored, added for compatibility with
+        create, install and test functions.
+    """
+    clus.load_data()
+    clus.print_info()
+
+
+def create(clus, args):
+    """
+    Parses arguments for the 'create' subcommand
+
+    :param cluster.Cluster clus: Cluster object to edit
+    :param argparse.Namespace args: Command line arguments
+    """
+    clus = cluster.Cluster(args.cluster_name)
+
+    if args.create_droplet:
+        if not args.ssh_key_id:
+            log.error('--ssh-key-id required when using --create-droplet')
+            sys.exit(1)
+
+    if args.subdomain:
+        domain = '{subdomain}.{domain}'.format(
+            subdomain=args.subdomain, domain=args.domain)
+    else:
+        domain = args.domain
+    clus.domain = domain
+
+    # Set acme_staging to False so we use Let's Encrypt's live environment
+    if args.acme_live_environment:
+        clus.acme_staging = False
+    if args.create_droplet:
+        clus.create_droplet(ssh_key_id=args.ssh_key_id, hostname=args.hostname)
+        if args.verbose:
+            cosmos.list_droplets()
+        # Wait for ssh
+        cosmos.wait_for_ssh(clus.ip_address)
+    elif args.droplet_id:
+        clus.set_info_by_droplet_id(args.droplet_id)
+
+    # Write inventory.yml and settings.yml files
+    clus.write_cluster_files()
+
+    if args.create_domain_records:
+        create_domain_records(
+            args.domain, clus.ip_address, subdomain=args.subdomain)
+        if args.verbose:
+            cosmos.list_domain_records(args.domain)
+
+
+def install(clus, args):
+    """
+    Parses arguments for the 'install' subcommand
+
+    :param cluster.Cluster clus: Cluster object to install OAS to
+    :param argparse.Namespace args: Command line arguments
+    """
+    clus.load_data()
+    ansible.run_ansible(
+        clus,
+        os.path.join(ansible.ANSIBLE_PATH, 'bootstrap.yml'),
+        args.ansible_param)
+
+
+def test(clus, args):
+    """
+    Runs behave or testinfra test. Overwrites behave_path/behave.ini!
+
+    :param cluster.Cluster clus: Cluster object to run tests on
+    :param argparse.Namespace args: Command line arguments
+    """
+
+    # At the moment we only have one type if test, but more tests will be added
+    # to this in the future. If args.run_test is empty, we run all the tests
+    run_test = args.run_test
+    if not run_test:
+        run_test = ALL_TESTS
+
+    if 'behave' in run_test:
+        behave_path = os.path.join(os.path.dirname(__file__), '..', 'test',
+                                   'behave')
+        # Run from the behave directory so behave automatically loads all the
+        # necessary files
+        os.chdir(behave_path)
+        clus.load_data()
+        behave_ini = os.path.join(behave_path, 'behave.ini')
+        clus.write_behave_config(behave_ini)
+        command = []
+        if args.behave_rerun_failing:
+            command.append('@rerun_failing.features')
+        if args.behave_headless:
+            command.append('-D headless=True')
+        for tag in args.behave_tags:
+            log.info(command)
+            command.append('-t {tag}'.format(tag=tag))
+        log.info('Running behave command %s', command)
+        behave_main(command)
+
+        # Remove behave.ini so we don't leave secrets hanging around.
+        os.remove(behave_ini)
+
+
+def create_domain_records(domain, droplet_ip, subdomain=None):
+    """
+    Creates 2 domain records at Greenhost. An A record at subdomain.domain,
+    pointing to droplet_ip and a CNAME record pointing *.subdomain.domain to
+    the first record.
+
+    :param str domain: the domain the cluster will be hosted on.
+    :param str droplet_ip: The IP address the A record will point to.
+    :param subdomain: Optional subdomain to host OAS on.
+    :type subdomain: str or None
+    """
+    subdomain_arg = subdomain
+    if subdomain is None:
+        subdomain_arg = "@"
+
+    domain_record = cosmos.create_domain_record(
+        domain=domain, name=subdomain_arg,
+        data=droplet_ip, record_type='A', update=True)
+    log.info('Domain record: %s', domain_record)
+
+    subdomain_arg = '*'
+    if subdomain is not None:
+        subdomain_arg += '.' + subdomain
+
+    domain_record = cosmos.create_domain_record(
+        domain=domain, name=subdomain_arg,
+        data=subdomain if subdomain is not None else domain, record_type='CNAME', update=True)
+    log.info('Domain record: %s', domain_record)
+
+
+def init_logging(logger, loglevel):
+    """
+    Configure logging.
+
+    - debug and info go to stdout
+    - warning and above go to stderr
+
+    :param logger: Logger to initialise
+    :param int loglevel: Log level from 0 to 50 (use logging.WARNING,
+        logging.INFO, etc.)
+    """
+    logger.setLevel(loglevel)
+    stdout = logging.StreamHandler(sys.stdout)
+    stdout.setLevel(loglevel)
+    stdout.addFilter(lambda record: record.levelno <= logging.INFO)
+
+    stderr = logging.StreamHandler()
+    stderr.setLevel(logging.WARNING)
+
+    logger.addHandler(stdout)
+    logger.addHandler(stderr)
+    logger.info('Set log level to %d', loglevel)
+
+
+if __name__ == '__main__':
+    # Setup logging for this script
+    log = logging.getLogger('openappstack')  # pylint: disable=invalid-name
+    main()
diff --git a/openappstack/ansible.py b/openappstack/ansible.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff954249a5aec02f2471681af9a52312ff7ebefc
--- /dev/null
+++ b/openappstack/ansible.py
@@ -0,0 +1,83 @@
+"""
+Module responsible for running the Ansible part of the OpenAppStack setup.
+"""
+import logging
+import os
+import subprocess
+import yaml
+
+log = logging.getLogger(__name__)  # pylint: disable=invalid-name
+
+ANSIBLE_INVENTORY = './clusters/{cluster_name}/inventory.yml'
+ANSIBLE_PATH = os.path.join(os.path.dirname(__file__),
+                            '..', 'ansible')
+
+def run_ansible(clus, playbook, ansible_params=None):
+    """
+    Call `ansible-playbook` in a subprocess to run the specified playbook. Runs
+    in the package's ansible directory.
+
+    :param str playbook: path to the playbook to run.
+    :param list ansible_params: Optionally provide a list of lists with ansible
+        params.  Each inner list may only contain one element. Can be directly
+        forwarded from argparse. Example:
+        `ansible_params = [[become-user=root], [verbose]]`
+    """
+    # playbook path here is relative to private_data_dir/project, see
+    # https://ansible-runner.readthedocs.io/en/latest/intro.html#inputdir
+    ansible_playbook_command = ['ansible-playbook']
+    if ansible_params:
+        for param in ansible_params:
+            if len(param) > 1:
+                log.warning('More than 1 parameter. Ignoring the rest! Use '
+                            '--ansible-param several times to supply '
+                            'more than 1 parameter')
+            param = param[0]
+            ansible_playbook_command.append(param)
+    ansible_playbook_command += \
+        ['-e', 'cluster_dir=' + clus.cluster_dir]
+
+    ansible_playbook_command += \
+        ['-i', clus.inventory_file, '-e', '@' + clus.settings_file, playbook]
+
+    log.info('Running "%s" in ansible directory "%s"',
+             ansible_playbook_command,
+             ANSIBLE_PATH)
+
+    process = subprocess.Popen(
+        ansible_playbook_command,
+        cwd=ANSIBLE_PATH)
+
+    returncode = process.wait()
+
+    if returncode > 0:
+        raise RuntimeError('Playbook failed with rc %s.'
+                           % returncode)
+
+def create_inventory(cluster):
+    """
+    Creates inventory for the ansible playbook. Needs the droplet's hostname
+    for identification and the IP for connecting with Ansible
+
+    :param cluster.Cluster cluster: Cluster object to for which inventory file
+        will be written. Used for getting hostname and IP address.
+    """
+    # Create inventory
+    with open(os.path.join(ANSIBLE_PATH, 'inventory.yml.example'),
+              'r') as stream:
+        inventory = yaml.safe_load(stream)
+
+    inventory['all']['hosts'][cluster.hostname] = \
+        inventory['all']['hosts']['oas-dev']
+    del inventory['all']['hosts']['oas-dev']
+
+    inventory['all']['hosts'][cluster.hostname]['ansible_host'] = \
+        cluster.ip_address
+    inventory['all']['children']['master']['hosts'] = cluster.hostname
+    inventory['all']['children']['worker']['hosts'] = cluster.hostname
+
+    file_contents = yaml.safe_dump(inventory, default_flow_style=False)
+    log.debug(file_contents)
+    with open(cluster.inventory_file, 'w') as stream:
+        stream.write(file_contents)
+    return inventory
diff --git a/openappstack/cluster.py b/openappstack/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..d728c54a1a37dc19433a079688ccc929de5a5400
--- /dev/null
+++ b/openappstack/cluster.py
@@ -0,0 +1,236 @@
+"""Contains code for managing the files related to an OpenAppStack cluster."""
+
+import configparser
+import logging
+import os
+import random
+import string
+import sys
+import yaml
+from openappstack import ansible, cosmos
+
+CLUSTER_PATH = os.path.join(os.getcwd(), 'clusters')
+
+log = logging.getLogger(__name__)  # pylint: disable=invalid-name
+
+"""Greenhost region where VPS will be started with create_droplet"""
+DEFAULT_REGION = 'ams1'
+"""Default disk size"""
+DEFAULT_DISK_SIZE_GB = 25
+"""Default amount of memory"""
+DEFAULT_MEMORY_SIZE_MB = 6144
+"""Default "image" (operating system): 19  =  Debian buster-x64 """
+DEFAULT_IMAGE = 19
+
+
+class Cluster:
+    """
+    Helper class for cluster-related paths, files, etc.
+
+    :param str cluster_name: Identifier of the cluster. A folder in
+        CLUSTER_PATH will be created with this name.
+    :param bool load_data: If this is true, `load_data` function is called at
+        the end of the constructor.
+    """
+
+    def __init__(self, cluster_name, load_data=False):
+        self.name = cluster_name
+        self.cluster_dir = os.path.join(CLUSTER_PATH, cluster_name)
+        self.ip_address = None
+        self.hostname = None
+        self.domain = None
+        # By default, use Let's Encrypt's staging environment
+        self.acme_staging = True
+        # Set this to False if the data needs to be (re)loaded from file
+        self.data_loaded = False
+        # Load data from inventory.yml and settings.yml
+        if load_data:
+            self.load_data()
+
+    def load_data(self):
+        """
+        Loads cluster data from inventory.yml and settings.yml files
+
+        Set self.data_loaded to False if this function should re-read data
+        from file.
+        """
+        if not self.data_loaded:
+            with open(self.settings_file, 'r') as stream:
+                settings = yaml.safe_load(stream)
+                self.ip_address = settings['ip_address']
+                self.domain = settings['domain']
+
+            log.debug("""Read data from settings.yml:
+                ip address: %s
+                domain: %s""", self.ip_address, self.domain)
+
+            with open(self.inventory_file, 'r') as stream:
+                inventory = yaml.safe_load(stream)
+                # Work with the master node from the inventory
+                self.hostname = inventory['all']['children']['master']['hosts']
+
+            log.debug(
+                'Read data from inventory.yml:\n\thostname: %s', self.hostname)
+        else:
+            log.debug('Not loading cluster data from file. Set '
+                      'Cluster.data_loaded to False if you want a reload.')
+        self.data_loaded = True
+
+
+    def create_droplet(self, ssh_key_id=0, hostname=None):
+        """
+        Uses the Cosmos API to create a droplet with OAS default spec
+
+        :param int ssh_key_id: SSH key ID in Greenhost Cosmos.
+        :param str hostname: hostname of the droplet created at GH.
+            If not provided, a hostname will be auto-generated.
+        """
+        if hostname is None:
+            # Use random generated ID in case we're not running in
+            # gitlab CI and there's no CI_PIPELINE_ID env var
+            hostname = ''.join(
+                random.choice(string.ascii_lowercase + string.digits)
+                for _ in range(10))
+            log.info('Generated hostname %s', hostname)
+        droplet = cosmos.create_droplet(
+            name=hostname,
+            ssh_key_id=ssh_key_id,
+            region=DEFAULT_REGION,
+            size=DEFAULT_MEMORY_SIZE_MB,
+            disk=DEFAULT_DISK_SIZE_GB,
+            image=DEFAULT_IMAGE)
+        droplet_id = droplet['droplet']['id']
+        log.info('Created droplet id: %s', droplet_id)
+        cosmos.wait_for_state(droplet_id, 'running')
+        self.set_info_by_droplet_id(droplet_id)
+
+    def set_info_by_droplet_id(self, droplet_id):
+        """
+        Sets info about the cluster based on the Greenhost VPS id
+
+        :param int droplet_id: Droplet ID at Greenhost
+        """
+        droplet = cosmos.get_droplet(droplet_id)
+        self.ip_address = droplet['networks']['v4'][0]['ip_address']
+        self.hostname = droplet['name']
+
+    def write_cluster_files(self):
+        """Creates an inventory.yml and settings.yml file for the cluster"""
+        self.make_cluster_directory()
+        ansible.create_inventory(self)
+
+        # Create settings
+        with open(os.path.join(ansible.ANSIBLE_PATH, 'group_vars',
+                               'all', 'settings.yml.example'),
+                  'r') as stream:
+            settings = yaml.safe_load(stream)
+
+        settings['ip_address'] = self.ip_address
+        settings['domain'] = self.domain
+        settings['admin_email'] = 'admin@{0}'.format(self.domain)
+        settings['acme_staging'] = self.acme_staging
+        settings['cluster_dir'] = self.cluster_dir
+
+        file_contents = yaml.safe_dump(settings, default_flow_style=False)
+        log.debug(file_contents)
+        with open(self.settings_file, 'w') as stream:
+            stream.write(file_contents)
+
+        # Set self.data_loaded to True because the data in the class now
+        # reflects the data in the file.
+        self.data_loaded = True
+
+    def make_cluster_directory(self):
+        """Make sure the cluster's file directory exists"""
+        os.makedirs(self.cluster_dir, exist_ok=True)
+
+    @property
+    def inventory_file(self):
+        """Path to the ansible inventory.yml for this cluster"""
+        return os.path.join(self.cluster_dir, 'inventory.yml')
+
+    @property
+    def settings_file(self):
+        """Path to the ansible settings.yml for this cluster"""
+        return os.path.join(self.cluster_dir, 'settings.yml')
+
+    @property
+    def behave_file(self):
+        """Path to 'behave.ini' which is used for acceptance tests"""
+        return os.path.join(self.cluster_dir, 'behave.ini')
+
+    @property
+    def secret_dir(self):
+        """Path where all the passwords for cluster admins are saved"""
+        return os.path.join(self.cluster_dir, 'secrets')
+
+    def write_behave_config(self, config_path):
+        """
+        Write behave config file for the cluster.
+
+        :param str config_path: Configuration is written to config_path (e.g.
+            /home/you/openappstack/test/behave.ini). If config_path already
+            exists, the program is aborted.
+        """
+        if os.path.isfile(config_path):
+            log.error('%s file already exists, not overwriting '
+                      'file! Remove the file if you want to run behave. '
+                      'Program will exit now', config_path)
+            sys.exit(2)
+        secret_directory = self.secret_dir
+        with open(os.path.join(
+                secret_directory, 'nextcloud_admin_password'), 'r') as stream:
+            nextcloud_admin_password = yaml.safe_load(stream)
+
+        with open(os.path.join(
+                secret_directory, 'grafana_admin_password'), 'r') as stream:
+            grafana_admin_password = yaml.safe_load(stream)
+
+        behave_config = configparser.ConfigParser()
+        behave_config['behave'] = {}
+        behave_config['behave']['format'] = 'rerun'
+        behave_config['behave']['outfiles'] = 'rerun_failing.features'
+        behave_config['behave']['show_skipped'] = 'false'
+
+        behave_config['behave.userdata'] = {}
+
+        behave_config['behave.userdata']['nextcloud.url'] = \
+            'https://files.{}'.format(self.domain)
+        behave_config['behave.userdata']['nextcloud.username'] = 'admin'
+        behave_config['behave.userdata']['nextcloud.password'] = \
+            nextcloud_admin_password
+
+        behave_config['behave.userdata']['grafana.url'] = \
+            'https://grafana.{}'.format(self.domain)
+        behave_config['behave.userdata']['grafana.username'] = 'admin'
+        behave_config['behave.userdata']['grafana.password'] = \
+            grafana_admin_password
+
+        with open(config_path, 'w') as config_file:
+            behave_config.write(config_file)
+
+    def print_info(self):
+        """Writes information about the cluster. Useful for debugging"""
+        info_string = """
+Cluster "{name}":
+  - IP address: {ip_address}
+  - Hostname: {hostname}
+  - Domain: {domain}
+
+Configuration:
+  - Inventory file: {inventory_file}
+  - Settings file: {settings_file}
+
+Kubectl:
+
+To use kubectl with this cluster, copy-paste this in your terminal:
+
+export KUBECONFIG={secret_dir}/kube_config_cluster.yml"""
+        print(info_string.format(
+            name=self.name,
+            ip_address=self.ip_address,
+            hostname=self.hostname,
+            domain=self.domain,
+            inventory_file=self.inventory_file,
+            settings_file=self.settings_file,
+            secret_dir=self.secret_dir))
diff --git a/test/cosmos.py b/openappstack/cosmos.py
similarity index 94%
rename from test/cosmos.py
rename to openappstack/cosmos.py
index 9655052f4b8a741c73d2877d4a70b5170f558774..c23b7506dd155e526dd135d613baa75f6aa4b099 100755
--- a/test/cosmos.py
+++ b/openappstack/cosmos.py
@@ -14,6 +14,7 @@ import requests
 from tabulate import tabulate
 from pytz import timezone
 
+
 # Helper functions
 def request_api(resource: str, request_type: str = 'GET',
                 data: str = ''):
@@ -129,11 +130,12 @@ def delete_domain_record(domain: str, id: int):
 
 
 def delete_domain_records_by_name(domain: str, name_regex: str):
-    """Delete all domain records in a given domain matching a regex.
+    r"""Delete all domain records in a given domain matching a regex.
 
     Examples:
       delete_domain_records_by_name('openappstack.net', '^\*.ci-')
       delete_domain_records_by_name('openappstack.net', '^ci-')
+
     """
     all = get_domain_records_by_name(domain, name_regex)
     for record in all:
@@ -227,7 +229,7 @@ def list_domain_records(domain: str):
         record['id'], record['name'], record['type'], record['data']]
         for record in records]
     log.info(tabulate(table_records,
-             headers=['ID', 'Name', 'Type', 'Data']))
+                      headers=['ID', 'Name', 'Type', 'Data']))
 
 
 def list_droplets():
@@ -270,17 +272,21 @@ def terminate_droplet(id: int):
     delete_droplet(id)
 
 
-def terminate_droplets_by_name(name_regex: str, ndays: int = 0, domain: str = 'openappstack.net'):
+def terminate_droplets_by_name(name_regex: str, ndays: int = 0,
+                               domain: str = 'openappstack.net'):
     r"""
-    Terminate droplets matching a regex and for x days older than current day. 
-    Droplets defined on the env variable NO_TERMINATE_DROPLETS will not be delated
+    Terminate droplets matching a regex and for x days older than current day.
+
+    Droplets defined on the env variable NO_TERMINATE_DROPLETS will not be
+    deleted
 
     Example how to terminate all CI instances:
         terminate_old_droplets(name_regex='^ci\d+', ndays=5)
       will match i.e 'ci1234' , 'ci1', with a creation time older than 5 days
     """
-
-    threshold_time = (datetime.now(tz=timezone('Europe/Stockholm')) - timedelta(days=ndays)).strftime("%Y-%m-%dT%H:%M:%S+00:00")
+    threshold_time = (datetime.now(tz=timezone('Europe/Stockholm')) -
+                      timedelta(days=ndays)).\
+        strftime("%Y-%m-%dT%H:%M:%S+00:00")
     all = get_droplets()
 
     noterminate_droplets = []
@@ -291,10 +297,12 @@ def terminate_droplets_by_name(name_regex: str, ndays: int = 0, domain: str = 'o
         if droplet['name'] not in noterminate_droplets:
             if re.match(name_regex, droplet['name']):
                 if droplet['created_at'] < threshold_time:
-                    delete_domain_records_by_name(domain, '^\*.'+droplet['name'])
+                    delete_domain_records_by_name(
+                        domain, '^\*.'+droplet['name'])
                     delete_domain_records_by_name(domain, '^'+droplet['name'])
                     terminate_droplet(droplet['id'])
 
+
 def wait_for_ssh(ip: str):
     """Wait for ssh to be reachable on port 22."""
     log.info('Waiting for ssh to become available on ip %s', ip)
@@ -304,6 +312,8 @@ def wait_for_ssh(ip: str):
     while sock.connect_ex((ip, 22)) != 0:
         sleep(1)
 
+    log.info('SSH became available on ip %s', ip)
+
 
 def wait_for_state(id: int, state):
     """Wait for a droplet to reach a certain state."""
diff --git a/test/requirements.txt b/requirements.txt
similarity index 78%
rename from test/requirements.txt
rename to requirements.txt
index 480ce481e2e2b8ef90e6394397a233592401592d..377c7c9f97eac1d0c40fe4289561fc668c3902f9 100644
--- a/test/requirements.txt
+++ b/requirements.txt
@@ -11,8 +11,8 @@ pyopenssl>=19.0.0
 pytest>=4.3.0
 requests>=2.19.1
 tabulate>=0.8.3
-testinfra>=2.0.0
+testinfra>=3.0.0
 setuptools>=40.6.2
 wheel>=0.33.1
 pytz>=2019.1
-
+-e git+https://open.greenhost.net/greenhost/cloud-api#egg=greenhost_cloud
diff --git a/test/README.md b/test/README.md
index 21ceee2faf255a4038f21805b4d8079138aa2854..5ff076a3f371fb4decbb77fb44d20e889eb3023a 100644
--- a/test/README.md
+++ b/test/README.md
@@ -10,7 +10,7 @@ Specify host manually:
 
 Run cert test manually using the ansible inventory file:
 
-    OAS_DOMAIN='example.openappstack.net' py.test -v -m 'certs' \
+    ADDRESS='example.openappstack.net' py.test -v -m 'certs' \
       --connection=ansible \
       --ansible-inventory=../ansible/inventory.yml \
       --hosts='ansible://*'
@@ -18,11 +18,11 @@ Run cert test manually using the ansible inventory file:
 Run cert test manually against a different cluster, not configured in any
 ansible inventory file, either by using pytest:
 
-    OAS_DOMAIN='example.openappstack.net' py.test -v -m 'certs'
+    ADDRESS='example.openappstack.net' py.test -v -m 'certs'
 
 or directly (allows better debugging since pytest won't eat stdout):
 
-    OAS_DOMAIN='example.openappstack.net' pytest/test_certs.py
+    ADDRESS='example.openappstack.net' pytest/test_certs.py
 
 ## Issues
 
diff --git a/test/ansible.cfg b/test/ansible.cfg
deleted file mode 120000
index 0b986ffbd15ce7c5f43039dd53f3dbdad2981e03..0000000000000000000000000000000000000000
--- a/test/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../ansible/ansible.cfg
\ No newline at end of file
diff --git a/test/bootstrap.yml b/test/bootstrap.yml
deleted file mode 120000
index d29cf97c10ac85c0dc68fa448f5d9aaeae9714fe..0000000000000000000000000000000000000000
--- a/test/bootstrap.yml
+++ /dev/null
@@ -1 +0,0 @@
-../ansible/bootstrap.yml
\ No newline at end of file
diff --git a/test/ci-bootstrap.py b/test/ci-bootstrap.py
deleted file mode 100755
index 547ebdea57b835511c26ae859d3e3078c79946f4..0000000000000000000000000000000000000000
--- a/test/ci-bootstrap.py
+++ /dev/null
@@ -1,330 +0,0 @@
-#!/usr/bin/env python3
-r"""
-Used by CI to bootstrap a new cluster and run tests.
-
-Env vars needed:
-- COSMOS_API_TOKEN
-
-Env vars optional:
-- NO_TERMINATE_DROPLETS: list of droplet ids that should not be
-  removed when deleting droplets.
-
-Install requirements:
-
-- Alpine using `requirements.txt`:
-
-    apk --no-cache add python3-dev build-base libffi-dev linux-headers \
-      openssl-dev openssh-client
-    pip3 install -r requirements.txt
-
-- Apline using packages (much faster):
-
-    apk --no-cache add ansible musl-dev linux-headers gcc py3-psutil \
-      openssh-client
-    pip3 install requests tabulate testinfra
-
-
-- Debian (using deb packages):
-    apt-get install -y --no-install-recommends ansible gcc libc6-dev \
-      python3-pip python3-setuptools python3-wheel \
-      python3-psutil
-    pip3 install requests tabulate testinfra
-"""
-
-import argparse
-import configparser
-import logging
-import os
-import random
-import shlex
-import string
-import subprocess
-import sys
-import traceback
-import yaml
-import cosmos
-
-SETTINGS_FILE = './group_vars/all/settings.yml'
-ANSIBLE_INVENTORY = './inventory.yml'
-
-
-def main():  # pylint: disable=too-many-statements,too-many-branches
-    """Do everything."""
-    # Parse command line arguments
-    parser = argparse.ArgumentParser(
-        description='Run bootstrap script'
-        'to deploy Openappstack to a given node.')
-
-    group = parser.add_mutually_exclusive_group(required=True)
-    group.add_argument(
-        '--create-droplet',
-        action='store_true',
-        help='Create droplet automatically')
-    group.add_argument(
-        '--droplet-id',
-        metavar='ID',
-        type=int,
-        help='ID of droplet to deploy to')
-
-    group.add_argument(
-        '--use-existing-inventory',
-        action='store_true',
-        help='Assumes inventory.yml has already been generated')
-
-    parser.add_argument(
-        '--ssh-key-id',
-        metavar='ID',
-        type=int,
-        default=411,
-        help='ID of ssh key to deploy with (default: 411)')
-
-    parser.add_argument(
-        '--terminate',
-        action='store_true',
-        help='Terminate droplet after deploy (shutdown and delete)')
-
-    parser.add_argument(
-        '--verbose',
-        action='store_true',
-        help='Be more verbose')
-
-    parser.add_argument(
-        '--ansible-param',
-        metavar=['PARAM[=VALUE]'],
-        action='append',
-        nargs=1,
-        help=('forward ansible parameters to the ansible-playbook call '
-              '(two dashes are prepended to PARAM)'))
-    parser.add_argument(
-        '--run-ansible',
-        action='store_true',
-        help='Runs the ansible bootstrap process')
-
-    parser.add_argument(
-        '--create-domain-records',
-        action='store_true',
-        help='Creates DNS entries for the cluster')
-
-    parser.add_argument(
-        '--write-behave-config',
-        action='store_true',
-        help='Writes a configuration file for behave with cluster information')
-
-    args = parser.parse_args()
-    verbose = args.verbose
-    loglevel = logging.DEBUG if verbose else logging.INFO
-    init_logging(log, loglevel)
-
-    # Setup logging for cosmos module
-    log_cosmos = logging.getLogger('cosmos')
-    init_logging(log_cosmos, loglevel)
-
-    if not args.use_existing_inventory:
-        # Start bootstrapping
-        if args.create_droplet:
-            # Create droplet
-
-            # image: 19 = Debian buster-x64
-            # ssh_keys
-            # - 411: ci, ed25519
-            # - 407: varac
-
-            if "CI_PIPELINE_ID" in os.environ:
-                instance_id = os.environ['CI_PIPELINE_ID']
-            else:
-                # Use random generated ID in case we're not running in
-                # gitlab CI and there's no CI_PIPELINE_ID env var
-                instance_id = ''.join(
-                    random.choice(string.ascii_lowercase + string.digits)
-                    for _ in range(10))
-
-            droplet = cosmos.create_droplet(
-                name='ci-' + instance_id,
-                ssh_key_id=args.ssh_key_id,
-                region='ams1',
-                size=8192,
-                disk=20,
-                image=19)
-            droplet_id = droplet['droplet']['id']
-            log.info('Created droplet id: %s', droplet_id)
-            cosmos.wait_for_state(droplet_id, 'running')
-        else:
-            droplet_id = args.droplet_id
-
-        if verbose:
-            cosmos.list_droplets()
-
-        # Get droplet ip
-        droplet = cosmos.get_droplet(droplet_id)
-        droplet_ip = droplet['networks']['v4'][0]['ip_address']
-        droplet_name = droplet['name']
-
-        # Create inventory
-        with open('../ansible/inventory.yml.example', 'r') as stream:
-            inventory = yaml.safe_load(stream)
-        inventory['all']['hosts'][droplet_name] = \
-            inventory['all']['hosts']['oas-dev']
-        del inventory['all']['hosts']['oas-dev']
-
-        inventory['all']['hosts'][droplet_name]['ansible_host'] = droplet_ip
-        inventory['all']['hosts'][droplet_name]['ansible_ssh_extra_args'] = \
-            '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-        inventory['all']['children']['master']['hosts'] = droplet_name
-        inventory['all']['children']['worker']['hosts'] = droplet_name
-
-        with open(ANSIBLE_INVENTORY, 'w') as stream:
-            yaml.safe_dump(inventory, stream, default_flow_style=False)
-
-        # Create settings
-        with open('../ansible/group_vars/all/settings.yml.example',
-                  'r') as stream:
-            settings = yaml.safe_load(stream)
-
-        settings['ip_address'] = droplet_ip
-        settings['domain'] = droplet_name + '.ci.openappstack.net'
-        settings['admin_email'] = "admin@{0}".format(settings['domain'])
-        settings['acme_staging'] = True
-
-        # Make sure settings file directory exists
-        settings_file_dir = os.path.dirname(SETTINGS_FILE)
-        if not os.path.exists(settings_file_dir):
-            os.makedirs(settings_file_dir)
-
-        with open(SETTINGS_FILE, 'w') as stream:
-            yaml.safe_dump(settings, stream, default_flow_style=False)
-
-        log.debug(yaml.safe_dump(inventory, default_flow_style=False))
-        log.debug(yaml.safe_dump(settings, default_flow_style=False))
-
-        # Wait for ssh
-        cosmos.wait_for_ssh(droplet_ip)
-    else:
-        # Work with the master node from the inventory
-        with open(ANSIBLE_INVENTORY, 'r') as stream:
-            inventory = yaml.safe_load(stream)
-        droplet_name = inventory['all']['children']['master']['hosts']
-        droplet_ip = inventory['all']['hosts'][droplet_name]['ansible_host']
-        log.info("Read data from inventory:\n\tname: %s\n\tip: %s",
-                 droplet_name, droplet_ip)
-
-        # For if write_behave_config is called later:
-        settings = None
-
-    if args.create_domain_records:
-        # Create domain records
-        domain_record = cosmos.create_domain_record(
-            domain='openappstack.net', name=droplet_name + '.ci',
-            data=droplet_ip, record_type='A', update=True)
-        log.info("Domain record: %s", domain_record)
-
-        domain_record = cosmos.create_domain_record(
-            domain='openappstack.net', name='*.' + droplet_name + '.ci',
-            data=droplet_name + '.ci', record_type='CNAME', update=True)
-        log.info("Domain record: %s", domain_record)
-
-        if verbose:
-            cosmos.list_domain_records('openappstack.net')
-
-    if args.run_ansible:
-
-        run_ansible('./bootstrap.yml', args.ansible_param)
-
-    if args.write_behave_config:
-        write_behave_config(settings=settings)
-
-    if args.terminate:
-        cosmos.terminate_droplets_by_name(droplet_name)
-
-
-def run_ansible(playbook, ansible_params):
-    """Call `ansible-playbook` directly to run the specified playbook."""
-    # playbook path here is relative to private_data_dir/project, see
-    # https://ansible-runner.readthedocs.io/en/latest/intro.html#inputdir
-    ansible_playbook_cmd = 'ansible-playbook %s' % playbook
-
-    if ansible_params:
-        for param in ansible_params:
-            if len(param) > 1:
-                log.warning('More than 1 parameter. Ignoring the rest! Use '
-                            '--ansible-param several times to supply '
-                            'more than 1 parameter')
-            param = param[0]
-            ansible_playbook_cmd += ' --' + param
-
-    log.info('Running %s', ansible_playbook_cmd)
-
-    result = subprocess.run(shlex.split(ansible_playbook_cmd))
-
-    if result.returncode > 0:
-        try:
-            raise RuntimeError('Playbook failed with rc %s.'
-                               % result.returncode)
-        except RuntimeError:
-            traceback.print_exc()
-            sys.exit(result.returncode)
-
-
-def write_behave_config(settings=None):
-    """Write behave config file for later use."""
-    if settings is None:
-        with open(SETTINGS_FILE) as stream:
-            settings = yaml.safe_load(stream)
-
-    secret_directory = settings['secret_directory']
-
-    with open(os.path.join(
-            secret_directory, 'nextcloud_admin_password'), 'r') as stream:
-        nextcloud_admin_password = yaml.safe_load(stream)
-
-    with open(os.path.join(
-            secret_directory, 'grafana_admin_password'), 'r') as stream:
-        grafana_admin_password = yaml.safe_load(stream)
-
-    behave_config = configparser.ConfigParser()
-    behave_config['behave'] = {}
-    behave_config['behave']['format'] = 'rerun'
-    behave_config['behave']['outfiles'] = 'rerun_failing.features'
-    behave_config['behave']['show_skipped'] = 'false'
-
-    behave_config['behave.userdata'] = {}
-
-    behave_config['behave.userdata']['nextcloud.url'] = \
-        'https://files.{}'.format(settings['domain'])
-    behave_config['behave.userdata']['nextcloud.username'] = 'admin'
-    behave_config['behave.userdata']['nextcloud.password'] = \
-        nextcloud_admin_password
-
-    behave_config['behave.userdata']['grafana.url'] = \
-        'https://grafana.{}'.format(settings['domain'])
-    behave_config['behave.userdata']['grafana.username'] = 'admin'
-    behave_config['behave.userdata']['grafana.password'] = \
-        grafana_admin_password
-
-    with open('./behave/behave.ini', 'w') as configfile:
-        behave_config.write(configfile)
-
-
-def init_logging(logger, loglevel):
-    """
-    Configure logging.
-
-    - debug and info go to stdout
-    - warning and above go to stderr
-    """
-    logger.setLevel(loglevel)
-    stdout = logging.StreamHandler(sys.stdout)
-    stdout.setLevel(loglevel)
-    stdout.addFilter(lambda record: record.levelno <= logging.INFO)
-
-    stderr = logging.StreamHandler()
-    stderr.setLevel(logging.WARNING)
-
-    logger.addHandler(stdout)
-    logger.addHandler(stderr)
-
-
-if __name__ == "__main__":
-    # Setup logging for this script
-    log = logging.getLogger(__name__)  # pylint: disable=invalid-name
-
-    main()
diff --git a/test/pytest/test_certs.py b/test/pytest/test_certs.py
index 2e9c152b3dd27f0d0e52e250e7bb94485ecb7f0b..472311e1613aca6db185ddb97918beed9e658ee0 100755
--- a/test/pytest/test_certs.py
+++ b/test/pytest/test_certs.py
@@ -97,8 +97,8 @@ def valid_cert(domain: str, ca_file: str = '/tmp/custom_ca_bundle.crt'):
 
 @pytest.mark.certs
 def test_cert_validation(host):
-    domain = os.environ.get("OAS_DOMAIN")
-    assert domain, "Please export OAS_DOMAIN as environment variable."
+    domain = os.environ.get("ADDRESS")
+    assert domain, "Please export ADDRESS as environment variable."
 
     add_custom_cert_authorities(certifi.where(),
                                 ['pytest/le-staging-bundle.pem'])
diff --git a/test/roles b/test/roles
deleted file mode 120000
index e4109d3736f47053d8deb67b0356f17ce2e2b9e0..0000000000000000000000000000000000000000
--- a/test/roles
+++ /dev/null
@@ -1 +0,0 @@
-../ansible/roles
\ No newline at end of file