diff --git a/.gitignore b/.gitignore
index 519ccbc11536d0325eff0fbb31639c1056560db1..3d7bff39efba0c906d69cffafd17040a54a4f9f6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,6 +19,7 @@
 /test/behave/behave.ini
 /test/behave/rerun_failing.features
 /test/cluster_data/
+/clusters
 
 # Ignore files created during tests
 /test/behave/**/screenshots/
@@ -26,6 +27,7 @@
 # Etc
 __pycache__
 *.swp
+*.pyc
 
 # Documentation files
 /docs/_build
diff --git a/openappstack/__main__.py b/openappstack/__main__.py
index 0051b827929a7aa1e48f5a87a47aafbaf4bc3b24..0eaf2dd398ea7110322780ee6d589a36202aecb1 100755
--- a/openappstack/__main__.py
+++ b/openappstack/__main__.py
@@ -32,27 +32,39 @@ Install requirements:
 """
 
 import argparse
-import configparser
 import logging
 import os
-import random
-import string
 import sys
-import yaml
-from openappstack import cosmos
-from openappstack import ansible
+from openappstack import name, cluster, cosmos, ansible
 
-SETTINGS_FILE = './group_vars/all/settings.yml'
 
 
 def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
     """Do everything."""
     # Parse command line arguments
     parser = argparse.ArgumentParser(
-        description='Run bootstrap script'
+        prog=name,
+        description='Run bootstrap script '
         'to deploy Openappstack to a given node.')
 
-    group = parser.add_mutually_exclusive_group(required=True)
+    parser.add_argument(
+        'cluster_name',
+        metavar='CLUSTER_NAME',
+        type=str,
+        help="Name of the cluster you want to use openappstack with")
+
+    subparsers = parser.add_subparsers(dest="subcommand", required=True, help="Available subcommands")
+
+    create_parser = subparsers.add_parser('create', help='Creates a new cluster')
+    create_parser.set_defaults(func=create)
+
+    create_parser.add_argument(
+        'domain',
+        metavar="DOMAIN_NAME",
+        help='Domain name to run OpenAppStack under')
+
+    group = create_parser.add_mutually_exclusive_group(required=True)
+
     group.add_argument(
         '--create-droplet',
         action='store_true',
@@ -61,162 +73,152 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
         '--droplet-id',
         metavar='ID',
         type=int,
-        help='ID of droplet to deploy to')
-
-    group.add_argument(
-        '--use-existing-inventory',
-        action='store_true',
-        help='Assumes inventory.yml has already been generated')
+        help='ID of existing Greenhost VPS to use')
 
-    parser.add_argument(
+    droplet_creation_group = create_parser.add_argument_group(
+        'droplet creation',
+        'When using --create-droplet, you need to provide:')
+    droplet_creation_group.add_argument(
         '--hostname',
         type=str,
-        help='If called with --create-droplet, this is the machine hostname')
+        help='hostname of the new machine. If not provided, a hostname is generated')
 
-    parser.add_argument(
+    droplet_creation_group.add_argument(
         '--ssh-key-id',
         metavar='ID',
         type=int,
         help='ID of ssh key to deploy with (default: 411)')
 
+    droplet_creation_group.add_argument(
+        '--create-domain-records',
+        action='store_true',
+        help=('Creates DNS entries at Greenhost pointing the subdomain and '
+              'domain to the cluster.'))
+
+    droplet_creation_group.add_argument(
+        '--subdomain',
+        type=str,
+        metavar="SUBDOMAIN",
+        default="",
+        help=('Use a custom subdomain for the generated domain records. '
+              'Defaults to empty string (no subdomain)'))
+
+    install_parser = subparsers.add_parser(
+        'install',
+        help=("Use this to run the ansible playbook that sets up your VPS to run "
+              "OpenAppStack. The ansible-playbook process will run in the "
+              "'{ansible_path}' directory, so do not use relative paths with these "
+              "arguments.").format(ansible_path=ansible.ANSIBLE_PATH))
+    install_parser.set_defaults(func=install)
+
     parser.add_argument(
         '--terminate',
         action='store_true',
-        help='Terminate droplet after deploy (shutdown and delete)')
+        help=('Shutdown and delete droplet identified by VPS identifier after '
+              'finishing'))
 
     parser.add_argument(
+        '-v',
         '--verbose',
         action='store_true',
         help='Be more verbose')
 
-    parser.add_argument(
+    # Ansible related
+    install_parser.add_argument(
         '--ansible-param',
         metavar=['PARAM[=VALUE]'],
         action='append',
         nargs=1,
         help=('forward ansible parameters to the ansible-playbook call '
               '(two dashes are prepended to PARAM)'))
-    parser.add_argument(
-        '--run-ansible',
-        action='store_true',
-        help='Runs the ansible bootstrap process')
 
-    parser.add_argument(
-        '--create-domain-records',
-        type=str,
-        metavar="DOMAIN_NAME",
-        help='Creates DNS entries for the cluster. Provide with a domain name')
-
-    parser.add_argument(
-        '--domain-record-subdomain',
-        type=str,
-        metavar="SUBDOMAIN",
-        default="",
-        help=('Use a custom subdomain for the generated domain records. '
-              'Defaults to empty string (no subdomain)'))
+    test_parser = subparsers.add_parser(
+        'test',
+        help=("Write test configuration and run tests on your cluster"))
+    test_parser.set_defaults(func=test)
 
-    parser.add_argument(
+    test_parser.add_argument(
         '--write-behave-config',
         action='store_true',
         help='Writes a configuration file for behave with cluster information')
 
+    info_parser = subparsers.add_parser(
+        'info',
+        help=("Show information about a cluster"))
+    info_parser.set_defaults(func=info)
+
     args = parser.parse_args()
-    verbose = args.verbose
-    loglevel = logging.DEBUG if verbose else logging.INFO
+    loglevel = logging.DEBUG if args.verbose else logging.INFO
     init_logging(log, loglevel)
 
     # Setup logging for cosmos module
     log_cosmos = logging.getLogger('cosmos')
     init_logging(log_cosmos, loglevel)
 
-    if not args.use_existing_inventory:
-        # Start bootstrapping
-        if args.create_droplet:
-            if not args.ssh_key_id:
-                log.error("SSH Key id required when using --create-droplet")
-
-            if args.hostname:
-                hostname = args.hostname
-            else:
-                # Use random generated ID in case we're not running in
-                # gitlab CI and there's no CI_PIPELINE_ID env var
-                hostname = ''.join(
-                    random.choice(string.ascii_lowercase + string.digits)
-                    for _ in range(10))
-                log.info('Generated hostname %s', hostname)
-
-            droplet = cosmos.create_droplet(
-                name=hostname,
-                ssh_key_id=args.ssh_key_id,
-                region='ams1',
-                size=8192,
-                disk=20,
-                # image: 19 = Debian buster-x64
-                image=19)
-            droplet_id = droplet['droplet']['id']
-            log.info('Created droplet id: %s', droplet_id)
-            cosmos.wait_for_state(droplet_id, 'running')
-        else:
-            droplet_id = args.droplet_id
-
-        if verbose:
-            cosmos.list_droplets()
+    log.debug("Parsed arguments: %s", str(args))
+
+    clus = cluster.Cluster(args.cluster_name)
+    args.func(clus, args)
 
-        # Get droplet ip
-        droplet = cosmos.get_droplet(droplet_id)
-        droplet_ip = droplet['networks']['v4'][0]['ip_address']
-        droplet_name = droplet['name']
-        ansible.create_inventory(droplet_name, droplet_ip)
 
 
-        # Create settings
-        with open('../ansible/group_vars/all/settings.yml.example',
-                  'r') as stream:
-            settings = yaml.safe_load(stream)
+    if args.terminate:
+        cosmos.terminate_droplets_by_name(clus.hostname)
 
-        settings['ip_address'] = droplet_ip
-        settings['domain'] = droplet_name + '.ci.openappstack.net'
-        settings['admin_email'] = "admin@{0}".format(settings['domain'])
-        settings['acme_staging'] = True
+def info(clus, _args):
+    """Shows cluster information and then exits"""
+    clus.load_data()
+    clus.print_info()
 
-        # Make sure settings file directory exists
-        settings_file_dir = os.path.dirname(SETTINGS_FILE)
-        if not os.path.exists(settings_file_dir):
-            os.makedirs(settings_file_dir)
 
-        with open(SETTINGS_FILE, 'w') as stream:
-            yaml.safe_dump(settings, stream, default_flow_style=False)
+def create(clus, args):
+    """Parses arguments for the "create" subcommand"""
+    clus = cluster.Cluster(args.cluster_name)
 
-        log.debug(yaml.safe_dump(settings, default_flow_style=False))
+    if args.create_droplet:
+        if not args.ssh_key_id:
+            log.error("SSH Key id required when using --create-droplet")
 
-        # Wait for ssh
-        cosmos.wait_for_ssh(droplet_ip)
+    if args.subdomain:
+        domain = "{subdomain}.{domain}".format(
+            subdomain=args.subdomain, domain=args.domain)
     else:
-        # Work with the master node from the inventory
-        with open(ansible.ANSIBLE_INVENTORY, 'r') as stream:
-            inventory = yaml.safe_load(stream)
-        droplet_name = inventory['all']['children']['master']['hosts']
-        droplet_ip = inventory['all']['hosts'][droplet_name]['ansible_host']
-        log.info("Read data from inventory:\n\tname: %s\n\tip: %s",
-                 droplet_name, droplet_ip)
+        domain = args.domain
+    clus.domain = domain
+    if args.create_droplet:
+        clus.create_droplet(ssh_key_id=args.ssh_key_id, hostname=args.hostname)
+        if args.verbose:
+            cosmos.list_droplets()
+    elif args.droplet_id:
+        clus.set_info_by_droplet_id(args.droplet_id)
+
+    # Write inventory.yml and settings.yml files
+    clus.write_cluster_files()
 
-        # For if write_behave_config is called later:
-        settings = None
+    # Wait for ssh
+    cosmos.wait_for_ssh(clus.ip_address)
 
     if args.create_domain_records:
-        create_domain_records(args.create_domain_records, args.domain_record_subdomain, droplet_ip)
-        if verbose:
+        create_domain_records(args.domain, args.subdomain, clus.ip_address)
+        if args.verbose:
             cosmos.list_domain_records('openappstack.net')
 
-    if args.run_ansible:
 
-        ansible.run_ansible('./bootstrap.yml', args.ansible_param)
+def install(clus, args):
+    """Parses arguments for the "install" subcommand"""
+    clus.load_data()
+    ansible.run_ansible(
+        clus,
+        os.path.join(ansible.ANSIBLE_PATH, 'bootstrap.yml'),
+        args.ansible_param)
 
+
+def test(clus, args):
+    """Parses arguments for the "test" subcommand"""
+    clus.load_data()
     if args.write_behave_config:
-        write_behave_config(settings=settings)
+        clus.write_behave_config()
 
-    if args.terminate:
-        cosmos.terminate_droplets_by_name(droplet_name)
 
 def create_domain_records(domain, subdomain, droplet_ip):
     """
@@ -234,44 +236,6 @@ def create_domain_records(domain, subdomain, droplet_ip):
     log.info("Domain record: %s", domain_record)
 
 
-def write_behave_config(settings=None):
-    """Write behave config file for later use."""
-    if settings is None:
-        with open(SETTINGS_FILE) as stream:
-            settings = yaml.safe_load(stream)
-
-    secret_directory = settings['secret_directory']
-
-    with open(os.path.join(
-            secret_directory, 'nextcloud_admin_password'), 'r') as stream:
-        nextcloud_admin_password = yaml.safe_load(stream)
-
-    with open(os.path.join(
-            secret_directory, 'grafana_admin_password'), 'r') as stream:
-        grafana_admin_password = yaml.safe_load(stream)
-
-    behave_config = configparser.ConfigParser()
-    behave_config['behave'] = {}
-    behave_config['behave']['format'] = 'rerun'
-    behave_config['behave']['outfiles'] = 'rerun_failing.features'
-    behave_config['behave']['show_skipped'] = 'false'
-
-    behave_config['behave.userdata'] = {}
-
-    behave_config['behave.userdata']['nextcloud.url'] = \
-        'https://files.{}'.format(settings['domain'])
-    behave_config['behave.userdata']['nextcloud.username'] = 'admin'
-    behave_config['behave.userdata']['nextcloud.password'] = \
-        nextcloud_admin_password
-
-    behave_config['behave.userdata']['grafana.url'] = \
-        'https://grafana.{}'.format(settings['domain'])
-    behave_config['behave.userdata']['grafana.username'] = 'admin'
-    behave_config['behave.userdata']['grafana.password'] = \
-        grafana_admin_password
-
-    with open('./behave/behave.ini', 'w') as configfile:
-        behave_config.write(configfile)
 
 
 def init_logging(logger, loglevel):
@@ -285,6 +249,7 @@ def init_logging(logger, loglevel):
     stdout = logging.StreamHandler(sys.stdout)
     stdout.setLevel(loglevel)
     stdout.addFilter(lambda record: record.levelno <= logging.INFO)
+    logger.info("Set log level to %d", loglevel)
 
     stderr = logging.StreamHandler()
     stderr.setLevel(logging.WARNING)
@@ -295,6 +260,5 @@ def init_logging(logger, loglevel):
 
 if __name__ == "__main__":
     # Setup logging for this script
-    log = logging.getLogger(__name__)  # pylint: disable=invalid-name
-
+    log = logging.getLogger("openappstack")  # pylint: disable=invalid-name
     main()
diff --git a/openappstack/ansible.py b/openappstack/ansible.py
index e148b1af7886889b40aba65a5c3c449c07365e54..77cc20a5239ba247e839bba77d6b70b0011cfb67 100644
--- a/openappstack/ansible.py
+++ b/openappstack/ansible.py
@@ -2,6 +2,7 @@
 Module responsible for running the Ansible part of the OpenAppStack setup.
 """
 import logging
+import os
 import shlex
 import subprocess
 import sys
@@ -10,14 +11,26 @@ import yaml
 
 log = logging.getLogger(__name__)  # pylint: disable=invalid-name
 
-ANSIBLE_INVENTORY = './inventory.yml'
+ANSIBLE_INVENTORY = './clusters/{cluster_name}/inventory.yml'
+ANSIBLE_PATH = os.path.join(os.path.dirname(__file__),
+                            '..', "ansible")
 
-def run_ansible(playbook, ansible_params):
-    """Call `ansible-playbook` directly to run the specified playbook."""
+def run_ansible(clus, playbook, ansible_params=None):
+    """
+    Call `ansible-playbook` in a subprocess to run the specified playbook. Runs
+    in the package's ansible directory.
+
+    Parameters:
+    - playbook: path to the playbook to run.
+    - ansible_params: Optionally provide a list of lists with ansible params.
+      Each inner list may only contain one element. Can be directly forwarded
+      from argparse.
+
+      Example: ansible_params = [['become-user=root'], ['verbose']]
+    """
     # playbook path here is relative to private_data_dir/project, see
     # https://ansible-runner.readthedocs.io/en/latest/intro.html#inputdir
-    ansible_playbook_cmd = 'ansible-playbook %s' % playbook
-
+    ansible_param_string = ""
     if ansible_params:
         for param in ansible_params:
             if len(param) > 1:
@@ -25,39 +38,53 @@ def run_ansible(playbook, ansible_params):
                             '--ansible-param several times to supply '
                             'more than 1 parameter')
             param = param[0]
-            ansible_playbook_cmd += ' --' + param
+            ansible_param_string += ' --' + param
+
+    ansible_playbook_cmd = \
+        ('ansible-playbook {ansible_param_string} -i {inventory} '
+         '-e @{settings} {playbook}').format(
+             ansible_param_string=ansible_param_string,
+             inventory=clus.inventory_file,
+             settings=clus.settings_file,
+             playbook=playbook
+         )
 
-    log.info('Running %s', ansible_playbook_cmd)
+    log.info('Running "%s" in ansible directory "%s"',
+             ansible_playbook_cmd,
+             ANSIBLE_PATH)
 
-    result = subprocess.run(shlex.split(ansible_playbook_cmd))
+    process = subprocess.Popen(
+        shlex.split(ansible_playbook_cmd),
+        cwd=ANSIBLE_PATH)
 
-    if result.returncode > 0:
-        try:
-            raise RuntimeError('Playbook failed with rc %s.'
-                               % result.returncode)
-        except RuntimeError:
-            traceback.print_exc()
-            sys.exit(result.returncode)
+    returncode = process.wait()
 
-def create_inventory(droplet_name, droplet_ip):
+    if returncode > 0:
+        raise RuntimeError('Playbook failed with rc %s.'
+                           % returncode)
+
+def create_inventory(cluster):
     """
     Creates inventory for the ansible playbook. Needs the droplet's hostname
     for identification and the IP for connecting with Ansible
     """
     # Create inventory
-    with open('../ansible/inventory.yml.example', 'r') as stream:
+    with open(os.path.join(ANSIBLE_PATH, "inventory.yml.example"),
+              'r') as stream:
         inventory = yaml.safe_load(stream)
-    inventory['all']['hosts'][droplet_name] = \
+
+    inventory['all']['hosts'][cluster.hostname] = \
         inventory['all']['hosts']['oas-dev']
     del inventory['all']['hosts']['oas-dev']
 
-    inventory['all']['hosts'][droplet_name]['ansible_host'] = droplet_ip
-    inventory['all']['hosts'][droplet_name]['ansible_ssh_extra_args'] = \
+    inventory['all']['hosts'][cluster.hostname]['ansible_host'] = \
+        cluster.ip_address
+    inventory['all']['hosts'][cluster.hostname]['ansible_ssh_extra_args'] = \
         '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-    inventory['all']['children']['master']['hosts'] = droplet_name
-    inventory['all']['children']['worker']['hosts'] = droplet_name
+    inventory['all']['children']['master']['hosts'] = cluster.hostname
+    inventory['all']['children']['worker']['hosts'] = cluster.hostname
 
-    with open(ANSIBLE_INVENTORY, 'w') as stream:
+    with open(cluster.inventory_file, 'w') as stream:
         yaml.safe_dump(inventory, stream, default_flow_style=False)
     log.debug(yaml.safe_dump(inventory, default_flow_style=False))
     return inventory
diff --git a/openappstack/cluster.py b/openappstack/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..17f4d034c52cd53302ffa4fed3c220ff419ee3f0
--- /dev/null
+++ b/openappstack/cluster.py
@@ -0,0 +1,176 @@
+"""Contains code for managing the files related to an OpenAppStack cluster"""
+
+import configparser
+import logging
+import os
+import random
+import string
+import yaml
+from openappstack import ansible, cosmos
+
+CLUSTER_PATH = os.path.join(os.getcwd(), "clusters")
+
+log = logging.getLogger(__name__)  # pylint: disable=invalid-name
+
+class Cluster:
+    """helper class for cluster-related paths, files, etc."""
+
+
+    def __init__(self, cluster_name, load_data=False):
+        self.name = cluster_name
+        self.cluster_dir = os.path.join(CLUSTER_PATH, cluster_name)
+        self.ip_address = None
+        self.hostname = None
+        self.domain = None
+
+        # Load data from inventory.yml and settings.yml
+        if load_data:
+            self.load_data()
+
+    def load_data(self):
+        """Loads cluster data from inventory.yml and settings.yml files"""
+        # Work with the master node from the inventory
+
+        with open(self.settings_file, 'r') as stream:
+            settings = yaml.safe_load(stream)
+            self.ip_address = settings['ip_address']
+            self.domain = settings['domain']
+
+        log.debug("""Read data from settings.yml:
+            ip address: %s
+            domain: %s""", self.ip_address, self.domain)
+
+        with open(self.inventory_file, 'r') as stream:
+            inventory = yaml.safe_load(stream)
+        self.hostname = inventory['all']['children']['master']['hosts']
+
+        log.debug("Read data from inventory.yml:\n\thostname: %s", self.hostname)
+
+
+    def create_droplet(self, ssh_key_id=0, hostname=None):
+        """Uses the Cosmos API to create a droplet with OAS default spec"""
+        if hostname is None:
+            # Use random generated ID in case we're not running in
+            # gitlab CI and there's no CI_PIPELINE_ID env var
+            hostname = ''.join(
+                random.choice(string.ascii_lowercase + string.digits)
+                for _ in range(10))
+            log.info('Generated hostname %s', hostname)
+        droplet = cosmos.create_droplet(
+            name=hostname,
+            ssh_key_id=ssh_key_id,
+            region='ams1',
+            size=4096,
+            disk=25,
+            # image: 19 = Debian buster-x64
+            image=19)
+        droplet_id = droplet['droplet']['id']
+        log.info('Created droplet id: %s', droplet_id)
+        cosmos.wait_for_state(droplet_id, 'running')
+        self.set_info_by_droplet_id(droplet_id)
+
+    def set_info_by_droplet_id(self, droplet_id):
+        """Sets info about the cluster based on the Greenhost VPS id"""
+        droplet = cosmos.get_droplet(droplet_id)
+        self.ip_address = droplet['networks']['v4'][0]['ip_address']
+        self.hostname = droplet['name']
+
+    def write_cluster_files(self):
+        """Creates an inventory.yml and settings.yml file for the cluster"""
+        self.make_cluster_directory()
+        ansible.create_inventory(self)
+
+        # Create settings
+        with open(os.path.join(ansible.ANSIBLE_PATH, "group_vars",
+                               "all", "settings.yml.example"),
+                  'r') as stream:
+            settings = yaml.safe_load(stream)
+
+        settings['ip_address'] = self.ip_address
+        settings['domain'] = self.domain
+        settings['admin_email'] = "admin@{0}".format(self.domain)
+        settings['acme_staging'] = True
+        settings['secret_directory'] = self.secret_dir
+
+        with open(self.settings_file, 'w') as stream:
+            yaml.safe_dump(settings, stream, default_flow_style=False)
+
+        log.debug(yaml.safe_dump(settings, default_flow_style=False))
+
+    def make_cluster_directory(self):
+        """Make sure the cluster's file directory exists"""
+        if not os.path.exists(self.cluster_dir):
+            os.makedirs(self.cluster_dir)
+
+    @property
+    def inventory_file(self):
+        """Path to the ansible inventory.yml for this cluster"""
+        return os.path.join(self.cluster_dir, "inventory.yml")
+
+    @property
+    def settings_file(self):
+        """Path to the ansible settings.yml for this cluster"""
+        return os.path.join(self.cluster_dir, "settings.yml")
+
+    @property
+    def behave_file(self):
+        """Path to 'behave.ini' which is used for acceptance tests"""
+        return os.path.join(self.cluster_dir, "behave.ini")
+
+    @property
+    def secret_dir(self):
+        """Path where all the passwords for cluster admins are saved"""
+        return os.path.join(self.cluster_dir, 'secrets')
+
+    def write_behave_config(self):
+        """Write behave config file for the cluster."""
+        secret_directory = self.secret_dir
+        with open(os.path.join(
+                secret_directory, 'nextcloud_admin_password'), 'r') as stream:
+            nextcloud_admin_password = yaml.safe_load(stream)
+
+        with open(os.path.join(
+                secret_directory, 'grafana_admin_password'), 'r') as stream:
+            grafana_admin_password = yaml.safe_load(stream)
+
+        behave_config = configparser.ConfigParser()
+        behave_config['behave'] = {}
+        behave_config['behave']['format'] = 'rerun'
+        behave_config['behave']['outfiles'] = 'rerun_failing.features'
+        behave_config['behave']['show_skipped'] = 'false'
+
+        behave_config['behave.userdata'] = {}
+
+        behave_config['behave.userdata']['nextcloud.url'] = \
+            'https://files.{}'.format(self.domain)
+        behave_config['behave.userdata']['nextcloud.username'] = 'admin'
+        behave_config['behave.userdata']['nextcloud.password'] = \
+            nextcloud_admin_password
+
+        behave_config['behave.userdata']['grafana.url'] = \
+            'https://grafana.{}'.format(self.domain)
+        behave_config['behave.userdata']['grafana.username'] = 'admin'
+        behave_config['behave.userdata']['grafana.password'] = \
+            grafana_admin_password
+
+        with open(self.behave_file, 'w') as configfile:
+            behave_config.write(configfile)
+
+    def print_info(self):
+        """Writes information about the cluster. Useful for debugging"""
+        print(("""Cluster "{name}":
+  - IP address: {ip_address}
+  - Hostname: {hostname}
+  - Domain: {domain}
+
+Configuration:
+  - Inventory file: {inventory_file}
+  - Settings file: {settings_file}
+  - Behave file: {behave_file}""").format(
+            name=self.name,
+            ip_address=self.ip_address,
+            hostname=self.hostname,
+            domain=self.domain,
+            inventory_file=self.inventory_file,
+            settings_file=self.settings_file,
+            behave_file=self.behave_file))
diff --git a/test/ansible.cfg b/test/ansible.cfg
deleted file mode 120000
index 0b986ffbd15ce7c5f43039dd53f3dbdad2981e03..0000000000000000000000000000000000000000
--- a/test/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../ansible/ansible.cfg
\ No newline at end of file
diff --git a/test/bootstrap.yml b/test/bootstrap.yml
deleted file mode 120000
index d29cf97c10ac85c0dc68fa448f5d9aaeae9714fe..0000000000000000000000000000000000000000
--- a/test/bootstrap.yml
+++ /dev/null
@@ -1 +0,0 @@
-../ansible/bootstrap.yml
\ No newline at end of file