diff --git a/openappstack/__init__.py b/openappstack/__init__.py
index e82e91afb032bdecc0491cd399332bf291700fc1..a85c322e6af4271383833a7a63dc8bf43c760de4 100644
--- a/openappstack/__init__.py
+++ b/openappstack/__init__.py
@@ -1 +1 @@
-name="openappstack"
+name='openappstack'
diff --git a/openappstack/__main__.py b/openappstack/__main__.py
index 44180ec29527e046f2081b8e30bf79e762f46d7c..3440d3fa807400b131657b3da4a474a5d8b1011b 100755
--- a/openappstack/__main__.py
+++ b/openappstack/__main__.py
@@ -64,16 +64,16 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
         'cluster_name',
         metavar='CLUSTER_NAME',
         type=str,
-        help="Name of the cluster you want to use openappstack with")
+        help='Name of the cluster you want to use openappstack with')
 
-    subparsers = parser.add_subparsers(help="Available subcommands")
+    subparsers = parser.add_subparsers(help='Available subcommands')
 
     create_parser = subparsers.add_parser('create', help='Creates a new cluster')
     create_parser.set_defaults(func=create)
 
     create_parser.add_argument(
         'domain',
-        metavar="DOMAIN_NAME",
+        metavar='DOMAIN_NAME',
         help='Domain name to run OpenAppStack under')
 
     group = create_parser.add_mutually_exclusive_group(required=True)
@@ -100,8 +100,7 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
         '--ssh-key-id',
         metavar='ID',
         type=int,
-        help=('Greenhost-specific database ID of ssh key to deploy with'
-              '(default: 411)'))
+        help='Greenhost-specific database ID of ssh key to deploy with')
 
     droplet_creation_group.add_argument(
         '--create-domain-records',
@@ -112,11 +111,16 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
     droplet_creation_group.add_argument(
         '--subdomain',
         type=str,
-        metavar="SUBDOMAIN",
-        default="",
+        metavar='SUBDOMAIN',
         help=('Use a custom subdomain for the generated domain records. '
               'Defaults to empty string (no subdomain)'))
 
+    droplet_creation_group.add_argument(
+        '--acme-live-environment',
+        action='store_true',
+        help=("Use this for production clusters. Uses live Let's Encrypt "
+              'environment instead of staging'))
+
     install_parser = subparsers.add_parser(
         'install',
         help=("Use this to run the ansible playbook that sets up your VPS to run "
@@ -199,31 +203,46 @@ def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-lo
         clus.load_data()
         cosmos.terminate_droplets_by_name(clus.hostname)
 
+    if not hasattr(args, 'func') and not args.terminate_droplet:
+        parser.print_help()
+        sys.exit(1)
 
 def info(clus, _args):
-    """Shows cluster information and then exits"""
+    """
+    Shows cluster information and then exits
+
+    :param cluster.Cluster clus: cluster to show information about
+    :param argparse.Namespace _args: ignored, added for compatibility with
+        create, install and test functions.
+    """
     clus.load_data()
     clus.print_info()
 
 
 def create(clus, args):
-    """Parses arguments for the "create" subcommand"""
+    """
+    Parses arguments for the 'create' subcommand
+
+    :param cluster.Cluster clus: Cluster object to edit
+    :param argparse.Namespace args: Command line arguments
+    """
     clus = cluster.Cluster(args.cluster_name)
 
     if args.create_droplet:
         if not args.ssh_key_id:
-            log.error("--ssh-key-id required when using --create-droplet")
-            sys.exit(1)
-        if not args.hostname:
-            log.error("--hostname required when using --create-droplet")
+            log.error('--ssh-key-id required when using --create-droplet')
             sys.exit(1)
 
     if args.subdomain:
-        domain = "{subdomain}.{domain}".format(
+        domain = '{subdomain}.{domain}'.format(
             subdomain=args.subdomain, domain=args.domain)
     else:
         domain = args.domain
     clus.domain = domain
+
+    # Set acme_staging to False so we use Let's Encrypt's live environment
+    if args.acme_live_environment:
+        clus.acme_staging = False
     if args.create_droplet:
         clus.create_droplet(ssh_key_id=args.ssh_key_id, hostname=args.hostname)
         if args.verbose:
@@ -236,15 +255,20 @@ def create(clus, args):
     # Write inventory.yml and settings.yml files
     clus.write_cluster_files()
 
-
     if args.create_domain_records:
-        create_domain_records(args.domain, args.subdomain, clus.ip_address)
+        create_domain_records(
+            args.domain, clus.ip_address, subdomain=args.subdomain)
         if args.verbose:
-            cosmos.list_domain_records('openappstack.net')
+            cosmos.list_domain_records(args.domain)
 
 
 def install(clus, args):
-    """Parses arguments for the "install" subcommand"""
+    """
+    Parses arguments for the 'install' subcommand
+
+    :param cluster.Cluster clus: Cluster object to install OAS to
+    :param argparse.Namespace args: Command line arguments
+    """
     clus.load_data()
     ansible.run_ansible(
         clus,
@@ -253,7 +277,12 @@ def install(clus, args):
 
 
 def test(clus, args):
-    """Runs behave or testinfra test. Overwrites behave_path/behave.ini!"""
+    """
+    Runs behave or testinfra test. Overwrites behave_path/behave.ini!
+
+    :param cluster.Cluster clus: Cluster object to run tests on
+    :param argparse.Namespace args: Command line arguments
+    """
 
     # At the moment we only have one type if test, but more tests will be added
     # to this in the future. If args.run_test is empty, we run all the tests
@@ -272,33 +301,47 @@ def test(clus, args):
         clus.write_behave_config(behave_ini)
         command = []
         if args.behave_rerun_failing:
-            command.append("@rerun_failing.features")
+            command.append('@rerun_failing.features')
         if args.behave_headless:
-            command.append("-D headless=True")
+            command.append('-D headless=True')
         for tag in args.behave_tags:
             log.info(command)
-            command.append("-t {tag}".format(tag=tag))
-        log.info("Running behave command %s", command)
+            command.append('-t {tag}'.format(tag=tag))
+        log.info('Running behave command %s', command)
         behave_main(command)
 
         # Remove behave.ini so we don't leave secrets hanging around.
         os.remove(behave_ini)
 
 
-def create_domain_records(domain, subdomain, droplet_ip):
+def create_domain_records(domain, droplet_ip, subdomain=None):
     """
-    Creates domain records at Greenhost in the way OpenAppStack expects it
+    Creates 2 domain records at Greenhost. An A record at subdomain.domain,
+    pointing to droplet_ip and a CNAME record pointing *.subdomain.domain to
+    the first record.
+
+    :param str domain: the domain the cluster will be hosted on.
+    :param str droplet_ip: The IP address the A record will point to.
+    :param subdomain: Optional subdomain to host OAS on.
+    :type subdomain: str or None
     """
-    # Create domain records
+    # '@' denotes the bare domain when no subdomain is given
+    subdomain_arg = '@' if subdomain is None else subdomain
+
     domain_record = cosmos.create_domain_record(
-        domain=domain, name=subdomain,
+        domain=domain, name=subdomain_arg,
         data=droplet_ip, record_type='A', update=True)
-    log.info("Domain record: %s", domain_record)
+    log.info('Domain record: %s', domain_record)
+
+    subdomain_arg = '*'
+
+    if subdomain is not None:
+        subdomain_arg += '.' + subdomain
 
     domain_record = cosmos.create_domain_record(
-        domain=domain, name='*.' + subdomain,
+        domain=domain, name=subdomain_arg,
         data=subdomain, record_type='CNAME', update=True)
-    log.info("Domain record: %s", domain_record)
+    log.info('Domain record: %s', domain_record)
 
 
 def init_logging(logger, loglevel):
@@ -307,12 +350,16 @@ def init_logging(logger, loglevel):
 
     - debug and info go to stdout
     - warning and above go to stderr
+
+    :param logger: Logger to initialise
+    :param int loglevel: Log level from 0 to 50 (use logging.WARNING,
+        logging.INFO, etc.)
     """
     logger.setLevel(loglevel)
     stdout = logging.StreamHandler(sys.stdout)
     stdout.setLevel(loglevel)
     stdout.addFilter(lambda record: record.levelno <= logging.INFO)
-    logger.info("Set log level to %d", loglevel)
+    logger.info('Set log level to %d', loglevel)
 
     stderr = logging.StreamHandler()
     stderr.setLevel(logging.WARNING)
@@ -321,7 +368,7 @@ def init_logging(logger, loglevel):
     logger.addHandler(stderr)
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     # Setup logging for this script
-    log = logging.getLogger("openappstack")  # pylint: disable=invalid-name
+    log = logging.getLogger('openappstack')  # pylint: disable=invalid-name
     main()
diff --git a/openappstack/ansible.py b/openappstack/ansible.py
index 1c08fb0fc5b4f81435c318d8c38fd9e3b1eaa170..2a8fa01fac3ea6425b26d1578492e80dc15e05f9 100644
--- a/openappstack/ansible.py
+++ b/openappstack/ansible.py
@@ -11,20 +11,18 @@ log = logging.getLogger(__name__)  # pylint: disable=invalid-name
 
 ANSIBLE_INVENTORY = './clusters/{cluster_name}/inventory.yml'
 ANSIBLE_PATH = os.path.join(os.path.dirname(__file__),
-                            '..', "ansible")
+                            '..', 'ansible')
 
 def run_ansible(clus, playbook, ansible_params=None):
     """
     Call `ansible-playbook` in a subprocess to run the specified playbook. Runs
     in the package's ansible directory.
 
-    Parameters:
-    - playbook: path to the playbook to run.
-    - ansible_params: Optionally provide a list of lists with ansible params.
-      Each inner list may only contain one element. Can be directly forwarded
-      from argparse.
-
-      Example: ansible_params = [[become-user=root], [verbose]]
+    :param str playbook: path to the playbook to run.
+    :param list ansible_params: Optionally provide a list of lists with ansible
+        params.  Each inner list may only contain one element. Can be directly
+        forwarded from argparse. Example:
+        `ansible_params = [[become-user=root], [verbose]]`
     """
     # playbook path here is relative to private_data_dir/project, see
     # https://ansible-runner.readthedocs.io/en/latest/intro.html#inputdir
@@ -65,9 +63,12 @@ def create_inventory(cluster):
     """
     Creates inventory for the ansible playbook. Needs the droplet's hostname
     for identification and the IP for connecting with Ansible
+
+    :param cluster.Cluster cluster: Cluster object for which the inventory file
+        will be written. Used for getting hostname and IP address.
     """
     # Create inventory
-    with open(os.path.join(ANSIBLE_PATH, "inventory.yml.example"),
+    with open(os.path.join(ANSIBLE_PATH, 'inventory.yml.example'),
               'r') as stream:
         inventory = yaml.safe_load(stream)
 
diff --git a/openappstack/cluster.py b/openappstack/cluster.py
index 260c2a41efcb89a07592aaba3311e4a4dd10e807..9300602944008d656b0d9d49b7dd00b355dbbfb0 100644
--- a/openappstack/cluster.py
+++ b/openappstack/cluster.py
@@ -9,12 +9,28 @@ import sys
 import yaml
 from openappstack import ansible, cosmos
 
-CLUSTER_PATH = os.path.join(os.getcwd(), "clusters")
+CLUSTER_PATH = os.path.join(os.getcwd(), 'clusters')
 
 log = logging.getLogger(__name__)  # pylint: disable=invalid-name
 
+DEFAULT_REGION = 'ams1'
+"""Greenhost region where VPS will be started with create_droplet"""
+DEFAULT_DISK_SIZE_GB = 25
+"""Default disk size"""
+DEFAULT_MEMORY_SIZE_MB = 4096
+"""Default amount of memory"""
+DEFAULT_IMAGE = 19
+"""Default "image" (operating system): 19  =  Debian buster-x64 """
+
 class Cluster:
-    """helper class for cluster-related paths, files, etc."""
+    """
+    Helper class for cluster-related paths, files, etc.
+
+    :param str cluster_name: Identifier of the cluster. A folder in
+        CLUSTER_PATH will be created with this name.
+    :param bool load_data: If this is true, `load_data` function is called at
+        the end of the constructor.
+    """
 
     def __init__(self, cluster_name, load_data=False):
         self.name = cluster_name
@@ -22,6 +38,8 @@ class Cluster:
         self.ip_address = None
         self.hostname = None
         self.domain = None
+        # By default, use Let's Encrypt's staging environment
+        self.acme_staging = True
 
         # Load data from inventory.yml and settings.yml
         if load_data:
@@ -29,8 +47,6 @@ class Cluster:
 
     def load_data(self):
         """Loads cluster data from inventory.yml and settings.yml files"""
-        # Work with the master node from the inventory
-
         with open(self.settings_file, 'r') as stream:
             settings = yaml.safe_load(stream)
             self.ip_address = settings['ip_address']
@@ -42,12 +58,19 @@ class Cluster:
 
         with open(self.inventory_file, 'r') as stream:
             inventory = yaml.safe_load(stream)
+        # Work with the master node from the inventory
         self.hostname = inventory['all']['children']['master']['hosts']
 
-        log.debug("Read data from inventory.yml:\n\thostname: %s", self.hostname)
+        log.debug('Read data from inventory.yml:\n\thostname: %s', self.hostname)
 
     def create_droplet(self, ssh_key_id=0, hostname=None):
-        """Uses the Cosmos API to create a droplet with OAS default spec"""
+        """
+        Uses the Cosmos API to create a droplet with OAS default spec
+
+        :param int ssh_key_id: SSH key ID in Greenhost Cosmos.
+        :param str hostname: hostname of the droplet created at GH.
+            If not provided, a hostname will be auto-generated.
+        """
         if hostname is None:
             # Use random generated ID in case we're not running in
             # gitlab CI and there's no CI_PIPELINE_ID env var
@@ -58,18 +81,21 @@ class Cluster:
         droplet = cosmos.create_droplet(
             name=hostname,
             ssh_key_id=ssh_key_id,
-            region='ams1',
-            size=4096,
-            disk=25,
-            # image: 19 = Debian buster-x64
-            image=19)
+            region=DEFAULT_REGION,
+            size=DEFAULT_MEMORY_SIZE_MB,
+            disk=DEFAULT_DISK_SIZE_GB,
+            image=DEFAULT_IMAGE)
         droplet_id = droplet['droplet']['id']
         log.info('Created droplet id: %s', droplet_id)
         cosmos.wait_for_state(droplet_id, 'running')
         self.set_info_by_droplet_id(droplet_id)
 
     def set_info_by_droplet_id(self, droplet_id):
-        """Sets info about the cluster based on the Greenhost VPS id"""
+        """
+        Sets info about the cluster based on the Greenhost VPS id
+
+        :param int droplet_id: Droplet ID at Greenhost
+        """
         droplet = cosmos.get_droplet(droplet_id)
         self.ip_address = droplet['networks']['v4'][0]['ip_address']
         self.hostname = droplet['name']
@@ -80,15 +106,15 @@ class Cluster:
         ansible.create_inventory(self)
 
         # Create settings
-        with open(os.path.join(ansible.ANSIBLE_PATH, "group_vars",
-                               "all", "settings.yml.example"),
+        with open(os.path.join(ansible.ANSIBLE_PATH, 'group_vars',
+                               'all', 'settings.yml.example'),
                   'r') as stream:
             settings = yaml.safe_load(stream)
 
         settings['ip_address'] = self.ip_address
         settings['domain'] = self.domain
-        settings['admin_email'] = "admin@{0}".format(self.domain)
-        settings['acme_staging'] = True
+        settings['admin_email'] = 'admin@{0}'.format(self.domain)
+        settings['acme_staging'] = self.acme_staging
         settings['cluster_dir'] = self.cluster_dir
 
         with open(self.settings_file, 'w') as stream:
@@ -98,23 +124,22 @@ class Cluster:
 
     def make_cluster_directory(self):
         """Make sure the cluster's file directory exists"""
-        if not os.path.exists(self.cluster_dir):
-            os.makedirs(self.cluster_dir)
+        os.makedirs(self.cluster_dir, exist_ok=True)
 
     @property
     def inventory_file(self):
         """Path to the ansible inventory.yml for this cluster"""
-        return os.path.join(self.cluster_dir, "inventory.yml")
+        return os.path.join(self.cluster_dir, 'inventory.yml')
 
     @property
     def settings_file(self):
         """Path to the ansible settings.yml for this cluster"""
-        return os.path.join(self.cluster_dir, "settings.yml")
+        return os.path.join(self.cluster_dir, 'settings.yml')
 
     @property
     def behave_file(self):
         """Path to 'behave.ini' which is used for acceptance tests"""
-        return os.path.join(self.cluster_dir, "behave.ini")
+        return os.path.join(self.cluster_dir, 'behave.ini')
 
     @property
     def secret_dir(self):
@@ -125,10 +150,9 @@ class Cluster:
         """
         Write behave config file for the cluster.
 
-        Configuration is written to config_path (e.g.
-        /home/you/openappstack/test/behave.ini)
-
-        If config_path already exists, the program is aborted.
+        :param str config_path: Configuration is written to config_path (e.g.
+            /home/you/openappstack/test/behave.ini). If config_path already
+            exists, the program is aborted.
         """
         if os.path.isfile(config_path):
             log.error('%s file already exists, not overwriting '
@@ -169,7 +193,8 @@ class Cluster:
 
     def print_info(self):
         """Writes information about the cluster. Useful for debugging"""
-        print(("""Cluster "{name}":
+        info_string = """
+Cluster "{name}":
   - IP address: {ip_address}
   - Hostname: {hostname}
   - Domain: {domain}
@@ -182,7 +207,8 @@ Kubectl:
 
 To use kubectl with this cluster, copy-paste this in your terminal:
 
-export KUBECONFIG={secret_dir}/kube_config_cluster.yml""").format(
+export KUBECONFIG={secret_dir}/kube_config_cluster.yml"""
+        print(info_string.format(
             name=self.name,
             ip_address=self.ip_address,
             hostname=self.hostname,