"""Contains code for managing the files related to an OpenAppStack cluster."""

import base64
import logging
import os
import sys

import yaml
from kubernetes import client, config

# Project-internal helper around the Greenhost Cosmos API (import path assumed)
from openappstack import greenhost_cloud

CLUSTER_PATH = os.path.join(os.getcwd(), 'clusters')

log = logging.getLogger(__name__)  # pylint: disable=invalid-name


"""Current possible RAM/VCPUs combinations:

512 MiB / 1 CPU core
1 GiB / 1 CPU core
2 GiB / 1 CPU core
3 GiB / 2 CPU cores
4 GiB / 2 CPU cores
6 GiB / 3 CPU cores
8 GiB / 4 CPU cores
12 GiB / 6 CPU cores
16 GiB / 8 CPU cores
"""

"""Greenhost region where VPS will be started with create_droplet"""
DEFAULT_REGION = 'ams1'
"""Default disk size"""
DEFAULT_DISK_SIZE_GB = 30
"""Default amount of memory"""
DEFAULT_MEMORY_SIZE_MB = 12288
"""Default "image" (operating system): 19  =  Debian buster-x64 """
DEFAULT_IMAGE = 19


class Cluster:
    """
    Helper class for cluster-related paths, files, etc.

    :param str cluster_name: Identifier of the cluster. A folder in
        CLUSTER_PATH will be created with this name.
    :param bool load_data: If this is true, `load_data` function is called at
        the end of the constructor.
    """

    def __init__(self, cluster_name, load_data=False):
        self.name = cluster_name
        self.cluster_dir = os.path.join(CLUSTER_PATH, cluster_name)
        self.ip_address = None
        self.hostname = None
        self.domain = None
        # Set this to False if the data needs to be (re)loaded from file
        self.data_loaded = False
        # Load data from inventory.yml
        if load_data:
            self.load_data()
        # Can be used to use a custom disk image.
        self.disk_image_id = DEFAULT_IMAGE
        self.docker_mirror_server = None
        self.docker_mirror_endpoint = None
        self.docker_mirror_username = None
        self.docker_mirror_password = None

    def load_data(self):
        """
        Loads cluster data from inventory.yml and files.

        Set self.data_loaded to False if this function should re-read data.
        """
        if not self.data_loaded:
            with open(self.inventory_file, 'r') as stream:
                inventory = yaml.safe_load(stream)
                # print(inventory)
                # Work with the master node from the inventory
                self.hostname = inventory['all']['children']['master']['hosts']
                self.domain = \
                        inventory['all']['hosts'][self.hostname]['domain']
                self.ip_address = \
                        inventory['all']['hosts'][self.hostname]['ansible_host']
            log.debug("""Read data from inventory:
                ip address: %s
                domain: %s
                hostname: %s""", self.ip_address, self.domain, self.hostname)
        else:
            log.debug('Not loading cluster data from file. Set '
                      'Cluster.data_loaded to False if you want a reload.')
        self.data_loaded = True
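
    # A minimal sketch of the inventory.yml layout load_data() expects, based
    # on the keys accessed above; the hostname, address and domain below are
    # illustrative placeholders, not values from this repository:
    #
    #   all:
    #     children:
    #       master:
    #         hosts: oas-master
    #     hosts:
    #       oas-master:
    #         ansible_host: 192.0.2.10
    #         domain: example.org
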
    def create_droplet(self, ssh_key_id=0, hostname=None):
        """
        Uses the Cosmos API to create a droplet with OAS default spec

        :param int ssh_key_id: SSH key ID in Greenhost Cosmos.
        :param str hostname: hostname of the droplet created at GH. Defaults to
            the cluster name
        """
        # If hostname is not set use cluster name for it.
        if hostname is None:
            hostname = self.name

        if hostname == "master":
            ram = 12288
        else:
            ram = DEFAULT_MEMORY_SIZE_MB

        # NOTE: the "name" and "size" keyword arguments are assumed to match
        # the greenhost_cloud wrapper's create_droplet signature.
        droplet = greenhost_cloud.create_droplet(
            name=hostname,
            ssh_key_id=[ssh_key_id],
            region=DEFAULT_REGION,
            size=ram,
            disk=DEFAULT_DISK_SIZE_GB,
            image=self.disk_image_id)
        droplet_id = droplet['droplet']['id']
        log.info('Created droplet id: %s', droplet_id)
        greenhost_cloud.wait_for_state(droplet_id, 'running')
        self.set_info_by_droplet_id(droplet_id)

    def set_info_by_droplet_id(self, droplet_id):
        """
        Sets info about the cluster based on the Greenhost VPS id

        :param int droplet_id: Droplet ID at Greenhost
        """
        droplet = greenhost_cloud.get_droplet(droplet_id)
        self.ip_address = droplet['networks']['v4'][0]['ip_address']
        self.hostname = droplet['name']

    def set_info_by_hostname(self, hostname):
        """
        Sets info based on hostname, assuming that the hostname can be found
        with the Cosmos API
        """
        hostname = r"^{}$".format(hostname)
        droplets = greenhost_cloud.get_droplets_by_name(hostname)
        if droplets == []:
            log.error("Droplet with hostname %s not found", hostname)
            sys.exit(3)
        self.ip_address = droplets[0]['networks']['v4'][0]['ip_address']
        self.hostname = droplets[0]['name']

    def set_info_by_ip_and_hostname(self, ip_address, hostname):
        """
        Sets info based on hostname and IP address provided by the user. No API
        needed
        """
        self.ip_address = ip_address
        self.hostname = hostname

    def write_cluster_files(self):
        """Creates an inventory.yml and dotenv file for the cluster"""
        self.make_cluster_directories()
        dotenv_file = """CLUSTER_NAME={name}
CLUSTER_DIR={cluster_dir}
IP_ADDRESS={ip_address}
HOSTNAME={hostname}
KUBECONFIG={cluster_dir}/kube_config_cluster.yml
"""

        with open(self.dotenv_file, 'w') as stream:
            stream.write(dotenv_file.format(
                name=self.name,
                cluster_dir=self.cluster_dir,
                ip_address=self.ip_address,
                hostname=self.hostname,
                domain=self.domain,
            ))
            log.info("Created %s", self.dotenv_file)

        # Set self.data_loaded to True because the data in the class now
        # reflects the data in the file.
        self.data_loaded = True
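
    # With a cluster named "my-cluster", the generated .cluster.env would look
    # roughly like this (paths and IP address are illustrative):
    #
    #   CLUSTER_NAME=my-cluster
    #   CLUSTER_DIR=/home/user/clusters/my-cluster
    #   IP_ADDRESS=192.0.2.10
    #   HOSTNAME=my-cluster
    #   KUBECONFIG=/home/user/clusters/my-cluster/kube_config_cluster.yml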

    def make_cluster_directories(self):
        """Make sure the cluster's file directory exists"""
        os.makedirs(self.cluster_dir, exist_ok=True)

    @property
    def inventory_file(self):
        """Path to the ansible inventory.yml for this cluster"""
        return os.path.join(self.cluster_dir, 'inventory.yml')

    @property
    def dotenv_file(self):
        """Path to the .cluster.env file with relevant environment variables"""
        return os.path.join(self.cluster_dir, '.cluster.env')

    def dump_secrets(self):
        """Shows all OAS cluster secrets."""
        all_secrets = {
            'flux-system': {
                'oas-kube-prometheus-stack-variables': ['grafana_admin_password'],
                'oas-nextcloud-variables': [
                    'nextcloud_mariadb_password',
                    'nextcloud_mariadb_root_password',
                    'nextcloud_password',
                    'onlyoffice_jwt_secret',
                    'onlyoffice_postgresql_password',
                    'onlyoffice_rabbitmq_password'],
                'oas-rocketchat-variables': [
                    'rocketchat_admin_password',
                    'mongodb_root_password',
                    'mongodb_password'],
                'oas-single-sign-on-variables': [
                    'userbackend_admin_username',
                    'userbackend_admin_password',
                    'userbackend_postgres_password',
                    'hydra_system_secret'],
                'oas-wordpress-variables': [
                    'wordpress_admin_password',
                    'wordpress_mariadb_password',
                    'wordpress_mariadb_root_password']
            },
            'oas': {
                'oas-alertmanager-basic-auth': ['pass'],
                'oas-prometheus-basic-auth': ['pass']
            }
        }
        for namespace, sec in all_secrets.items():
            for app, app_secrets in sec.items():
                for app_secret in app_secrets:
                    secret = self.get_password_from_kubernetes(
                        app,
                        app_secret,
                        namespace)
                    print(f'{app}: {app_secret}={secret}')
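
    # Example of an output line produced by the loop above (the decoded value
    # is a placeholder):
    #
    #   oas-nextcloud-variables: nextcloud_password=<decoded secret>
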
    def get_password_from_kubernetes(self, secret, key, namespace):
        """
        Reads a password from the Kubernetes cluster. Always returns a string,
        but returns "password not found" if no password was found.

        :param string secret: The name of the secret in the cluster
        :param string key: The key inside the secret that contains the base64
            encoded password
        :param string namespace: The namespace the secret is in
        """
        kubeconfig = os.path.join(self.cluster_dir, 'kube_config_cluster.yml')
        config.load_kube_config(config_file=kubeconfig)
        api = client.CoreV1Api()
        try:
            secret_data = api.read_namespaced_secret(secret, namespace)
        except client.rest.ApiException:
            print(f"Secret {secret} not found in namespace '{namespace}'")
            return "password not found"
        try:
            password = secret_data.data[key]
        except KeyError:
            print(f"Could not get password from secret '{secret}' in namespace"
                  f" '{namespace}' with key '{key}'")
            return "password not found"

        return base64.b64decode(password).decode('utf-8')
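
    # For reference, roughly the same lookup can be done by hand with kubectl
    # (placeholders in angle brackets):
    #
    #   kubectl --kubeconfig clusters/<name>/kube_config_cluster.yml \
    #       --namespace <namespace> get secret <secret> \
    #       -o jsonpath='{.data.<key>}' | base64 --decode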


    def print_info(self, args):
        """Writes information about the cluster. Useful for debugging.

        :param argparse.Namespace args: If the --ip-address argument is given,
            only prints the machine's IP address.
        """
        if args.ip_address:
            print(self.ip_address)
        else:
            info_string = """
    Cluster "{name}":
      - IP address: {ip_address}
      - Hostname: {hostname}
      - Domain: {domain}

    Configuration:
      - Inventory file: {inventory_file}

    Kubectl:

    To use kubectl with this cluster, copy-paste this in your terminal:

    export KUBECONFIG={cluster_dir}/kube_config_cluster.yml"""
            print(info_string.format(
                name=self.name,
                ip_address=self.ip_address,
                hostname=self.hostname,
                domain=self.domain,
                inventory_file=self.inventory_file,
                cluster_dir=self.cluster_dir))
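

# A minimal usage sketch (not part of the module): the surrounding CLI is
# assumed to handle argument parsing and Cosmos API credentials, and the
# import path, names and addresses below are illustrative placeholders.
#
#   from openappstack.cluster import Cluster
#
#   cluster = Cluster('my-cluster')
#   cluster.set_info_by_ip_and_hostname('192.0.2.10', 'my-cluster')
#   cluster.write_cluster_files()
#
#   # Once kube_config_cluster.yml exists in the cluster directory:
#   cluster = Cluster('my-cluster', load_data=True)
#   cluster.dump_secrets()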