"""Contains code for managing the files related to an OpenAppStack cluster.""" import configparser import logging import os import sys import yaml import greenhost_cloud from openappstack import ansible CLUSTER_PATH = os.path.join(os.getcwd(), 'clusters') log = logging.getLogger(__name__) # pylint: disable=invalid-name """Current possible RAM/VCPUs combinations: 512 MiB / 1 CPU core 1 GiB / 1 CPU core 2 GiB / 1 CPU core 3 GiB / 2 CPU cores 4 GiB / 2 CPU cores 6 GiB / 3 CPU cores 8 GiB / 4 CPU cores 12 GiB / 6 CPU cores 16 GiB / 8 CPU cores """ """Greenhost region where VPS will be started with create_droplet""" DEFAULT_REGION = 'ams1' """Default disk size""" DEFAULT_DISK_SIZE_GB = 30 """Default amount of memory""" DEFAULT_MEMORY_SIZE_MB = 8192 """Default "image" (operating system): 19 = Debian buster-x64 """ DEFAULT_IMAGE = 19 class Cluster: """ Helper class for cluster-related paths, files, etc. :param str cluster_name: Identifier of the cluster. A folder in CLUSTER_PATH will be creaeted with this name. :param bool load_data: If this is true, `load_data` function is called at the end of the constructor. """ def __init__(self, cluster_name, load_data=False): self.name = cluster_name self.cluster_dir = os.path.join(CLUSTER_PATH, cluster_name) self.ip_address = None self.hostname = None self.domain = None # By default, use Let's Encrypt's live environment self.acme_staging = False # By default, let auto-update listen to the main OpenAppStack flux repo # for updates. self.local_flux = False # Set this to False if the data needs to be (re)loaded from file self.data_loaded = False # Load data from inventory.yml and settings.yml if load_data: self.load_data() # Can be used to use a custom disk image. self.disk_image_id = DEFAULT_IMAGE def load_data(self): """ Loads cluster data from inventory.yml and settings.yml files Set self.data_loaded to False if this function should re-read data from file. """ if not self.data_loaded: with open(self.settings_file, 'r') as stream: settings = yaml.safe_load(stream) self.ip_address = settings['ip_address'] self.domain = settings['domain'] log.debug("""Read data from settings.yml: ip address: %s domain: %s""", self.ip_address, self.domain) with open(self.inventory_file, 'r') as stream: inventory = yaml.safe_load(stream) # Work with the master node from the inventory self.hostname = inventory['all']['children']['master']['hosts'] log.debug( 'Read data from inventory.yml:\n\thostname: %s', self.hostname) else: log.debug('Not loading cluster data from file. Set ' 'Cluster.data_loaded to False if you want a reload.') self.data_loaded = True def create_droplet(self, ssh_key_id=0, hostname=None): """ Uses the Cosmos API to create a droplet with OAS default spec :param int ssh_key_id: SSH key ID in Greenhost Cosmos. :param str hostname: hostname of the droplet created at GH. Defaults to the cluster name """ if hostname is None: # If hostname is not set use cluster name for it. 
            hostname = self.name

        if hostname == "master":
            ram = 12288
        else:
            ram = DEFAULT_MEMORY_SIZE_MB

        droplet = greenhost_cloud.create_droplet(
            name=hostname,
            ssh_key_id=[ssh_key_id],
            region=DEFAULT_REGION,
            size=ram,
            disk=DEFAULT_DISK_SIZE_GB,
            image=self.disk_image_id)
        droplet_id = droplet['droplet']['id']
        log.info('Created droplet id: %s', droplet_id)
        greenhost_cloud.wait_for_state(droplet_id, 'running')
        self.set_info_by_droplet_id(droplet_id)

    def set_info_by_droplet_id(self, droplet_id):
        """
        Sets info about the cluster based on the Greenhost VPS id.

        :param int droplet_id: Droplet ID at Greenhost
        """
        droplet = greenhost_cloud.get_droplet(droplet_id)
        self.ip_address = droplet['networks']['v4'][0]['ip_address']
        self.hostname = droplet['name']

    def set_info_by_hostname(self, hostname):
        """
        Sets info based on hostname, assuming that the hostname can be found
        with the Cosmos API.
        """
        hostname = r"^{}$".format(hostname)
        droplets = greenhost_cloud.get_droplets_by_name(hostname)
        if droplets == []:
            log.error("Droplet with hostname %s not found", hostname)
            sys.exit(3)
        self.ip_address = droplets[0]['networks']['v4'][0]['ip_address']
        self.hostname = droplets[0]['name']

    def set_info_by_ip_and_hostname(self, ip_address, hostname):
        """
        Sets info based on hostname and IP address provided by the user.
        No API needed.
        """
        self.ip_address = ip_address
        self.hostname = hostname

    def write_cluster_files(self):
        """Creates an inventory.yml and settings.yml file for the cluster"""
        self.make_cluster_directories()
        ansible.create_inventory(self)

        # Create settings
        with open(os.path.join(ansible.ANSIBLE_PATH, 'group_vars', 'all',
                               'settings.yml.example'), 'r') as stream:
            settings = yaml.safe_load(stream)

        settings['ip_address'] = self.ip_address
        settings['domain'] = self.domain
        settings['admin_email'] = 'admin@{0}'.format(self.domain)
        settings['flux']['local_flux'] = self.local_flux
        settings['cluster_dir'] = self.cluster_dir
        # Configure apps to handle invalid certs, e.g. from the
        # Let's Encrypt staging API
        settings['acme_staging'] = self.acme_staging
        nextcloud_extra_values = """
onlyoffice:
  unauthorizedStorage: true
  httpsHstsEnabled: false
"""
        if self.acme_staging:
            settings['nextcloud_extra_values'] = \
                yaml.safe_load(nextcloud_extra_values)

        file_contents = yaml.safe_dump(settings, default_flow_style=False)
        log.debug(file_contents)

        # Create CLUSTER_DIR/group_vars/all/ if non-existent
        vars_dir = os.path.dirname(self.settings_file)
        if not os.path.exists(vars_dir):
            os.makedirs(vars_dir)
        with open(self.settings_file, 'w') as stream:
            stream.write(file_contents)
        log.info("Created %s", self.settings_file)

        dotenv_file = """CLUSTER_NAME={name}
CLUSTER_DIR={cluster_dir}
IP_ADDRESS={ip_address}
HOSTNAME={hostname}
FQDN={domain}
LOCAL_FLUX={local_flux}
KUBECONFIG={secret_dir}/kube_config_cluster.yml
"""
        with open(self.dotenv_file, 'w') as stream:
            stream.write(dotenv_file.format(
                name=self.name,
                cluster_dir=self.cluster_dir,
                ip_address=self.ip_address,
                hostname=self.hostname,
                domain=self.domain,
                local_flux=self.local_flux,
                secret_dir=self.secret_dir
            ))
        log.info("Created %s", self.dotenv_file)

        # Set self.data_loaded to True because the data in the class now
        # reflects the data in the file.
        self.data_loaded = True

    def make_cluster_directories(self):
        """Make sure the cluster's file directories exist"""
        os.makedirs(self.cluster_dir, exist_ok=True)
        os.makedirs(self.secret_dir, exist_ok=True)

    @property
    def inventory_file(self):
        """Path to the ansible inventory.yml for this cluster"""
        return os.path.join(self.cluster_dir, 'inventory.yml')

    @property
    def settings_file(self):
        """Path to the ansible settings.yml for this cluster"""
        return os.path.join(
            self.cluster_dir, 'group_vars', 'all', 'settings.yml')

    @property
    def dotenv_file(self):
        """Path to the .cluster.env file with relevant environment variables"""
        return os.path.join(self.cluster_dir, '.cluster.env')

    @property
    def behave_file(self):
        """Path to 'behave.ini' which is used for acceptance tests"""
        return os.path.join(self.cluster_dir, 'behave.ini')

    @property
    def secret_dir(self):
        """Path where all the passwords for cluster admins are saved"""
        return os.path.join(self.cluster_dir, 'secrets')

    def write_behave_config(self, config_path):
        """
        Write behave config file for the cluster.

        :param str config_path: Configuration is written to config_path
            (e.g. /home/you/openappstack/test/behave.ini). If config_path
            already exists, the program is aborted.
        """
        if os.path.isfile(config_path):
            log.error('%s file already exists, not overwriting '
                      'file! Remove the file if you want to run behave. '
                      'Program will exit now', config_path)
            sys.exit(2)

        secret_directory = self.secret_dir
        with open(os.path.join(
                secret_directory, 'nextcloud_admin_password'), 'r') as stream:
            nextcloud_admin_password = yaml.safe_load(stream)
        with open(os.path.join(
                secret_directory, 'rocketchat_admin_password'), 'r') as stream:
            rocketchat_admin_password = yaml.safe_load(stream)
        with open(os.path.join(
                secret_directory, 'wordpress_admin_password'), 'r') as stream:
            wordpress_admin_password = yaml.safe_load(stream)
        with open(os.path.join(
                secret_directory, 'grafana_admin_password'), 'r') as stream:
            grafana_admin_password = yaml.safe_load(stream)

        behave_config = configparser.ConfigParser()
        behave_config['behave'] = {}
        behave_config['behave']['format'] = 'rerun'
        behave_config['behave']['outfiles'] = 'rerun_failing.features'
        behave_config['behave']['show_skipped'] = 'false'
        behave_config['behave.userdata'] = {}
        behave_config['behave.userdata']['nextcloud.url'] = \
            'https://files.{}'.format(self.domain)
        behave_config['behave.userdata']['nextcloud.username'] = 'admin'
        behave_config['behave.userdata']['nextcloud.password'] = \
            nextcloud_admin_password
        behave_config['behave.userdata']['onlyoffice.url'] = \
            'https://office.{}/welcome'.format(self.domain)
        behave_config['behave.userdata']['rocketchat.url'] = \
            'https://chat.{}'.format(self.domain)
        behave_config['behave.userdata']['rocketchat.username'] = 'admin'
        behave_config['behave.userdata']['rocketchat.password'] = \
            rocketchat_admin_password
        behave_config['behave.userdata']['wordpress.url'] = \
            'https://www.{}/wp-login.php'.format(self.domain)
        behave_config['behave.userdata']['wordpress.username'] = 'admin'
        behave_config['behave.userdata']['wordpress.password'] = \
            wordpress_admin_password
        behave_config['behave.userdata']['grafana.url'] = \
            'https://grafana.{}'.format(self.domain)
        behave_config['behave.userdata']['grafana.username'] = 'admin'
        behave_config['behave.userdata']['grafana.password'] = \
            grafana_admin_password

        with open(config_path, 'w') as config_file:
            behave_config.write(config_file)

    def print_info(self, args):
        """Writes information about the cluster. Useful for debugging.

        :param argparse.Namespace args: If the --ip-address argument is
            given, only prints the machine's IP address.
        """
        if args.ip_address:
            print(self.ip_address)
        else:
            info_string = """
Cluster "{name}":
  - IP address: {ip_address}
  - Hostname: {hostname}
  - Domain: {domain}

Configuration:
  - Inventory file: {inventory_file}
  - Settings file: {settings_file}

Kubectl:

To use kubectl with this cluster, copy-paste this in your terminal:

export KUBECONFIG={secret_dir}/kube_config_cluster.yml"""
            print(info_string.format(
                name=self.name,
                ip_address=self.ip_address,
                hostname=self.hostname,
                domain=self.domain,
                inventory_file=self.inventory_file,
                settings_file=self.settings_file,
                secret_dir=self.secret_dir))
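
# Example usage (a minimal sketch; the module path `openappstack.cluster`, the
# domain and the IP address below are assumed placeholders, not values defined
# in this module):
#
#     from openappstack.cluster import Cluster
#
#     cluster = Cluster('example-cluster')
#     cluster.domain = 'oas.example.org'
#     cluster.set_info_by_ip_and_hostname('192.0.2.10', 'example-cluster')
#     cluster.write_cluster_files()
#
# After write_cluster_files(), `clusters/example-cluster/` holds the cluster's
# inventory.yml, group_vars/all/settings.yml and .cluster.env files.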