Skip to content
Snippets Groups Projects
Verified Commit 41f0dc62 authored by Maarten de Waard's avatar Maarten de Waard :angel:
Browse files

first version of a CLI client, not working yet, CI not adjusted either

parent 13cbc4d4
No related branches found
No related tags found
No related merge requests found
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
/test/behave/behave.ini /test/behave/behave.ini
/test/behave/rerun_failing.features /test/behave/rerun_failing.features
/test/cluster_data/ /test/cluster_data/
/clusters
# Ignore files created during tests # Ignore files created during tests
/test/behave/**/screenshots/ /test/behave/**/screenshots/
...@@ -26,6 +27,7 @@ ...@@ -26,6 +27,7 @@
# Etc # Etc
__pycache__ __pycache__
*.swp *.swp
*.pyc
# Documentation files # Documentation files
/docs/_build /docs/_build
...@@ -32,27 +32,39 @@ Install requirements: ...@@ -32,27 +32,39 @@ Install requirements:
""" """
import argparse import argparse
import configparser
import logging import logging
import os import os
import random
import string
import sys import sys
import yaml from openappstack import name, cluster, cosmos, ansible
from openappstack import cosmos
from openappstack import ansible
SETTINGS_FILE = './group_vars/all/settings.yml'
def main(): # pylint: disable=too-many-statements,too-many-branches,too-many-locals def main(): # pylint: disable=too-many-statements,too-many-branches,too-many-locals
"""Do everything.""" """Do everything."""
# Parse command line arguments # Parse command line arguments
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='Run bootstrap script' prog=name,
description='Run bootstrap script '
'to deploy Openappstack to a given node.') 'to deploy Openappstack to a given node.')
group = parser.add_mutually_exclusive_group(required=True) parser.add_argument(
'cluster_name',
metavar='CLUSTER_NAME',
type=str,
help="Name of the cluster you want to use openappstack with")
subparsers = parser.add_subparsers(help="Available subcommands")
create_parser = subparsers.add_parser('create', help='Creates a new cluster')
create_parser.set_defaults(func=create)
create_parser.add_argument(
'domain',
metavar="DOMAIN_NAME",
help='Domain name to run OpenAppStack under')
group = create_parser.add_mutually_exclusive_group(required=True)
group.add_argument( group.add_argument(
'--create-droplet', '--create-droplet',
action='store_true', action='store_true',
...@@ -61,162 +73,152 @@ def main(): # pylint: disable=too-many-statements,too-many-branches,too-many-lo ...@@ -61,162 +73,152 @@ def main(): # pylint: disable=too-many-statements,too-many-branches,too-many-lo
'--droplet-id', '--droplet-id',
metavar='ID', metavar='ID',
type=int, type=int,
help='ID of droplet to deploy to') help='ID of existing Greenhost VPS to use')
group.add_argument(
'--use-existing-inventory',
action='store_true',
help='Assumes inventory.yml has already been generated')
parser.add_argument( droplet_creation_group = create_parser.add_argument_group(
'droplet creation',
'When using --create-droplet, you need to provide:')
droplet_creation_group.add_argument(
'--hostname', '--hostname',
type=str, type=str,
help='If called with --create-droplet, this is the machine hostname') help='hostname of the new machine. If not provided, a hostname is generated')
parser.add_argument( droplet_creation_group.add_argument(
'--ssh-key-id', '--ssh-key-id',
metavar='ID', metavar='ID',
type=int, type=int,
help='ID of ssh key to deploy with (default: 411)') help='ID of ssh key to deploy with (default: 411)')
droplet_creation_group.add_argument(
'--create-domain-records',
action='store_true',
help=('Creates DNS entries at Greenhost pointing the subdomain and '
'domain to the cluster.'))
droplet_creation_group.add_argument(
'--subdomain',
type=str,
metavar="SUBDOMAIN",
default="",
help=('Use a custom subdomain for the generated domain records. '
'Defaults to empty string (no subdomain)'))
install_parser = subparsers.add_parser(
'install',
help=("Use this to run the ansible playbook that sets up your VPS to run "
"OpenAppStack. The ansible-playbook process will run in the "
"'{ansible_path}' directory, so do not use relative paths with these "
"arguments.").format(ansible_path=ansible.ANSIBLE_PATH))
install_parser.set_defaults(func=install)
parser.add_argument( parser.add_argument(
'--terminate', '--terminate',
action='store_true', action='store_true',
help='Terminate droplet after deploy (shutdown and delete)') help=('Shutdown and delete droplet identified by VPS identifier after '
'finishing'))
parser.add_argument( parser.add_argument(
'-v',
'--verbose', '--verbose',
action='store_true', action='store_true',
help='Be more verbose') help='Be more verbose')
parser.add_argument( # Ansible related
install_parser.add_argument(
'--ansible-param', '--ansible-param',
metavar=['PARAM[=VALUE]'], metavar=['PARAM[=VALUE]'],
action='append', action='append',
nargs=1, nargs=1,
help=('forward ansible parameters to the ansible-playbook call ' help=('forward ansible parameters to the ansible-playbook call '
'(two dashes are prepended to PARAM)')) '(two dashes are prepended to PARAM)'))
parser.add_argument(
'--run-ansible',
action='store_true',
help='Runs the ansible bootstrap process')
parser.add_argument( test_parser = subparsers.add_parser(
'--create-domain-records', 'test',
type=str, help=("Write test configuration and run tests on your cluster"))
metavar="DOMAIN_NAME", test_parser.set_defaults(func=test)
help='Creates DNS entries for the cluster. Provide with a domain name')
parser.add_argument(
'--domain-record-subdomain',
type=str,
metavar="SUBDOMAIN",
default="",
help=('Use a custom subdomain for the generated domain records. '
'Defaults to empty string (no subdomain)'))
parser.add_argument( test_parser.add_argument(
'--write-behave-config', '--write-behave-config',
action='store_true', action='store_true',
help='Writes a configuration file for behave with cluster information') help='Writes a configuration file for behave with cluster information')
info_parser = subparsers.add_parser(
'info',
help=("Show information about a cluster"))
info_parser.set_defaults(func=info)
args = parser.parse_args() args = parser.parse_args()
verbose = args.verbose loglevel = logging.DEBUG if args.verbose else logging.INFO
loglevel = logging.DEBUG if verbose else logging.INFO
init_logging(log, loglevel) init_logging(log, loglevel)
# Setup logging for cosmos module # Setup logging for cosmos module
log_cosmos = logging.getLogger('cosmos') log_cosmos = logging.getLogger('cosmos')
init_logging(log_cosmos, loglevel) init_logging(log_cosmos, loglevel)
if not args.use_existing_inventory: log.debug("Parsed arguments: %s", str(args))
# Start bootstrapping
if args.create_droplet: clus = cluster.Cluster(args.cluster_name)
if not args.ssh_key_id: args.func(clus, args)
log.error("SSH Key id required when using --create-droplet")
if args.hostname:
hostname = args.hostname
else:
# Use random generated ID in case we're not running in
# gitlab CI and there's no CI_PIPELINE_ID env var
hostname = ''.join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(10))
log.info('Generated hostname %s', hostname)
droplet = cosmos.create_droplet(
name=hostname,
ssh_key_id=args.ssh_key_id,
region='ams1',
size=8192,
disk=20,
# image: 19 = Debian buster-x64
image=19)
droplet_id = droplet['droplet']['id']
log.info('Created droplet id: %s', droplet_id)
cosmos.wait_for_state(droplet_id, 'running')
else:
droplet_id = args.droplet_id
if verbose:
cosmos.list_droplets()
# Get droplet ip
droplet = cosmos.get_droplet(droplet_id)
droplet_ip = droplet['networks']['v4'][0]['ip_address']
droplet_name = droplet['name']
ansible.create_inventory(droplet_name, droplet_ip)
# Create settings if args.terminate:
with open('../ansible/group_vars/all/settings.yml.example', cosmos.terminate_droplets_by_name(clus.hostname)
'r') as stream:
settings = yaml.safe_load(stream)
settings['ip_address'] = droplet_ip def info(clus, _args):
settings['domain'] = droplet_name + '.ci.openappstack.net' """Shows cluster information and then exits"""
settings['admin_email'] = "admin@{0}".format(settings['domain']) clus.load_data()
settings['acme_staging'] = True clus.print_info()
# Make sure settings file directory exists
settings_file_dir = os.path.dirname(SETTINGS_FILE)
if not os.path.exists(settings_file_dir):
os.makedirs(settings_file_dir)
with open(SETTINGS_FILE, 'w') as stream: def create(clus, args):
yaml.safe_dump(settings, stream, default_flow_style=False) """Parses arguments for the "create" subcommand"""
clus = cluster.Cluster(args.cluster_name)
log.debug(yaml.safe_dump(settings, default_flow_style=False)) if args.create_droplet:
if not args.ssh_key_id:
log.error("SSH Key id required when using --create-droplet")
# Wait for ssh if args.subdomain:
cosmos.wait_for_ssh(droplet_ip) domain = "{subdomain}.{domain}".format(
subdomain=args.subdomain, domain=args.domain)
else: else:
# Work with the master node from the inventory domain = args.domain
with open(ansible.ANSIBLE_INVENTORY, 'r') as stream: clus.domain = domain
inventory = yaml.safe_load(stream) if args.create_droplet:
droplet_name = inventory['all']['children']['master']['hosts'] clus.create_droplet(*args)
droplet_ip = inventory['all']['hosts'][droplet_name]['ansible_host'] if args.verbose:
log.info("Read data from inventory:\n\tname: %s\n\tip: %s", cosmos.list_droplets()
droplet_name, droplet_ip) elif args.droplet_id:
clus.set_info_by_droplet_id(args.droplet_id)
# Write inventory.yml and settings.yml files
clus.write_cluster_files()
# For if write_behave_config is called later: # Wait for ssh
settings = None cosmos.wait_for_ssh(clus.ip_address)
if args.create_domain_records: if args.create_domain_records:
create_domain_records(args.create_domain_records, args.domain_record_subdomain, droplet_ip) create_domain_records(args.domain, args.subdomain, clus.ip_address)
if verbose: if args.verbose:
cosmos.list_domain_records('openappstack.net') cosmos.list_domain_records('openappstack.net')
if args.run_ansible:
ansible.run_ansible('./bootstrap.yml', args.ansible_param) def install(clus, args):
"""Parses arguments for the "install" subcommand"""
clus.load_data()
ansible.run_ansible(
clus,
os.path.join(ansible.ANSIBLE_PATH, 'bootstrap.yml'),
args.ansible_param)
def test(clus, args):
"""Parses arguments for the "test" subcommand"""
clus.load_data()
if args.write_behave_config: if args.write_behave_config:
write_behave_config(settings=settings) clus.write_behave_config()
if args.terminate:
cosmos.terminate_droplets_by_name(droplet_name)
def create_domain_records(domain, subdomain, droplet_ip): def create_domain_records(domain, subdomain, droplet_ip):
""" """
...@@ -234,44 +236,6 @@ def create_domain_records(domain, subdomain, droplet_ip): ...@@ -234,44 +236,6 @@ def create_domain_records(domain, subdomain, droplet_ip):
log.info("Domain record: %s", domain_record) log.info("Domain record: %s", domain_record)
def write_behave_config(settings=None):
    """Write behave config file for later use."""
    if settings is None:
        # No settings passed in: fall back to the on-disk settings file.
        with open(SETTINGS_FILE) as stream:
            settings = yaml.safe_load(stream)

    secrets = settings['secret_directory']

    def _read_secret(filename):
        # Each secret lives in its own single-value file under the
        # cluster's secret directory.
        with open(os.path.join(secrets, filename), 'r') as stream:
            return yaml.safe_load(stream)

    nextcloud_password = _read_secret('nextcloud_admin_password')
    grafana_password = _read_secret('grafana_admin_password')

    domain = settings['domain']
    config = configparser.ConfigParser()
    # Behave runner options: record failing features so they can be rerun.
    config['behave'] = {
        'format': 'rerun',
        'outfiles': 'rerun_failing.features',
        'show_skipped': 'false',
    }
    config.add_section('behave.userdata')
    userdata = config['behave.userdata']
    userdata['nextcloud.url'] = 'https://files.{}'.format(domain)
    userdata['nextcloud.username'] = 'admin'
    userdata['nextcloud.password'] = nextcloud_password
    userdata['grafana.url'] = 'https://grafana.{}'.format(domain)
    userdata['grafana.username'] = 'admin'
    userdata['grafana.password'] = grafana_password

    with open('./behave/behave.ini', 'w') as configfile:
        config.write(configfile)
def init_logging(logger, loglevel): def init_logging(logger, loglevel):
...@@ -285,6 +249,7 @@ def init_logging(logger, loglevel): ...@@ -285,6 +249,7 @@ def init_logging(logger, loglevel):
stdout = logging.StreamHandler(sys.stdout) stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(loglevel) stdout.setLevel(loglevel)
stdout.addFilter(lambda record: record.levelno <= logging.INFO) stdout.addFilter(lambda record: record.levelno <= logging.INFO)
logger.info("Set log level to %d", loglevel)
stderr = logging.StreamHandler() stderr = logging.StreamHandler()
stderr.setLevel(logging.WARNING) stderr.setLevel(logging.WARNING)
...@@ -295,6 +260,5 @@ def init_logging(logger, loglevel): ...@@ -295,6 +260,5 @@ def init_logging(logger, loglevel):
if __name__ == "__main__": if __name__ == "__main__":
# Setup logging for this script # Setup logging for this script
log = logging.getLogger(__name__) # pylint: disable=invalid-name log = logging.getLogger("openappstack") # pylint: disable=invalid-name
main() main()
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
Module responsible for running the Ansible part of the OpenAppStack setup. Module responsible for running the Ansible part of the OpenAppStack setup.
""" """
import logging import logging
import os
import shlex import shlex
import subprocess import subprocess
import sys import sys
...@@ -10,14 +11,26 @@ import yaml ...@@ -10,14 +11,26 @@ import yaml
log = logging.getLogger(__name__) # pylint: disable=invalid-name log = logging.getLogger(__name__) # pylint: disable=invalid-name
ANSIBLE_INVENTORY = './inventory.yml' ANSIBLE_INVENTORY = './clusters/{cluster_name}/inventory.yml'
ANSIBLE_PATH = os.path.join(os.path.dirname(__file__),
'..', "ansible")
def run_ansible(playbook, ansible_params): def run_ansible(clus, playbook, ansible_params=None):
"""Call `ansible-playbook` directly to run the specified playbook.""" """
Call `ansible-playbook` in a subprocess to run the specified playbook. Runs
in the package's ansible directory.
Parameters:
- playbook: path to the playbook to run.
- ansible_params: Optionally provide a list of lists with ansible params.
Each inner list may only contain one element. Can be directly forwarded
from argparse.
Example: ansible_params = [[become-user=root], [verbose]]
"""
# playbook path here is relative to private_data_dir/project, see # playbook path here is relative to private_data_dir/project, see
# https://ansible-runner.readthedocs.io/en/latest/intro.html#inputdir # https://ansible-runner.readthedocs.io/en/latest/intro.html#inputdir
ansible_playbook_cmd = 'ansible-playbook %s' % playbook ansible_param_string = ""
if ansible_params: if ansible_params:
for param in ansible_params: for param in ansible_params:
if len(param) > 1: if len(param) > 1:
...@@ -25,39 +38,53 @@ def run_ansible(playbook, ansible_params): ...@@ -25,39 +38,53 @@ def run_ansible(playbook, ansible_params):
'--ansible-param several times to supply ' '--ansible-param several times to supply '
'more than 1 parameter') 'more than 1 parameter')
param = param[0] param = param[0]
ansible_playbook_cmd += ' --' + param ansible_param_string += ' --' + param
ansible_playbook_cmd = \
('ansible-playbook {ansible_param_string} -i {inventory} '
'-e @{settings} {playbook}').format(
ansible_param_string=ansible_param_string,
inventory=clus.inventory_file,
settings=clus.settings_file,
playbook=playbook
)
log.info('Running %s', ansible_playbook_cmd) log.info('Running "%s" in ansible directory "%s"',
ansible_playbook_cmd,
ANSIBLE_PATH)
result = subprocess.run(shlex.split(ansible_playbook_cmd)) process = subprocess.Popen(
shlex.split(ansible_playbook_cmd),
cwd=ANSIBLE_PATH)
if result.returncode > 0: returncode = process.wait()
try:
raise RuntimeError('Playbook failed with rc %s.'
% result.returncode)
except RuntimeError:
traceback.print_exc()
sys.exit(result.returncode)
def create_inventory(droplet_name, droplet_ip): if returncode > 0:
raise RuntimeError('Playbook failed with rc %s.'
% returncode)
def create_inventory(cluster):
""" """
Creates inventory for the ansible playbook. Needs the droplet's hostname Creates inventory for the ansible playbook. Needs the droplet's hostname
for identification and the IP for connecting with Ansible for identification and the IP for connecting with Ansible
""" """
# Create inventory # Create inventory
with open('../ansible/inventory.yml.example', 'r') as stream: with open(os.path.join(ANSIBLE_PATH, "inventory.yml.example"),
'r') as stream:
inventory = yaml.safe_load(stream) inventory = yaml.safe_load(stream)
inventory['all']['hosts'][droplet_name] = \
inventory['all']['hosts'][cluster.hostname] = \
inventory['all']['hosts']['oas-dev'] inventory['all']['hosts']['oas-dev']
del inventory['all']['hosts']['oas-dev'] del inventory['all']['hosts']['oas-dev']
inventory['all']['hosts'][droplet_name]['ansible_host'] = droplet_ip inventory['all']['hosts'][cluster.hostname]['ansible_host'] = \
inventory['all']['hosts'][droplet_name]['ansible_ssh_extra_args'] = \ cluster.ip_address
inventory['all']['hosts'][cluster.hostname]['ansible_ssh_extra_args'] = \
'-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
inventory['all']['children']['master']['hosts'] = droplet_name inventory['all']['children']['master']['hosts'] = cluster.hostname
inventory['all']['children']['worker']['hosts'] = droplet_name inventory['all']['children']['worker']['hosts'] = cluster.hostname
with open(ANSIBLE_INVENTORY, 'w') as stream: with open(cluster.inventory_file, 'w') as stream:
yaml.safe_dump(inventory, stream, default_flow_style=False) yaml.safe_dump(inventory, stream, default_flow_style=False)
log.debug(yaml.safe_dump(inventory, default_flow_style=False)) log.debug(yaml.safe_dump(inventory, default_flow_style=False))
return inventory return inventory
"""Contains code for managing the files related to an OpenAppStack cluster"""
import configparser
import logging
import os
import random
import string
import yaml
from openappstack import ansible, cosmos
CLUSTER_PATH = os.path.join(os.getcwd(), "clusters")
log = logging.getLogger(__name__) # pylint: disable=invalid-name
class Cluster:
    """Helper class for an OpenAppStack cluster's paths, files and metadata.

    A cluster lives in ``CLUSTER_PATH/<name>/`` and consists of an ansible
    ``inventory.yml``, a ``settings.yml``, a ``behave.ini`` for tests and a
    ``secrets`` directory with admin passwords.
    """

    def __init__(self, cluster_name, load_data=False):
        """
        Parameters:
        - cluster_name: name of the cluster; also the name of its directory
          under CLUSTER_PATH.
        - load_data: when True, immediately read inventory.yml and
          settings.yml (they must already exist on disk).
        """
        self.name = cluster_name
        self.cluster_dir = os.path.join(CLUSTER_PATH, cluster_name)
        self.ip_address = None
        self.hostname = None
        self.domain = None
        # Load data from inventory.yml and settings.yml
        if load_data:
            self.load_data()

    def load_data(self):
        """Loads cluster data from inventory.yml and settings.yml files"""
        with open(self.settings_file, 'r') as stream:
            settings = yaml.safe_load(stream)
            self.ip_address = settings['ip_address']
            self.domain = settings['domain']

        log.debug("""Read data from settings.yml:
        ip address: %s
        domain: %s""", self.ip_address, self.domain)

        # Work with the master node from the inventory
        with open(self.inventory_file, 'r') as stream:
            inventory = yaml.safe_load(stream)
            self.hostname = inventory['all']['children']['master']['hosts']

        log.debug("Read data from inventory.yml:\n\thostname: %s", self.hostname)

    def create_droplet(self, ssh_key_id=0, hostname=None):
        """Uses the Cosmos API to create a droplet with OAS default spec

        Parameters:
        - ssh_key_id: Greenhost ID of the SSH key to install on the new VPS.
        - hostname: hostname for the new machine; when omitted, a random
          10-character name is generated.
        """
        if hostname is None:
            # Use random generated ID in case we're not running in
            # gitlab CI and there's no CI_PIPELINE_ID env var
            hostname = ''.join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in range(10))
            log.info('Generated hostname %s', hostname)
        droplet = cosmos.create_droplet(
            name=hostname,
            ssh_key_id=ssh_key_id,
            region='ams1',
            size=4096,
            disk=25,
            # image: 19 = Debian buster-x64
            image=19)
        droplet_id = droplet['droplet']['id']
        log.info('Created droplet id: %s', droplet_id)
        cosmos.wait_for_state(droplet_id, 'running')
        self.set_info_by_droplet_id(droplet_id)

    def set_info_by_droplet_id(self, droplet_id):
        """Sets info about the cluster based on the Greenhost VPS id"""
        droplet = cosmos.get_droplet(droplet_id)
        # NOTE(review): assumes the first v4 network entry is the public
        # address — confirm against the Cosmos API response format.
        self.ip_address = droplet['networks']['v4'][0]['ip_address']
        self.hostname = droplet['name']

    def write_cluster_files(self):
        """Creates an inventory.yml and settings.yml file for the cluster"""
        self.make_cluster_directory()
        ansible.create_inventory(self)

        # Create settings, starting from the example settings shipped with
        # the ansible playbooks.
        with open(os.path.join(ansible.ANSIBLE_PATH, "group_vars",
                               "all", "settings.yml.example"),
                  'r') as stream:
            settings = yaml.safe_load(stream)

        settings['ip_address'] = self.ip_address
        settings['domain'] = self.domain
        settings['admin_email'] = "admin@{0}".format(self.domain)
        settings['acme_staging'] = True
        settings['secret_directory'] = self.secret_dir

        with open(self.settings_file, 'w') as stream:
            yaml.safe_dump(settings, stream, default_flow_style=False)

        log.debug(yaml.safe_dump(settings, default_flow_style=False))

    def make_cluster_directory(self):
        """Make sure the cluster's file directory exists"""
        # exist_ok avoids the check-then-create race of a separate
        # os.path.exists() test.
        os.makedirs(self.cluster_dir, exist_ok=True)

    @property
    def inventory_file(self):
        """Path to the ansible inventory.yml for this cluster"""
        return os.path.join(self.cluster_dir, "inventory.yml")

    @property
    def settings_file(self):
        """Path to the ansible settings.yml for this cluster"""
        return os.path.join(self.cluster_dir, "settings.yml")

    @property
    def behave_file(self):
        """Path to 'behave.ini' which is used for acceptance tests"""
        return os.path.join(self.cluster_dir, "behave.ini")

    @property
    def secret_dir(self):
        """Path where all the passwords for cluster admins are saved"""
        return os.path.join(self.cluster_dir, 'secrets')

    def write_behave_config(self):
        """Write behave config file for the cluster."""
        secret_directory = self.secret_dir
        # Read the passwords as plain text. Parsing them with yaml.safe_load
        # would coerce values like "no" or "123" to non-string objects,
        # which configparser rejects with a TypeError.
        with open(os.path.join(
                secret_directory, 'nextcloud_admin_password'), 'r') as stream:
            nextcloud_admin_password = stream.read().strip()
        with open(os.path.join(
                secret_directory, 'grafana_admin_password'), 'r') as stream:
            grafana_admin_password = stream.read().strip()

        behave_config = configparser.ConfigParser()
        behave_config['behave'] = {}
        behave_config['behave']['format'] = 'rerun'
        behave_config['behave']['outfiles'] = 'rerun_failing.features'
        behave_config['behave']['show_skipped'] = 'false'
        behave_config['behave.userdata'] = {}
        behave_config['behave.userdata']['nextcloud.url'] = \
            'https://files.{}'.format(self.domain)
        behave_config['behave.userdata']['nextcloud.username'] = 'admin'
        behave_config['behave.userdata']['nextcloud.password'] = \
            nextcloud_admin_password
        behave_config['behave.userdata']['grafana.url'] = \
            'https://grafana.{}'.format(self.domain)
        behave_config['behave.userdata']['grafana.username'] = 'admin'
        behave_config['behave.userdata']['grafana.password'] = \
            grafana_admin_password

        with open(self.behave_file, 'w') as configfile:
            behave_config.write(configfile)

    def print_info(self):
        """Writes information about the cluster. Useful for debugging"""
        print(("""Cluster "{name}":
  - IP address: {ip_address}
  - Hostname: {hostname}
  - Domain: {domain}
Configuration:
  - Inventory file: {inventory_file}
  - Settings file: {settings_file}
  - Behave file: {behave_file}""").format(
      name=self.name,
      ip_address=self.ip_address,
      hostname=self.hostname,
      domain=self.domain,
      inventory_file=self.inventory_file,
      settings_file=self.settings_file,
      behave_file=self.behave_file))
../ansible/ansible.cfg
\ No newline at end of file
../ansible/bootstrap.yml
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment