diff --git a/.gitignore b/.gitignore
index 519ccbc11536d0325eff0fbb31639c1056560db1..88b8e1aba3119de64f1955aab1d6a8182ef66bf7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,6 @@
 # Ignore files created by ansible-playbook
 *.retry
-/ansible/cluster_data/
 
 # Virtualenvs for the project
 /env
 
@@ -18,7 +17,7 @@
 /test/inventory*
 /test/behave/behave.ini
 /test/behave/rerun_failing.features
-/test/cluster_data/
+/clusters
 
 # Ignore files created during tests
 /test/behave/**/screenshots/
@@ -26,6 +25,7 @@
 # Etc
 __pycache__
 *.swp
+*.pyc
 
 # Documentation files
 /docs/_build
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 42793d2d09660e9b21b1f93189862e1619306ee2..bdd63d4dac224e1a766afbf4f3f7bccbad31399f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -7,6 +7,14 @@ stages:
   - cleanup
 
 image: "${CI_REGISTRY_IMAGE}/openappstack-ci:${CI_COMMIT_REF_NAME}"
+variables:
+  SSH_KEY_ID: "411"
+  HOSTNAME: "ci-${CI_PIPELINE_ID}"
+  # Repeated values, because we're not allowed to use a variable in a variable
+  SUBDOMAIN: "ci-${CI_PIPELINE_ID}.ci"
+  DOMAIN: "openappstack.net"
+  ADDRESS: "ci-${CI_PIPELINE_ID}.ci.openappstack.net"
+  ANSIBLE_HOST_KEY_CHECKING: "False"
 
 ci_test_image:
   stage: build
@@ -17,12 +25,11 @@ ci_test_image:
     entrypoint: [""]
   script:
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
-    - /kaniko/executor --context ${CI_PROJECT_DIR}/test --dockerfile ${CI_PROJECT_DIR}/test/Dockerfile --destination $CI_REGISTRY_IMAGE/openappstack-ci:${CI_COMMIT_REF_NAME}
+    - /kaniko/executor --context ${CI_PROJECT_DIR} --dockerfile ${CI_PROJECT_DIR}/Dockerfile --destination $CI_REGISTRY_IMAGE/openappstack-ci:${CI_COMMIT_REF_NAME}
   only:
     changes:
-      - .gitlab-ci.yml
-      - test/Dockerfile
-      - test/requirements.txt
+      - Dockerfile
+      - requirements.txt
 
 bootstrap:
   stage: setup-cluster
@@ -31,17 +38,15 @@ bootstrap:
   script:
     # Ensure test/ is not world-writable otherwise ansible-playbook refuses to run, see
    # https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
-    - chmod 755 test/
-    - cd test/
+    - chmod 755 ansible/
     - eval $(ssh-agent -s)
     - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - ANSIBLE_HOST_KEY_CHECKING=False python3 -u ./ci-bootstrap.py --create-droplet --create-domain-records --run-ansible --ansible-param skip-tags=helmfile
+    - echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS"
+    - python3 -m openappstack $HOSTNAME create --create-droplet $DOMAIN --hostname $HOSTNAME --ssh-key-id $SSH_KEY_ID --create-domain-records --subdomain $SUBDOMAIN
+    - python3 -m openappstack $HOSTNAME install --ansible-param='--skip-tags=helmfile'
   artifacts:
     paths:
-      - test/cluster_data/rke.log
-      - test/cluster_data/helmfile.log
-      - test/inventory.yml
-      - test/group_vars/all/settings.yml
+      - ./clusters
     expire_in: 1 month
     when: always
   only:
@@ -50,6 +55,8 @@
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+      - .gitlab-ci.yml
 
 install:
   stage: install-apps
@@ -58,16 +65,15 @@ install:
   script:
     # Ensure test/ is not world-writable otherwise ansible-playbook refuses to run, see
    # https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
-    - chmod 755 test/
-    - cd test/
+    - chmod 755 ansible/
     - eval $(ssh-agent -s)
     - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - python3 -u ./ci-bootstrap.py --use-existing-inventory --run-ansible --ansible-param tags=helmfile --write-behave-config
+    - python3 -m openappstack $HOSTNAME install --ansible-param='--tags=helmfile'
     # Show versions of installed apps/binaries
     - ansible master -m shell -a 'oas-version-info.sh 2>&1'
   artifacts:
     paths:
-      - test/behave/behave.ini
+      - ./clusters
    expire_in: 1 month
   only:
     changes:
@@ -75,47 +81,50 @@ install:
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+      - .gitlab-ci.yml
 
 testinfra:
   stage: health-test
   script:
+    - mkdir ~/.ssh
     - eval $(ssh-agent -s)
     - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - cd test/
-    - py.test -v -m 'testinfra' --connection=ansible --ansible-inventory=./inventory.yml --hosts='ansible://*'
+    - echo -e 'Host *\n  stricthostkeychecking no' > ~/.ssh/config
+    - cd ansible/
+    - pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=../clusters/$HOSTNAME/inventory.yml --hosts='ansible://*'
   only:
     changes:
       - .gitlab-ci.yml
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+      - .gitlab-ci.yml
 
 certs:
   stage: health-test
-  variables:
-    OAS_DOMAIN: 'ci-${CI_PIPELINE_ID}.ci.openappstack.net'
   allow_failure: true
   script:
+    - mkdir ~/.ssh
     - eval $(ssh-agent -s)
     - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - cd test/
-    - py.test -s -m 'certs' --connection=ansible --ansible-inventory=./inventory.yml --hosts='ansible://*'
+    - echo -e 'Host *\n  stricthostkeychecking no' > ~/.ssh/config
+    - cd ansible/
+    - pytest -s -m 'certs' --connection=ansible --ansible-inventory=../clusters/$HOSTNAME/inventory.yml --hosts='ansible://*'
   only:
     changes:
       - .gitlab-ci.yml
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+      - .gitlab-ci.yml
 
 behave-nextcloud:
   stage: integration-test
   script:
-    # Debug failing CI caches which happened in the past
-    - find . -name behave.ini
-    - ls -al test/behave
-    - grep -v 'password' test/behave/behave.ini
-    - cd test/behave/
-    - behave -D headless=True -t nextcloud || behave -D headless=True @rerun_failing.features -t nextcloud
+    - python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags nextcloud || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags nextcloud
   artifacts:
     paths:
       - test/behave/screenshots/
@@ -128,16 +137,13 @@ behave-nextcloud:
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+      - .gitlab-ci.yml
 
 behave-grafana:
   stage: integration-test
   script:
-    # Debug failing CI caches which happened in the past
-    - find . -name behave.ini
-    - ls -al test/behave
-    - grep -v 'password' test/behave/behave.ini
-    - cd test/behave/
-    - behave -D headless=True -t grafana || behave -D headless=True @rerun_failing.features -t grafana
+    - python3 -m openappstack $HOSTNAME test --behave-headless --behave-tags grafana || python3 -m openappstack $HOSTNAME test --behave-headless --behave-rerun-failing --behave-tags grafana
   artifacts:
     paths:
       - test/behave/screenshots/
@@ -149,13 +155,14 @@
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+      - .gitlab-ci.yml
 
 terminate:
   stage: cleanup
   script:
     # Remove droplet after successful tests
-    - cd test/
-    - echo "$CI_COMMIT_MESSAGE" | grep '!ci_dont_terminate' && echo 'Termination of droplet disabled in commit message.' || python3 -u ./ci-bootstrap.py --use-existing-inventory --terminate
+    - echo "$CI_COMMIT_MESSAGE" | grep '!ci_dont_terminate' && echo 'Termination of droplet disabled in commit message.' || python3 -m openappstack $HOSTNAME --terminate-droplet
     # Remove droplet older than 2 days
     - python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^ci-\", 2)"
   only:
@@ -164,6 +171,8 @@ terminate:
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+      - openappstack/**/*
+      - .gitlab-ci.yml
 
 # This trivial job works around a Gitlab bug: if no job runs at all due to
 # `only`, Gitlab gets confused and doesn't allow you to merge the MR:
@@ -171,4 +180,4 @@
 gitlab-merge-workaround:
   stage: cleanup
   script:
-    - echo "That went well."
+    - echo "That went well"
diff --git a/test/Dockerfile b/Dockerfile
similarity index 89%
rename from test/Dockerfile
rename to Dockerfile
index db990ab4fafa440a51721ec63236f37712cdccda..5c818f390215af99c1abdef9c66fc0a563bbfbc0 100644
--- a/test/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
 FROM alpine:3.9
 
-LABEL name="Openappstack bootstrap CI test image"
+LABEL name="OpenAppStack management"
 LABEL version="4.0"
 LABEL vendor1="Greenhost"
 
diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml
index 4b23ebb7d69bdf9fcfeaa39dfb1dc6e58106c1e6..6d10434b131fe62383bd8f2cd89128a5b1cb4561 100644
--- a/ansible/bootstrap.yml
+++ b/ansible/bootstrap.yml
@@ -16,6 +16,12 @@
         msg: >
           "Release name ({{ release_name }}) should start with lower case."
 
+    - name: cluster_dir variable is needed
+      assert:
+        that: "cluster_dir is defined"
+        msg: >
+          "Please define the variable `cluster_dir`."
+
 - name: Configure all hosts
   hosts: all
   # We use `mitogen_linear` as default strategy. However,
diff --git a/ansible/group_vars/all/oas.yml b/ansible/group_vars/all/oas.yml
index 9acf99a0c6b702db749f178f9939b16aa0ee3ba1..4f6f0a416cbf01736cc7ae21731521fbbaf73dbf 100644
--- a/ansible/group_vars/all/oas.yml
+++ b/ansible/group_vars/all/oas.yml
@@ -8,13 +8,18 @@ log_directory: "/var/log/OpenAppStack"
 # Use python3 on cluster nodes for ansible
 ansible_python_interpreter: "/usr/bin/env python3"
 
-# Nextcloud administrator password. If you do not change this value, it gets
-# generated and stored in `{{ secret_directory }}/nextcloud_admin_password`.
-# You can also choose your own password and fill it in here instead.
-nextcloud_password: "{{ lookup('password', '{{ secret_directory }}/nextcloud_admin_password chars=ascii_letters') }}"
-nextcloud_mariadb_password: "{{ lookup('password', '{{ secret_directory }}/nextcloud_mariadb_password chars=ascii_letters') }}"
-nextcloud_mariadb_root_password: "{{ lookup('password', '{{ secret_directory }}/nextcloud_mariadb_root_password chars=ascii_letters') }}"
-grafana_admin_password: "{{ lookup('password', '{{ secret_directory }}/grafana_admin_password chars=ascii_letters') }}"
+# Application passwords. If you do not change the default values, they get
+# generated and stored in the `clusters/CLUSTERNAME/secrets/` directory.
+# You can also choose your own passwords and fill them in here instead.
+
+# Nextcloud administrator password
+nextcloud_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_admin_password chars=ascii_letters') }}"
+# MariaDB password for the Nextcloud database
+nextcloud_mariadb_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_mariadb_password chars=ascii_letters') }}"
+# MariaDB root password for the Nextcloud database server
+nextcloud_mariadb_root_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/nextcloud_mariadb_root_password chars=ascii_letters') }}"
+# Grafana administrator password
+grafana_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/grafana_admin_password chars=ascii_letters') }}"
 
 # Kubernetes version
 kubernetes_version: "v1.14.3-rancher1-1"
diff --git a/ansible/group_vars/all/settings.yml.example b/ansible/group_vars/all/settings.yml.example
index 6e84e7b5fa8624a4921a0a04b6ae86194c960b49..b5e08491e872f38a286ed35c61ddf4b7b4f4c0fe 100644
--- a/ansible/group_vars/all/settings.yml.example
+++ b/ansible/group_vars/all/settings.yml.example
@@ -12,8 +12,6 @@ release_name: "test"
 # `true` when you are testing something.
 # Important: Don't quote this variable !
 acme_staging: false
-# Directory where we save all the passwords etc.
-secret_directory: "./cluster_data/secrets"
 # Which apps to install from the helmfile.d/ dir
 helmfiles:
   - 00-storage
diff --git a/ansible/pytest b/ansible/pytest
new file mode 120000
index 0000000000000000000000000000000000000000..f8833a455ace67ab8a260440fb7acc93555f64a4
--- /dev/null
+++ b/ansible/pytest
@@ -0,0 +1 @@
+../test/pytest
\ No newline at end of file
diff --git a/ansible/pytest.ini b/ansible/pytest.ini
new file mode 120000
index 0000000000000000000000000000000000000000..26fb6a77a75156deb6ac6dbe7d9300bf257ffd43
--- /dev/null
+++ b/ansible/pytest.ini
@@ -0,0 +1 @@
+../test/pytest.ini
\ No newline at end of file
diff --git a/ansible/roles/finalize/tasks/sync_cluster_data.yml b/ansible/roles/finalize/tasks/sync_cluster_data.yml
index 33ea808939cb0043951a791cac18143f53ad9e82..523b38a933ca02771beb4088786bff8313646454 100644
--- a/ansible/roles/finalize/tasks/sync_cluster_data.yml
+++ b/ansible/roles/finalize/tasks/sync_cluster_data.yml
@@ -11,10 +11,10 @@
     flat: yes
   loop:
     - src: "{{ data_directory }}/rke/kube_config_cluster.yml"
-      dest: "{{ secret_directory }}/kube_config_cluster.yml"
+      dest: "{{ cluster_dir }}/secrets/kube_config_cluster.yml"
     - src: "{{ log_directory }}/rke.log"
-      dest: cluster_data/rke.log
+      dest: "{{ cluster_dir }}/cluster_data/rke.log"
    - src: "{{ data_directory }}/rke/cluster.yml"
-      dest: cluster_data/rke_cluster.yml
+      dest: "{{ cluster_dir }}/cluster_data/rke_cluster.yml"
     - src: "{{ log_directory }}/helmfile.log"
-      dest: cluster_data/helmfile.log
+      dest: "{{ cluster_dir }}/cluster_data/helmfile.log"
diff --git a/openappstack/__init__.py b/openappstack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a85c322e6af4271383833a7a63dc8bf43c760de4
--- /dev/null
+++ b/openappstack/__init__.py
@@ -0,0 +1 @@
+name='openappstack'
diff --git a/openappstack/__main__.py b/openappstack/__main__.py
new file mode 100755
index 0000000000000000000000000000000000000000..887b1298d6d6a611e903af1cd0b2ade5646c51bc
--- /dev/null
+++ b/openappstack/__main__.py
@@ -0,0 +1,360 @@
+#!/usr/bin/env python3
+r"""
+Used by CI to bootstrap a new cluster and run tests.
+
+Env vars needed:
+- COSMOS_API_TOKEN
+
+Env vars optional:
+- NO_TERMINATE_DROPLETS: list of droplet ids that should not be
+  removed when deleting droplets.
+
+Install requirements:
+
+- On Alpine, install with:
+
+    apk --no-cache add python3-dev build-base libffi-dev linux-headers \
+        openssl-dev openssh-client
+    pip3 install -r requirements.txt
+"""
+
+import argparse
+import logging
+import os
+import sys
+from behave.__main__ import main as behave_main
+from openappstack import name, cluster, cosmos, ansible
+
+ALL_TESTS = ['behave']
+
+
+def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
+    """
+    Parse arguments and start program. Depending on the arguments parsed,
+    one of three functions is called:
+
+    - create, responsible for setting up VPSs and their local configuration
+      files
+    - install, responsible for setting up Kubernetes and OpenAppStack on those
+      VPSs
+    - test, responsible for testing if all the setup steps worked.
+
+    Run python3 -m openappstack --help for more information.
+    """
+    # Parse command line arguments
+    parser = argparse.ArgumentParser(
+        prog=name,
+        description='Run bootstrap script '
+                    'to deploy OpenAppStack to a given node.')
+
+    parser.add_argument(
+        'cluster_name',
+        metavar='CLUSTER_NAME',
+        type=str,
+        help='Name of the cluster you want to use openappstack with')
+
+    subparsers = parser.add_subparsers(help='Available subcommands')
+
+    create_parser = subparsers.add_parser('create', help='Creates a new cluster')
+    create_parser.set_defaults(func=create)
+
+    create_parser.add_argument(
+        'domain',
+        metavar='DOMAIN_NAME',
+        help='Domain name to run OpenAppStack under')
+
+    group = create_parser.add_mutually_exclusive_group(required=True)
+
+    group.add_argument(
+        '--create-droplet',
+        action='store_true',
+        help='Create droplet automatically')
+    group.add_argument(
+        '--droplet-id',
+        metavar='ID',
+        type=int,
+        help='ID of existing Greenhost VPS to use')
+
+    droplet_creation_group = create_parser.add_argument_group(
+        'droplet creation',
+        'When using --create-droplet, you need to provide:')
+    droplet_creation_group.add_argument(
+        '--hostname',
+        type=str,
+        help='hostname of the new machine. If not provided, a hostname is generated')
+
+    droplet_creation_group.add_argument(
+        '--ssh-key-id',
+        metavar='ID',
+        type=int,
+        help='Greenhost-specific database ID of ssh key to deploy with')
+
+    droplet_creation_group.add_argument(
+        '--create-domain-records',
+        action='store_true',
+        help=('Creates DNS entries at Greenhost pointing the subdomain and '
+              'domain to the cluster.'))
+
+    droplet_creation_group.add_argument(
+        '--subdomain',
+        type=str,
+        metavar='SUBDOMAIN',
+        help=('Use a custom subdomain for the generated domain records. '
+              'Defaults to no subdomain'))
+
+    droplet_creation_group.add_argument(
+        '--acme-live-environment',
+        action='store_true',
+        help=("Use this for production clusters. Uses live Let's Encrypt "
+              'environment instead of staging'))
+
+    install_parser = subparsers.add_parser(
+        'install',
+        help=("Use this to run the ansible playbook that sets up your VPS to run "
+              "OpenAppStack. The ansible-playbook process will run in the "
+              "'{ansible_path}' directory, so do not use relative paths with these "
+              "arguments.").format(ansible_path=ansible.ANSIBLE_PATH))
+    install_parser.set_defaults(func=install)
+
+    parser.add_argument(
+        '--terminate-droplet',
+        action='store_true',
+        help=('Shutdown AND DELETE droplet identified by VPS identifier after '
+              'finishing'))
+
+    parser.add_argument(
+        '-v',
+        '--verbose',
+        action='store_true',
+        help='Be more verbose')
+
+    # Ansible related
+    install_parser.add_argument(
+        '--ansible-param',
+        metavar='PARAM[=VALUE]',
+        action='append',
+        nargs=1,
+        help='forward ansible parameters to the ansible-playbook call')
+
+    test_parser = subparsers.add_parser(
+        'test',
+        help=("Write test configuration and run tests on your cluster"))
+
+    test_parser.set_defaults(func=test)
+
+    test_parser.add_argument(
+        '--run-test',
+        action='append',
+        help=('Run only a specific kind of test. If not provided, all tests '
+              'are run. The currently available tests are: ') \
+              + ','.join(ALL_TESTS))
+
+    test_parser.add_argument(
+        '--behave-rerun-failing',
+        action='store_true',
+        help=('Run behave with @rerun_failing.features'))
+    test_parser.add_argument(
+        '--behave-tags',
+        nargs='+',
+        default=[],
+        help=('Only run behave tests with these tags'))
+    test_parser.add_argument(
+        '--behave-headless',
+        action='store_true',
+        help=('Run behave in headless mode'))
+
+    info_parser = subparsers.add_parser(
+        'info',
+        help=("Show information about a cluster"))
+
+    info_parser.set_defaults(func=info)
+
+    args = parser.parse_args()
+    loglevel = logging.DEBUG if args.verbose else logging.INFO
+    init_logging(log, loglevel)
+
+    # Setup logging for cosmos module
+    log_cosmos = logging.getLogger('cosmos')
+    init_logging(log_cosmos, loglevel)
+
+    log.debug("Parsed arguments: %s", str(args))
+
+    clus = cluster.Cluster(args.cluster_name)
+
+    if hasattr(args, 'func'):
+        args.func(clus, args)
+
+    if args.terminate_droplet:
+        # In case none of the subparser's functions have been called, load data
+        clus.load_data()
+        cosmos.terminate_droplets_by_name('^{}$'.format(clus.hostname))
+
+    if not hasattr(args, 'func') and not args.terminate_droplet:
+        parser.print_help()
+        sys.exit(1)
+
+def info(clus, _args):
+    """
+    Shows cluster information and then exits.
+
+    :param cluster.Cluster clus: cluster to show information about
+    :param argparse.Namespace _args: ignored, added for compatibility with
+        create, install and test functions.
+ """ + clus.load_data() + clus.print_info() + + +def create(clus, args): + """ + Parses arguments for the 'create' subcommand + + :param cluster.Cluster clus: Cluster object to edit + :param argparse.Namespace args: Command line arguments + """ + clus = cluster.Cluster(args.cluster_name) + + if args.create_droplet: + if not args.ssh_key_id: + log.error('--ssh-key-id required when using --create-droplet') + sys.exit(1) + + if args.subdomain: + domain = '{subdomain}.{domain}'.format( + subdomain=args.subdomain, domain=args.domain) + else: + domain = args.domain + clus.domain = domain + + # Set acme_staging to False so we use Let's Encrypt's live environment + if args.acme_live_environment: + clus.acme_staging = False + if args.create_droplet: + clus.create_droplet(ssh_key_id=args.ssh_key_id, hostname=args.hostname) + if args.verbose: + cosmos.list_droplets() + # Wait for ssh + cosmos.wait_for_ssh(clus.ip_address) + elif args.droplet_id: + clus.set_info_by_droplet_id(args.droplet_id) + + # Write inventory.yml and settings.yml files + clus.write_cluster_files() + + if args.create_domain_records: + create_domain_records( + args.domain, clus.ip_address, subdomain=args.subdomain) + if args.verbose: + cosmos.list_domain_records(args.domain) + + +def install(clus, args): + """ + Parses arguments for the 'install' subcommand + + :param cluster.Cluster clus: Cluster object to install OAS to + :param argparse.Namespace args: Command line arguments + """ + clus.load_data() + ansible.run_ansible( + clus, + os.path.join(ansible.ANSIBLE_PATH, 'bootstrap.yml'), + args.ansible_param) + + +def test(clus, args): + """ + Runs behave or testinfra test. Overwrites behave_path/behave.ini! + + :param cluster.Cluster clus: Cluster object to run tests on + :param argparse.Namespace args: Command line arguments + """ + + # At the moment we only have one type if test, but more tests will be added + # to this in the future. If args.run_test is empty, we run all the tests + run_test = args.run_test + if not run_test: + run_test = ALL_TESTS + + if 'behave' in run_test: + behave_path = os.path.join(os.path.dirname(__file__), '..', 'test', + 'behave') + # Run from the behave directory so behave automatically loads all the + # necessary files + os.chdir(behave_path) + clus.load_data() + behave_ini = os.path.join(behave_path, 'behave.ini') + clus.write_behave_config(behave_ini) + command = [] + if args.behave_rerun_failing: + command.append('@rerun_failing.features') + if args.behave_headless: + command.append('-D headless=True') + for tag in args.behave_tags: + log.info(command) + command.append('-t {tag}'.format(tag=tag)) + log.info('Running behave command %s', command) + behave_main(command) + + # Remove behave.ini so we don't leave secrets hanging around. + os.remove(behave_ini) + + +def create_domain_records(domain, droplet_ip, subdomain=None): + """ + Creates 2 domain records at Greenhost. An A record at subdomain.domain, + pointing to droplet_ip and a CNAME record pointing *.subdomain.domain to + the first record. + + :param str domain: the domain the cluster will be hosted on. + :param str droplet_ip: The IP address the A record will point to. + :param subdomain: Optional subdomain to host OAS on. 
+ :type subdomain: str or None + """ + subdomain_arg = subdomain + if subdomain is None: + subdomain_arg = "@" + + domain_record = cosmos.create_domain_record( + domain=domain, name=subdomain_arg, + data=droplet_ip, record_type='A', update=True) + log.info('Domain record: %s', domain_record) + + subdomain_arg = '*' + if subdomain is not None: + subdomain_arg += '.' + subdomain + + domain_record = cosmos.create_domain_record( + domain=domain, name=subdomain_arg, + data=subdomain, record_type='CNAME', update=True) + log.info('Domain record: %s', domain_record) + + +def init_logging(logger, loglevel): + """ + Configure logging. + + - debug and info go to stdout + - warning and above go to stderr + + :param logger: Logger to initialise + :param int loglevel: Log level from 0 to 50 (use logging.WARNING, + logging.INFO, etc.) + """ + logger.setLevel(loglevel) + stdout = logging.StreamHandler(sys.stdout) + stdout.setLevel(loglevel) + stdout.addFilter(lambda record: record.levelno <= logging.INFO) + logger.info('Set log level to %d', loglevel) + + stderr = logging.StreamHandler() + stderr.setLevel(logging.WARNING) + + logger.addHandler(stdout) + logger.addHandler(stderr) + + +if __name__ == '__main__': + # Setup logging for this script + log = logging.getLogger('openappstack') # pylint: disable=invalid-name + main() diff --git a/openappstack/ansible.py b/openappstack/ansible.py new file mode 100644 index 0000000000000000000000000000000000000000..ff954249a5aec02f2471681af9a52312ff7ebefc --- /dev/null +++ b/openappstack/ansible.py @@ -0,0 +1,83 @@ +""" +Module responsible for running the Ansible part of the OpenAppStack setup. +""" +import logging +import os +import subprocess +import yaml + +log = logging.getLogger(__name__) # pylint: disable=invalid-name + +ANSIBLE_INVENTORY = './clusters/{cluster_name}/inventory.yml' +ANSIBLE_PATH = os.path.join(os.path.dirname(__file__), + '..', 'ansible') + +def run_ansible(clus, playbook, ansible_params=None): + """ + Call `ansible-playbook` in a subprocess to run the specified playbook. Runs + in the package's ansible directory. + + :param str playbook: path to the playbook to run. + :param list ansible_params: Optionally provide a list of lists with ansible + params. Each inner list may only contain one element. Can be directly + forwarded from argparse. Example: + `ansible_params = [[become-user=root], [verbose]]` + """ + # playbook path here is relative to private_data_dir/project, see + # https://ansible-runner.readthedocs.io/en/latest/intro.html#inputdir + ansible_playbook_command = ['ansible-playbook'] + if ansible_params: + for param in ansible_params: + if len(param) > 1: + log.warning('More than 1 parameter. Ignoring the rest! Use ' + '--ansible-param several times to supply ' + 'more than 1 parameter') + param = param[0] + ansible_playbook_command.append(param) + ansible_playbook_command += \ + ['-e', 'cluster_dir=' + clus.cluster_dir] + + ansible_playbook_command += \ + ['-i', clus.inventory_file, '-e', '@' + clus.settings_file, playbook] + + log.info('Running "%s" in ansible directory "%s"', + ansible_playbook_command, + ANSIBLE_PATH) + + process = subprocess.Popen( + ansible_playbook_command, + cwd=ANSIBLE_PATH) + + returncode = process.wait() + + if returncode > 0: + raise RuntimeError('Playbook failed with rc %s.' + % returncode) + +def create_inventory(cluster): + """ + Creates inventory for the ansible playbook. 
+    for identification and the IP for connecting with Ansible
+
+    :param cluster.Cluster cluster: Cluster object for which the inventory file
+        will be written. Used for getting hostname and IP address.
+    """
+    # Create inventory
+    with open(os.path.join(ANSIBLE_PATH, 'inventory.yml.example'),
+              'r') as stream:
+        inventory = yaml.safe_load(stream)
+
+    inventory['all']['hosts'][cluster.hostname] = \
+        inventory['all']['hosts']['oas-dev']
+    del inventory['all']['hosts']['oas-dev']
+
+    inventory['all']['hosts'][cluster.hostname]['ansible_host'] = \
+        cluster.ip_address
+    inventory['all']['children']['master']['hosts'] = cluster.hostname
+    inventory['all']['children']['worker']['hosts'] = cluster.hostname
+
+    file_contents = yaml.safe_dump(inventory, default_flow_style=False)
+    log.debug(file_contents)
+    with open(cluster.inventory_file, 'w') as stream:
+        stream.write(file_contents)
+    return inventory
diff --git a/openappstack/cluster.py b/openappstack/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bf626fc5c7efcc2951ef22d7b5eec0b7e737fc8
--- /dev/null
+++ b/openappstack/cluster.py
@@ -0,0 +1,233 @@
+"""Contains code for managing the files related to an OpenAppStack cluster"""
+
+import configparser
+import logging
+import os
+import random
+import string
+import sys
+import yaml
+from openappstack import ansible, cosmos
+
+CLUSTER_PATH = os.path.join(os.getcwd(), 'clusters')
+
+log = logging.getLogger(__name__)  # pylint: disable=invalid-name
+
+"""Greenhost region where VPS will be started with create_droplet"""
+DEFAULT_REGION = 'ams1'
+"""Default disk size"""
+DEFAULT_DISK_SIZE_GB = 25
+"""Default amount of memory"""
+DEFAULT_MEMORY_SIZE_MB = 6144
+"""Default "image" (operating system): 19 = Debian buster-x64"""
+DEFAULT_IMAGE = 19
+
+class Cluster:
+    """
+    Helper class for cluster-related paths, files, etc.
+
+    :param str cluster_name: Identifier of the cluster. A folder in
+        CLUSTER_PATH will be created with this name.
+    :param bool load_data: If this is true, `load_data` function is called at
+        the end of the constructor.
+    """
+
+    def __init__(self, cluster_name, load_data=False):
+        self.name = cluster_name
+        self.cluster_dir = os.path.join(CLUSTER_PATH, cluster_name)
+        self.ip_address = None
+        self.hostname = None
+        self.domain = None
+        # By default, use Let's Encrypt's staging environment
+        self.acme_staging = True
+        # Set this to False if the data needs to be (re)loaded from file
+        self.data_loaded = False
+        # Load data from inventory.yml and settings.yml
+        if load_data:
+            self.load_data()
+
+    def load_data(self):
+        """
+        Loads cluster data from inventory.yml and settings.yml files
+
+        Set self.data_loaded to False if this function should re-read data
+        from file.
+        """
+        if not self.data_loaded:
+            with open(self.settings_file, 'r') as stream:
+                settings = yaml.safe_load(stream)
+                self.ip_address = settings['ip_address']
+                self.domain = settings['domain']
+
+            log.debug("""Read data from settings.yml:
+            ip address: %s
+            domain: %s""", self.ip_address, self.domain)
+
+            with open(self.inventory_file, 'r') as stream:
+                inventory = yaml.safe_load(stream)
+                # Work with the master node from the inventory
+                self.hostname = inventory['all']['children']['master']['hosts']
+
+            log.debug('Read data from inventory.yml:\n\thostname: %s', self.hostname)
+        else:
+            log.debug('Not loading cluster data from file. Set '
+                      'Cluster.data_loaded to False if you want a reload.')
+        self.data_loaded = True
+
+    def create_droplet(self, ssh_key_id=0, hostname=None):
+        """
+        Uses the Cosmos API to create a droplet with OAS default spec
+
+        :param int ssh_key_id: SSH key ID in Greenhost Cosmos.
+        :param str hostname: hostname of the droplet created at Greenhost.
+            If not provided, a hostname will be auto-generated.
+        """
+        if hostname is None:
+            # Use random generated ID in case we're not running in
+            # gitlab CI and there's no CI_PIPELINE_ID env var
+            hostname = ''.join(
+                random.choice(string.ascii_lowercase + string.digits)
+                for _ in range(10))
+            log.info('Generated hostname %s', hostname)
+        droplet = cosmos.create_droplet(
+            name=hostname,
+            ssh_key_id=ssh_key_id,
+            region=DEFAULT_REGION,
+            size=DEFAULT_MEMORY_SIZE_MB,
+            disk=DEFAULT_DISK_SIZE_GB,
+            image=DEFAULT_IMAGE)
+        droplet_id = droplet['droplet']['id']
+        log.info('Created droplet id: %s', droplet_id)
+        cosmos.wait_for_state(droplet_id, 'running')
+        self.set_info_by_droplet_id(droplet_id)
+
+    def set_info_by_droplet_id(self, droplet_id):
+        """
+        Sets info about the cluster based on the Greenhost VPS id
+
+        :param int droplet_id: Droplet ID at Greenhost
+        """
+        droplet = cosmos.get_droplet(droplet_id)
+        self.ip_address = droplet['networks']['v4'][0]['ip_address']
+        self.hostname = droplet['name']
+
+    def write_cluster_files(self):
+        """Creates an inventory.yml and settings.yml file for the cluster"""
+        self.make_cluster_directory()
+        ansible.create_inventory(self)
+
+        # Create settings
+        with open(os.path.join(ansible.ANSIBLE_PATH, 'group_vars',
+                               'all', 'settings.yml.example'),
+                  'r') as stream:
+            settings = yaml.safe_load(stream)
+
+        settings['ip_address'] = self.ip_address
+        settings['domain'] = self.domain
+        settings['admin_email'] = 'admin@{0}'.format(self.domain)
+        settings['acme_staging'] = self.acme_staging
+        settings['cluster_dir'] = self.cluster_dir
+
+        file_contents = yaml.safe_dump(settings, default_flow_style=False)
+        log.debug(file_contents)
+        with open(self.settings_file, 'w') as stream:
+            stream.write(file_contents)
+
+        # Set self.data_loaded to True because the data in the class now
+        # reflects the data in the file.
+        self.data_loaded = True
+
+    def make_cluster_directory(self):
+        """Make sure the cluster's file directory exists"""
+        os.makedirs(self.cluster_dir, exist_ok=True)
+
+    @property
+    def inventory_file(self):
+        """Path to the ansible inventory.yml for this cluster"""
+        return os.path.join(self.cluster_dir, 'inventory.yml')
+
+    @property
+    def settings_file(self):
+        """Path to the ansible settings.yml for this cluster"""
+        return os.path.join(self.cluster_dir, 'settings.yml')
+
+    @property
+    def behave_file(self):
+        """Path to 'behave.ini' which is used for acceptance tests"""
+        return os.path.join(self.cluster_dir, 'behave.ini')
+
+    @property
+    def secret_dir(self):
+        """Path where all the passwords for cluster admins are saved"""
+        return os.path.join(self.cluster_dir, 'secrets')
+
+    def write_behave_config(self, config_path):
+        """
+        Write behave config file for the cluster.
+
+        :param str config_path: Configuration is written to config_path (e.g.
+            /home/you/openappstack/test/behave.ini). If config_path already
+            exists, the program is aborted.
+        """
+        if os.path.isfile(config_path):
+            log.error('%s file already exists, not overwriting '
+                      'file! Remove the file if you want to run behave. '
+                      'Program will exit now', config_path)
+            sys.exit(2)
+        secret_directory = self.secret_dir
+        with open(os.path.join(
+                secret_directory, 'nextcloud_admin_password'), 'r') as stream:
+            nextcloud_admin_password = yaml.safe_load(stream)
+
+        with open(os.path.join(
+                secret_directory, 'grafana_admin_password'), 'r') as stream:
+            grafana_admin_password = yaml.safe_load(stream)
+
+        behave_config = configparser.ConfigParser()
+        behave_config['behave'] = {}
+        behave_config['behave']['format'] = 'rerun'
+        behave_config['behave']['outfiles'] = 'rerun_failing.features'
+        behave_config['behave']['show_skipped'] = 'false'
+
+        behave_config['behave.userdata'] = {}
+
+        behave_config['behave.userdata']['nextcloud.url'] = \
+            'https://files.{}'.format(self.domain)
+        behave_config['behave.userdata']['nextcloud.username'] = 'admin'
+        behave_config['behave.userdata']['nextcloud.password'] = \
+            nextcloud_admin_password
+
+        behave_config['behave.userdata']['grafana.url'] = \
+            'https://grafana.{}'.format(self.domain)
+        behave_config['behave.userdata']['grafana.username'] = 'admin'
+        behave_config['behave.userdata']['grafana.password'] = \
+            grafana_admin_password
+
+        with open(config_path, 'w') as config_file:
+            behave_config.write(config_file)
+
+    def print_info(self):
+        """Prints information about the cluster. Useful for debugging"""
+        info_string = """
+Cluster "{name}":
+  - IP address: {ip_address}
+  - Hostname: {hostname}
+  - Domain: {domain}
+
+Configuration:
+  - Inventory file: {inventory_file}
+  - Settings file: {settings_file}
+
+Kubectl:
+
+To use kubectl with this cluster, copy-paste this in your terminal:
+
+export KUBECONFIG={secret_dir}/kube_config_cluster.yml"""
+        print(info_string.format(
+            name=self.name,
+            ip_address=self.ip_address,
+            hostname=self.hostname,
+            domain=self.domain,
+            inventory_file=self.inventory_file,
+            settings_file=self.settings_file,
+            secret_dir=self.secret_dir))
diff --git a/openappstack/cosmos.py b/openappstack/cosmos.py
new file mode 100755
index 0000000000000000000000000000000000000000..fc69c25113f2453a3dbb27589b45f499a8307230
--- /dev/null
+++ b/openappstack/cosmos.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+"""Python module with helper functions to use the cosmos API."""
+
+import json
+from datetime import datetime
+from datetime import timedelta
+import logging
+import os
+import re
+import socket
+from time import sleep
+
+import requests
+from tabulate import tabulate
+from pytz import timezone
+
+
+# Helper functions
+def request_api(resource: str, request_type: str = 'GET',
+                data: str = ''):
+    """Query the cosmos API."""
+    if 'COSMOS_API_TOKEN' in os.environ:
+        api_token = os.environ['COSMOS_API_TOKEN']
+    else:
+        raise ValueError('Please export the COSMOS_API_TOKEN '
+                         'environment variable.')
+
+    headers = {'Content-Type': 'application/json',
+               'Authorization': 'Bearer {0}'.format(api_token)}
+    api_url_base = 'https://service.greenhost.net/api/v2'
+    api_url = '{0}/{1}'.format(api_url_base, resource)
+
+    if request_type == 'GET':
+        response = requests.get(api_url, headers=headers)
+    elif request_type == 'DELETE':
+        response = requests.delete(api_url, headers=headers)
+    elif request_type == 'POST':
+        response = requests.post(
+            api_url, headers=headers, data=json.dumps(data))
+    elif request_type == 'PUT':
+        response = requests.put(
+            api_url, headers=headers, data=json.dumps(data))
+    else:
+        raise ValueError('Specify one of GET/DELETE/POST/PUT as request_type.')
+
+    log.debug('Request: %s, %s, data: %s',
+              response.url, request_type, data)
+    log.debug('Response code: %s', response.status_code)
+
+    status_code_ok = [200, 201, 202, 204]
+    if response.status_code in status_code_ok:
+        if response.content:
+            log.debug('Response: %s\n', response.json())
+            return json.loads(response.content.decode('utf-8'))
+        else:
+            return None
+    else:
+        raise requests.HTTPError('Got response code {0}: {1}'.format(
+            response.status_code, response.text))
+
+
+# API calls
+def create_domain_record(domain: str, name: str, data: str,
+                         record_type: str = 'A', update: bool = False):
+    """Create domain record.
+
+    If 'update' is set to True, the record will be updated if it exists.
+    """
+    log.info('Creating domain record')
+
+    record = {
+        'name': name,
+        'data': data,
+        'type': record_type
+    }
+    # Check if record exists
+    existing_record = get_domain_record_by_name(domain=domain, name=name,
+                                                record_type=record_type)
+    if existing_record:
+        if update:
+            log.info('Domain record exists - Updating the record.')
+            response = request_api(
+                'domains/%s/records/%s' % (domain, existing_record['id']),
+                'PUT', record)
+        else:
+            raise ValueError('Domain record exists - Doing nothing, '
+                             'please use "update=True" to update existing '
+                             'records.')
+    else:
+        log.info('Creating new record.')
+        response = request_api('domains/%s/records/' % domain, 'POST', record)
+
+    return response['domain_record']
+
+
+def create_droplet(name: str, ssh_key_id: int, region: str = 'ams1',
+                   size: int = 2048, disk: int = 20, image: int = 18):
+    """Create a droplet.
+
+    Required values:
+    - name (str): Name of the droplet
+    - ssh_key_id (int): ssh key id to add
+
+    Optional values with their default values:
+    - image (str): 18 (Ubuntu 18.04 x64)
+    - region (str): 'ams1' (Amsterdam 1)
+    - size (int): 2048 (2GB RAM)
+    - disk (int): 20 (20GB disk space)
+    """
+    log.info('Creating droplet')
+
+    data = {
+        "name": name,
+        "region": region,
+        "size": size,
+        "disk": disk,
+        "image": image,
+        "ssh_keys": ssh_key_id
+    }
+    response = request_api('droplet', 'POST', data)
+    return response
+
+
+def delete_domain_record(domain: str, id: int):
+    """Delete a domain record."""
+    log.info('Deleting domain record %s', id)
+    response = request_api('domains/{0}/records/{1}'.format(domain, id),
+                           'DELETE')
+    return response
+
+
+def delete_domain_records_by_name(domain: str, name_regex: str):
+    r"""Delete all domain records in a given domain matching a regex.
+
+    Examples:
+        delete_domain_records_by_name('openappstack.net', '^\*.ci-')
+        delete_domain_records_by_name('openappstack.net', '^ci-')
+    """
+    records = get_domain_records_by_name(domain, name_regex)
+    for record in records:
+        delete_domain_record(domain, record['id'])
+
+
+def delete_droplet(id: int):
+    """Delete a droplet. Droplet needs to be stopped first."""
+    log.info('Deleting %s', id)
+    response = request_api('droplets/{0}'.format(id), 'DELETE')
+    return response
+
+
+def get_domain_record(domain: str, id: int):
+    """Get details for given domain record."""
+    response = request_api('domains/{0}/records/{1}'.format(domain, id))
+    return response['domain_record']
+
+
+def get_domain_records(domain: str):
+    """Get domain records for given domain."""
+    response = request_api('domains/{0}/records'.format(domain))
+    return response['domain_records']
+
+
+def get_domain_record_by_name(domain: str, name: str,
+                              record_type: str = 'A'):
+    """
+    Get domain record for given name and type.
+
+    Example:
+        get_domain_record_by_name(domain='openappstack.net', name='varac-oas')
+    """
+    records = get_domain_records(domain=domain)
+    matching = None
+    for record in records:
+        if record['name'] == name and record['type'] == record_type:
+            matching = record
+            break
+    if not matching:
+        log.info('No domain record found.')
+
+    return matching
+
+
+def get_domain_records_by_name(domain: str, name_regex: str):
+    r"""
+    Get all information about domain records matching a regex in their names.
+
+    Example:
+        get_domain_records_by_name(name_regex='^ci\d+')
+    """
+    all_records = get_domain_records(domain)
+    matching = [record for record in all_records
+                if re.match(name_regex, record['name'])]
+    return matching
+
+
+def get_droplets():
+    """Get all information about all droplets."""
+    response = request_api('droplets')
+    return response['droplets']
+
+
+def get_droplets_by_name(name_regex: str):
+    r"""
+    Get all information about droplets matching a regex in their names.
+
+    Example:
+        get_droplets_by_name(name_regex='^ci\d+')
+    """
+    all_droplets = get_droplets()
+    matching = [droplet for droplet in all_droplets
+                if re.match(name_regex, droplet['name'])]
+    return matching
+
+
+def get_droplet(id: int):
+    """Get information about specified droplet."""
+    response = request_api('droplets/{0}'.format(id))
+    return response['droplet']
+
+
+def list_domain_records(domain: str):
+    """List domain records for given domain."""
+    records = get_domain_records(domain)
+
+    log.debug(json.dumps(records, sort_keys=True, indent=2))
+
+    table_records = [[
+        record['id'], record['name'], record['type'], record['data']]
+                     for record in records]
+    log.info(tabulate(table_records,
+                      headers=['ID', 'Name', 'Type', 'Data']))
+
+
+def list_droplets():
+    """List all droplets by their ID, Name, IP and state."""
+    droplets = get_droplets()
+
+    log.debug(json.dumps(droplets, sort_keys=True, indent=2))
+
+    table_droplets = [
+        [
+            droplet['id'],
+            droplet['name'],
+            ', '.join([x['ip_address'] for x in droplet['networks']['v4']]),
+            droplet['status']
+        ]
+        for droplet in droplets]
+
+    log.info(tabulate(table_droplets,
+                      headers=['ID', 'Name', 'IPv4', 'Status']))
+
+
+def shutdown_droplet(id: int):
+    """Shut down specified droplet (through a power_off call)."""
+    log.info('Shutting down %s', id)
+    data = {"type": "power_off"}
+    response = request_api('droplets/{0}/actions'.format(id), 'POST', data)
+    return response
+
+
+def status_droplet(id: int):
+    """Get status of specified droplet."""
+    response = get_droplet(id)
+    return response['status']
+
+
+def terminate_droplet(id: int):
+    """Terminate a droplet by powering it down and deleting it."""
+    shutdown_droplet(id)
+    wait_for_state(id, 'stopped')
+    delete_droplet(id)
+
+
+def terminate_droplets_by_name(name_regex: str, ndays: int = 0, domain: str = 'openappstack.net'):
+    r"""
+    Terminate droplets whose names match a regex and that are older than ndays days.
+    Droplets listed in the NO_TERMINATE_DROPLETS environment variable will not be deleted.
+
+    Example how to terminate all CI instances:
+        terminate_droplets_by_name(name_regex='^ci\d+', ndays=5)
+    will match e.g. 'ci1234', 'ci1', with a creation time older than 5 days
+    """
+
+    threshold_time = (datetime.now(tz=timezone('Europe/Stockholm')) - timedelta(days=ndays)).strftime("%Y-%m-%dT%H:%M:%S+00:00")
+    droplets = get_droplets()
+
+    noterminate_droplets = []
+    if 'NO_TERMINATE_DROPLETS' in os.environ:
+        noterminate_droplets = os.environ['NO_TERMINATE_DROPLETS'].split(',')
+
+    for droplet in droplets:
+        if droplet['name'] not in noterminate_droplets:
+            if re.match(name_regex, droplet['name']):
+                if droplet['created_at'] < threshold_time:
+                    delete_domain_records_by_name(domain, r'^\*.' + droplet['name'])
+                    delete_domain_records_by_name(domain, '^' + droplet['name'])
+                    terminate_droplet(droplet['id'])
+
+def wait_for_ssh(ip: str):
+    """Wait for ssh to be reachable on port 22."""
+    log.info('Waiting for ssh to become available on ip %s', ip)
+
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+    while sock.connect_ex((ip, 22)) != 0:
+        sleep(1)
+
+    log.info('SSH became available on ip %s', ip)
+
+
+def wait_for_state(id: int, state):
+    """Wait for a droplet to reach a certain state."""
+    log.info('Waiting for droplet %s to reach %s state...', id, state)
+    status = status_droplet(id)
+    log.debug(status)
+
+    while status != state:
+        sleep(1)
+        status = status_droplet(id)
+
+
+# When called from ipython, set up
+# logging to console
+try:
+    __IPYTHON__
+    log = logging.getLogger()
+    log.addHandler(logging.StreamHandler())
+    log.setLevel(logging.INFO)
+except NameError:
+    log = logging.getLogger(__name__)
diff --git a/test/requirements.txt b/requirements.txt
similarity index 96%
rename from test/requirements.txt
rename to requirements.txt
index 691784a6d6133eab8d8ba2366c3dee62a794479f..377c7c9f97eac1d0c40fe4289561fc668c3902f9 100644
--- a/test/requirements.txt
+++ b/requirements.txt
@@ -11,7 +11,7 @@ pyopenssl>=19.0.0
 pytest>=4.3.0
 requests>=2.19.1
 tabulate>=0.8.3
-testinfra>=2.0.0
+testinfra>=3.0.0
 setuptools>=40.6.2
 wheel>=0.33.1
 pytz>=2019.1
diff --git a/test/README.md b/test/README.md
index 21ceee2faf255a4038f21805b4d8079138aa2854..5ff076a3f371fb4decbb77fb44d20e889eb3023a 100644
--- a/test/README.md
+++ b/test/README.md
@@ -10,7 +10,7 @@ Specify host manually:
 
 Run cert test manually using the ansible inventory file:
 
-    OAS_DOMAIN='example.openappstack.net' py.test -v -m 'certs' \
+    ADDRESS='example.openappstack.net' py.test -v -m 'certs' \
         --connection=ansible \
         --ansible-inventory=../ansible/inventory.yml \
         --hosts='ansible://*'
@@ -18,11 +18,11 @@ Run cert test manually using the ansible inventory file:
 Run cert test manually against a different cluster, not configured in any
 ansible inventory file, either by using pytest:
 
-    OAS_DOMAIN='example.openappstack.net' py.test -v -m 'certs'
+    ADDRESS='example.openappstack.net' py.test -v -m 'certs'
 
 or directly (allows better debugging since pytest won't eat stdout):
 
-    OAS_DOMAIN='example.openappstack.net' pytest/test_certs.py
+    ADDRESS='example.openappstack.net' pytest/test_certs.py
 
 ## Issues
 
diff --git a/test/ansible.cfg b/test/ansible.cfg
deleted file mode 120000
index 0b986ffbd15ce7c5f43039dd53f3dbdad2981e03..0000000000000000000000000000000000000000
--- a/test/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
--../ansible/ansible.cfg
\ No newline at end of file
diff --git a/test/bootstrap.yml b/test/bootstrap.yml
deleted file mode 120000
index d29cf97c10ac85c0dc68fa448f5d9aaeae9714fe..0000000000000000000000000000000000000000
--- a/test/bootstrap.yml
+++ /dev/null
@@ -1 +0,0 @@
--../ansible/bootstrap.yml
\ No newline at end of file
diff --git a/test/ci-bootstrap.py b/test/ci-bootstrap.py
deleted file mode 100755
index 1ab001c36838821da5163e04214e91234acb3fbb..0000000000000000000000000000000000000000
--- a/test/ci-bootstrap.py
+++ /dev/null
@@ -1,330 +0,0 @@
-#!/usr/bin/env python3
-r"""
-Used by CI to bootstrap a new cluster and run tests.
-
-Env vars needed:
-- COSMOS_API_TOKEN
-
-Env vars optional:
-- NO_TERMINATE_DROPLETS: list of droplet ids that should not be
-  removed when deleting droplets.
-
-Install requirements:
-
-- Alpine using `requirements.txt`:
-
-    apk --no-cache add python3-dev build-base libffi-dev linux-headers \
-        openssl-dev openssh-client
-    pip3 install -r requirements.txt
-
-- Apline using packages (much faster):
-
-    apk --no-cache add ansible musl-dev linux-headers gcc py3-psutil \
-        openssh-client
-    pip3 install requests tabulate testinfra
-
-
-- Debian (using deb packages):
-    apt-get install -y --no-install-recommends ansible gcc libc6-dev \
-        python3-pip python3-setuptools python3-wheel \
-        python3-psutil
-    pip3 install requests tabulate testinfra
-"""
-
-import argparse
-import configparser
-import logging
-import os
-import random
-import shlex
-import string
-import subprocess
-import sys
-import traceback
-import yaml
-import greenhost_cloud
-
-SETTINGS_FILE = './group_vars/all/settings.yml'
-ANSIBLE_INVENTORY = './inventory.yml'
-
-
-def main():  # pylint: disable=too-many-statements,too-many-branches
-    """Do everything."""
-    # Parse command line arguments
-    parser = argparse.ArgumentParser(
-        description='Run bootstrap script'
-        'to deploy Openappstack to a given node.')
-
-    group = parser.add_mutually_exclusive_group(required=True)
-    group.add_argument(
-        '--create-droplet',
-        action='store_true',
-        help='Create droplet automatically')
-    group.add_argument(
-        '--droplet-id',
-        metavar='ID',
-        type=int,
-        help='ID of droplet to deploy to')
-
-    group.add_argument(
-        '--use-existing-inventory',
-        action='store_true',
-        help='Assumes inventory.yml has already been generated')
-
-    parser.add_argument(
-        '--ssh-key-id',
-        metavar='ID',
-        type=int,
-        default=411,
-        help='ID of ssh key to deploy with (default: 411)')
-
-    parser.add_argument(
-        '--terminate',
-        action='store_true',
-        help='Terminate droplet after deploy (shutdown and delete)')
-
-    parser.add_argument(
-        '--verbose',
-        action='store_true',
-        help='Be more verbose')
-
-    parser.add_argument(
-        '--ansible-param',
-        metavar=['PARAM[=VALUE]'],
-        action='append',
-        nargs=1,
-        help=('forward ansible parameters to the ansible-playbook call '
-              '(two dashes are prepended to PARAM)'))
-    parser.add_argument(
-        '--run-ansible',
-        action='store_true',
-        help='Runs the ansible bootstrap process')
-
-    parser.add_argument(
-        '--create-domain-records',
-        action='store_true',
-        help='Creates DNS entries for the cluster')
-
-    parser.add_argument(
-        '--write-behave-config',
-        action='store_true',
-        help='Writes a configuration file for behave with cluster information')
-
-    args = parser.parse_args()
-    verbose = args.verbose
-    loglevel = logging.DEBUG if verbose else logging.INFO
-    init_logging(log, loglevel)
-
-    # Setup logging for greenhost_cloud module
-    log_greenhost_cloud = logging.getLogger('greenhost_cloud')
-    init_logging(log_greenhost_cloud, loglevel)
-
-    if not args.use_existing_inventory:
-        # Start bootstrapping
-        if args.create_droplet:
-            # Create droplet
-
-            # image: 19 = Debian buster-x64
-            # ssh_keys
-            #   - 411: ci, ed25519
-            #   - 407: varac
-
-            if "CI_PIPELINE_ID" in os.environ:
-                instance_id = os.environ['CI_PIPELINE_ID']
-            else:
-                # Use random generated ID in case we're not running in
-                # gitlab CI and there's no CI_PIPELINE_ID env var
-                instance_id = ''.join(
-                    random.choice(string.ascii_lowercase + string.digits)
-                    for _ in range(10))
-
-            droplet = greenhost_cloud.create_droplet(
-                name='ci-' + instance_id,
-                ssh_key_id=args.ssh_key_id,
-                region='ams1',
-                size=8192,
-                disk=20,
-                image=19)
-            droplet_id = droplet['droplet']['id']
-            log.info('Created droplet id: %s', droplet_id)
-            greenhost_cloud.wait_for_state(droplet_id, 'running')
-        else:
-            droplet_id = args.droplet_id
-
-        if verbose:
-            greenhost_cloud.list_droplets()
-
-        # Get droplet ip
-        droplet = greenhost_cloud.get_droplet(droplet_id)
-        droplet_ip = droplet['networks']['v4'][0]['ip_address']
-        droplet_name = droplet['name']
-
-        # Create inventory
-        with open('../ansible/inventory.yml.example', 'r') as stream:
-            inventory = yaml.safe_load(stream)
-        inventory['all']['hosts'][droplet_name] = \
-            inventory['all']['hosts']['oas-dev']
-        del inventory['all']['hosts']['oas-dev']
-
-        inventory['all']['hosts'][droplet_name]['ansible_host'] = droplet_ip
-        inventory['all']['hosts'][droplet_name]['ansible_ssh_extra_args'] = \
-            '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-        inventory['all']['children']['master']['hosts'] = droplet_name
-        inventory['all']['children']['worker']['hosts'] = droplet_name
-
-        with open(ANSIBLE_INVENTORY, 'w') as stream:
-            yaml.safe_dump(inventory, stream, default_flow_style=False)
-
-        # Create settings
-        with open('../ansible/group_vars/all/settings.yml.example',
-                  'r') as stream:
-            settings = yaml.safe_load(stream)
-
-        settings['ip_address'] = droplet_ip
-        settings['domain'] = droplet_name + '.ci.openappstack.net'
-        settings['admin_email'] = "admin@{0}".format(settings['domain'])
-        settings['acme_staging'] = True
-
-        # Make sure settings file directory exists
-        settings_file_dir = os.path.dirname(SETTINGS_FILE)
-        if not os.path.exists(settings_file_dir):
-            os.makedirs(settings_file_dir)
-
-        with open(SETTINGS_FILE, 'w') as stream:
-            yaml.safe_dump(settings, stream, default_flow_style=False)
-
-        log.debug(yaml.safe_dump(inventory, default_flow_style=False))
-        log.debug(yaml.safe_dump(settings, default_flow_style=False))
-
-        # Wait for ssh
-        greenhost_cloud.wait_for_ssh(droplet_ip)
-    else:
-        # Work with the master node from the inventory
-        with open(ANSIBLE_INVENTORY, 'r') as stream:
-            inventory = yaml.safe_load(stream)
-        droplet_name = inventory['all']['children']['master']['hosts']
-        droplet_ip = inventory['all']['hosts'][droplet_name]['ansible_host']
-        log.info("Read data from inventory:\n\tname: %s\n\tip: %s",
-                 droplet_name, droplet_ip)
-
-        # For if write_behave_config is called later:
-        settings = None
-
-    if args.create_domain_records:
-        # Create domain records
-        domain_record = greenhost_cloud.create_domain_record(
-            domain='openappstack.net', name=droplet_name + '.ci',
-            data=droplet_ip, record_type='A', update=True)
-        log.info("Domain record: %s", domain_record)
-
-        domain_record = greenhost_cloud.create_domain_record(
-            domain='openappstack.net', name='*.' + droplet_name + '.ci',
-            data=droplet_name + '.ci', record_type='CNAME', update=True)
-        log.info("Domain record: %s", domain_record)
-
-        if verbose:
-            greenhost_cloud.list_domain_records('openappstack.net')
-
-    if args.run_ansible:
-
-        run_ansible('./bootstrap.yml', args.ansible_param)
-
-    if args.write_behave_config:
-        write_behave_config(settings=settings)
-
-    if args.terminate:
-        greenhost_cloud.terminate_droplets_by_name(droplet_name)
-
-
-def run_ansible(playbook, ansible_params):
-    """Call `ansible-playbook` directly to run the specified playbook."""
-    # playbook path here is relative to private_data_dir/project, see
-    # https://ansible-runner.readthedocs.io/en/latest/intro.html#inputdir
-    ansible_playbook_cmd = 'ansible-playbook %s' % playbook
-
-    if ansible_params:
-        for param in ansible_params:
-            if len(param) > 1:
-                log.warning('More than 1 parameter. Ignoring the rest! Use '
-                            '--ansible-param several times to supply '
-                            'more than 1 parameter')
-            param = param[0]
-            ansible_playbook_cmd += ' --' + param
-
-    log.info('Running %s', ansible_playbook_cmd)
-
-    result = subprocess.run(shlex.split(ansible_playbook_cmd))
-
-    if result.returncode > 0:
-        try:
-            raise RuntimeError('Playbook failed with rc %s.'
-                               % result.returncode)
-        except RuntimeError:
-            traceback.print_exc()
-            sys.exit(result.returncode)
-
-
-def write_behave_config(settings=None):
-    """Write behave config file for later use."""
-    if settings is None:
-        with open(SETTINGS_FILE) as stream:
-            settings = yaml.safe_load(stream)
-
-    secret_directory = settings['secret_directory']
-
-    with open(os.path.join(
-            secret_directory, 'nextcloud_admin_password'), 'r') as stream:
-        nextcloud_admin_password = yaml.safe_load(stream)
-
-    with open(os.path.join(
-            secret_directory, 'grafana_admin_password'), 'r') as stream:
-        grafana_admin_password = yaml.safe_load(stream)
-
-    behave_config = configparser.ConfigParser()
-    behave_config['behave'] = {}
-    behave_config['behave']['format'] = 'rerun'
-    behave_config['behave']['outfiles'] = 'rerun_failing.features'
-    behave_config['behave']['show_skipped'] = 'false'
-
-    behave_config['behave.userdata'] = {}
-
-    behave_config['behave.userdata']['nextcloud.url'] = \
-        'https://files.{}'.format(settings['domain'])
-    behave_config['behave.userdata']['nextcloud.username'] = 'admin'
-    behave_config['behave.userdata']['nextcloud.password'] = \
-        nextcloud_admin_password
-
-    behave_config['behave.userdata']['grafana.url'] = \
-        'https://grafana.{}'.format(settings['domain'])
-    behave_config['behave.userdata']['grafana.username'] = 'admin'
-    behave_config['behave.userdata']['grafana.password'] = \
-        grafana_admin_password
-
-    with open('./behave/behave.ini', 'w') as configfile:
-        behave_config.write(configfile)
-
-
-def init_logging(logger, loglevel):
-    """
-    Configure logging.
-
-    - debug and info go to stdout
-    - warning and above go to stderr
-    """
-    logger.setLevel(loglevel)
-    stdout = logging.StreamHandler(sys.stdout)
-    stdout.setLevel(loglevel)
-    stdout.addFilter(lambda record: record.levelno <= logging.INFO)
-
-    stderr = logging.StreamHandler()
-    stderr.setLevel(logging.WARNING)
-
-    logger.addHandler(stdout)
-    logger.addHandler(stderr)
-
-
-if __name__ == "__main__":
-    # Setup logging for this script
-    log = logging.getLogger(__name__)  # pylint: disable=invalid-name
-
-    main()
diff --git a/test/pytest/test_certs.py b/test/pytest/test_certs.py
index 2e9c152b3dd27f0d0e52e250e7bb94485ecb7f0b..472311e1613aca6db185ddb97918beed9e658ee0 100755
--- a/test/pytest/test_certs.py
+++ b/test/pytest/test_certs.py
@@ -97,8 +97,8 @@ def valid_cert(domain: str, ca_file: str = '/tmp/custom_ca_bundle.crt'):
 
 @pytest.mark.certs
 def test_cert_validation(host):
-    domain = os.environ.get("OAS_DOMAIN")
-    assert domain, "Please export OAS_DOMAIN as environment variable."
+    domain = os.environ.get("ADDRESS")
+    assert domain, "Please export ADDRESS as environment variable."
 
     add_custom_cert_authorities(certifi.where(),
                                 ['pytest/le-staging-bundle.pem'])
diff --git a/test/roles b/test/roles
deleted file mode 120000
index e4109d3736f47053d8deb67b0356f17ce2e2b9e0..0000000000000000000000000000000000000000
--- a/test/roles
+++ /dev/null
@@ -1 +0,0 @@
--../ansible/roles
\ No newline at end of file
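
For reference, here is a minimal local walkthrough of the `python3 -m openappstack` CLI this change introduces, mirroring what the CI jobs above do. This is a sketch under assumptions: the cluster name `mycluster`, the domain `example.org` and the SSH key ID `411` are placeholders you would replace with your own values, and `COSMOS_API_TOKEN` must be exported because every Greenhost API call in openappstack/cosmos.py requires it:

    # Required by openappstack/cosmos.py for all Greenhost API calls
    export COSMOS_API_TOKEN=<your token>
    # Create a VPS plus DNS records; writes clusters/mycluster/inventory.yml and settings.yml
    python3 -m openappstack mycluster create example.org --create-droplet \
        --hostname mycluster --ssh-key-id 411 --create-domain-records --subdomain mycluster
    # Run the ansible playbook that sets up Kubernetes and OpenAppStack
    python3 -m openappstack mycluster install
    # Run the behave acceptance tests, then show cluster info
    python3 -m openappstack mycluster test --behave-headless
    python3 -m openappstack mycluster info

All state lives under clusters/mycluster/, so the same cluster can later be inspected with the `info` subcommand or removed with `--terminate-droplet`, as the terminate CI job does.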