Verified Commit 942161b5 authored by Maarten de Waard

Merge branch 'master' into 291-separate-oas-python-library-for-behave-functions

parents ece4aa28 df341a49
Showing 155 additions and 95 deletions
......@@ -23,11 +23,6 @@ default:
ci_test_image:
stage: build
image:
# We need a shell to provide the registry credentials, so we need to use the
# kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
only:
changes:
- .gitlab-ci.yml
......@@ -40,10 +35,16 @@ bootstrap:
script:
- echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS"
- python3 -m openappstack $HOSTNAME create --create-droplet $DOMAIN --hostname $HOSTNAME --ssh-key-id $SSH_KEY_ID --create-domain-records --subdomain $SUBDOMAIN
# Allows ansible debugging early in the pipeline
- chmod 700 ansible
- cp clusters/ci-${CI_PIPELINE_ID}/inventory.yml ansible/
- cp clusters/ci-${CI_PIPELINE_ID}/settings.yml ansible/group_vars/all/
- python3 -m openappstack $HOSTNAME install --ansible-param='--skip-tags=helmfile'
artifacts:
paths:
- ./clusters
- ansible/inventory.yml
- ansible/group_vars/all/settings.yml
expire_in: 1 month
when: always
only:
......@@ -60,10 +61,14 @@ install:
script:
- python3 -m openappstack $HOSTNAME install --ansible-param='--tags=helmfile'
# Show versions of installed apps/binaries
- chmod 700 ansible
- cd ansible
- ansible master -m shell -a 'oas-version-info.sh 2>&1'
artifacts:
paths:
- ./clusters
- ansible/inventory.yml
- ansible/group_vars/all/settings.yml
expire_in: 1 month
when: always
only:
......
# Optional environment variables:
# - KANIKO_BUILD_IMAGENAME: Build/target image image
#
# - KANIKO_BUILD_IMAGENAME: Build/target image name.
# If empty, the image URL will be the root of the gitlab project path, i.e.
# `open.greenhost.net:4567/GROUP/PROJECT:TAG`
# If set, images will be named like
# `open.greenhost.net:4567/GROUP/PROJECT/KANIKO_BUILD_IMAGENAME:TAG`
#
# - KANIKO_CONTEXT: The subdir which holds the Dockerfile, leave unset if
# the Dockerfile is located at root level of the project.
#
# - KANIKO_ADDITIONAL_ARGS: This variable allows you to pass in ARG values that
# are used at build time. As with Docker, you can specify multiple arguments
# using the format `--build-arg NAME=VALUE` as many times as you need.
.kaniko_build:
stage: build
image:
......@@ -11,4 +21,5 @@
entrypoint: [""]
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --context ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.} --dockerfile ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.}/Dockerfile --destination $CI_REGISTRY_IMAGE/${KANIKO_BUILD_IMAGENAME/#//}:${CI_COMMIT_REF_NAME}
- if [ -n "${KANIKO_BUILD_IMAGENAME}" ]; then export IMAGENAME="/${KANIKO_BUILD_IMAGENAME}"; fi
- /kaniko/executor --context ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.} --dockerfile ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.}/Dockerfile --destination ${CI_REGISTRY_IMAGE}${IMAGENAME}:${CI_COMMIT_REF_NAME} ${KANIKO_ADDITIONAL_ARGS}
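As a sketch of how these variables are meant to be used together (the job name and values below are hypothetical, not part of this repository), a project-specific job can extend `.kaniko_build` and set the optional variables documented above:

my_image:
  extends: .kaniko_build
  variables:
    KANIKO_BUILD_IMAGENAME: my-image
    KANIKO_CONTEXT: docker/my-image
    KANIKO_ADDITIONAL_ARGS: --build-arg APP_VERSION=1.0

With these values, the Dockerfile in `docker/my-image/` is built and the resulting image is pushed to `open.greenhost.net:4567/GROUP/PROJECT/my-image:<branch or tag name>`.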
......@@ -22,14 +22,10 @@ onlyoffice_rabbitmq_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/
grafana_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/grafana_admin_password chars=ascii_letters') }}"
# Kubernetes version
kubernetes_version: "v1.14.3-rancher1-1"
# git repo versions
git_charts_version: 'HEAD'
git_local_storage_version: 'HEAD'
# version of the https://open.greenhost.net/openappstack/nextcloud repo
git_nextcloud_version: 'd882b6952c32b5cce03e6ad9a534035ce6f01230'
git_nextcloud_version: '3bce89f0765b52a24a71dcee8ad4537f718c33e0'
# Application versions
helm:
......@@ -50,14 +46,20 @@ krew:
checksum: 'sha256:dc2f2e1ec8a0acb6f3e23580d4a8b38c44823e948c40342e13ff6e8e12edb15a'
rke:
# You can change the kubernetes version used by rke in
# `ansible/group_vars/all/settings.yml.example`
#
# https://github.com/rancher/rke/releases
version: '0.2.7'
version: '0.3.2'
# Also possible:
# checksum: 'sha256:https://github.com/rancher/rke/releases/download/v0.2.4/sha256sum.txt'
checksum: 'sha256:7c05727aa3d6f8c4b5f60b057f1fe7883af48d5a778e3b1668f178dda84883ee'
checksum: 'sha256:96b366fe1faaa668b3e47f5b6d4bfd6334224e33c21e55dc79ec96f85e0e48e8'
cert_manager:
# cert-manager requires custom resource definitions to be applied before
# installing the helm chart. See https://hub.helm.sh/charts/jetstack/cert-manager
# for details.
crd_version: '0.9'
prometheus:
crd_version: 'v0.34.0'
......@@ -23,6 +23,7 @@ helmfiles:
# Optional, custom rke config.
# E.g. you can set the desired Kubernetes version, but please be aware that
# [every rke release has only a few supported kubernetes versions](https://rancher.com/docs/rke/latest/en/config-options/#kubernetes-version).
# See also https://rancher.com/blog/2019/keeping-k8s-current-with-rancher
#
# rke_custom_config:
# kubernetes_version: "v1.14.3-rancher1-1"
......
......@@ -11,15 +11,6 @@
delete: true
become: true
- name: Clone charts repo
tags:
- git
git:
repo: 'https://open.greenhost.net/openappstack/charts'
dest: '{{ data_directory }}/source/repos/charts'
version: '{{ git_charts_version }}'
become: true
- name: Create OAS namespaces
tags:
- kubernetes
......
......@@ -4,8 +4,7 @@
tags:
- helmfile
- prometheus
# NOTE: Change the commit hash in the URL when upgrading Prometheus
command: '/snap/bin/kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.31.1/example/prometheus-operator-crd/{{ item }}'
command: '/snap/bin/kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/{{ prometheus.crd_version }}/example/prometheus-operator-crd/{{ item }}'
loop:
- alertmanager.crd.yaml
- prometheus.crd.yaml
......@@ -32,7 +31,7 @@
recurse: true
when: prometheus_pv_name.stdout
- name: Install prometheus and graphana
- name: Install prometheus and grafana
include_role:
name: "helmfile"
tasks_from: "apply"
......@@ -43,7 +42,7 @@
environment:
- GRAFANA_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
tags:
- monitoring
- helmfile
- prometheus
vars:
helmfile: '15-monitoring'
......
......@@ -24,7 +24,9 @@ ingress:
options: {}
# Set this to none, so we can install nginx ourselves.
provider: none
kubernetes_version: 'v1.14.3-rancher1-1'
# If `kubernetes_version` is not set, the default kubernetes version for the
# specified rke version in `ansible/group_vars/all/oas.yml` is used.
# kubernetes_version: ''
monitoring:
options: {}
provider: ''
......
......@@ -30,3 +30,4 @@ For more information, go to `the OpenAppStack website`_.
testing_instructions
design
reference
troubleshooting
# Troubleshooting
Note: `cluster$` indicates that the commands should be run as root on your OAS cluster.
## HTTPS Certificates
OAS uses [cert-manager](http://docs.cert-manager.io/en/latest/) to automatically
fetch [Let's Encrypt](https://letsencrypt.org/) certificates for all deployed
services. If you experience invalid SSL certificates (i.e. your browser warns you
when visiting Nextcloud at `https://files.YOUR.CLUSTER.DOMAIN`), here's how to
debug this:
Did you create your cluster using the `--acme-live-environment` argument?
Please check the resulting value of the `acme_staging` key in
`clusters/YOUR_CLUSTERNAME/settings.yml`. If this is set to `true`, certificates
are fetched from the [Let's Encrypt staging API](https://letsencrypt.org/docs/staging-environment/),
which can't be validated by default in your browser.
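To quickly check this value, you can for example run the following on the machine you use to manage your cluster (not on the cluster itself):
grep acme_staging clusters/YOUR_CLUSTERNAME/settings.yml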
Are all pods in the `cert-manager` namespace in the `READY` state?
cluster$ kubectl -n cert-manager get pods
Show the logs of the main `cert-manager` pod:
cluster$ kubectl -n cert-manager logs -l "app.kubernetes.io/name=cert-manager"
You can `grep` for your cluster domain or for any specific subdomain to narrow
down results.
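For example, to only see log lines that concern the Nextcloud certificate (substitute your own cluster domain):
cluster$ kubectl -n cert-manager logs -l "app.kubernetes.io/name=cert-manager" | grep files.YOUR.CLUSTER.DOMAIN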
## Purge OAS and install from scratch
If things ever fail beyond possible recovery, here's how to completely purge an OAS installation in order to start from scratch:
cluster$ apt purge docker-ce-cli containerd.io
cluster$ mount | egrep '^(tmpfs.*kubelet|nsfs.*docker)' | cut -d' ' -f 3 | xargs umount
cluster$ rm -rf /var/lib/docker /var/lib/OpenAppStack /etc/kubernetes /var/lib/etcd /var/lib/rancher /var/lib/kubelet /var/log/OpenAppStack /var/log/containers /var/log/pods
......@@ -7,9 +7,9 @@ releases:
- name: "oas-{{ .Environment.Values.releaseName }}-prometheus"
namespace: "oas"
chart: "stable/prometheus-operator"
# NOTE: If you change this version, also change the commit hash in
# ansible/roles/apps/tasks/helmfiles.yml:34
version: 5.15.0
# NOTE: If you change this version, also update the crd_version number in
# ansible/group_vars/all/oas.yml
version: "7.4.0"
values:
- "../values/prometheus.yaml.gotmpl"
- "/etc/OpenAppStack/values/apps/prometheus.yaml.gotmpl"
......
......@@ -3,16 +3,11 @@ environments:
values:
- "/etc/OpenAppStack/values/local.yaml"
# Note: needs helm-git plugin (https://github.com/aslafy-z/helm-git)
repositories:
- name: onlyoffice-documentserver
url: git+https://open.greenhost.net/openappstack/nextcloud@onlyoffice-documentserver?ref=master
releases:
- name: "oas-{{ .Environment.Values.releaseName }}-files"
namespace: "oas-apps"
# Install from file path, so you don't run into https://github.com/roboll/helmfile/issues/726
chart: "../../repos/nextcloud/nextcloud-onlyoffice"
chart: "../../repos/nextcloud"
values:
- "../values/nextcloud.yaml.gotmpl"
- "/etc/OpenAppStack/values/apps/nextcloud.yaml.gotmpl"
......
nextcloud:
# Set this to true to debug your nextcloud
debug: false
nextcloud:
host: "files.{{ .Environment.Values.domain }}"
password: "{{ requiredEnv "NEXTCLOUD_PASSWORD" }}"
......@@ -60,7 +63,8 @@ nextcloud:
successThreshold: 1
failureThreshold: 3
onlyoffice-documentserver:
onlyoffice:
server_name: "office.{{ .Environment.Values.domain }}"
ingress:
enabled: true
annotations:
......@@ -74,16 +78,7 @@ onlyoffice-documentserver:
- hosts:
- "office.{{ .Environment.Values.domain }}"
secretName: oas-{{ .Environment.Values.releaseName }}-office
onlyoffice:
server_name: "office.{{ .Environment.Values.domain }}"
jwtSecret: "{{ requiredEnv "ONLYOFFICE_JWT_SECRET" }}"
postgresql:
postgresqlPassword: "{{ requiredEnv "ONLYOFFICE_POSTGRESQL_PASSWORD" }}"
rabbitmq:
rabbitmq:
password: "{{ requiredEnv "ONLYOFFICE_RABBITMQ_PASSWORD" }}"
livenessProbe:
initialDelaySeconds: 120
timeoutSeconds: 20
......@@ -96,3 +91,10 @@ onlyoffice-documentserver:
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
postgresql:
postgresqlPassword: "{{ requiredEnv "ONLYOFFICE_POSTGRESQL_PASSWORD" }}"
rabbitmq:
rabbitmq:
password: "{{ requiredEnv "ONLYOFFICE_RABBITMQ_PASSWORD" }}"
......@@ -5,6 +5,20 @@ coreDns:
kubeDns:
enabled: false
kubeControllerManager:
enabled: false
# If you enable this, you need the following selector:
service:
selector:
k8s-app: kube-controller-manager
kubeScheduler:
enabled: false
# If you enable this, you need the following selector:
service:
selector:
k8s-app: kube-scheduler
alertmanager:
alertmanagerSpec:
logFormat: logfmt
......
......@@ -52,11 +52,9 @@ def request_api(resource: str, request_type: str = 'GET',
if response.content:
log.debug('Response: %s\n', response.json())
return json.loads(response.content.decode('utf-8'))
else:
return None
else:
raise requests.HTTPError('WARNING: Got response code ',
response.status_code, response.text)
return None
raise requests.HTTPError('WARNING: Got response code ',
response.status_code, response.text)
# API calls
......@@ -93,7 +91,7 @@ def create_domain_record(domain: str, name: str, data: str,
return response['domain_record']
def create_droplet(name: str, ssh_key_id: int, region: str = 'ams1',
def create_droplet(name: str, ssh_key_id: int, region: str = 'ams1', # pylint: disable=too-many-arguments
size: int = 2048, disk: int = 20, image: int = 18):
"""Create a droplet.
......@@ -121,10 +119,10 @@ def create_droplet(name: str, ssh_key_id: int, region: str = 'ams1',
return response
def delete_domain_record(domain: str, id: int):
def delete_domain_record(domain: str, record_id: int):
"""Delete a domain record."""
log.info('Deleting domain record %s', id)
response = request_api('domains/{0}/records/{1}'.format(domain, id),
log.info('Deleting domain record %s', record_id)
response = request_api('domains/{0}/records/{1}'.format(domain, record_id),
'DELETE')
return response
......@@ -137,21 +135,21 @@ def delete_domain_records_by_name(domain: str, name_regex: str):
delete_domain_records_by_name('openappstack.net', '^ci-')
"""
all = get_domain_records_by_name(domain, name_regex)
for record in all:
all_records = get_domain_records_by_name(domain, name_regex)
for record in all_records:
delete_domain_record(domain, record['id'])
def delete_droplet(id: int):
def delete_droplet(droplet_id: int):
"""Delete a droplet. Droplet needs to be stopped first."""
log.info('Deleting %s', id)
response = request_api('droplets/{0}'.format(id), 'DELETE')
log.info('Deleting %s', droplet_id)
response = request_api('droplets/{0}'.format(droplet_id), 'DELETE')
return response
def get_domain_record(domain: str, id: int):
def get_domain_record(domain: str, droplet_id: int):
"""Get details for given domain record."""
response = request_api('domains/{0}/records/{1}'.format(domain, id))
response = request_api('domains/{0}/records/{1}'.format(domain, droplet_id))
return response['domain_record']
......@@ -209,13 +207,13 @@ def get_droplets_by_name(name_regex: str):
"""
all_droplets = get_droplets()
matching = [droplet for droplet in all_droplets
if re.match('^ci+', droplet['name'])]
if re.match(name_regex, droplet['name'])]
return matching
def get_droplet(id: int):
def get_droplet(droplet_id: int):
"""Get information about specified droplet."""
response = request_api('droplets/{0}'.format(id))
response = request_api('droplets/{0}'.format(droplet_id))
return response['droplet']
......@@ -225,9 +223,11 @@ def list_domain_records(domain: str):
log.debug(json.dumps(records, sort_keys=True, indent=2))
table_records = [[
record['id'], record['name'], record['type'], record['data']]
for record in records]
table_records = [
[
record['id'], record['name'], record['type'], record['data']
] for record in records
]
log.info(tabulate(table_records,
headers=['ID', 'Name', 'Type', 'Data']))
......@@ -251,25 +251,25 @@ def list_droplets():
headers=['ID', 'Name', 'IPv4', 'Status']))
def shutdown_droplet(id: int):
def shutdown_droplet(droplet_id: int):
"""Shut down specified droplet (through a power_off call)."""
log.info('Shutting down %s', id)
log.info('Shutting down %s', droplet_id)
data = {"type": "power_off"}
response = request_api('droplets/{0}/actions'.format(id), 'POST', data)
response = request_api('droplets/{0}/actions'.format(droplet_id), 'POST', data)
return response
def status_droplet(id: int):
def status_droplet(droplet_id: int):
"""Get status of specified droplet."""
response = get_droplet(id)
response = get_droplet(droplet_id)
return response['status']
def terminate_droplet(id: int):
def terminate_droplet(droplet_id: int):
"""Terminate a droplet by powering it down and deleting it."""
shutdown_droplet(id)
wait_for_state(id, 'stopped')
delete_droplet(id)
shutdown_droplet(droplet_id)
wait_for_state(droplet_id, 'stopped')
delete_droplet(droplet_id)
def terminate_droplets_by_name(name_regex: str, ndays: int = 0,
......@@ -287,51 +287,51 @@ def terminate_droplets_by_name(name_regex: str, ndays: int = 0,
threshold_time = (datetime.now(tz=timezone('Europe/Stockholm')) -
timedelta(days=ndays)).\
strftime("%Y-%m-%dT%H:%M:%S+00:00")
all = get_droplets()
all_droplets = get_droplets()
noterminate_droplets = []
if 'NO_TERMINATE_DROPLETS' in os.environ:
noterminate_droplets = os.environ['NO_TERMINATE_DROPLETS'].split(',')
for droplet in all:
for droplet in all_droplets:
if droplet['name'] not in noterminate_droplets:
if re.match(name_regex, droplet['name']):
if droplet['created_at'] < threshold_time:
delete_domain_records_by_name(
domain, '^\*.'+droplet['name'])
domain, r'^\*.'+droplet['name'])
delete_domain_records_by_name(domain, '^'+droplet['name'])
terminate_droplet(droplet['id'])
def wait_for_ssh(ip: str):
def wait_for_ssh(droplet_ip: str):
"""Wait for ssh to be reachable on port 22."""
log.info('Waiting for ssh to become available on ip %s', ip)
log.info('Waiting for ssh to become available on ip %s', droplet_ip)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while sock.connect_ex((ip, 22)) != 0:
while sock.connect_ex((droplet_ip, 22)) != 0:
sleep(1)
log.info('SSH became available on ip %s', ip)
log.info('SSH became available on ip %s', droplet_ip)
def wait_for_state(id: int, state):
def wait_for_state(droplet_id: int, state):
"""Wait for a droplet to reach a certain state."""
log.info('Waiting for droplet %s to reach %s state...', id, state)
status = status_droplet(id)
log.info('Waiting for droplet %s to reach %s state...', droplet_id, state)
status = status_droplet(droplet_id)
log.debug(status)
while status != state:
sleep(1)
status = status_droplet(id)
status = status_droplet(droplet_id)
# When called from from ipython, setup
# logging to console
try:
__IPYTHON__
log = logging.getLogger()
__IPYTHON__ # pylint: disable=pointless-statement
log = logging.getLogger() # pylint: disable=invalid-name
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
except NameError:
log = logging.getLogger(__name__)
log = logging.getLogger(__name__) # pylint: disable=invalid-name
......@@ -5,12 +5,12 @@ Feature: Test grafana admin login
Scenario: Open grafana
When I open the grafana URL
Then I wait on element "//input[@name='username']" for 25000ms to be visible
Then I wait on element "//input[@name='user']" for 25000ms to be visible
And I expect that element "#inputPassword" is visible
Scenario: Login to grafana
Given the element "//input[@name='username']" is visible
When I enter the "grafana" "username" in the inputfield "//input[@name='username']"
Given the element "//input[@name='user']" is visible
When I enter the "grafana" "username" in the inputfield "//input[@name='user']"
And I enter the "grafana" "password" in the inputfield "#inputPassword"
And I click on the button "//div[@id='login-view']//button[@type='submit']"
Then I wait on element "/html/body/grafana-app/sidemenu/a" for 25000ms to be visible
......