diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 2ae11a983fe440dbbf3d58e5e7fa9ed59f3d5628..7ac9cff5e89af2bae881d0d41bc79b9033b704cd 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -23,11 +23,6 @@ default:
 
 ci_test_image:
   stage: build
-  image:
-    # We need a shell to provide the registry credentials, so we need to use the
-    # kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
-    name: gcr.io/kaniko-project/executor:debug
-    entrypoint: [""]
   only:
     changes:
       - .gitlab-ci.yml
@@ -40,10 +35,16 @@ bootstrap:
   script:
     - echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS"
     - python3 -m openappstack $HOSTNAME create --create-droplet $DOMAIN --hostname $HOSTNAME --ssh-key-id $SSH_KEY_ID --create-domain-records --subdomain $SUBDOMAIN
+    # Allow ansible debugging early in the pipeline
+    - chmod 700 ansible
+    - cp clusters/ci-${CI_PIPELINE_ID}/inventory.yml ansible/
+    - cp clusters/ci-${CI_PIPELINE_ID}/settings.yml ansible/group_vars/all/
     - python3 -m openappstack $HOSTNAME install --ansible-param='--skip-tags=helmfile'
   artifacts:
     paths:
     - ./clusters
+    - ansible/inventory.yml
+    - ansible/group_vars/all/settings.yml
     expire_in: 1 month
     when: always
   only:
@@ -60,10 +61,14 @@ install:
   script:
     - python3 -m openappstack $HOSTNAME install --ansible-param='--tags=helmfile'
     # Show versions of installed apps/binaries
+    - chmod 700 ansible
+    - cd ansible
     - ansible master -m shell -a 'oas-version-info.sh 2>&1'
   artifacts:
     paths:
     - ./clusters
+    - ansible/inventory.yml
+    - ansible/group_vars/all/settings.yml
     expire_in: 1 month
     when: always
   only:
diff --git a/.gitlab/ci_templates/kaniko.yml b/.gitlab/ci_templates/kaniko.yml
index 967b8b3316f47da0832d7f06c6b4d0717cd891e9..c1cea5b7a488b32ac97fe1fc4b0c6d4b4d8586e6 100644
--- a/.gitlab/ci_templates/kaniko.yml
+++ b/.gitlab/ci_templates/kaniko.yml
@@ -1,7 +1,17 @@
 # Optional environment variables:
-# - KANIKO_BUILD_IMAGENAME: Build/target image image
+#
+# - KANIKO_BUILD_IMAGENAME: Build/target image name.
+#   If empty, the image URL will be the root of the GitLab project path, i.e.
+#     `open.greenhost.net:4567/GROUP/PROJECT:TAG`
+#   If set, images will be named like
+#     `open.greenhost.net:4567/GROUP/PROJECT/KANIKO_BUILD_IMAGENAME:TAG`
+#
 # - KANIKO_CONTEXT: The subdir which holds the Dockerfile, leave unset if
 #                   the Dockerfile is located at root level of the project.
+#
+# - KANIKO_ADDITIONAL_ARGS: This variable allows you to pass in ARG values that
+#   are used at build time. As with Docker, you can pass
+#   `--build-arg NAME=VALUE` as many times as you need.
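+#
+# Minimal usage sketch (job name and variable values below are illustrative,
+# not part of this repo):
+#
+#   build_my_image:
+#     extends: .kaniko_build
+#     variables:
+#       KANIKO_BUILD_IMAGENAME: 'my-image'
+#       KANIKO_CONTEXT: 'docker'
+#       KANIKO_ADDITIONAL_ARGS: '--build-arg VERSION=1.0'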
 .kaniko_build:
   stage: build
   image:
@@ -11,4 +21,5 @@
     entrypoint: [""]
   script:
     - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
-    - /kaniko/executor --context ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.} --dockerfile ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.}/Dockerfile --destination $CI_REGISTRY_IMAGE/${KANIKO_BUILD_IMAGENAME/#//}:${CI_COMMIT_REF_NAME}
+    - if [ -n "${KANIKO_BUILD_IMAGENAME}" ]; then  export IMAGENAME="/${KANIKO_BUILD_IMAGENAME}"; fi
+    - /kaniko/executor --context ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.} --dockerfile ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.}/Dockerfile --destination ${CI_REGISTRY_IMAGE}${IMAGENAME}:${CI_COMMIT_REF_NAME} ${KANIKO_ADDITIONAL_ARGS}
diff --git a/ansible/group_vars/all/oas.yml b/ansible/group_vars/all/oas.yml
index 95d9797fd922ea4761c52d77619308a15c392302..e63d594373bffdd6242ea14f2df0be43ca80737e 100644
--- a/ansible/group_vars/all/oas.yml
+++ b/ansible/group_vars/all/oas.yml
@@ -22,14 +22,10 @@ onlyoffice_rabbitmq_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/
 
 grafana_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/grafana_admin_password chars=ascii_letters') }}"
 
-# Kubernetes version
-kubernetes_version: "v1.14.3-rancher1-1"
-
 # git repo versions
-git_charts_version: 'HEAD'
 git_local_storage_version: 'HEAD'
 # version of the https://open.greenhost.net/openappstack/nextcloud repo
-git_nextcloud_version: 'd882b6952c32b5cce03e6ad9a534035ce6f01230'
+git_nextcloud_version: '3bce89f0765b52a24a71dcee8ad4537f718c33e0'
 
 # Application versions
 helm:
@@ -50,14 +46,20 @@ krew:
   checksum: 'sha256:dc2f2e1ec8a0acb6f3e23580d4a8b38c44823e948c40342e13ff6e8e12edb15a'
 
 rke:
+  # You can override the kubernetes version used by rke in your cluster's
+  # settings.yml; see `ansible/group_vars/all/settings.yml.example`.
+  #
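+  # For example (the version shown is taken from settings.yml.example; make
+  # sure it is supported by the rke release below):
+  #
+  #   rke_custom_config:
+  #     kubernetes_version: "v1.14.3-rancher1-1"
+  #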
   # https://github.com/rancher/rke/releases
-  version: '0.2.7'
+  version: '0.3.2'
   # Also possible:
   # checksum: 'sha256:https://github.com/rancher/rke/releases/download/v0.2.4/sha256sum.txt'
-  checksum: 'sha256:7c05727aa3d6f8c4b5f60b057f1fe7883af48d5a778e3b1668f178dda84883ee'
+  checksum: 'sha256:96b366fe1faaa668b3e47f5b6d4bfd6334224e33c21e55dc79ec96f85e0e48e8'
 
 cert_manager:
   # cert-manager requires custom resource definitions applied before installing
   # the helm chart. See https://hub.helm.sh/charts/jetstack/cert-manager for
   # details
   crd_version: '0.9'
+
+prometheus:
+  crd_version: 'v0.34.0'
diff --git a/ansible/group_vars/all/settings.yml.example b/ansible/group_vars/all/settings.yml.example
index 51b2984df49020e3e0f75f7c2f206629247a552c..bf9c3ce75e816c1f866a27a301674c631447e590 100644
--- a/ansible/group_vars/all/settings.yml.example
+++ b/ansible/group_vars/all/settings.yml.example
@@ -23,6 +23,7 @@ helmfiles:
 # Optional, custom rke config.
 # I.e. you can set the desired Kubernetes version but please be aware of
 # the [every rke release has only a few supported kubernetes versions](https://rancher.com/docs/rke/latest/en/config-options/#kubernetes-version).
+# See also https://rancher.com/blog/2019/keeping-k8s-current-with-rancher
 #
 # rke_custom_config:
 #   kubernetes_version: "v1.14.3-rancher1-1"
diff --git a/ansible/roles/apps/tasks/init.yml b/ansible/roles/apps/tasks/init.yml
index 4a7ed7210494a6758c69debd2c3d20040c155401..bc6aa8874ec0f1b6b0962393da4a0e9960c6aa54 100644
--- a/ansible/roles/apps/tasks/init.yml
+++ b/ansible/roles/apps/tasks/init.yml
@@ -11,15 +11,6 @@
     delete: true
   become: true
 
-- name: Clone charts repo
-  tags:
-    - git
-  git:
-    repo: 'https://open.greenhost.net/openappstack/charts'
-    dest: '{{ data_directory }}/source/repos/charts'
-    version: '{{ git_charts_version }}'
-  become: true
-
 - name: Create OAS namespaces
   tags:
     - kubernetes
diff --git a/ansible/roles/apps/tasks/prometheus.yml b/ansible/roles/apps/tasks/prometheus.yml
index db9e49a8dd6611404832ac3de12819a4e9ff2414..9d4f800114c62132a6820dec7b2a18a486a36ed5 100644
--- a/ansible/roles/apps/tasks/prometheus.yml
+++ b/ansible/roles/apps/tasks/prometheus.yml
@@ -4,8 +4,7 @@
   tags:
     - helmfile
     - prometheus
-  # NOTE: Change the commit hash in the URL when upgrading Prometheus
-  command: '/snap/bin/kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.31.1/example/prometheus-operator-crd/{{ item }}'
+  command: '/snap/bin/kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/{{ prometheus.crd_version }}/example/prometheus-operator-crd/{{ item }}'
   loop:
     - alertmanager.crd.yaml
     - prometheus.crd.yaml
@@ -32,7 +31,7 @@
     recurse: true
   when: prometheus_pv_name.stdout
 
-- name: Install prometheus and graphana
+- name: Install prometheus and grafana
   include_role:
     name: "helmfile"
     tasks_from: "apply"
@@ -43,7 +42,7 @@
       environment:
         - GRAFANA_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
   tags:
-    - monitoring
+    - helmfile
     - prometheus
   vars:
       helmfile: '15-monitoring'
diff --git a/ansible/roles/rke_configuration/files/cluster-defaults.yml b/ansible/roles/rke_configuration/files/cluster-defaults.yml
index 4e3f9b50aeb01783c183e48a75120eda52538fd9..ced099187c91c3afc039f4768fad1eda86b44594 100644
--- a/ansible/roles/rke_configuration/files/cluster-defaults.yml
+++ b/ansible/roles/rke_configuration/files/cluster-defaults.yml
@@ -24,7 +24,9 @@ ingress:
   options: {}
   # Set this to none, so we can install nginx ourselves.
   provider: none
-kubernetes_version: 'v1.14.3-rancher1-1'
+# If `kubernetes_version` is not set, the default kubernetes version of the
+# rke version specified in `ansible/group_vars/all/oas.yml` is used.
+# kubernetes_version: ''
 monitoring:
   options: {}
   provider: ''
diff --git a/docs/index.rst b/docs/index.rst
index b7638b7b6017f6b22841ec5e6321fd7cacc7b163..b10a8df901e4b57f299931d09873b9c12f3a5e17 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -30,3 +30,4 @@ For more information, go to `the OpenAppStack website`_.
    testing_instructions
    design
    reference
+   troubleshooting
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c2572caf24cbecd9d906f95fcee0e1a480cfd95
--- /dev/null
+++ b/docs/troubleshooting.md
@@ -0,0 +1,37 @@
+# Troubleshooting
+
+Note: `cluster$` indicates that the commands should be run as root on your OAS cluster.
+
+## HTTPS Certificates
+
+OAS uses [cert-manager](http://docs.cert-manager.io/en/latest/) to automatically
+fetch [Let's Encrypt](https://letsencrypt.org/) certificates for all deployed
+services. If you experience invalid SSL certificates (e.g. your browser warns
+you when visiting Nextcloud at `https://files.YOUR.CLUSTER.DOMAIN`), here's how
+to debug this:
+
+Did you create your cluster using the `--acme-live-environment` argument?
+Please check the resulting value of the `acme_staging` key in
+`clusters/YOUR_CLUSTERNAME/settings.yml`. If this is set to `true`, certificates
+are fetched from the [Let's Encrypt staging API](https://letsencrypt.org/docs/staging-environment/),
+which can't be validated by default in your browser.
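+
+A quick way to check this (run on the machine you manage the cluster from, not
+on the cluster itself; the cluster name is illustrative):
+
+    grep acme_staging clusters/YOUR_CLUSTERNAME/settings.yml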
+
+Are all pods in the `cert-manager` namespace in the `READY` state?
+
+    cluster$ kubectl -n cert-manager get pods
+
+Show the logs of the main `cert-manager` pod:
+
+    cluster$ kubectl -n cert-manager logs -l "app.kubernetes.io/name=cert-manager"
+
+You can `grep` for your cluster domain or for any specific subdomain to narrow
+down results.
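+
+For example, to only show log lines concerning the Nextcloud certificate:
+
+    cluster$ kubectl -n cert-manager logs -l "app.kubernetes.io/name=cert-manager" | grep 'files.YOUR.CLUSTER.DOMAIN'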
+
+
+## Purge OAS and install from scratch
+
+If things ever fail beyond recovery, here's how to completely purge an OAS installation in order to start from scratch:
+
+    cluster$ apt purge docker-ce-cli containerd.io
+    cluster$ mount | egrep '^(tmpfs.*kubelet|nsfs.*docker)' | cut -d' ' -f 3 | xargs umount
+    cluster$ rm -rf /var/lib/docker /var/lib/OpenAppStack /etc/kubernetes /var/lib/etcd /var/lib/rancher /var/lib/kubelet /var/log/OpenAppStack /var/log/containers /var/log/pods
diff --git a/helmfiles/helmfile.d/15-monitoring.yaml b/helmfiles/helmfile.d/15-monitoring.yaml
index 1db3eaa6a9f47f555a4c537d26af5ce881654d25..a73e191cac33f9ff881cebb349ceae1bcbebe5c7 100644
--- a/helmfiles/helmfile.d/15-monitoring.yaml
+++ b/helmfiles/helmfile.d/15-monitoring.yaml
@@ -7,9 +7,9 @@ releases:
   - name: "oas-{{ .Environment.Values.releaseName }}-prometheus"
     namespace: "oas"
     chart: "stable/prometheus-operator"
-    # NOTE: If you change this version, also change the commit hash in
-    # ansible/roles/apps/tasks/helmfiles.yml:34
-    version: 5.15.0
+    # NOTE: If you change this version, also update the crd_version number in
+    # ansible/group_vars/all/oas.yml
+    version: "7.4.0"
     values:
     - "../values/prometheus.yaml.gotmpl"
     - "/etc/OpenAppStack/values/apps/prometheus.yaml.gotmpl"
diff --git a/helmfiles/helmfile.d/20-nextcloud.yaml b/helmfiles/helmfile.d/20-nextcloud.yaml
index d144945c7e7514b66e5280316a02010d23ed0681..6167231fe2a8874203c3087e007fe44a6cb37e5a 100644
--- a/helmfiles/helmfile.d/20-nextcloud.yaml
+++ b/helmfiles/helmfile.d/20-nextcloud.yaml
@@ -3,16 +3,11 @@ environments:
     values:
       - "/etc/OpenAppStack/values/local.yaml"
 
-# Note: needs helm-git plugin (https://github.com/aslafy-z/helm-git)
-repositories:
-  - name: onlyoffice-documentserver
-    url: git+https://open.greenhost.net/openappstack/nextcloud@onlyoffice-documentserver?ref=master
-
 releases:
   - name: "oas-{{ .Environment.Values.releaseName }}-files"
     namespace: "oas-apps"
     # Install from file path, so you don't run into https://github.com/roboll/helmfile/issues/726
-    chart: "../../repos/nextcloud/nextcloud-onlyoffice"
+    chart: "../../repos/nextcloud"
     values:
     - "../values/nextcloud.yaml.gotmpl"
     - "/etc/OpenAppStack/values/apps/nextcloud.yaml.gotmpl"
diff --git a/helmfiles/values/nextcloud.yaml.gotmpl b/helmfiles/values/nextcloud.yaml.gotmpl
index eb0b641c184ad8cad754dc52c95e19262b29203d..e0a84e681f3d24f78286b82336749a8bccd96ca0 100644
--- a/helmfiles/values/nextcloud.yaml.gotmpl
+++ b/helmfiles/values/nextcloud.yaml.gotmpl
@@ -1,4 +1,7 @@
 nextcloud:
+  # Set this to true to debug your Nextcloud instance
+  debug: false
+
   nextcloud:
     host: "files.{{ .Environment.Values.domain }}"
     password: "{{ requiredEnv "NEXTCLOUD_PASSWORD" }}"
@@ -60,7 +63,8 @@ nextcloud:
     successThreshold: 1
     failureThreshold: 3
 
-onlyoffice-documentserver:
+onlyoffice:
+  server_name: "office.{{ .Environment.Values.domain }}"
   ingress:
     enabled: true
     annotations:
@@ -74,16 +78,7 @@ onlyoffice-documentserver:
       - hosts:
           - "office.{{ .Environment.Values.domain }}"
         secretName: oas-{{ .Environment.Values.releaseName }}-office
-
-  onlyoffice:
-    server_name: "office.{{ .Environment.Values.domain }}"
   jwtSecret: "{{ requiredEnv "ONLYOFFICE_JWT_SECRET" }}"
-  postgresql:
-    postgresqlPassword: "{{ requiredEnv "ONLYOFFICE_POSTGRESQL_PASSWORD" }}"
-  rabbitmq:
-    rabbitmq:
-      password: "{{ requiredEnv "ONLYOFFICE_RABBITMQ_PASSWORD" }}"
-
   livenessProbe:
     initialDelaySeconds: 120
     timeoutSeconds: 20
@@ -96,3 +91,10 @@ onlyoffice-documentserver:
     periodSeconds: 30
     successThreshold: 1
     failureThreshold: 3
+
+postgresql:
+  postgresqlPassword: "{{ requiredEnv "ONLYOFFICE_POSTGRESQL_PASSWORD" }}"
+
+rabbitmq:
+  rabbitmq:
+    password: "{{ requiredEnv "ONLYOFFICE_RABBITMQ_PASSWORD" }}"
diff --git a/helmfiles/values/prometheus.yaml.gotmpl b/helmfiles/values/prometheus.yaml.gotmpl
index 4f05cd179e3fe0e6f1aa1c0456686109f7fa854b..1f6b5571b3c3b9c08ff20ff78d84ea9e87986cb6 100644
--- a/helmfiles/values/prometheus.yaml.gotmpl
+++ b/helmfiles/values/prometheus.yaml.gotmpl
@@ -5,6 +5,20 @@ coreDns:
 kubeDns:
   enabled: false
 
+kubeControllerManager:
+  enabled: false
+  # If you enable this, you need the following selector:
+  service:
+    selector:
+      k8s-app: kube-controller-manager
+
+kubeScheduler:
+  enabled: false
+  # If you enable this, you need the following selector:
+  service:
+    selector:
+      k8s-app: kube-scheduler
+
 alertmanager:
   alertmanagerSpec:
     logFormat: logfmt
diff --git a/openappstack/cosmos.py b/openappstack/cosmos.py
index c23b7506dd155e526dd135d613baa75f6aa4b099..5c5f5ab4dd73533c42f35bb069b4cb712ec391a3 100755
--- a/openappstack/cosmos.py
+++ b/openappstack/cosmos.py
@@ -52,11 +52,9 @@ def request_api(resource: str, request_type: str = 'GET',
         if response.content:
             log.debug('Response: %s\n', response.json())
             return json.loads(response.content.decode('utf-8'))
-        else:
-            return None
-    else:
-        raise requests.HTTPError('WARNING: Got response code ',
-                                 response.status_code, response.text)
+        return None
+    raise requests.HTTPError('WARNING: Got response code {0}: {1}'.format(
+        response.status_code, response.text))
 
 
 # API calls
@@ -93,7 +91,7 @@ def create_domain_record(domain: str, name: str, data: str,
     return response['domain_record']
 
 
-def create_droplet(name: str, ssh_key_id: int, region: str = 'ams1',
+def create_droplet(name: str, ssh_key_id: int, region: str = 'ams1',  # pylint: disable=too-many-arguments
                    size: int = 2048, disk: int = 20, image: int = 18):
     """Create a droplet.
 
@@ -121,10 +119,10 @@ def create_droplet(name: str, ssh_key_id: int, region: str = 'ams1',
     return response
 
 
-def delete_domain_record(domain: str, id: int):
+def delete_domain_record(domain: str, record_id: int):
     """Delete a domain record."""
-    log.info('Deleting domain record %s', id)
-    response = request_api('domains/{0}/records/{1}'.format(domain, id),
+    log.info('Deleting domain record %s', record_id)
+    response = request_api('domains/{0}/records/{1}'.format(domain, record_id),
                            'DELETE')
     return response
 
@@ -137,21 +135,21 @@ def delete_domain_records_by_name(domain: str, name_regex: str):
       delete_domain_records_by_name('openappstack.net', '^ci-')
 
     """
-    all = get_domain_records_by_name(domain, name_regex)
-    for record in all:
+    all_records = get_domain_records_by_name(domain, name_regex)
+    for record in all_records:
         delete_domain_record(domain, record['id'])
 
 
-def delete_droplet(id: int):
+def delete_droplet(droplet_id: int):
     """Delete a droplet. Droplet needs to be stopped first."""
-    log.info('Deleting %s', id)
-    response = request_api('droplets/{0}'.format(id), 'DELETE')
+    log.info('Deleting %s', droplet_id)
+    response = request_api('droplets/{0}'.format(droplet_id), 'DELETE')
     return response
 
 
-def get_domain_record(domain: str, id: int):
+def get_domain_record(domain: str, record_id: int):
     """Get details for given domain record."""
-    response = request_api('domains/{0}/records/{1}'.format(domain, id))
+    response = request_api('domains/{0}/records/{1}'.format(domain, record_id))
     return response['domain_record']
 
 
@@ -209,13 +207,13 @@ def get_droplets_by_name(name_regex: str):
     """
     all_droplets = get_droplets()
     matching = [droplet for droplet in all_droplets
-                if re.match('^ci+', droplet['name'])]
+                if re.match(name_regex, droplet['name'])]
     return matching
 
 
-def get_droplet(id: int):
+def get_droplet(droplet_id: int):
     """Get information about specified droplet."""
-    response = request_api('droplets/{0}'.format(id))
+    response = request_api('droplets/{0}'.format(droplet_id))
     return response['droplet']
 
 
@@ -225,9 +223,11 @@ def list_domain_records(domain: str):
 
     log.debug(json.dumps(records, sort_keys=True, indent=2))
 
-    table_records = [[
-        record['id'], record['name'], record['type'], record['data']]
-        for record in records]
+    table_records = [
+        [
+            record['id'], record['name'], record['type'], record['data']
+        ] for record in records
+    ]
     log.info(tabulate(table_records,
                       headers=['ID', 'Name', 'Type', 'Data']))
 
@@ -251,25 +251,25 @@ def list_droplets():
                       headers=['ID', 'Name', 'IPv4', 'Status']))
 
 
-def shutdown_droplet(id: int):
+def shutdown_droplet(droplet_id: int):
     """Shut down specified droplet (through a power_off call)."""
-    log.info('Shutting down %s', id)
+    log.info('Shutting down %s', droplet_id)
     data = {"type": "power_off"}
-    response = request_api('droplets/{0}/actions'.format(id), 'POST', data)
+    response = request_api('droplets/{0}/actions'.format(droplet_id), 'POST', data)
     return response
 
 
-def status_droplet(id: int):
+def status_droplet(droplet_id: int):
     """Get status of specified droplet."""
-    response = get_droplet(id)
+    response = get_droplet(droplet_id)
     return response['status']
 
 
-def terminate_droplet(id: int):
+def terminate_droplet(droplet_id: int):
     """Terminate a droplet by powering it down and deleting it."""
-    shutdown_droplet(id)
-    wait_for_state(id, 'stopped')
-    delete_droplet(id)
+    shutdown_droplet(droplet_id)
+    wait_for_state(droplet_id, 'stopped')
+    delete_droplet(droplet_id)
 
 
 def terminate_droplets_by_name(name_regex: str, ndays: int = 0,
@@ -287,51 +287,51 @@ def terminate_droplets_by_name(name_regex: str, ndays: int = 0,
     threshold_time = (datetime.now(tz=timezone('Europe/Stockholm')) -
                       timedelta(days=ndays)).\
         strftime("%Y-%m-%dT%H:%M:%S+00:00")
-    all = get_droplets()
+    all_droplets = get_droplets()
 
     noterminate_droplets = []
     if 'NO_TERMINATE_DROPLETS' in os.environ:
         noterminate_droplets = os.environ['NO_TERMINATE_DROPLETS'].split(',')
 
-    for droplet in all:
+    for droplet in all_droplets:
         if droplet['name'] not in noterminate_droplets:
             if re.match(name_regex, droplet['name']):
                 if droplet['created_at'] < threshold_time:
                     delete_domain_records_by_name(
-                        domain, '^\*.'+droplet['name'])
+                        domain, r'^\*.'+droplet['name'])
                     delete_domain_records_by_name(domain, '^'+droplet['name'])
                     terminate_droplet(droplet['id'])
 
 
-def wait_for_ssh(ip: str):
+def wait_for_ssh(droplet_ip: str):
     """Wait for ssh to be reachable on port 22."""
-    log.info('Waiting for ssh to become available on ip %s', ip)
+    log.info('Waiting for ssh to become available on ip %s', droplet_ip)
 
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
 
-    while sock.connect_ex((ip, 22)) != 0:
+    while sock.connect_ex((droplet_ip, 22)) != 0:
         sleep(1)
 
-    log.info('SSH became available on ip %s', ip)
+    log.info('SSH became available on ip %s', droplet_ip)
 
 
-def wait_for_state(id: int, state):
+def wait_for_state(droplet_id: int, state):
     """Wait for a droplet to reach a certain state."""
-    log.info('Waiting for droplet %s to reach %s state...', id, state)
-    status = status_droplet(id)
+    log.info('Waiting for droplet %s to reach %s state...', droplet_id, state)
+    status = status_droplet(droplet_id)
     log.debug(status)
 
     while status != state:
         sleep(1)
-        status = status_droplet(id)
+        status = status_droplet(droplet_id)
 
 
 # When called from from ipython, setup
 # logging to console
 try:
-    __IPYTHON__
-    log = logging.getLogger()
+    __IPYTHON__  # pylint: disable=pointless-statement
+    log = logging.getLogger()  # pylint: disable=invalid-name
     log.addHandler(logging.StreamHandler())
     log.setLevel(logging.INFO)
 except NameError:
-    log = logging.getLogger(__name__)
+    log = logging.getLogger(__name__)  # pylint: disable=invalid-name
diff --git a/test/behave/features/grafana.feature b/test/behave/features/grafana.feature
index adc2282d1607d490e62d10122cbac98ea727f88c..e37fdfecc4c94f3d8ff01409f635747e955dfaa9 100644
--- a/test/behave/features/grafana.feature
+++ b/test/behave/features/grafana.feature
@@ -5,12 +5,12 @@ Feature: Test grafana admin login
 
 Scenario: Open grafana
   When I open the grafana URL
-  Then I wait on element "//input[@name='username']" for 25000ms to be visible
+  Then I wait on element "//input[@name='user']" for 25000ms to be visible
   And I expect that element "#inputPassword" is visible
 
 Scenario: Login to grafana
-  Given the element "//input[@name='username']" is visible
-  When I enter the "grafana" "username" in the inputfield "//input[@name='username']"
+  Given the element "//input[@name='user']" is visible
+  When I enter the "grafana" "username" in the inputfield "//input[@name='user']"
   And I enter the "grafana" "password" in the inputfield "#inputPassword"
   And I click on the button "//div[@id='login-view']//button[@type='submit']"
   Then I wait on element "/html/body/grafana-app/sidemenu/a" for 25000ms to be visible