diff --git a/ansible/roles/apps/tasks/flux.yml b/ansible/roles/apps/tasks/flux.yml
index f2565c9c8cd1640c64db214dab6b0878fa3228f2..ddafeb9fffde54bd692dac6014f5ffa9cf0325b9 100644
--- a/ansible/roles/apps/tasks/flux.yml
+++ b/ansible/roles/apps/tasks/flux.yml
@@ -27,36 +27,6 @@
   # helm-operator
   shell: helm upgrade --install --repo "https://charts.fluxcd.io" --namespace oas --version 1.0.1 --set helm.versions=v3 --set configureRepositories.enable=true --set configureRepositories.repositories[0].name=stable --set configureRepositories.repositories[0].url=https://kubernetes-charts.storage.googleapis.com --set configureRepositories.repositories[1].name=bitnami --set configureRepositories.repositories[1].url=https://charts.bitnami.com/bitnami --set chartsSyncInterval=20m --set statusUpdateInterval=30s helm-operator helm-operator
 
-- name: Create Kubernetes secret with local-path-provisioner settings
-  tags:
-    - config
-    - flux
-    - local-path-provisioner
-  vars:
-    flux:
-      name: "local-path-provisioner"
-      namespace: "oas"
-  include_tasks:
-    file: flux_secret.yml
-    apply:
-      tags:
-        - config
-        - flux
-        - local-path-provisioner
-
-# We have to install local-path-provisioner before other charts, otherwise the PVCs
-# created by those charts will not have the right default storageclass assigned
-# to them.
-# It will still be managed by flux afterwards.
-- name: Create local-path-provisioner HelmResource
-  tags:
-    - config
-    - flux
-    - local-path-provisioner
-  k8s:
-    state: present
-    resource_definition: "{{ lookup('file', 'local-path-provisioner.yaml') | from_yaml }}"
-
 - name: Install flux
   tags:
     - flux
diff --git a/ansible/roles/apps/templates/settings/local-path-provisioner.yaml b/ansible/roles/apps/templates/settings/local-path-provisioner.yaml
deleted file mode 100644
index ba3e88d9ab5f2b64672582682f598cbcf8f6e13c..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/templates/settings/local-path-provisioner.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-nodePathMap:
-  - node: DEFAULT_PATH_FOR_NON_LISTED_NODES
-    paths:
-      - "/var/lib/OpenAppStack/local-storage"
-storageClass:
-  defaultClass: true
-  name: "local-storage"
-# We temporarily use our own build, as we want the human-readable directories,
-# but that is not yet released.
-image:
-  repository: "open.greenhost.net:4567/openappstack/openappstack/local-path-provisioner"
-  tag: "unreleased"
diff --git a/ansible/roles/setup/tasks/k3s.yml b/ansible/roles/setup/tasks/k3s.yml
index c6b2411d8e418f1a827dad143a7501d6f6f437b9..74f230644e1717f96b13d16502d1f9477d53cc4f 100644
--- a/ansible/roles/setup/tasks/k3s.yml
+++ b/ansible/roles/setup/tasks/k3s.yml
@@ -1,7 +1,16 @@
 - name: Build Cluster
   tags:
     - rke
-  shell: curl -sfL https://get.k3s.io | sh -
+  shell: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --no-deploy traefik" sh -
+
+- name: Replace hostname in kubeconfig
+  lineinfile:
+    dest: "/etc/rancher/k3s/k3s.yaml"
+    state: present
+    regexp: "^    server:"
+    line: "    server: https://{{ ip_address }}:6443"
+    insertafter: "- cluster:"
+
 
 - name: Set kubeconfig variable
   lineinfile:
diff --git a/flux/oas/local-path-provisioner.yaml b/flux/oas/local-path-provisioner.yaml
deleted file mode 100644
index 232c3e97a01c6df40146f7fbee4b88d3b1722bd0..0000000000000000000000000000000000000000
--- a/flux/oas/local-path-provisioner.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: local-path-provisioner
-  namespace: oas
-  annotations:
-    flux.weave.works/automated: "false"
-spec:
-  releaseName: local-path-provisioner
-  chart:
-    git: https://github.com/rancher/local-path-provisioner
-    ref: aeb020883b0968c6c9294d0c1e635c813bccc437
-    path: deploy/chart
-  valuesFrom:
-    - secretKeyRef:
-        name: local-path-provisioner-settings
-        key: values.yaml
-  timeout: 120
diff --git a/test/pytest/test_docker.py b/test/pytest/test_docker.py
deleted file mode 100644
index ff116ccda5b119218b6d67deef7d20323e470f2e..0000000000000000000000000000000000000000
--- a/test/pytest/test_docker.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import pytest
-
-
-@pytest.mark.testinfra
-def test_docker_is_installed(host):
-    docker = host.package("docker-ce")
-    assert docker.is_installed
-    assert docker.version.startswith("5:18.09")
-
-
-@pytest.mark.testinfra
-def test_docker_running_and_enabled(host):
-    docker = host.service("docker")
-    assert docker.is_running
-    assert docker.is_enabled
diff --git a/test/pytest/test_helmreleases.py b/test/pytest/test_helmreleases.py
index 1612469d8b5e68bcfeb1156e7c9733371cbb31d7..ff9f488404430f8bf7a5e33d4ffe194abb357253 100644
--- a/test/pytest/test_helmreleases.py
+++ b/test/pytest/test_helmreleases.py
@@ -7,9 +7,9 @@ This module contains tests for:
   (i.e., condition = Ready for all the related pods)
 """
 import os
-import pytest
 from kubernetes import client, config
 from kubernetes.client.rest import ApiException
+import pytest
 
 EXPECTED_RELEASES = {
     'cert-manager': [
@@ -17,7 +17,6 @@ EXPECTED_RELEASES = {
     ],
     'oas': [
         'ingress',
-        'local-path-provisioner',
         'prometheus',
         'grafana',
         'loki-stack',
@@ -64,7 +63,10 @@ def get_release_status(name, namespace, api):
         else:
             raise
         print("**** NOT DEPLOYED, status: %s *****" % release_status)
-
+    except KeyError as ex:
+        # Take a look at the 'phase' if 'releaseStatus' does not exist
+        release_status = release['status']['phase']
+        print(release_status)
     return release_status
 
 
@@ -97,7 +99,7 @@ def run_around_tests():
 
 
 @pytest.mark.helmreleases
-def test_helmreleases(host):
+def test_helmreleases():
     """
     Checks if all desired HelmReleases installed by weave flux are in
     'deployed' state.
@@ -115,7 +117,7 @@
 
 
 @pytest.mark.apps_running
-def test_apps_running(host):
+def test_apps_running():
     """
     Checks if all the pods related to releases in EXPECTED_RELEASES are
     ready
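
Note (not part of the patch): a minimal sketch of the status fallback that the `get_release_status` change introduces, using plain dicts in place of the Kubernetes custom-objects API. The helper name `release_status_from` and the sample status values are illustrative assumptions, not code from the repository.

```python
def release_status_from(release):
    """Prefer 'releaseStatus'; fall back to 'phase' when it is absent,
    mirroring the KeyError handler added to get_release_status."""
    try:
        return release['status']['releaseStatus']
    except KeyError:
        # Some HelmRelease objects apparently only expose a 'phase' field.
        return release['status']['phase']


if __name__ == '__main__':
    deployed = {'status': {'releaseStatus': 'deployed'}}   # assumed example status
    pending = {'status': {'phase': 'ChartFetched'}}        # assumed example status
    assert release_status_from(deployed) == 'deployed'
    assert release_status_from(pending) == 'ChartFetched'
    print("fallback behaves as expected")
```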