Skip to content
Snippets Groups Projects
Verified Commit bb9008fa authored by Arie Peterson's avatar Arie Peterson
Browse files

Install all applications via flux

parent 9c2b3a32
No related branches found
No related tags found
No related merge requests found
Showing
with 290 additions and 78 deletions
......@@ -45,16 +45,18 @@
- name: production
server: "https://acme-v02.api.letsencrypt.org/directory"
- name: Install cert-manager
- name: Create Kubernetes secret with cert-manager settings
tags:
- helmfile
- config
- flux
- cert-manager
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- helmfile
- cert-manager
vars:
helmfile: '05-cert-manager'
k8s:
state: present
definition:
api_version: v1
kind: Secret
metadata:
namespace: "oas"
name: "cert-manager-settings"
data:
values.yaml: "{{ lookup('template','cert-manager-values.yaml') | b64encode }}"
---
- name: Clone local-storage repo
tags:
- git
- helmfile
- local-storage
git:
repo: 'https://open.greenhost.net/openappstack/local-storage'
dest: '{{ data_directory }}/source/repos/local-storage'
version: '{{ git_local_storage_version }}'
- name: Install local-storage provisioner
- name: Create Kubernetes secret with local-storage values
tags:
- helmfile
- config
- flux
- local-storage
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- helmfile
- local-storage
vars:
helmfile: '00-storage'
k8s:
state: present
definition:
api_version: v1
kind: Secret
metadata:
namespace: "oas"
name: "local-storage-settings"
data:
values.yaml: "{{ lookup('template','local-storage-values.yaml') | b64encode }}"
......@@ -8,10 +8,8 @@
tags: [ helmfile ]
when: '"00-flux" in helmfiles'
- name: Install local-storage
- name: Perform tasks necessary for local-storage
import_tasks: local-storage.yml
tags: [ helmfile ]
when: '"00-storage" in helmfiles'
- name: Install cert-manager
import_tasks: cert-manager.yml
......
---
- name: Create Kubernetes secret with NextCloud values
- name: Create Kubernetes secret with NextCloud settings
tags:
- config
- flux
- oas
- nextcloud
k8s:
state: present
......@@ -13,6 +12,6 @@
kind: Secret
metadata:
namespace: "oas-apps"
name: "oas"
name: "nextcloud-settings"
data:
nextcloud.yaml: "{{ lookup('template','secrets.nextcloud.yaml') | b64encode }}"
values.yaml: "{{ lookup('template','nextcloud-values.yaml') | b64encode }}"
---
- name: Install nginx ingress controller
- name: Create Kubernetes secret with nginx-ingress settings
tags:
- helmfile
- config
- flux
- nginx
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- helmfile
- nginx
vars:
helmfile: '10-nginx'
k8s:
state: present
definition:
api_version: v1
kind: Secret
metadata:
namespace: "oas"
name: "ingress-settings"
data:
values.yaml: "{{ lookup('template','ingress-values.yaml') | b64encode }}"
......@@ -31,21 +31,19 @@
recurse: true
when: prometheus_pv_name.stdout
- name: Install prometheus and grafana
include_role:
name: "helmfile"
tasks_from: "apply"
apply:
tags:
- monitoring
- prometheus
environment:
- GRAFANA_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
- name: Create Kubernetes secret with monitoring settings
tags:
- helmfile
- config
- flux
- monitoring
- prometheus
vars:
helmfile: '15-monitoring'
# Force needed for upgrading from 5 to 6, see
# https://github.com/helm/charts/tree/master/stable/prometheus-operator#upgrading-from-5xx-to-6xx
helmfile_apply_args: '--args="--force"'
k8s:
state: present
definition:
api_version: v1
kind: Secret
metadata:
namespace: "oas"
name: "monitoring-settings"
data:
values.yaml: "{{ lookup('template','monitoring-values.yaml') | b64encode }}"
ingressShim:
{% if acme_staging %}
defaultIssuerName: "letsencrypt-staging"
{% else %}
defaultIssuerName: "letsencrypt-production"
{% endif %}
defaultIssuerKind: ClusterIssuer
controller:
service:
externalIPs: ["{{ ip_address }}"]
resources:
limits:
cpu: 100m
memory: 1Gi
requests:
cpu: 50m
memory: 64Mi
storageDirectory: "/var/lib/OpenAppStack/local-storage"
defaultStorageClass: true
# Depending on which DNS solution you have installed in your cluster, enable the right exporter.
coreDns:
enabled: true
kubeDns:
enabled: false
kubeControllerManager:
enabled: false
# If you enable this, you need the following selector:
service:
selector:
k8s-app: kube-controller-manager
kubeScheduler:
enabled: false
# If you enable this, you need the following selector:
service:
selector:
k8s-app: kube-scheduler
alertmanager:
alertmanagerSpec:
logFormat: logfmt
storage:
volumeClaimTemplate:
metadata:
name: alertmanager
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 2Gi
prometheus:
prometheusSpec:
storageSpec:
volumeClaimTemplate:
metadata:
name: prometheus
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 5Gi
service:
type: NodePort
prometheusOperator:
createCustomResource: false
defaultRules:
rules:
# Currently OAS only focuses on a single-node cluster, and therefore
# the `KubeCPUOvercommit` and `KubeMemOvercommit` alerts always
# fire, because a single-node cluster is not considered a redundant setup.
# We disable the whole `kubernetesResources` alert bundle and install
# the other alerts which make sense from this bundle in the next step below
# (`additionalPrometheusRulesMap`).
# See https://open.greenhost.net/openappstack/openappstack/issues/368 for
# details.
kubernetesResources: false
additionalPrometheusRulesMap:
kubernetes-resources:
groups:
- name: kubernetes-resources
rules:
- alert: KubeCPUOvercommit
annotations:
message: Cluster has overcommitted CPU resource requests for Namespaces.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit
expr: |-
sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="cpu"})
/
sum(node:node_num_cpu:sum)
> 1.5
for: 5m
labels:
severity: warning
- alert: KubeMemOvercommit
annotations:
message: Cluster has overcommitted memory resource requests for Namespaces.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememovercommit
expr: |-
sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="memory"})
/
sum(node_memory_MemTotal_bytes{job="node-exporter"})
> 1.5
for: 5m
labels:
severity: warning
- alert: KubeQuotaExceeded
annotations:
{% raw %}
message: Namespace {{`{{ $labels.namespace }}`}} is using {{`{{ printf "%0.0f" $value }}`}}% of its {{`{{ $labels.resource }}`}} quota.
{% endraw %}
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubequotaexceeded
expr: |-
100 * kube_resourcequota{job="kube-state-metrics", type="used"}
/ ignoring(instance, job, type)
(kube_resourcequota{job="kube-state-metrics", type="hard"} > 0)
> 90
for: 15m
labels:
severity: warning
- alert: CPUThrottlingHigh
annotations:
{% raw %}
message: '{{ printf "%0.0f" $value }}% throttling of CPU in namespace {{ $labels.namespace }} for container {{ $labels.container_name }} in pod {{ $labels.pod_name }}.'
{% endraw %}
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh
expr: |-
100 * sum(increase(container_cpu_cfs_throttled_periods_total{container_name!="", }[5m])) by (container_name, pod_name, namespace)
/
sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container_name, pod_name, namespace)
> 25
for: 15m
labels:
severity: warning
grafana:
adminPassword: "{{ grafana_admin_password }}"
ingress:
enabled: true
annotations:
kubernetes.io/tls-acme: "true"
hosts:
- "grafana.{{ domain }}"
tls:
- secretName: grafana-tls
hosts:
- "grafana.{{ domain }}"
persistence:
enabled: true
size: 2Gi
accessModes: ["ReadWriteOnce"]
---
# Flux HelmRelease: installs cert-manager 0.11.0 from the Jetstack chart
# repository into the "oas" namespace. Chart values come from the
# "cert-manager-settings" Secret (presumably created by the accompanying
# Ansible task — verify against the playbook).
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: cert-manager
  namespace: oas
  annotations:
    # Disable Flux's automated release upgrades for this chart.
    flux.weave.works/automated: "false"
spec:
  releaseName: cert-manager
  chart:
    repository: https://charts.jetstack.io
    name: cert-manager
    version: 0.11.0
  valuesFrom:
    - secretKeyRef:
        name: cert-manager-settings
        key: values.yaml
---
# Flux HelmRelease: installs the local-storage provisioner straight from
# its git repository (master branch, chart at the repo root) into the
# "oas" namespace. Chart values come from the "local-storage-settings"
# Secret (presumably created by the accompanying Ansible task — verify).
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: local-storage
  namespace: oas
  annotations:
    # Disable Flux's automated release upgrades for this chart.
    flux.weave.works/automated: "false"
spec:
  releaseName: local-storage
  chart:
    git: https://open.greenhost.net/openappstack/local-storage
    ref: master
    path: .
  valuesFrom:
    - secretKeyRef:
        name: local-storage-settings
        key: values.yaml
  # Helm install/upgrade timeout in seconds.
  timeout: 120
---
# Flux HelmRelease: installs the prometheus-operator chart 7.4.0
# (Prometheus + Grafana monitoring stack) into the "oas" namespace.
# Chart values come from the "monitoring-settings" Secret (presumably
# created by the accompanying Ansible task — verify).
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: monitoring
  namespace: oas
  annotations:
    # Disable Flux's automated release upgrades for this chart.
    flux.weave.works/automated: "false"
spec:
  releaseName: monitoring
  chart:
    repository: https://kubernetes-charts.storage.googleapis.com/
    name: prometheus-operator
    version: 7.4.0
  valuesFrom:
    - secretKeyRef:
        name: monitoring-settings
        key: values.yaml
......@@ -16,6 +16,6 @@ spec:
path: .
valuesFrom:
- secretKeyRef:
name: oas
key: nextcloud.yaml
name: nextcloud-settings
key: values.yaml
timeout: 900
---
# Flux HelmRelease: installs the nginx-ingress controller chart 1.26.1
# into the "oas" namespace. Chart values come from the "ingress-settings"
# Secret (presumably created by the accompanying Ansible task — verify).
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: ingress
  namespace: oas
  annotations:
    # Disable Flux's automated release upgrades for this chart.
    flux.weave.works/automated: "false"
spec:
  releaseName: ingress
  chart:
    repository: https://kubernetes-charts.storage.googleapis.com/
    name: nginx-ingress
    version: 1.26.1
  valuesFrom:
    - secretKeyRef:
        name: ingress-settings
        key: values.yaml
# Helmfile configuration for the local-storage chart in the "oas"
# environment. Note: may be a partial view of a longer helmfile.
environments:
  oas:
    values:
      # Environment-specific overrides, rendered on the target host.
      - "/etc/OpenAppStack/values/local.yaml"

releases:
  # Install the local-storage chart from the locally cloned repository
  # (cloned by the Ansible git task), using the generated values file.
  - name: "oas-{{ .Environment.Values.releaseName }}-local-storage"
    namespace: "oas"
    chart: "../../repos/local-storage/"
    values:
      - "../values/local-storage.yaml"
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment