diff --git a/ansible/roles/apps/tasks/core.yml b/ansible/roles/apps/tasks/core.yml
index 6abc2dfd275d0c65c3fdc5547968cc70dab2cdd4..5cf16e1bd0d027da3811965cd3e509994ea94803 100644
--- a/ansible/roles/apps/tasks/core.yml
+++ b/ansible/roles/apps/tasks/core.yml
@@ -44,8 +44,8 @@
 #   --set resources.limits.cpu=500m \
 #   --set resources.limits.memory=1Gi \
 #   # Delay the liveness and readiness probe a bit to prevent restarts
-#   --set livenessProbe.initialDelaySeconds=10s \
-#   --set readinessProbe.initialDelaySeconds=10s \
+#   --set livenessProbe.initialDelaySeconds=10 \
+#   --set readinessProbe.initialDelaySeconds=10 \
 #   # Helm release name
 #   helm-operator
 #   # Chart name
@@ -68,8 +68,8 @@
     --set statusUpdateInterval=30s \
     --set resources.limits.cpu=500m \
     --set resources.limits.memory=1Gi \
-    --set livenessProbe.initialDelaySeconds=10s \
-    --set readinessProbe.initialDelaySeconds=10s \
+    --set livenessProbe.initialDelaySeconds=10 \
+    --set readinessProbe.initialDelaySeconds=10 \
     {{ extra_opts }} \
     helm-operator \
     helm-operator
diff --git a/ansible/roles/apps/templates/settings/prometheus-stack.yaml b/ansible/roles/apps/templates/settings/prometheus-stack.yaml
index 013d1c0346b46994b3f99df0c66e35d633b0a9ae..0b202537469f14c44045afcf44ac37ae13c652f7 100644
--- a/ansible/roles/apps/templates/settings/prometheus-stack.yaml
+++ b/ansible/roles/apps/templates/settings/prometheus-stack.yaml
@@ -201,14 +201,13 @@ grafana:
   plugins:
     - grafana-piechart-panel
 
-# Resource limits suggested by cablespagetti
-#  resources:
-#    limits:
-#      cpu: 500m
-#      memory: 128Mi
-#    requests:
-#      cpu: 25m
-#      memory: 64Mi
+  resources:
+    limits:
+      cpu: 40m
+      memory: 256Mi
+    requests:
+      cpu: 10m
+      memory: 128Mi
 
 #
 # sidecar:
 #   resources:
 #     limits:
 #       cpu: 100m
 #       memory: 128Mi
 #     requests:
 #       cpu: 5m
 #       memory: 64Mi
@@ -219,32 +218,32 @@ grafana:
 
-#prometheusOperator:
-#  resources:
-#    limits:
-#      cpu: 1
-#      memory: 512Mi
-#    requests:
-#      cpu: 50m
-#      memory: 128Mi
+prometheusOperator:
+  resources:
+    limits:
+      cpu: 200m
+      memory: 256Mi
+    requests:
+      cpu: 50m
+      memory: 128Mi
 # priorityClassName: high-priority
 
-#prometheus-node-exporter:
-#  resources:
-#    limits:
-#      cpu: 50m
-#      memory: 50Mi
-#    requests:
-#      cpu: 5m
-#      memory: 16Mi
+prometheus-node-exporter:
+  resources:
+    limits:
+      cpu: 40m
+      memory: 32Mi
+    requests:
+      cpu: 10m
+      memory: 16Mi
 # priorityClassName: high-priority
 
 kube-state-metrics:
-#  resources:
-#    limits:
-#      cpu: 1
-#      memory: 512Mi
-#    requests:
-#      cpu: 5m
-#      memory: 128Mi
+  resources:
+    limits:
+      cpu: 40m
+      memory: 64Mi
+    requests:
+      cpu: 10m
+      memory: 32Mi
 # priorityClassName: high-priority
diff --git a/ansible/roles/apps/templates/settings/rocketchat.yaml b/ansible/roles/apps/templates/settings/rocketchat.yaml
index ff34541dbcbaeaedc02b51d7942b7d5f0cd92279..b089b966de6a852ebc2aa65800573519659bc099 100644
--- a/ansible/roles/apps/templates/settings/rocketchat.yaml
+++ b/ansible/roles/apps/templates/settings/rocketchat.yaml
@@ -88,6 +88,14 @@ podAnnotations:
   # Let the backup system include rocketchat data.
   backup.velero.io/backup-volumes: "rocket-data"
 
+resources:
+  limits:
+    cpu: 20m
+    memory: 1024Mi
+  requests:
+    cpu: 10m
+    memory: 512Mi
+
 mongodb:
   mongodbRootPassword: "{{ rocketchat_mongodb_root_password }}"
   mongodbPassword: "{{ rocketchat_mongodb_password }}"
@@ -97,6 +105,13 @@ mongodb:
   persistence:
     enabled: true
     size: 2Gi
+  resources:
+    limits:
+      cpu: 40m
+      memory: 1024Mi
+    requests:
+      cpu: 20m
+      memory: 512Mi
 
 image:
   tag: 3.13.0
diff --git a/ansible/roles/apps/templates/settings/wordpress.yaml b/ansible/roles/apps/templates/settings/wordpress.yaml
index 526ec1ec1acff6bf34da502b96d1d5a321ef4870..85905ef3b7587670aaad732b4a1d77155e0789ca 100644
--- a/ansible/roles/apps/templates/settings/wordpress.yaml
+++ b/ansible/roles/apps/templates/settings/wordpress.yaml
@@ -1,3 +1,4 @@
+---
 wordpress:
   config:
     db:
@@ -47,18 +48,25 @@ database:
   annotations:
     # Let the backup system include nextcloud database data.
     backup.velero.io/backup-volumes: "data"
+  resources:
+    limits:
+      cpu: 20m
+      memory: 512Mi
+    requests:
+      cpu: 10m
+      memory: 256Mi
   replication:
     enabled: false
 
 # It's advisable to set resource limits to prevent your K8s cluster from
 # crashing
-# resources:
-#   limits:
-#     cpu: 100m
-#     memory: 512Mi
-#   requests:
-#     cpu: 50m
-#     memory: 256Mi
+resources:
+  limits:
+    cpu: 50m
+    memory: 256Mi
+  requests:
+    cpu: 10m
+    memory: 128Mi
 
 ingress:
   enabled: true
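
Note on the core.yml hunks above: Kubernetes defines initialDelaySeconds as a plain integer number of seconds, so the duration-style value 10s fails validation, while helm-operator's own --set statusUpdateInterval=30s is a string flag and keeps its unit suffix. A minimal sketch of the probe and resources block these --set flags render, assuming the chart's defaults; the /healthz path and port 3030 are illustrative assumptions, not taken from the chart:

# Illustrative rendered values -- initialDelaySeconds takes a bare
# integer, while CPU/memory quantities keep unit suffixes (m, Mi, Gi).
livenessProbe:
  httpGet:
    path: /healthz          # assumed endpoint, check the chart's defaults
    port: 3030              # assumed port, check the chart's defaults
  initialDelaySeconds: 10   # integer seconds; "10s" is rejected by the API
readinessProbe:
  httpGet:
    path: /healthz
    port: 3030
  initialDelaySeconds: 10
resources:
  limits:
    cpu: 500m
    memory: 1Gi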