From 9c641830b17dd61fb684642a53e2e7592a5d1c6b Mon Sep 17 00:00:00 2001 From: Kevin Wilder Date: Wed, 23 Dec 2020 06:51:13 -0700 Subject: [PATCH 1/8] explode subchart archives --- chart/charts/cert-manager-v0.10.1.tgz | Bin 11396 -> 0 bytes chart/charts/cert-manager/.helmignore | 21 + chart/charts/cert-manager/Chart.yaml | 16 + chart/charts/cert-manager/OWNERS | 8 + chart/charts/cert-manager/README.md | 154 ++ .../cert-manager/cainjector/.helmignore | 21 + .../charts/cert-manager/cainjector/Chart.yaml | 17 + .../cainjector/templates/NOTES.txt | 0 .../cainjector/templates/_helpers.tpl | 32 + .../cainjector/templates/deployment.yaml | 75 + .../cainjector/templates/rbac.yaml | 50 + .../cainjector/templates/serviceaccount.yaml | 14 + .../cert-manager/cainjector/values.yaml | 42 + .../charts/cainjector/.helmignore | 21 + .../cert-manager/charts/cainjector/Chart.yaml | 16 + .../charts/cainjector/templates/NOTES.txt | 0 .../charts/cainjector/templates/_helpers.tpl | 32 + .../cainjector/templates/deployment.yaml | 75 + .../charts/cainjector/templates/rbac.yaml | 50 + .../cainjector/templates/serviceaccount.yaml | 14 + .../charts/cainjector/values.yaml | 42 + chart/charts/cert-manager/requirements.yaml | 6 + chart/charts/cert-manager/templates/NOTES.txt | 15 + .../cert-manager/templates/_helpers.tpl | 92 ++ .../cert-manager/templates/deployment.yaml | 135 ++ chart/charts/cert-manager/templates/rbac.yaml | 420 +++++ .../cert-manager/templates/service.yaml | 22 + .../templates/serviceaccount.yaml | 16 + .../templates/servicemonitor.yaml | 35 + .../templates/webhook-apiservice.yaml | 22 + .../templates/webhook-deployment.yaml | 82 + .../templates/webhook-mutating-webhook.yaml | 39 + .../cert-manager/templates/webhook-rbac.yaml | 76 + .../templates/webhook-service.yaml | 24 + .../templates/webhook-serviceaccount.yaml | 16 + .../templates/webhook-validating-webhook.yaml | 48 + chart/charts/cert-manager/values.yaml | 172 ++ chart/charts/gitlab-runner-0.18.1.tgz | Bin 14037 -> 0 
bytes chart/charts/gitlab-runner/.gitlab-ci.yml | 66 + .../gitlab-runner/.gitlab/changelog.yml | 36 + chart/charts/gitlab-runner/.helmignore | 24 + chart/charts/gitlab-runner/CHANGELOG.md | 183 ++ chart/charts/gitlab-runner/CONTRIBUTING.md | 16 + chart/charts/gitlab-runner/Chart.yaml | 16 + chart/charts/gitlab-runner/LICENSE | 22 + chart/charts/gitlab-runner/Makefile | 20 + chart/charts/gitlab-runner/NOTICE | 30 + chart/charts/gitlab-runner/README.md | 3 + .../charts/gitlab-runner/templates/NOTES.txt | 14 + .../charts/gitlab-runner/templates/_cache.tpl | 28 + .../gitlab-runner/templates/_env_vars.tpl | 95 ++ .../gitlab-runner/templates/_helpers.tpl | 78 + .../gitlab-runner/templates/configmap.yaml | 129 ++ .../gitlab-runner/templates/deployment.yaml | 160 ++ chart/charts/gitlab-runner/templates/hpa.yaml | 16 + .../gitlab-runner/templates/role-binding.yaml | 19 + .../charts/gitlab-runner/templates/role.yaml | 23 + .../gitlab-runner/templates/secrets.yaml | 15 + .../templates/service-account.yaml | 15 + chart/charts/gitlab-runner/values.yaml | 389 +++++ chart/charts/grafana-4.0.1.tgz | Bin 18232 -> 0 bytes chart/charts/grafana/.helmignore | 23 + chart/charts/grafana/Chart.yaml | 19 + chart/charts/grafana/README.md | 305 ++++ chart/charts/grafana/ci/default-values.yaml | 1 + .../ci/with-dashboard-json-values.yaml | 53 + .../grafana/ci/with-dashboard-values.yaml | 19 + .../grafana/dashboards/custom-dashboard.json | 1 + chart/charts/grafana/templates/NOTES.txt | 37 + chart/charts/grafana/templates/_helpers.tpl | 51 + chart/charts/grafana/templates/_pod.tpl | 360 ++++ .../charts/grafana/templates/clusterrole.yaml | 28 + .../grafana/templates/clusterrolebinding.yaml | 23 + .../configmap-dashboard-provider.yaml | 27 + chart/charts/grafana/templates/configmap.yaml | 72 + .../templates/dashboards-json-configmap.yaml | 38 + .../charts/grafana/templates/deployment.yaml | 49 + .../grafana/templates/headless-service.yaml | 22 + chart/charts/grafana/templates/ingress.yaml | 41 + 
.../templates/poddisruptionbudget.yaml | 25 + .../grafana/templates/podsecuritypolicy.yaml | 55 + chart/charts/grafana/templates/pvc.yaml | 29 + chart/charts/grafana/templates/role.yaml | 35 + .../charts/grafana/templates/rolebinding.yaml | 30 + .../charts/grafana/templates/secret-env.yaml | 17 + chart/charts/grafana/templates/secret.yaml | 23 + chart/charts/grafana/templates/service.yaml | 50 + .../grafana/templates/serviceaccount.yaml | 16 + .../charts/grafana/templates/statefulset.yaml | 49 + .../templates/tests/test-configmap.yaml | 20 + .../tests/test-podsecuritypolicy.yaml | 32 + .../grafana/templates/tests/test-role.yaml | 17 + .../templates/tests/test-rolebinding.yaml | 20 + .../templates/tests/test-serviceaccount.yaml | 12 + .../charts/grafana/templates/tests/test.yaml | 67 + chart/charts/grafana/values.yaml | 464 ++++++ chart/charts/postgresql-8.9.4.tgz | Bin 33145 -> 0 bytes chart/charts/postgresql/.helmignore | 21 + chart/charts/postgresql/Chart.yaml | 23 + chart/charts/postgresql/README.md | 580 +++++++ .../postgresql/ci/commonAnnotations.yaml | 4 + .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + chart/charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + chart/charts/postgresql/templates/NOTES.txt | 60 + .../charts/postgresql/templates/_helpers.tpl | 452 +++++ .../postgresql/templates/configmap.yaml | 29 + .../templates/extended-config-configmap.yaml | 24 + .../templates/initialization-configmap.yaml | 27 + .../templates/metrics-configmap.yaml | 16 + .../postgresql/templates/metrics-svc.yaml | 29 + .../postgresql/templates/networkpolicy.yaml | 41 + .../templates/podsecuritypolicy.yaml | 40 + .../postgresql/templates/prometheusrule.yaml | 26 + chart/charts/postgresql/templates/role.yaml | 22 + .../postgresql/templates/rolebinding.yaml | 22 + .../charts/postgresql/templates/secrets.yaml | 26 + 
.../postgresql/templates/serviceaccount.yaml | 14 + .../postgresql/templates/servicemonitor.yaml | 37 + .../templates/statefulset-slaves.yaml | 302 ++++ .../postgresql/templates/statefulset.yaml | 457 +++++ .../postgresql/templates/svc-headless.yaml | 22 + .../charts/postgresql/templates/svc-read.yaml | 46 + chart/charts/postgresql/templates/svc.yaml | 44 + .../charts/postgresql/values-production.yaml | 556 +++++++ chart/charts/postgresql/values.schema.json | 103 ++ chart/charts/postgresql/values.yaml | 562 +++++++ chart/charts/prometheus-10.0.0.tgz | Bin 26549 -> 0 bytes chart/charts/prometheus/.helmignore | 23 + chart/charts/prometheus/Chart.yaml | 20 + chart/charts/prometheus/README.md | 476 ++++++ chart/charts/prometheus/templates/NOTES.txt | 112 ++ .../charts/prometheus/templates/_helpers.tpl | 276 ++++ .../templates/alertmanager-clusterrole.yaml | 21 + .../alertmanager-clusterrolebinding.yaml | 16 + .../templates/alertmanager-configmap.yaml | 14 + .../templates/alertmanager-deployment.yaml | 134 ++ .../templates/alertmanager-ingress.yaml | 38 + .../templates/alertmanager-networkpolicy.yaml | 19 + .../templates/alertmanager-pdb.yaml | 13 + .../alertmanager-podsecuritypolicy.yaml | 48 + .../templates/alertmanager-pvc.yaml | 32 + .../alertmanager-service-headless.yaml | 30 + .../templates/alertmanager-service.yaml | 52 + .../alertmanager-serviceaccount.yaml | 8 + .../templates/alertmanager-statefulset.yaml | 150 ++ .../kube-state-metrics-clusterrole.yaml | 87 + ...kube-state-metrics-clusterrolebinding.yaml | 16 + .../kube-state-metrics-deployment.yaml | 68 + .../kube-state-metrics-networkpolicy.yaml | 19 + .../templates/kube-state-metrics-pdb.yaml | 13 + .../kube-state-metrics-podsecuritypolicy.yaml | 42 + .../kube-state-metrics-serviceaccount.yaml | 8 + .../templates/kube-state-metrics-svc.yaml | 40 + .../templates/node-exporter-daemonset.yaml | 116 ++ .../node-exporter-podsecuritypolicy.yaml | 55 + .../templates/node-exporter-role.yaml | 17 + 
.../templates/node-exporter-rolebinding.yaml | 19 + .../templates/node-exporter-service.yaml | 40 + .../node-exporter-serviceaccount.yaml | 8 + .../templates/pushgateway-clusterrole.yaml | 21 + .../pushgateway-clusterrolebinding.yaml | 16 + .../templates/pushgateway-deployment.yaml | 97 ++ .../templates/pushgateway-ingress.yaml | 35 + .../templates/pushgateway-networkpolicy.yaml | 19 + .../prometheus/templates/pushgateway-pdb.yaml | 13 + .../pushgateway-podsecuritypolicy.yaml | 44 + .../prometheus/templates/pushgateway-pvc.yaml | 30 + .../templates/pushgateway-service.yaml | 40 + .../templates/pushgateway-serviceaccount.yaml | 8 + .../templates/server-clusterrole.yaml | 47 + .../templates/server-clusterrolebinding.yaml | 16 + .../templates/server-configmap.yaml | 73 + .../templates/server-deployment.yaml | 212 +++ .../prometheus/templates/server-ingress.yaml | 40 + .../templates/server-networkpolicy.yaml | 17 + .../prometheus/templates/server-pdb.yaml | 13 + .../templates/server-podsecuritypolicy.yaml | 53 + .../prometheus/templates/server-pvc.yaml | 34 + .../templates/server-service-headless.yaml | 26 + .../prometheus/templates/server-service.yaml | 50 + .../templates/server-serviceaccount.yaml | 10 + .../templates/server-statefulset.yaml | 220 +++ .../prometheus/templates/server-vpa.yaml | 24 + chart/charts/prometheus/values.yaml | 1468 +++++++++++++++++ chart/charts/redis-10.3.4.tgz | Bin 29578 -> 0 bytes chart/charts/redis/.helmignore | 3 + chart/charts/redis/Chart.yaml | 21 + chart/charts/redis/README.md | 490 ++++++ chart/charts/redis/ci/default-values.yaml | 1 + chart/charts/redis/ci/dev-values.yaml | 9 + chart/charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 ++++++ .../redis/ci/production-sentinel-values.yaml | 524 ++++++ chart/charts/redis/ci/production-values.yaml | 525 ++++++ chart/charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + 
chart/charts/redis/templates/NOTES.txt | 104 ++ chart/charts/redis/templates/_helpers.tpl | 355 ++++ chart/charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + chart/charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 79 + chart/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 410 +++++ .../redis/templates/redis-master-svc.yaml | 39 + chart/charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 428 +++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + chart/charts/redis/templates/secret.yaml | 14 + chart/charts/redis/values-production.yaml | 583 +++++++ chart/charts/redis/values.schema.json | 152 ++ chart/charts/redis/values.yaml | 583 +++++++ 220 files changed, 19019 insertions(+) delete mode 100644 chart/charts/cert-manager-v0.10.1.tgz create mode 100755 chart/charts/cert-manager/.helmignore create mode 100755 chart/charts/cert-manager/Chart.yaml create mode 100755 chart/charts/cert-manager/OWNERS create mode 100755 chart/charts/cert-manager/README.md create mode 100755 chart/charts/cert-manager/cainjector/.helmignore create mode 100755 chart/charts/cert-manager/cainjector/Chart.yaml create mode 100755 chart/charts/cert-manager/cainjector/templates/NOTES.txt create mode 100755 chart/charts/cert-manager/cainjector/templates/_helpers.tpl create mode 100755 chart/charts/cert-manager/cainjector/templates/deployment.yaml create mode 100755 chart/charts/cert-manager/cainjector/templates/rbac.yaml create mode 100755 chart/charts/cert-manager/cainjector/templates/serviceaccount.yaml create mode 100755 chart/charts/cert-manager/cainjector/values.yaml create mode 
100755 chart/charts/cert-manager/charts/cainjector/.helmignore create mode 100755 chart/charts/cert-manager/charts/cainjector/Chart.yaml create mode 100755 chart/charts/cert-manager/charts/cainjector/templates/NOTES.txt create mode 100755 chart/charts/cert-manager/charts/cainjector/templates/_helpers.tpl create mode 100755 chart/charts/cert-manager/charts/cainjector/templates/deployment.yaml create mode 100755 chart/charts/cert-manager/charts/cainjector/templates/rbac.yaml create mode 100755 chart/charts/cert-manager/charts/cainjector/templates/serviceaccount.yaml create mode 100755 chart/charts/cert-manager/charts/cainjector/values.yaml create mode 100755 chart/charts/cert-manager/requirements.yaml create mode 100755 chart/charts/cert-manager/templates/NOTES.txt create mode 100755 chart/charts/cert-manager/templates/_helpers.tpl create mode 100755 chart/charts/cert-manager/templates/deployment.yaml create mode 100755 chart/charts/cert-manager/templates/rbac.yaml create mode 100755 chart/charts/cert-manager/templates/service.yaml create mode 100755 chart/charts/cert-manager/templates/serviceaccount.yaml create mode 100755 chart/charts/cert-manager/templates/servicemonitor.yaml create mode 100755 chart/charts/cert-manager/templates/webhook-apiservice.yaml create mode 100755 chart/charts/cert-manager/templates/webhook-deployment.yaml create mode 100755 chart/charts/cert-manager/templates/webhook-mutating-webhook.yaml create mode 100755 chart/charts/cert-manager/templates/webhook-rbac.yaml create mode 100755 chart/charts/cert-manager/templates/webhook-service.yaml create mode 100755 chart/charts/cert-manager/templates/webhook-serviceaccount.yaml create mode 100755 chart/charts/cert-manager/templates/webhook-validating-webhook.yaml create mode 100755 chart/charts/cert-manager/values.yaml delete mode 100644 chart/charts/gitlab-runner-0.18.1.tgz create mode 100755 chart/charts/gitlab-runner/.gitlab-ci.yml create mode 100755 
chart/charts/gitlab-runner/.gitlab/changelog.yml create mode 100755 chart/charts/gitlab-runner/.helmignore create mode 100755 chart/charts/gitlab-runner/CHANGELOG.md create mode 100755 chart/charts/gitlab-runner/CONTRIBUTING.md create mode 100755 chart/charts/gitlab-runner/Chart.yaml create mode 100755 chart/charts/gitlab-runner/LICENSE create mode 100755 chart/charts/gitlab-runner/Makefile create mode 100755 chart/charts/gitlab-runner/NOTICE create mode 100755 chart/charts/gitlab-runner/README.md create mode 100755 chart/charts/gitlab-runner/templates/NOTES.txt create mode 100755 chart/charts/gitlab-runner/templates/_cache.tpl create mode 100755 chart/charts/gitlab-runner/templates/_env_vars.tpl create mode 100755 chart/charts/gitlab-runner/templates/_helpers.tpl create mode 100755 chart/charts/gitlab-runner/templates/configmap.yaml create mode 100755 chart/charts/gitlab-runner/templates/deployment.yaml create mode 100755 chart/charts/gitlab-runner/templates/hpa.yaml create mode 100755 chart/charts/gitlab-runner/templates/role-binding.yaml create mode 100755 chart/charts/gitlab-runner/templates/role.yaml create mode 100755 chart/charts/gitlab-runner/templates/secrets.yaml create mode 100755 chart/charts/gitlab-runner/templates/service-account.yaml create mode 100755 chart/charts/gitlab-runner/values.yaml delete mode 100644 chart/charts/grafana-4.0.1.tgz create mode 100755 chart/charts/grafana/.helmignore create mode 100755 chart/charts/grafana/Chart.yaml create mode 100755 chart/charts/grafana/README.md create mode 100755 chart/charts/grafana/ci/default-values.yaml create mode 100755 chart/charts/grafana/ci/with-dashboard-json-values.yaml create mode 100755 chart/charts/grafana/ci/with-dashboard-values.yaml create mode 100755 chart/charts/grafana/dashboards/custom-dashboard.json create mode 100755 chart/charts/grafana/templates/NOTES.txt create mode 100755 chart/charts/grafana/templates/_helpers.tpl create mode 100755 chart/charts/grafana/templates/_pod.tpl create 
mode 100755 chart/charts/grafana/templates/clusterrole.yaml create mode 100755 chart/charts/grafana/templates/clusterrolebinding.yaml create mode 100755 chart/charts/grafana/templates/configmap-dashboard-provider.yaml create mode 100755 chart/charts/grafana/templates/configmap.yaml create mode 100755 chart/charts/grafana/templates/dashboards-json-configmap.yaml create mode 100755 chart/charts/grafana/templates/deployment.yaml create mode 100755 chart/charts/grafana/templates/headless-service.yaml create mode 100755 chart/charts/grafana/templates/ingress.yaml create mode 100755 chart/charts/grafana/templates/poddisruptionbudget.yaml create mode 100755 chart/charts/grafana/templates/podsecuritypolicy.yaml create mode 100755 chart/charts/grafana/templates/pvc.yaml create mode 100755 chart/charts/grafana/templates/role.yaml create mode 100755 chart/charts/grafana/templates/rolebinding.yaml create mode 100755 chart/charts/grafana/templates/secret-env.yaml create mode 100755 chart/charts/grafana/templates/secret.yaml create mode 100755 chart/charts/grafana/templates/service.yaml create mode 100755 chart/charts/grafana/templates/serviceaccount.yaml create mode 100755 chart/charts/grafana/templates/statefulset.yaml create mode 100755 chart/charts/grafana/templates/tests/test-configmap.yaml create mode 100755 chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml create mode 100755 chart/charts/grafana/templates/tests/test-role.yaml create mode 100755 chart/charts/grafana/templates/tests/test-rolebinding.yaml create mode 100755 chart/charts/grafana/templates/tests/test-serviceaccount.yaml create mode 100755 chart/charts/grafana/templates/tests/test.yaml create mode 100755 chart/charts/grafana/values.yaml delete mode 100644 chart/charts/postgresql-8.9.4.tgz create mode 100755 chart/charts/postgresql/.helmignore create mode 100755 chart/charts/postgresql/Chart.yaml create mode 100755 chart/charts/postgresql/README.md create mode 100755 
chart/charts/postgresql/ci/commonAnnotations.yaml create mode 100755 chart/charts/postgresql/ci/default-values.yaml create mode 100755 chart/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100755 chart/charts/postgresql/files/README.md create mode 100755 chart/charts/postgresql/files/conf.d/README.md create mode 100755 chart/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100755 chart/charts/postgresql/templates/NOTES.txt create mode 100755 chart/charts/postgresql/templates/_helpers.tpl create mode 100755 chart/charts/postgresql/templates/configmap.yaml create mode 100755 chart/charts/postgresql/templates/extended-config-configmap.yaml create mode 100755 chart/charts/postgresql/templates/initialization-configmap.yaml create mode 100755 chart/charts/postgresql/templates/metrics-configmap.yaml create mode 100755 chart/charts/postgresql/templates/metrics-svc.yaml create mode 100755 chart/charts/postgresql/templates/networkpolicy.yaml create mode 100755 chart/charts/postgresql/templates/podsecuritypolicy.yaml create mode 100755 chart/charts/postgresql/templates/prometheusrule.yaml create mode 100755 chart/charts/postgresql/templates/role.yaml create mode 100755 chart/charts/postgresql/templates/rolebinding.yaml create mode 100755 chart/charts/postgresql/templates/secrets.yaml create mode 100755 chart/charts/postgresql/templates/serviceaccount.yaml create mode 100755 chart/charts/postgresql/templates/servicemonitor.yaml create mode 100755 chart/charts/postgresql/templates/statefulset-slaves.yaml create mode 100755 chart/charts/postgresql/templates/statefulset.yaml create mode 100755 chart/charts/postgresql/templates/svc-headless.yaml create mode 100755 chart/charts/postgresql/templates/svc-read.yaml create mode 100755 chart/charts/postgresql/templates/svc.yaml create mode 100755 chart/charts/postgresql/values-production.yaml create mode 100755 chart/charts/postgresql/values.schema.json create mode 100755 
chart/charts/postgresql/values.yaml delete mode 100644 chart/charts/prometheus-10.0.0.tgz create mode 100755 chart/charts/prometheus/.helmignore create mode 100755 chart/charts/prometheus/Chart.yaml create mode 100755 chart/charts/prometheus/README.md create mode 100755 chart/charts/prometheus/templates/NOTES.txt create mode 100755 chart/charts/prometheus/templates/_helpers.tpl create mode 100755 chart/charts/prometheus/templates/alertmanager-clusterrole.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-configmap.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-deployment.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-ingress.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-networkpolicy.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-pdb.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-pvc.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-service-headless.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-service.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-serviceaccount.yaml create mode 100755 chart/charts/prometheus/templates/alertmanager-statefulset.yaml create mode 100755 chart/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml create mode 100755 chart/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml create mode 100755 chart/charts/prometheus/templates/kube-state-metrics-deployment.yaml create mode 100755 chart/charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml create mode 100755 chart/charts/prometheus/templates/kube-state-metrics-pdb.yaml create mode 100755 chart/charts/prometheus/templates/kube-state-metrics-podsecuritypolicy.yaml 
create mode 100755 chart/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml create mode 100755 chart/charts/prometheus/templates/kube-state-metrics-svc.yaml create mode 100755 chart/charts/prometheus/templates/node-exporter-daemonset.yaml create mode 100755 chart/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml create mode 100755 chart/charts/prometheus/templates/node-exporter-role.yaml create mode 100755 chart/charts/prometheus/templates/node-exporter-rolebinding.yaml create mode 100755 chart/charts/prometheus/templates/node-exporter-service.yaml create mode 100755 chart/charts/prometheus/templates/node-exporter-serviceaccount.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-clusterrole.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-deployment.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-ingress.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-networkpolicy.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-pdb.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-pvc.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-service.yaml create mode 100755 chart/charts/prometheus/templates/pushgateway-serviceaccount.yaml create mode 100755 chart/charts/prometheus/templates/server-clusterrole.yaml create mode 100755 chart/charts/prometheus/templates/server-clusterrolebinding.yaml create mode 100755 chart/charts/prometheus/templates/server-configmap.yaml create mode 100755 chart/charts/prometheus/templates/server-deployment.yaml create mode 100755 chart/charts/prometheus/templates/server-ingress.yaml create mode 100755 chart/charts/prometheus/templates/server-networkpolicy.yaml create mode 100755 
chart/charts/prometheus/templates/server-pdb.yaml create mode 100755 chart/charts/prometheus/templates/server-podsecuritypolicy.yaml create mode 100755 chart/charts/prometheus/templates/server-pvc.yaml create mode 100755 chart/charts/prometheus/templates/server-service-headless.yaml create mode 100755 chart/charts/prometheus/templates/server-service.yaml create mode 100755 chart/charts/prometheus/templates/server-serviceaccount.yaml create mode 100755 chart/charts/prometheus/templates/server-statefulset.yaml create mode 100755 chart/charts/prometheus/templates/server-vpa.yaml create mode 100755 chart/charts/prometheus/values.yaml delete mode 100644 chart/charts/redis-10.3.4.tgz create mode 100755 chart/charts/redis/.helmignore create mode 100755 chart/charts/redis/Chart.yaml create mode 100755 chart/charts/redis/README.md create mode 100755 chart/charts/redis/ci/default-values.yaml create mode 100755 chart/charts/redis/ci/dev-values.yaml create mode 100755 chart/charts/redis/ci/extra-flags-values.yaml create mode 100755 chart/charts/redis/ci/insecure-sentinel-values.yaml create mode 100755 chart/charts/redis/ci/production-sentinel-values.yaml create mode 100755 chart/charts/redis/ci/production-values.yaml create mode 100755 chart/charts/redis/ci/redis-lib-values.yaml create mode 100755 chart/charts/redis/ci/redisgraph-module-values.yaml create mode 100755 chart/charts/redis/templates/NOTES.txt create mode 100755 chart/charts/redis/templates/_helpers.tpl create mode 100755 chart/charts/redis/templates/configmap.yaml create mode 100755 chart/charts/redis/templates/headless-svc.yaml create mode 100755 chart/charts/redis/templates/health-configmap.yaml create mode 100755 chart/charts/redis/templates/metrics-prometheus.yaml create mode 100755 chart/charts/redis/templates/metrics-svc.yaml create mode 100755 chart/charts/redis/templates/networkpolicy.yaml create mode 100755 chart/charts/redis/templates/psp.yaml create mode 100755 
chart/charts/redis/templates/redis-master-statefulset.yaml create mode 100755 chart/charts/redis/templates/redis-master-svc.yaml create mode 100755 chart/charts/redis/templates/redis-role.yaml create mode 100755 chart/charts/redis/templates/redis-rolebinding.yaml create mode 100755 chart/charts/redis/templates/redis-serviceaccount.yaml create mode 100755 chart/charts/redis/templates/redis-slave-statefulset.yaml create mode 100755 chart/charts/redis/templates/redis-slave-svc.yaml create mode 100755 chart/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100755 chart/charts/redis/templates/secret.yaml create mode 100755 chart/charts/redis/values-production.yaml create mode 100755 chart/charts/redis/values.schema.json create mode 100755 chart/charts/redis/values.yaml diff --git a/chart/charts/cert-manager-v0.10.1.tgz b/chart/charts/cert-manager-v0.10.1.tgz deleted file mode 100644 index baa0065c4d285510981e410b5c7e4d44bf0403d3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11396 zcmY*3HF}`q6VWilvH9emXza=_2gnVVpU@@QD(Q)Qs&}NP*dZOQ@61)v@`Kk z`DM>1X=-B&cIo5kyvgxw@O513u`r$-#|)f3Dypb|S{ThFx*4n=PfU{X)WyyebtfZ* zhXK<*U)`Qz$us3&!$wZrT?*@P1&}CXwMg#nc~RYN2Zd!gh+r1Ln{^#f8~x#tFua}O z$n)*l@$+@a|GIhs9{B;og0}2oe!~KP13$m|_I9(nKa6j`J+`bzg;MQ7)Y-oiw5WdH zRKL#$e8IEITd)Od_VAp#!^Xz7jY!fhPLRD)Aq(G1)G`le%%h?h(uhumb!`WS zTe6&cv0Z*fLgS2foMS~|Cog^%}hsBR#_aDc*O| z^0eQ3g3Mv{cA2Fiz`gEh$+unIc8#O~sK{`>TwYIwJSB9Z!Un{{?3;+Zn9;#X4PvM|NU=7S+w|AM%0Qa?(l4MB9LXvne zv3#Lo2#g7{)un(y06YY`oPV%IU>i*TEx?>1@l`X}R5|$B`}{6f@FVht7e8i<+;BCf z>_)n}FkMhV*aZu~X=V}%<%XAw%COp5y33QV%_9@ItK)7Bce6dQ^5Ggy#!q3)9L$UW zNX6=9&n`61ABTDL;Etua)wpq|5t0Xr7kWwSK6%S7XT8Bq@7D)6$+&wug8<10FN|aPRquB*X@dwUmVffe?-@!FlfhknfB;6<|z4&W|8PLyhO6`jloI{+E(bN|Z3l z+)Juh(r1tar_Vty@Q1FKu4euiDlaApM;dJ|R3F~Aa8Ba>i92d6b0lX%_z_LVHNEUy zk;&8O(c*jDXGd~41c1Sw>k`X%*ofqMMlJ;=XYyZoXUicyGkq(Q!YKZQvv%-zq^an3 z#}e~&Xv*l|d(*MsvyZ7gP1*0m5xBgP;qXa?crl@zg9f^@P1Y=`V_b+7UY=sk_ASvi 
z0A{TFdt@`puO%f9yu;t|FeWK*(3B^^0;pjnaiI8%rFJJ`(@S#DNu&gE%n}1)pLCM_ z;b=xIN&MpxQGVWkqvPU3UuPA>N8MxULo>6ggm%|e!{s(gN|w9o)sAHkjfITqEmv4(38zLMQdL1ra>(9`hZsx(l*k%-hyLij zwTCwhwKP1GiyDQB`}RCqK(iG+)7HC(F+iugQdyc6=+Y4TX^^5m zt3lSJXi!CzDpZ79rdV=Zb1dD%Np3!r{TfDtvHLnZ^hRq)*NdoZJJxR&NlormicR?; z=K=JvQX|j*;qo|~Kqq|{K*GDvHuxSVk;q&^V=@kRq+`V3F@Dhr!0XQ^E{R4Ci;TOv z>Lx`;oBoS(vMQ5|%9Wb=7~{Z^621whQga(~%5_Gt5O|rL@vL(l^9C|wi!ikZLXNp5 zord=urDt%;%YGLA`~x8syhN6I<10!=cq%|Yn2I!UtM(lr$*>g8oWe_Itj3?BB`^!OJu7MeSiR_ol~i!-8>$ER zpw-61b%FJPKfVX-1%{e0`Z;>}!W&16_A|*Io=zrf2eAf|Sw3dg)2{uakAtQVB6gB2 z-kQ|4ocQ}Ms?r>;8LKJm7!DYN3ykiQK9%2tuKMSSV|MuatIn9`_~U)XU7mtex?wGe zn;bCJP3irf5&i9mQDUS1f4;N&z#rwYF&O}p z^00rw!fHeUhSXFGg%m9sV`zAhuuQ&)CLllaK39spe%5}-Dg}aN2vre3F5bO(*E8gp z@6nwPu;z}bGv*hu3&5QStwEr3yXCAJGSF!;OiF9=8|S(qy8eFc$LJ{i?eg^UsO|jV z?w6IZ=T9Q%6qg(o@PSSf8gq1widGyPbK>dk&aW3n|3W-NwnXye6$dw!8iB#+ZOLs0 z5bm?r`^?Sz&Jz!9O1Q|$Q=Ahk*Qcm6Z4gFICod@4*)H3ggjS^Z;)gd zsdc%SkM9$)5h)e_rAxN2DYES+Wmkqy@-{MloD?~G>{{Ycqi+}}oUrHhr6ZkNqPxNSFGjWu`FmmsqaXTGxH zT=$`vRdE8DX+M~t&-=HTzUwc$4$yBLg}%QTq{w8^h2A@%RO7>VR659Z_C3q0Jbm;8p{Qb>OryF~HQy8>iAJ#6Zd&6QFVHz?}iq2el) z3DXeSNahX`&LPYc+G8IgS&+!*feVRuzijt`CY7z(idxKRZaLM1DNBIj6)X}$Zlx<+ zDMXftB5^1-!(C;CvIa?+Z&aq0i79S~9mm1X4vWfzP^XTP>N zLtAG4lm>wO()WL)hSTg%#X+Rqf`Wqj9?(GogqOsPA0UR@IO5+5hT{DNMsvpK24kdt za7T?E%Dlm)T{C$^n&Vt;5fm+$#X0s3IQHXh^MA#xGaL>oBB^R0EBUZARgU^uoRKRe27(SWSy<56PwFS)k1VKgvmQxzK zvA3(Y`qC35qYEC?ufI~rwK1frbTDwTceo#(?^ye$QuMVbVnA#6=!lM zVP>yBeOu@vBKMWRO~Up4n=$!lIF#t*l4DsQ>xFv{=X&cpme#Hk%?hx2R0J)i-_=st^*ha~>Daa~NmT zwXCM1gU^{r8I$f_G`sDbf2=D6eWpca(|ZIl+<`W)ZirH^dIkoTK|uNMm`Rf$f+6Oc zDVXUL?nJ+Y0)Gsn0B`bHCg;$vSLO!^tK|(rOXF0O-H|x4ZQ z`LyRn`0Lk8*J)ugwx5wggKFk%t{d-pQLaQzsL}e91za4I%xxj1#(3%3+zaq?OX6uj z1~1`VcMVme@z`|(c5A${nDtK(R6CZ!OQ&!QM&}R)!39q6^q;2*_iqT*acdA}N7mkC zPf1*&TAwr`tfrSoO&^pvDwERmxKhV+mdxawIET$|MooF#@ZL0J?Jup4iZ8?0YBWMfBe8f*`WH=zX*Y=3foxmH!&CATr2ej1%!ohU(N;vt zq1D&~^5_}ot2m`OD>jZ;XU@A9OX)NzMv}%36G5%M4s5j8EB)=YUd!$W%r7Y71BQF- 
zr&CXy9y-4E-PzPt%uRYu+a2-DxuOlw-oJcx)9AJ64}B|4F=w6Qc}CQz^7HxFz_pq% zyfqo8)vv|r|Ej(xw4W}!?>|0C^ZkWY+VH*((Qjz?%vbO3?1;~4O$pArlFZ^%x}9m+ z_*+*d%~#nN-#(}{tal94iDxoV#|Eo+Yom+jyRU0xk3Ki@%`>(F-+&;S;LfU}0F6#ikGq577|5Yk0ATY1y4f$|d|bTOW*~_5 zk*p`v3k{n%3$_-VJ$sIO``7^nyIUX zzR#_Kq-FUjeScoEI8@@Q!1vB~tb+<@=lrD@YSBkwB4;C@)of8`98_!>!?a6kjK)L4 zq|D5c>WiB@@|*WfXp87N+bGs)*@2~`LOhr!0ZRExL#bbOX?<@zisrb1!N$YRVW%rAXl9zt0N+qe zNfo+hDYlZyfpL9!gOeVRQ1_crk^z2v4Zc1#ofeg+BSnh%IjHA_WfWWNZaa1yCRz2+ zahYA`bbUB#fglME`tT()V;d9pe1-AE<>GD%cIK~s#U8&RcGb!s)9IG+>~H<%l^3ZV z*4vZ9&lx#}zIEM=q@0#ErKyYB99`SoerQ44$pPD`3|R%^kfupo3@w#TRaHV*SEaLP z^<0w+OcwmD&@jpd))Q+LYfYG#m|7>g2Q#wMFgYR%W`&*`8)x=~Wu2i$zpc>qqnRYt zDE(OIaQj%1{_}vZPoH2j-NVYg7(_9o!(+s}eaH3?Xz%JdFlR5U>P zuKXS_1s+g(FVvsxRA98O@-=T=*-yEENsXWpT+`s4?0K)3^NGDebx~>==9QcPAK_K# zAM4c-bhiuadpLm&`yF<0lMwbX_w|K)brZ|<{VFbxINA*b8Tr}m_uW#_hy|$wqs)3L z;tm_xXxOEkyRf_OhuzMnV=YM6H2^c;Zn+R9P_@`YK_QZssu&z^2S>MGTh+`bwKbEw z;b8~#6NiH}FYZ%RtPt)J5`PZQ*NMP}Zb|8~k`+Zop{`@qisq&1Aoa?GJ+fN$`FnGF zL33NPV-#sMJ7_s`p-R*DBEyovskJ9*G;LkvqR7$wBL-#_a8&ZQ*5HWHzg%VACBlQ{ z-%q07Va>D_Timt^XDC9@PQZ#$IhFD8I(l}gmgmG})YTmz!@lk35=8w!INlzz)-2bAYhwe3M- z{#fuHJo+k@%G3!ecOXS;gFN3Q#vR2|JaOXB3J>52^)lBmHDy56JP?HZEd2p`>oe#%0=eH+dRtGB;>BxKenYOYjGS?HW(J+i z$iL;75e{LB$t^u}PIgdq*T(+TGU#a$w)`zzcC?NH)SLt@JTA11gWOSG#@|5?K|#-7 zJv^Rj6H47qf%o-I2K-4D^G`nqk}Eud>OVY+Wo(V4**2JqW5tIa`ziE#^q!2=_FV_X zNq!cj{FvbQbN{5_m~qd@IOVY{oM+_(ZxQM|~7k{HRIyn`(S|$&w^FQ&xe&P`Tw@ zV|%yGUSM!KWyN!s;>k683Cvp_r8Wn+Zk8=HLj&j;Nb%OQvBPI`1j^y1NKHR(lOOV2 zyBA?L^blU`;);>DkT7S=;)Io(t1Lc$_6-uPJlxmQ#je(WvMV+y_7i(kZyPA#Hs?eC zjO5$%8YN=I&vdyszK-Rt-q5DP_)V3hi{T^O<8Y^v!RmZ4PK%zv1Rut9`T|5QT45^5 z3u6K})YF=TuI+h#HmVdE37uIHT2t9`4)oLjm%Fd{WL`mP*Wfz+Ae3i!py^l7$~9dqC@O?=yBIRKpo`(e6dMs(0XDKQn}B zJG>>fm!fnqo9>4Ve13M#o1-~kfgQw3`TL!Y<#kd)Y(J&{rM60hYE$>nD{~AvkslCb zwd{W7Zm_Czy}t1TxMR_`^-$tOB3{=q>lO*2@2e*sdT#A5UgCJ+jn}t_@c}DBO>Bdf zz?qgap%r^9%{UQnCasd)TKuRK*5|LbrjnX#k9fSkDob|JfsC@j<8&$8-OplbyY&w} 
zeZFU3X^CH4Qkyy`rrT@S9c=lCjx30rP~a>(cJz?WpzA=JD(9!>$plP9nIfZnA4u4GY5S20a+Qz^n)w^eLr2#OiTu*J<9l}~KR{rgkq3ZVES z7aNCd-_I)Jot;nS@ZX*XXp@3oUf#n@L3v-kzrWn6W)SZEnrQdk+}wJ@y;j%Oe%@P3haEP1 zPSJujKXIb7Tg$uUnO$w&GsV|B{fFXL! zpoCjJp`sAgK=W&M32_5`nggy>kXbWVS|+MSKN(h7Y$Fy-4@>zQ{a)&C{Uy_F`ZWtX zyVsR059o<9BW89ywq|HmW3IDXoliMxeCUclO-#*qbaS8Gko`^m9}w~L#^=9l zXL%E(P5<8V1Z?%`iG%*teO8h^o?W$7kizKdg=7g-&uSQxcDNxnkJaX?JJpN<*<-_Y zMuSjjzIK9yIzY~xGoc>>O~_iz4kJdpdLmWYPe7Chx;+ppCaRw@iofXz2(}~T0d!IY zdfDP_asL7-hjLfJ3SNNEqW-kfnN_ee3iaG`*a`Wkuq?kpJ$m<3xS35xi091j37CqU zW-HyzzyH}pM!c?38*cC93jx!IvJ>*Rz5xFf(2%GPNyX2P7q`UEo;?pI@Y6nw5Y(;b z7Z&(&BMl=;iDv4NWlNU;`V11{0*!1x1i683UdT{L@=+vOtNQK2QFpf={TO1*w*WH_S-aYr zt>2*gBE*_P17tQOcGI*U!$=V5d({g*j$eB^ilI3~fD%JVve&a$Lht_8g&fp5Wzfo& zdwb!IroyLy4|C4rA!uIUjqd!&UwpJf+6XrZ%=yGJO@Cy1;TletOQC}_I;Rvgvlftt zO2)Q#dp`v4-p*X8B+sq8Z@jp0g!}naLF)a0c?x=(DW$TXZrL7Yc_+S1*%yHAfCbL{ zb916#hnNdlv2YHB(?!wDdUzNd(C5STli+w5beP#9MsJV@SOaTlO_MAVQ5oUU?NYjK z9GhmW6av0>vI)C9T<5=iUNzUHf4Recb_r>Iz2u>2{hQMq)&|Le5Xkt05cPvrxd8l) zKc*2n6bD>I-Vhoj5IV^N8zR?^=>8< z_-;5A5>5qasF3Amc?!9jd{8|Kvsv)_pokEYx)qG%6G?PPVP`F=w=M)Z5I*v5DEjZO zc+>&7xZ>*|d!zni-I2Am0M?R8%>D?rAA~EpQ9>_oJ1Qh81~hPof`>MLYb8eBB+#J* zAE~NtM+G$gC`CM&57(xpc+;W617_h$9<_8w?Kz}P%%Mc40TC+fMLH9bDjF=eicn-o z#+jpIaVk{Xa!u^mU)TIjh9&LKQQivt8Bex$&Ajv9ebo8`AiuuwS`k zi7(_<$A^?-AK*3Ia8%9nx&zWc6dGPCn|ave^NEBAO%QglJ!aBaUd+(O{Ys{K{jxT5H zt?pNpeApg|?kvn^ZxyN;C6{PHbYj|W6xa`^AeroFla*&>qiDm^zp8E>5n?j?(^)}L zTL{@S-_LYI_Ev)wvRlZIaMzqE)7mLs?uQ4aL7(GcohKbRV`jX?{pck?D)ljv<{V!c z+X`8R{o!FuW9yneISvYzTfU_%+2JpfCi75%^6?)w!$eC;bUv)df`iIYe&~JmPV>ym zgwYYst+IF*D#zNzJ!yM{sH7+@`fWj)i*e>{E{1SbuBLZ}LT|(yJUEvSa3R67lw>a_ zCx$E9IEI{*@xyBooJlGRHWO1Aa`GQ8oP(P={@F z`(b!$)>=hwu~hDXTeMEJ@FRc4sxonc#%7oDaR4*5#&}RMJcQg_i>&p&c?c6D^{8+b z_@q1X24zkS^$Nwv^nx3K#WmjJvZpXj<*)!-x$~@FzGyly_eS9z$fv#5@D_&2D}BO zYsz)LB7?$N0#4e^k7ZAO{=TFC?|v$;Ogp>4-@?l$IRs~j#zQz2jU}2SOf(ug>)j2@ zs=2w4@!Wca;*oWi)aV43QzQk-j~;nDdR5~!B&U2I;E#n80*9sRtlpo3=7MvBh;*hd zb?yzv;UQGkcl))otC@(p*hTg@kp! 
zid#wZPoxp%BbvUUX+p}tLOZAoHGBjG2qV?@=-3jmKc%k@Mr#e=G;vW{{0_>wH1%5QyjrBhhWbd!*lrUu0 zbwV*>WZxq)yv)Gj5GSF_Drqb65aH(b6;RJ z56nXsmfZxgn%WcUj#NATxZr-HxKd>&?iQ0UUz4C+@rf9so66KjxoNuN`)NAH*i;zd z=E#~r&-gFy5Md+kSH<=VE09)ITr8%TlGXPC{(Bm236QXzBZ5*PHKsi?TlaYa-!rvg zYb4U3$-A8`s?$Zy8T5>CoRWV%_#jLa8ML)DVotFuV4yB76rB;=`O$J(S9VyzXley7>x=}mUJ-gr`c2o6v zrU$a?;Y9_$&EGuCPi+-D(xVDa@9IPg>k9}(4>l8_JL0geh#|;d=_CVjcrcZ`cSqiV zX-;=fJhz83Rb48LEM*_KoOtjmcyu|j0W@KsBxaA|^djv@ z%U1qp9By(gtgGH=w-0jp5{>YWXBTq2sot~{uZY|kk|8f%G}Lmm!fg|_9BmnG-@K)h zdeh4P)`9Bu(K%ep^cl10|1?HSveY9Lz9VHuNp(h};H9c2B2^m+s8IDTH_*U;sx2id z=QJ#?LCl(Zm8!N##n_A|Q}-2@v5c<6GzVyHgx1wZ^;%b5m5638oljKVh$|?=M@I`8 zov`M7Cvm0o8Qgn4y^%VE)vzaubFeH`hE(2gf@#9$!Q9}EltcMGA0N4{MznWCk%qc? zU{g^SE$PW8B*b!m|2=9c3k!bnT$LzB3v4BEkZTd4c%8Kwxo&wnPp&4U&yQqGjYkir zE7~CnHPd1RLPMe(JN}6pP_wz1UT`2y+f<}erdxE35KjSB)v`9?+&h|e+)=gcJ5+q( zM7N7D1vP+fp#g(v5Hut<=Z zXV`0oL;E+|pN9Ug8m>lW@m!P@W%S*7>bh;z>+*-5$NVxo5%0daHNX~cZbdqEld@fo z<%clF5{9MzxwD*YRCj|_m!Hk&9r+eGPINT-*esz0m)d!kbw7!y=uPOWKUC+!N$*rx zPg`PJrHKMl^ly@Vh=#{xjT;!8NWQaXWF5cQQh!3Oiq&%3*Y~fTuqx0B5nmO^{Q{&b z0pk7*D#;*zYKZ>IdztvkgAEI#|J)ffo}V*E)Q^2mBad6yXlQgc^(_pNo~YqtT{6_@ z^Cy@>tPTjA?;W$>J9mfO>P{LM*L!;l_&|-NoB37@W0p0^4hv#G4r49WBu&O@BrA;{ zv^vsK_wbd=us`Y^=PzsRHwoSXQ~3Kn;bb+RrVFzPbm}3Zc%qpwagOVqq@&ZA_FsO9cwxDW9vnoza0>@eHKW z4Q(;?zkE)`%q5e{B|RRN%FQwKB|)Idp*MabgNzw6eV$+~`Teeg!YBEjC(HLFg45O< zX$17Gp@YR6GF#(p$zPA!fey7q_{~ zV=i>7diGM{1}V7-Hf#;cWyoBSceNOU*MakYG8}6b(zoJt1(FNvy{mPiXuOC)TyDRk zYkkjBD7lsnXi0PZF7_&we|2P zt%%81l??brSLpV&4^61tXcyS#HRT>X?;NZ5($|5W zpHXOmAlTm>RiExSGgfIGwjOI*UEmpF9rP;A=QjQ8nSL~dOfpnfrRX8tMPe+M{+H%tF|-VU@wGt#vZrT68JRO5r7fB|ahSLLuX#sm5Bul`#< zikmj*pEkfI63^>yZoUw#iVrt%uzAo-d8%tJIf1@ihWm1fR7geAj*v~j$|3^MF?^o! 
zg6TcNtvZUk#barFY4UD30v2EEvceNf6Cc7fJoM^jt)rQ5NFAviTYZcqp_@*JWkBED z_>CJX2OB-VwWVg_(uUepS+noTg>Zx)<$HgxV?C?Fzr37WKsV?8H&w(JwKbh{zJ{Hj z-ijk5oG0*EyCjbfoc3gFBIJUFU=@^+u#V*Bn?8pwk#^@UJP$+4yynR=yr z`IaO9FGkg+G*>9Hy5S>6SXz5h-Cj7;{`fC@w}IbZE|e*6I(Ba`$!k4sfxJn+rTXP> zspel@Uacg47KHReC)GQZ$DeP%S-I;9_Af0W8(v@->EJOseANLPkK4lpxhr*8Wu zOk?qCoW0sDXuIlPaA9lf`74gc(=EULG&atdE=4Hrw4#@mnG{U(hlE6!t&4Qy5?&G* z%I{Qh5*ZJ+`Oeuz3T+Y-nDP#~6UaRSeeDgblAdlj)2aDCg2YB=5 z+w%nKN-AvvX#s2g+XaB^DUduezSJW?TUW%_O&=in+>{l+Af{c43w;a_4`9{)#qZ|O z^1yH7b%*1tB?e@B;0MZ{8NdH8=DvsW%iaB@HU;E<-+0%9vIYM4zli?@{x|Gj7zQ=p z^;|W~HPnB$2;ttM18eX>9XzKf$p6GghFp%XmkKt7|69ZHR&DJUp<`oeEIz+ Pa15fwv48?|0|)yb2V9@j diff --git a/chart/charts/cert-manager/.helmignore b/chart/charts/cert-manager/.helmignore new file mode 100755 index 0000000..f0c1319 --- /dev/null +++ b/chart/charts/cert-manager/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/chart/charts/cert-manager/Chart.yaml b/chart/charts/cert-manager/Chart.yaml new file mode 100755 index 0000000..a1cb6bf --- /dev/null +++ b/chart/charts/cert-manager/Chart.yaml @@ -0,0 +1,16 @@ +appVersion: v0.10.1 +description: A Helm chart for cert-manager +home: https://github.com/jetstack/cert-manager +icon: https://raw.githubusercontent.com/jetstack/cert-manager/master/logo/logo.png +keywords: +- cert-manager +- kube-lego +- letsencrypt +- tls +maintainers: +- email: james@jetstack.io + name: munnerz +name: cert-manager +sources: +- https://github.com/jetstack/cert-manager +version: v0.10.1 diff --git a/chart/charts/cert-manager/OWNERS b/chart/charts/cert-manager/OWNERS new file mode 100755 index 0000000..68d3a41 --- /dev/null +++ b/chart/charts/cert-manager/OWNERS @@ -0,0 +1,8 @@ +approvers: +- munnerz +- simonswine +- kragniz +reviewers: +- munnerz +- unguiculus +- kragniz diff --git a/chart/charts/cert-manager/README.md b/chart/charts/cert-manager/README.md new file mode 100755 index 0000000..e892bdf --- /dev/null +++ b/chart/charts/cert-manager/README.md @@ -0,0 +1,154 @@ +# cert-manager + +cert-manager is a Kubernetes addon to automate the management and issuance of +TLS certificates from various issuing sources. + +It will ensure certificates are valid and up to date periodically, and attempt +to renew certificates at an appropriate time before expiry. + +## Prerequisites + +- Kubernetes 1.7+ + +## Installing the Chart + +Full installation instructions, including details on how to configure extra +functionality in cert-manager can be found in the [getting started docs](https://docs.cert-manager.io/en/latest/getting-started/). 
+ +To install the chart with the release name `my-release`: + +```console +## IMPORTANT: you MUST install the cert-manager CRDs **before** installing the +## cert-manager Helm chart +$ kubectl apply \ + -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.10/deploy/manifests/00-crds.yaml + +## If you are installing on openshift : +$ oc create \ + -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.10/deploy/manifests/00-crds.yaml + +## IMPORTANT: if the cert-manager namespace **already exists**, you MUST ensure +## it has an additional label on it in order for the deployment to succeed +$ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation="true" + +## For openshift: +$ oc label namespace cert-manager certmanager.k8s.io/disable-validation=true + +## Add the Jetstack Helm repository +$ helm repo add jetstack https://charts.jetstack.io + + +## Install the cert-manager helm chart +$ helm install --name my-release --namespace cert-manager jetstack/cert-manager +``` + +In order to begin issuing certificates, you will need to set up a ClusterIssuer +or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer). + +More information on the different types of issuers and how to configure them +can be found in our documentation: + +https://docs.cert-manager.io/en/latest/tasks/issuers/index.html + +For information on how to configure cert-manager to automatically provision +Certificates for Ingress resources, take a look at the `ingress-shim` +documentation: + +https://docs.cert-manager.io/en/latest/tasks/issuing-certificates/ingress-shim.html + +> **Tip**: List all releases using `helm list` + +## Upgrading the Chart + +Special considerations may be required when upgrading the Helm chart, and these +are documented in our full [upgrading guide](https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html). +Please check here before perform upgrades! 
+ +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the cert-manager chart and their default values. + +| Parameter | Description | Default | +| --------- | ----------- | ------- | +| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` | +| `global.rbac.create` | If `true`, create and use RBAC resources (includes sub-charts) | `true` | +| `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` | +| `image.tag` | Image tag | `v0.10.1` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `replicaCount` | Number of cert-manager replicas | `1` | +| `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod +| `leaderElection.Namespace` | Override the namespace used to store the ConfigMap for leader election | Same namespace as cert-manager pod +| `extraArgs` | Optional flags for cert-manager | `[]` | +| `extraEnv` | Optional environment variables for cert-manager | `[]` | +| `serviceAccount.create` | If `true`, create a new service account | `true` | +| `serviceAccount.name` | Service account to be used. 
If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | | +| `resources` | CPU/memory resource requests/limits | | +| `securityContext.enabled` | Enable security context | `false` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `affinity` | Node affinity for pod assignment | `{}` | +| `tolerations` | Node tolerations for pod assignment | `[]` | +| `ingressShim.defaultIssuerName` | Optional default issuer to use for ingress resources | | +| `ingressShim.defaultIssuerKind` | Optional default issuer kind to use for ingress resources | | +| `ingressShim.defaultACMEChallengeType` | Optional default challenge type to use for ingresses using ACME issuers | | +| `ingressShim.defaultACMEDNS01ChallengeProvider` | Optional default DNS01 challenge provider to use for ingresses using ACME issuers with DNS01 | | +| `prometheus.enabled` | Enable Prometheus monitoring | `true` | +| `prometheus.servicemonitor.enabled` | Enable Prometheus Operator ServiceMonitor monitoring | `false` +| `prometheus.servicemonitor.namespace` | Define namespace where to deploy the ServiceMonitor resource | (namespace where you are deploying) | +| `prometheus.servicemonitor.prometheusInstance` | Prometheus Instance definition | `default` | +| `prometheus.servicemonitor.targetPort` | Prometheus scrape port | `9402` | +| `prometheus.servicemonitor.path` | Prometheus scrape path | `/metrics` | +| `prometheus.servicemonitor.interval` | Prometheus scrape interval | `60s` | +| `prometheus.servicemonitor.labels` | Add custom labels to ServiceMonitor | | +| `prometheus.servicemonitor.scrapeTimeout` | Prometheus scrape timeout | `30s` | +| `podAnnotations` | Annotations to add to the cert-manager pod | `{}` | +| `podDnsPolicy` | Optional cert-manager pod [DNS 
policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy) | | +| `podDnsConfig` | Optional cert-manager pod [DNS configurations](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) | | +| `podLabels` | Labels to add to the cert-manager pod | `{}` | +| `priorityClassName`| Priority class name for cert-manager and webhook pods | `""` | +| `http_proxy` | Value of the `HTTP_PROXY` environment variable in the cert-manager pod | | +| `https_proxy` | Value of the `HTTPS_PROXY` environment variable in the cert-manager pod | | +| `no_proxy` | Value of the `NO_PROXY` environment variable in the cert-manager pod | | +| `webhook.enabled` | Toggles whether the validating webhook component should be installed | `true` | +| `webhook.replicaCount` | Number of cert-manager webhook replicas | `1` | +| `webhook.podAnnotations` | Annotations to add to the webhook pods | `{}` | +| `webhook.extraArgs` | Optional flags for cert-manager webhook component | `[]` | +| `webhook.resources` | CPU/memory resource requests/limits for the webhook pods | | +| `webhook.nodeSelector` | Node labels for webhook pod assignment | `{}` | +| `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` | +| `webhook.image.tag` | Webhook image tag | `v0.10.1` | +| `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` | +| `webhook.injectAPIServerCA` | if true, the apiserver's CABundle will be automatically injected into the ValidatingWebhookConfiguration resource | `true` | +| `cainjector.enabled` | Toggles whether the cainjector component should be installed (required for the webhook component to work) | `true` | +| `cainjector.replicaCount` | Number of cert-manager cainjector replicas | `1` | +| `cainjector.podAnnotations` | Annotations to add to the cainjector pods | `{}` | +| `cainjector.extraArgs` | Optional flags for cert-manager cainjector component | `[]` | +| 
`cainjector.resources` | CPU/memory resource requests/limits for the cainjector pods | | +| `cainjector.nodeSelector` | Node labels for cainjector pod assignment | `{}` | +| `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` | +| `cainjector.image.tag` | cainjector image tag | `v0.10.1` | +| `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml . +``` +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Contributing + +This chart is maintained at [github.com/jetstack/cert-manager](https://github.com/jetstack/cert-manager/tree/master/deploy/charts/cert-manager). diff --git a/chart/charts/cert-manager/cainjector/.helmignore b/chart/charts/cert-manager/cainjector/.helmignore new file mode 100755 index 0000000..f0c1319 --- /dev/null +++ b/chart/charts/cert-manager/cainjector/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/chart/charts/cert-manager/cainjector/Chart.yaml b/chart/charts/cert-manager/cainjector/Chart.yaml new file mode 100755 index 0000000..a0451ea --- /dev/null +++ b/chart/charts/cert-manager/cainjector/Chart.yaml @@ -0,0 +1,17 @@ +name: cainjector +apiVersion: v1 +# The version and appVersion fields are set automatically by the release tool +version: v0.1.0 +appVersion: v0.1.0 +description: A Helm chart for deploying the cert-manager cainjector component +home: https://github.com/jetstack/cert-manager +sources: + - https://github.com/jetstack/cert-manager +keywords: + - cert-manager + - kube-lego + - letsencrypt + - tls +maintainers: + - name: munnerz + email: james@jetstack.io diff --git a/chart/charts/cert-manager/cainjector/templates/NOTES.txt b/chart/charts/cert-manager/cainjector/templates/NOTES.txt new file mode 100755 index 0000000..e69de29 diff --git a/chart/charts/cert-manager/cainjector/templates/_helpers.tpl b/chart/charts/cert-manager/cainjector/templates/_helpers.tpl new file mode 100755 index 0000000..f7465cb --- /dev/null +++ b/chart/charts/cert-manager/cainjector/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cainjector.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "cainjector.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cainjector.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/chart/charts/cert-manager/cainjector/templates/deployment.yaml b/chart/charts/cert-manager/cainjector/templates/deployment.yaml new file mode 100755 index 0000000..0d3e918 --- /dev/null +++ b/chart/charts/cert-manager/cainjector/templates/deployment.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cainjector.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ include "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.strategy }} + strategy: + {{- . | toYaml | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ include "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} + annotations: + {{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "cainjector.fullname" . }} + {{- if .Values.global.priorityClassName }} + priorityClassName: {{ .Values.global.priorityClassName | quote }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + {{- if .Values.global.logLevel }} + - --v={{ .Values.global.logLevel }} + {{- end }} + {{- if .Values.global.leaderElection.namespace }} + - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} + {{- else }} + - --leader-election-namespace=$(POD_NAMESPACE) + {{- end }} + {{- if .Values.extraArgs }} +{{ toYaml .Values.extraArgs | indent 10 }} + {{- end }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} diff --git a/chart/charts/cert-manager/cainjector/templates/rbac.yaml b/chart/charts/cert-manager/cainjector/templates/rbac.yaml new file mode 100755 index 0000000..6487404 --- /dev/null +++ b/chart/charts/cert-manager/cainjector/templates/rbac.yaml @@ -0,0 +1,50 @@ +{{- if .Values.global.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cainjector.fullname" . }} + labels: + app: {{ template "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps", "events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cainjector.fullname" . }} + labels: + app: {{ template "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cainjector.fullname" . }} +subjects: + - name: {{ include "cainjector.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount +{{- end -}} diff --git a/chart/charts/cert-manager/cainjector/templates/serviceaccount.yaml b/chart/charts/cert-manager/cainjector/templates/serviceaccount.yaml new file mode 100755 index 0000000..67f186f --- /dev/null +++ b/chart/charts/cert-manager/cainjector/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "cainjector.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} +{{- end }} diff --git a/chart/charts/cert-manager/cainjector/values.yaml b/chart/charts/cert-manager/cainjector/values.yaml new file mode 100755 index 0000000..29769d1 --- /dev/null +++ b/chart/charts/cert-manager/cainjector/values.yaml @@ -0,0 +1,42 @@ +global: + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + + # Optional priority class to be used for the cert-manager pods + priorityClassName: "" + rbac: + create: true + + leaderElection: + # Override the namespace used to store the ConfigMap for leader election + namespace: "" + +replicaCount: 1 + +strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + +podAnnotations: {} + +# Optional additional arguments for cainjector +extraArgs: [] + +resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + +nodeSelector: {} + +image: + repository: quay.io/jetstack/cert-manager-cainjector + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + pullPolicy: IfNotPresent diff --git a/chart/charts/cert-manager/charts/cainjector/.helmignore b/chart/charts/cert-manager/charts/cainjector/.helmignore new file mode 100755 index 0000000..f0c1319 --- /dev/null +++ b/chart/charts/cert-manager/charts/cainjector/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/chart/charts/cert-manager/charts/cainjector/Chart.yaml b/chart/charts/cert-manager/charts/cainjector/Chart.yaml new file mode 100755 index 0000000..a5f0cff --- /dev/null +++ b/chart/charts/cert-manager/charts/cainjector/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +appVersion: v0.10.1 +description: A Helm chart for deploying the cert-manager cainjector component +home: https://github.com/jetstack/cert-manager +keywords: +- cert-manager +- kube-lego +- letsencrypt +- tls +maintainers: +- email: james@jetstack.io + name: munnerz +name: cainjector +sources: +- https://github.com/jetstack/cert-manager +version: v0.10.1 diff --git a/chart/charts/cert-manager/charts/cainjector/templates/NOTES.txt b/chart/charts/cert-manager/charts/cainjector/templates/NOTES.txt new file mode 100755 index 0000000..e69de29 diff --git a/chart/charts/cert-manager/charts/cainjector/templates/_helpers.tpl b/chart/charts/cert-manager/charts/cainjector/templates/_helpers.tpl new file mode 100755 index 0000000..f7465cb --- /dev/null +++ b/chart/charts/cert-manager/charts/cainjector/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cainjector.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "cainjector.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cainjector.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/chart/charts/cert-manager/charts/cainjector/templates/deployment.yaml b/chart/charts/cert-manager/charts/cainjector/templates/deployment.yaml new file mode 100755 index 0000000..0d3e918 --- /dev/null +++ b/chart/charts/cert-manager/charts/cainjector/templates/deployment.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cainjector.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ include "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.strategy }} + strategy: + {{- . | toYaml | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ include "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} + annotations: + {{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "cainjector.fullname" . }} + {{- if .Values.global.priorityClassName }} + priorityClassName: {{ .Values.global.priorityClassName | quote }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + {{- if .Values.global.logLevel }} + - --v={{ .Values.global.logLevel }} + {{- end }} + {{- if .Values.global.leaderElection.namespace }} + - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} + {{- else }} + - --leader-election-namespace=$(POD_NAMESPACE) + {{- end }} + {{- if .Values.extraArgs }} +{{ toYaml .Values.extraArgs | indent 10 }} + {{- end }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} diff --git a/chart/charts/cert-manager/charts/cainjector/templates/rbac.yaml b/chart/charts/cert-manager/charts/cainjector/templates/rbac.yaml new file mode 100755 index 0000000..6487404 --- /dev/null +++ b/chart/charts/cert-manager/charts/cainjector/templates/rbac.yaml @@ -0,0 +1,50 @@ +{{- if .Values.global.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cainjector.fullname" . }} + labels: + app: {{ template "cainjector.name" . 
}} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps", "events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cainjector.fullname" . }} + labels: + app: {{ template "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cainjector.fullname" . }} +subjects: + - name: {{ include "cainjector.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount +{{- end -}} diff --git a/chart/charts/cert-manager/charts/cainjector/templates/serviceaccount.yaml b/chart/charts/cert-manager/charts/cainjector/templates/serviceaccount.yaml new file mode 100755 index 0000000..67f186f --- /dev/null +++ b/chart/charts/cert-manager/charts/cainjector/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "cainjector.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "cainjector.name" . }} + app.kubernetes.io/name: {{ include "cainjector.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "cainjector.chart" . }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} +{{- end }} diff --git a/chart/charts/cert-manager/charts/cainjector/values.yaml b/chart/charts/cert-manager/charts/cainjector/values.yaml new file mode 100755 index 0000000..29769d1 --- /dev/null +++ b/chart/charts/cert-manager/charts/cainjector/values.yaml @@ -0,0 +1,42 @@ +global: + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + + # Optional priority class to be used for the cert-manager pods + priorityClassName: "" + rbac: + create: true + + leaderElection: + # Override the namespace used to store the ConfigMap for leader election + namespace: "" + +replicaCount: 1 + +strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + +podAnnotations: {} + +# Optional additional arguments for cainjector +extraArgs: [] + +resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + +nodeSelector: {} + +image: + repository: quay.io/jetstack/cert-manager-cainjector + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. 
+ # tag: canary + pullPolicy: IfNotPresent diff --git a/chart/charts/cert-manager/requirements.yaml b/chart/charts/cert-manager/requirements.yaml new file mode 100755 index 0000000..b117e63 --- /dev/null +++ b/chart/charts/cert-manager/requirements.yaml @@ -0,0 +1,6 @@ +# requirements.yaml +dependencies: +- name: cainjector + version: "v0.1.0" + repository: "file://cainjector" + condition: cainjector.enabled diff --git a/chart/charts/cert-manager/templates/NOTES.txt b/chart/charts/cert-manager/templates/NOTES.txt new file mode 100755 index 0000000..7edd135 --- /dev/null +++ b/chart/charts/cert-manager/templates/NOTES.txt @@ -0,0 +1,15 @@ +cert-manager has been deployed successfully! + +In order to begin issuing certificates, you will need to set up a ClusterIssuer +or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer). + +More information on the different types of issuers and how to configure them +can be found in our documentation: + +https://docs.cert-manager.io/en/latest/reference/issuers.html + +For information on how to configure cert-manager to automatically provision +Certificates for Ingress resources, take a look at the `ingress-shim` +documentation: + +https://docs.cert-manager.io/en/latest/reference/ingress-shim.html diff --git a/chart/charts/cert-manager/templates/_helpers.tpl b/chart/charts/cert-manager/templates/_helpers.tpl new file mode 100755 index 0000000..b116334 --- /dev/null +++ b/chart/charts/cert-manager/templates/_helpers.tpl @@ -0,0 +1,92 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cert-manager.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "cert-manager.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cert-manager.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "cert-manager.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cert-manager.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the chart. +Manually fix the 'app' and 'name' labels to 'webhook' to maintain +compatibility with the v0.9 deployment selector. +*/}} +{{- define "webhook.name" -}} +{{- printf "webhook" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "webhook.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- printf "%s-webhook" .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-webhook" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-webhook" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "webhook.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "webhook.selfSignedIssuer" -}} +{{ printf "%s-selfsign" (include "webhook.fullname" .) }} +{{- end -}} + +{{- define "webhook.rootCAIssuer" -}} +{{ printf "%s-ca" (include "webhook.fullname" .) }} +{{- end -}} + +{{- define "webhook.rootCACertificate" -}} +{{ printf "%s-ca" (include "webhook.fullname" .) }} +{{- end -}} + +{{- define "webhook.servingCertificate" -}} +{{ printf "%s-tls" (include "webhook.fullname" .) }} +{{- end -}} diff --git a/chart/charts/cert-manager/templates/deployment.yaml b/chart/charts/cert-manager/templates/deployment.yaml new file mode 100755 index 0000000..c3804e9 --- /dev/null +++ b/chart/charts/cert-manager/templates/deployment.yaml @@ -0,0 +1,135 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "cert-manager.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.strategy }} + strategy: + {{- . | toYaml | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: + {{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.prometheus.enabled (not .Values.prometheus.servicemonitor.enabled) }} + prometheus.io/path: "/metrics" + prometheus.io/scrape: 'true' + prometheus.io/port: '9402' + {{- end }} + spec: + serviceAccountName: {{ template "cert-manager.serviceAccountName" . }} + {{- if .Values.global.priorityClassName }} + priorityClassName: {{ .Values.global.priorityClassName | quote }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + {{- if .Values.global.logLevel }} + - --v={{ .Values.global.logLevel }} + {{- end }} + {{- if .Values.clusterResourceNamespace }} + - --cluster-resource-namespace={{ .Values.clusterResourceNamespace }} + {{- else }} + - --cluster-resource-namespace=$(POD_NAMESPACE) + {{- end }} + {{- if .Values.global.leaderElection.namespace }} + - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} + {{- else }} + - --leader-election-namespace=$(POD_NAMESPACE) + {{- end }} + {{- if .Values.extraArgs }} +{{ toYaml 
.Values.extraArgs | indent 10 }} + {{- end }} + {{- with .Values.ingressShim }} + {{- if .defaultIssuerName }} + - --default-issuer-name={{ .defaultIssuerName }} + {{- end }} + {{- if .defaultIssuerKind }} + - --default-issuer-kind={{ .defaultIssuerKind }} + {{- end }} + {{- if .defaultACMEChallengeType }} + - --default-acme-issuer-challenge-type={{ .defaultACMEChallengeType }} + {{- end }} + {{- if .defaultACMEDNS01ChallengeProvider }} + - --default-acme-issuer-dns01-provider-name={{ .defaultACMEDNS01ChallengeProvider }} + {{- end }} + {{- end }} + - --webhook-namespace=$(POD_NAMESPACE) + - --webhook-ca-secret={{ include "webhook.rootCACertificate" . }} + - --webhook-serving-secret={{ include "webhook.servingCertificate" . }} + - --webhook-dns-names={{ include "webhook.fullname" . }},{{ include "webhook.fullname" . }}.{{ .Release.Namespace }},{{ include "webhook.fullname" . }}.{{ .Release.Namespace }}.svc + ports: + - containerPort: 9402 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.extraEnv }} +{{ toYaml .Values.extraEnv | indent 10 }} + {{- end }} + {{- if .Values.http_proxy }} + - name: HTTP_PROXY + value: {{ .Values.http_proxy }} + {{- end }} + {{- if .Values.https_proxy }} + - name: HTTPS_PROXY + value: {{ .Values.https_proxy }} + {{- end }} + {{- if .Values.no_proxy }} + - name: NO_PROXY + value: {{ .Values.no_proxy }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} +{{- if .Values.podDnsPolicy }} + dnsPolicy: {{ .Values.podDnsPolicy }} +{{- end }} +{{- if .Values.podDnsConfig }} + dnsConfig: +{{ toYaml .Values.podDnsConfig | indent 8 }} +{{- end }} diff --git a/chart/charts/cert-manager/templates/rbac.yaml b/chart/charts/cert-manager/templates/rbac.yaml new file mode 100755 index 0000000..694eee4 --- /dev/null +++ b/chart/charts/cert-manager/templates/rbac.yaml @@ -0,0 +1,420 @@ +{{- if .Values.global.rbac.create -}} + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-leaderelection + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +rules: + # Used for leader election by the controller + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create", "update", "patch"] + +--- + +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-issuers + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . 
}} +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-certificates + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . 
}} +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests", "clusterissuers", "issuers", "orders"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates/finalizers"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["orders"] + verbs: ["create", "delete"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-orders + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . 
}} +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["orders", "orders/status"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["orders", "clusterissuers", "issuers", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["certmanager.k8s.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-challenges + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . 
}} +rules: + # Use to update challenge resource status + - apiGroups: ["certmanager.k8s.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["certmanager.k8s.io"] + resources: ["challenges", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] +{{- if .Values.global.isOpenshift }} + # We require the ability to specify a custom hostname when we are creating + # new ingress resources. + # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 + - apiGroups: ["route.openshift.io"] + resources: ["routes/custom-host"] + verbs: ["create"] +{{- end }} + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["certmanager.k8s.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + +--- + +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim + labels: + app: {{ template "cert-manager.name" . 
}} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["extensions"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cert-manager.fullname" . }}-leaderelection + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cert-manager.fullname" . }}-leaderelection +subjects: + - name: {{ template "cert-manager.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-issuers + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cert-manager.fullname" . }}-controller-issuers +subjects: + - name: {{ template "cert-manager.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers +subjects: + - name: {{ template "cert-manager.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-certificates + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cert-manager.fullname" . }}-controller-certificates +subjects: + - name: {{ template "cert-manager.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-orders + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cert-manager.fullname" . }}-controller-orders +subjects: + - name: {{ template "cert-manager.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-challenges + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cert-manager.fullname" . }}-controller-challenges +subjects: + - name: {{ template "cert-manager.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim +subjects: + - name: {{ template "cert-manager.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-view + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cert-manager.fullname" . }}-edit + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . 
}} + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + +{{- end }} diff --git a/chart/charts/cert-manager/templates/service.yaml b/chart/charts/cert-manager/templates/service.yaml new file mode 100755 index 0000000..734fdef --- /dev/null +++ b/chart/charts/cert-manager/templates/service.yaml @@ -0,0 +1,22 @@ +{{- if .Values.prometheus.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "cert-manager.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + targetPort: 9402 + selector: + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/chart/charts/cert-manager/templates/serviceaccount.yaml b/chart/charts/cert-manager/templates/serviceaccount.yaml new file mode 100755 index 0000000..cad1456 --- /dev/null +++ b/chart/charts/cert-manager/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} +{{- end }} +metadata: + name: {{ template "cert-manager.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} +{{- end }} diff --git a/chart/charts/cert-manager/templates/servicemonitor.yaml b/chart/charts/cert-manager/templates/servicemonitor.yaml new file mode 100755 index 0000000..ea92a65 --- /dev/null +++ b/chart/charts/cert-manager/templates/servicemonitor.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.servicemonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "cert-manager.fullname" . }} +{{- if .Values.prometheus.servicemonitor.namespace }} + namespace: {{ .Values.prometheus.servicemonitor.namespace }} +{{- else }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} + labels: + app: {{ template "cert-manager.name" . }} + app.kubernetes.io/name: {{ template "cert-manager.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ template "cert-manager.chart" . }} + prometheus: {{ .Values.prometheus.servicemonitor.prometheusInstance }} +{{- if .Values.prometheus.servicemonitor.labels }} +{{ toYaml .Values.prometheus.servicemonitor.labels | indent 4}} +{{- end }} +spec: + jobLabel: {{ template "cert-manager.fullname" . }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "cert-manager.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - targetPort: {{ .Values.prometheus.servicemonitor.targetPort }} + path: {{ .Values.prometheus.servicemonitor.path }} + interval: {{ .Values.prometheus.servicemonitor.interval }} + scrapeTimeout: {{ .Values.prometheus.servicemonitor.scrapeTimeout }} +{{- end }} diff --git a/chart/charts/cert-manager/templates/webhook-apiservice.yaml b/chart/charts/cert-manager/templates/webhook-apiservice.yaml new file mode 100755 index 0000000..ed46424 --- /dev/null +++ b/chart/charts/cert-manager/templates/webhook-apiservice.yaml @@ -0,0 +1,22 @@ +{{- if .Values.webhook.enabled -}} +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.webhook.certmanager.k8s.io + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . }} + annotations: + certmanager.k8s.io/inject-ca-from-secret: "{{ .Release.Namespace }}/{{ include "webhook.servingCertificate" . }}" +spec: + group: webhook.certmanager.k8s.io + groupPriorityMinimum: 1000 + versionPriority: 15 + service: + name: {{ include "webhook.fullname" . }} + namespace: "{{ .Release.Namespace }}" + version: v1beta1 +{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-deployment.yaml b/chart/charts/cert-manager/templates/webhook-deployment.yaml new file mode 100755 index 0000000..a686f56 --- /dev/null +++ b/chart/charts/cert-manager/templates/webhook-deployment.yaml @@ -0,0 +1,82 @@ +{{- if .Values.webhook.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "webhook.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . }} +spec: + replicas: {{ .Values.webhook.replicaCount }} + selector: + matchLabels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.webhook.strategy }} + strategy: + {{- . | toYaml | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . }} + annotations: + {{- if .Values.webhook.podAnnotations }} +{{ toYaml .Values.webhook.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "webhook.fullname" . }} + {{- if .Values.global.priorityClassName }} + priorityClassName: {{ .Values.global.priorityClassName | quote }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.webhook.image.repository }}:{{ default .Chart.AppVersion .Values.webhook.image.tag }}" + imagePullPolicy: {{ .Values.webhook.image.pullPolicy }} + args: + {{- if .Values.global.logLevel }} + - --v={{ .Values.global.logLevel }} + {{- end }} + - --secure-port=6443 + - --tls-cert-file=/certs/tls.crt + - --tls-private-key-file=/certs/tls.key + {{- if .Values.webhook.extraArgs }} +{{ toYaml .Values.webhook.extraArgs | indent 10 }} + {{- end }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: +{{ toYaml .Values.webhook.resources | indent 12 }} + volumeMounts: + - name: certs + mountPath: /certs + volumes: + - name: certs + secret: + secretName: {{ include "webhook.servingCertificate" . 
}} + {{- with .Values.webhook.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.webhook.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.webhook.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-mutating-webhook.yaml b/chart/charts/cert-manager/templates/webhook-mutating-webhook.yaml new file mode 100755 index 0000000..a89d75d --- /dev/null +++ b/chart/charts/cert-manager/templates/webhook-mutating-webhook.yaml @@ -0,0 +1,39 @@ +{{- if .Values.webhook.enabled -}} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ include "webhook.fullname" . }} + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . 
}} + annotations: +{{- if .Values.webhook.injectAPIServerCA }} + certmanager.k8s.io/inject-apiserver-ca: "true" +{{- end }} +webhooks: + - name: webhook.certmanager.k8s.io + rules: + - apiGroups: + - "certmanager.k8s.io" + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - certificates + - issuers + - clusterissuers + - orders + - challenges + - certificaterequests + failurePolicy: Fail + clientConfig: + service: + name: kubernetes + namespace: default + path: /apis/webhook.certmanager.k8s.io/v1beta1/mutations +{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-rbac.yaml b/chart/charts/cert-manager/templates/webhook-rbac.yaml new file mode 100755 index 0000000..428882d --- /dev/null +++ b/chart/charts/cert-manager/templates/webhook-rbac.yaml @@ -0,0 +1,76 @@ +{{- if .Values.webhook.enabled -}} +{{- if .Values.global.rbac.create -}} +### Webhook ### +--- +# apiserver gets the auth-delegator role to delegate auth decisions to +# the core apiserver +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ include "webhook.fullname" . }}:auth-delegator + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- apiGroup: "" + kind: ServiceAccount + name: {{ include "webhook.fullname" . }} + namespace: {{ .Release.Namespace }} + +--- + +# apiserver gets the ability to read authentication. This allows it to +# read the specific configmap that has the requestheader-* entries to +# api agg +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: {{ include "webhook.fullname" . 
}}:webhook-authentication-reader + namespace: kube-system + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- apiGroup: "" + kind: ServiceAccount + name: {{ include "webhook.fullname" . }} + namespace: {{ .Release.Namespace }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "webhook.fullname" . }}:webhook-requester + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . }} +rules: +- apiGroups: + - admission.certmanager.k8s.io + resources: + - certificates + - certificaterequests + - issuers + - clusterissuers + verbs: + - create +{{- end -}} +{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-service.yaml b/chart/charts/cert-manager/templates/webhook-service.yaml new file mode 100755 index 0000000..a1c32b4 --- /dev/null +++ b/chart/charts/cert-manager/templates/webhook-service.yaml @@ -0,0 +1,24 @@ +{{- if .Values.webhook.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "webhook.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . }} +spec: + type: ClusterIP + ports: + - name: https + port: 443 + targetPort: 6443 + selector: + app: {{ include "webhook.name" . 
}} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-serviceaccount.yaml b/chart/charts/cert-manager/templates/webhook-serviceaccount.yaml new file mode 100755 index 0000000..7b41731 --- /dev/null +++ b/chart/charts/cert-manager/templates/webhook-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.webhook.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "webhook.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-validating-webhook.yaml b/chart/charts/cert-manager/templates/webhook-validating-webhook.yaml new file mode 100755 index 0000000..523d45f --- /dev/null +++ b/chart/charts/cert-manager/templates/webhook-validating-webhook.yaml @@ -0,0 +1,48 @@ +{{- if .Values.webhook.enabled -}} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ include "webhook.fullname" . }} + labels: + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "webhook.chart" . 
}} + annotations: +{{- if .Values.webhook.injectAPIServerCA }} + certmanager.k8s.io/inject-apiserver-ca: "true" +{{- end }} +webhooks: + - name: webhook.certmanager.k8s.io + namespaceSelector: + matchExpressions: + - key: "certmanager.k8s.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - {{ .Release.Namespace }} + rules: + - apiGroups: + - "certmanager.k8s.io" + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - certificates + - issuers + - clusterissuers + - certificaterequests + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: kubernetes + namespace: default + path: /apis/webhook.certmanager.k8s.io/v1beta1/validations +{{- end -}} diff --git a/chart/charts/cert-manager/values.yaml b/chart/charts/cert-manager/values.yaml new file mode 100755 index 0000000..672fcc4 --- /dev/null +++ b/chart/charts/cert-manager/values.yaml @@ -0,0 +1,172 @@ +# Default values for cert-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + isOpenshift: false + # - name: "image-pull-secret" + + # Optional priority class to be used for the cert-manager pods + priorityClassName: "" + rbac: + create: true + + logLevel: 2 + + leaderElection: + # Override the namespace used to store the ConfigMap for leader election + namespace: "" + +replicaCount: 1 + +strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + +image: + repository: quay.io/jetstack/cert-manager-controller + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. 
+ # tag: canary + pullPolicy: IfNotPresent + +# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer +# resources. By default, the same namespace as cert-manager is deployed within is +# used. This namespace will not be automatically created by the Helm chart. +clusterResourceNamespace: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +# Optional additional arguments +extraArgs: [] + # Use this flag to set a namespace that cert-manager will use to store + # supporting resources required for each ClusterIssuer (default is kube-system) + # - --cluster-resource-namespace=kube-system + # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted + # - --enable-certificate-owner-ref=true + +extraEnv: [] +# - name: SOME_VAR +# value: 'some value' + +resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + +# Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +podAnnotations: {} + +podLabels: {} +# Optional DNS settings, useful if you have a public and private DNS zone for +# the same domain on Route 53. What follows is an example of ensuring +# cert-manager can access an ingress or DNS TXT records at all times. +# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for +# the cluster to work. 
+# podDnsPolicy: "None" +# podDnsConfig: +# nameservers: +# - "1.1.1.1" +# - "8.8.8.8" + +nodeSelector: {} + +ingressShim: {} + # defaultIssuerName: "" + # defaultIssuerKind: "" + # defaultACMEChallengeType: "" + # defaultACMEDNS01ChallengeProvider: "" + +prometheus: + enabled: true + servicemonitor: + enabled: false + prometheusInstance: default + targetPort: 9402 + path: /metrics + interval: 60s + scrapeTimeout: 30s + labels: {} + +webhook: + enabled: true + replicaCount: 1 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + podAnnotations: {} + + # Optional additional arguments for webhook + extraArgs: [] + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: {} + + image: + repository: quay.io/jetstack/cert-manager-webhook + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + pullPolicy: IfNotPresent + + # If true, the apiserver's cabundle will be automatically injected into the + # webhook's ValidatingWebhookConfiguration resource by the CA injector. 
+ # in future this will default to false, as the apiserver can use the loopback + # configuration caBundle to talk to itself in kubernetes 1.11+ + # see https://github.com/kubernetes/kubernetes/pull/62649 + injectAPIServerCA: true + +cainjector: + enabled: true + +# Use these variables to configure the HTTP_PROXY environment variables +# http_proxy: "http://proxy:8080" +# http_proxy: "http://proxy:8080" +# no_proxy: 127.0.0.1,localhost + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] diff --git a/chart/charts/gitlab-runner-0.18.1.tgz b/chart/charts/gitlab-runner-0.18.1.tgz deleted file mode 100644 index 13b217137a6de10003849304d0c5ab04a0c90686..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14037 zcmV;`HY&*Dc zVQyr3R8em|NM&qo0PMZ%e%m;*I5=O2Pf?b0#&(ZM-F%DA`kYo|CDCbH@=9_tIqCe= z5D7_WQv{nJZEMomciCsz7uz3hyos`8$K#&)kNF|7NT5(vC{z^+1z<`-7mdvz@;n@r zTQd}d)&kAlKRw}7sZ=V*hlk?-N~MzjzjAn3`BSxibbMT`)vCN)wSIhf^e3o1B@(Ni zh=nNlQ{}tc$`ki5@*tmlzyTxFYrsv_K)xUUtk$inRW%&U?11>8c=wKk=V%OlAua~7 z`MX&d`m9kdOHs0HIxk1QOOeCMW|=Kmi05TLpub`}ETbC~qM%$mten*AWhr$dmrkkW zdsE{YFK%hzu!dp6l!S%}HZe?au|xmdBPCsx(? 
zqx*T)lO^m1xe;bhtpM&H|MlA8(LrwfAJl3unJ1wJZU^|e1>Ss%Az_DPEO-~oU2vq$f_8;v4M|mGFbqU6)z*#y`(O&7yRQ+ z)4fFtwgY4$KM&EA)3oEF5VLS+AF#_XFdRZiVW%_c((p3C413{DhLT3Pd>xH(;NcK6 zi%_9O$`QkXIgN;e%K|zpZ~Fl@{m6BRH#OxuPE`)I2BcdOWDI9mck2KLH#mT+{yFU4 zl5hroKtpO%cMrlD3gH%c0^I;l2@7!m;S7Uh5TGd{9t)RxVTIDO>3ohnhn4&7=GjHt znuT+BJHv8Qw<0@7e!B|Xm@NwEf<}SqP_FSs12Iv3Q9nT38o`hXS)u_T$KjIc;aerf zjFS}RxnRL?Mi|T^Nz)iZfWv^`8|+9C+)il7sUu_#)j6F_c&k+XYDBPa;>)00I6j!Ys7X?0&~fT;hOGXMk<$Ib8FK{tY5KWbjk0zhf$0 z>P-QAG@8!Tu!xcJsgt2QM!;P>ybHG(nR>`&#=Yn^png9Nm$5GzTx5 zk8!{?u}wWY3IgnfVz4}p1mxUsCFGcy*OHQOhRvx<$H+AqwzWo0nr%Q;3e6dw5Dzon zUrb6A%;+u9Nr*k(JN69Q*IG1WpL1h1j)?0#AKn~^ju+zK2DuHWS8`0wNbU{;mrkc# z&A9jmyB0KW5OLAi#ULj>fCAhA2aluaJ`it0_u&==-ag9@p@R3LC=OS^N zBYn>C%}dZK;wChhqi_{jGG5$@nW$mVh#9Q@~Oj0`yl==<2!8gA54r$R{A2! z-{4>@%1&{}zq*8p|8G%f&-fn_69GlPa4^2#QKrcXi6E45fj!awnNh9@DgZD zToNw$=#e3f@*H_+ij%U8>i{wE1!&uixcxP6xt65?P|sZmaKuY=Cuf%|_mMcbY1^1F z%{d9?N?FJWbzOSPMI@u0gBxOFXxcW7yin?JGLaoa7&wG+8FHlJCJf$VcdktHF2h)K zgeBRuC)yK?WfH7{DioleODe45sg~!_PzaB-HaUdYJ~s&5RtqJQD$F8#28aQ^W)_;G zzf%w0GAXds#U={87@Z;k=Y?f8Q3qeMvYf|>iI|*+NoiY{iW(D1oJ`(;^zc^@+)VIn zz}IiNkxRS~PXpnQTn2QG!x@ek%(>@oGr)Jk)&!f5%Q^K(NVx}D-p&k0lTJ)N6cu2T z!x5z5z)TID{r?E z4`UHeNQQ@)&FUEg2Ov!d+;Sh&qaNl`sD?duG{e=rJ{m_}7&WR#R^=ckSG*<-V9x#M zc*ZR4Fmz$zjX6 zDLKJGg0E)w9=_qqnoLZOhNds4`1^oFAi@$;M>Y84NR4od$5^^iWC9)yf#J|9Ku5<9 z_(U|NWLdF8qTFQJ%c_IrBvV?VRRisNEr}AK*p$R{D1=%XMNUdJ$Jg5#A6*$|oXu$D zI-Er=wXdbGi4w!2Le+9O>`O&zX}Oitmw?Z_Vl*TaJ$#mWHAGVeh%suDWD1g?V?m2M z_9nwVk}84QAkKPT(YjZKt&Blzi%6ZxAtZS6U#Y&CB6xdg26Z|kk zYWl>!HaXFzB09&ZAo8xf5KX5mx^=Zs2@4%Wo`|wxn|J3pKtefM83iZ=>Ft9^#90#K z7Br0s{_&>__S{iHe9D}78?h{M zO5~7`c$*w=&98*^tlPwutMG>SSmaYyIgeHBJExhJa?!-_Bp&QKX(hBYiI#X01_2hVO?hK+nMR5>G#3sZuSMo~KQ+_amlmG) zA&Y|px@!`dPlhcAz;y=-Lp=9Y$d7Z%<5PZxZOY__FhdM}jaaCIYt|EVG4D(2xQ-jY2;P z&&iyG4S|3*&s?g=I11wcEdv|bGvZAPkW1mEB#s8tTnJ_C(s6l?xC0^GogC^>NSTdX zY|Wi*Cuu##Io*0}CHVMHhk;og#)x|(F$69ks;U~>Z%H7n9@kK51rm8&WpMxF9rkd5 zT;4Gp023o_P)~j<24;+AONbY@?{f6+usRY`q@DCY-jVOF@nQ%50z0xS|5d=&+`M7z 
z(kUx5Y)1hJ7iF>J#mqUMGjJfLglt~pltZ`;uYk+09lA>XHF{G{Be6ka2|vs70H(=Z zEz>0-XToWW@M-B2jRHCC`w1^fD{I6=$r{oXf5^DK=0hn^l{*RJ? zcdfzwA|QFA7BB4pJJ<^eav4mBiyJG%WJ>9jPa=KaCAL)Lsj3;_5eopRf=g{YK({?g zb8tT5sh#zZ;S-I(7EVb#dD%?%?k9aozBq-ay!90oWC5$}BkqAwQ>^XX--f7SiP+TB z%4S9s)W{@qkw;J8uJK}}OxUbQcL>@wETKR6n-sjAf3yGyHH^(C6ah`?N^O!MI<12` zXRB3EjxVy>rF`*(nn-X%gk1mx#Jt{+?`65+!p99@_4K<-6wVq@lD#vMOfIDh3}z@u zd)Ly*sOKACb-7C0beup$2TFQBPBmBNr?1aV> z{)`xDWg&x5i4cM9N6I0qRVwq`%iawRZUYkH3v}1yb0jVt2T8d(p3`8V>mMFnkmV1B zFb=D=xUB6*eHDUfKsAGeeqRWqmd7InrFN1-<$g#4bL~>gFzu!cRq@faEM3W}Rpb}b zB$0fNT^|P<<2Vz=p9@9#O$9PbMHxz_M5u9hax^+RFkgLbbw>TGZnxbZ_1hmhBB`pB zB{i93X_r*I!gpZ-$vufRzsoIiJ28&r{7QFcSnbZ^}2e?BHwp0_ZpEazq1B>u2!o1P_5LyRUJqL{7k~R%bM5_dm4zqpSXT0@QY?hpz&+ zk&7~U)WP7QF?MChwbc<3SD!i}o=B!1C-1v#rUWBm+Pt+?$eRBgU{|c9%N2}e+(PQ& zAhmMy4?vL1a%dvYF_Xq}SPl~`3J~?Fvx0}Ia2a*ZMUjH~`PC_1d+n+eLGyxDDc#=E z`jGTE;}s(8-H?C^{=p3j2p1%^_@d+c#QRkS7*%-1kC$gqfa9E{+nF?(BEJHw;0Iws zw23zrO9%|7+TfLa#P|?E415%zISz5a_JLT~f?rG%dpA((HZR(zADZV^?b0v%3O=(J9IL0o$-4lq2aH?AM(3vAiO3l6)XiK!K zK%-(nyd)(^-#FO1lytKqk1-Y^?A>VP#(C2K!5kyE@~8Eo){_s)3e9U-k>aXfwh{$9 zVehgEF^Ie-yNdN`4yQPMCtl#;ex1wB8+#anHl6#CSOQb4@B+isn!K>9+X}X3m$Z(F zCFSpIKZ;FmywI~$L)+1~hOoID=jLQWJZ}7W_j4VP(nr$nJ%zg5MyeU1p^z66Q@`Pa z>%Zd_;BHQ%KOC_XvpEJP6DnFL#qDilNojM|wC~W|cX30+e5DlYF(lIu;>8PLwjvLx z2h68ooXyC9Q4x``k*DyL3raIQ*V-q*)5t{u+|uA02SPD3F?_99QOi~HEK#y}14`Wf z%XHXtwfrSb4A=iM-YwNUhFvZVB*2**y#G3ds&%yY9erDDX|}hGFl}Exf64N+UqtpC zEo6o)lKAgKIEz>&rWI2oR|ttTlXA_Qmy=vh<(1^SOsw3b`y03&(o(NFMW7=)z=UhO zXh7-yjwq?YM177O7ot-8jUO4Zu2Rk12oRSN;HC>D;i+BKmxq11Azjoec5Zj6HTI}NU4Rjry;Uk5F~a~k3t zIQ>$n@|1HW{d43vaun&o;8U?#q`tNp-};L>VYGY zIGh|bk!RdVW&B1u5;KWB0Uo&|TI!c0HToiY;; z#`erdac|6t*T*6%#HZcz6}n5klC#KEHWU}&P{af;)g%Ex_?KF4L6z>Nb`+`@;Yg?M ziDDrNrZ{Zg-~dhW2RXt;Oz4l^^Ob&b8>jHx7fEpCZg1EgSm9mx1Z}L@|68qB^ZS1f z4-V@u`~RNf`TErq+u1zZjU24QP)Z)Gc!O36Eco`#_>|gB)XyZoc=T58K8kk`sm){Q zJxlTSm?SuY!UlZ(x@r3yTd~K^{PxY*e$q#5Yiahoo$kAaSU7Qrlgf8m0P>RkYJ4s# 
zmi=~S*quwdTz3a-_aW&=mR806CawSAP{B+qi;_1!TuW@o!sk9c@@96>On1M|Bnvql^6T}9M5X| zuU5LScVlun$f}~;O7>b4lb^`NfX;i}_7af$^q5*KFR~zUYQ+D2)gBB-tzNfv)$g~v ztxtu}t4qL_Hx%8|OdP^Z%s>*qEhVMf?>d8FyU*>}Pr?b{T$%t4V3$uXcV~$Ggco^S zpd_~ymG<_Q&{W%&xIWICAPzk>MNU48EV1~z=M)e$Y`zkS6)Gi;A8ntvNL&(C`w(=2~bqXpS(Yw);eU-bH)9z|&?9qa*( zJPEz`rw?wxdZjBL&Sss=ws{**)xq~Q zKfL2q((I=FrkZ(7x9OHP@3j<759DErs)zCKab1=r?O`o0yFwc@wB#KXITlyv=iIyK ztZ<+Uz~_z@$cGq;{I7y6;d_IQMHQ@EKJiLV>1-roH=W@pK0O$=|27=;uezh=VASn( zNBv%JxFV{P35+}ttBB#?5g1(!+WmVy+_h~k1$e%P3Xw$p2r}OFdsml_rbD*+G=xkZ zLB!if6EN8X`P84QVv=Gaw7{&!SAdddHImgno&@(*xB<_&S)!GyRtT=@8O z&e~gWvFtiqKUjlNvR(*iGs*8pvAmGYa?Eg&u;?a za77aak!QnEUHnPrgJ?1#cTh4DV0vvrVj1t+)oqAVHsQAjx#IX4K)x^Vw~UWiLKo#j z-Ug?I!5GU^MH!vrbds?=-jZO~k=rCh8oSfAgPH%qO1aDt33@}kRHv@& zV0i+afXJm%pAw7I`Jt*SWGRtyzqS`d*)$G$V$zI|f9vzge}@43F0wI{{#t_4sPqWI zRRvh*LPVSXhY?Yi`=%8&YkNYj#pbYI zy=a>pX{*bwEx3w|Ws-)&&Ws=1gCC zS`$^{l*zN&@Lh-l54mz^u=S&&I5{9EQTMFJXYt!ue8fz0U*!#};nqxU2oNWKWD*hO zj25w!>KD%HIs!$#RG!PL)I9`^QxMz=7El_y7(2w9mS9&X1}NQ>mJ!>7UHn@CzDf1H zl!kdFk#3yEy5_!%3b+zVW0%0u1S@#DE99?2PNxoB*DFrc6C09PNd0v@TRIy&GfHUy zyS#sLOIuJ{u9m2orEhx*NWmgmA*8&f|Fy&1v$$lmDwDSP@cDtE~AeD;Kekx$*6C@@A9mMt@4<7uI zv%=snf607^j|77+U;dNB@s#?LM-V3|Z_QS#Js6CBYJWO?wQHlW0QTOBs)Kf`-yRlK zja!#hC&XBRfBJB#f`g(~Apfq{dw1R*H7_sEJ2I1S)atj-+TCHNc|JHTZ3zQ#-)|Fe>H@p(PD?#~^^lt(w$m+x8wEeD;md|zhT*^2>e1GZ-rOdMb8 zFh|!|pBV(a(Rt*CL>vaROFVUwk2sbdP!!HYk#uo!9w-*&#TM9~y}i}wUiLd5n!~md zK8;EN>M1VlX?PQ|Ml4qz_PL_pWhPKx_V}>(Q@fkz;!23?Mo8u$(iLMlPT9Jz>EhP5 zfXxzprTrf>Ie{$KAPL1@y9CBI3_?B;ZzaY4$z61&e$Gh#S6&2HJ%J3&nImIl}2 zAXk0O$7zAimu!as1T#*!W=kdPLOC61mdpODA*%mrh6GRtZ@)o)-~s zLX)|XzRDpTK)uQz9^Q-0%Cc(-~Zq^4@8u+w;5 zJFM+2LyV7;)0|D<=iKM?zDJ)(^3i82|;sv&z0+H2*e=v*((_Vf*58I5@3Wq)3T7 zh4^&$)h@%oK^01`z82Q_R@(bdaHtket^`V%YsA!{weadI`37_XE8!>*8G+SCIN7Si z8I@gMSy~)b$G~g8OTkW(2lavSk?U$EicU{;a<08pS7%Xpb&VJM@QMp+<8(R0EPZ~! 
zDr8dUOwfSi!C|IQY4BIPfrn#Pgiq=?pL`_{O^Z5$8ji&JH(mlu#Xk0yA?1i(W@-BLB*j(Pie#yUClhRk-__@ePs^*7 zYH#~0{Cw(|>X?$%{he6`d>VPoZs5Ncz4VbwPb!n^xQD%)EMojc9s~2+H=(G_$8d^F zpdfmZBnGEWr!OssX;WXbcm|PYM(WF{IHLc%F^SKaFTf)?06&9;auS89JS_!yGD?7K zgwUq>BTHIgqec6IW8;dHgc6l0aaM@&gCQ>oc>b^_GY!C00Jz+7Bvy(SENh}d^;E!b z)AXok3Kt03A-PcpopKW%-XQn%)qfkQX$QReD&KR#uRwk#F%B@g)+X3h6$mf8%Gxf0 zNV%1XV+p`qjD0u+@zbH6LQ|9$u7YsaOqbl?RgMOkKsjQ8c~r7R`e7qKx01{b zb1>8%=pWYAjAw6IzxkQsKda(@$zJpC4*;%-|JCcq`STx+E5|SKzh`;U@jvAItQ-dc z&teJqH;e)@U#J{4eR{kpfGIZf$IZZg4EG+z!40{%At%!o z=6(;W&@cV^Rr%!HtAirfbF~di`G}1aJFtDtqIt5yk>QP6+k0i{^;YW*VsdYIDYq!X zogVld*bLPUk61L%GF`R8^o?JewxKGlZDF%1J%}_0eN2)?Xqnjo@^J%pxC^+mqL8?a z5q6T<{|X;rqfTAn(m%du_23kLFCTxYBse+qG8Hh%6}RG2_pml!M;))3t`VQOc~4UX z)f8Rvn>82urhwmDGHHT%B#aNgjt7BG@=RX}pu{#__S|8!oqgC*BA?uUxAP?~tWK_8 ztyt8`dPcD<-91;FPkv7xxrU?;T#bck1rkf!Sow$|o`by*PSWshs2k1kg}8Gv0jznY zz$Aez{fJAH0|oSU{y?Y1w6_1LU8K5>?|}9cH0QkbbxL>{M>-Z z-y@x#G-%Nhr@)C@kvD*!#ck7@p6^pk$(ggp*RK%LPiXEY^ECZ2k%Vd`saRXm za^)#}`?je?{`ysB+kN|1O6QPsSOwdxWT^~U_R5j`V5i^@9=k%IiW@FSE#V{9D;l!t zEDb%QEA4Y5s+KBJsT*JZVTyxKEj`TGWkAQckrL{3Ldk{*CzI6_!cvbD;r98{_DS&&5#|s zq?cFpB%b@>;*138K{CeDncI4O9uPv9FF9rgI;KWK7&pUMCOKb#S1MvD9}l5pid9zM+rm5aS=-otkx%+aqjNa69hXJ)#KyUJrlbf33(e!%tN}X;PvC> zW~IdNhlF^}mSKWExqW4BNI?|?$iO%?vV>VBEE>3hD?Nf@iZFZJ3g z^JVKK$8>9taHn&0$7k>-VJLkKM1fg}OE0^Y_or|aFWqM@fGB7#Q|qBkhD9s$>Dv(s z8cf%DXnU!H>y!4@m#%Yfdw+D?{h#Gowf-~n(Gz5T%V*90&&qMVzI6Wg(aZU- z&+_EQoR7YJGct=p>Vg5{P0Kg6F%FSlAbL*&@^?PTaWARU)b|hs-?v(n+*H1F^0Yh) z>)W?@aYnQ^6sPR>@kFmMEw5+s_e*P9kJ9#;>jVmqlH=nhRZ3M$UpcV4T2e|?)3*{7 z=s8|qUC0pYxms$g&!#GF)%Xvni_J0d9O6yCR}rik|J8bZY5li$RDBu$&++85YEqF+ zGp7l}Hxz|4KJMjZH}ZCmWX-rJOzK7~#KA}6V0HWQw{NAEeA%Ziej{b9l!}h#UrUSq z@3#oe$DmSd$R}!rX=N`MTFE`46Y|{8gq|PBY!;1w#dgRVhN%!5WTM;OX%&_@s?9>_ zUt`1FKiPAi{(mY5aGn0=%0B1+9UUIL=>O+<{&4+&kLvk1)cBE$h2qruo;BceNvw$! 
zbyO0R7FeV%+Wh?WEBs1{2c>-|t*-PXePHksjM5Kr%ds&iLgNMpV}*kF^WNrl;pL`v z-Q{Lq6#GByS*8EeX-MDO#yb69sV$xVUVpj&<$0du$c8niC*;1%cX<;L8v+@beUJvv zMaTao^|!n@u;-)SA{_4|Um1nJ|1izv9{3%`M+exmvl$iXQ2hN6S3`MGu>Y>lYW**y z+C*~F_FZ;hZTz=-l-2*WN~QV||9O_@0senVxo76{X(hjua=pREy5$78f{ft}vxPIh z>c7XF;rn!VVa`n1p7Abo>C{?0K?Go(|5vRY<^8`(?dAN>XL*#fFk&|4t_uxkI56US zx;=brPB03^PX8{x1@(g&N#?ydA$P*z6Te_W?!;&D_DwX6U(6BlLhK>W#tpbgei#4_ zwIi{yF%*jCEd3LtEOH0}xd!y))$r=IIPH#4nNzqLW(GUM1o>|Y1;fm?YM6?4p#w%@ zZ;wc{HB(w4Uk5<3Ee|SPMdK6~&LWyFs*z60=Ez7F9!Jxp-16rT+cS^Y$OQ+F!=z-E z2N{6KzGht66c?6}A2uWYNY^rfVgya(>GIeg!MSFO(I%~j|8Y%n_O^z#S%2n{84Pl#D)nA(dn z43~I<4kjHY@flRwmqbBaY|V>-Wf-a+IQRy;)aU9uRa-+X06;tvf@#_=!Cq)m&s`+n zd}LpX6F3ck+Zmsb&yg1)ms>B-T_{BxAkUr&lZNkXtbxxF3q@Wp3PLhLwoVQAQ8-h7 z#$1%#_`wRNe-|I{olgQhB3=WIorJbAmTvhHQJD?^7}Q5s{qqK}dZS!^^)8``|Zn~EUEK5(}gWox6mB@oqFh&ak-#GBC5SG!XZf`fT`V+^M0Qf~^s1H%rO0hCzz=5xh-{j>Guue)Yo z^S{6Rb??8;a=Em(qkwS<9))C%Phagid@l6Y|6uxK0%!L|PmMG{lcTX${<&(t`kDaw z#(!!2zLiU05|9VP@>%r~*yM!Q23S92e(FPH*TPuZs?(WtX?Pi6hP`n4Yty|&3nro7 z$Z1&uHa^_YkH#*s%d!V=nK;$i2hqt_{qt{S;yL(ERMbUNZ+`z>=kJ@aYdfmMf98gb zRo+YbI>0^^xh;39G?=DH5-tF5no;1wtFKx@zooHc3(wXdVsyn}BpOpQz{rtIlqWP26EW{xF$O~8PhD}19z>e zvN=CYQ>9bGWlvm2q?3Kk2@U2@5<$aLu5kaS_eLHJS6V6+;K^yMn{ohRJ@UErzS);o z5`Pf@FZ;d!X}9E?Wtgdo{q}jgIcSgi?YFXKt9NnH8S>xx$^}W~$S38SgRkZRC%!l>FvIfl&&?Y-?tiwj|ED#=&;MIq+be&x`Tp)F=l^k8B~y!gB#(zlb;0%9&ZlHVkKk0gHUG z3l7)}yDm&!I+llF5O2EA)e#EG4HkY&>OJxtV;ej?mDBoNKfn`mhaI^CWoyrZp64!r zdZH#r349!IcMV&Hbv76cLKcX!-Om0zT~c5&=;?S6smQrYE^kv1+$`I9+}nG z-vP5Ct7Zd#h6!)U9VGk3rP`8v*W^jKY7g92tzL!QH#qPd_z?wY9I-vagn?9J;y4j9 zSqN1}ojBwM`s6m)s#RaZZi_?z6iqOJAMM4&!S-UZ7ZZD&F!1Pm7+6U$@Hk=Mp;@aa zn3*uFVM6=Pr`#>XVMx3w6FdBpv*hB09(h|46Pqjb9y@V2h`2|V!B(wSf!(Hy?~v!< z04{NegE{g%_|e68cX!z3JZn0RR{x?Q)hcwUgCTu%=T`OP1a|)yFCzGa;m5%&a>)WO zzJo87q@bQIh%07@2ObSE@8tu-giGQWVqCRjXEE_YB<-2l|vT>cNi={}qTI3V0F}PYIz-g8=b4ljEjt+}?+86sX!NETE9C4LBoxm@-*$OxDIQT`ehI6K> zhLuY^(XqUfCp5Tb(o7?8$Yg@WQOSb);S71Xb*>V}_QWuuqb5*Ect|cP>6w*e-d3%8 zykdN>m5Z8HnjF-EOxN+7A-iYjS98MCyrD*8)U6 
z0a2CglVd44>Py_6Cd5>2%z>@yYc4$EaD>bzQ9jb+bBJUhC_ZC>lb;OgnrycE+N{Qy z^Jag^E@%J3TessN3UB#PmCP*Y@M6hvrhTnOs;}!DPjrp*r%)xmId(BG_=T zO=yT&_d!pZ#jxyOb-V5UsCPBIyc)`zl!cd_z0>skd^GG_w0l=W*gC8fsPNc)7Lj;z z7$a1X7LFrSF>5EQ#X-_?kTPVIgqDg~tEQ^vFLBa!#b{wzMNAxhDuJ9+gjB~oWNbxV z^0rkyt|cfXS+32?4zD4@CW7I7fK`udUj$GfW9&3EWopPxScSueB6Cj<0PGjfG*F#oPXbyM&^Unred}j1j{QP*06py@L(5 zs5SCUGqUCqtX;t?J=XV!h7(M_%Y9^tz)E5{r^*)K`xxqI44Oj@d*#6K3V$1nY<=MT5?T4kMmYxMEfb zTh#+$59jIt-|YBHoy!c=m-@eYpe4@3x8PEF14tq(W~@^2OC} zjvzGyJN8nlNAajWN#x@sttnX)<2MbEjVF;SE(YTqi*?j-Qu$a{=6o3k!uv*^J)>d* ze=PyoOx*(LP$3g_p>4$_b>g~eK1y*Y_8CAxxtiOm3Oo2Fn)^~zihTg9!a4R@<8_qa zb!65KBvGOnaRG@JA=h2V_=e~;?M`weQ>#JoIt>?J-W?zL3_z=P&R$zykxC}uQ5e*< zvxXRp<`_E73qb~_qMAlQ-KnO$@KJpGguReb!`rK_C%YoZuAgYK4->Kvb$8qgqAIP- zR<(AdVI0Itp1Z>b8^Pd)VXIoLCon207@Ofyu_|U@*I=t!IZ;xCq@NNGC2gZC)x?QV z`3s`w19F`7K>^~k&{d3)sVBXRlh-1BA!VmiK1lj>`K^P4RDP#3Q-VJhzqZNQ~+k{?Zuyt~{ zo>Q-dP$vFd;?5-L77RvWfXD%x&) zdRnLKq3X)4ce!<}8_}nM>}L-05b)CnX%vaGu$mnizEvSdh_A@W1avdD{iMFWJ5IFR zo*XN>78r?cZN>8vFoA?S43HaO{l5T^`Q z;?W2X6Q3z6rbz)4)Vt%JJihz$Fs#s?n- zXpXr*k@zdT8^lb&FyS)5J_<0zF;s49&{P`yt>Z&MFtV@31=O0ul8NyIg z(YEH!o?&owa{0H^3weKpGZb=?#Z}Ugh!JztK67WjoY=X%;!Wc#hrKk16y(>_`KuRNM3Zexx9uT_ro`oCT| zc-jB+EYEO;g;%Ro2kc&ShI@w5qW(gtG}yKGxHorV3YRXeZjDPE%n6fz3=`^gywKWq zA0}M)(~0oHrg&dOCA2MtMXR$`6V!kshl{zy*%6+SSf#*KV zsY53Ge=Nun`D1LhpPHBLbM>v1lduJ4u~O!`hE08fnN*DlcKgacaNf*Gs8}TCYPT~w z&r(8|F^K|?u$eS~4h2T{R~wAP#5N3r_cj{yIhq}Juu_x)1>OOE(yOr!Me@u~wXccW zHiN&?cj+3z>I!%xtA3(f8)_^H!9+lThwF8l4l0D66+b8&gzX`k&wr`tNeI_q@b z!JDfgpM!x2g}2XO*aP060@Y~`IIxR$pL^)T=9|uWXZUH~c-t9vIn1}cJ~ZL7*&lXV zSLe+>Twe7rdxJJKyJyhtbvxa+{Z98?`=Z?)TF~i2w+HPH?d}i;@0;i6yeXr3HGJRe zbNryyyZqGeyn8={_r3G8c7FhG+Hl@!zBzBprn;Y?b>8e;?88~}qWP{ZYW0Ti+kJx< zli0z>_ig@)x7X}Kvo-AWx||GQvYM^oJ`8*PVO;fNXVBh+*fC`>CN{zO-!uNa4e!)ixi=MyDNBp# z#-D#Is0pT88#9RmNA=q4qvO}`v+?IYn>t+eYIg~{Jz0W_6mQQIS5KH~Ga-~@((w;g z=D(7xBcn|6-uS|8xT;vpZBa6vuxA(rWHg{R7!5yN3dX(Kl^L6nNZl|Q{0?CNJD(eo z@8h8HWrzRm(p&NOUNN|{-pB5FuX#4Q>Yu|Yl=K9Rg)~4@Y{?tkkxy89+8G~GL0z^a 
zIOTf;r%1{aMSCWiJ4XklLOglLtNat*@vnB%c<=G4V4<}97;kpaymCk&xK&V`mzQbw z>>NtDCQ4vZ{&J~SiPxIVsRMt#TT$Q{$A4D;%Lw*Ux3O;icTmrt|5iJ$zv%yGc|MYG z1_7RmK)9Scs5zc?+tsw;Ws>+ZNqm_kzDyDyI!QE5(=guBKt##I83`QlQ6SQ*=RWmB z&|R#P`ZN$@SS+m;=%IZGXOv+hRga5$E}+aSQn7A<+;|1R59keXWE5W%xb<+LB+o#; zICFdK;sm9CpUG|3{>Li;Puj-1{a^KirTJgIT6?ko&+%;M5-{ImcP^Ktxgj7-EuzcZ z7zQX;Tx4>4HkPAyL8AbEO7U!Ire<#~Dj37`Kz00960CX-WI H00031q6S0p diff --git a/chart/charts/gitlab-runner/.gitlab-ci.yml b/chart/charts/gitlab-runner/.gitlab-ci.yml new file mode 100755 index 0000000..8c677e6 --- /dev/null +++ b/chart/charts/gitlab-runner/.gitlab-ci.yml @@ -0,0 +1,66 @@ +default: + image: registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-charts-build-base + tags: + - gitlab-org + +variables: + GIT_CLONE_PATH: $CI_BUILDS_DIR/gitlab-runner + +stages: +- test +- release + +lint: + stage: test + script: + - helm lint . + +release development: + stage: release + script: + - helm init --client-only + - helm package . + when: manual + only: + - branches + except: + - master + artifacts: + paths: + - gitlab-runner*.tgz + expire_in: 7d + +release beta: + stage: release + variables: + S3_URL: s3://${S3_BUCKET}${S3_PATH} + REPO_URL: https://${S3_BUCKET}.s3.amazonaws.com${S3_PATH} + script: + - apk add --no-cache py-pip + - pip install awscli + - helm init --client-only + - 'beta_info=$(git describe --long | sed -r "s/v[0-9\.]+(-rc[0-9]+)?-//")' + - 'build_time=$(date +%s)' + - 'sed -r "s/(version: [0-9\.]+-beta)/\1-${build_time}-${beta_info}/" -i Chart.yaml' + - 'sed -r "s/appVersion: .*/appVersion: bleeding/" -i Chart.yaml' + - 'sed -r "s/imagePullPolicy: IfNotPresent/imagePullPolicy: Always/" -i values.yaml' + - mkdir -p public/ + - aws s3 cp ${S3_URL}/index.yaml public/index.yaml || true + - (cd public; helm package ../) + - helm repo index public --merge public/index.yaml --url ${REPO_URL} + - aws s3 sync public ${S3_URL} --acl public-read + - 'echo "To install repository run: helm repo add gitlab-runner-beta ${REPO_URL} && 
helm repo update"' + only: + - master@gitlab-org/charts/gitlab-runner + +release stable: + stage: release + script: + - curl --request POST + --form "token=$CI_JOB_TOKEN" + --form ref=master + --form "variables[CHART_NAME]=$CI_PROJECT_NAME" + --form "variables[RELEASE_REF]=$CI_COMMIT_REF_NAME" + https://gitlab.com/api/v4/projects/2860651/trigger/pipeline + only: + - /\Av[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?\Z/@gitlab-org/charts/gitlab-runner diff --git a/chart/charts/gitlab-runner/.gitlab/changelog.yml b/chart/charts/gitlab-runner/.gitlab/changelog.yml new file mode 100755 index 0000000..3d069ab --- /dev/null +++ b/chart/charts/gitlab-runner/.gitlab/changelog.yml @@ -0,0 +1,36 @@ +default_scope: other +names: + new-feature: New features + security-fix: Security fixes + fix: Bug fixes + maintenance: Maintenance + documentation: Documentation changes + other: Other changes +order: +- new-feature +- security-fix +- fix +- maintenance +- documentation +- other +label_matchers: +- labels: + - documentation + scope: documentation +- labels: + - feature + scope: new-feature +- labels: + - security + scope: security-fix +- labels: + - bug + scope: fix +- labels: + - technical debt + scope: maintenance +- labels: + - backstage + scope: maintenance +authorship_labels: +- Community contribution diff --git a/chart/charts/gitlab-runner/.helmignore b/chart/charts/gitlab-runner/.helmignore new file mode 100755 index 0000000..73d4b16 --- /dev/null +++ b/chart/charts/gitlab-runner/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +gitlab-runner*.tgz +scripts/ diff --git a/chart/charts/gitlab-runner/CHANGELOG.md b/chart/charts/gitlab-runner/CHANGELOG.md new file mode 100755 index 0000000..cb66a09 --- /dev/null +++ b/chart/charts/gitlab-runner/CHANGELOG.md @@ -0,0 +1,183 @@ +## v0.18.1 (2020-07-01) + +### Maintenance + +- Update GitLab Runner version to 13.1.1 + +## v0.18.0 (2020-06-19) + +### Maintenance + +- Update GitLab Runner version to 13.1.0 + +### Other changes + +- Fix unregister when using token secret !231 (Bernd @arabus) +- Support specifying pod security context. !219 (Chen Yufei @cyfdecyf) + +## v0.17.1 (2020-06-01) + +### Maintenance + +- Update GitLab Runner version to 13.0.1 + +## v0.17.0 (2020-05-20) + +### New features + +- Expose settings for kubernetes resource limits and requests overwrites !220 (Alexander Petermann @lexxxel) +- Add support for setting Node Tolerations !188 (Zeyu Ye @Shuliyey) + +### Maintenance + +- Update GitLab Runner version to 13.0.0 +- Update package name in note !234 +- Pin CI jobs to gitlab-org runners !222 + +## v0.16.0 (2020-04-22) + +### New features + +- Add Service Account annotation support !211 (David Rosson @davidrosson) + +### Bug fixes + +- Support correct spelling of GCS secret !214 (Arthur Wiebe @arthur65) + +### Maintenance + +- Remove dependency of `gitlab-runner-builder` runner !221 +- Fix linting for forks with a different name than "gitlab-runner" !218 +- Install gitlab-changelog installation !217 + +### Other changes + +- Update GitLab Runner version to 12.10.1 +- Change listen address to not force IPv6 !213 (Fábio Matavelli @fabiomatavelli) + +## v0.15.0 (2020-03-20) + +### Maintenance + +- Update GitLab Runner version to 12.9.0 +- Update changelog generator configuration !212 +- Replace changelog entries generation script !209 + +### Other 
changes + +- Fix values.yaml typo !210 (Brian Choy @bycEEE) + +## v0.14.0 (2020-02-22) + +- Update GitLab Runner version to 12.8.0 + +## v0.13.0 (2020-01-20) + +- Add podLabels to the deployment !198 +- Mount custom-certs in configure init container !202 + +## v0.12.0 (2019-12-22) + +- Add `apiVersion: v1` to chart.yaml !195 +- Add documentation to protected Runners !193 +- Make securityContext configurable !199 +- Update GitLab Runner version to 12.6.0 + +## v0.11.0 (2019-11-20) + +- Variables for RUNNER_OUTPUT_LIMIT, and KUBERNETES_POLL_TIMEOUT !50 +- Add support for register protected Runners !185 + +## v0.10.1 (2019-10-28) + +- Update GitLab Runner to 12.4.1 + +## v0.10.0 (2019-10-21) + +- Updated GitLab Runner to 12.4.0 +- Use updated project path to release helm chart !172 +- Update resources API to stable verson !167 +- Add support for specifying log format !170 +- Use the cache.secret template to check if the secretName is set !166 +- Drop need for helm force update for now !181 +- Fix image version detection for old helm versions !173 + +## v0.9.0 (2019-09-20) + +- Use updated project path to release helm chart !172 +- Enabling horizontal pod auto-scaling based on custom metrics !127 +- Change base image used for CI jobs !156 +- Remove DJ as a listed chart maintainer !160 +- Release beta version on master using Bleeding Edge image !155 +- Update definition of 'release beta' CI jobs !164 +- Fix certs path in the comment in values file !148 +- Implement support for run-untagged option !140 +- Use new location for helm charts repo !162 +- Follow-up to adding run-untagged support !165 + +## v0.8.0 (2019-08-22) + +- Add suport for graceful stop !150 + +## v0.7.0 (2019-07-22) + +- Fix broken anchor link for gcs cache docs !135 +- Allow user to set rbac roles !112 +- Bump used Runner version to 12.1.0 !149 + +## v0.6.0 (2019-06-24) + +- Allow to manually build the package for development branches !120 +- When configuring cache: if no S3 secret assume IAM role 
!111 +- Allow to define request_concurrency value !121 +- Bump used Runner version to 12.0.0 !138 + +## v0.5.0 (2019-05-22) + +- Bump used Runner version to 11.11.0 !126 + +## v0.4.1 (2019-04-24) + +- Bump used Runner version to 11.10.1 !113 + +## v0.4.0 (2019-04-22) + +- Bump used Runner version to 11.10.0-rc2 !108 +- Fix a typo in values.yaml !101 +- Add pod labels for jobs !98 +- add hostAliases for pod assignment !89 +- Configurable deployment annotations !44 +- Add pod annotations for jobs !97 +- Bump used Runner version to 11.10.0-rc1 !107 + +## v0.3.0 (2019-03-22) + +- Change mount of secret with S3 distributed cache credentials !64 +- Add environment variables to runner !48 +- Replace S3_CACHE_INSECURE with CACHE_S3_INSECURE !90 +- Update values.yaml to remove invalid anchor in comments !85 +- Bump used Runner version to 11.9.0 !102 + +## v0.2.0 (2019-02-22) + +- Fix the error caused by unset 'locked' value !79 +- Create LICENSE file !76 +- Add CONTRIBUTING.md file !81 +- Add plain MIT text into LICENSE and add NOTICE !80 +- Fix incorrect custom secret documentation !71 +- Add affinity, nodeSelector and tolerations for pod assignment !56 +- Ignore scripts directory when buildin helm chart !83 +- Bump used Runner version to 11.8.0-rc1 !87 +- Fix year in Changelog - it's already 2019 !84 + +## v0.1.45 (2019-01-22) + +- Trigger release only for tagged versions !72 +- Fixes typos in values.yaml comments !60 +- Update chart to bring closer to helm standard template !43 +- Add nodeSelector config parameter for CI job pods !19 +- Prepare CHANGELOG management !75 +- Track app version in Chart.yaml !74 +- Fix the error caused by unset 'locked' value !79 +- Bump used Runner version to 11.7.0 !82 + diff --git a/chart/charts/gitlab-runner/CONTRIBUTING.md b/chart/charts/gitlab-runner/CONTRIBUTING.md new file mode 100755 index 0000000..1e55f92 --- /dev/null +++ b/chart/charts/gitlab-runner/CONTRIBUTING.md @@ -0,0 +1,16 @@ +## Developer Certificate of Origin + License + 
+By contributing to GitLab B.V., You accept and agree to the following terms and +conditions for Your present and future Contributions submitted to GitLab B.V. +Except for the license granted herein to GitLab B.V. and recipients of software +distributed by GitLab B.V., You reserve all right, title, and interest in and to +Your Contributions. All Contributions are subject to the following DCO + License +terms. + +[DCO + License](https://gitlab.com/gitlab-org/dco/blob/master/README.md) + +All Documentation content that resides under the [docs/ directory](/docs) of this +repository is licensed under Creative Commons: +[CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/). + +_This notice should stay as the first item in the CONTRIBUTING.md file._ diff --git a/chart/charts/gitlab-runner/Chart.yaml b/chart/charts/gitlab-runner/Chart.yaml new file mode 100755 index 0000000..efd54a7 --- /dev/null +++ b/chart/charts/gitlab-runner/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +appVersion: 13.1.1 +description: GitLab Runner +icon: https://gitlab.com/uploads/-/system/project/avatar/250833/runner_logo.png +keywords: +- git +- ci +- deploy +maintainers: +- email: support@gitlab.com + name: GitLab Inc. +name: gitlab-runner +sources: +- https://hub.docker.com/r/gitlab/gitlab-runner/ +- https://docs.gitlab.com/runner/ +version: 0.18.1 diff --git a/chart/charts/gitlab-runner/LICENSE b/chart/charts/gitlab-runner/LICENSE new file mode 100755 index 0000000..df96b29 --- /dev/null +++ b/chart/charts/gitlab-runner/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2018-2019 GitLab B.V. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ diff --git a/chart/charts/gitlab-runner/Makefile b/chart/charts/gitlab-runner/Makefile new file mode 100755 index 0000000..d83671c --- /dev/null +++ b/chart/charts/gitlab-runner/Makefile @@ -0,0 +1,20 @@ +GITLAB_CHANGELOG_VERSION ?= master +GITLAB_CHANGELOG = .tmp/gitlab-changelog-$(GITLAB_CHANGELOG_VERSION) + +.PHONY: generate_changelog +generate_changelog: export CHANGELOG_RELEASE ?= dev +generate_changelog: $(GITLAB_CHANGELOG) + # Generating new changelog entries + @$(GITLAB_CHANGELOG) -project-id 6329679 \ + -release $(CHANGELOG_RELEASE) \ + -starting-point-matcher "v[0-9]*.[0-9]*.[0-9]*" \ + -config-file .gitlab/changelog.yml \ + -changelog-file CHANGELOG.md + +$(GITLAB_CHANGELOG): OS_TYPE ?= $(shell uname -s | tr '[:upper:]' '[:lower:]') +$(GITLAB_CHANGELOG): DOWNLOAD_URL = "https://storage.googleapis.com/gitlab-runner-tools/gitlab-changelog/$(GITLAB_CHANGELOG_VERSION)/gitlab-changelog-$(OS_TYPE)-amd64" +$(GITLAB_CHANGELOG): + # Installing $(DOWNLOAD_URL) as $(GITLAB_CHANGELOG) + @mkdir -p $(shell dirname $(GITLAB_CHANGELOG)) + @curl -sL "$(DOWNLOAD_URL)" -o "$(GITLAB_CHANGELOG)" + @chmod +x "$(GITLAB_CHANGELOG)" diff --git a/chart/charts/gitlab-runner/NOTICE b/chart/charts/gitlab-runner/NOTICE new file mode 100755 index 0000000..aa3eb4d --- /dev/null +++ b/chart/charts/gitlab-runner/NOTICE @@ -0,0 +1,30 @@ +With regard to the GitLab Software: + +The MIT License (MIT) + +Copyright (c) 2018-2019 GitLab B.V. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +--- + +For all third party components incorporated into the GitLab Software, those +components are licensed under the original license provided by the owner of the +applicable component. + diff --git a/chart/charts/gitlab-runner/README.md b/chart/charts/gitlab-runner/README.md new file mode 100755 index 0000000..a05c351 --- /dev/null +++ b/chart/charts/gitlab-runner/README.md @@ -0,0 +1,3 @@ +# GitLab Runner Helm Chart + +This chart deploys a GitLab Runner instance into your Kubernetes cluster. For more information, please review [our documentation](http://docs.gitlab.com/ee/install/kubernetes/gitlab_runner_chart.html). \ No newline at end of file diff --git a/chart/charts/gitlab-runner/templates/NOTES.txt b/chart/charts/gitlab-runner/templates/NOTES.txt new file mode 100755 index 0000000..467a281 --- /dev/null +++ b/chart/charts/gitlab-runner/templates/NOTES.txt @@ -0,0 +1,14 @@ +{{- if include "gitlab-runner.gitlabUrl" . 
}} +Your GitLab Runner should now be registered against the GitLab instance reachable at: {{ include "gitlab-runner.gitlabUrl" . }} +{{- else -}} +############################################################################## +## WARNING: You did not specify an gitlabUrl in your 'helm install' call. ## +############################################################################## + +This deployment will be incomplete until you provide the URL that your +GitLab instance is reachable at: + + helm upgrade {{ .Release.Name }} \ + --set gitlabUrl=http://gitlab.your-domain.com,runnerRegistrationToken=your-registration-token \ + gitlab/gitlab-runner +{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/_cache.tpl b/chart/charts/gitlab-runner/templates/_cache.tpl new file mode 100755 index 0000000..80b001c --- /dev/null +++ b/chart/charts/gitlab-runner/templates/_cache.tpl @@ -0,0 +1,28 @@ +{{- define "gitlab-runner.cache" }} +{{- if .Values.runners.cache.cacheType }} +- name: CACHE_TYPE + value: {{ default "" .Values.runners.cache.cacheType | quote }} +- name: CACHE_PATH + value: {{ coalesce .Values.runners.cache.cachePath .Values.runners.cache.s3CachePath | default "" | quote }} +{{- if .Values.runners.cache.cacheShared }} +- name: CACHE_SHARED + value: "true" +{{- end }} +{{- if eq .Values.runners.cache.cacheType "s3" }} +- name: CACHE_S3_SERVER_ADDRESS + value: {{ include "gitlab-runner.cache.s3ServerAddress" . 
}} +- name: CACHE_S3_BUCKET_NAME + value: {{ default "" .Values.runners.cache.s3BucketName | quote }} +- name: CACHE_S3_BUCKET_LOCATION + value: {{ default "" .Values.runners.cache.s3BucketLocation | quote }} +{{- if .Values.runners.cache.s3CacheInsecure }} +- name: CACHE_S3_INSECURE + value: "true" +{{- end }} +{{- end }} +{{- if eq .Values.runners.cache.cacheType "gcs" }} +- name: CACHE_GCS_BUCKET_NAME + value: {{ default "" .Values.runners.cache.gcsBucketName | quote }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/_env_vars.tpl b/chart/charts/gitlab-runner/templates/_env_vars.tpl new file mode 100755 index 0000000..d8c83f2 --- /dev/null +++ b/chart/charts/gitlab-runner/templates/_env_vars.tpl @@ -0,0 +1,95 @@ +{{- define "gitlab-runner.runner-env-vars" }} +- name: CI_SERVER_URL + value: {{ include "gitlab-runner.gitlabUrl" . }} +- name: CLONE_URL + value: {{ default "" .Values.runners.cloneUrl | quote }} +- name: RUNNER_REQUEST_CONCURRENCY + value: {{ default 1 .Values.runners.requestConcurrency | quote }} +- name: RUNNER_EXECUTOR + value: "kubernetes" +- name: REGISTER_LOCKED + {{ if or (not (hasKey .Values.runners "locked")) .Values.runners.locked -}} + value: "true" + {{- else -}} + value: "false" + {{- end }} +- name: RUNNER_TAG_LIST + value: {{ default "" .Values.runners.tags | quote }} +- name: RUNNER_OUTPUT_LIMIT + value: {{ default "" .Values.runners.outputLimit | quote }} +- name: KUBERNETES_IMAGE + value: {{ .Values.runners.image | quote }} +{{ if .Values.runners.privileged }} +- name: KUBERNETES_PRIVILEGED + value: "true" +{{ end }} +- name: KUBERNETES_NAMESPACE + value: {{ default .Release.Namespace .Values.runners.namespace | quote }} +- name: KUBERNETES_POLL_TIMEOUT + value: {{ default "" .Values.runners.pollTimeout | quote }} +- name: KUBERNETES_CPU_LIMIT + value: {{ default "" .Values.runners.builds.cpuLimit | quote }} +- name: KUBERNETES_CPU_LIMIT_OVERWRITE_MAX_ALLOWED + value: {{ default "" 
.Values.runners.builds.cpuLimitOverwriteMaxAllowed | quote }} +- name: KUBERNETES_MEMORY_LIMIT + value: {{ default "" .Values.runners.builds.memoryLimit | quote }} +- name: KUBERNETES_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED + value: {{ default "" .Values.runners.builds.memoryLimitOverwriteMaxAllowed | quote }} +- name: KUBERNETES_CPU_REQUEST + value: {{ default "" .Values.runners.builds.cpuRequests | quote }} +- name: KUBERNETES_CPU_REQUEST_OVERWRITE_MAX_ALLOWED + value: {{ default "" .Values.runners.builds.cpuRequestsOverwriteMaxAllowed | quote }} +- name: KUBERNETES_MEMORY_REQUEST + value: {{ default "" .Values.runners.builds.memoryRequests| quote }} +- name: KUBERNETES_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED + value: {{ default "" .Values.runners.builds.memoryRequestsOverwriteMaxAllowed | quote }} +- name: KUBERNETES_SERVICE_ACCOUNT + value: {{ default "" .Values.runners.serviceAccountName | quote }} +- name: KUBERNETES_SERVICE_CPU_LIMIT + value: {{ default "" .Values.runners.services.cpuLimit | quote }} +- name: KUBERNETES_SERVICE_MEMORY_LIMIT + value: {{ default "" .Values.runners.services.memoryLimit | quote }} +- name: KUBERNETES_SERVICE_CPU_REQUEST + value: {{ default "" .Values.runners.services.cpuRequests | quote }} +- name: KUBERNETES_SERVICE_MEMORY_REQUEST + value: {{ default "" .Values.runners.services.memoryRequests | quote }} +- name: KUBERNETES_HELPER_CPU_LIMIT + value: {{ default "" .Values.runners.helpers.cpuLimit | quote }} +- name: KUBERNETES_HELPER_MEMORY_LIMIT + value: {{ default "" .Values.runners.helpers.memoryLimit | quote }} +- name: KUBERNETES_HELPER_CPU_REQUEST + value: {{ default "" .Values.runners.helpers.cpuRequests | quote }} +- name: KUBERNETES_HELPER_MEMORY_REQUEST + value: {{ default "" .Values.runners.helpers.memoryRequests | quote }} +- name: KUBERNETES_HELPER_IMAGE + value: {{ default "" .Values.runners.helpers.image | quote }} +- name: KUBERNETES_PULL_POLICY + value: {{ default "" .Values.runners.imagePullPolicy | quote }} +{{- if 
.Values.runners.pod_security_context }} +{{- if .Values.runners.pod_security_context.run_as_non_root }} +- name: KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_NON_ROOT + value: "true" +{{- end }} +{{- if .Values.runners.pod_security_context.run_as_user }} +- name: KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_USER + value: {{ .Values.runners.pod_security_context.run_as_user | quote }} +{{- end }} +{{- if .Values.runners.pod_security_context.run_as_group }} +- name: KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_GROUP + value: {{ .Values.runners.pod_security_context.run_as_group | quote }} +{{- end }} +{{- if .Values.runners.pod_security_context.fs_group }} +- name: KUBERNETES_POD_SECURITY_CONTEXT_FS_GROUP + value: {{ .Values.runners.pod_security_context.fs_group | quote }} +{{- end }} +{{- end }} +{{- if .Values.runners.cache -}} +{{ include "gitlab-runner.cache" . }} +{{- end }} +{{- if .Values.envVars -}} +{{ range .Values.envVars }} +- name: {{ .name }} + value: {{ .value | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/chart/charts/gitlab-runner/templates/_helpers.tpl b/chart/charts/gitlab-runner/templates/_helpers.tpl new file mode 100755 index 0000000..b9f1c51 --- /dev/null +++ b/chart/charts/gitlab-runner/templates/_helpers.tpl @@ -0,0 +1,78 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "gitlab-runner.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "gitlab-runner.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "gitlab-runner.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Define the name of the secret containing the tokens +*/}} +{{- define "gitlab-runner.secret" -}} +{{- default (include "gitlab-runner.fullname" .) .Values.runners.secret | quote -}} +{{- end -}} + +{{/* +Define the name of the s3 cache secret +*/}} +{{- define "gitlab-runner.cache.secret" -}} +{{- if .Values.runners.cache.secretName -}} +{{- .Values.runners.cache.secretName | quote -}} +{{- end -}} +{{- end -}} + +{{/* +Template for outputing the gitlabUrl +*/}} +{{- define "gitlab-runner.gitlabUrl" -}} +{{- .Values.gitlabUrl | quote -}} +{{- end -}} + +{{/* +Template runners.cache.s3ServerAddress in order to allow overrides from external charts. 
+*/}} +{{- define "gitlab-runner.cache.s3ServerAddress" }} +{{- default "" .Values.runners.cache.s3ServerAddress | quote -}} +{{- end -}} + +{{/* +Define the image, using .Chart.AppVersion and GitLab Runner image as a default value +*/}} +{{- define "gitlab-runner.image" }} +{{- $appVersion := ternary "bleeding" (print "v" .Chart.AppVersion) (eq .Chart.AppVersion "bleeding") -}} +{{- $image := printf "gitlab/gitlab-runner:alpine-%s" $appVersion -}} +{{- default $image .Values.image }} +{{- end -}} + +{{/* +Unregister runners on pod stop +*/}} +{{- define "gitlab-runner.unregisterRunners" -}} +{{- if or (and (hasKey .Values "unregisterRunners") .Values.unregisterRunners) (and (not (hasKey .Values "unregisterRunners")) .Values.runnerRegistrationToken) -}} +lifecycle: + preStop: + exec: + command: ["/entrypoint", "unregister", "--all-runners"] +{{- end -}} +{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/configmap.yaml b/chart/charts/gitlab-runner/templates/configmap.yaml new file mode 100755 index 0000000..ed1230a --- /dev/null +++ b/chart/charts/gitlab-runner/templates/configmap.yaml @@ -0,0 +1,129 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "gitlab-runner.fullname" . }} + labels: + app: {{ include "gitlab-runner.fullname" . }} + chart: {{ include "gitlab-runner.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + entrypoint: | + #!/bin/bash + set -e + mkdir -p /home/gitlab-runner/.gitlab-runner/ + cp /scripts/config.toml /home/gitlab-runner/.gitlab-runner/ + + # Register the runner + if [[ -f /secrets/accesskey && -f /secrets/secretkey ]]; then + export CACHE_S3_ACCESS_KEY=$(cat /secrets/accesskey) + export CACHE_S3_SECRET_KEY=$(cat /secrets/secretkey) + fi + + if [[ -f /secrets/gcs-applicaton-credentials-file ]]; then + export GOOGLE_APPLICATION_CREDENTIALS="/secrets/gcs-applicaton-credentials-file" + elif [[ -f /secrets/gcs-application-credentials-file ]]; then + export GOOGLE_APPLICATION_CREDENTIALS="/secrets/gcs-application-credentials-file" + else + if [[ -f /secrets/gcs-access-id && -f /secrets/gcs-private-key ]]; then + export CACHE_GCS_ACCESS_ID=$(cat /secrets/gcs-access-id) + # echo -e used to make private key multiline (in google json auth key private key is oneline with \n) + export CACHE_GCS_PRIVATE_KEY=$(echo -e $(cat /secrets/gcs-private-key)) + fi + fi + + if [[ -f /secrets/runner-registration-token ]]; then + export REGISTRATION_TOKEN=$(cat /secrets/runner-registration-token) + fi + + if [[ -f /secrets/runner-token ]]; then + export CI_SERVER_TOKEN=$(cat /secrets/runner-token) + fi + + if ! 
sh /scripts/register-the-runner; then + exit 1 + fi + + # Start the runner + exec /entrypoint run --user=gitlab-runner \ + --working-directory=/home/gitlab-runner + + config.toml: | + concurrent = {{ .Values.concurrent }} + check_interval = {{ .Values.checkInterval }} + log_level = {{ default "info" .Values.logLevel | quote }} + {{- if .Values.logFormat }} + log_format = {{ .Values.logFormat | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + listen_address = ':9252' + {{- end }} + configure: | + set -e + cp /init-secrets/* /secrets + register-the-runner: | + #!/bin/bash + MAX_REGISTER_ATTEMPTS=30 + + for i in $(seq 1 "${MAX_REGISTER_ATTEMPTS}"); do + echo "Registration attempt ${i} of ${MAX_REGISTER_ATTEMPTS}" + /entrypoint register \ + {{- range .Values.runners.imagePullSecrets }} + --kubernetes-image-pull-secrets {{ . | quote }} \ + {{- end }} + {{- range $key, $val := .Values.runners.nodeSelector }} + --kubernetes-node-selector {{ $key | quote }}:{{ $val | quote }} \ + {{- end }} + {{- range .Values.runners.nodeTolerations }} + {{- $keyValue := .key }} + {{- if eq (.operator | default "Equal") "Equal" }} + {{- $keyValue = print $keyValue "=" (.value | default "" ) }} + {{- end }} + --kubernetes-node-tolerations {{ $keyValue }}:{{ .effect | quote }} \ + {{- end }} + {{- range $key, $value := .Values.runners.podLabels }} + --kubernetes-pod-labels {{ $key | quote }}:{{ $value | quote }} \ + {{- end }} + {{- range $key, $val := .Values.runners.podAnnotations }} + --kubernetes-pod-annotations {{ $key | quote }}:{{ $val | quote }} \ + {{- end }} + {{- range $key, $value := .Values.runners.env }} + --env {{ $key | quote -}} = {{- $value | quote }} \ + {{- end }} + {{- if and (hasKey .Values.runners "runUntagged") .Values.runners.runUntagged }} + --run-untagged=true \ + {{- end }} + {{- if and (hasKey .Values.runners "protected") .Values.runners.protected }} + --access-level="ref_protected" \ + {{- end }} + {{- if .Values.runners.pod_security_context }} + {{- 
if .Values.runners.pod_security_context.supplemental_groups }} + {{- range $gid := .Values.runners.pod_security_context.supplemental_groups }} + --kubernetes-pod-security-context-supplemental-groups {{ $gid | quote }} \ + {{- end }} + {{- end }} + {{- end }} + --non-interactive + + retval=$? + + if [ ${retval} = 0 ]; then + break + elif [ ${i} = ${MAX_REGISTER_ATTEMPTS} ]; then + exit 1 + fi + + sleep 5 + done + + exit 0 + + check-live: | + #!/bin/bash + if /usr/bin/pgrep -f .*register-the-runner; then + exit 0 + elif /usr/bin/pgrep gitlab.*runner; then + exit 0 + else + exit 1 + fi diff --git a/chart/charts/gitlab-runner/templates/deployment.yaml b/chart/charts/gitlab-runner/templates/deployment.yaml new file mode 100755 index 0000000..c7c6007 --- /dev/null +++ b/chart/charts/gitlab-runner/templates/deployment.yaml @@ -0,0 +1,160 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "gitlab-runner.fullname" . }} + labels: + app: {{ include "gitlab-runner.fullname" . }} + chart: {{ include "gitlab-runner.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + replicas: 1 + selector: + matchLabels: + app: {{ include "gitlab-runner.fullname" . }} + template: + metadata: + labels: + app: {{ include "gitlab-runner.fullname" . }} + chart: {{ include "gitlab-runner.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + {{- range $key, $value := .Values.podLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} + {{- if .Values.metrics.enabled }} + prometheus.io/scrape: 'true' + prometheus.io/port: '9252' + {{- end }} + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- if .Values.securityContext.fsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end}} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + initContainers: + - name: configure + command: ['sh', '/config/configure'] + image: {{ include "gitlab-runner.image" . }} + imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} + env: + {{ include "gitlab-runner.runner-env-vars" . | indent 8 }} + volumeMounts: + - name: runner-secrets + mountPath: /secrets + readOnly: false + - name: scripts + mountPath: /config + readOnly: true + - name: init-runner-secrets + mountPath: /init-secrets + readOnly: true + {{- if .Values.certsSecretName }} + - name: custom-certs + readOnly: true + mountPath: /home/gitlab-runner/.gitlab-runner/certs/ + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + serviceAccountName: {{ if .Values.rbac.create }}{{ include "gitlab-runner.fullname" . }}{{ else }}"{{ .Values.rbac.serviceAccountName }}"{{ end }} + containers: + - name: {{ include "gitlab-runner.fullname" . }} + image: {{ include "gitlab-runner.image" . }} + imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} + {{- include "gitlab-runner.unregisterRunners" . | nindent 8 }} + command: ["/bin/bash", "/scripts/entrypoint"] + env: + {{ include "gitlab-runner.runner-env-vars" . 
| indent 8 }} + livenessProbe: + exec: + command: ["/bin/bash", "/scripts/check-live"] + initialDelaySeconds: 60 + timeoutSeconds: 1 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + exec: + command: ["/usr/bin/pgrep","gitlab.*runner"] + initialDelaySeconds: 10 + timeoutSeconds: 1 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + ports: + - name: metrics + containerPort: 9252 + volumeMounts: + - name: runner-secrets + mountPath: /secrets + - name: etc-gitlab-runner + mountPath: /home/gitlab-runner/.gitlab-runner + - name: scripts + mountPath: /scripts + {{- if .Values.certsSecretName }} + - name: custom-certs + readOnly: true + mountPath: /home/gitlab-runner/.gitlab-runner/certs/ + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumes: + - name: runner-secrets + emptyDir: + medium: "Memory" + - name: etc-gitlab-runner + emptyDir: + medium: "Memory" + - name: init-runner-secrets + projected: + sources: + {{- if and .Values.runners.cache .Values.runners.cache.cacheType }} + {{- if and (include "gitlab-runner.cache.secret" .) (eq .Values.runners.cache.cacheType "s3") }} + - secret: + name: {{ include "gitlab-runner.cache.secret" . }} + {{- end }} + {{- if eq .Values.runners.cache.cacheType "gcs"}} + - secret: + # Outdated default secret "s3access" kept for compatibilty with older installs using it. + # Will be removed in next major release: https://gitlab.com/gitlab-org/charts/gitlab-runner/merge_requests/177 + name: {{ default "s3access" (include "gitlab-runner.cache.secret" .) }} + {{- end }} + {{- end }} + - secret: + name: {{ include "gitlab-runner.secret" . }} + items: + - key: runner-registration-token + path: runner-registration-token + - key: runner-token + path: runner-token + {{- if .Values.certsSecretName }} + - name: custom-certs + secret: + secretName: {{ .Values.certsSecretName }} + {{- end }} + - name: scripts + configMap: + name: {{ include "gitlab-runner.fullname" . 
}} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 8 }} + {{- end }} diff --git a/chart/charts/gitlab-runner/templates/hpa.yaml b/chart/charts/gitlab-runner/templates/hpa.yaml new file mode 100755 index 0000000..bce03c0 --- /dev/null +++ b/chart/charts/gitlab-runner/templates/hpa.yaml @@ -0,0 +1,16 @@ +{{- if .Values.hpa}} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "gitlab-runner.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "gitlab-runner.fullname" . }} + minReplicas: {{ default 1 .Values.hpa.minReplicas }} + maxReplicas: {{ default 1 .Values.hpa.maxReplicas }} + metrics: +{{ toYaml .Values.hpa.metrics | indent 2 }} +{{- end}} diff --git a/chart/charts/gitlab-runner/templates/role-binding.yaml b/chart/charts/gitlab-runner/templates/role-binding.yaml new file mode 100755 index 0000000..5810043 --- /dev/null +++ b/chart/charts/gitlab-runner/templates/role-binding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: {{ if .Values.rbac.clusterWideAccess }}"ClusterRoleBinding"{{ else }}"RoleBinding"{{ end }} +metadata: + name: {{ include "gitlab-runner.fullname" . }} + labels: + app: {{ include "gitlab-runner.fullname" . }} + chart: {{ include "gitlab-runner.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: {{ if .Values.rbac.clusterWideAccess }}"ClusterRole"{{ else }}"Role"{{ end }} + name: {{ include "gitlab-runner.fullname" . 
}} +subjects: +- kind: ServiceAccount + name: {{ include "gitlab-runner.fullname" . }} + namespace: "{{ .Release.Namespace }}" +{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/role.yaml b/chart/charts/gitlab-runner/templates/role.yaml new file mode 100755 index 0000000..502ef2c --- /dev/null +++ b/chart/charts/gitlab-runner/templates/role.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: {{ if .Values.rbac.clusterWideAccess }}"ClusterRole"{{ else }}"Role"{{ end }} +metadata: + name: {{ include "gitlab-runner.fullname" . }} + labels: + app: {{ include "gitlab-runner.fullname" . }} + chart: {{ include "gitlab-runner.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: +- apiGroups: [""] + {{- if .Values.rbac.resources }} + resources: [{{ join ", " .Values.rbac.resources }}] + {{- else }} + resources: ["*"] + {{- end }} + {{- if .Values.rbac.verbs }} + verbs: [{{ join ", " .Values.rbac.verbs }}] + {{- else }} + verbs: ["*"] + {{- end }} +{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/secrets.yaml b/chart/charts/gitlab-runner/templates/secrets.yaml new file mode 100755 index 0000000..e3374f4 --- /dev/null +++ b/chart/charts/gitlab-runner/templates/secrets.yaml @@ -0,0 +1,15 @@ +{{- if or .Values.runnerRegistrationToken .Values.runnerToken -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "gitlab-runner.secret" . }} + labels: + app: {{ include "gitlab-runner.fullname" . }} + chart: {{ include "gitlab-runner.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + runner-registration-token: {{ default "" .Values.runnerRegistrationToken | b64enc | quote }} + runner-token: {{ default "" .Values.runnerToken | b64enc | quote }} +{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/service-account.yaml b/chart/charts/gitlab-runner/templates/service-account.yaml new file mode 100755 index 0000000..1ccea4e --- /dev/null +++ b/chart/charts/gitlab-runner/templates/service-account.yaml @@ -0,0 +1,15 @@ +{{- if .Values.rbac.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{- range $key, $value := .Values.rbac.serviceAccountAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + name: {{ include "gitlab-runner.fullname" . }} + labels: + app: {{ include "gitlab-runner.fullname" . }} + chart: {{ include "gitlab-runner.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- end -}} diff --git a/chart/charts/gitlab-runner/values.yaml b/chart/charts/gitlab-runner/values.yaml new file mode 100755 index 0000000..5fac332 --- /dev/null +++ b/chart/charts/gitlab-runner/values.yaml @@ -0,0 +1,389 @@ +## GitLab Runner Image +## +## By default it's using gitlab/gitlab-runner:alpine-v{VERSION} +## where {VERSION} is taken from Chart.yaml from appVersion field +## +## ref: https://hub.docker.com/r/gitlab/gitlab-runner/tags/ +## +# image: gitlab/gitlab-runner:alpine-v11.6.0 + +## Specify a imagePullPolicy +## 'Always' if imageTag is 'latest', else set to 'IfNotPresent' +## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images +## +imagePullPolicy: IfNotPresent + +## The GitLab Server URL (with protocol) that want to register the runner against +## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-register +## +# gitlabUrl: http://gitlab.your-domain.com/ + +## The Registration Token for adding new Runners to the GitLab Server. 
This must +## be retrieved from your GitLab Instance. +## ref: https://docs.gitlab.com/ce/ci/runners/README.html +## +# runnerRegistrationToken: "" + +## The Runner Token for adding new Runners to the GitLab Server. This must +## be retrieved from your GitLab Instance. It is the token of an already registered runner. +## ref: (we don't yet have docs for that, but we want to use existing token) +## +# runnerToken: "" +# +## Unregister all runners before termination +## +## Updating the runner's chart version or configuration will cause the runner container +## to be terminated and created again. This may cause your GitLab instance to reference +## non-existent runners. Un-registering the runner before termination mitigates this issue. +## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-unregister +## +# unregisterRunners: true + +## When stopping the runner, give it time to wait for its jobs to terminate. +## +## Updating the runner's chart version or configuration will cause the runner container +## to be terminated with a graceful stop request. terminationGracePeriodSeconds +## instructs Kubernetes to wait long enough for the runner pod to terminate gracefully. 
+## ref: https://docs.gitlab.com/runner/commands/#signals +terminationGracePeriodSeconds: 3600 + +## Set the certsSecretName in order to pass custom certificates for GitLab Runner to use +## Provide resource name for a Kubernetes Secret Object in the same namespace, +## this is used to populate the /home/gitlab-runner/.gitlab-runner/certs/ directory +## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates +## +# certsSecretName: + +## Configure the maximum number of concurrent jobs +## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section +## +concurrent: 10 + +## Defines in seconds how often to check GitLab for new builds +## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section +## +checkInterval: 30 + +## Configure GitLab Runner's logging level. Available values are: debug, info, warn, error, fatal, panic +## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section +## +# logLevel: + +## Configure GitLab Runner's logging format. Available values are: runner, text, json +## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section +## +# logFormat: + +## For RBAC support: +rbac: + create: false + ## Define specific rbac permissions. 
+ # resources: ["pods", "pods/exec", "secrets"] + # verbs: ["get", "list", "watch", "create", "patch", "delete"] + + ## Run the gitlab-bastion container with the ability to deploy/manage containers of jobs + ## cluster-wide or only within namespace + clusterWideAccess: false + + ## Use the following Kubernetes Service Account name if RBAC is disabled in this Helm chart (see rbac.create) + ## + # serviceAccountName: default + + ## Specify annotations for Service Accounts, useful for annotations such as eks.amazonaws.com/role-arn + ## + ## ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + ## + # serviceAccountAnnotations: {} + +## Configure integrated Prometheus metrics exporter +## ref: https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server +metrics: + enabled: true + +## Configuration for the Pods that the runner launches for each new job +## +runners: + ## Default container image to use for builds when none is specified + ## + image: ubuntu:16.04 + + ## Specify one or more imagePullSecrets + ## + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # imagePullSecrets: [] + + ## Specify the image pull policy: never, if-not-present, always. The cluster default will be used if not set. + ## + # imagePullPolicy: "" + + ## Defines number of concurrent requests for new job from GitLab + ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section + ## + # requestConcurrency: 1 + + ## Specify whether the runner should be locked to a specific project: true, false. Defaults to true. + ## + # locked: true + + ## Specify the tags associated with the runner. Comma-separated list of tags. + ## + ## ref: https://docs.gitlab.com/ce/ci/runners/#using-tags + ## + # tags: "" + + ## Specify if jobs without tags should be run. + ## If not specified, Runner will default to true if no tags were specified. 
In other case it will + ## default to false. + ## + ## ref: https://docs.gitlab.com/ce/ci/runners/#allowing-runners-with-tags-to-pick-jobs-without-tags + ## + # runUntagged: true + + ## Specify whether the runner should only run protected branches. + ## Defaults to False. + ## + ## ref: https://docs.gitlab.com/ee/ci/runners/#protected-runners + ## + # protected: true + + ## Run all containers with the privileged flag enabled + ## This will allow the docker:dind image to run if you need to run Docker + ## commands. Please read the docs before turning this on: + ## ref: https://docs.gitlab.com/runner/executors/kubernetes.html#using-docker-dind + ## + privileged: false + + ## The name of the secret containing runner-token and runner-registration-token + # secret: gitlab-runner + + ## Namespace to run Kubernetes jobs in (defaults to the same namespace of this release) + ## + # namespace: + + ## The amount of time, in seconds, that needs to pass before the runner will + ## timeout attempting to connect to the container it has just created. + ## ref: https://docs.gitlab.com/runner/executors/kubernetes.html + pollTimeout: 180 + + ## Set maximum build log size in kilobytes, by default set to 4096 (4MB) + ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section + outputLimit: 4096 + + ## Distributed runners caching + ## ref: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/configuration/autoscale.md#distributed-runners-caching + ## + ## If you want to use s3 based distributing caching: + ## First of all you need to uncomment General settings and S3 settings sections. 
+ ## + ## Create a secret 's3access' containing 'accesskey' & 'secretkey' + ## ref: https://aws.amazon.com/blogs/security/wheres-my-secret-access-key/ + ## + ## $ kubectl create secret generic s3access \ + ## --from-literal=accesskey="YourAccessKey" \ + ## --from-literal=secretkey="YourSecretKey" + ## ref: https://kubernetes.io/docs/concepts/configuration/secret/ + ## + ## If you want to use gcs based distributing caching: + ## First of all you need to uncomment General settings and GCS settings sections. + ## + ## Access using credentials file: + ## Create a secret 'google-application-credentials' containing your application credentials file. + ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runnerscachegcs-section + ## You could configure + ## $ kubectl create secret generic google-application-credentials \ + ## --from-file=gcs-application-credentials-file=./path-to-your-google-application-credentials-file.json + ## ref: https://kubernetes.io/docs/concepts/configuration/secret/ + ## + ## Access using access-id and private-key: + ## Create a secret 'gcsaccess' containing 'gcs-access-id' & 'gcs-private-key'. 
+ ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-cache-gcs-section + ## You could configure + ## $ kubectl create secret generic gcsaccess \ + ## --from-literal=gcs-access-id="YourAccessID" \ + ## --from-literal=gcs-private-key="YourPrivateKey" + ## ref: https://kubernetes.io/docs/concepts/configuration/secret/ + cache: {} + ## General settings + # cacheType: s3 + # cachePath: "gitlab_runner" + # cacheShared: true + + ## S3 settings + # s3ServerAddress: s3.amazonaws.com + # s3BucketName: + # s3BucketLocation: + # s3CacheInsecure: false + # secretName: s3access + + ## GCS settings + # gcsBucketName: + ## Use this line for access using access-id and private-key + # secretName: gcsaccess + ## Use this line for access using google-application-credentials file + # secretName: google-application-credentials + + ## Build Container specific configuration + ## + builds: {} + # cpuLimit: 200m + # cpuLimitOverwriteMaxAllowed: 400m + # memoryLimit: 256Mi + # memoryLimitOverwriteMaxAllowed: 512Mi + # cpuRequests: 100m + # cpuRequestsOverwriteMaxAllowed: 200m + # memoryRequests: 128Mi + # memoryRequestsOverwriteMaxAllowed: 256Mi + + ## Service Container specific configuration + ## + services: {} + # cpuLimit: 200m + # memoryLimit: 256Mi + # cpuRequests: 100m + # memoryRequests: 128Mi + + ## Helper Container specific configuration + ## + helpers: {} + # cpuLimit: 200m + # memoryLimit: 256Mi + # cpuRequests: 100m + # memoryRequests: 128Mi + # image: "gitlab/gitlab-runner-helper:x86_64-${CI_RUNNER_REVISION}" + + ## Helper container security context configuration + ## Refer to https://docs.gitlab.com/runner/executors/kubernetes.html#using-security-context + # pod_security_context: + # run_as_non_root: true + # run_as_user: 100 + # run_as_group: 100 + # fs_group: 65533 + # supplemental_groups: [101, 102] + + ## Service Account to be used for runners + ## + # serviceAccountName: + + ## If Gitlab is not reachable through $CI_SERVER_URL + ## 
+ # cloneUrl: + + ## Specify node labels for CI job pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + # nodeSelector: {} + + ## Specify node tolerations for CI job pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + # nodeTolerations: {} + + ## Specify pod labels for CI job pods + ## + # podLabels: {} + + ## Specify annotations for job pods, useful for annotations such as iam.amazonaws.com/role + # podAnnotations: {} + + ## Configure environment variables that will be injected to the pods that are created while + ## the build is running. These variables are passed as parameters, i.e. `--env "NAME=VALUE"`, + ## to `gitlab-runner register` command. + ## + ## Note that `envVars` (see below) are only present in the runner pod, not the pods that are + ## created for each build. + ## + ## ref: https://docs.gitlab.com/runner/commands/#gitlab-runner-register + ## + # env: + # NAME: VALUE + + +## Configure securitycontext +## ref: http://kubernetes.io/docs/user-guide/security-context/ +## +securityContext: + fsGroup: 65533 + runAsUser: 100 + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} + # limits: + # memory: 256Mi + # cpu: 200m + # requests: + # memory: 128Mi + # cpu: 100m + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + # Example: The gitlab runner manager should not run on spot instances so you can assign + # them to the regular worker nodes only. 
+ # node-role.kubernetes.io/worker: "true" + +## List of node taints to tolerate (requires Kubernetes >= 1.6) +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + # Example: Regular worker nodes may have a taint, thus you need to tolerate the taint + # when you assign the gitlab runner manager with nodeSelector or affinity to the nodes. + # - key: "node-role.kubernetes.io/worker" + # operator: "Exists" + +## Configure environment variables that will be present when the registration command runs +## This provides further control over the registration process and the config.toml file +## ref: `gitlab-runner register --help` +## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html +## +# envVars: +# - name: RUNNER_EXECUTOR +# value: kubernetes + +## list of hosts and IPs that will be injected into the pod's hosts file +hostAliases: [] + # Example: + # - ip: "127.0.0.1" + # hostnames: + # - "foo.local" + # - "bar.local" + # - ip: "10.1.2.3" + # hostnames: + # - "foo.remote" + # - "bar.remote" + +## Annotations to be added to manager pod +## +podAnnotations: {} + # Example: + # iam.amazonaws.com/role: + +## Labels to be added to manager pod +## +podLabels: {} + # Example: + # owner.team: + +## HPA support for custom metrics: +## This section enables runners to autoscale based on defined custom metrics. 
+## In order to use this functionality, Need to enable a custom metrics API server by +## implementing "custom.metrics.k8s.io" using supported third party adapter +## Example: https://github.com/directxman12/k8s-prometheus-adapter +## +#hpa: {} + # minReplicas: 1 + # maxReplicas: 10 + # metrics: + # - type: Pods + # pods: + # metricName: gitlab_runner_jobs + # targetAverageValue: 400m diff --git a/chart/charts/grafana-4.0.1.tgz b/chart/charts/grafana-4.0.1.tgz deleted file mode 100644 index 2bfeda8dc27d0a02cec153702785f4d61477b1b1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18232 zcmV)@K!Lv>iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMYcciT3$IC}opr`SVhPU2pZk{!qCPv@j-+&IbjG!I|fY1gbw zel|ox62>IK0-zi<_WkU4;YETkNw!s|(__w>#v<|9*x1-FY-~&z8lx2TUrrGd!5k&= zf2{o(3;8Z zKV!xTO@}bs?x8Fze(nXk!A>v2JY*yj>ecBKLyS>G(g_GkV;EBgzvh_D-jpVI2vZ?4KJ51mK?W%nJrc@J?H7hF zg9#DSe3Wy{LYfMkiXfzktyBD)ATBWLXZa{5VV@+EeoQCyOfZ!43^AOFv)#eqOOU0L z-bFsb#gPB+cJO^L@CUsFkyId(V#bF(A8>+5JcQp+I^_gU|2C1oW!F7`6iM*yr?N1M z^YDV;zlDT{v{IG{{i!UQAj*O`ZtE7DH4?`vcQ=2^ba%49;Z1Au>!Ebp2ANX zQhI{KF=oY3AOG-TC)nNz29J9)`))TF1lzrDO_C4KJwBF7W+=`v|9u3&V*dYh@ch|M zo&P`I*?!>v_wle13Wq&_kYOb75CqF{4oX40C;nU|Hzy|n$GV+GeJS*P;Fl_l)`EG{U$o^K1y^M3o($~`_ z$81JIydQ=%Pese<6`M8xo?pXRi1-k;d%cYfIKddsr7Q_W z4EReHBa%W!qlj>pE9p7PqX`xP&v1CI(deM)>POYp5WZeZz!Q@0&k%`6F_zHEH|Wd96v=nU zyzCr8gtM5=6DesWJQig^uh%09nkaNJoKa2$W%EKiDfA8ifhJa)1jzC@ zKB6%R=R-Igzo+7eVUAPLlgo4iJ}3f4aXbe}Z)7}I_+SjpfsR4-c$CNS2@V++JkYhw zFGzAOV1yw-X|CGP3p)amf|%lxS($pPVbED>H1Z|kD$TGjhvF-U{EU$q64+;WLbzb_ zzG`1_o~(p+XjqXC$^2NC{0YzXM~UWMPhc)yGn8PtP-JmjBLW|gaR|ZWC(7hepyMa;eF1@ zBn1RVj8KK`7;!G=7X+0ufdd%>$D+3ZS-r@x*Nf<7DtY7q;?ogDEaHc4>kMTXqZuPe z;C`61*pfgjQ79I!v}O;Q)MlDWHa6ghMzCKBJP9d{oEMdXZ_xS4KM;+qH}}+g(2n~M=)kI0h$X=B3<6_J9zSz zqUc8yqcp_K*4|V+lrmEvh|mpO5-~OGqNs1wkHe#~GMr$c3KhW2Gpx*9`BJV;37-6j zfru)qs`V#1iE!Vvsd=&aQA)o8nx2(H-z!Oj*aV9sS1d^)c3c<3TwH< 
zm>er0xdgtgiB10^B69tI0ebJNjfozEk+C@B++fBHLz-}f2>c-pWdof2Ce7x-8=_ikincLH zQA~bQ?ql6Vbx_$Xl++_M%h4cNRoL^?Z~TRDLDGrB(NM}(B3@4EWqN=F8fvk1IM!p@ zf(Qu&^imT0lw@#8;uun@j3KG@2|xn4KuqMBuBsNAT3lRfEQ}^tOmWVArL-`kU9mj1 z(hLwA5y23fOUc5liBmOEbn6vh-6by9K0M(V!wARpab)BuMLNBMkTp4s&Vkm$VL}c_9}HDpU#t#u1LBA*NAuK2zQeoX*Z=YY9rx1e;ND zcv2CoBb5GzNEp*RQadf9m`?g6{TB``i|0N`1(o8I`>Nsorh!`ikn+uAc^dGk2Z&47 zLsIReFt9%6a6rQg%*G_fy$w6gp%P*qpL+U_zG>tS?K(0>oYb#zIwOoKT?J-{5y>|s z_d!B4tPxfd4>T3j@jNPv1-c#WAL^gMMV2lDz>yTid9$2-ytn=y}-Cqyi5)KO$!4<=D}?| zZ%Q}Q^*UTKx^-U?f0rZ10ASrnjv-?P=C?Fb_YW<^h*mBZVx&^U3`}b_9RpJ(jM~yN zup|}z%EubO`ejuyFhKaB{H0woatSGs9_JI1a+uQ`E~Pq6QZ7&&YXU-2)F7+p8cnkCK4)<;5=VJz=ux)3 zRx4fWLmD95$OMNZOB|Mt#XDgR#h3}>+SF!~A5Kx4DyN+^c1RhU=HtgzA~7nCdLqK5SUiwuSDk8$&3wfX5qiVYonyUA;7+meqAfs?HTIUxrl3F) zX$~uu@x$q>A)HbvdCM}Iy{dUZF{j2FD)l62;=ZAPS+bDIWMk%?o-)h|{BJ2R>@Tei zAr2RK9#m$$G)wFXu`-h;*e{Be4!(bJiTJD1;Rlb7_kTJ2pOX*o?LJJ-1x?!O%DEgm z5clgv1KGMM?s7_jTp!~k#X{1~PVt6ey*S)=B@?f(ZT(q!CEGwO*tSHxv*7!s3ECH! z3m4K_WBv$nytjMvHvQ4wuG=oaXm8g9vN;0a7>_YieiK2Z${_Xm*wE=alvT(ePcijHCm(e?b@OwB}21eVSGgjCvEIYAoV?_6K=oL ziL=iRP$+VZ+U85IVBtp8EZEmndp0PZ3lbX0!YK|f&c>9T>t5?)xm459phxu)`wJX$25-Vr8M2mw9&$PmN+eE z)l9#h&oLsg+LsfE32FVyrc?vUkcuOe1%f8AG|@>qLotc8hc6e?Kz_B0zP>N7e8Uo= zGfpPyS)NtC=;=Sx?6HFIqQ@GkDpWM+^b{vqtjwy>T!Ajk3rVzRQygbffQu}y$SV!2 zyMrn*R4nyERN4JNJJ!Gq@-C#lAzSlhYd32}*643k5n%D(;?F&=`j*b+{2|$0$w*tg zRmK&dVwI;@&3+B%vTaEimWW6SNHN!QS=g5ocOG=Pwl?5gx2C{R8_Js310CxI)Xbk( zod8@=R@%+hjMfnMEGZQ|*q>Lv6Xr%aK8xvOA_w-Objt$3r%&1_;h$|-oZ8uZ0bVj! 
zvt*1G&&p`Z+L|GEOQI;Se$7jtXfU<4{-UL`I97cM`4OoYe=5cI*IUJX(a7*(EO{E$Xju8u|YK}`oR!@kpW`v|BK#K5cK%{K^ zgM{Xin1HL&cTQ5{K6m>g99W*FI^ac1FS$5vTZiH?*G6$hxpsm}k~DDu`)}Xs2q^`w z6s&PvcBnD0KfbpwD`+UJmkNW_q!DAm3v=G=}5_IKG@C9Rj}cF3iKMEfP$+H@Q(wpA6I zRvT8Y;e@fJT`!E5RFY|H`qth0G>zvl!c#OOG`CGv>XrPV;%vP=2XR>VDlEhnzr~`l zRsUS8rv3bfwL`2S!TVB;Itj!Vv9>cNOU{4p?(S{Z&j0Lg?>&7u|8pPDb^vd%fH}<> zJpTF>f}gcBr-%uF>+8o#n=BMaX?q$19!k3c?3s}X7FLn<{c;h=R*$h%?Ql>KC0t+2 z4~6~eSv`B4$FZ#IK_JWcR5>IwB&ILCuYp{gK{X!W+Jh_59y##+2!}Mn@Lzz#Ded)k z0wufORJZnwPoW4Lo)M%i9R~p>kTOv3udjioHMhgT`x7lL+^AuNN3&P1k+Z@egDwmO zd=>_#H4|s~^(&BZ0i=zWwce3y6;}JYBMy42h*!o8J|4e)!Z#%^9rnI{^?{*uf*TF1 z;7`*mSi4AqE8od#IF0n5fMbqL$u}l6+M@mAv0_>?!ijLGN0#A55C^Yp;PWYt8aySDm3oX;B$}~ zK(4(as&|#C-fV(*cvJ&UHqCDZA`v)I&~br8E*G)c>yq!xk897u5KgI`(MJZ7;iCd$ z#gta??*`H@o&#--9|}m&90aY4#-XXl6>c;OS48CHDzo>vGJ7Ylj(J7p5wb!{S5-gAsAhMxi9aunDf=?;GW6Cb2QT50US0A^iu*2fd-qQfy(g{gR z*L5LCmE_<8XRuwI9Fd|{8x|eXxTjhtW3|&_3pmlQt}xhG_hVWFfM52H-ygnzGc2Qe zrM}Cp5fJ_5FMm->R;jo#m0Q+ zpWj+xTl3S@|J`kb_4Khs|9`r@TR;E*Y_R+MLI1yx=j+%0cQ7N#5I7df4IugYi$tO& zoZ=yT*H`?k|6T9Zm&`IL*G-~=!-AeE=bN45I@k8gugZ%S=!8RxXImyFOMaMP#-!eQ z1%l;i2zyV}pCmcS$7AvZJilyAE=gZ=#Qt}^m(~YYcinYfYWqKbx=>39q z>#DqveaKO87~v4*9IMy}_^BF03S)v}$5;5-SufwmL1rK(^t;@AfEV}VgyrnA6GD3>~d{qabFKYX$JtU7QO zn9!|M#dra0E|Qfcx29}m1U@Am7dzTWM5SESQtbOT>av+=%*POHTN z<4L!c+}E!KaXm4cORcK%W>}erRBrI#eOQoR}+Z_wX!wTJ*n+M(fl4{$r{BxAXkz_Vc>`cW3aB|LI=X&Ilujhzy{R_3So@+xjIpeV17t$s|3fJTB0a=4DSMsH3*lQ_NB;fQPD2Y33 zT>(iWoC?@!cugz4HIjkIGR+Jju4ACJO0$P2K-ofHf&J!eH5Q}M+g5>lx}sjKGq_98} zMLI?MZb-pp8q%Of8rHIGi_VUGdACr^#h+XBx&mjA7I_9~49MJu&oEh;CQ ztcQ?#pUp|#VlTHz`PJbTMavbiXNNpl~44Le*GA4hfkf)rFVR zO*rH(qV_87{=8au&Vs2BqD`^m*Fp|jh_NHzPd7n!RaliMyXrRy(;>M2h@^c!HB_L| zAiyW^{7}+LauE>*ezwY?3;56f1k+eG6xM=O#{Y12LiV!>8ko5oRgW(4d{#`-JN0R$Wm~Cn z)L5%(fmE^i?$znvKOEG_`|aV$X|3Sf{U2YwZCeJ5kfS4Z%JpL#d;Q_p&Yu)+N zoJQ+y9ltvH@bUQNt43=hDvI^8!bH|WVw!~ae*fL8lcW8Xo|a&t*|H0kvmQF(tOjs4 zAUpZ#@aXLH?aA5CuZ|C2|6>*S>MeFav{01Wwr(xNE32S9^Sp#Jb`ypHzPWH)lur|q 
zP#aQP!ButhQgyQ@-lck5%SPE6NzqoZ%J7_?t1yH$)wz|(9XWSfX61(4ej`?O0}fOS z_=XHjLT5Z)XDBK9ph3jh@7E{jOdYM@%|JTgR7Bu|8$_iFy&wh3@vX$uT z3VzM0z||#RWe1K|-vHNU!7i~`Z)T;os{ge)wz+m{ra_0US1T zmeKri?UaSaykl&ZM*$u=Wn8nft7gqiebg*S#_)tPcrr!&Cp@=hy?=3TtiF>!!_jP%E+k)*`{lnjkIO>sNEhv_M%cu*%{r z4)(1Lt)+{qJsV0Za$8HTpl*x~orI2LqLI~8;b+_}vyMPHG=!bRFGd@_yk{5}@v z0-5}c{aFhjb2E|sAytLdhEYx5zn+2AdNv7fo9(s)OTVvmRm+0geS)N@kufT0NaG=# zzC7yg)XAQ|puZx~dwa`{rQH%WjMG{0=JnahtCt^-4^RJbwtw*M@cr4xlUK)0G`E+l znsBcfO@_rQo0jhQQ!wtd<14IQWsz1)jGwZjQve#9HPY^%m%_ zD`W0x|K#MC561_qV9%EQcfz2PLZFGoBBQ`^Oco?=5)`(f>+t=_>HgcdXGd>8zBzn< zvKURp%^(Xgwd|}#69aD6oE5wvDwjlc0$M5I-RaSKSI@$p+hS~KV0)>Vck{jrigXCp)UUHwG1`XH4xX@*3$0Xw6M1XVA(SAaXM=Qrgq6z z(@$3^(Bh-3U-Q*$p<1G~i3NVbF8^TLe#1(84XyhOO{1~mZFaAokOeANQMer~-nyu? zX|HRLAB(FSZrPS|Cx!g(UQw}HizaFRu-9s~m3KV#ZpyuH;xy}09=p2_icgKb?j<21 zu4dcexTpfv;i|YDY97fI+0kN~7k0^znl2W}{}`o0ana_8UEi5KF0j1W6}?&uP?E)_ zKMmYXN8EG)H{8FPOAZ&gkUPYho37g7=nLx!+p#Q~?+O03^}9Mv>SeRgt3~v=PUG_G4{h6;t1&cmDgkygK+=W5O`Lrr)wn?Z zokz$85_p`)91A_t4QW?QNjm9OXSi+r*qFj9x6}d$>Z+T=U2djEKX+}r;$6Bui#Hek zxiNL?4ejZ6AuUAxN^YSpkyB+Ob=FY+E?2Cf+jlC}rYz*VXhFnEuAwgIZnEU6>uV)I z+MGLGpe&q%Kj-P(-~VZz|1ily7>)6ot^;3k{&#zCr=I`k+4E<+59dGb<7pAwY=lC4 z1nr6I^im&+BngocUl$~eiW@19X^eYr&UjS=ifP zoue#U8n>hc$rKrvYqO|1IT@6OpE~(DY@~Lu1*Vu0p~5WO5+~-cW2;qF-uk5*soW6W zY76Z)Y=;}jzH(h!`Q{wzSRj_4v=-|=%%LW&_11MY<4oPZelrkO6vWlb+&%9zY=GxU zn`^%p2!zfoE=sQ928eFjQ?_`2ha@aGrAosF0gL|{oTI=Tgd-M67%Wgu3}&r|9`r@SL6TBpKcEx`2T%8 zYv=zzYMQy77`PQTG*tS(SBmglT@YqmYI;UaDkmpRu#4u3YsaeI@q0gHa!1o)0smi<_h)(j z=ciBW`9GdLeaQcOKhF(V`c2|!R^em+7D7S~eo@>~ap%_8ukbHQQt-CCwvrVo;oklQ zR}11&q@4L;n=1!OQc^^?d@7?;KG!#2ag38K{m;)RRwoc_tV*$YD?h^ZHM}T($)e2- zQ|HemvFJEpIUd6N^6Qk;R}*rnhV$)6rU3GMZGTHRYGSClFJ(}Z6E&M!P^f*oQa{7 z0J7H*GiJ)zEKCiv z}G15_!~Si9A|viactwMU)q#;gi+n-BDXMUB!KA z$&?>n(>#sjbg7%T;NFfdw?UzA6g6b0)7&Z#FGce!~vClc;Oq|2T{3Ji)10 zmjh@C|9`sOxc}?fv*$Yx{Qo{4twE|)yyQNS%~fJ|H|ty;Wf|`mtp1>&tq0FiJz!ix z46TPr%(u>6hu>vWIm6*lMb{`hi&tFR_N8u|7speS(Tqemee2-=1~ae(uZ?k*yy`?S 
zB=E$XE#~G|eXN41!0*n@8w;Qri9@0QRie7#ux>iAj7D$U2%-f3`v&j|BzK-|*Xr7S zo+o{KS-4#ECgj~Dlz$Z1tL1|q5y$GRv{%~-@HSQde2R9S?eRQuM^|*wZau8t=y4XWTqx~v}See8uTPS=f^`jFi zdAMvOZP&KGwZMCLDo<1Yn_?8jm~-FeFSxT7xXk|F89c4&e|vjRANK$5I9`1j+m#1ch)-RMUZhDh!_Psg2z38}XS;MFgN0bTI zA>4BDtPhV&ze=@r3R`g0o@-H9oy3qud0ScMt6!5b_=~`)-jthdkFY>i<{av}?J}nv z`=8nYVez-f5m76%xGgZw#!YGz4w z19K%lklP#0T^g=n+Wk-fxIJh!_I1xi++k%ARSE%&P8>irsi`NrOzy{a5oL7FtS<2VjOnq{W^d7Zyn1t{hSS^-+MDYjhL0TYXW2eW zXpwjgWf_vc%bq1gtTYamr-CH7+Ve{7wGX7Vy%tYr#>kAsc!Hy$4tJ9CBOyGpA71eg z#Yi6xvn3e*H75*52aIMfQHDk&CW2ryof~jqa)@y(V9aO&2MVuS%KW9dfP?*)XCHok zb$ony@Cqn{mw*58%X?!qzW(sbdy@s$hu0^sP7e>vkDm_TzBPZIygGeTygzw$`qBMy z^m6~m{CfZD^z6sO_XlSuua18{G?)MA*T?(6*ed@xIeYo{;}0KB&5L(Gz5ig_+y8iQ zczX8B@!{#KvWM5Y2PbQz{P4I);`P|RvFVwL7a61f!lA%X@kVc17C$3;nO-6m?H?VM zmD=)6$*ou3^%Hw1Cc0ZjzR@%o~lICB! zzvo#N;{>My#U?jNSJ5%or`5Z@+XU#xlnND1Ip+c=7T9m1owUgRS-3{u?|-ad|2^BQ z+kd+|gNOaUdwK44|4)U8b?y5dwXOU=wkmNS)~1$a)$k1mv93e)Mjq9c;}e~oj7f@O z@*8HhkMhOJU05-U!Vq)*jz(6PG+nHf@s-6(XRC6v*6uZ_`ftlL&w3Vq%`q>pcrZCN zI}?R+@*A$UZ)$X0!tPeB6@=Oex^KPS>ra#auQBnTd6wCKJG*uJZ};i5y$Ak(A5WXg zZ*o7?ch44OadPcJmGkEh&A0E*1pGZhH@-G9@71mLCyjJoW!A2abbxEj8d#oM?>z(1GH%W>3;H|x!y| z$OkpxcT)qLl#8pRNwkR>Z<>+O$u8NMTk>|9C0Ci_Hu7}DY+L5Z8xQbiX(s>R_S{XM zCjZx}fsfPKJ)Qp?G|vBTKil1VIRAey&)20lgmnbz%g?ur(bxBb{b@XOI<);mhJMYl zbIQJcioSNsT=OC}H%F5>x(-(`+S|ox=z62tk~6LG{I_$iY)v$_O#ge@IRCS|_x!>C ze=pAsPAsmS5&X%S06=-YY{g%!UVr53ZtmvtZfwJ=;z#CQOQWzJWKfh z)4}uI#`<&mQ>yeLU+wg866S>NQSmm&T%y-&&c~kI2}H>pmi*a=SR&^0ptnC10G# z^fuv%3gKxjXfs0Ak8w8dz&4eNbEUeTbIi2mhWuLLuU~5(Vlsx#0lcLs`Vqw_4U2oL zn@Pgm8f_xBm^7y`3r+Q1YrAdL+JxhxdMCJ@!^pfit}O-^lrTcr?WE3L@k+p3%IgW^(a$j&n~r|MJ$UkDriXKA)LNE zGVcUp6D*Fp8Yn*-rXbfHN}~k}?>&vw*(tg1q|(%}_)=4BbJMw_-@cTvbXCxBbi~-N z=T!H%mR%3e-JeeV4{49>I@G}B{=Y{4kKLW;5BlGIJnbCZxa;js(2`Ugb|YYo_rH9@ zt(h7Me(y_R*04cx7dyCR{g+8v?q~n^>F%KA{?~{6kN5KYVeKXF^a|1gRh2-Tsl-wJ{t5$BHxW&B8$HuRNF)5>(GzoZ6ZjPR z5uWwaJdVLX1pgAezkZcnoXL{c9z4ofylEd|9g2_?EiKd@CV2c@_WdOKY$0aI2qfSj6$#+cVw`*Wm<%m 
zOa(SkL3>IB_v^xh2u{J)+zZZkC#QrDMM4Sn%SjEPe%s^{>PL}SLjBU_*n@Ps-LsJY zujwf8a{d2VWB=zN{_}pGJM;gWoD#b80pUNX1TZIL+s?mgdHPMyzAB`DeqgYF%4aG6 zU&B$*W&D3<@T`9S=iovAzn|w@@&7v=5dE`?f%`epsj;{2W1Xj%i+hb+czD+FEad-d z3I$uj{|9^9dv*T*?Ab&9?|XS}%>V!QMTq^W8R4S!e_!|i?mmCsxc_(e;r!3NJa^jv zJEGCu6u5uGJ-wDtA8(4|B;eEjl+p`t2wHaLDi&fz9$e_RzoB^(Rz@t5NF281-5!w3luf`Xtl<`5~zwCN`U z{}}o?XZ@IlDDKB(gumdh4}Dd*FRS)VaqR(rC#R5?kR|f>k-FAL*C`q|_%iS6cOXfG z1X&)(I@rmC;HcL}<=c*HjoK?$PuNuEb%h+FDUI-ni5@ia(0Oy0?4)QsmK0^Kd$zys z(xRX-W^PQS^Wp~Wh8;BC!b>a@{5`21K6x(L$^Up;;Q1=in-TRoI>GzAfF>JMDJwdG zSLHf(-`aH7#Rk@L0uIF4Zw(~%AfC+TEM>Q6|J8CDn5Iqy*#^)?{I6U9^E zNyhM)e8G`s!++TfK=KNpsj4YKf($c=Ns5DBaBy;VA}GVX4R}eDgr@NG%M&Shc`uj{ z(O3V~;P-;jZ>+EWYhO$!efb~zlh4wA+0Y1u7kLI_5@X)`F5s70@4H}xE_&YuBFTE+ z{eQg;_!%)ma}I|GuXrz*@sLKi7nqIcUO*y@`no)$|LUo09Zvo&n1fya$FKGe-n|Nv z=y&L2@%n$Zx3k??|AXy^_|JQJHsFn>0QftO6Ht~@ulF4srd*&n)|q85@d%1we902@ z#TD?$)cyR#L@ZiqOWCBq+3Rg=!0FrnIynBXUhn+;Jftb7G44GADQke}9)JsJWZJ5- zpsHv9kiAI|3}C0UsHali%D0+<$e+e^Q$iiileuqRoDUZP;QOk4S-cCRk16Q_Dz~5d z5stCI>a){ggtM5=6P$`xP)wz=g6{KFW%X5>959^F8RlSz{!?)^jx3VV6sLj%;+%#A z3CmVXB*`{(H{8WlphuQX7*aEfQm{uR!04+|Fd#_rB_!xyl)+5f)xF-nd7(QnujMak zSUicz1%}ynuoFB5{|A`u1cP9(S;<+9gan0QARJKIBdHt-5@Zx(7%_}4BowB-6k`Qo zg3=ttfI_92EAThql)|oT9-i!{5j+jHcY^Iri9f;%xXevHTod{aFJMO&8)yXRNakGY z`@P;393iP$3(R2Ia|H*Ohm5ET@0OO<<@KFGH+`;pSN`hHP0HS;yi>Dx1?Tq6*!lAD zzt58qW3b*`IN?y zH&nMfpMzOMk_)de!YeqoMOPZ(Z3lRLuHdTq`nmzD5=6~I*a4SU#EE)C*nPeuv82OO z_3w7N&`XFBs(P?T#=0Q80_Aj}V>mKZK-E#QjdLt#$)YCKe91V@56ADRIAWONbOGTmgUfhDyXnh;%UrIo1g@_O`Q2i; z+*CpfTe))eL{m#;cQUooCgaiJPQI4YmuI~qT$Cu3s zwSc}XpO8AT0*t2WZwpzyylwlYz*Xn5$RE`yXb)4J8_PwJg0G|ex#Zxtz*q$?n@nl} zzFO$Z)@q0AE_}5mM_zkdS2pqhDi@@6x4xeBsP{7$%I%jq7c?7Op?)JW8Ey312Pn6^1wyIMQy|1#tE6 zU?^C)rj&Qf#tyh-9kn&y0oSL`YoPltf}tZMrd`05aH*mSSxPC)7|w7S$v^Q7v$+D- z>^K=WK>!~)9_O)km)lPyoxrD16Nhn9XjjbsFDJ17_Q#?_sTDIC$CzpN86Woh<$cFK zClek_h?wRhb(iub_fb6Z&B*+)sNJ9BB*K03pTYu{S+DPDge?~tQzp$2W05Eu zs!{$ z;vZ6bG{%~O(}An(WLXEg(uuw&ZpD7wPIx%QQ66KqQ2lLaz!%?hD5}B}DyEo$n4%QS 
zb?ckASLw6`-%K{vh2f7JLwP;82@Q|~=Ylc}5fZ3U{!SW7J$cyz4fs@=Q*;bY6?k%< z&BAk_49=aTXVPw7BV3&Pwg|2h@>>r(y{qzKY-!D?qL6g^%_S#3#whxQ5rIFX zAzqm+H{n{OonN(I*xjk8-;ooyUq{{2UKd>M9o99tK7m&%UQ*f0W}%-kDzMU@JZMK@ zlfLrNQKy1_1uCQp5b%5i5n+Xsp+RmIt~+@krO=wv%k%&Vw1~4_!Qoh^1=xa!rjLbG zkFZ&W^is;0Dail{P?1o123IPaYb|gs@RMGtkWoLBWqk!qA@p>#T+L^C1?Quj&qwsj ztvoSpaJBkLm%}B{q*DG|#ns=$_qYVERzK--xT-$WyLv_|a4pcon{Y|pq@jYte}YlM zZ_Oxfs^{#-%A+Q*qv5DXfR6HaT8}uLtycF6UMWiyr!&H+R%d635$&DH5a(EFdv?3x zt{QGSd(CK~-P^0_g{bQDQ+XAiBxZ|1Z_meb&rPp}d`Q5tD&Qd$#sY%5{7)Z{fxI)1i$yI?A5ow(lHz-5Hf+TnU>lMF5ZZ4q2W6mT8FTLD+X z^~yv7FT&U&xNIbG197*9D~t09S!TLxxQ=vDv#Mf>NV|G^(TeeNs3fBr-W_NWKRMKoO z+{Qbi(TZD(>w)X-N+{ECt+=xN-U6}Y%|cWbkE z@p`nLac$}@Z5_@p$R(LD67?^>=f3G^I0&|borO^oO}LI0L@m|fS{Al+N2A=rwRBso zgsUjJxb-`RM;N$Dq1)k+D#H*|;JS`Pf=d!%;25!Rsy2HZ*)0t=6h*wBs~}R|cS76~ zX?t24tqZL#xQd$wQg<<2!N-*RnwQe3LjI;o6#t2WPTM&T*RejN|im}4|(k_%&mC2C?_>6#ygNOTj3~mXASmX+%9=z8SI#l}2`>&m(r{hix#jo; zTnd7A;AF#cC>kjFYX!hcxH81~C1uef*lq=vZCJx)e=UJc`yyJd)#`A=-E1CJ!?&79 z2d)(+vKwC-f>kC`mGqZQrvujt6L~whsuQW2)rwyB|n#Au0?vfimZE$G~ zqg^+BskIM#?7*a{l!%b(>UnE>X{V3FIWk}xQnV|mock>uC4syg>$1^Nweg<8{X5_=z)dsDuE&}7xxFaN>tlDT`dzQ^#lp)@sIm29Y;iU z{#3Jt$XR%VK{v})LR{7Oahbom&?=3!Y}HzG+Nr3be>)k9b$qF7@2ivkX^34n*Z6$}*mP3-HtkR*~$f46n zxz;VrJKz#%5_D7;1iQh_Pwch8wcxaN3tUT1Y2PgzE25WaOi^^;dVbp?p(HD63((Qz zU54o>$AWFucYPftx!(a~olfM{7nEeN(pwv$Gb(+{93^qjZ+>+1)R|xneH*cp@NR}! zKPDr4e!K6MGZCaiWB>8iLY469Ve!*9WqlQ`Y*Lxp1bq75ry{qG-l;FKr&7%@3`qVT z#`qEvET*)wuOC%5`_2Am06*fGUaE?mCXx{;)M+OG{*uKgHL4;~^``XFswfzRQ`66| z#A?}%M5YnbI>Ddppqz4>^zUDsrb8!V=T|40py%kS-mvF<34*}+rE-B~DhH2jPHi89 z#7M1%+q57eR%t9wN z0mpbe1bdpTxDlPY*Hia*OzBCA#bGpro#K+;8PN%b?VHjgGK8a|PxVpdR&63EXK~p- zLzm_RowU>>f51|urAc4@?w2B>pCBp3T_9_twkzky{gYRFyOovW1i^%MZ^fK6P|=S? 
zK$vn=MHF*&O*O+JXDR&8$%pr5D&o0XG>)Q5Y%w|CKO&Czb^)g$jc}wV{Rs{x0ldMC z5wY3ZS4o<(B6HCcaTsBoR)Z>Y7OUkKs!Uf@YW;JX7cL(JqrF{&D;=5%_}3i8yx0an zV_`hJvMHPjHZQ@8a7eYj%>iXuOr%bN$5iqM8-t-h7Zf7GFM{6TSi81#Lquao8XHu( zt;)_74^v;;Ix(YDZI>OYLZa+Uv3HHH0tv&Od9iYC}(TAN9YQ+<+>#*mZxzP#R7#?|;>Q`6RmTtLFP(&|O4^=>B;oyE%7 zf&x_aiD)V!d=b86zkl6%EqgAJWtOGJlkDNIJFjIQ z@*mra#hO~4c2~Q)OWe~mhMlFca7$VnyIXSQ*3a9?ZvnanMfo~AQW@H8)5K*oG8;5y z@k=D+NR;2SxJ{eG6Kdmp`KE2gde#k!1!f#*F7)1RinU+6hjdhkl%|m~1QZzEduxI6 zDrrJ?Du^kS)L%j*1^6W+Lg2Kgg^J$JQNDb2=%`{`1n^R&W#e|;)tX9ZN=zJes<*-H zrfSdKy=P|Q6-Q>TAPRcDePCExH#m*-_9Ed{WuU3klS&U-w6-Ntf`H*D5A8;7lE;Eb zMXs{VByrWNwj}2)XAGy32}=%>F`8i(&mkdloYik$u*x$jodV}fupvi)9YM1uypCvH=NqnPEW5L3#zklV16T{uP%p1b)j&w`6|INq?hy2ztE}XW-6c04;;@6&uiQGc8h-t}((Pl8e~wQpvG+ZhKW%+#-P$ zNrTEDX&vucs9D!G55R>(W!sb^MVVZ=j37}`1;gANHWCyPlqxe^&S7OCTbS%tB8reo zQ!!?=CDCa_Lms%|q*N|sFn3hM5T687z$DXeIR zjw|!+Mo5eb=C6@#$#I0Zw!8G#gR**gMS@1c8-}te5v7$z*+g0ti$PE+WHA#)-;OO0EbBJkuy+bop9kBnaOGS21W;bx;#-PBOGQuaww@q?TaAW@+A z9<0}0P(4+!EpWwIke*i68`zB@$Mu?M8MdX`;>cxI8rY${| zg$tfpul2DVR;Q-LLcQiu6FaRCD?#h*;ldQ72s3-ZgZ%um{Mu+OLF~fFXGJIOl6i-| z=;ZKjM8x}S*B^3^)zhJX4youB`mG z_WXR4n}X6P{PU%}NGen>BTDy&^irU2#N2h2s|7{o|npT9%On zV?C2f0)FdU=>HoZ>tGH^-z+*``w-T3`P&Nje{4S+)bjuDKHqt`|Knbs4S0*SUyg9C z*Xd9JxsFqwGjne4Vx;NB2Eecq~mv7sjNoAagGf+}m%$s1Cx?6Ii=dJnGiv$_^+2;Q5n0_8rP zr;=QIl>*n!uj(Kwn~ycPJ!zUY#?~AifG2sbI8@t&b>)$Qo}!`G!`WDhA^mnB&}6ql3uhB zafVVHx4hTeM?aUlzyGKDSR)S|;W5K}dPY)#*$inMdC$1F7|P#hs_;_7iQR`k$dhEg zWF!7C3jgu{Qu@oqsyp$Wo6z5yaj)s=mjCNB1D5Ikd(Z0fe{W~-aR2kYJa>@)HhyDU zj<*rRvAuZtG_#**&$D@3WHr$q=oTk4=s1+JZs|vT>s zr<}uwhC`WKT;yQwxSVsP3M9v$Q+hX?mo#&(j8@RM$AdXRD9$*3FMiX~Yir!kUHVwI v{s)7d`uczVaR2B1JYTQBH3SdO!}IVwJP*&ec>ezY00960rCStT0Hgr`<4W;@ diff --git a/chart/charts/grafana/.helmignore b/chart/charts/grafana/.helmignore new file mode 100755 index 0000000..8cade13 --- /dev/null +++ b/chart/charts/grafana/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.vscode +.project +.idea/ +*.tmproj +OWNERS diff --git a/chart/charts/grafana/Chart.yaml b/chart/charts/grafana/Chart.yaml new file mode 100755 index 0000000..59b0f9d --- /dev/null +++ b/chart/charts/grafana/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 6.4.2 +description: The leading tool for querying and visualizing time series and metrics. +engine: gotpl +home: https://grafana.net +icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png +kubeVersion: ^1.8.0-0 +maintainers: +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rluckie@cisco.com + name: rtluckie +- email: maor.friedman@redhat.com + name: maorfr +name: grafana +sources: +- https://github.com/grafana/grafana +tillerVersion: '>=2.12.0' +version: 4.0.1 diff --git a/chart/charts/grafana/README.md b/chart/charts/grafana/README.md new file mode 100755 index 0000000..fcc14de --- /dev/null +++ b/chart/charts/grafana/README.md @@ -0,0 +1,305 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## TL;DR; + +```console +$ helm install stable/grafana +``` + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. 
+ +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Image tag (`Must be >= 5.0.0`) | `6.3.5` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets | `{}` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations | `{}` | +| `service.labels` | Custom labels | `{}` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.hosts` | Ingress accepted hostnames | `[]` | +| `ingress.extraPaths` | Ingress extra paths to 
prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `false` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `latest` | +| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment 
variables passed to pods | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment | `""` | +| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret | `{}` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `ldap_enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. 
| `""` | +| `ldap.config ` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `sidecar.image` | Sidecar image | `kiwigrid/k8s-sidecar:0.1.20` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | +| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | +| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | +| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | +| `sidecar.dashboards.provider.type` | Provider type | `file` | +| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | +| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | +| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | +| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | +| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | +| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` | +| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | +| `admin.passwordKey` | The key in the existing admin secret containing the password. 
| `"admin-password"` | +| `serviceAccount.annotations` | ServiceAccount annotations | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image` | `test-framework` image repository. | `dduportal/bats` | +| `testFramework.tag` | `test-framework` image tag. | `0.4.0` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | + + +### Example of extraVolumeMounts + +```yaml +- extraVolumeMounts: + - name: plugins + mountPath: /var/lib/grafana/plugins + subPath: configs/grafana/plugins + existingClaim: existing-grafana-claim + readOnly: false +``` + +## Import dashboards + +There are a few methods to import dashboards to Grafana. 
Below are some examples and explanations as to how to use each method: + +```yaml +dashboards: + default: + some-dashboard: + json: | + { + "annotations": + + ... + # Complete json file here + ... + + "title": "Some Dashboard", + "uid": "abcd1234", + "version": 1 + } + custom-dashboard: + # This is a path to a file inside the dashboards directory inside the chart directory + file: dashboards/custom-dashboard.json + prometheus-stats: + # Ref: https://grafana.com/dashboards/2 + gnetId: 2 + revision: 2 + datasource: Prometheus + local-dashboard: + url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json +``` + +## BASE64 dashboards + +Dashboards could be storaged in a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) +A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before save the file to disk. +If this entry is not set or is equals to false not decoding is applied to the file before saving it to disk. + +### Gerrit use case: +Gerrit API for download files has the following schema: https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content where {project-name} and +{file-id} usualy has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard +the url value is https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content + +## Sidecar for dashboards + +If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana +pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with +a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written +to a folder and accessed by grafana. 
Changes to the configmaps are monitored and the imported +dashboards are deleted/updated. + +A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside +one configmap is currently not properly mirrored in grafana. + +Example dashboard config: +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + k8s-dashboard.json: |- + [...] +``` + +## Sidecar for datasources + +If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the data sources in grafana can be imported. The secrets must be created before `helm install` so +that the datasources init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because datasources usually contain private +data like usernames and passwords. Secrets are the more appropriate cluster ressource to manage those. + +Example datasource config adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): +``` +apiVersion: v1 +kind: Secret +metadata: + name: sample-grafana-datasource + labels: + grafana_datasource: "1" +type: Opaque +stringData: + datasource.yaml: |- + # config file version + apiVersion: 1 + + # list of datasources that should be deleted from the database + deleteDatasources: + - name: Graphite + orgId: 1 + + # list of datasources to insert/update depending + # whats available in the database + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). 
Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false + +``` diff --git a/chart/charts/grafana/ci/default-values.yaml b/chart/charts/grafana/ci/default-values.yaml new file mode 100755 index 0000000..fc2ba60 --- /dev/null +++ b/chart/charts/grafana/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/chart/charts/grafana/ci/with-dashboard-json-values.yaml b/chart/charts/grafana/ci/with-dashboard-json-values.yaml new file mode 100755 index 0000000..e0c4e41 --- /dev/null +++ b/chart/charts/grafana/ci/with-dashboard-json-values.yaml @@ -0,0 +1,53 @@ +dashboards: + my-provider: + my-awesome-dashboard: + # An empty but valid dashboard + json: | + { + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.3.5" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [], + "schemaVersion": 19, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["5s"] + }, + "timezone": "", + "title": "Dummy Dashboard", + "uid": "IdcYQooWk", + "version": 1 + } + datasource: Prometheus diff --git a/chart/charts/grafana/ci/with-dashboard-values.yaml b/chart/charts/grafana/ci/with-dashboard-values.yaml new file mode 100755 index 0000000..7b662c5 --- /dev/null +++ b/chart/charts/grafana/ci/with-dashboard-values.yaml @@ -0,0 +1,19 @@ +dashboards: + my-provider: + my-awesome-dashboard: + gnetId: 10000 + revision: 1 + datasource: Prometheus +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'my-provider' + orgId: 1 + folder: '' + type: file + updateIntervalSeconds: 10 + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards/my-provider diff --git a/chart/charts/grafana/dashboards/custom-dashboard.json b/chart/charts/grafana/dashboards/custom-dashboard.json new file mode 100755 index 0000000..9e26dfe --- /dev/null +++ b/chart/charts/grafana/dashboards/custom-dashboard.json @@ 
-0,0 +1 @@ +{} \ No newline at end of file diff --git a/chart/charts/grafana/templates/NOTES.txt b/chart/charts/grafana/templates/NOTES.txt new file mode 100755 index 0000000..1193aa0 --- /dev/null +++ b/chart/charts/grafana/templates/NOTES.txt @@ -0,0 +1,37 @@ +1. Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ template "grafana.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local +{{ if .Values.ingress.enabled }} + From outside the cluster, the server URL(s) are: +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{ else }} + Get the Grafana URL to visit by running these commands in the same shell: +{{ if contains "NodePort" .Values.service.type -}} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{ else if contains "LoadBalancer" .Values.service.type -}} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} +{{ else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 3000 +{{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. ##### +################################################################################# +{{- end }} diff --git a/chart/charts/grafana/templates/_helpers.tpl b/chart/charts/grafana/templates/_helpers.tpl new file mode 100755 index 0000000..f6880cd --- /dev/null +++ b/chart/charts/grafana/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else -}} + {{ default "default" .Values.serviceAccount.nameTest }} +{{- end -}} +{{- end -}} diff --git a/chart/charts/grafana/templates/_pod.tpl b/chart/charts/grafana/templates/_pod.tpl new file mode 100755 index 0000000..8018dd7 --- /dev/null +++ b/chart/charts/grafana/templates/_pod.tpl @@ -0,0 +1,360 @@ +{{- define "grafana.pod" -}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +serviceAccountName: {{ template "grafana.serviceAccountName" . 
}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +{{- if .Values.securityContext }} +securityContext: +{{ toYaml .Values.securityContext | indent 2 }} +{{- end }} +{{- if .Values.priorityClassName }} +priorityClassName: {{ .Values.priorityClassName }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.extraInitContainers) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + securityContext: + runAsUser: 0 + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }}", "/var/lib/grafana"] + resources: +{{ toYaml .Values.initChownData.resources | indent 6 }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ] + env: +{{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ 
.readOnly }} + {{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ template "grafana.name" . }}-sc-datasources + image: "{{ .Values.sidecar.image }}" + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.datasources.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.extraInitContainers }} +{{ toYaml .Values.extraInitContainers | indent 2 }} +{{- end }} +{{- if .Values.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end}} +{{- end }} +containers: +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ template "grafana.name" . }}-sc-dashboard + image: "{{ .Values.sidecar.image }}" + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.dashboards.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{- end}} + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . }} + {{- end }} + {{- end}} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- if .Values.dashboards }} +{{- range $provider, $dashboards := .Values.dashboards }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} +{{- if .Values.dashboardsConfigMaps }} +{{- range keys .Values.dashboardsConfigMaps }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" +{{- end }} +{{- end }} +{{- if .Values.datasources }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" + subPath: datasources.yaml +{{- end }} +{{- if .Values.notifiers }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" + subPath: notifiers.yaml +{{- end }} +{{- if .Values.dashboardProviders }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" + subPath: dashboardproviders.yaml +{{- end }} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: {{ .Values.podPortName }} + containerPort: 3000 + protocol: TCP + env: + {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if not .Values.env.GF_SECURITY_ADMIN_PASSWORD }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ template "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} +{{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + {{- if .Values.envRenderSecret }} + envFrom: + - secretRef: + name: {{ template "grafana.fullname" . }}-env + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 6 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 6 }} + resources: +{{ toYaml .Values.resources | indent 6 }} +{{- if .Values.extraContainers }} +{{ toYaml .Values.extraContainers | indent 2}} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: +{{ toYaml . | indent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ template "grafana.fullname" . 
}} +{{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} + {{- if .Values.dashboards }} + {{- range keys .Values.dashboards }} + - name: dashboards-{{ . }} + configMap: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{ $root := . }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ template "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} +{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} +{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} +# nothing +{{- else }} + - name: storage + emptyDir: {} +{{- end -}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: {} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-provider + configMap: + name: {{ template "grafana.fullname" . 
}}-config-dashboards +{{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: {} +{{- end -}} +{{- range .Values.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} +{{- end }} +{{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} +{{- end }} +{{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} +{{- end -}} +{{- end }} diff --git a/chart/charts/grafana/templates/clusterrole.yaml b/chart/charts/grafana/templates/clusterrole.yaml new file mode 100755 index 0000000..d141280 --- /dev/null +++ b/chart/charts/grafana/templates/clusterrole.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }} +rules: +{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end}} +{{- with .Values.rbac.extraClusterRoleRules }} +{{ toYaml . 
| indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} diff --git a/chart/charts/grafana/templates/clusterrolebinding.yaml b/chart/charts/grafana/templates/clusterrolebinding.yaml new file mode 100755 index 0000000..0ffe9ff --- /dev/null +++ b/chart/charts/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "grafana.fullname" . }}-clusterrolebinding + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ template "grafana.fullname" . }}-clusterrole + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/chart/charts/grafana/templates/configmap-dashboard-provider.yaml b/chart/charts/grafana/templates/configmap-dashboard-provider.yaml new file mode 100755 index 0000000..c65e415 --- /dev/null +++ b/chart/charts/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,27 @@ +{{- if .Values.sidecar.dashboards.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . 
}}-config-dashboards + namespace: {{ .Release.Namespace }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + options: + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} +{{- end}} diff --git a/chart/charts/grafana/templates/configmap.yaml b/chart/charts/grafana/templates/configmap.yaml new file mode 100755 index 0000000..d24d0c8 --- /dev/null +++ b/chart/charts/grafana/templates/configmap.yaml @@ -0,0 +1,72 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{- if .Values.plugins }} + plugins: {{ join "," .Values.plugins }} +{{- end }} + grafana.ini: | +{{- range $key, $value := index .Values "grafana.ini" }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{ $elem }} = {{ $elemVal }} + {{- end }} +{{- end }} + +{{- if .Values.datasources }} +{{ $root := . 
}} + {{- range $key, $value := .Values.datasources }} + {{ $key }}: | +{{ tpl (toYaml $value | indent 4) $root }} + {{- end -}} +{{- end -}} + +{{- if .Values.notifiers }} + {{- range $key, $value := .Values.notifiers }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -sk \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + -H "Accept: application/json" \ + -H "Content-Type: application/json;charset=UTF-8" \ + {{- end }} + {{- if $value.url -}}{{ $value.url }}{{- else -}} https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download{{- end -}}{{ if $value.datasource }}| sed 's|\"datasource\":[^,]*|\"datasource\": \"{{ $value.datasource }}\"|g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \ + > /var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json + {{- end -}} + {{- end }} + {{- end }} +{{- end }} diff --git a/chart/charts/grafana/templates/dashboards-json-configmap.yaml b/chart/charts/grafana/templates/dashboards-json-configmap.yaml new file mode 100755 index 0000000..8fb1396 --- /dev/null +++ b/chart/charts/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,38 @@ +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := 
.Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ $.Release.Namespace }} + labels: + app: {{ template "grafana.name" $ }} + chart: {{ template "grafana.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} + dashboard-provider: {{ $provider }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} +{{ print $key | indent 2 }}.json: +{{- if hasKey $value "json" }} + |- +{{ $value.json | indent 6 }} +{{- end }} +{{- if hasKey $value "file" }} +{{ toYaml ( $files.Get $value.file ) | indent 4}} +{{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} diff --git a/chart/charts/grafana/templates/deployment.yaml b/chart/charts/grafana/templates/deployment.yaml new file mode 100755 index 0000000..ce94d84 --- /dev/null +++ b/chart/charts/grafana/templates/deployment.yaml @@ -0,0 +1,49 @@ +{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc")) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ template "grafana.name" . }} + release: {{ .Release.Name }} +{{- with .Values.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + app: {{ template "grafana.name" . 
}} + release: {{ .Release.Name }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} +{{- if not .Values.admin.existingSecret }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} +{{- end }} \ No newline at end of file diff --git a/chart/charts/grafana/templates/headless-service.yaml b/chart/charts/grafana/templates/headless-service.yaml new file mode 100755 index 0000000..c0c182a --- /dev/null +++ b/chart/charts/grafana/templates/headless-service.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + clusterIP: None + selector: + app: {{ template "grafana.name" . }} + release: {{ .Release.Name }} + type: ClusterIP +{{- end }} diff --git a/chart/charts/grafana/templates/ingress.yaml b/chart/charts/grafana/templates/ingress.yaml new file mode 100755 index 0000000..6077c10 --- /dev/null +++ b/chart/charts/grafana/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "grafana.fullname" . 
-}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} +{{- end }} diff --git a/chart/charts/grafana/templates/poddisruptionbudget.yaml b/chart/charts/grafana/templates/poddisruptionbudget.yaml new file mode 100755 index 0000000..60f58bf --- /dev/null +++ b/chart/charts/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "grafana.name" . }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +spec: +{{- if .Values.podDisruptionBudget.minAvailble }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailble }} +{{- end }} +{{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + app: {{ template "grafana.name" . 
}} + release: {{ .Release.Name }} +{{- end }} diff --git a/chart/charts/grafana/templates/podsecuritypolicy.yaml b/chart/charts/grafana/templates/podsecuritypolicy.yaml new file mode 100755 index 0000000..a1d87c8 --- /dev/null +++ b/chart/charts/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,55 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + {{- if .Values.rbac.pspUseAppArmor }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, without DAC_OVERRIDE or CHOWN + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} diff --git a/chart/charts/grafana/templates/pvc.yaml b/chart/charts/grafana/templates/pvc.yaml new file mode 100755 index 0000000..780de6c --- /dev/null +++ b/chart/charts/grafana/templates/pvc.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type 
"pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + storageClassName: {{ .Values.persistence.storageClassName }} +{{- end -}} diff --git a/chart/charts/grafana/templates/role.yaml b/chart/charts/grafana/templates/role.yaml new file mode 100755 index 0000000..2653f6c --- /dev/null +++ b/chart/charts/grafana/templates/role.yaml @@ -0,0 +1,35 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }} +rules: +{{- if .Values.rbac.pspEnabled }} +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . 
}}] +{{- end }} +{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} +{{- with .Values.rbac.extraRoleRules }} +{{ toYaml . | indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/chart/charts/grafana/templates/rolebinding.yaml b/chart/charts/grafana/templates/rolebinding.yaml new file mode 100755 index 0000000..680be28 --- /dev/null +++ b/chart/charts/grafana/templates/rolebinding.yaml @@ -0,0 +1,30 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "grafana.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- if .Values.rbac.namespaced }} +roleRef: + kind: Role + name: {{ template "grafana.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end -}} diff --git a/chart/charts/grafana/templates/secret-env.yaml b/chart/charts/grafana/templates/secret-env.yaml new file mode 100755 index 0000000..36c14b5 --- /dev/null +++ b/chart/charts/grafana/templates/secret-env.yaml @@ -0,0 +1,17 @@ +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }}-env + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ $val | b64enc | quote }} +{{- end -}} +{{- end }} diff --git a/chart/charts/grafana/templates/secret.yaml b/chart/charts/grafana/templates/secret.yaml new file mode 100755 index 0000000..4f02fa3 --- /dev/null +++ b/chart/charts/grafana/templates/secret.yaml @@ -0,0 +1,23 @@ +{{- if not .Values.admin.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ randAlphaNum 40 | b64enc | quote }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ .Values.ldap.config | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/chart/charts/grafana/templates/service.yaml b/chart/charts/grafana/templates/service.yaml new file mode 100755 index 0000000..f18df46 --- /dev/null +++ b/chart/charts/grafana/templates/service.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . 
| indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} +{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{.Values.service.nodePort}} +{{ end }} + selector: + app: {{ template "grafana.name" . }} + release: {{ .Release.Name }} diff --git a/chart/charts/grafana/templates/serviceaccount.yaml b/chart/charts/grafana/templates/serviceaccount.yaml new file mode 100755 index 0000000..37a8e6a --- /dev/null +++ b/chart/charts/grafana/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/chart/charts/grafana/templates/statefulset.yaml b/chart/charts/grafana/templates/statefulset.yaml new file mode 100755 index 0000000..ebe3c2c --- /dev/null +++ b/chart/charts/grafana/templates/statefulset.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ template "grafana.name" . }} + release: {{ .Release.Name }} + serviceName: {{ template "grafana.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "grafana.name" . }} + release: {{ .Release.Name }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} +{{- if not .Values.admin.existingSecret }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . 
| nindent 6 }} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-configmap.yaml b/chart/charts/grafana/templates/tests/test-configmap.yaml new file mode 100755 index 0000000..bf5bde3 --- /dev/null +++ b/chart/charts/grafana/templates/tests/test-configmap.yaml @@ -0,0 +1,20 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +data: + run.sh: |- + @test "Test Health" { + url="http://{{ template "grafana.fullname" . }}/api/health" + + code=$(curl -s -o /dev/null -I -w "%{http_code}" $url) + [ "$code" == "200" ] + } +{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml b/chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml new file mode 100755 index 0000000..662d4a2 --- /dev/null +++ b/chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: true + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + fsGroup: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - projected + - secret +{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-role.yaml b/chart/charts/grafana/templates/tests/test-role.yaml new file mode 100755 index 0000000..9d34fbd --- /dev/null +++ b/chart/charts/grafana/templates/tests/test-role.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . }}-test] +{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-rolebinding.yaml b/chart/charts/grafana/templates/tests/test-rolebinding.yaml new file mode 100755 index 0000000..0a11db2 --- /dev/null +++ b/chart/charts/grafana/templates/tests/test-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "grafana.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "grafana.fullname" . }}-test +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountNameTest" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-serviceaccount.yaml b/chart/charts/grafana/templates/tests/test-serviceaccount.yaml new file mode 100755 index 0000000..e6a46f9 --- /dev/null +++ b/chart/charts/grafana/templates/tests/test-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "grafana.serviceAccountNameTest" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/chart/charts/grafana/templates/tests/test.yaml b/chart/charts/grafana/templates/tests/test.yaml new file mode 100755 index 0000000..e0e4883 --- /dev/null +++ b/chart/charts/grafana/templates/tests/test.yaml @@ -0,0 +1,67 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "grafana.fullname" . }}-test + labels: + app: {{ template "grafana.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" + annotations: + "helm.sh/hook": test-success + namespace: {{ .Release.Namespace }} +spec: + serviceAccountName: {{ template "grafana.serviceAccountNameTest" . 
}} + {{- if .Values.testFramework.securityContext }} + securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }} + {{- end }} + initContainers: + - name: test-framework + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + command: + - "bash" + - "-c" + - | + set -ex + # copy bats to tools dir + cp -R /usr/local/libexec/ /tools/bats/ + volumeMounts: + - mountPath: /tools + name: tools + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 4 }} + {{- end }} + containers: + - name: {{ .Release.Name }}-test + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + command: ["/tools/bats/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + - mountPath: /tools + name: tools + volumes: + - name: tests + configMap: + name: {{ template "grafana.fullname" . 
}}-test + - name: tools + emptyDir: {} + restartPolicy: Never +{{- end }} diff --git a/chart/charts/grafana/values.yaml b/chart/charts/grafana/values.yaml new file mode 100755 index 0000000..45cd9fc --- /dev/null +++ b/chart/charts/grafana/values.yaml @@ -0,0 +1,464 @@ +rbac: + create: true + pspEnabled: true + pspUseAppArmor: true + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: +# annotations: + +replicas: 1 + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# minAvailble: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + repository: grafana/grafana + tag: 6.4.2 + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: "dduportal/bats" + tag: "0.4.0" + securityContext: {} + +securityContext: + runAsUser: 472 + fsGroup: 472 + + +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + repository: appropriate/curl + tag: latest + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana + +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + annotations: {} + labels: {} + portName: service + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + hosts: + - chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # subPath: "" + # existingClaim: + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the prometheus-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + repository: busybox + tag: "1.30" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 
100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Extra environment variables that will be pass onto deployment pods +env: {} + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc +envRenderSecret: {} + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume + # mountPath: /mnt/volume + # readOnly: true + # existingClaim: volume-claim + +## Pass the plugins you want installed as a list. 
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. +## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # b64content: true + +## Reference to external ConfigMap per provider. Use provider name as key and ConfiMap name as value. 
+## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. 
+ existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: kiwigrid/k8s-sidecar:0.1.20 + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + dashboards: + enabled: false + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # If specified, the sidecar will search for dashboard config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + datasources: + enabled: false + # label that the configmaps with datasources are marked with + label: grafana_datasource + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. 
+ # It's also possible to specify ALL to search in all namespaces + searchNamespace: null diff --git a/chart/charts/postgresql-8.9.4.tgz b/chart/charts/postgresql-8.9.4.tgz deleted file mode 100644 index bdd82702d40e02e0dfe3efb27638401b7b2b3ce8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33145 zcmV*GKxw}piwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POwyciXnID2|_>^;gVQdT#92ljvXI6N$+0S z{aO$SN!TF?769$2alU{1XW>PHFNu=nM|RD%cVm%w3PP%b76^FPA=-V`y> zo1rNDkM%y=+uPeOpFdasZf|dw|J~Vn_T)cyp1yegbbI^R)2A>0V|(Z6v+bS#fbBKI zQTwD^Aod^I*Y2x4xNqb^3Hg8-Co~?w#ZCt$N%nJRr}whA-3c)F8A*hCr)UJml);H6 zoU{LVvjr#yI{Fv(g~xD+WJwf4fCL&Ljv+!Zn&1e>0{Dyz9CZLT-k$EiIy&2gOCqMS zK8}(p;)DZ@0e==a=43=dB4&W%K)&Px#Q|ag_k0>9AwjW^dmS84NQ?(Cp&|)8QySp` zOod4Jpx?i|yzJ%V-J@*M?+{;(!j>2j5u=Fod>Zu;=UDJQ7s&s_`_8f;(Zu=bsRG{4 z&i3b>otM2No^(Fp*(GHGA9Os(3pzhKUWSa02l^YsNl1K!tBwbLm~w$xCqg6^h{Tww z)^LPKH~^p8m;cv+xC0O)Io?Tn5H9zCj)`~*9)ls2{X^Be_;ve$KFr9XPvvI@P}vK;}H4<3RBE)Kmg3=|IeOn zKiw|#{}<1mJn;Y9cpg21*C8FD5cbtV1`?qOh6FJbVSyR%JbDBtAx0c;K}-b(F+~Ez zlyJBtVF>gBGe!dW9#<-F0g3%E4M;qJ04F#OaO@Mzw8Wruoml7`_K_8As(Kpox6paTxy8^D*Zoksx0 zCI%4A?BB;I!g&d&$$@$c;5mi=j%WI7R*MVD&;;*=h;!NOne)3NM{5|lBU;S>4M&w_ z7#`>HOY&sRe-&$H8FO}4wLlZzmt$5<$sx0BXr?Zq>TNwLcf(6GV_O0O9aB zL;`d1cnfgIF>owY@x$>k6(|!$Z|A%wx<=?&FT#2=2=cQ;tJbY#CSB% zC{VZ@WXj~8aF50Ue>NQYmFN5o!Ex559=ZM5wOIVp%YQ`|Mj4i^pV)5@)lYl~jSuAA*_op~WL(H}S_a;5)N)7N+ zw^z~Q8dn_(BrBA>&od?hXCX#uC`b}wFY8AudXMss}^$LF$ z3V<4PWs*Uv5FG#`Nd~a9y}i>ZAXB}`A!ZlE$FS@BQYKv=;QT$;O?yZ;-S3zermI?c zCb^9i&~&V4nqd^o0DmT2$OUIw0Mleo>P_l{CT_Lt1HxsaTyYpl!ctfA%T+boCNx$n zM%!(w(`ck#3#9lo_HiP3KT%cs5-Hi-nRlIpCvwzUVz}zko_fQ9(GV+Ea8^K-EOLtf zNQvCdDcj)LNr#P)U(=dn_o~Hks%Su#I!`r6O%Ugolm+Ik{VA7ah%WG{ZjcXPC)ZW^ zEcU02#x&)M3lael4e$VHyajYT-U5arD)2cA zK1*XkBJ61r^Da@?n~ErW^!ojt_uK9p@6Ex7gEtcF9JoCN+W@+BJnri5eql7yk~)n> zT0T>=A~) z74h2v*}+-fNRD{313b+eg+0x!K;qZFlSJH;T+#{@H4eEvEGKi}0!5WX=}k^6ilad^ zJ9plhg^_KV71zw@1~yWT1Gtn@d6>_gWHu6_p_NhDx8Y`GMJ;3h?2ke^>PJX%>(hhX 
z{iB0k6g<)kI-YpQv&LXg>5Vk@#)NUs<-xj>9ap4xdNXWRn$3khpDI-vYu;+9B$9!B>~p$bNo;u{cl13*d`iV*A*Hh?-y znuJ0@wre+JQV91Z6$_g)+dOQIv#upl4q&bP!*tA94$%DgT$ZZ}kh%n!*6O zBgDEINqL>@FZlSof+kZ)q*ga3lOvSa?Sv*j1Dphk7hC&)7~eZG1`4y=v{oD@xllY|1&SZyVzSaBBKmun3Bg0iYk*`)aR+CtywLaMPP z9Z2avW$f*_C&3h1`trCDQT6yTxucl<;CX`9wd`rN2o|8 zaOJ5Z6s48}3iba5OSw?*r<9BAwJN0ElnO+%+$Ukim5Q|U)05uLiywR2z3u*!XAlL? zwRp@psWNWNTi#l8epfvYYoK_wy{Hbc^QhQ#2EkwfKurrK`yi{9S~J{T`*m=a3MBg> zG)7EW8%+VZ!bVs!FhBy7_=XXsqRp(^CdxBz77C69n<-1Pr-AAPg*Df)z@F)L^hsMZ zJC|?Ib>lrg9l(`2f`do@t3M)fpHJ-z>`y6l_bG51VKF7~q}Q#(X^X)&&UFFCGR!69 zYo)JMpRgfWnxxmS0{)EsRG4$&62X}l%HWgzDk{|05DtM|I3$+JFpUFkJ4t8&F~)(yp0&WEYs39|>i`{>ku|e71$sasERxwf$rJgTz7MM`kn` zsKycVr{_ormUXAzjEP*!_MJb)QY%$IM9AIELt%HK%~r)xnzsXO7dW9@@9a;# z&IN(DJax^8#8l>9jD;i^5>M>A{v+9><P zV^}?h%LYX_43#JtTF6wV`b?|{WSte~M0*;@orXpi7&j=;eo&7d!Qt3ixVH*EQ`+?V zU+$~FneygLy#D*amiz?& zTOYs*g{E%%8xz5o9fn@R%}e ziCC$_BMc*&#(^a-Y=ko5fa^K;7I3CwW#w5bv&TWE&&;jG0Pml%HDW9Tav1o6C|d!_Cf~^ zo{q9jjdf4RSHI>Af@ZJ~Iq(bDpvZIQ1R7&F`Jix?9h+DQoI%FZk%p_=RbZ5S54)b! z>a-j?q5(Dz86KQs6#T}Bz;9zE$T;~E58%hwq(-}te!o`<~SDpM;a)PYp4k3og$cnO!GXY%0cJ7`fR3F)NzSyx-!?Y0bf8{=M|+q|O1*6p-7;yen|JlV6Jc=-Dw? 
zSG!EudGhp`V%-Mjd?wzyuKKk*cP&a+i*2^; zs>+r!aZ1AH`Upn(jZ-FfXSy|WZmL+3<34AGYIaTe0%8Sk9c83jvm!Uh6Bz#Aa>>4nul*@L3yPtu02iLdJkU zCn52PaF?Hr26v`faPULE|Aw9Z+ z2VXwn*+A9X0-8v%Ev5QlyrpeClH1zme*dauP$Ov8c#yM7?Q^6}ky@z>$pwz(I+LWs zFbatJl`uNOtqcZjy})fpUe<}HJLx}iDyuh}9b5QB{;o5+N$rN9a36SOXBWz8!*EYQBYOO4=|5h{Dac4%3 zYk5RFXzq4xY}-4rr*d0Sjs;nW+!6YOflI=c z5`@NFFhq=;QB7HOz<9$szCV_L@^Cv@eqet|)`~(b$?(^7f*lw7H)xU*8mzrQ5Lc4H_jTu8?ck7lyI#KU)*j=z5G%F)0hj+vJLBbHG7T~<6iBEy^L>W5wP36A4_rfKUE@0OTp_8@uZb_ z(tX7zUAGJ4U~E>!e9m^sXo`7^!)&6??L3Cmo= zI;sX{&WmNO5wzuq%r(HYqSmdopOrTY02Q^pswvOWmFu{o}Xq4$gYwvsh0b^Yef0Y(IZd&j0v)XXhdRcS&za(HR$@b}+$I&M@1igQyN4&Z_yZ5nM-=;cSF%wB#NP6N89QtdF1btp`n6+Wlp@f=*&IF$8&VtF zz<<<~wXwK4{N2q>)x|ps6XQ*sIr>Zan-`Q?gP9$z8NH3DSJPQBfG)|D1{@nc{^;qN#GYhTC zqJ6)1;YvBuzjy|o7hs9#MDx&dpAa(_9})?M1T$+%rrff$yqN7F zsUEc*nk?6HshDF2&tnmIo)9zei2Q{m-xfbVKE@ZAJ#L2~Pwy-nyH;NrL|>8W9nyR?u49d!Sl={fagKN z{|LeJR6JZuYm4_@VYHc1$T9(xm}E7q+tBFT0t)Ox0A&PwaWI%{ANWShLP=>eyO2k0>frD|-Ns3zGW6Z%ILyEotI$Vd0z| zoPIdmt64j(J8sMFy}ZdPWj$hCIJP&{?Dph;%xreQ+U3`p zx>qIc97UGCTWmk>Kiy~No(njvB?>t>Z%o-GVnI8~;1__}lL3@!|37EWSczTn(_mzQ6%& zI0!6*V92Km!Pl>w*>P>|Rzs9&f(u6DNjL+BrJka*AqWcSf)bmAiRF-RATc0F7!6XL zHO2|F>2-b}@t9^d?tuEd=Ux+$yWjVx^wJZwuQD5`ur1H@;>lh-r(|I|@7m&1d}{4~ z=TjUenDL%S)^QcUeEZ+iXHTD(?SC(}x1T@Q|8C>?@}>VhT##s>{Qbrx#8RgG97(kJ zQ#^q0`(M9yWU0=<=fuS3YV({q*-|FLUdNR4j4w)oQEB<%{w*N>`NX7&QfRaS#A?*Byb?d6U zkbTI3a2R19r5r1_Lil@j#APg!1cw1v4kgNwS2G&58`~J^gapQVsS>^=*x#IwnOwTs z0Xg!gocKE{(^#Mt#-wbrKfY7L8vNXTQXDul;i6k9=oUGwg||xsm@pEHF?9cvd;cj# zT%k4H?=oc4{P(v=JvvgShKN?5*6*?;^iJ)K#j~)vm}(bT&>Lnv2dJxu+r17Ex$Uh= zH=0;Zu}GP=cA_L+i#rmR>5wn zktyI=9jSF!3YInl8Rz^JhY>vK?d$+g6J-w53nL55(+MiI_ncJCvkaM8GGnEbXQB7N zeVX8{&L|aN8vjYr!js<4HUt#&__45^0TElkDO_UZx2Y1^L`kTxoKx(Tj6FryA)OGv z*Vzp@wch=Yh4Sx5AomHOB7MumL-gm&I?x)IK$XlsHr^-soH?w`Tu_vq2iAP-6)!0= zMQ!n%Jp2F22Z}=H)h)t{wH7Z)hugm75#>qOR4_NZ7kdjG*x}r9z1Lh?tFVtztgr6 zsV#Thruf1InVHDCS#(!EA%#YkTASsXjVm(iUSDR|)c3=}d{~%ww=iFqeP*>s>U^V} z>$TcS%_v@~1GU5I!iGq~_KkeE*{E`qO4g&X)Zc>E8{4Qm{CE5^HkV+I<_2G6J&Wn+ 
zU)UEm1x23dbG|`UmC>VEd3IVNwH7;S80`tKnCd6b`>)S(=8m**C&N+gVC(N~s6Bir zP(wq#I;tp{tqV2RLmw7Z_18%(g){f!iAA`py`*9>>gubtz?`jk6DX`KG2KUm5@&hY z)U)&N6`8A=$QI_|mNMAr{X#;_d#^(}g08-CqeRu+&H7ULEiK*6nvGNOE|ctgUEna?6Pnq=>WGp7lhI%b%Q`Yp%aMru18&GDr?2`B@v;B6M zs1bR!pjthBndvlj+=+D1Y7<_pfYGLX#lFsSf2j5nnMP-Zs5~ndALltq7rJ+uwTovOBokzu+V}71UK)&p01(!pa)lW zxz}>+-1^)zp>p^$_92;^6Rz*$*52NAb=`Q(EBGU&B6t06(VJUk>bjS=^6CDPJBF_X z=oKNzr?wDfu-HFb!~GGwG4yF7>aIu?QPS^a+6E5bceuJTRceYCi?&m*T{&F5I%KXj z!MXq78y2^7grqmua^;S{VpNhV4gaBhnC4X5EYvI8DXO=w{ImPdZnx4`z5v>`Q)5Y+ zI~q$hbM0vE*-?IvN^`?=k=4{f6=SR6Z9evv#o?0Ub~;(12JWl?=?B;P(+wjS49 z#LeM+D#RpbL{sEk2btf3_Fko?>NJQEk%S7|E^Mf4KB0S|e`cdVV%ih5C#G1xg04v( z)s_3UuV3eK*u44Dqwv=COMwQS-Nn)LMpUm@o&PulNYnGsq5UYgX zmx#sco&^#s;b*pW5$2N8^vw*uEI3AFfe&suRxLZoGL3N<{H#)E*MHHsJv;Aem9=6g zy)ZyY!6Io#iUclh>r@XuJC-PkHuYJOVG{RcBLlhau$KPTC^!GpswNSyXk7Rn90 za>f=@tg`b!K;y^4=rL1la%&fE>X3DpAd=?!bJmla??%C9!wJ+ChbGNUl!#feRLa85 z&F0Vg0@h6(&zJ)^$cDcKPC3PA{-JL^R@xmFFrs1#LtA{P;2b)2hG5cybLB=tA`@x z7;yS(ch9kU+t-59We7kmt%k`?cw)|>ib*`!2>IqiW%lwqTi}#aSZsipRE9=kpe(>l z-*$GU(gs{o_G!*!%)3rShlZ$oRI;3n=G`MXNtBU4Nj%Z-2=IiV@# zGVMj$_NpSSCMspFU^AGimaQV_t|Kk)=TjU1n;l&BW+=LT1n~U$-|Z()oz$=gt6Rn%E?X%m!|6=yo6a!|?A;n@6C!6{}$s2s#@$uo1SQK4|fQ6am<$6WmL zwR+E1L#~O1yT@wd&h8O5;R<3B2RIh+EChlMLJAUtVu zKxrb_ull;NLdyeVsMdD(?>^z#mQ4`vc;+c&wQ61^M3K$F74R5+!oGHu{@G1DUuT}r zMe*X5IR|fUxutMcJ^a_`${WNA_cL+EzsXZ8|4Uvn-Zk}s1@eFA`OdSV z{C~3Z^x1>_zm>~ULr`F~Titl<4uLn7PyK-E)w=JY4 z!{6r$n@kGmCg;+g28f!(6D@>vZ#|Ot8x~Nt{J-cgc3t+rkpI7Y_N2o9pFVrw|F`k9 z;QuZ6zy2bwp<^Ow=KfgNSq2T^kk_z=sk~g}obG<7cq`_@S zgL!gbE=_4#Qqo~WV#H?qnOre!JV9c*1>dQQbR0hZ<%`N!uD`?AuL_yp+3vA0AFg2Z z;u(&87p?UieX&4mvpBvkCFs1Oq$?~H(~dstRPI1?nrS7KYWcst!D>x*z(xFj=jrp3 z{qN<=XD=W4|7|=qCo9$q(H?k|-xqE$TPw?U9J$SkeqhtLq_l&%n9jP(s}-*iXqe4h zDyZkAcTL<6&%$Re|K}I}I!=HV>i zVHRJy<+dHihr40(nb%xBJGU8RoFSE);|ccVF!gQRjyb>FhRsS3h3j#g%cg286z_M~ zY%E;7quZzn#e2Kw0asVOfF|VzuF27cMofiB9Ez2XzB>G;ogZJC7Xq;f7ANX;sXTWs zA>mKoI1{kk6tu>wxmU*C@4Xv_ 
z^b$xsqG=pB7a?-PtsT~#zi5WaiHs_Q9<;m@EU!0aG%9vm!k2w&@RobDFL6WI0J`dq zuud^rp?M*HPD0`nk=^>jEqukGhwQGH9Q^7Iuyxa!b!z8Hv73@wvO3b1BRc!EUeZn# zC1-hvE^w!I43;<(Mk6eyIOV{7h7!DhHkAf(hg;@a`CoPYyixn#%kupnPqufSKG^?m z{|oxWBW#@l*)c)QE}pfs=5~<5@lai)Wtqwd2vSnrEZ9lZ>E$Bqip67$d`vFd&L(g zhx_>pmd3mMJ;z3ZuvBvG$44m_r}E1&jZZ065OVy6#OdckIaOvio~iF?Vghkd_?po) z;q^^w@YS(Y#@#)LNIZa@PA?$p6-jLP93L`IvPxc5sitUjKq_kPZ_3) zG4(S8;z^;A)x|>PytDzz*tzep<0!hdc`v?sSssFUnxS3Esv`1hI_YPM7bxVZ6?{0=3= zD8BH~iOcN%Zg9wH%6zQ8miL1M+hP}(jZ~48<5Ep!Fv@k@)f3{OS|9`Om+|JXM|G(1Y@E`{MO~e2V@hKil96K_8>6x-JVh$~Ic&bv2tNHYx z;6AeJ`!tP3#~wYXp0i6w7o=;2&GPj(rn#Pb=~8KlfhHDM$+U< z`lF_7b^%>Bp6=!*l)~mc`=@8G0x6#>IcM|wPv=U*UTxq;#d|C7Y+)a*nqF{ib-7ok z%wh=Y?`~{0P`hv&6JQlyN^Sn!_OMxoSr#b*+gy1uY;OLYyJl|>&w{5x{wqF*j6QYK z=Rey|UzE>(o<4tw|G$;T_0Vllrwv0Xg>csB-18KKg1+x63g58gUsA{URuqN5@^|y_RJI@N||Asn_40Wt_{(n|zq<-tT$N*HwRU3eufBe5x z-G3K9fwrit_Y-InOw`yyMf!ZBo&}k|9F7AO{GT}sBz18>?Qu?Gf!T%YVs78GA=ql| zioVXY^E~#R z@_E0WDR{p8f4cK>d%NQQ{qo`b?^d367OpmtUQL%DIPTzQrpQ^XvAEkZ1V&M0_gfae z6<;(pq+XcG*V-H}<;<8X#vk8N^l`(OQ5|@#91yMSHL~V-ndtK=MnQ-crsYbV|fiSd)4 z;4d0AN{y132Dln&=)SEBFs!LtL{&Sb)-agyn8ZXl*Y(>s&0S}8R_|8O5VJBRI4_$! 
zcWbWFzrZXav1;HoLq0yijHoay8V4>c^NJOiVao;=;>CCf?rE7(JU~*yy)l`8$3x*D3BKGvXUzqXY*pmwP)fF<^IKcIy|(6N@0DmSDm=SczT#QrMo z*DTN)E?KQa9cN5bM?n@SdEP~>x_Et9az%4D4!Olrj-PIg!6vViYg#RWwZY!;T8zi` zpg$sUpHE9~J^yp*%~dJ9y6lWxGw+4&TyZx_cdIBp9=5-=t38~@snW(-9s3M^&Q&^G}s!Gf*yEQw6{WNc#4hLMyi^K>+~s`*~7*g%zKwXZ^tO{1z3 znKP-`>`k|M`O2rFLcXYarGLMoC@v1O zthHLeVnwZ`TITMVD_U5-ZLY!I?W$kPZmnQ7)^i<3gX4>`cBrTJ)#1D2-J`?v{exHU zUzb*xvN)PqWHDkhljjJ!DxE?3Zu$U^(n(X&9^2o^+p~ACPY=%i=gs+tw{PAb9h~nU zp4Ru4sP!O5G$J#`)J`O(|`g9cP&w)~b&y+3QhY6}nB)c7cC6ISE!98{+qTj%Fr zNVt&13ZS0so}K;n_H_UJm%}$r!|&5X^hZR*C?d|qWi|k#zre4!N+xw`V=E}@0Q{wp z*33Q{w!h<q%<4@ zpd|adDAyV~JK9x!{rdLo-5e&jjEHOQWFgCGbh6hBXZ~Wi(cpkeZ>ui7{EzjnyGqEy zii@rS-#uP&l`*$fv={5&OHb>XXY-nq_zlcm>nST`ByXTS1PH5ELh#eGPk$9J@9b-ny=kMxIpe{5V{5Tl${sLl4J=x*P_W% zTdNXfshMj~WvQiWk!7i^73gxF&2tsyWRbKjS_&5^C->ckSqs6pXdRw2thHQxn|-dm z^0oynm2R$`GjTQKBQRF84gYY>?3<%u%csUsi`Kf&N?r(Qd-~|e9hsh?f9u?ltsRs?~7L0JJrndH?$h& zwKv zR$LV1PyB{NL=0f(HE98z8_vBzqBltM3`Y2}6gtYBke3TDVx7FqmufptlK=^PCKwWU zGSi5x++I~IiYv$)T5yZHEvWBOxA*U67pON9Nacm={0xB9?%{|A zWig|{sU28er3jpk9;>r>*N|;~dRy4C{*hu{-brvlS>~8trR_jZZnT4t`2e26V;8(Wqk4C<6Ev-%4$W_i(?Rj|l|uc7sYfoIG2_l*aj z8QippK&#tu)YWc3NmVfBM4`>;-ibq7z3YXMXmbE4#-gnN!$UAyOK|Qm7;Q0_PDqv^ zjE8Wvhj6q7hrbWuX!kb4>meNNAsp=?9PJ?-?S6-&ee)g%_s*B2<&L2tAT6)r#-+`v zv&_D>1z0PGrY){tf#4xF?IAX;wAyYsHf{Z3W496jS7y^0Sdte6qg|;UEExRx;ql?S z{a5F^r?1Z%aXz<$uW)VWmK{+qhq6W&QUY2)gye~186)xUz`A9QH+eTq(0r?Py`4EQb zJnIU>bhqD5bYLOH;w<*3jK(zOdo+rOfG!=6yPJzJc=nIuy0BmcE0!a=;-j> z94c>0?^x-(rOcMXx&VQh&&4v#?LJPUvmAmf&Ez$suWvHab^bIz1dBcdi?*beHiW#i zpt*VB@(_`FV-cyVhh0|ivj3`)Y?g_tTtQD$mXL$^qPEz4ubf(^oUA>^7iY6=(7-$# z{m+~8gX7&-Zw}7i?C+i|-tEpX??N}GIcSCZ=Gv{cLY2h;O32R74o*MJVFQJxIc68F zHbkmrx#@1#uH0lVv76U$@BMnvLiTF7{VA@OwPoaqaN&;H0`TrlgEG?HsbM#9&=zZ_ z2frNtzm|AocuYPojk))~wC;hYW2c9f9lY8-J2>AzZr6k43;V}yVSjaayx+cQ5(m|$ zEf>^EQj~~UwN`$c<3eUwfb1I(7=`O7uR*702fL?xzn<^Dd)JzIAm&fckr1spi0R{( z!#D5RvnbWen1rG|r+R<-rd{hP3mfFYJS(jgG9M7*dq)*(X_YX%JfNnQEOMf7&I(Gs 
z)Z~VENK)~Tq~ak-#Y2*cha?pbNh%(aR5&x<7+f1r{}W~&js=^kJLo)p9-;znzwqf6 z|09(D?@j*v>crmaLT5fBiQsv3ZSChK9mP+%ppoj&sDd0`;~2Tc~|*D%|#CK4I{ZJ_o$}!Yu%*UIZ4S<-`R~F z41QLT@Py$2x`#1P7GMT~f*ygL2bj?mVvGa%i6_`6;~5~xn%SuLmQdfE(2Ge(M=0zS z_Fv`lo88sV4fc26?Vg{#eSf-laQ^=Eu+h|#FcOO~bh9nu;6H=@fAat4Jd9|7e+~$j zqgRkLxSN}es!0VF0#JCYi%a$o4k8l$6XN8IxP?l zZC;m@)@PAuXmKsX^pHlgNd?pPfg_`@%r_#`k#Z5virTN&)Y~|m4M4Eewdf%>;RE>C z^~~nH_eYA^%##bMy9Hgd>n`-H^6_+kUp(A72Pch-vT|(l)6S1CoB2sjS}VjCR4ccN zGpLNUDgWTNu=A|hzC7geyYIRDzNLhg_rz%2&S1RKtQJ99el{1_k(aU|uR5_cFf2We zs_kb{PQ|(!ZIUV$x_8}u%f^}3a(1DNU5PSGGv98u&N`3k8uM`Nx6gzN5>Hgt$Xe)& z#^bE6_>gCt98@p5Z`FsIG5PYi-~$5R)!6ow>0te-NZqCv;cug{Z+C#ka`cdl^C1~0 ztTzMa-9GjCKQH_&f!-PtfX>hVxxMpr`)MWr=gy0V{GYe+xcNUXc5F6Jog`|BOrFiz z7r*gbo{O_>RR@XYeTsG52%L&)g=W3m25yJatF)n^)@P-7LW^npc^5%k8R{YAtyI-o{5SU3dNLM_GvijyAaf3;>N|R^ zgWgi1LXXSoy=zi@0V>NNW0hG8mhlyaQZ4qSIZOQ7HEPJRn=9u1?_=$Fc*;+c{=X*G zUp));|LwB=|9t1k!~HL}@+_m-TPNO|QteCauT{4%wS9}q{hXd}L%T0^zjpP$)czgn z_YbQ5gKGbv+CQlFw_(!$_BDHRZ;_+_^`JcHUGQP=3_+`@hzBfXjI^Xw1lytuT|92k22gC?XIUMdE@J=sb^k3K)ogN7=>g#fh z{_9)Z96Rg(^k8@Y=%5z`>+WOj`hT*s^WQhGPh%A3fqfB*(F8|QP2e*wa0DA~PxoIP zoo(s`k7B@4GDVzlpfM=#Tuw$LBx0tt2O0wxC=L({xaZR-35irpdY#TMl)5BC$WW=_rSs86mb|~9D~4I=or;Y%06irj$a=h z|35g4C5reK_?QXLj9?4KVTxlBsD4ivjj)*FR8g{^iR@nCe?9GOdm}8+Q~CYm10;-I5V@Vk z%t~iDW(h+Y2bhI3IYlHMGsFc;eUUP3=u1(foj`@I&O4e>seGwXrBEV&TB=n!dWSg~ z4+nKCWeIYBC;QTQYs}1YhdC+KPO*z1OMqsi_BOmi#Dkn}3?EMrLlKtq|NF+HB>S-m z9ILejA>o3nX7aBfrbt-Yw5{R*f|O}=s-fv_I-S44_us!G$@kw6;0@sd_oe$rBOm-919MJ zbLtZ$?9$9hLQhnvDmav3C_6)=?sQDUkY1{RqEU#sgYj&_MqvT{W)(R!koPO5n6PZh zFSJ#o)477`lR;ARt*-pFo_(yt10+qY`I!srzQM%2`kgCpxo6q(cllfw)~n7H3{9E= zmC)f7PY4%mHf)}c*Qz+|D{%^kvKBB??K(5P{^tsYF$o)ovbes3ow-REhIx<9y3Nx) z&-86P=PWyW7*fanQ*vTsYdtt|hSLlGJVX;SLx1wXPg-jfk zRM1k6mGb8x&<5G@0tQPR+z|F|oOW!mVLk1jAR=*wfCdW6nz(fex*_ZsTj~jAqTF@? 
zJ4z9Dabi!SD`5 zfKxNRGwhh+;LGxQBR-QmIzQisN|t?>&su0l=k;QX^g!FA!*}kpDyXreO+YCyVPBu`^d%*&feKR=vY2^S@`0 z<;trHEeAX5dez0~&9Otqps!@rwj$tDnuY<4FmOEAY;tSQ6%5r)va3ohg&oON-x@t` zVMAA7ZXp*G62t^SVK@V;211ReRqlzP4{?B-Ys@c+T#*bXSR3aGX5w`yj%$(Vb{MTN z{FH6*0Qpy*qNgrUcq4z zExNW>z{EXK64#;)He;toKWvYkyH^q&Z7?D+3Giq+9L>c}t$x@ZJ9n=nO6;gZxD}Vy zTi{r>y!zh8$@RA5~fcJyVV zZFZrDCvvOHFz1rN#)gt4cJyLX4fR$AX^qeNu#?oB&eZp){Ndv?8ewMLd>pjM1?JN@ z#=cr>clxZ6cGQKhtt=Rt$Ea$2XxWWK(AmA9nEY8Ec0|}tXUeg&lVRrFn=`<%&t^B` zg|qsz#+9X9=UdTEZr`!C8LHg^rJAVtkX8sOi>wpc{;@1HB5|-Xs4F~cTv_Tu;N`G` zl6#GuwaTpQj@t#+uU#vW<%m@r2cQWeF)!iBhO#ih{^pGw3y9 zN8RP!20Q!5Qu|Zmx6DLPt@7NnXT8|T?ku0Xvh04oNGSIaQES0YP0F=-yHK-7cWSNN z{S7~B*`vGIN%?}!R*iSly?xBs&KC}->>`jSnRZ581?nb*ing$%=l}! z7FTd=HlSmj^@!UwcJGeSC3beMZ8l`V@myWAu?lwLG&;*i zUPDAK?PNrw4nozj_!M2>0`A<#;g||MfIXG}14bAo3?rcrQsa3vyyD(}Emn6IDmlC6 z?_*SF(YUfHiNPtVwhldb&+#}7m83_(1&Vzf6hs@xLXdcJYfJD7hSBWYfpbYzDeSa0 zDVrWw^mXrRMckOl1Txz8Cp;@SfoI{KUd~i6r^KHEv4L-n-c=WWptm+e$O-`& zk1;c$mRVuBsB132u^p+^kU3G*Ij9%wJCv}ft$InQIE&Mu)I{rBQhq>6yL2{c7-+1t zO^s`P3`RI$`4`HLjjZR#9a!VmqJcW|VE(Ga9JoEsi9XN)ypl?4>4#{iubfI7neRyK?320spbB;%APmMd4DR3Zx`JU%UWp-Wa4G7w7(pCr zFXrFOwFtq`L448j9Ar11fvN&Hz95XoDi6X1Vnl~q&u9vl%H=gApRo4$i+z57lLt!)XDPTu#Sk=BnnI;*EIWp-Z=v{l_!TPaNxGA$KV(v7J1&Od- zS9#ipYmDP&o;72~z2~%%c5Iqu~UBua3k&H7p!J^SKL%6g?olo@;cbiwxqXp zG2o?H(L3YnbZxF8$<{(UwO0f;wONZD+rk1g-S%dFJ+Y|n9kGkrx`KV89$usnJy$+S=?he zXUqM4u3-55+0!R06Hfse(2D;klOunD%(T+dGK^kIKe^OuW@z{w|`Sc@)tU7E^F||Gl z##V8s*|gR}JJaZc3bt$$GM9Pc;V2QaePRPq6$|PI_@d9Jk@}$%P3sLsh%WF3--Hty zZ17vd*cA)~OILQpD`Ur>Ml_g@BV`u^Bux0JtN<@xzO=-Y;b%jqC$~GCZC$^N9iEQn;YRI3(@~yQ z#IPHKpA2XB15sBn>~^nv4qCv@qBMS2ioKkf^nH6-`aVZvHmJJ$XYDHs`99|Shz3pW zE*d+#su;*}_ZBx=8$87*_>B>P-^M;(@<6>xJLJzr*^pe?ImLgZn2URgp(_~v_?oPf zb{6eKtGiHjTyx)qxq8elboKiBEUR>ZVA~zKiUZki3?69+C60>m#1cfacBFlbz%pN$8yK8 zAld2_?3!}-A4BWJj-VlCiaV z%q4=woOSn(lGZ7-6n3;h>xiatgKtj(J7$xmOd0nMH#dfz`5TNfcJ3iwEUfFp4krQj zQSEXn^tc-Y5*HRj#f4?CvpPiUeAbGcR{j}RI(FIK(1)DB#=<$NWYb6bAm`4WwV1?8 
z*qQHvFb6xC1Hyg44BQZQ{JDNGbFhBC`zbw6b6A+khirLW*lB0M`0JT440hT@m@H$$IFk*p4&1e$HEu&&#!$31 zVca!l;AXHh--6NJgmDkhvli?K%pwviz4tXkK0d*W(BKUFG!80kNX4|t4;hPzF%C4( zG3{WO$Y%8YaCiBvDHpAAz-iOY73`h7?;qiavRUT315A5{@qS1ma&Ll-uRy~&fZ>zp zFOJA?O9HQcZPz*>m+R};lH$Ef3q0VBkGHL*gckmMFI-Pc&wAnmO4u3m*NmnO!L(PP zKDrlG<#$xwv*!wiJKNi)`cA&Y%pE^v%FKVfvVF!tsOep8veW;E5=ybu)X8~0Sx%F2MP(zKO|C5R;EJ5sLdr+ z%A|V7&rQ(I=OiRP5t6;|1o@a34gs%V*NszG)>d0UCs@(d!x?b(T4jLN4Z)bv$R;C> zCpsdRq4Ajb;M;4uZDj6xuycLWX_4=cmx!CiPA{2U#VrQ%rd4@ z9*UDV(p?rLI=V!b!vUbNzznA7La9rs?_(U`VC_+(SMcI`^{d!Xz;eDEc1)$k=rIMy zX;?&&LECztE7-YSeQm)&0cooaUYWeEjy71Ve~;9`wMg_5cIFGMlFZ8L-bbOe7QR!$ z&U~Tumy=l~?93Nhe>s^|!p?l5^_P=bCG5->T7Nm26=A`M&)Z$peg$U&F#$-9Kf+>) zQ!Y1Gl-;$9Y4usl9^GQ6)oJKoHqYLIb3WVSnLt8rs@|_?tVX?=ziNQ@)07z`?p^ZdzC(li;ED{UMF3OhoD>ze(4eaM09FFawoX-B` zP=viVffwsYcBF{xW}lnHjvmoFz3Uq0JsRwo?btHNF;#8}H*3aDF5OPku*uF-5LXyk4$c!p`-$av4!~!Isy`l}p%Z;>tNQzMNuKC78_RiBIhrmGvjM<>yL= zwx9?*KqC&0v+(@-bFjnYSZl0bAJvE7H>N@)e9-R~)y;tV+1K7c@H3znYz!9Cb(6$V?aa?OF3iqJ`UXv%9iu{kk8|nhh8&9CPP6 zVMVyBgp(_hZHsUABG=f!`ygW`JZG@KXzTX0>v1`pe3!R!&ZTIhmCv0TTF3 zFeLC~aaOeVsyJ{{{vO$L%Fh)HzlfRgxHzR@NaBfxz1z|frZ1V`Nl)9m+XNxQo<6!d zp#hxPefs?j(pt+NosE#+8a)|yPHBkMZENz6S0oPP0@*N8Tl!Yxld}`Kwk|3AbXN(M zuZf*b$J+XI<|V{Xge61Ji5w&g8G0Va0zTo{&*}{2<5u?P??XUrlBy(ng2Hf$!w5*s z1q#Dq4}PJ{9<*$AI>X^mav&Pw&UfIJglOh59Ad;V{Idg~`^yiV>_yT)Yg=Aoes1L@ z_K`p%#PQG3%nQulswsf#y-^a7PUqbe0~*l_$rhtXU8<+dgvHzOvy1PCS=6iTUJbYi z(Xi*(2E!Q;d&55m1Q}d0if{l!2h?G&)7cGGyr5*`;cN>K{A2g%jk;)FAEw%Svw40| znZxDnv~hdrn&_*~0=S&YCJd(Sl=@S|#7gtyjZNRZ8H&P2?EVeD|Nb3GzW;sz|DdS? 
zD&_iCbn89v(P`)R4d=&Zr}OBMxg*j8IoWWrfr9xRC z1)4bLiUIX`&w$(WY1Gei|MaKy(i60w#sOwrpg8cDZqSo$cuvX9PUld0I&i^gJPBvO zu%sUxn|o;FC`kbXAhC9`4N?`;2{^tWjH=9&J$N@I9B|B)PBWt^GgfM~q%xbfKSlAR z9CS{``Ejj46Xk!QB!{{2+D*cu?f#w4D~$ks_HO4CL5wdoP3RURDv983dL%M7#V#=8 zaunuDvzUa9e{nb)kx#+Cs2X>ZJ7fiS6eM z3*fWZpE4TLlq+6K1Vl8zjXmYX_f+Wtbm@59E%#VgEbdkZXh){ms3QC+lDe`3Rtp@* zX>^v?$cMUJf~%?APKJ_Zw|lqmk~4okx0Mh1yuPE(DhX$5ofv{si)1S!d&`W6o@enh zgmH)_`f|fSDg^R-oS`wMa&eJZGbu+z=KQJ$yCJ95KISe*%G~~(MaZZ0GS(v6u!lW$ z*hc=8?CB`K3|FU=%6!cgyR|cdgW{^~bawSsmx3k`;tL!q?>44PH7IjErlWshUl`BH z0FUK%Ofy^CiM~wAC37dhlC~D`?iGky&Z z7!Y4q`S$kFtM1ocr*jx**)|O#I0mY` z)OilQo=z;ZU!G2zV)TEBL9NWn#Wk&)Rc@w&a0qCOVKkF%iENOC8d<9avOT5(TQ*3{ zN-i){ajDq{p~|CI!EaWs%6n*gwAWIG6v_R7N=Kpz+PUZ6)%AuYiW_!1hhv49K;?+h z{E^!2oI~D?Q>{o+(wItBWhukQOwyl22oC=3qL9c}8=0X7#=xGh5Bati^yjUM&buk) z*wkEhWds3WRkP9=TMntY{f|?iMGGwgM`L00B zc7|lLE7Ed_jtveDUBLJ^D^=2N}jj(x0cB_Hk9F(b(Rgo?ULsuhBaF>{}hL^F5N3|bEkW1uX+CN&WDQ=vy`>MNaV22sj|o_4Jh5@iU_ zO(8I*p=@NR$=twN-aXJLOP|oIPCH1a7+I4M>tfRSN++W{5dp>>&&F5 z&X~K>U7Hh8vouz-V>D1@;n7!?xNLtz$zK$^?N|pg${?hZiJU3Bx!lZ$9Z*Te7X<0S z=h?G0LvrWuj6_K|GkYMjLNk{&Y4JnHS}@uk%Q0i6sR`E7=|W z&!7WfVUmA)%49lFy?cszc=ClZ@2a2fr~K_n<^nu`-S864xca7)#)10NHY{IdE#{D) zs94vXCr_W%18sNC9kn62hfph{fFwZU(yGZ=b3B@4W@^Qjj8&o%pDZ=gkGZX)Ay8ve z@1(NYaiI7nY{*vNr=1^PZrU7}vSq3A8a0@xzOb*%ZP`xLv?G_7+^p%(N?JRt`_qry z?luko)evR=98$zNE)LQp|`{A$Gm8b zEJeVmc~DX;6c#fY2FdiDQPg7MlF=0N7>C)`>D*R-q_sIO#+ROf+PfggL!L&kCuQxX zc6+62*R3r2hGo9zYV$t{=?H~zffzxf5OXE^RB@k5=syGZaf}$DdmXdEcSy}cz&oabyYq~tF8V6fSw0^2>wHDyI1qtt?i>w zC~Gk9azTwbFYkMei@F*$FE2Vj)S%^uDy}N8I(0KphUu#2m8|H%e-<4KU*Pd4fWS@X=akywf>j`>#Yv9rc_<;2>j5w=x{I#~_Kcuf2>6g}9L zo0)7oWgJqqUz0Wey2`#*2FkKSQk|Y;JFmJ#Tjb9xUB#8!I9;OIRs}k16hRk=gzAWo z#A+XA&H=LpphO{@V8#g-_~v%vTu_E9t`7kc$SMc9KE%POXu}#ivD`9=sU1hAk@h;B zY!CwUB}T9pA`)p0Pgy|a`u_je`|{?tactlDTb}|W-5T2|QH!@EdN*B9vE6Q0+p(YR zB==Uj%Z5ZqVuvDm3DS-xlkfiOEC4}@vYo`8zEfq@^f=tG8~_IgXZana5|Y-i>Tw!t z?URi|Mjp$}(vzWu)5Dkk5C(1f(iyb3!? 
zpPaqh69hTz2O5VdDMq4Wk8(0hmGoKaz=j8r2wX$OLf1({4pljqWZ zxw+dLzufA+?Cf@ZH40?pz3irOoK18$3`WkZ*R$5~PXC2>ytA{rVUFsSNz;)!)4iF= zEKeiY&CC6%Xmj>S{q2HQ=-kl}_=V+I@c ze2FM%MU$zDR7%Ed7b||1c-5iJhhB0}Hj+>-iW6|0XGxx63QyM^nAZy}x_vgIjunW8 z*{xEMSl)e%LO1fohkax};uHuN$Z(~Z@yc!+ybCoN9;y&31uR!%oub$PSn|Fw+$fP+ z3mGlkhpyw5l?vKCpN&KjOrtneKB+TsKJH5C(JU_U&dV6tj z-YzcpUpp6P!;6der|0|5t)n+bm;0};hUbSD%Xi-okN1zxk>+QI`@_p&M+{k&o2t1C zB2@j=HXZ#B$eVC%6nCozwZo&ba?nw!6M3*v03~?k@~mcGSvIZ1xTJYxb70~0t|qi0 z1uFTmak?y(&I3*AR+cO{?IS6$Bhud5oSN4iF%%!wLX1?TCP79Y1-k0Kr3-Zvw1y?a zB5rx4V5z5aoT0-9?p7L>Ih==lqo$1`mk0ZS&eC9%lca?0o5|EK1w-pBl>uxv7)*7V zsOVxAOfsM}FcW*Djb|A+OQ`^DQ>=r4JJr`jrKY;FwQG z1192GLEY6M%&(d<&*El#MFcppvbLVhNw({ZXY<%EKP9csKApYT)exohJJ$OV@r9q$ z*ckqe%foi3*XgxceVD{r2P3lMTL(Mms8P2%m*@Fwr2~-1*gj)+#Yo+nj|(&sqd z`^YiS;s#g+Sb1gGal`T2=yi4*f7~66tvl+i)^v~jLGEJqg z8jYiDO^ZmuUL#?0#?~B0Mo~^QWGyx9z07fAm{~9qIhBSMKXjaB&n&`u0PY6#DY?Q1 z5xKvlGV*d2Dn}6IL$@+%7N8a0+GW{r>jY&ot zNj)M}p8?H#;y{fP2s#v~;@FQgLg(V zJc>bH#2NSk7)GOUma0kc$!shjJfP5ND+*GPq;VFHV@leMO6CPUKGIo$0@n>3`oC00cdO6EkAstIPZG2APflG~$r-swZrhrVy64ivSI z_lIZYJ}~#a(1wCW+o?TVA_<)8~W9 z#~|{>`o2AoZ?IDzoyMicLU1d3N8kP$L_Y3BhwpyvkdK?0?uYMw6|^UtkTgl|)LDS8 z;R`WJl?1knV_odAYs~E8iMR>WEh_!kt_jP&$qS~*hfw34$8rv^bF4b@aDH~gflR|d zrKY>Mx{-EU!Vi38C2rU%lW`oU#B13{6u7MpqOlSqkda}M;hLDsY)n&_t)NGds5!*L zQty(|r<>$q*zG^xe9R~JP5&C#MjokR zImskgL(NL6#sra;DxHVq_aD`b14R#@P&b!<#FkuQl+P36^pESRGJAJBJDum)ZG6O^ zwM**2FvNY^+3ajf@gAB8+2n5g{8>UT`9R`VX9Zg{5Bt5g=#w(JzX`L>l)Bm3>4+oR zD{#92A zblR1pIM2F~y4BtOiru19A_zSnYv83 zHg6T|5rdMI&f*l#Gvq~r5k5JwNZ1CDXz&?@Fz{v>P0jY~xRhw6D4s;6X`Hf@UnVuk z^-TXTp6aq<fPVOw`_{l!WuHbe_p!z0ul5{QV^>2`h^WrX$1zE%WskLT zff%bqrOo3^xhV{tw=S|cNy=?47xV1d@?aV@@ZVur@1FtBo;8H{(EnZhJiHtp4@41n z$)4pYnwfZu3wc*02W$Rv|KM_X1fO0STSReigH5(@NF>DCWtV)|aX1Yj2k=$ub2k9d z?{~I3Y+*BQ{ODi}JcU7F=fN~(agmyYVi8Jmq0c+1BjZpi2E%Y7M{r?mlEt!vQ69XrK=ub(WB5@ ztnu}eMU5^CZt zso_?!R&vK&QYKy-LU52Do88-R+nU{zG)`0+E-L1)YlkP1BX|eF>)7?kx@@{~bJ0Lp zWzxS1*s^C%vB7+<{6Jag< 
zPG=;RsAo-*;xwD7^foZd45z}>pnT)rO^OoW0X1jWTHPu-v(1xi0cRdYPwy~xxT^nR zczk^N{_5c5?DD^D*{>GlbWf&$QoC8DgLBY`siROvA6-3$v68x>$*9UEkBA!;#tF2d z@DJQ_mWGv?ki0=w(ytk{ND*HJGuPy2Z3{U8cAc??EDlxb=02&$cqOr~JB=f@Qqjp= zMkH~tde~t*9Edf^!;msp&s3NINz^}yOspe0_zX$E&Q5`;mJ zzs6Q)i|gVbkSdDOhjSFyYr4id9tr}pH#<;{)8V_zD!u3w=ibK>UFzOlWcf{}%+dp9 zg`4&yV%dOMYnDe}_d5U8`LAm+N@Ya(Bd+^+r?fB8CW9Z4?~u+^0wS&O#3`z4gcCPO zu*~b%7YFC>j`j`^@b{Wd1{PLf{rdEHzhGe-X6+W*K}QrO9D}73P)RkE#jH4DZzisX zuiuQBH5WIffW~G(P#H|I%AD4S*3#m?y zYTLO`;mw3Q35EC8ek%5dm&4b?3x4vG=w|bz3rCYY=~yBU(PbgcqY8?Mvo;8ZW1eIj z;3w_mW2<8^7j?N8wArm(5^Uh5)DnbD0QF(;DV{yEV6IqUK?hDNHzI*`@K1smz;S{W zTc26#CJs??rgbDMMwT$WwWnJQ4*>?SL~t02)ztmZdg?CA!6;FnmD-6n2;;E~oiT57 zcuqmOVklhEc)bMwKmwy>1RI57*9anp03|9)W;O*`$1^pTg@GD5ma!3)tR9mf)tSpX zk!w1#IFhn+4=Jr}=2c7hX`B=-Dbe`lG!9kqg7NNS z@~^!B;#PX4>DL zevk5Vlsn^~3+y3|hDGi>3_bYXI(ofc@A=LS{_XX8)xW*|*4E$pTe~}3z25fL*7Lvh z`n%iPy}yax<8<-vUk)<)-+JF&SG9BhCcmW-A2b9CE*(AV&f@svKs1w7c`U}XVXyOy zt)}v-cdLTwEE|ZX*KanSmgv93ue|;FgmTH;ud z3~5j?ND<;XX~<0M9f8QK#V8L#Ukf>vz!1dhDE}aG@#Z>D%;10Nue|>CY);-y9{bMw zkHHG=TmQYS?ehBH1exGz{r`xcb^e2Wzk*rzSGVoy_vind^-mZY=Lf_6lY`FOf4n;G z-TybM^8a>k>uLS}kl$XMEQCSUOqa~zGV;4|S_AZDAX75}P+yzL_W7xBj<# zn-%%LzuDV5_FraEORR+b)7k&YSN%oRa9B0NFYyi!_ zVpV+OatIthKu;o^je(!raAJZwfAt9bwLfS5*TX$OLLGOn|Lv{a?IrzxZ|iCQ{~^Ct zOB@p321m?s(r6htH&etw%opb0Z{%DRZ#YE` z@U9S^^YX81K~L>h?5e&OO0A*T3+Ec+3|0o~8jq7!OQdQt5MK=e4Sgo;x`wa`=m)~) zeaE|oBM#~uRD{UnbRhbD^wDFWiGI&ZU-lY8v|2Xxw5-D6(PNJ0M~P~U@bT6}Fe!ko zMWDr6C^MzAwYE^9Rs<0fi#0cm_8Jvu$xx5zkZYBC(>(B17c0=+R+1_&$tgPs-NTY# z3OLApZdFv%X@R$AQMn@_(^M{CPpeLa4pW1p8V!KyTt+!z_Mvs&UqV^vRQh!%RE1qU zUK0C7niC8`V#vZI4Q^znJTur`)7zFv+=&Y}?wYu!JLIvG<_ZQ4T`bT)gZxNhjGiED zTo=%jd}WEy;wVEr3oTv`FMh$5`DS=>Bz!f>3GgzA5CFzkBZn4u#VPRH9#*WoEhyfL zBX1=2%(LY-1l3`*ny`v4vXs415}AUO5H1Qv=E(5I8?o?JTt}uimqBDbv5Z?6ytn}? 
zttDI!#zCb@Sn&EDR`cl0<}uj~FTpa)alk^q zjoo}Z!};@Bj_vUh+qsYN5YH&h@FM3O0~ydu2Hr-AF{U0dX?ZZ(g7BdY5HfPA6<`Te z7{}Z>jx6B@8^PEN7+m1=kLd@*4bmc9xUsY2G3OCslnQ7?;A^oCgzT9x8b)B=;9U+5opZS;;sp%)!`EY5aNO%C zh-k}!#!w|m>@N*=3EF}j+V2Fv;OSc_h8db=i=szp30R~Byd%FtKMz&PDHoa`0e)mfm}Ra6WsiT0mT%1E<(zI*i>!r(3SDZ~0}s8!B+BnH8yt;>!!|(eWT-;}eagWmjb{l+ytE%13fZ4$Q98YMV2|>;}xEmjI zE)pY1ILZg+Hm-WMtCQcb%?nrkl(;rR0|1|l>W7gxy$29w97ki7WV%ZzS6v{A9Xr0) zNOS_drVYcDmi9OoB6^GhC%7n)3SK#{WY=t@BRQ@qt(359d0yl=1>mKtocO8nNaefc z5*B4Ob(xh^S0yeJanu&^WYXqn$19m6;lf^#3K_kl>VGxJ2-}BugHIfwf$_I>kFvP>ry|j8BNG1@AY<(Mah0Z_z4e7 zn#Obb^zvW;@_7MK^HZ6;4Ix(NN5eACooLiO#`XsCntL#XF%Zq{CfE;H9EOdR16pBD z1}HQ+r64;K^Tm~OkC#OOQ41{7alt2q0oalqm0*v9zAP9<-6c#I3%( zMavoWkjBs1L!l8B^{LLhG>^nOoT8#QQ-9xRxapP*G=c$!8C3g>zTWG&ALofm@zU|h z4kq{p9xVd>XZMwDCtfyF2ruTDu_$%6u{;=q!a=^$KfCJxy5{1(x{T4YG!%B?kqGAO z4ItUV%jOozx&!kB9-ScXFGW(rGqQYDQ%^!otH9zKEf{gr$e)9-hMzkjx<6{l-$hY_ z#JfaM6UhB1G!ojdgp)N8b!s#O&4MNj&1noIaNlRGM=fF>yzXRH8_?)ysXT~o@S?~G z@$=~^-4G-v3`BDz(-aBFKgvlk% zDx4tmM?$7k{U;IK9rz=xZI~jUM(K3VMkq}FV}QXc=xSLiBQ3`!Ec=Hw_q_~Zs#ND{ zIcKHQ8I}%KXLq|VUmFy*I%OM9%=)}VNuJfa_F&dzw$g=4cMGtDB`V&BBcL-7Vk;sL zia%BAmGadsk*afCckZ-a<;+iX&;O^Lb=N@MmvtvAQ7i4T3Bs5%fr}iqXief6HlVmIF5X6k$)qld;$nW z6)13}X)4DG4J;2*vRSnVv>w>9?j|w)U$^VW4SdjjJzFHu zWB4;Lx`LxKRCL)K)C2J*j+7<9&i2-3qiko+oYc{*TQli@_lkT3=3ci2C3?^vXmDX`H7y))>(AJ;Qbr3m3s`~HuUWZ1mkV5`tXJ!Z z1L&i)evrmQ*fDiEMW81`VYjVP#3cgd>105?ID&Qah(s|ZqsZfpxB&%qLZ?|rXNE@5 zSW&c=P%`f!zynn2h(X{vnxh((`Z$bp-(&aJ6b`HER)%4>l`5U3!8r4{m`R*sr%a!W zI=Q}Y7ubh$1g;^IyC?d+4*d^g4>l3YG+P4OS=SrD?cfiv@i@;YuK~}NI1+j`7jrd7 zjObQNye=@YXrDF7U`Pe-lWB&*ffnq^TG*dhh}X#Eu_kfd_0>&R&*qfJVt4!GnsilY z)yQV3Kls3vAqY^d%O3CgVK7Q%y0C|91{LgX_jfl8HUk#uG+@f;J_ zN9tB&^JHQyU4&6Fk8jwzfQEu%cd6b2jG~TfG+>G6&!3B8yg=j>C769)FAPLt zAoYW28mGz!YFsZH;cTY@Ev_m5g=ra9N(QlVBkQy{%h`n32TG6=RI4S9CPu^EE;&q* z>i0k0TR-vpY+Y@7&ALyTO#|s?O)th)iR&DC7(`Pp3nW$2udh3P-5Vgqa5YgLhHVkW zOI4wHR|J~$I9g?GF`mV-u4cowwW_ou#QHSZgi*pANSI+rroN!lg%F3oQhFru53;^y 
za{LipOdR<3Iy(x;=7kHuTt;%L{2KDZa>Im9Y;JdmfA((AIujTl0$US!N3iG5|W)1E8ozfXfqokqBk z>ENGgAYL2>HPQu4Pr|vn$Woc9X*Gcp>}H5{eSvwbeZ&}NI78>rvV!qpVmk2pG!D_` z?VxIRThu5|Q>y}{8|7vWtmJtfqj%0*LhE9eDyIV{v=Ns)VVy+B=y>9*0y?kl;^qwHhuMEYz}o%_gIcLUF1urL8yiol(5N2 z-OF-#68p#Q8%gXBmtW%6ly)xW%)I1nG!{CKS`*6g=yzZ-s4sqh}gN9*cw zn5bLU6A%4P9*WHE`&g(&%U3qpijx5WK``e*MFPFozk3|dlN_C#Suuq!kM@e^@{3$CP?v+)7Pu05DQ6{*+ySLp zHQt19{3z$nv?X`MX%sHR5#{5wsWb@`DpSU_FN$xMl@hERwkIAgdmj6g>=NDVI^-A4 zr)bJNNAct3oSB@Vhe|7@%`CjxOzK!V#1soCEjiDiDg}T4uuo{wzS+6!$_zKRT#&s>tHVU<@n>*B~MzEA%BGjiO48{REyrWaTHvAqM zkLs6b>gXOme_@g_UCv^kvmsJ;1jb5r26#;u9{&~5hBhP1O3sh8|C%*Quk(zoY{6XVwzyU7-i_~=ly(FJ%b?`c3nnr(#5NW~2VE@L zTT%)bS8-MqLyIiU(7T_LaVf1zg}I@NchJb-&LXh+NZAnzQC+Y6j>Do4O6+;{SXwTM z6zBc7;GSuW-dQP{;Qi9O2cHsHjV8Q?KJdink7_Z%@7jzd3$mNOe94kRf` zW-7;7pvS3%&zew2v-TA>biVArcup@=Z9}#lCvg<;s;aT03dgKg5_OFqJ>|3(b1H?l z26%?HsvF?ZTRo1(64dTX8Vnd3W;=W2n097+aGn!K8!4ke{cbfVefA(d=t`Hjah$4H zcMD2;hF!GCx6TYTIb&1WA9rM+UtdpXhs@2U2{R*T7ucsT7dQ7+i!={`KXbZy>C2?Z zHjhpuXy&)kYA0?g#v&^(1!CrmOodxBhmJewDm;OT$TxU@V6K3(%22^H{3V|%$D#dL zPK%e2ou&hkOs^b=^{Y@$KXdu7nx8)x0i(^D0MDlAh%a9}UWLssqARv`dp+?JdehsA zCcvQkbN5zgcMnJU*6K#3cq)W_JfPSG`+RA2+V2RqoYM-s-uEyWAJ&Jeyd^1=ceH@- zfe!yc^Z3h`CcHh1^K@NrbSmx7=W_`%vQwDboUbjI#+8p8-xz{wdhA)YX@gOFofnl5 z_?JsTos~H9r_j!&(9V6zitRW-u)8m>in}@6Z5roFM=G=fb!7&s1w}?v1bpGLyqiFJ z(uC$r0#Z|Og(t;c2+dBWSH5*cr&~h&Pp)8cG%pU05B4sF-|;1$fyM9w$=()w!;1s) z{+EL{P{3GbGK{C4nbfwhD;topZa;5}wY}3fZw~e@*EYmuyW8Wj)Cqsqaz=oP2VVVll1e=X9Xdar1oV2^bS$EURbUdmsbhQ zD@0}?mPPpgC2jjt@qd=^pE{HcrSpf5qerUa9{jiM-R9kA#Y&F~PI-CLFUCPl-_b3}-7AV~uh|f5eo68}7_L)0dj#_YRf)dc|;68huw;_>2q!OZV@d z0C$b+{bO4u<_R_cL-v)qUV5Y>0GbKbZ5$xOJB>xL(v3K_N zkqFUS^@1V9yh4XY-M?2Y8haRi$g-)M-Of@f4%YiBlR>EOXICr-ty%)3I1ZJJR **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. + +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| 
`fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. 
| `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) | _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL admin user | `postgres` | +| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels 
for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAcccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | 
`true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0` + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. 
This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. 
+ +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. 
+ +If you already have data in it, you will fail to sync to standby nodes for all commits; for details, refer to the [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use that data, please convert it to SQL and import it after `helm install` has finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. 
A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. 
Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... 
+INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinity` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. 
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command you should be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. 
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/chart/charts/postgresql/ci/commonAnnotations.yaml b/chart/charts/postgresql/ci/commonAnnotations.yaml new file mode 100755 index 0000000..a936299 --- /dev/null +++ b/chart/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,4 @@ +commonAnnotations: + helm.sh/hook: "pre-install, pre-upgrade" + helm.sh/hook-weight: "-1" + diff --git a/chart/charts/postgresql/ci/default-values.yaml b/chart/charts/postgresql/ci/default-values.yaml new file mode 100755 index 0000000..fc2ba60 --- /dev/null +++ b/chart/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/chart/charts/postgresql/ci/shmvolume-disabled-values.yaml b/chart/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100755 index 0000000..347d3b4 --- /dev/null +++ b/chart/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/chart/charts/postgresql/files/README.md b/chart/charts/postgresql/files/README.md new file mode 100755 index 0000000..1813a2f --- /dev/null +++ b/chart/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. 
diff --git a/chart/charts/postgresql/files/conf.d/README.md b/chart/charts/postgresql/files/conf.d/README.md new file mode 100755 index 0000000..184c187 --- /dev/null +++ b/chart/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/chart/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/chart/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100755 index 0000000..cba3809 --- /dev/null +++ b/chart/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` files so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/chart/charts/postgresql/templates/NOTES.txt b/chart/charts/postgresql/templates/NOTES.txt new file mode 100755 index 0000000..3b5e6c6 --- /dev/null +++ b/chart/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,60 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . 
}}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. 
+{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . 
}} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/chart/charts/postgresql/templates/_helpers.tpl b/chart/charts/postgresql/templates/_helpers.tpl new file mode 100755 index 0000000..e13caad --- /dev/null +++ b/chart/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,452 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} 
+{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. 
+*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/chart/charts/postgresql/templates/configmap.yaml b/chart/charts/postgresql/templates/configmap.yaml new file mode 100755 index 0000000..18ca98e --- /dev/null +++ b/chart/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,29 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/chart/charts/postgresql/templates/extended-config-configmap.yaml b/chart/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100755 index 0000000..04fc917 --- /dev/null +++ b/chart/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,24 @@ 
+{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/chart/charts/postgresql/templates/initialization-configmap.yaml b/chart/charts/postgresql/templates/initialization-configmap.yaml new file mode 100755 index 0000000..3c489bd --- /dev/null +++ b/chart/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,27 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . 
}} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/chart/charts/postgresql/templates/metrics-configmap.yaml b/chart/charts/postgresql/templates/metrics-configmap.yaml new file mode 100755 index 0000000..c812292 --- /dev/null +++ b/chart/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/chart/charts/postgresql/templates/metrics-svc.yaml b/chart/charts/postgresql/templates/metrics-svc.yaml new file mode 100755 index 0000000..69f1a8d --- /dev/null +++ b/chart/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/chart/charts/postgresql/templates/networkpolicy.yaml b/chart/charts/postgresql/templates/networkpolicy.yaml new file mode 100755 index 0000000..340cb58 --- /dev/null +++ b/chart/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,41 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/chart/charts/postgresql/templates/podsecuritypolicy.yaml b/chart/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100755 index 0000000..6b15374 --- /dev/null +++ b/chart/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,40 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/chart/charts/postgresql/templates/prometheusrule.yaml b/chart/charts/postgresql/templates/prometheusrule.yaml new file mode 100755 index 0000000..917b3ea --- /dev/null +++ b/chart/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 
+kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/chart/charts/postgresql/templates/role.yaml b/chart/charts/postgresql/templates/role.yaml new file mode 100755 index 0000000..c99842a --- /dev/null +++ b/chart/charts/postgresql/templates/role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/chart/charts/postgresql/templates/rolebinding.yaml b/chart/charts/postgresql/templates/rolebinding.yaml new file mode 100755 index 0000000..b61bee2 --- /dev/null +++ b/chart/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/chart/charts/postgresql/templates/secrets.yaml b/chart/charts/postgresql/templates/secrets.yaml new file mode 100755 index 0000000..12a2b7c --- /dev/null +++ b/chart/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,26 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . 
| b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/chart/charts/postgresql/templates/serviceaccount.yaml b/chart/charts/postgresql/templates/serviceaccount.yaml new file mode 100755 index 0000000..7583136 --- /dev/null +++ b/chart/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "postgresql.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/chart/charts/postgresql/templates/servicemonitor.yaml b/chart/charts/postgresql/templates/servicemonitor.yaml new file mode 100755 index 0000000..ec7df64 --- /dev/null +++ b/chart/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,37 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/chart/charts/postgresql/templates/statefulset-slaves.yaml b/chart/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100755 index 0000000..179841f --- /dev/null +++ b/chart/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,302 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . 
}}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . 
}} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . 
}} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} +{{- end }} diff --git a/chart/charts/postgresql/templates/statefulset.yaml b/chart/charts/postgresql/templates/statefulset.yaml new file mode 100755 index 0000000..9eb1cad --- /dev/null +++ b/chart/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,457 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- with .Values.master.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" .
) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- tpl .Values.master.extraInitContainers . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . 
}}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - 
name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . 
}} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} diff --git a/chart/charts/postgresql/templates/svc-headless.yaml b/chart/charts/postgresql/templates/svc-headless.yaml new file mode 100755 index 0000000..6f31bc8 --- /dev/null +++ b/chart/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} diff --git a/chart/charts/postgresql/templates/svc-read.yaml b/chart/charts/postgresql/templates/svc-read.yaml new file mode 100755 index 0000000..754445a --- /dev/null +++ b/chart/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,46 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . 
}}-read + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name | quote }} + role: slave +{{- end }} diff --git a/chart/charts/postgresql/templates/svc.yaml b/chart/charts/postgresql/templates/svc.yaml new file mode 100755 index 0000000..d24b2a6 --- /dev/null +++ b/chart/charts/postgresql/templates/svc.yaml @@ -0,0 +1,44 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name | quote }} + role: master diff --git a/chart/charts/postgresql/values-production.yaml b/chart/charts/postgresql/values-production.yaml new file mode 100755 index 0000000..01e6039 --- /dev/null +++ b/chart/charts/postgresql/values-production.yaml @@ -0,0 +1,556 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.7.0-debian-10-r90 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. 
"stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PostgreSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master 
Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: 
{} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend 
on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r99 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/chart/charts/postgresql/values.schema.json b/chart/charts/postgresql/values.schema.json new file mode 100755 index 0000000..ac2de6e --- /dev/null +++ b/chart/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave 
Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "replication.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/chart/charts/postgresql/values.yaml b/chart/charts/postgresql/values.yaml new file mode 100755 index 0000000..8c766f9 --- /dev/null +++ b/chart/charts/postgresql/values.yaml @@ -0,0 +1,562 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.7.0-debian-10-r90 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. 
"stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example 
+ ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + 
memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotiations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom 
PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r99 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/chart/charts/prometheus-10.0.0.tgz b/chart/charts/prometheus-10.0.0.tgz deleted file mode 100644 index a5599382e1279ec3ddcd0b547f9c504c553fea73..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 26549 zcmZ5{Q*>rsux)Ic9ouFn={V`wwr$(CZQHhO+qU)H{?EDNKHNRlsK>R(#+p@g)+~ZZ zC^Vq|E&vq}jlQ@7qoKGoyOcW@n*oaoqmd$;xrQPayR3=|yR@p6g}#lEyOO-^Z*gNQ zYoM!cbJtDI6vi!Gv0f0v)y442!s(bq>gl3Wd$RJ2tnmXwie*!ol3%#QM9}CFdcb-3 z&55tAy^y%jaeD>B5s4`Lq!`dzT2`+*T3clKMlo-2#Z>?&2&!AUJ>HK=liYqiMmH}n zS^xnY#4sQJ3^(D}4?@qK68h<952Qn-1)A-v%(%(!;t 
z>5#k$7y-vI&I0c-tOJ+$$S^4h4lDwK*wNAc`9ZO6(j1o)q*1&BEP3_|C*TYryy!l( z!L$-Xsh?Y=E~IN0ps;)a{?8Ol0A`daTw!`?)BBPA$WwG<7g)Hwlpn-ch|xhYkq=*9 zueL(KrrQ}7L?HqYut;1X@*b;5wv6C_BU%yK<1kgYT^*8$9NZ`}jF=t>NeDMSZ>XYE zhUh#O4CR5<(mJHJ^u^23s7->XeCqhDXZFkwpIZnyXG+&T4CEOplsJx~dZ<5iJ|W>_ z&=BQG6~)8=>j9iyLT~SAA8B*}_!~(ua*iI_#76hbkI!iE1lSCFH(D<&J~{*G06q{{ zJ~yV*MKZA>{4hAWS53H{kbGntdlK4jGoV!}7js{NuCy{P_q``ftwTx>rhUCApcw2> z`5-~{!5nF}LzEar%{%pRIOD;Rj0~Mn_~n)4%RZ$Pex+MVNhs8UK|v~1h<5!5ea0fZV;&NN^?A&fGh5d$4PB*GE7 zHPBnEj3VT|dTJ=_1mCSookDv=@1w90#)}fx4oEvZVe_ycppgQniVS10JM5!|1@xjc zC~P{FWiUQCfe0YfDa^2P%uUH=1VYqij0Qg&ex*#=gLy^xE#Jhld9JuaO77R2t70~v zS^ksibG}HO4ACuU4Ced_`lC?!UV?ZaQI>(Xv(f%ovFlT?Vb!z*HL1v}*0aotPnLX( z?NR>=Yb+4T;pE7mgjiTpl-yC(6+9KTN^$(OzaS(L77-(0c8WsY;Hzn~e#AairIyT$ zvguoKj##B~NX*T%LM(R)803s`h4f5{Y`Rn#5IX>J4j~x}W!P=#7^KBqKf!&phjpkY zWW*1g=qsMTqZgMuIzKWg?%ErB+P1K0{ddxMba)^SNkgc--r%SLHAuyr!h0SelnJbw zc{DQc930&&XhijO!iKs0uC_KFz0~`IHm#UgX11)1%35q>quNZHY+9{~LqVhf?^v@i>lHI7QL)t-8nIW`kOF8T#Bg#j;i3!&jQP{x2R!J^68o+*M9_s;%2P5WCYT7y2N<+- z&Z8H=u2*!vcgjfyoLCTbW;mnm7KNBRmZ_wdO3jpc_`z;Ul`3d*Q z1jm=hiD%5<2V8`~YABytfTW0a=a8-__%6P;Ha%aS+uF)7yIqs9@Yj*SbH`07i!F2c zh+?j0DBRh7sA{H6y;WyPy7OemWj80Elo*!kSC-?S)e_h~%6U%sH zNn#SAoEuXfeqEnV_o|dC1EC0~`=LMTN+iw5gO(~6L~i*QbO3{uVguz7`jsLNIl@Nq zeMKCY<=hRkNOfnei&q$fUiW6v`7zpurO+rxyNxUP3f0McZ&TIYTkmwCKNBs?~ z;S#a1y&lka+WH>tFLflWju=lH0)(%V`=p}F2qR6QLI6c+g&g*ETC&G+Ey;gVfV1GG z2{q?BzT&m6+h(KB534Nst7F)j2~I-5TiL4=WcKDFg+87qi>X9?BfbfeF%2 z_viB+^d9>Qy-PyQYexTWzb_};8!|f{E3WwDhypJZM55l&Td@G6f}RW}y3aRd=i9s4 zpSEA!gcKb4s-0xNe|dxd^juA%B#8cqsBQSnuHYrwUq8tJwRj~V_=c%@-(fFJf~~aZ zd#0wo2~UH0iYlEia$wF84ZSK|-Rfh=m2nf&Rvu0Lq0+BFEd@)3x8Fa@-(Z!^3L1Dc ziUg~IS-u@F4NCC13_gOmg-sMTAwVvov(L_4Ah{wqUD1gQ4|JwGLYfjA^Z5^6|9kka zImh+IU^9G4s>v-}pyG;Thuoy)sc8LpB?c-y3Y03OiVi7`<>lKdXqsJ}H`!Ggf+As! 
zJwIF_8v=6ZB1b_ExRn2LztN%`CheX)#L^eAljU#4e8@s02u~Xp#)vUC7+I=xvt?LZ zf6v`rO@xRN9*iX9Hq3-Q(1H_9gn2j{1cxX$nlRRAuAD-=A~ID_cLG+4LdT(Dm{Q7c zJD}W(zcfLDLc%2uREmA3j1WS0xkO+il*Fr$Cejq);fRC1!LY5a&Ux6^$q;Jrf#Cs!XLKNU933$idPD{T#GLUV*KRKw&ijri+2pO={0>JwJSfw;a zA0UwmXH6J?jwPXmeH{uTbL;I2C37e@nv444#n;#Kae4YYTrEHD@J^{z;%ushP0N7$ zy!034b-nBFb#60Hs0qvyP#I)NRAxfW+Doj?xlJMAG98y$>YaqPUqu&3Dp?2Z!dTRXC*x_XF(*4Fu$$60As8nVhn#EN_7#&)rFvZL*lSCrP`F1`hI8-~U76&@ZI zhQ3d(lJb!RX_msie_%>34AJx2>pC}tSD4VRETHi}%YO`)laN_El-oGvuh1`kd*7k3 zP;GW`q9+O>qY8#NuAf(oIhBHmFk8SeEIdyxpUHoIt|wEgRTlm$^XJIYIZd*RB#T!`Rq&O#hf7Ufit&eN zj5$WDa8vZiGgCC*pQhh6CGv>eTwfX#S!#O^-|<3WMGt&jiWiH;ovIppcgJ_K$5l6U z^3)n$YI?z1kVP|j0Nh>v&Y!+ZHxIiVwbg&B_CI=%6I6)P0wG1|37tdSL&|mBrhy(k zkKo1>6-yolT8kKG$IbRvRK0J4)q!`LZCzs3A=}+HHR~aIzzi?m;9T*#_13}c-50SC zqdKGy>piT1OQ%QPNrGXSw1D%&+@@(BuUW3X*K9uB>NLg!N?Kftw;{DVv2WQTArEdE zikFQeZDm^Bj};0n?h92)?bfMqT_!?omDb~synAnRjDEue);T2a~58OUwGyWQ+B+6846;dz=8E8NRGnEWCokY*llCk zTeBMOJio&4)lez(a!(p6f3lRx`n;LQ6ci^K&r@zA2WpSBHGyT7lWTK6qy$V8DxXX& zXh_OdISv!iXM!bdbMoq~SVm~Cb{*OD9D2L&{L*fZOB^3DBUZQPa`4aP zqw4bzW~>g^&&s=H9gmTEXcdU@cfl#m`129b`DlJm#^zW&O`r<5)3Hov0O!$)-i7z@ zc%j*nh1a_GEF%A{zv$y@a-Ik-$E0y+1VSY)7t!hk**k?|ich*W2==HT95T~@sNDq{&pOUF4_6C~Z60%AfEeHf0 zyxPLHfH&e6fOYHa09(Wrz?7JLWMp7GcQ*Xz{SmuV9hT&LvJcE3zKiYoWfE-0bXK>! z{q6Yb{jrrn2T;&^EV#%ohlaeIk|qqhr&BI{EU&&;V*arG{@A!mQJa&wVr0m#$V^~- zZD|IG#t1+*>(;bn?f1-pJp=i67#KlOWBF!v zZVvE08H)hq_C$s=yw+eD4e756sozQ@FhUSlh{EYKx{6csm@qc5#LVEY0QZN7zSuv& zO++sTOO(|DygyB(FGvSokpQn8u09P6;EP~WknOZ$okim9%CgOXvBBs&!yfK)(l-r{ZJ-;G8w*y>>_P7r< zwh(bE&N1XX^OLd~aPVjF#5Iv>Vam(4%LYk5Or{eq00z_nmLnX=7sGIUQok|6M77?F z8)`rcNyIGPFn2<0$VuQL&XC3hg7ZY}f^!(URd5On9x8gSb!0I}d9|hr@m7saVLyT? 
zmA2Tm(UX0NxEG2=#KWfR4QrbbUypK&9Zo$8C}R_oLB1sso`s5Axfj-0Aw&1@<>v6fxF9dXu`u?`MyYvC#&2` zPYeGC@1S=$`A`Vd!Xcr@H2%2_B@|Fja#ivLW}Y&I>?=!0GrAxct>_$@P4@&N{UmrV z4nPsMs05FDZE<$C813f-h!bmOZ?fx7>!CrDuo~>;9$BpToXN{x&+{N9OHLToZ?`z; z-3OhmzWOu+fde%wLUWk-VUu}+2d75v^Y*9<|J|KE?!6l(hkx- zYQMc6F870Q-t}{$Kly5QgN8^0;Eh3y+MZx9A*1^`rHW%>2b4(Fwv(M<+uqJsKRt_s z+_j86WGdRzFDj`R0(!${SWS+Qjhea~-Qm3yWU#5<-so;;Bv}*>N;YUHz!%|*K`N)~ zzb_@eb4EE!9#+;zjGsSE2$C3l7O#XXB2x!ms#v#SIwQYlz38qon`+7FeTK7EwT?En zx3^DPI;Nc|@A2=$sP~#8iyId8f1sVyb|vS~JUMom=w>jXU#Lb3FG*}SqZ11P^)bQU z;meWnWWJthkbLmIII9%U&{Ezq*e#~n*^3pg>C{)1awU4q2Qx76wW)f=VcV>wEUfb1 zX4aOQ&Nitr*y#G$Pch7ajZ8%vwP`Xi6dz8TbIqAJTg|u@sX|O=znSHxjFa}hy(z38 zXE_CnpO(6ZwLW*vb$Kt#K-Itk5iuG2F{z?O_i|R49pKK{1DKQc6&st0>O9*|6_m_#^MT_MK8bzqF&<+`@a@p~`(3n9my)<VJuPeUJl0DQI zXO50I;!sce-c@v#0lv8gJT!|!bb1>mw=UBqR&z`h*M*S=YTplMm0QQe5J-#((N;c!y1!t0QQS;+&`Yuanu_ z;h692DF0{WudgrcDMC=gAOoa?OEqdj2hC_h#!-7};dD5*F9Q!&D!-J3=#EPEE<&ocuCMrWo6y9*Qp21iQL|E<8341_6_uHv8dEBg9{w< z#D$~~NMEiwNgBC^p4gn_Te9h_3bKUCm|hrO#rVt1MMAvc@>AGtv7TiVLXeaWRR_Gk zjwJIqdm-i8e-AwoVNFFzD1}j=3WM{9Qsw5S9SS^)v}O^d_cw7DD7TudB&z^3ciwhD zh5Lu2cm(Ua;L`1BPY^q%X@Nu>?LNe4URZm+$K`H<5|5JHy8KQN5Bbq|arY;U5XuG> z7BimvyTrYve!*4D7DpsArKN3xL=Q}coKQ&Dd**}X%^zL}{^loFlCV_IB=s4`grevE zBcBSQW5i6VKyqrlH+6#Ymmdr|A5TlN!?m0trUv>uZ+p^?H#&|DBjuieWuFXe$`Vb) zAnnIqLB8#D-TBqQsgtqaqv_VU!8+NuS1;d}hMMQt%85Ir>PhN#QCJ%3P@u@N>&!|v zY(*(sdJuBY)xK$o0+-Fw0C8N4&<_>10Qbu?bgxD5#xTm?V9nCx{iNs0g ztM+|v{H}JDG?b>fp49|s6l2JX*1~+txgq*Iz+0H9Yzm=7~zg-GfT9%)naw|$}&3N%Ue6H+88rF zUokNqhxUV;=^AlAA@jcEImlt(dvK>1hRVhidbx-*QE)}sTiL{A=SQz-zX-U-0CrFA4|#ZXAIPGds9;Yf4$D`@oFt=NtyCC}rxaF->Wz}8?im(WvWa0MH~^8CfXn?1F_?9SfO<2|Jop$3?rpO0C_{H!Na z+2xSxl9|?CKuk*Bwmtrsa6a7-+dL>2^}Vifon~UL&E<{nP+16sMy?~BYk=2{>3iK< zz)S=~K$lBzzXp%zj=`hq(Sv_%cjA&F8)y&KOUFhsXzIG*FqQKQQ;{XQY7UVHI03>Lth}$iZ<%U`S7R!xv=I zO~9YOtuE1FKg;*1F+ALN_8kdI?>KOigaWML5bM*v-ONA~{`x9?qa4c*{iQz#aIdk> z7@XI{*wYO^A!`%NV2741;;Xp&$@O?ZRH@)h%hl=I)zlHUr5YW(-qyfuvbwSFZK^g&R#+s>{ 
zfz#qrW+CP_&=4v4l^(pUV#>wuKMumpN0`t?3BS$}FB{a3u|>z##EH_O^udyHiLtz} z5@Y*}PQP%9NVs%FITh}8Q{tj=+#04MtP3hZN@5?;vjkvHXAII38PQ?Sy7ve%LkakQ zUZQJyFlc|1y!I|TfOCamUR-yCX4dOW!l$iEuMnBSJ>4*7t`pOgn_^mv$34V=OwOkS z1w}g*8=*AME2d68cP@lqvdc1lWK}LYhbczBp^oM&nV6H1(}<^Auh7c@>^V8RthkHA zYHA%ACB)m99>|0v8NBhzhxcnDpP$DV$QEqH)ofka)sj~teteb>y3k5ENW^gEvS)hO zb?O~z+c6rL<3bX>%=sVb1O4o&hsB7S1y5SQLWunKQ2kJ2%4X_`^ZD33y?2MqAIzhJO!cH{Sw)_cT!6mus^W|`?;0GuZoOhj_ z621AEwrqD+de!G}!OASmMCwNo=TJPh=s!3vdu)4Grzg%v_wUqbM^Wp4Ej??tBoPb0 zi|B_PjnV`NA(bqI=2*@`kk@)&Wd(hbcfiK<^M6pdU@)6wW?4Q*Io31P{wkH!rBcMU z5OROtO&1(d_t#;aKA?^9#QI8+_7Y&F=PkImaGrC72Rgga2u8 zC)pnSUT<&n`?`H58n$i0N?Cz*}EUKn|07Tn`IAa62y?IYioTJbu_C@riV9VWSpAL^NZ?!pEDd zV{aEALJ^Qt`n(69W{?iJBQQSS8?#z(|CTo@>H^NGpQto}p2x_ZC#u=;&c;-|t}!jo zH#nyL(^E&R&?(`9gr?MKIW(%JyCGpeL%mxz=;N;|3rMp4k7z?5Em?lc)z+Proqn`Qn^ z1O5O{VwQsADf?9_GY;cTlO+fmNT@SJo)PVw6Xd>t{WDqLrDOB!*7+fv*VpIeGO{$M-D>+oL7L~$=XOM@6(4HY%8==i^WCeK z^_**>u%)TqK-q{$=|$l3+x_EYefTm_bryDzteNiGga*WuQ6fhvT9Z!=ky*2q;y6Zf z;OG5O=U4`1lm$CqIZ4TLlyD11c7F3FXwFhx=q~%5Jq{sWM6%-N>djqOIVg@8e`oip zkO&0m$KB!OWB5{6)Ey5L9cE0;;QO-|(79ExU`0p_N5xJhL@*W?Dah?icw)MS&78iE zLVP%!QP)Iy<=-(H(l)N6DiiaYLnaT*QV?b!95p0AI_#itJ0S0%sg=oo*uSG|?jTB| z`|QBlTm<-}Xzpsn81wdMM;^MwMBmHNL29tei)BG-I?G0^r2Nm`3uz_>sWbF@$@Z_G z1BIBt6U-#%7>x;Tgr41=p~IpN3%b`eB#!k<7e&%C4--0th^SAI(=k*!PT1{2;)*dT zcY7hPaaql)n&Zj2JQ# zf2;-&4%`IKv~t26e4MK?RDbManHw18>UwcEv1`kJr zKX(tsM#VZmcddCjIKFTD(Q{&pLYKkB#F1ht9GPhQ;iCIqz1#Uec3pFPrHlq-vu8%q zcpzBB-D?Xcot?Y%5uMLYz>B<%Zp*8v2I)-=*s=;aMuZ51F0J3jp`OK3ZXf2((T#RXp?9Z?3?iu!y?(#e{7(&(#Y)LM0jCA(rf-1~y~76lbzaC&?!MHT5yFw39Q7=NysQ2upp zVa|H4RzWl`-(e3~mZxjNQYigW32E#YzQ}jFurkE5j&B^M+!$_U4oiktiPe{g=s6}@ z;y9oal}+86=zHI&_{FmMqvtr7%wm!@&T{b92IO1Bs8EGN9-j2aBiomF_{G?rKCM^J zoHp`ea13u97%6!Gu8>km=8+jWaiKzz;O$c*V19zloH-|NFHa|GJhoMIq3+4x#E{%8 zojDm?Bb@mF%F@S+qLa;IK?kVmZVI6vCW6M>sEyk;RkV(BjccwaQgd)=vgqP=;DL$` z_fz95vMN_aP~zI+A*D)7n>=-uMwv);W&S_$3$ z31@Z9H%slTU(W;8HFL`wmd#SLNV4S(A$1@lz-fAgnYluiVG7=cI*m 
zX>SL#iaIh%2n$JrZuv+NWhAEMmk>=yT#Gj83pZCbOx*1d(r(;L(22d=%7%1@Zuf)U z{ky~AHNySG^tsQ0+s5ZzV3*bIo*M-ztNHL|RQu>#?QNAmm)yYpy*fRBJ0~w5g3C+prW-4IfW6wSr7hnA6P#5+ zQm(V8fKMP*E^-8b?7HM}F`bv*iEob+57>IdN0fMpsb!}yCOb?ADc7cvN+(I(%nO7m z=HCInlf&*AWp>zxiK)lWw(Tv$uX5~5lt)AT`kOMZ+chR@@*sl2Gv&qbi=|%X$Se4a zlO~DzXeq_z=H!sNyR#lI3djwhDVj|u*j>2jn`27#!9QXjc6i=kl^6fR@1c|j2pvTv zxIP20=P*bCd=+Re5E_6}VtmE?VYoKHVeTEp&XS>4IOfU{E`aH*Ol_o%4?PZ7myk8@ z;H1GsLpGP-JRtp;JGD)PU9R?Vy)@>0vE7{!648qRq96SK?1E}}fIzXqP@a$8=u*1x zUIxi6vBw~WFYsk%cW_ikF*@6x>h zd#H7u4Oth(?f}iyT^ba>k8gfnuOwfVQ$YW`%+Kb;q6c6!NEOWZAg)jXC!54ZGWFGd zxuv46DsEHK`pZ;3oTu$kJpjZXR0tV{GarUv(sQV2-z?am|B`g{F@7MC7&|>%+4K7x zMYi}xLlGr+8~+f^fmOSV`DAshfK?e?OQUelG9St4ur#zkqXvc828bFZNA2FDNgQsg ziTNKa=oz{F05Lna1ed!Jn})2YiK?Soa4kJ2fwQ2MAQy~Ss<2&m{qpagu(Jrh@XS`I zoe+z0j-_a35o5|vW5^MWV``NxmSy{X@V|eO&g>wvnAT|3|2*kz-bAXMo_O{@T^Mdd z(S6sxVjp0nP-4FLXuy*(&V{kgDv>X8L}G1DF%p6Glk;M8cu1n9aCtIfYrd*Ws{-NUoPp{V!P~l5%mvACX z4GNOEjO$QO1S`o8g9Hd7@x~p|1 z=iiH16z&E-NF&T;QdT*L^85X!2j@6&zHpR(ZM;-forJ<^!fW~z_DEt_Nsdop9{uqatsiMLtX@%aWUQL! zkoO(@{y$kX8*@Ww|apXjDQ?y?_UF`(3CPD@_!Wbf2{|*M9h6IVyai) z7J3StH7gb7)4D++*Ra~72ye36;7G#?m!1t$ZRNVTVXORDzvkU8y(bxqAjN5y3zu~r zV8XwnlL3p$Fnu-`h18W%QV#-EBXuG4o@%Q3w*_)!*LB=v6US#5lHtuiw&9+p|hmGCYveq+!w=yRGNo@m9K$j)k`fx&9YME+B-a17WX@FSx63)5BHb9 z17>r7>laW%d$UDN>FI;ROBe5@ZwjP^jsx3!*6bLEo}q^{1@&=n_2xXbo(ueJ4wo^C z!lS5Ei{_E1xy7i~FD*hH48$`a)#`B2bvrozvwU&pNQQa9*QnVK!p`q-)u#(kpx6 zs_|_mNq49S3d3l$wMBg#%rIgT5=7c2SYVX=+-P$m68Va2%t7nzeJ{A&d_#W`R%#B7 z(Y!xtHsCGrr5c3tbYi2BvP8u-SW)xaL7uV~;G`spaB{i3X{I?VTB8-QX9IEBnIu{e zNE^=R8;%X>={;Fd3M&jBeh8*GqWm89Ga0BM`h;5FAj+-N-A!-Y+eo8#DdaPTlLSFw zddyAoI33M7X^zI{zY85h))2h-g9Sk#hlk;;x}u}3Xkfe3ph2{2E@O_m(=oN5;R41H z87Ow`>Q|=Ztl#`76(`FAaNUkojR(oN;tv%Q8~y|w9%g^fs^l2I_g?DTu>7#EelP!= zuSEAII&QW<>1OZQCQM_W5NU(7*x%NUr!#%#s{un! 
z{!}p#lkaUPP5+_TcZ+V^)!kV0MTmIcCE&-qIRMxmn=?+jhhR&i+K0SR!I!oNV^Sh{ zanJ^lAC(?|xTa;z?vJ|Kr(l9FFTK}0htTmVr(o-%Kb5h`hU0#v!vU^wc1cal%}Gt%fATzgbn|_R z9Vil&sCovFGYag$$Qcv(ivQV`p3C3wvcMS=_8w5szT*ud227JkU5W@vLUX#j)~U1< zocp`4ex2sHA1*(e>PG+Yc;?qo-e9B2zab7BCEMO`!M1bAJ>PgG~UhT12SB|bW z39|R592XPo;`P3t^1eTKDrfUC_Zrw?D&hi?23Ku&CEv*ziu6nn=^C>CsQ>WHAVtn7 zOdA|?p_#mCkz+vJIpzH2zhz*9!2iDn zphE;8)RL^JmW{IX5f;HVr*JjKm=g~L*PRJmAB{Ue<(|tiV6gBH6JYfaAF#Vn*uKKN zgWoda((G9{!D9gHKqd;0*_!;zsFaWWC=C6twq>oeh$3wD%J6~Bdv}>rf35i1fJ4*P zX;*_-koj-q4beXx2x`fXy&%K7X{s_{TnGbFl3SD zxgJb)Fw+Z{#`e$sbZw?+1DR0k$n{zhWuBc&K>NP!*Gs_}UkgT0mEIZey*(Y5Ge~fS z$#8!%ZhU|7(+&f$5L`Zbt9dQ&@2U6uCVidnqEb?j*w3tvtA`zNiEO4t4a)h%()EX^ zzZaV=J6al$;*f)+aG<0-E|A@%4VLV`ETSa#qaKA>f``%|n{H*tV+02BgA)CON$y_C z@6Qh6WRUrtZA(&c+?w-riP(8W!cJ0kD{vi$OrQmq&LfTumxB^%FQkGh8FX$B>NuyD zKYQ7}dw+lS78as>6X0Fn__jPQSw~7II(5gb6dwE{)XlN8EZSJKXdSSQJC%Vob&3`d zZB9GN!>Hu`CbByl04)-?QAnv*C?$kR}KQscbXC`6Ey7Jbf(Ew zgmbF)@ooUx{E|dS+8!K5jHc4Qai+6ez=0^HI%#t~6DyW{Rax%ag4W9oU-dCRKC(AD zQ{A5*TwbrYIy1og+5F6pBh75XDWC;f@#07XmH%;a$kBq}>0~xf%c>smyq}h@4BEGR z3)mmrpF;(B|2ugPTOB{Lra~-egthjigYE4z^p%w(Hx6cPK|!@gqg)Z~;kxspxTob% zF88|{!@iqdM}94*@!EUs+Iw7;(f%pY;UdM2#2r2Tc9M%}2 zSYQT_YrUGk`IJ4_4acMCAD#C<_#n`NPvZ#&{jM1ZUYLQixt^6u>?FsRW&A_ox5xA+ zx$Z7Zfd5_W=#R63XY@zAD?&daq<-;mT=lT;u8pwotzgWZalU%VVV=GLVeYQndfONA zXEhEKeyyVS*7Ob9)5?7LnNXps4LRz|hsCXNRiA&8-%~8AG1eJ)w6VR}FL5Jqbl?h+*X@itR`Bb(m=M(UiZ#>G@qZQ}qTgZfNAr}KsV^gpjE zIXr}b>I_gHPlG+{;{b*h&)0|kR&;`1XO?x_7xTY>)p3w*0YEHdY~)LC=wScMk1LJr zzL}=Pr!7~$BPtHpT0*{A;7J!*8Jo9w>saK9G?%u89@*vrN>YROJ<3WSnHh}+7)Sql zp0ZsfHlW{8?0B$##j&kMu$YdKQtQ6TJX@2foYlqJm}Xlg&t_SnHRumbVWL7cL}g;c z!{3Z-C4urf9`g(~wT5?^jdE5^lop_l5Tg9mmO0Bg(=RJBlo<{=)|XHHJ0Z(YPQ>?0 zroi^d63XM_?gTE$-)Y9t{E~AkOn=S6TG(AG(@K{>)Nu=szy4NKt`avWE_?IHZa(gE ztNYh#PGy*FhfmDovDFsU# z{>t-jr*SdpC2falUWE1$k=6X@mBGk^_>XL{QjYj~zM0kiH#DR{NztQ+kjEf0W9b0g zjFTUI0%{TbQ^Do0aSLE;0AjlW4E7H$eBa*70D7}ePc|n3jn)P7xsYRG3@Ov#O{7sg z$%tj-*@$j{P5RE6f9vKwKvwgoR|c(*{2j1gnD7laEc};%CK@AtO0huel=f2&foyIX 
zyxTKpY~{=6Jpi@w;8hYozE5v;Za@6LZm#%SAF3Jvnz+!-h-kO%m6~*FUNi!lEHCDp zlf_f~?C88b*~Rjk^i43tyEQJ&zOKgK#)0FlAWQ6dUe9oJm`Xm-oR-9m^?MIAEiRV8 zCp7o!RAXH&Q0AD5-QR36|9VpQQ#AFx-DxK}KLm+L&huhS9bqmkyEV$-eh@!^xB2*BJ$0%Uy}#Jdom(RWHcGXGzInT3aAC0hHWSU4bTPK$CvT(sdUBLRwR{l{ z)hr64)U0IMNfy^B_?)=V+o~GaHbZ=Xi+a$PW8}|>yzS$oU@iKCaIm!5T-?#BuCJ-> zrEAE2D^wkpQx_N{GUzPH5v64ZodMCjjveayz)G>(a_A6yj*!|Nrw&P%6vPgT zE!=v~>VU~q8?FE!PqO}$_Q`Sl4{Wfbj52j6_oc(;U1I@v{Ib4c%$TY(1TA!Ax|*sg z2`ZTDiBqek%;_4_hN>C@BkWQtbGKzbKsDmsf*=0Nmy?%z%@>}4pKs$GV12v}2cSUP zLYxOHLAIUDh0|+SM0gmIN+v^$U>4q#Gc3kxbQR$?LqGg0lGi-Q67c%^V3+^ubGb&% zfA^C^ky1rlB*-Y)@%W=N5vRx!Nv4S}(PE?J5;CZJxtQ?7fd4q}gL4izJopCO-QDlU z{Ma783!e6JZ&=4$>za+#MtGbeH!wEE7#pEIx9t;$YUzB?Hf;3&Gs`TQf-;jqZ@hK z6HcA&Ce6|ftv3zio6!Or8J3@>23(5QDU2%u28QGtG)+w@?k$$D3Jr5F1&s;LEiKvR zrrFyw7!3=|n~~1r)Som?2$r(|Rfq2+Sx^80ub0{b0PpInx8;%6^M~fchdbDI{nn9% zeDxY$wReH8d+Bf+ATeR0I5y9wRh9fE%`g3t+y5^kOxkgBZd?Pbd2_yPr+>2lew!S& zFoW0mdpSbyT5z9k{OJW)m!_}(o}2?HGXVd4RfPpVr+52<+F$>QN!^s6+Ki9$mi0bs zGOvb=F3{QJBRkc*3?LB?mQya+N9Pur#l4kdF7%0hT?!())RXKdVu5PAswZ(ml z*x6wtWIwywZqjPRU_bNG#-P6twKYDyB&Te>@U-UGnRwo)ncH*Mrtc^^bjB92?7cM6 zH4kO})Gl5XFH_nd*LqU<^~KpFDf|XA{C{-{?*Gv#;%76Td)7b+q(}R58f{)1danJu zEm?2m6C9SLi_E`w{;Z=a@#Jl9`7QVVW7?nFKXCnFvlzyZ>amw@D>l;22T*}m+*OP* z|G%=q==6J#D~BY(0bm-T7uK8Kj`XeWK~7C=?fT0i>sI?_^UPCMtUK6lm^0$NcA;}* zSuCS;-96X--&CNp^7sIj`p#PNQ*T)4bwjtR-{``9!kkkyNL6iL?WVpW4HvblpxTP; zdi>~$onlR3BZ5q&TaxOsE@bR6bE&&!OB})K#$s~LPV#0O)T24r{T3OxzgzNnMz#HC z820GB5(ro>f9v-Au?M!Y?$6u_zP6)zr=ohZ2hKA;_xchyf*$-w?%<&QJiybAbL-vL z_E=p3aEoXEG*R~~46@~_L{)d}E|;Ze73`uTnX4yVUZU6}M)I9)&8}jv^q`(*Oey|H z=>uI=dG;eMm9}E=p>k*7_b9A#>jwAA>RiJ}y)>i4^dxmj{pLc)xut6^Y)fq*%YZ|_ z%#4vF2~+U@BlDL5#Wde`WFe?h*s8~mVVqf-$UG>=rZs8#-NJZ{I-f%H{fLqf30##k zh+=5i|BuF#O#BaxkBMM*>M=KR&C*ulK30p|p~~J=$KWy(D%QtC)Cclbl}a&q(0^wqI2#x`ujJePdk!IHUAll=}Z5{`&2s6$P+&YHI{+Pbm8K ze}F6Y+N5T!l6qO=Bt)NhSPObAv(_fdKK|%(eCRg*n5wk^c>jT)&1w7l%R{eCsKXNE 
zD-BY#vEgTPtxu`or+Y1~O4LQ`MD!tj<`q7)Z|fxaXEX2gmv8h??1%Z$KT(!|_(xkInN9|-3!jCG~&9Kbpt1@5om+$qN?(`QxpX%pu&#vte zur<*b0FeE>)8!M2a&~j@zN^(_u``>m>=@H!JFbk^O?+$>XjfjU9f`PZ+pl2i&CqLt ze?QI+I41UK2Ce6Y!Hs-v_7ZAMiEbZ9s`M^nFfQArUsA2ST%Hiu<;${V_nXo0OQo|<1<}cF6LI3%&$f~eLD=bp!6Ip@8lpVQmw@!F}J<*4Mh@$xZ+&pJ8Y&gg8YrO$6x26W-8H|ufSY2NpRO{K_3_}q{oRxHU@QywGS%|6HA3PL z_1|)o#9zOfk#b`XSplC+1u@b+c>>dtZ+059A+6eZV+mk6ZC+`BHMa&ewM>S~y_?T_ zjaLtq6L0M)XU3&N>3Kk5g48a_u9K3ev7}nPew?ZrlnY`GV>q>Tyq+xi4ad__bVgmH7||GU#4cEPp-71)1Sz5yoP0p> zO7LXO3|!-4QK6dNoxQYOjuXgd7fEityJU5b*ECg2r#!-4ppMiRnsI>l7Z-w`Tv#)c z3EIC|!%fi?+1Po-@4Pq+lvVTxgp#EiPe`afU%({sBTy6cPHpB*V-k>gLShti4?gzv zY^v4#lKvv^;RPn**SKO6y*5FNx{aZOvjx&NYH+c_ zrUC|QxGTmA_SfEEnBbZv@jQB6D|$i{uNix}r01=gGkJD<5o7)|i;0|9!Or`d73qUgUY4GPxw%Np8WSCa z+5VEL4bMwfwYd61RQrIqaO!94t%>LKPJiOwgO2DaI=c4*umR~8(ogrMMY%juZ)K-NG4rc(f;UQ(>M4#c+dycXQcKbor*0)#sgl62+#aC# zs*WMVSN7U&-aozo^kpn4;wTnUk8jgz$Dtl);W#hZ<9Lpq_+&;eC@*pfzFNG!366a5 z)K}l4E8&(vKJ!3mawT^mUZb&SlaUDZB0Q#$V#E~5x6aVI#6?!37EX5Ju7bmAse~;I z{2akbvfhTC`saTP87k=M?0y(ie96_y1MIEs+w5(4ncdkhllow-c>5+yeG4{U3}SCZ zt9K*Od!yC+cAkyv3#(Fb42z-dn_#_F<%?h@5JxFRZ)X&-8HvJXKtC616d|%y`l2oY z@w`yOg}7|yiq^B|O9rv-{5i<3H+>GMHD=F&vx&)b0M^f4t>l-jg)cAgEt_Lu9R^jA zT%ih1bzX&P24jM?Gzy=0Q?P8?%@u@f&q?^J9s-s0;+vS~3jNEk*4{6+Sg&(&YWf0X zRv=WN(83`KAoZz&M1=|+X{nM|p?9<4EGf5gc~L4rtcb}f z*-$)HS8RQ`94N&~E%f3X(K(JKlD|+Gpi@M#bhW4wSZXWea4R#TRPpf4Zp~%=u_q5o zou!-3eBvZH@MRYC|-lSQK8Cjl}KuxG%}S9RcDZCUu9#1S2ptzk2HgO zhH0~+#FUh=QC{LXE{WCI2j4Pps_sV*fEypv>3XbcJn{X@Y^@@Gfv;e`*L{{+t4fmf z(#1^Ia~EEIFf-q4u0Ph-w63pZZ6ZB6tyrofErr;X1=ua1T_t$dFAHVh=3=GZW#p)r z|K8T(*!&3IjyX`B{m%wcKgp-T{{Qf(nEz?`c(}9w-_Fz8{-=1UaqG#Ss$G<-@;+sv z_|hq#ik-TRY)^$YmW_i{Q~j3hlv?ZNo#A_#MMP3<=e`k+YW}8u(4%^LsG1Z~1@v~= zqqdMesxanFiK9Hl+vbeAHPhHX#Z$-sH<0>CJ`MbT@c6J4|L@`9&i;2R&(i$gp8@JF zB!Kesht2T7O{9HlX?R=6_*9i|X|S32>UH~n1F3)H)3pB|JuJ%qj}9O0;(u@DskAKq zf~kMDv5{*M{?v@YMsxktpt|uiKb24I{$D@!k9?ZW|K<3?hb;b&x)C}1u0FN=fBn=y z@@eA#hl4>${(rp7|GAatHvE5=G&QJiWNVp(s@hvoQGdJngKpQixFKs$We?jX463wC 
zZI>&kP4!Ks32KRI%M3wP&{j_lv<$=zUGkt7^=dk@f)J4H z_DmYEG6TIPFGyPwWqOX|st)}{yq1!F;y!nf6mOA^>yA@h)mD#PXPU8WFt(ecPrh3~{ZpBev zE{j`Z?UfqdSnjr^+PJl}Z8dh3%d+Tdk1GW`CSd`vc~gf`v6jV5U#t4B zi!#K|RN%9smyA`S=f+u0NZ8E{yqo=mJmHj@vC-k!tThlZl(~VdEf-bK9z#sJpUgb?(YA$@~m|J zw=)3h8oABDZRGyjgu1&fV&6A?#aH{(?thD|-Cq8`!{MN8|2sI|<^SHwbDR1N8Oe5?_9ErcRFY-_77M8(*x4-3NY+LwMs=a5xYhIcJ2aYDjZGIabL z#oUDGtJh3=s%ZIh7j?LP9k0HYSByiv$#50TcecBr$1Q>$@8S#Z;tTKM3-96!-}Ufd zyYRxh@WQ+B!rKim{Kew?`WN5r#x~Ah?xG9pXCu*tcaeLmMrIcmcNZ6T7Z>*yhv3f~ z7x#9e;w~Q+cm24yyTG_hAg~LJyLnBccPH!uW$E-M zGkV%R*;8l#b(O$v#D5q*8kFL{9SwHzAGh+XYX5bGD8~VePEt--0HYeGA*hSsc-s5$ zgEWg*(c%uZW!SG8)+q^da9_kmv~1dRmB7qGO8GaOp-CIFwStR)Js^A9FiuhYvUOaM z$~$$-0jO;-R?_?eA6YGCR+@y6BYpcQ;ixx4Qt%y1Jvu=i#%*#n=z zA1UFwpGLm08?&xSA{=1kJqml;EY-7`jxgiklM#$Q0Z#F}#vK&g-1NU+ZUcPZ_kpO` zVzTg;+y55*@6Av&$J3ZlwAnVA?EjAsj|=qw=;86s{(mdaJ~)M(BN{Wn2~e{Su4gC) z;}l0SqE-Tf4{(ZDud@%{&M=cP!U*LIuo;RXFpbDqS$g7l`T$TAL5{DG)P`8^VH|e$ zL5!yId;{*Q`!^JVYs_ch-w%2~DzHf`YYLP=f+&b^jC!5kv-68{PAKZ^gQsLZCo%Z- z={X27Wu4v>^S=C7q2K9^|3&-qU-M!%?TdfRPj(geb3kJld`J^8!4YDe?|bYz>3rWC z!w;SBdwiaBzW@I^``}kdF-aMC@$5P4^b$&bM*;8jaEM@Em80bMPUp=(Uq3%P|C(fI zYyY1;KY8}*d2b$Wx{ccXKO77WOZh(zAMND7?L7P7G+P0k&inkwZ}(@MCv4QW>_&v> zcJKf|@cwB;(h$5>8~rJnCn*=C;%Ab^q1@kj^&k!=vLevi<^jMAK%j!`G3lJCjZ49t z#F!H*K&j6?@PdPYL=g%&19QYF4j7nFGS@1sl%h}zW(+_MruYiQ0LL8BD;Ti{0A0Z- z6{i{*O%R}u2~}Q)>;VWdOCq>nDrg{LX~dZ%R3Lz{5DcbMM1kP#oCy(tkvS6WhFFp? 
z%m5inb{-1iIY!d(uGi`8?}N85|Kr)&e{?#RmzUburt=*T3>M&+aTrB_aUlR$qbthF zx-Vi*NtgzrkxoaF++YSN2V+7w;}j-h{MyVF2s3#QkXTgu@60__RT}mCj>qbOaTf5j{xG3`DTo$hqrl5EVxTQw9Cikdcg#USNda5-Oe^^40<-QahnS1!nlS53!T z(IT18#iCGy$*dhC22~(o_0bF;R$q+NX;!P;96cbwzIWdJc7NabeE=AeD?kdYYKbP0 z!a3rIil30nRXRrYP6$)Ed2}~bW1Y_5!S~<4#mV>IkAO^XB1S;>o9TJJl*1#MQfu&C zjH@OiHJ|z+iV#Qgvpw?Gs`E~Qu9nv*4>d(|a)lT$lWq?%nI|MhvDoj7kpM$(gou23 zsOgGi1#6Xz)%u3-o$_U{r2EVH^-DBGXVXAPUjsHYk*7u&xyk| zjv^2vMT^v-OSFtN0n0EGp9MxSx&{+M=a7Rjnh3Fv&(K1ysWh2V80yh_Cx3|QlI$iF zT}e?$wW3i1^|Ykpi!a?A_J(4R@GJqgbKnd{}gywqkpcBMjcnm6opcT zKp?g$nE;x`u|NmfRn@6V>^dPpe)u8Zh?2hq6egk$`Y2$E!NW0tG2t^rvy#^{93hMD zF+n0!Hz$Mw1cRBiw0lLuq*1tZ4|tbgo-#4SfFUkM;ZiV(%U-8*B4^O7xZ~d7fjyn6 z*qiQ3?J4ow)F2a(uMxrWB-21<+(XErnP4#nn91;(V*5aj#pugDV}TfoCSGt+Qmp3r z9L1qpPI)&4>ozUuP4z&$kqb_ZunNza6BxBH2?y7Ne&~U7q~`s-)v{RdZ8w8dQF3cP z?CWiKAZJOSN)_*vzgT+xy`8M>tO+YkP`mVd-3#2Qzpjws)yl zYVRR?^yIWXkbEcvm-&Y-iNebsco73aLqzS}N$mQ9p#E#{HDbNM$RoJif+D<%7R0mdXm&-Kps z7D!Syn?jDR;lle}^-_H@2FUVPcDK`uNC2bU%zOkM9}Z@wW}z2M;SQ{U%zI=~mB}BH z@eO-gZ|CkSv&Jle$08Pg(}XH1kcbyb?zmFQrUjfwitxRMwxmAf=ItNaJjN(R3ds zxPLX2cFQ>+-InnT3z zVrHaIU005D0OL+b0Mz27Q8izRW~O=-`nnn-X(}a8WgW2`xtKeOCWs=j?24q;8EGwP zKg}!_;1r6PE!GN2kjlXJlFo~szNbiD7hEWSOL8Cwi^8vc7xIDZ)cd^a7c9JmMt$%P zdYz8Vd&D$YgOaaDu^b-AS(E~cpar%$f+qZOOvS>1EF=43FWR>?j`0Hk4n@GEui6`d zJs}9~c?#%Uhx)YdDE0^!o2vOb!;%g##H{=%0mQu{6Z^$ksrS@WB zWXukVgNTT(;TVJ@6?9PEu`osBp4tmW9l0`r0>~4vb==HWmw0WRJvCoCEyzAVZ(K|> zlBl^*r*mp*gHPa@q)6&iAb!cBovwMnC(!MBeswM#2_X}&KYv+3ea#X%jvHr%n+>(Y3DzoGc0~FThV~8vD)gIh-~E=5Q(?UJZJOgWkXo zCwCBR1Sg8>yj}wAi^*%kPbp$3=0(_<6O91+R?=JX0p%(ZtPX zFErb3unwv!r@R6ry!XEq;dxDC1x#z>IWTh$NOTP$15pGfX%s1i7iQq{=Ekwhl~$JK zam0>4_&ru!N#3ah|Z1UlmwNeZm+CC{q+8Dit|m_DU0_F!`C1 zxf(aKHe{d31VC+NX=DL;a<>ExQH~`rKzjA)IaOrtq%hkPJbmy*x`(4|ir<#bSQBwF zl>JpATSTBfoI9YvE5Qlmt(%gz&_W&^a9n_=>q{gvocn5cdqvVG1o;wKaWjBBgxDDo zof$im&=r-ILm`h}Re&uqqC~O`OxIx2vnf3}sjGqIdo>GDQUg-vk*)+LzRG3i-`wR? 
z<8lCXW%*X+l2%#*l%QBnwmP;uEsB?veuuHm{Sr(h~~m+zL~lVPg!q@`79j^0R&#NE{w)q-J@=h)$U7GXJ3El=X2HH z*XTOPh)v;;#~xmQ%MF- zkQA;YW%U(-`|>V?B4%r_|NdmydsKm_aT*aoB_6pGi3f|CJe^*_$d{P2avwG)z!Yt~ zf*?hI0un69XPg9y^7)h?rQ6A|g6Ut&-B1Tna;nh@0Qlora zvid*)04lvuh4ffwKV9mIR8N!}P*uQO+gxA1KSMD5C&e7Si33zx7YBMgi?Ovua4P;AZ?#j%s+MBm+Q?i0e`X+5oK7q@=DB9Q(!~ZI;yoyGonffPi zdH4%%hHF`Nw}pSM?~;@`_$P3g#tfC^?Z!MmZQIjTfHur)*_?Jq6^)#y;}BCxQKbs2 zO{1LX$?~rRC*qmxh^v4QQhw->dXz?@ib7Ur)Rf&wP=F^3MQp9+v?QsH?h1f)Cc622 z2|g8?vcu0^N`pg&DGEWH&c}$BhREPtCX8h(QiqJCOi(Nenf6_kS1 zm4KDQ*!ekf<}Or*K*C(}YaEEcUqOWDxaw?O@FEs4pDIcAeYQIT_01=O6UKRBLm8h0-Ry3~Y`uQ%VXtMHb2wt@HJ z)K6(yIRQgynhcCg|MKEgP+uW@R~>4YH?4x)2w7c8yQOd~{t_j@SQiEN@x)yFqwt^} z^{U(ma&KeC1!;P53ZA}rcE(IbVIy<3Xt=s_bvvzjgE4>Td&!B^E0Bf5uUaM#jLPfs zHHpzBAQW6ajWCM28f^8tqG7&BjXw#{X5G0U<)vq@q$a3QP`T%| zPXOuLcjtZQ(&xi0z-I5eDpsLKUt274S%&6Q+txt#scwJA^EnO|EW3ee(V*7jHnx=O za*w1cw<(BZE1>ln-O4bI1qEhJGiB9iLzKoQ&`PcaM#5As95s*&p3p5Y*IK%Q2DUP^ zqj#KJvNExqpU%j2{0wrK%XCUWkmA?_5R&*F7r+o#(lfahJZ6Ry0J)I%C{GjVu9Bi6 zM6*G}+ioCUcO{7O{wS0v;pW>M)lhwVW1O!i5mC z0zwgC{bd@~i=st~hByv;KT6_Q0nl99<0=l2Ny75PQaqID#vX2Z5TlzO1sN7Fhxp4w*pM^|58ya!l) zo~Z<1IL6lWqW&;j*}#Smw5{5?twU{PKOk2Ys)tuE2Dx)l zaYn3apezlQ!MVZMY5`~Q)e6VAGSK>iVF_LTsn7u{+J24rHK8AzrRc(ve4EXn@~cso z^s%aCnWI#!RN1{3&z1y#`r=tL=q8!7)QnHubRaGzYI=H61+=WJUmey)rKCpS>q$sO z@V6-bHZ)}ol4%A{)c62X6zH3v^4c*rsUCYDr`-*T+-`q-f@NXNRLhhOC`X=jK`bLm zIxszAWHDB%7*k8S!?B#s3TUkaJQw$6WIJzbez~qdRO>`KFVuoK=f}BTlJmlqR8>ni zexQ?MElt)z%~fjyDN2@sxRPWY zta>@p8Tbk@(ht2XL^@d37^K?pTG*rdz!oU052DW+)dwoG9Q6Sy8;*KAxsu(e-QG80 zHtHK1Z8ho}+eV{NXJ0o+%60=4aDKWVK+rOM{S2na;+MtMLgKbub`o^J1oi;7r zH^E*{y0@X1OknNqHe0|th_*3+weIEZmRg&_dPk_i6t<$xm5pNS+jiIgl5ICo?`FH$ z<=U&WkgaXkF_2xZ%^FkL+9r*q>~gKSHna5&6ijB9Yp7^Eo13%NHtIE=U9RPV8Ewmk zYs_fNF-_Xq(mE~r&b|&Ytv>m>6xurY78zcPYjB6!wL7BPtrYFzeBk_NP)C5QwuTv-*{z*#lepy9$0Z!^ns~9DcS(JkVU483}PM= z7npByL4iF{Vs8ZY&WMb2rDCb<0kP8P7zRDnSoLZ1?5C5b?%`4&4~rpyY(t;&2G0WI zxQ7uJR$O-eH2$xpv!?ox*Y%w+>)=$MGqXxX;QEBw4Aq=74|~Jo-eD1pd&X>rR)5lD 
z#ax~OD3JFCV#}4|+?d!X(%Di62gsZJ`5Z`wnJ2dEyRlC4CZ`b@!>Fg!Xcs0Np=kae z_?WGFHLallF- zb&y0%;s#tzG({astbkzEX})eA%V&C4%saXSw2919DyY?-OyVgwU^l`O6fA-W?fq7O z|02db&lskT(wN&yS3wzJ8V54~#Sk+bBHQ`g5Jpx=DtWn1|$s zm}v{EVU=};KCG5j7y7sKSFbcIj16y^sYb0KD`#BvDGbXkRHzMEDRJStS~upVB)qKh zE;np!!z#5Ju{89lH$$ni0};zBD;0FYz49GnRP^vbWk6rUX|9+D%zmkXh;u$jdIu zJA{NU8|yii<^hz`pW&@fqLcaw7%!tN09f=CbTEkPpNy-iGN$ns2PC2ixLT z+j8;uG*_d2Z5(_9M}|5q48Pq*li}X#ke3ThP?&&1Py!G5w&4gof^n7WeqAfxXapOA zUC_$5fa+LvXY1Y4|CeFZ8ow2=tWKC^EqwsNucs$~jDJS~FP-2Ft;xlHa35hAo7T#o z1_;$UUo?;TgFMtH9Fr^KY^;t0#eMQ6Jv)lJtkM9ocK0Wq%~i2%YA;Ycs+a0(lmf)U z4N5ge_4gsB$HBbfGkt&iGg~pi-P+qhgB$qk0LyE z;i1KkOH<`iwm&F0-$)_q2m@O(j)4oZZ3F{v2lG3kYLqK)ii+sxEcQ&Xb}v6>NQJfq zoQflJptM%@1aSh0NnfUg1QRD$|76}HaSbB6o1pb9fx)a)%W>M`CpU8A>P$pp+MUN=KQ4t{6ba zmgf{ic`w@npt`?phG%j|U8t;95PVc`niFUi=MPa5M7sXIhjMcq?X ztQqSDr=o$qJsmRXOr?XU1@~T-1Uhqs)KseCn@kZQ+qiJEQ!sS^`A7f6X7&nu?Xp<* zzj&@9+P${F=kg8s)ae*~i_Dvqr4O+3Yh8A`LMZxx7Ek1@;rj>KpTAvZh5<5XmYuxs zvIl-9)J)!krE)BK694_LOfaEPX+uAr96cJSR1+uqCIDZdXz>6*@INQ7UMkxWnbU#E z0l+$`l9k;-N-FX&YGqZ>5^TS+8Hr@BU7gMhGcgJr2WbRpv=9wt*NJ%wE=69(C?fGx z4hG3VYUYt?^7KK~Gfc9F`%ZuNWk#XSzJW6A%C^);i_XZzCMvm;Trj4%`UOB&eH{T9 zOKqusT?Bu)9Qeal;N_fcautZ5p7F3=JN?j!OZSyQoSS3hj0iFg;}D3yw4UE~$to&& zV9LoHN>x#$&W^^OO{G66im8{`{P$#f$~lZvS(PE4CJH67j!bGqlMO4UNVc)x6HULQ zU4tfI_BqcIP|Q(1%MEDCj2D(kguTZ{pd=dI+*mo-K7Y<6FG2JyR2u6k*~+AL%+ewx z6m_$UuR=LP~ZC;RV=l5{%L{L>0WSj#t!U{hd_2bwBLDt=#P zMvE6J{}q#ZAeCCD90>99AX~}uR!NrA!|0}w0SO6vPS(+i{cYOv*_#N1Mrca%u4_|}fXw^9BhEMsKJ>F<2L}&8qR5%MitWT-ngK%&<|vrK7_)hSZsZx# z-o8A404A6+uBns38v!UYYy8#58zd0ZoWCo@B0kTEVF#m7O1k3;Z>;Kn@+)Jj3Jl@EWK8|~z$ zzH^hn9AW((QK&S8flOq(@%T&dM@`f|_xQuO#M4BEIv-tzzO%n&L5OycV+!1~3`9hR z?oe<9C;T%SjIYE$!Ul=G72}$BpsnK67w&W*xtqPnWSc0ivs)sjE5-KgKl<#i+VOZlUC^0If4`ApUA`^q6W@!Ixf_EMm(Ez|2v?hJE$0 Z$tIg@vdKTmcK`qY|Np_;&LIF^0|3aB8({zd diff --git a/chart/charts/prometheus/.helmignore b/chart/charts/prometheus/.helmignore new file mode 100755 index 0000000..825c007 --- /dev/null +++ b/chart/charts/prometheus/.helmignore 
@@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +OWNERS diff --git a/chart/charts/prometheus/Chart.yaml b/chart/charts/prometheus/Chart.yaml new file mode 100755 index 0000000..0b153ed --- /dev/null +++ b/chart/charts/prometheus/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 2.15.2 +description: Prometheus is a monitoring system and time series database. +engine: gotpl +home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh +name: prometheus +sources: +- https://github.com/prometheus/alertmanager +- https://github.com/prometheus/prometheus +- https://github.com/prometheus/pushgateway +- https://github.com/prometheus/node_exporter +- https://github.com/kubernetes/kube-state-metrics +tillerVersion: '>=2.8.0' +version: 10.0.0 diff --git a/chart/charts/prometheus/README.md b/chart/charts/prometheus/README.md new file mode 100755 index 0000000..9f84bf0 --- /dev/null +++ b/chart/charts/prometheus/README.md @@ -0,0 +1,476 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. 
+ +## TL;DR; + +```console +$ helm install stable/prometheus +``` + +## Introduction + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.3+ with Beta APIs enabled + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/prometheus +``` + +The command deploys Prometheus on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Prometheus 2.x + +Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/) + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +## Upgrading from previous chart versions. + +Version 9.0 adds a new option to enable or disable the Prometheus Server. +This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. +To install the server `server.enabled` must be set to `true`. + +As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. 
See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +### Example migration + +Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: + +1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: + + ``` + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. + + ``` + prometheus.yml: + ... + remote_read: + - url: http://prometheus-old/api/v1/read + ... + ``` + + Old data will be available when you query the new prometheus instance. + +## Scraping Pod Metrics via Annotations + +This chart uses a default configuration that causes prometheus +to scrape a variety of kubernetes resource types, provided they have the correct annotations. +In this section we describe how to configure pods to be scraped; +for information on how other resource types can be scraped you can +do a `helm template` to get the kubernetes resource definitions, +and then reference the prometheus configuration in the ConfigMap against the prometheus documentation +for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) +and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). 
+ +In order to get prometheus to scrape pods, you must add annotations to the pods as below: + +``` +metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "8080" +spec: +... +``` + +You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. +`prometheus.io/port` should be set to the port that your pod serves metrics from. +Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be +enclosed in double quotes. + +## Configuration + +The following table lists the configurable parameters of the Prometheus chart and their default values. + +Parameter | Description | Default +--------- | ----------- | ------- +`alertmanager.enabled` | If true, create alertmanager | `true` +`alertmanager.name` | alertmanager container name | `alertmanager` +`alertmanager.image.repository` | alertmanager container image repository | `prom/alertmanager` +`alertmanager.image.tag` | alertmanager container image tag | `v0.20.0` +`alertmanager.image.pullPolicy` | alertmanager container image pull policy | `IfNotPresent` +`alertmanager.prefixURL` | The prefix slug at which the server can be accessed | `` +`alertmanager.baseURL` | The external url at which the server can be accessed | `"http://localhost:9093"` +`alertmanager.extraArgs` | Additional alertmanager container arguments | `{}` +`alertmanager.extraSecretMounts` | Additional alertmanager Secret mounts | `[]` +`alertmanager.configMapOverrideName` | Prometheus alertmanager ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}` and setting this value will prevent the default alertmanager ConfigMap from being generated | `""` +`alertmanager.configFromSecret` | The name of a secret in the same kubernetes namespace which contains the Alertmanager config, setting this value will prevent the default alertmanager ConfigMap from being generated | `""` +`alertmanager.configFileName` | The
configuration file name to be loaded to alertmanager. Must match the key within configuration loaded from ConfigMap/Secret. | `alertmanager.yml` +`alertmanager.ingress.enabled` | If true, alertmanager Ingress will be created | `false` +`alertmanager.ingress.annotations` | alertmanager Ingress annotations | `{}` +`alertmanager.ingress.extraLabels` | alertmanager Ingress additional labels | `{}` +`alertmanager.ingress.hosts` | alertmanager Ingress hostnames | `[]` +`alertmanager.ingress.extraPaths` | Ingress extra paths to prepend to every alertmanager host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` +`alertmanager.ingress.tls` | alertmanager Ingress TLS configuration (YAML) | `[]` +`alertmanager.nodeSelector` | node labels for alertmanager pod assignment | `{}` +`alertmanager.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`alertmanager.affinity` | pod affinity | `{}` +`alertmanager.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`alertmanager.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`alertmanager.schedulerName` | alertmanager alternate scheduler name | `nil` +`alertmanager.persistentVolume.enabled` | If true, alertmanager will create a Persistent Volume Claim | `true` +`alertmanager.persistentVolume.accessModes` | alertmanager data Persistent Volume access modes | `[ReadWriteOnce]` +`alertmanager.persistentVolume.annotations` | Annotations for alertmanager Persistent Volume Claim | `{}` +`alertmanager.persistentVolume.existingClaim` | alertmanager data Persistent Volume existing claim name | `""` +`alertmanager.persistentVolume.mountPath` | alertmanager data Persistent Volume mount root path | `/data` +`alertmanager.persistentVolume.size` | alertmanager data Persistent Volume size | `2Gi` 
+`alertmanager.persistentVolume.storageClass` | alertmanager data Persistent Volume Storage Class | `unset` +`alertmanager.persistentVolume.volumeBindingMode` | alertmanager data Persistent Volume Binding Mode | `unset` +`alertmanager.persistentVolume.subPath` | Subdirectory of alertmanager data Persistent Volume to mount | `""` +`alertmanager.podAnnotations` | annotations to be added to alertmanager pods | `{}` +`alertmanager.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`alertmanager.replicaCount` | desired number of alertmanager pods | `1` +`alertmanager.statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false` +`alertmanager.statefulSet.podManagementPolicy` | podManagementPolicy of alertmanager pods | `OrderedReady` +`alertmanager.statefulSet.headless.annotations` | annotations for alertmanager headless service | `{}` +`alertmanager.statefulSet.headless.labels` | labels for alertmanager headless service | `{}` +`alertmanager.statefulSet.headless.enableMeshPeer` | If true, enable the mesh peer endpoint for the headless service | `{}` +`alertmanager.statefulSet.headless.servicePort` | alertmanager headless service port | `80` +`alertmanager.priorityClassName` | alertmanager priorityClassName | `nil` +`alertmanager.resources` | alertmanager pod resource requests & limits | `{}` +`alertmanager.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Alert Manager containers | `{}` +`alertmanager.service.annotations` | annotations for alertmanager service | `{}` +`alertmanager.service.clusterIP` | internal alertmanager cluster service IP | `""` +`alertmanager.service.externalIPs` | alertmanager service external IP addresses | `[]` +`alertmanager.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`alertmanager.service.loadBalancerSourceRanges` | list of IP CIDRs allowed 
access to load balancer (if supported) | `[]` +`alertmanager.service.servicePort` | alertmanager service port | `80` +`alertmanager.service.sessionAffinity` | Session Affinity for alertmanager service, can be `None` or `ClientIP` | `None` +`alertmanager.service.type` | type of alertmanager service to create | `ClusterIP` +`alertmanagerFiles.alertmanager.yml` | Prometheus alertmanager configuration | example configuration +`configmapReload.name` | configmap-reload container name | `configmap-reload` +`configmapReload.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload` +`configmapReload.image.tag` | configmap-reload container image tag | `v0.3.0` +`configmapReload.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent` +`configmapReload.extraArgs` | Additional configmap-reload container arguments | `{}` +`configmapReload.extraVolumeDirs` | Additional configmap-reload volume directories | `{}` +`configmapReload.extraConfigmapMounts` | Additional configmap-reload configMap mounts | `[]` +`configmapReload.resources` | configmap-reload pod resource requests & limits | `{}` +`initChownData.enabled` | If false, don't reset data ownership at startup | true +`initChownData.name` | init-chown-data container name | `init-chown-data` +`initChownData.image.repository` | init-chown-data container image repository | `busybox` +`initChownData.image.tag` | init-chown-data container image tag | `latest` +`initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` +`initChownData.resources` | init-chown-data pod resource requests & limits | `{}` +`kubeStateMetrics.enabled` | If true, create kube-state-metrics | `true` +`kubeStateMetrics.name` | kube-state-metrics container name | `kube-state-metrics` +`kubeStateMetrics.image.repository` | kube-state-metrics container image repository| `quay.io/coreos/kube-state-metrics` +`kubeStateMetrics.image.tag` | kube-state-metrics container 
image tag | `v1.9.0` +`kubeStateMetrics.image.pullPolicy` | kube-state-metrics container image pull policy | `IfNotPresent` +`kubeStateMetrics.args` | kube-state-metrics container arguments | `{}` +`kubeStateMetrics.nodeSelector` | node labels for kube-state-metrics pod assignment | `{}` +`kubeStateMetrics.podAnnotations` | annotations to be added to kube-state-metrics pods | `{}` +`kubeStateMetrics.deploymentAnnotations` | annotations to be added to kube-state-metrics deployment | `{}` +`kubeStateMetrics.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`kubeStateMetrics.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`kubeStateMetrics.replicaCount` | desired number of kube-state-metrics pods | `1` +`kubeStateMetrics.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`kubeStateMetrics.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`kubeStateMetrics.priorityClassName` | kube-state-metrics priorityClassName | `nil` +`kubeStateMetrics.resources` | kube-state-metrics resource requests and limits (YAML) | `{}` +`kubeStateMetrics.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for kube-state-metrics containers | `{}` +`kubeStateMetrics.service.annotations` | annotations for kube-state-metrics service | `{prometheus.io/scrape: "true"}` +`kubeStateMetrics.service.clusterIP` | internal kube-state-metrics cluster service IP | `None` +`kubeStateMetrics.service.externalIPs` | kube-state-metrics service external IP addresses | `[]` +`kubeStateMetrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`kubeStateMetrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`kubeStateMetrics.service.servicePort` | kube-state-metrics service port | `80` +`kubeStateMetrics.service.type` | 
type of kube-state-metrics service to create | `ClusterIP` +`nodeExporter.enabled` | If true, create node-exporter | `true` +`nodeExporter.name` | node-exporter container name | `node-exporter` +`nodeExporter.image.repository` | node-exporter container image repository| `prom/node-exporter` +`nodeExporter.image.tag` | node-exporter container image tag | `v0.18.1` +`nodeExporter.image.pullPolicy` | node-exporter container image pull policy | `IfNotPresent` +`nodeExporter.extraArgs` | Additional node-exporter container arguments | `{}` +`nodeExporter.extraHostPathMounts` | Additional node-exporter hostPath mounts | `[]` +`nodeExporter.extraConfigmapMounts` | Additional node-exporter configMap mounts | `[]` +`nodeExporter.hostNetwork` | If true, node-exporter pods share the host network namespace | `true` +`nodeExporter.hostPID` | If true, node-exporter pods share the host PID namespace | `true` +`nodeExporter.nodeSelector` | node labels for node-exporter pod assignment | `{}` +`nodeExporter.podAnnotations` | annotations to be added to node-exporter pods | `{}` +`nodeExporter.pod.labels` | labels to be added to node-exporter pods | `{}` +`nodeExporter.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`nodeExporter.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`nodeExporter.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`nodeExporter.podSecurityPolicy.enabled` | Specify if a Pod Security Policy for node-exporter must be created | `false` +`nodeExporter.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`nodeExporter.priorityClassName` | node-exporter priorityClassName | `nil` +`nodeExporter.resources` | node-exporter resource requests and limits (YAML) | `{}` +`nodeExporter.securityContext` | securityContext for containers in pod | `{}` +`nodeExporter.service.annotations` | annotations for node-exporter service | 
`{prometheus.io/scrape: "true"}` +`nodeExporter.service.clusterIP` | internal node-exporter cluster service IP | `None` +`nodeExporter.service.externalIPs` | node-exporter service external IP addresses | `[]` +`nodeExporter.service.hostPort` | node-exporter service host port | `9100` +`nodeExporter.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`nodeExporter.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`nodeExporter.service.servicePort` | node-exporter service port | `9100` +`nodeExporter.service.type` | type of node-exporter service to create | `ClusterIP` +`podSecurityPolicy.enabled` | If true, create & use pod security policies resources | `false` +`pushgateway.enabled` | If true, create pushgateway | `true` +`pushgateway.name` | pushgateway container name | `pushgateway` +`pushgateway.image.repository` | pushgateway container image repository | `prom/pushgateway` +`pushgateway.image.tag` | pushgateway container image tag | `v1.0.1` +`pushgateway.image.pullPolicy` | pushgateway container image pull policy | `IfNotPresent` +`pushgateway.extraArgs` | Additional pushgateway container arguments | `{}` +`pushgateway.ingress.enabled` | If true, pushgateway Ingress will be created | `false` +`pushgateway.ingress.annotations` | pushgateway Ingress annotations | `{}` +`pushgateway.ingress.hosts` | pushgateway Ingress hostnames | `[]` +`pushgateway.ingress.extraPaths` | Ingress extra paths to prepend to every pushgateway host configuration. 
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` +`pushgateway.ingress.tls` | pushgateway Ingress TLS configuration (YAML) | `[]` +`pushgateway.nodeSelector` | node labels for pushgateway pod assignment | `{}` +`pushgateway.podAnnotations` | annotations to be added to pushgateway pods | `{}` +`pushgateway.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`pushgateway.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`pushgateway.replicaCount` | desired number of pushgateway pods | `1` +`pushgateway.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`pushgateway.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`pushgateway.schedulerName` | pushgateway alternate scheduler name | `nil` +`pushgateway.persistentVolume.enabled` | If true, Prometheus pushgateway will create a Persistent Volume Claim | `false` +`pushgateway.persistentVolume.accessModes` | Prometheus pushgateway data Persistent Volume access modes | `[ReadWriteOnce]` +`pushgateway.persistentVolume.annotations` | Prometheus pushgateway data Persistent Volume annotations | `{}` +`pushgateway.persistentVolume.existingClaim` | Prometheus pushgateway data Persistent Volume existing claim name | `""` +`pushgateway.persistentVolume.mountPath` | Prometheus pushgateway data Persistent Volume mount root path | `/data` +`pushgateway.persistentVolume.size` | Prometheus pushgateway data Persistent Volume size | `2Gi` +`pushgateway.persistentVolume.storageClass` | Prometheus pushgateway data Persistent Volume Storage Class | `unset` +`pushgateway.persistentVolume.volumeBindingMode` | Prometheus pushgateway data Persistent Volume Binding Mode | `unset` +`pushgateway.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""` 
+`pushgateway.priorityClassName` | pushgateway priorityClassName | `nil` +`pushgateway.resources` | pushgateway pod resource requests & limits | `{}` +`pushgateway.service.annotations` | annotations for pushgateway service | `{}` +`pushgateway.service.clusterIP` | internal pushgateway cluster service IP | `""` +`pushgateway.service.externalIPs` | pushgateway service external IP addresses | `[]` +`pushgateway.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`pushgateway.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`pushgateway.service.servicePort` | pushgateway service port | `9091` +`pushgateway.service.type` | type of pushgateway service to create | `ClusterIP` +`pushgateway.strategy.type` | Deployment strategy | `{ "type": "RollingUpdate" }` +`rbac.create` | If true, create & use RBAC resources | `true` +`server.enabled` | If false, Prometheus server will not be created | `true` +`server.name` | Prometheus server container name | `server` +`server.image.repository` | Prometheus server container image repository | `prom/prometheus` +`server.image.tag` | Prometheus server container image tag | `v2.15.2` +`server.image.pullPolicy` | Prometheus server container image pull policy | `IfNotPresent` +`server.configPath` | Path to a prometheus server config file on the container FS | `/etc/config/prometheus.yml` +`server.global.scrape_interval` | How frequently to scrape targets by default | `1m` +`server.global.scrape_timeout` | How long until a scrape request times out | `10s` +`server.global.evaluation_interval` | How frequently to evaluate rules | `1m` +`server.extraArgs` | Additional Prometheus server container arguments | `{}` +`server.extraFlags` | Additional Prometheus server container flags | `["web.enable-lifecycle"]` +`server.extraInitContainers` | Init containers to launch alongside the server | `[]` +`server.prefixURL` | The prefix slug at which the server can 
be accessed | `` +`server.baseURL` | The external url at which the server can be accessed | `` +`server.env` | Prometheus server environment variables | `[]` +`server.extraHostPathMounts` | Additional Prometheus server hostPath mounts | `[]` +`server.extraConfigmapMounts` | Additional Prometheus server configMap mounts | `[]` +`server.extraSecretMounts` | Additional Prometheus server Secret mounts | `[]` +`server.extraVolumeMounts` | Additional Prometheus server Volume mounts | `[]` +`server.extraVolumes` | Additional Prometheus server Volumes | `[]` +`server.configMapOverrideName` | Prometheus server ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.server.configMapOverrideName}}` and setting this value will prevent the default server ConfigMap from being generated | `""` +`server.ingress.enabled` | If true, Prometheus server Ingress will be created | `false` +`server.ingress.annotations` | Prometheus server Ingress annotations | `[]` +`server.ingress.extraLabels` | Prometheus server Ingress additional labels | `{}` +`server.ingress.hosts` | Prometheus server Ingress hostnames | `[]` +`server.ingress.extraPaths` | Ingress extra paths to prepend to every Prometheus server host configuration. 
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` +`server.ingress.tls` | Prometheus server Ingress TLS configuration (YAML) | `[]` +`server.nodeSelector` | node labels for Prometheus server pod assignment | `{}` +`server.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`server.affinity` | pod affinity | `{}` +`server.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`server.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`server.priorityClassName` | Prometheus server priorityClassName | `nil` +`server.schedulerName` | Prometheus server alternate scheduler name | `nil` +`server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim | `true` +`server.persistentVolume.accessModes` | Prometheus server data Persistent Volume access modes | `[ReadWriteOnce]` +`server.persistentVolume.annotations` | Prometheus server data Persistent Volume annotations | `{}` +`server.persistentVolume.existingClaim` | Prometheus server data Persistent Volume existing claim name | `""` +`server.persistentVolume.mountPath` | Prometheus server data Persistent Volume mount root path | `/data` +`server.persistentVolume.size` | Prometheus server data Persistent Volume size | `8Gi` +`server.persistentVolume.storageClass` | Prometheus server data Persistent Volume Storage Class | `unset` +`server.persistentVolume.volumeBindingMode` | Prometheus server data Persistent Volume Binding Mode | `unset` +`server.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""` +`server.emptyDir.sizeLimit` | emptyDir sizeLimit if a Persistent Volume is not used | `""` +`server.podAnnotations` | annotations to be added to Prometheus server pods | `{}` +`server.podLabels` | labels to be added to Prometheus server pods | 
`{}` +`server.alertmanagers` | Prometheus AlertManager configuration for the Prometheus server | `{}` +`server.deploymentAnnotations` | annotations to be added to Prometheus server deployment | `{}` +`server.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`server.replicaCount` | desired number of Prometheus server pods | `1` +`server.statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false` +`server.statefulSet.annotations` | annotations to be added to Prometheus server stateful set | `{}` +`server.statefulSet.labels` | labels to be added to Prometheus server stateful set | `{}` +`server.statefulSet.podManagementPolicy` | podManagementPolicy of server pods | `OrderedReady` +`server.statefulSet.headless.annotations` | annotations for Prometheus server headless service | `{}` +`server.statefulSet.headless.labels` | labels for Prometheus server headless service | `{}` +`server.statefulSet.headless.servicePort` | Prometheus server headless service port | `80` +`server.resources` | Prometheus server resource requests and limits | `{}` +`server.verticalAutoscaler.enabled` | If true a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs) | `false` +`server.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for server containers | `{}` +`server.service.annotations` | annotations for Prometheus server service | `{}` +`server.service.clusterIP` | internal Prometheus server cluster service IP | `""` +`server.service.externalIPs` | Prometheus server service external IP addresses | `[]` +`server.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`server.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`server.service.nodePort` | Port to be used as the service NodePort (ignored
if `server.service.type` is not `NodePort`) | `0` +`server.service.servicePort` | Prometheus server service port | `80` +`server.service.sessionAffinity` | Session Affinity for server service, can be `None` or `ClientIP` | `None` +`server.service.type` | type of Prometheus server service to create | `ClusterIP` +`server.service.statefulsetReplica.enabled` | If true, send the traffic from the service to only one replica of the replicaset | `false` +`server.service.statefulsetReplica.replica` | Which replica to send the traffic to | `0` +`server.sidecarContainers` | array of snippets with your sidecar containers for prometheus server | `""` +`serviceAccounts.alertmanager.create` | If true, create the alertmanager service account | `true` +`serviceAccounts.alertmanager.name` | name of the alertmanager service account to use or create | `{{ prometheus.alertmanager.fullname }}` +`serviceAccounts.kubeStateMetrics.create` | If true, create the kubeStateMetrics service account | `true` +`serviceAccounts.kubeStateMetrics.name` | name of the kubeStateMetrics service account to use or create | `{{ prometheus.kubeStateMetrics.fullname }}` +`serviceAccounts.nodeExporter.create` | If true, create the nodeExporter service account | `true` +`serviceAccounts.nodeExporter.name` | name of the nodeExporter service account to use or create | `{{ prometheus.nodeExporter.fullname }}` +`serviceAccounts.pushgateway.create` | If true, create the pushgateway service account | `true` +`serviceAccounts.pushgateway.name` | name of the pushgateway service account to use or create | `{{ prometheus.pushgateway.fullname }}` +`serviceAccounts.server.create` | If true, create the server service account | `true` +`serviceAccounts.server.name` | name of the server service account to use or create | `{{ prometheus.server.fullname }}` +`server.terminationGracePeriodSeconds` | Prometheus server Pod termination grace period | `300` +`server.retention` | (optional) Prometheus data retention | `"15d"` 
+`serverFiles.alerts` | (Deprecated) Prometheus server alerts configuration | `{}` +`serverFiles.rules` | (Deprecated) Prometheus server rules configuration | `{}` +`serverFiles.alerting_rules.yml` | Prometheus server alerts configuration | `{}` +`serverFiles.recording_rules.yml` | Prometheus server rules configuration | `{}` +`serverFiles.prometheus.yml` | Prometheus server scrape configuration | example configuration +`extraScrapeConfigs` | Prometheus server additional scrape configuration | "" +`alertRelabelConfigs` | Prometheus server [alert relabeling configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs) for H/A prometheus | "" +`networkPolicy.enabled` | Enable NetworkPolicy | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install stable/prometheus --name my-release \ + --set server.terminationGracePeriodSeconds=360 +``` + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install stable/prometheus --name my-release -f values.yaml +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +Note that you have multiple yaml files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example, + +```yaml +# values.yaml +# ... + +# service1-alert.yaml +serverFiles: + alerts: + service1: + - alert: anAlert + # ... + +# service2-alert.yaml +serverFiles: + alerts: + service2: + - alert: anAlert + # ... +``` + +```console +$ helm install stable/prometheus --name my-release -f values.yaml -f service1-alert.yaml -f service2-alert.yaml +``` + +### RBAC Configuration +Roles and RoleBindings resources will be created automatically for `server` and `kubeStateMetrics` services. 
+ +To manually setup RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account. + +> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own. + +### ConfigMap Files +AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. + +Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. + +### Ingress TLS +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. 
Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +```yaml +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager +and Kube State Metrics by only accepting connections from Prometheus Server. +All inbound connections to Prometheus Server are still allowed. + +To enable network policy for Prometheus, install a networking plugin that +implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need +to manually create a networkpolicy which allows it. diff --git a/chart/charts/prometheus/templates/NOTES.txt b/chart/charts/prometheus/templates/NOTES.txt new file mode 100755 index 0000000..0e8868f --- /dev/null +++ b/chart/charts/prometheus/templates/NOTES.txt @@ -0,0 +1,112 @@ +{{- if .Values.server.enabled -}} +The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.server.ingress.enabled -}} +From outside the cluster, the server URL(s) are: +{{- range .Values.server.ingress.hosts }} +http://{{ . 
}} +{{- end }} +{{- else }} +Get the Prometheus server URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.server.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.server.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} +{{- else if contains "ClusterIP" .Values.server.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 +{{- end }} +{{- end }} + +{{- if .Values.server.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Server pod is terminated. 
##### +################################################################################# +{{- end }} +{{- end }} + +{{ if .Values.alertmanager.enabled }} +The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.alertmanager.ingress.enabled -}} +From outside the cluster, the alertmanager URL(s) are: +{{- range .Values.alertmanager.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Alertmanager URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.alertmanager.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} +{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . 
}},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 +{{- end }} +{{- end }} + +{{- if .Values.alertmanager.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the AlertManager pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{- if .Values.nodeExporter.podSecurityPolicy.enabled }} +{{- else }} +################################################################################# +###### WARNING: Pod Security Policy has been moved to a global property. ##### +###### use .Values.podSecurityPolicy.enabled with pod-based ##### +###### annotations ##### +###### (e.g. .Values.nodeExporter.podSecurityPolicy.annotations) ##### +################################################################################# +{{- end }} + +{{ if .Values.pushgateway.enabled }} +The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.pushgateway.ingress.enabled -}} +From outside the cluster, the pushgateway URL(s) are: +{{- range .Values.pushgateway.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the PushGateway URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.pushgateway.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }} +{{- else if contains "ClusterIP" .Values.pushgateway.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 +{{- end }} +{{- end }} +{{- end }} + +For more information on running Prometheus, visit: +https://prometheus.io/ diff --git a/chart/charts/prometheus/templates/_helpers.tpl b/chart/charts/prometheus/templates/_helpers.tpl new file mode 100755 index 0000000..295aa01 --- /dev/null +++ b/chart/charts/prometheus/templates/_helpers.tpl @@ -0,0 +1,276 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create unified labels for prometheus components +*/}} +{{- define "prometheus.common.matchLabels" -}} +app: {{ template "prometheus.name" . 
}} +release: {{ .Release.Name }} +{{- end -}} + +{{- define "prometheus.common.metaLabels" -}} +chart: {{ template "prometheus.chart" . }} +heritage: {{ .Release.Service }} +{{- end -}} + +{{- define "prometheus.alertmanager.labels" -}} +{{ include "prometheus.alertmanager.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.alertmanager.matchLabels" -}} +component: {{ .Values.alertmanager.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.kubeStateMetrics.labels" -}} +{{ include "prometheus.kubeStateMetrics.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.kubeStateMetrics.matchLabels" -}} +component: {{ .Values.kubeStateMetrics.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.labels" -}} +{{ include "prometheus.nodeExporter.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.matchLabels" -}} +component: {{ .Values.nodeExporter.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.labels" -}} +{{ include "prometheus.pushgateway.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.matchLabels" -}} +component: {{ .Values.pushgateway.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.server.labels" -}} +{{ include "prometheus.server.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.server.matchLabels" -}} +component: {{ .Values.server.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified alertmanager name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} + +{{- define "prometheus.alertmanager.fullname" -}} +{{- if .Values.alertmanager.fullnameOverride -}} +{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified kube-state-metrics name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.kubeStateMetrics.fullname" -}} +{{- if .Values.kubeStateMetrics.fullnameOverride -}} +{{- .Values.kubeStateMetrics.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified node-exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.nodeExporter.fullname" -}} +{{- if .Values.nodeExporter.fullnameOverride -}} +{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.server.fullname" -}} +{{- if .Values.server.fullnameOverride -}} +{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified pushgateway name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.pushgateway.fullname" -}} +{{- if .Values.pushgateway.fullnameOverride -}} +{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "prometheus.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "prometheus.daemonset.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "prometheus.podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.3-0, <1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the alertmanager component +*/}} +{{- define "prometheus.serviceAccountName.alertmanager" -}} +{{- if .Values.serviceAccounts.alertmanager.create -}} + {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.alertmanager.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the kubeStateMetrics component +*/}} +{{- define "prometheus.serviceAccountName.kubeStateMetrics" -}} +{{- if .Values.serviceAccounts.kubeStateMetrics.create -}} + {{ default (include "prometheus.kubeStateMetrics.fullname" .) .Values.serviceAccounts.kubeStateMetrics.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.kubeStateMetrics.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the nodeExporter component +*/}} +{{- define "prometheus.serviceAccountName.nodeExporter" -}} +{{- if .Values.serviceAccounts.nodeExporter.create -}} + {{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.nodeExporter.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the pushgateway component +*/}} +{{- define "prometheus.serviceAccountName.pushgateway" -}} +{{- if .Values.serviceAccounts.pushgateway.create -}} + {{ default (include "prometheus.pushgateway.fullname" .) 
.Values.serviceAccounts.pushgateway.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.pushgateway.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the server component +*/}} +{{- define "prometheus.serviceAccountName.server" -}} +{{- if .Values.serviceAccounts.server.create -}} + {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.server.name }} +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-clusterrole.yaml b/chart/charts/prometheus/templates/alertmanager-clusterrole.yaml new file mode 100755 index 0000000..3cfc133 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml b/chart/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml new file mode 100755 index 0000000..925afcd --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . 
}} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.alertmanager.fullname" . }} +{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-configmap.yaml b/chart/charts/prometheus/templates/alertmanager-configmap.yaml new file mode 100755 index 0000000..f2d78e2 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +data: +{{- $root := . -}} +{{- range $key, $value := .Values.alertmanagerFiles }} + {{ $key }}: | +{{ toYaml $value | default "{}" | indent 4 }} +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-deployment.yaml b/chart/charts/prometheus/templates/alertmanager-deployment.yaml new file mode 100755 index 0000000..5202407 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-deployment.yaml @@ -0,0 +1,134 @@ +{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . 
| nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: +{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + spec: +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }} + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + - --cluster.advertise-address=$(POD_IP):6783 + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/#/status + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ 
.Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + - name: storage-volume + {{- if .Values.alertmanager.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-ingress.yaml b/chart/charts/prometheus/templates/alertmanager-ingress.yaml new file mode 100755 index 0000000..a6a9b29 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-ingress.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.alertmanager.fullname" . }} +{{- $servicePort := .Values.alertmanager.service.servicePort -}} +{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: +{{- if .Values.alertmanager.ingress.annotations }} + annotations: +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + rules: + {{- range .Values.alertmanager.ingress.hosts }} + {{- $url := splitList "/" . 
}} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: /{{ rest $url | join "/" }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.alertmanager.ingress.tls }} + tls: +{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-networkpolicy.yaml b/chart/charts/prometheus/templates/alertmanager-networkpolicy.yaml new file mode 100755 index 0000000..0bcbd27 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9093 +{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-pdb.yaml b/chart/charts/prometheus/templates/alertmanager-pdb.yaml new file mode 100755 index 0000000..c38df77 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-pdb.yaml @@ -0,0 +1,13 @@ +{{- if .Values.alertmanager.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.alertmanager.labels" . 
| nindent 6 }} +{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml new file mode 100755 index 0000000..70f8033 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml @@ -0,0 +1,48 @@ +{{- if .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + annotations: +{{- if .Values.alertmanager.podSecurityPolicy.annotations }} +{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-pvc.yaml b/chart/charts/prometheus/templates/alertmanager-pvc.yaml new file mode 100755 index 0000000..400aba5 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-pvc.yaml @@ -0,0 +1,32 @@ +{{- if not .Values.alertmanager.statefulSet.enabled -}} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} +{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} +{{- if .Values.alertmanager.persistentVolume.storageClass }} +{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-service-headless.yaml b/chart/charts/prometheus/templates/alertmanager-service-headless.yaml new file mode 100755 index 0000000..8d619e8 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-service-headless.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: v1 +kind: Service
+metadata: +{{- if .Values.alertmanager.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.statefulSet.headless.labels }} +{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }}-headless +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9093 +{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-service.yaml b/chart/charts/prometheus/templates/alertmanager-service.yaml new file mode 100755 index 0000000..7919643 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-service.yaml @@ -0,0 +1,52 @@ +{{- if .Values.alertmanager.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.service.annotations }} + annotations: +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.service.labels }} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . 
}} +spec: +{{- if .Values.alertmanager.service.clusterIP }} + clusterIP: {{ .Values.alertmanager.service.clusterIP }} +{{- end }} +{{- if .Values.alertmanager.service.externalIPs }} + externalIPs: +{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.alertmanager.service.servicePort }} + protocol: TCP + targetPort: 9093 + {{- if .Values.alertmanager.service.nodePort }} + nodePort: {{ .Values.alertmanager.service.nodePort }} + {{- end }} +{{- if .Values.alertmanager.service.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- if .Values.alertmanager.service.sessionAffinity }} + sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }} +{{- end }} + type: "{{ .Values.alertmanager.service.type }}" +{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-serviceaccount.yaml b/chart/charts/prometheus/templates/alertmanager-serviceaccount.yaml new file mode 100755 index 0000000..4ff4558 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.alertmanager" . 
}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-statefulset.yaml b/chart/charts/prometheus/templates/alertmanager-statefulset.yaml new file mode 100755 index 0000000..811d678 --- /dev/null +++ b/chart/charts/prometheus/templates/alertmanager-statefulset.yaml @@ -0,0 +1,150 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + serviceName: {{ template "prometheus.alertmanager.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: +{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + spec: +{{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} +{{- end }} +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/alertmanager.yml + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + - --cluster.advertise-address=$(POD_IP):6783 + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - --cluster.listen-address=0.0.0.0:6783 + {{- range $n := until (.Values.alertmanager.replicaCount | int) }} + - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783 + {{- end }} + {{- end }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/#/status + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} +{{- if .Values.alertmanager.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" + {{- if .Values.alertmanager.persistentVolume.storageClass }} + {{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: {} +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml b/chart/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml new file mode 100755 index 0000000..9f5be97 --- /dev/null +++ b/chart/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml @@ -0,0 +1,87 @@ +{{- if and .Values.kubeStateMetrics.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.kubeStateMetrics.fullname" .
}} +{{- end }} + - apiGroups: + - "" + resources: + - namespaces + - nodes + - persistentvolumeclaims + - pods + - services + - resourcequotas + - replicationcontrollers + - limitranges + - persistentvolumeclaims + - persistentvolumes + - endpoints + - secrets + - configmaps + verbs: + - list + - watch + - apiGroups: + - extensions + resources: + - daemonsets + - deployments + - ingresses + - replicasets + verbs: + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - statefulsets + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - list + - watch + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - list + - watch + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - list + - watch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - list + - watch +{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml b/chart/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml new file mode 100755 index 0000000..5e3b275 --- /dev/null +++ b/chart/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.kubeStateMetrics.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.kubeStateMetrics.fullname" . 
}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-deployment.yaml b/chart/charts/prometheus/templates/kube-state-metrics-deployment.yaml new file mode 100755 index 0000000..eaeda96 --- /dev/null +++ b/chart/charts/prometheus/templates/kube-state-metrics-deployment.yaml @@ -0,0 +1,68 @@ +{{- if .Values.kubeStateMetrics.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.kubeStateMetrics.deploymentAnnotations }} + annotations: +{{ toYaml .Values.kubeStateMetrics.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.kubeStateMetrics.matchLabels" . | nindent 6 }} + replicas: {{ .Values.kubeStateMetrics.replicaCount }} + template: + metadata: + {{- if .Values.kubeStateMetrics.podAnnotations }} + annotations: +{{ toYaml .Values.kubeStateMetrics.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.kubeStateMetrics.labels" . | nindent 8 }} +{{- if .Values.kubeStateMetrics.pod.labels }} +{{ toYaml .Values.kubeStateMetrics.pod.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} +{{- if .Values.kubeStateMetrics.priorityClassName }} + priorityClassName: "{{ .Values.kubeStateMetrics.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.kubeStateMetrics.name }} + image: "{{ .Values.kubeStateMetrics.image.repository }}:{{ .Values.kubeStateMetrics.image.tag }}" + imagePullPolicy: "{{ .Values.kubeStateMetrics.image.pullPolicy }}" + {{- if .Values.kubeStateMetrics.args }} + args: + {{- range $key, $value := .Values.kubeStateMetrics.args }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 8080 + resources: +{{ toYaml .Values.kubeStateMetrics.resources | indent 12 }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.kubeStateMetrics.nodeSelector }} + nodeSelector: +{{ toYaml .Values.kubeStateMetrics.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.kubeStateMetrics.securityContext }} + securityContext: +{{ toYaml .Values.kubeStateMetrics.securityContext | indent 8 }} + {{- end }} + {{- if .Values.kubeStateMetrics.tolerations }} + tolerations: +{{ toYaml .Values.kubeStateMetrics.tolerations | indent 8 }} + {{- end }} + {{- if .Values.kubeStateMetrics.affinity }} + affinity: +{{ toYaml .Values.kubeStateMetrics.affinity | indent 8 }} + {{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml b/chart/charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml new file mode 100755 index 0000000..56893ce --- /dev/null +++ b/chart/charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.kubeStateMetrics.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} + labels: + {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.kubeStateMetrics.matchLabels" . 
| nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 10 }} + - ports: + - port: 8080 +{{- end -}} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-pdb.yaml b/chart/charts/prometheus/templates/kube-state-metrics-pdb.yaml new file mode 100755 index 0000000..3f3411d --- /dev/null +++ b/chart/charts/prometheus/templates/kube-state-metrics-pdb.yaml @@ -0,0 +1,13 @@ +{{- if .Values.kubeStateMetrics.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} + labels: + {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.kubeStateMetrics.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.kubeStateMetrics.labels" . | nindent 6 }} +{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/kube-state-metrics-podsecuritypolicy.yaml new file mode 100755 index 0000000..d1afcb8 --- /dev/null +++ b/chart/charts/prometheus/templates/kube-state-metrics-podsecuritypolicy.yaml @@ -0,0 +1,42 @@ +{{- if .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} + labels: + {{- include "prometheus.kubeStateMetrics.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.kubeStateMetrics.podSecurityPolicy.annotations }} +{{ toYaml .Values.kubeStateMetrics.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'secret' + allowedHostPaths: [] + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml b/chart/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml new file mode 100755 index 0000000..5f97480 --- /dev/null +++ b/chart/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if and .Values.kubeStateMetrics.enabled .Values.serviceAccounts.kubeStateMetrics.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} +{{- end -}} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-svc.yaml b/chart/charts/prometheus/templates/kube-state-metrics-svc.yaml new file mode 100755 index 0000000..717d85f --- /dev/null +++ b/chart/charts/prometheus/templates/kube-state-metrics-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.kubeStateMetrics.enabled .Values.kubeStateMetrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.kubeStateMetrics.service.annotations }} + annotations: +{{ toYaml .Values.kubeStateMetrics.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.kubeStateMetrics.labels" . 
| nindent 4 }} +{{- if .Values.kubeStateMetrics.service.labels }} +{{ toYaml .Values.kubeStateMetrics.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +spec: +{{- if .Values.kubeStateMetrics.service.clusterIP }} + clusterIP: {{ .Values.kubeStateMetrics.service.clusterIP }} +{{- end }} +{{- if .Values.kubeStateMetrics.service.externalIPs }} + externalIPs: +{{ toYaml .Values.kubeStateMetrics.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.kubeStateMetrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.kubeStateMetrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.kubeStateMetrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.kubeStateMetrics.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.kubeStateMetrics.service.servicePort }} + protocol: TCP + targetPort: 8080 + selector: + {{- include "prometheus.kubeStateMetrics.matchLabels" . | nindent 4 }} + type: "{{ .Values.kubeStateMetrics.service.type }}" +{{- end }} diff --git a/chart/charts/prometheus/templates/node-exporter-daemonset.yaml b/chart/charts/prometheus/templates/node-exporter-daemonset.yaml new file mode 100755 index 0000000..478f10a --- /dev/null +++ b/chart/charts/prometheus/templates/node-exporter-daemonset.yaml @@ -0,0 +1,116 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: {{ template "prometheus.daemonset.apiVersion" . }} +kind: DaemonSet +metadata: +{{- if .Values.nodeExporter.deploymentAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.nodeExporter.matchLabels" . 
| nindent 6 }} + {{- if .Values.nodeExporter.updateStrategy }} + updateStrategy: +{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.nodeExporter.podAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 8 }} +{{- if .Values.nodeExporter.pod.labels }} +{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{- if .Values.nodeExporter.priorityClassName }} + priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.nodeExporter.name }} + image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" + imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- range $key, $value := .Values.nodeExporter.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9100 + hostPort: {{ .Values.nodeExporter.service.hostPort }} + resources: +{{ toYaml .Values.nodeExporter.resources | indent 12 }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- if .mountPropagation }} + mountPropagation: {{ .mountPropagation }} + {{- end }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} 
+ {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + hostNetwork: true + {{- end }} + {{- if .Values.nodeExporter.hostPID }} + hostPID: true + {{- end }} + {{- if .Values.nodeExporter.tolerations }} + tolerations: +{{ toYaml .Values.nodeExporter.tolerations | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.securityContext }} + securityContext: +{{ toYaml .Values.nodeExporter.securityContext | indent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + +{{- end -}} diff --git a/chart/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml new file mode 100755 index 0000000..825794b --- /dev/null +++ b/chart/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.nodeExporter.podSecurityPolicy.annotations }} +{{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'hostPath' + - 'secret' + allowedHostPaths: + - pathPrefix: /proc + readOnly: true + - pathPrefix: /sys + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: {{ .Values.nodeExporter.hostNetwork }} + hostPID: {{ .Values.nodeExporter.hostPID }} + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + hostPorts: + - min: 1 + max: 65535 +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/node-exporter-role.yaml b/chart/charts/prometheus/templates/node-exporter-role.yaml new file mode 100755 index 0000000..49a6874 --- /dev/null +++ b/chart/charts/prometheus/templates/node-exporter-role.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if or (default .Values.nodeExporter.podSecurityPolicy.enabled false) (.Values.podSecurityPolicy.enabled) }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "prometheus.nodeExporter.fullname" . 
}} +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/node-exporter-rolebinding.yaml b/chart/charts/prometheus/templates/node-exporter-rolebinding.yaml new file mode 100755 index 0000000..e56e5ff --- /dev/null +++ b/chart/charts/prometheus/templates/node-exporter-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ template "prometheus.nodeExporter.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/node-exporter-service.yaml b/chart/charts/prometheus/templates/node-exporter-service.yaml new file mode 100755 index 0000000..55c683b --- /dev/null +++ b/chart/charts/prometheus/templates/node-exporter-service.yaml @@ -0,0 +1,40 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.nodeExporter.service.annotations }} + annotations: +{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{- if .Values.nodeExporter.service.labels }} +{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.nodeExporter.fullname" . 
}} +spec: +{{- if .Values.nodeExporter.service.clusterIP }} + clusterIP: {{ .Values.nodeExporter.service.clusterIP }} +{{- end }} +{{- if .Values.nodeExporter.service.externalIPs }} + externalIPs: +{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: metrics + port: {{ .Values.nodeExporter.service.servicePort }} + protocol: TCP + targetPort: 9100 + selector: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 4 }} + type: "{{ .Values.nodeExporter.service.type }}" +{{- end -}} diff --git a/chart/charts/prometheus/templates/node-exporter-serviceaccount.yaml b/chart/charts/prometheus/templates/node-exporter-serviceaccount.yaml new file mode 100755 index 0000000..a922b23 --- /dev/null +++ b/chart/charts/prometheus/templates/node-exporter-serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{- end -}} diff --git a/chart/charts/prometheus/templates/pushgateway-clusterrole.yaml b/chart/charts/prometheus/templates/pushgateway-clusterrole.yaml new file mode 100755 index 0000000..f4393c9 --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . 
| nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.pushgateway.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml b/chart/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml new file mode 100755 index 0000000..bcbaccb --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.pushgateway.fullname" . }} +{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-deployment.yaml b/chart/charts/prometheus/templates/pushgateway-deployment.yaml new file mode 100755 index 0000000..bddbc06 --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-deployment.yaml @@ -0,0 +1,97 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: + selector: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . 
| nindent 6 }} + replicas: {{ .Values.pushgateway.replicaCount }} + {{- if .Values.pushgateway.strategy }} + strategy: +{{ toYaml .Values.pushgateway.strategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.pushgateway.podAnnotations }} + annotations: +{{ toYaml .Values.pushgateway.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 8 }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{- if .Values.pushgateway.priorityClassName }} + priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.pushgateway.name }} + image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" + imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.pushgateway.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + ports: + - containerPort: 9091 + livenessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy + {{- else }} + path: /-/healthy + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + readinessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready + {{- else }} + path: /-/ready + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + resources: +{{ toYaml .Values.pushgateway.resources | indent 12 }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" + subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml 
.Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.pushgateway.nodeSelector }} + nodeSelector: +{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.securityContext }} + securityContext: +{{ toYaml .Values.pushgateway.securityContext | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.tolerations }} + tolerations: +{{ toYaml .Values.pushgateway.tolerations | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.affinity }} + affinity: +{{ toYaml .Values.pushgateway.affinity | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumes: + - name: storage-volume + persistentVolumeClaim: + claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} + {{- end -}} +{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-ingress.yaml b/chart/charts/prometheus/templates/pushgateway-ingress.yaml new file mode 100755 index 0000000..42315a9 --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-ingress.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.pushgateway.fullname" . }} +{{- $servicePort := .Values.pushgateway.service.servicePort -}} +{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: +{{- if .Values.pushgateway.ingress.annotations }} + annotations: +{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: + rules: + {{- range .Values.pushgateway.ingress.hosts }} + {{- $url := splitList "/" . 
}} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: /{{ rest $url | join "/" }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.pushgateway.ingress.tls }} + tls: +{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/pushgateway-networkpolicy.yaml b/chart/charts/prometheus/templates/pushgateway-networkpolicy.yaml new file mode 100755 index 0000000..e8f6ab8 --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9091 +{{- end -}} diff --git a/chart/charts/prometheus/templates/pushgateway-pdb.yaml b/chart/charts/prometheus/templates/pushgateway-pdb.yaml new file mode 100755 index 0000000..e9910a5 --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-pdb.yaml @@ -0,0 +1,13 @@ +{{- if .Values.pushgateway.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.pushgateway.labels" . 
| nindent 6 }} +{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml new file mode 100755 index 0000000..dd3829d --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml @@ -0,0 +1,44 @@ +{{- if .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + annotations: +{{- if .Values.pushgateway.podSecurityPolicy.annotations }} +{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'persistentVolumeClaim' + - 'secret' + allowedHostPaths: + - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-pvc.yaml b/chart/charts/prometheus/templates/pushgateway-pvc.yaml new file mode 100755 index 0000000..ba16a37 --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-pvc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.pushgateway.persistentVolume.enabled -}} +{{- if not .Values.pushgateway.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.pushgateway.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: + accessModes: +{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} +{{- if .Values.pushgateway.persistentVolume.storageClass }} +{{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.pushgateway.persistentVolume.size }}" +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/pushgateway-service.yaml b/chart/charts/prometheus/templates/pushgateway-service.yaml new file mode 100755 index 0000000..e84771d --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-service.yaml @@ -0,0 +1,40 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.pushgateway.service.annotations }} + annotations: +{{ toYaml .Values.pushgateway.service.annotations | indent 4}} +{{- end }} + labels: + {{- include 
"prometheus.pushgateway.labels" . | nindent 4 }} +{{- if .Values.pushgateway.service.labels }} +{{ toYaml .Values.pushgateway.service.labels | indent 4}} +{{- end }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: +{{- if .Values.pushgateway.service.clusterIP }} + clusterIP: {{ .Values.pushgateway.service.clusterIP }} +{{- end }} +{{- if .Values.pushgateway.service.externalIPs }} + externalIPs: +{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.pushgateway.service.servicePort }} + protocol: TCP + targetPort: 9091 + selector: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }} + type: "{{ .Values.pushgateway.service.type }}" +{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-serviceaccount.yaml b/chart/charts/prometheus/templates/pushgateway-serviceaccount.yaml new file mode 100755 index 0000000..1596a28 --- /dev/null +++ b/chart/charts/prometheus/templates/pushgateway-serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.pushgateway" . 
}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/server-clusterrole.yaml b/chart/charts/prometheus/templates/server-clusterrole.yaml new file mode 100755 index 0000000..c0c0585 --- /dev/null +++ b/chart/charts/prometheus/templates/server-clusterrole.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.server.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.server.fullname" . }} +{{- end }} + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} diff --git a/chart/charts/prometheus/templates/server-clusterrolebinding.yaml b/chart/charts/prometheus/templates/server-clusterrolebinding.yaml new file mode 100755 index 0000000..1196ce3 --- /dev/null +++ b/chart/charts/prometheus/templates/server-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.server.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.server.fullname" . 
}} +{{- end }} diff --git a/chart/charts/prometheus/templates/server-configmap.yaml b/chart/charts/prometheus/templates/server-configmap.yaml new file mode 100755 index 0000000..2e8c4a7 --- /dev/null +++ b/chart/charts/prometheus/templates/server-configmap.yaml @@ -0,0 +1,73 @@ +{{- if .Values.server.enabled -}} +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +data: +{{- $root := . -}} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if $root.Values.alertmanager.enabled }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + 
regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app] + regex: {{ template "prometheus.name" $root }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_component] + regex: alertmanager + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe] + regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: + action: drop +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/server-deployment.yaml b/chart/charts/prometheus/templates/server-deployment.yaml new file mode 100755 index 0000000..5440461 --- /dev/null +++ b/chart/charts/prometheus/templates/server-deployment.yaml @@ -0,0 +1,212 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.server.deploymentAnnotations }} + annotations: +{{ toYaml .Values.server.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: +{{ toYaml .Values.server.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + 
mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- toYaml .Values.server.sidecarContainers | nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . 
}}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- range .Values.configmapReload.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/server-ingress.yaml b/chart/charts/prometheus/templates/server-ingress.yaml new file mode 100755 index 0000000..0a3cb69 --- /dev/null +++ b/chart/charts/prometheus/templates/server-ingress.yaml @@ -0,0 +1,40 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.service.servicePort -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . 
}} +spec: + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: /{{ rest $url | join "/" }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/server-networkpolicy.yaml b/chart/charts/prometheus/templates/server-networkpolicy.yaml new file mode 100755 index 0000000..9e10129 --- /dev/null +++ b/chart/charts/prometheus/templates/server-networkpolicy.yaml @@ -0,0 +1,17 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: 9090 +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/server-pdb.yaml b/chart/charts/prometheus/templates/server-pdb.yaml new file mode 100755 index 0000000..b2447fd --- /dev/null +++ b/chart/charts/prometheus/templates/server-pdb.yaml @@ -0,0 +1,13 @@ +{{- if .Values.server.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.server.labels" . 
| nindent 6 }} +{{- end }} diff --git a/chart/charts/prometheus/templates/server-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/server-podsecuritypolicy.yaml new file mode 100755 index 0000000..a0e15a3 --- /dev/null +++ b/chart/charts/prometheus/templates/server-podsecuritypolicy.yaml @@ -0,0 +1,53 @@ +{{- if .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + annotations: +{{- if .Values.server.podSecurityPolicy.annotations }} +{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + allowedCapabilities: + - 'CHOWN' + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} + {{- range .Values.server.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/server-pvc.yaml b/chart/charts/prometheus/templates/server-pvc.yaml new file mode 100755 index 0000000..9d1cb37 --- /dev/null +++ b/chart/charts/prometheus/templates/server-pvc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} +{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.server.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.server.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/server-service-headless.yaml b/chart/charts/prometheus/templates/server-service-headless.yaml new file mode 100755 index 0000000..3edc58c --- /dev/null +++ b/chart/charts/prometheus/templates/server-service-headless.yaml @@ -0,0 +1,26 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml 
.Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }}-headless +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9090 + selector: + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/chart/charts/prometheus/templates/server-service.yaml b/chart/charts/prometheus/templates/server-service.yaml new file mode 100755 index 0000000..a0c88ee --- /dev/null +++ b/chart/charts/prometheus/templates/server-service.yaml @@ -0,0 +1,50 @@ +{{- if .Values.server.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . 
}} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ .Release.Name }}-{{ .Values.server.name }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} diff --git a/chart/charts/prometheus/templates/server-serviceaccount.yaml b/chart/charts/prometheus/templates/server-serviceaccount.yaml new file mode 100755 index 0000000..68c6412 --- /dev/null +++ b/chart/charts/prometheus/templates/server-serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . 
}} +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/server-statefulset.yaml b/chart/charts/prometheus/templates/server-statefulset.yaml new file mode 100755 index 0000000..4569fef --- /dev/null +++ b/chart/charts/prometheus/templates/server-statefulset.yaml @@ -0,0 +1,220 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.server.statefulSet.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.server.fullname" . }} +spec: + serviceName: {{ template "prometheus.server.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: +{{ toYaml .Values.server.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} +{{- end }} +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . 
}} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + livenessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- toYaml .Values.server.sidecarContainers | nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if 
.Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- range .Values.configmapReload.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} +{{- if .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" 
.Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: {} +{{- end }} +{{- end }} +{{- end }} diff --git a/chart/charts/prometheus/templates/server-vpa.yaml b/chart/charts/prometheus/templates/server-vpa.yaml new file mode 100755 index 0000000..ef3604e --- /dev/null +++ b/chart/charts/prometheus/templates/server-vpa.yaml @@ -0,0 +1,24 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }}-vpa +spec: + targetRef: +{{- if .Values.server.statefulSet.enabled }} + apiVersion: "apps/v1" + kind: StatefulSet +{{- else }} + apiVersion: "extensions/v1beta1" + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}} +{{- end -}} {{/* .Values.server.enabled */}} diff --git a/chart/charts/prometheus/values.yaml b/chart/charts/prometheus/values.yaml new file mode 100755 index 0000000..dfe4a13 --- /dev/null +++ b/chart/charts/prometheus/values.yaml @@ -0,0 +1,1468 @@ +rbac: + create: true + +podSecurityPolicy: + enabled: false + +imagePullSecrets: +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. 
+## +serviceAccounts: + alertmanager: + create: true + name: + kubeStateMetrics: + create: true + name: + nodeExporter: + create: true + name: + pushgateway: + create: true + name: + server: + create: true + name: + +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: prom/alertmanager + tag: v0.20.0 + pullPolicy: IfNotPresent + + ## alertmanager priorityClassName + ## + priorityClassName: "" + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + baseURL: "http://localhost:9093" + + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + + ## Additional alertmanager Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
+ extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: alertmanager-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config + ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configFromSecret: "" + + ## The configuration file name to be loaded to alertmanager + ## Must match the key within configuration loaded from ConfigMap/Secret + ## + configFileName: alertmanager.yml + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress additional labels + ## + extraLabels: {} + + ## alertmanager Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + # - domain.com/alertmanager + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for alertmanager scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## alertmanager data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
+ ## + # volumeBindingMode: "" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + ## Tell prometheus to use a specific set of alertmanager pods + ## instead of all alertmanager pods found in the same namespace + ## Useful if you deploy multiple releases within the same namespace + ## + ## prometheus.io/probe: alertmanager-teamA + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true + + servicePort: 80 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + ## Security context to be added to alertmanager pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + sessionAffinity: None + type: ClusterIP + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.3.0 + pullPolicy: IfNotPresent + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # 
configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics will not be installed + ## + enabled: true + + ## kube-state-metrics container name + ## + name: kube-state-metrics + + ## kube-state-metrics container image + ## + image: + repository: quay.io/coreos/kube-state-metrics + tag: v1.9.0 + pullPolicy: IfNotPresent + + ## kube-state-metrics priorityClassName + ## + priorityClassName: "" + + ## kube-state-metrics container arguments + ## + args: {} + + ## Node tolerations for kube-state-metrics scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for kube-state-metrics pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to kube-state-metrics pods + ## + podAnnotations: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + pod: + labels: {} + + replicaCount: 1 + + ## PodDisruptionBudget settings + ## ref: 
https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## kube-state-metrics resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 16Mi + # requests: + # cpu: 10m + # memory: 16Mi + + ## Security context to be added to kube-state-metrics pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + # Exposed as a headless service: + # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: None + + ## List of IP addresses at which the kube-state-metrics service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + ## If true, node-exporter pods share the host network namespace + ## + hostNetwork: true + + ## If true, node-exporter pods share the host PID namespace + ## + hostPID: true + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: prom/node-exporter + tag: v0.18.1 + pullPolicy: IfNotPresent + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # 
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## node-exporter priorityClassName + ## + priorityClassName: "" + + ## Custom Update Strategy + ## + updateStrategy: + type: RollingUpdate + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + # mountPropagation: HostToContainer + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # configMap: certs-configmap + # readOnly: true + + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## Labels to be added to node-exporter pods + ## + pod: + labels: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + ## Security context to be added to node-exporter pods + ## + securityContext: {} + # runAsUser: 0 + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + # Exposed as a headless service: + # 
https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + enabled: true + name: server + sidecarContainers: + + ## Prometheus server container image + ## + image: + repository: prom/prometheus + tag: v2.15.2 + pullPolicy: IfNotPresent + + ## prometheus server priorityClassName + ## + priorityClassName: "" + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. 
+ # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls DB locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + ## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
+ extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
+ ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + emptyDir: + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows scaling replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Prometheus server headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + readinessProbeInitialDelay: 30 + readinessProbeTimeout: 30 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbeTimeout: 30 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + sessionAffinity: None + type: ClusterIP + + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data. 
+ statefulsetReplica: + enabled: false + replica: 0 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + +pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## pushgateway container name + ## + name: pushgateway + + ## pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v1.0.1 + pullPolicy: IfNotPresent + + ## pushgateway priorityClassName + ## + priorityClassName: "" + + ## Additional pushgateway container arguments + ## + ## for example: persistence.file: /data/pushgateway.data + extraArgs: {} + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + ## pushgateway Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + # - domain.com/pushgateway + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com + + ## Node tolerations for pushgateway scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to pushgateway pods + ## + podAnnotations: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + replicaCount: 1 + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + ## Security context to be added to push-gateway pods + ## + 
securityContext: + runAsUser: 65534 + runAsNonRoot: true + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + + ## pushgateway Deployment Strategy type + # strategy: + # type: Recreate + + persistentVolume: + ## If true, pushgateway will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: false + + ## pushgateway data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## pushgateway data Persistent Volume Claim annotations + ## + annotations: {} + + ## pushgateway data Persistent Volume existing claim name + ## Requires pushgateway.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## pushgateway data Persistent Volume mount root path + ## + mountPath: /data + + ## pushgateway data Persistent Volume size + ## + size: 2Gi + + ## pushgateway data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## pushgateway data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
+ ## + # volumeBindingMode: "" + + ## Subdirectory of pushgateway data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: + global: {} + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus server ConfigMap entries +## +serverFiles: + + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' + # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. 
+ # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. 
+ relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. 
This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. 
+ # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. 
+ - job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config +extraScrapeConfigs: + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager +# useful in H/A prometheus with different external labels but the same alerts +alertRelabelConfigs: + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources. 
+ ## + enabled: false diff --git a/chart/charts/redis-10.3.4.tgz b/chart/charts/redis-10.3.4.tgz deleted file mode 100644 index 2b6741caec685fbee65ecb44f9536281b8d2148d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 29578 zcmV)hK%>7OiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POvHciT3$I1JC<`V{*o=bYFbQ<9xqt2vqX=Q?TYw~6D&cH8Hy zv@3>4NWz#RSOB!6iSyl`g&PT8MOm_(X3Uw@SR}3+8~ct8h6BR;hf~BvZ;oc+-|pZU z3}@_cXq*kdo0dYVOAb~mnT(HC!3B$lKyTS}OiF_>E;^3nQ zd>RQvB3X_^6TS;m#HX0=LP)sayTGyFfT936WdaAXMJ#x)gQE$F@DL_c#9?PjXLtxx zA>v`buW-FbXum^zIUD(!5BvQQ5fPe^o=<0e#AP#mE|7o8b&WomP4r)$dhPA+4?gZc zdD@GkN#_#JuPF=ou;YO$)A8gS6gWDbn(PR1+?gSw=7X6kif4#~Lpw>#`2TeV(E*5% z9QXJ7ol_nMjQIaIz+5&rLrjpUH)d&t+=q^Sr$?n1P4#W3Jt1P6jMS`6=k{MOp#CLh zp00Xj5n^x9d(wN_xo?zxc-Hu63|HvN?}`C5)BmSWp6%}!>Ho8*g9rM*iDzpIehldd zh44!4c_1@1!4M;cW>{dxJ6l_D5@N*hE<{vd5K|;TObLf;5{5vpFk>W;?{TT}E|AC% zlYm4M2yl#}07pKNRNSZ0m`oDG7!t1QPxMeU_1xNm!-O#$2|!nfglH6|C29sc)NfAl zgmA&;ia)3~CrKEdVV_|kC)Clc3_Du@`5Hr*&F$aEXoj;AP7Asofaetc8PE0Cw3g(B zXo3$z#JOzt%=z5`O_qo7>2pU;)KGdjm2b_o4BeHV86IcsN)l~NpcU$&Ia7^L^){xQ2rA#_TIc}Oj7Xq~{+Jt2OFpv2lQ>qq+LJ@{P!xC!^BDVr zTwzmmOhW91B)Zh2=z#3gh$3EqM+8OQcOH8-(Bn9Zu}{WxK-n6d(2)3Z{rMFhqa+ku zqS)hu@EXneV<6*fHd0J^93p|ac)SZZd)FHTC>7L#xc?*T%9Jrlg1c)&- z2RbexWY0iD|AJY<1&lDvP?V@Pe1?(00gy=TGAS4E82PwYv6>YV6Nq2(e!645md-%7{b1){R=mpg2VKdT^dn-+*vM7G;4M zQvE8KShlMKYvN-iCd;-p8UT&5E>r=yCSnS5Z>pUl+e$l^t!2HFhzlw6^NuZ$o7EVF z9BZ69#eXJ*N!rZ06p#TWVk+zSNTN2-DBlke9Ealyd`yLQL1W_czOB&#$u&+)^C?72 zl?^T0nNQN)lK_Yp8T2_){x+*PwlXp zi16CU@Hz=`I>82Fd|PsSdxCuTBsraTi7YQ7%wUMj85UBc(Ldxah=?T7hg0ldo|74- zi5SAO4#1xYWywquH-La%M+akp*;^8ZgkzsZ0UyG1DK2*a#)yQH?xywkzSSS3co&8^ zJex;8AHtq|MVX|S&!4>bPH*IcARu~;%;vG6(bQ&4S)SZPG%&olncC=P>}J=$W+)!& zs;?E{1Iki5%mos}&-yuf#Z~7R`BO;+t$8M4=V-@aIfa^I_R@~$gwZQ0JELe0D5J|Lil{)E zfLu?BKZO~ZORmVYsA%O{A4T$Q!j);jHmc--B5+)INNGXJc}(_2qj0`v_}I3+kfOjF znIX7GkPP?xQepu-Ns47)-?(4RENe z`*9r3d+^&7M^=1ORd&^KNX><7O+(Jpr;K6WsJnhha3nOa9L{AX0!Wc9tC~psv=TB` 
z-T)kt$QUMQWCkEf=dLNHT9l2%Lh`~$ioA-AF*;LNs%egHVY@g)2rw5U(t?ESe5aSG zwI~ef_3MusH4MQv5{dj$?v5(prve}6J^24i=}-bp&tH;(^t;yPa^Dmjx2X|Hv%!#P z!Z7E@XK#`4r*B-uOFx~^p!l_8F#N#xl^$A;FK4-e^$4SX<^+jzBjieHS%S9El(qo& zQkf>A8X$7=MrkG7EGg0r5uQp&0VG^$?^6tAP2IRgIvqAbe(mf|zdtz4m`~feN&Zd4 zjDwseAy#;*iz!Fgm`X&@n_Ct(8idh=KGMg~?}WqU-$v^43RPC?^N^@qKI=T8wszA7KaQxqjL z%%r-a&0i$H^PDl*?N_ozx3>LiV0&VUJ?-!H>^wXgd1J!3@K_Q_?iQyL?{+Pm@JGVc zYH_8E!Zw*ilwql0;44Ct3@kgHt+ce(T!+eqVP|rQ=MpMP?huUND;#Lbd}>T$vWm9v z<(siB{EX+Calh4M(CGMx!0fxcDizSBty_}EZ;$$Ty;qc_6PmH*{-m$O7{lE_7HiNRq`%uMY%wn|7MoYmXaO8iGOi#547 z<^WZ82;DwQq8^`imoi3}ox2`Wgzm@@w{sI2hiJ0w#bT+xhFIWmo*8jQvX&%$ z(L~!=E4b|u7(Fcy!qwqr&r%+ ze;>Sg({T~S>>vkUB)8VuiXse-GSkj_lWI})N(y)Qx^Gk{PYTo?9sLXYg7JgVeI&A@ogHbJ`zIMgd`pip;Iq%XBTM?iB=0`y>$r_{!B7t%q9eDjH!p`Bhsv z43kg8J9>upk51G`y4?)X1>$U?T}|sqG{uZau0^@r1Ad5@ z4nc7zE61+vh}KlDhTlr&aOc}f;@HYrd6Tg*m|-!+35VAo)qxBKeRzunBfj=y>pv?F)~tHm1e^Xe8%L8N?Dd}Xo;im)_Md37 z^Kc%b-FXPT2h8Guh3^KOlv@Qs$>EqvnocztMb?W0tWa;X6n?-me?ItA>|c79c&?7G z{*33lv-vwQ#q4K1Hvt^BO4fY|77GLkS8&j{m+lV<4em1(OFA!EZnvUG#t>l~n4r0>?8^{ly2X@P$;QGF@-Nf4*NkvjCX+;=)P+oJK-#L(%db!FcW4i)wQyk9v{uD96`+{NI z*8wh)R*phk1*0{2+?ke7Sds+WKCnF4ZJ_x%rxbE2MB^s4n^d5qw z3|{u4X^Sl*S{F=rHKw^=cq#_I$=PTm1{uNzC=F+|N1u*Pc5MqP*7=%n3?Y^(z$iq~ zWrcyqntc{ni;P+IC?dj1oZ_DTm!Gv9e1h4GaITv=oT6x=!#e48gjs4wHPLoPbXIb6 zOoKVD$KJK2wP*m+uG`6raQRtTz+)$?uXjyj_$dI;uuGPN274=slEa9D|M`5 zuB~A3$_BN)`rg%CcU3p8{)|F~Q82e?D_51v-PzSq$!9vSTJEw#yJqseHJrwnjVYVi z0BlXxBxhHeXCTqQ;2JThpYffJZtuu$ozkXCjjzhI z**>L3wyO$zs&Yp?DPs*p!>q9|uRFYUA@Wlaa2_9c*P z7^MkBRT)v@Vpk2PUCP61c_`$*Gx{v|n4B@@v<&YbzdL_@))ODa8vAI@|FpNi|E!q* z>8bqukpF1|&o|$g>`oG|V*U>t?1l_CSGkt7Qoph48K_a;h4ASI8a=WE@l)Cx6 zM_uT_=g&Ra|EW%=)H_aX8lOM+_?6#t2A4aYEXOmZOnD>qLEuzm5XaA-g69R;rvcu{&H0?#WAc&>sM%#~O)bJ+4^EQ^ zkaMs&WkV7T5{`9lVVy`o!^2LSCDEbX8iZ{BQXE(6LEEo5HVUc8vc@6KNj;Yg@ZY=JH@AQ4EYsZI`<=)E$8$2qa=%5S+s*Nb8PIERyGuTOtHI;=p;)_t~MLS9zj z(>9D49oYGm-*3CW>U5rAoi6m>W$^Cpzff`Ydj;U;>a7aSY6hlq z#HTYl{tEp6OrAuKrhfiuV5xQyj*a@t%m+b`?ak{r}0}>0nT_|382JaR1Ln 
zo=>0p-@p}_4OQ&tn1ooFsAm!^{uB@4o4(qx{ck$2KgKp2xSPvOxd?h4Q_eF%zIHQf zQsrgeRNzuiUpS?LYEzNa$)pZ9AXpOl@cfDTlg!SN@tAysu9r0?DTt>@NPcwMX%UAB z&t-pyZRNur&7DF@WZkJ7yegA*7&8)yF?9cnd;cY=k~<0gx4Ap1W{8J6l^sgKpp(u2 znV?XGZvl#9HPT+^H>?4m$_ohwITj8h?4yKZ2;3Ac_5{nP6J3M+Y@O>y8@xIwaCGn(bj-3V^v<+{#{I-pO&dD&#%%I8!+2R&qQ#8*A?E(gQ3aRIWAM?=l;q?r)8Kr&uIRn?O+z3>Kd=b#vk zNJs?1yeHAvw9@;Lh~genK!R?T{IGwuH^Kt#H32mpD2XP$%WqYvz|~$e$QFS~MS&?G zm0|-b09A@L=-DO~fokvRZDFKWtr;#E99M&ei!`@}M@!^c6(a30rV+#1KuDjC&fVT1 z_i!C~Gg5U*{=3`djkk4 z=FwweI|Cwifm67~DrQ<8W}d|%HVHgcEVrlDF{BgX_c{k5r^;vTBiW2{3IHe3BvdM# zWt}1V_Z+Ysw2oUNTm>PZ$Q(h^PKKe1a9smk5r3Wbr4c=`xQ@+AdyQ)-m;r1%jc-UtT z`|L~FXW7Pr?KCN=Ib5K=)hb4&RO?-rE5FpHE7n}N=kgUEw%o&(`_i`@$sK4hr%}8( zJ7lt8GnNjewBD9AHJ93;^=&NNs&!Q#HtoZvy}wOstaL>)r;V{ixjQTB_GJE2jpI5a zSiV@#+;x#nGPPW77kng+7ig!hH8uKb;HB+$%^%%Xjnu;IN0W*wjY`vjVzrh*H;$|V zEcwLy73Q+;qM|iswr(>Z7b8b$qOGnISO5p-W`P_yBqgeuXZ35Rp(TJ;RjU$y`M|y< zj3fDrE9!tzZrGd;*w!i33j``(X~(67jV1GQtnMOf)vIW>x+S+HLs4)L##3~h%wTWO z9#ZC1og%)qNTbRuWb)1)w@2ihU7nq`TTTbg)vM+3Y1ihsU3JF~eExyYe@Vi*xl~>u zC!`=T6j#M&G@5dqL!sS!MP_}A&l<2y*kY3L8>64Bm z`BV|dNyb1~!7lEco2@Y~RF!ZkT-EnFttVc7)|C|(&Ntsi{e+fpt8VJcx6~3yi`TYD zSRU5->tAP=%!9gK_c@C1MSfkXjxJ@)(W!QUU<@G07V_I%jgQR1#ScbaBDQ&*`6SYTh^0JfbN zzLH5M9~zhq;PdC5U5HIM2vuq@k~~CG_w*1_#b#J83(?!n-i2{3+?!OqTf%=`kl#G!E<4LcXf9H5n+c{m7FTO)Qb)TV$mIjM zEKE5SK@@_lPA!|;`7(*+t>(T?TDimk)&%y=BkhuNT$WJY_xX$aev0RR(rksjIhw8M z1W@z&pTU!7FP<09|2*3tym&bOvxz4Q!XCoaUgwfT!O+~z@)pIN85Sr&0u4Km8DN@^ zBzDrItvab|99LG#RRDe3q_Utngp-z3w^*Txu?5*96ZXO>Q^*`?8J3P$$^v#mrhOWX zRcg)xaZPuEZBwPhWuBBiLV|;Qn!=$v_a288)q8LQBnqTd^0Z*O0;8^GgOD8sgjJkw zbxEMQaH1^CIE^)q+#DKhoM*RX z7-)oOnWtEQ%EIcamt`cHV_u5ePz@}Ev%dMH8O+6|+Cy2_Yz5@nqPKx@aTt|JSJJno zb?ElREY`W*MkETr+~%c|z11b{iuL(py*@TD+@wsetf0~A42yP|eROee&|6$|ict_^ z&TpUas{U|wc^-czlqIv$)B}1Qd1x%hJR@O9xW0|46(~vtWMc=it+#Gd#V*H)g!BqC z&s+d?yS{8I=cbWjFEL^$3~}i3dF0=6KDLcB)kFivF5bXtvbp9xTyAi|Ha36WH}@~g ze-^tviNtFX0T;-BdwcsO`S1DO{)7CtiKkNjGs4tuq`#Cr-nAImDB-n|6M2k%Ik@uu 
z2fqD#2*)(S<~B)lGol8B8dS=&hIKSul@N$cuuwOGntH8RXs#>ImZ|Qul2MlW%^Jyx zO!+G)TCP+h{O=p9AD*^P1^o}j)LW|=U;+Jqu|Fv0|9<}T=|leSjXdku|2>UDe;qSG zOrpt$^s>|sA@xz%`b)HC;U zn8utDshU$zz?xDn(gA=<=w=#xcY5x8i0$Q8I=L4a!+YpHvUBUg|9J`BlXu5IcHxhI zfSBUQ)WrUjx&`g)k3`r@W{F)#dmrz^GJXlm2><=x6(8TcJ9u?;{G&~jz}00?=>?id zAdUr)$lM-sD{N>bU@3&jQl>tj%Ic_c_WJnz==k-UO>cd~I8K+6_t5oaYIEw7xDTIx zc=yZks}DyfFZc9p)Ww16dzCQLeql*Og7nkrFULo(?6?2ZC-IG}d1DKi0g*9mr(L|< z+etqF9G$#;wD%84zYhLkK3TBZxYTWDEcqsMKF_^>q%WM3S#!mP^vLrqS)beQQEJ+_ zjOcZgY6gofF8{^#|GT?zbh4|)whNC2c80pw!Xyhe+5*S5L0RkBfNu}Z&R?G{-iL*9 zU&3}Q3~7m7nQ!2ycW397+tYUN?)3b_@h@+`e|>si?D=8M|7vS~Z@j!(LjQoGpnyfP zJog!i#fN~fm)%F(0b%L@YxmKhyVLF1wm;~Ev@f;Oq5CMS)UQznx^lU1ZgRCB{r?tL zPsi6#UrqLr^PyUlz03KQgyyBDTi(-4&Gx=E;Q#EWX#X+CM4Ve^SGNEyvj0DSQMCWO z7`%A?VE@^~^Qq!m5qngqe7BnYlD`EzEBN_y$2BNrcUt%KrP}IbK+(-$TDfJcNip=G zdW4bVpEb>!d$-*{i&k}W%=im;aX@w+eS+c9C7$oXBOQ-hkus?;Z|fGw8d5F5=g+D? z)6Nb2nNX4I%|OTcS8x=lTa}IfMEfpGpJ|=NOK5L3w>@RIoG)I&V%-M!#|#O7`o>w- z)(g9E!BRvzEBB}Z2=s5SHYfZYL@w6R|24XQ7t{aeFN*%(XM+d-??#@Qz314;*6tzB zw*Fs)de+mXo}Pi5Sx3ZHXfkQ?5pU?lj9uBa8{y%;8{7KROyZ7vY{%_W-;K0gFw6mrS+#$4S9rDwtt$q*mEO}IEsq67X| zzm){oAu%%&6@)Ym&_fyt{898wlP;m0p^xpWOeq&fCx_V>{$2Xmv~I z^6i(P<&+DPu1WhwgG_0xaAOrqrH8D)g~}ua?iC6}eXsa_QQxIvwVfMaJ1#i~tk}Qg z$;=$IQvkD9Qf%vsC?ECC4a9rjBaTmym_oPuU|x4e9W>W@MtPPeAlFk`rG00&=tWBk z?Yl5HV>Gv4izEkm|7R1X9OPV`?us>74VFip#UrXN+hRzXLaAZpFE1&lop;g>JZ~@p z=cU4uk+4u8!(OYeEUADj=wGLk&e@5~t6>Ek>uOg4vW=0bd{xg7x)o%v>PjC;1M3XS z1_2qNYP2@HS=MpslkT@g1KlRny(VH>Yy^$RBqGAO9m2jzb6IR#Fz>*lrS?gotB3}v z#WgGIzO7o+s>&-2LeLO1H#`EA4=8=vFa)>i3M1hD6bDI&S$@rPuPfKPSh04kv$V;> zoXxwP2}M7OfK;Qqxn9P_a8q6q9BsbI(Sa;+yZ%^Z$z7HZHIO3LFtD_qTwzT%>5sAw zJm`-|)E^-}b>4Zt^W(-Djgw$;d+}H#W{Djb?Bwm-3C9ctfskw_C zy*oYs+}$bT^MZ3sy2Zb%CXG2SR!M@pMa;;zp4HN@MTwX$tW=#$G;1nmR43E-W;DQE z=ti_$zqk#u>PEQgLU$o}Yv!`B1qYFufNhH5kAk5eLNwv!?Od5eudad_p~~!*QRRi8 zseM_Ma!@CaD-^9S29G|;QN91af0TMGt+35q!>%xsIASYyo5zZyE0>yU-l*~G^!3S` zqr-#qqj$$2-oAVF+G$96AsyAca@IAwq1`dY@^Et0vpkk6Y|`qJwb!Lr=XIJOS>0)| 
zzaN~a4E^Z7e#Y}tJkEb&a>AgpRkawJtKN~`*XF(cI+`&g;AEkP)seE$;7V9oXlqrpEHt+|UKSd>AH*!5zg{n9GR$eheB5LQ zNXukafW3j+h<2X7l>&MNIZu;d*+)t8CrC_(cCIUk4SB9#(@VY3(1z9NE&?P_b@R*W zpgbA1sc4E-jYu1%TTj+eooXg0X0caLmYSQCB7ENJ;~I&JF_&#(ZuM|-xgDX-E|b6Q zYZ=o_;hwPnSR2h>-sF*{=~1YdOFP`&sE{?{S{L$-E!B0d%eHod%3MS$?}>!c;xt5s zCG5gK|GBGohN@lYDrs986}&HJ~d~t z>WTi{nSN@zSV7OOY4LW7c1>fq*0gKdTS3*XY4Lt^Z72WfN>E&E07{#@LlaP4AL~HQ zwU~kGT3a1G*JlcfqQ8>@2c7@00>BDXX1K+Lj6tK2qeaM0cfVcvy<1G>zA_ zT%y_j%m$vQKG|`f*!~3ox=PJOky9-_KRQ|W6t$x@l47|`_y^PTP zqvMd77A}QByB$&n(TXD7mm(1@ibZb3dpjA4|DG<{Lgvr~TZ<(q=tE22=@aHWJ@DK(so;|BZ^jeO-fS;Q~OYT-oMU`bG86xm3mq5k4 zwP2%GPhnE!xJxTjqZh5R6nQwi+`ECxmj=|$C}qaY>>=_Ye9GZk&c<8JOHn5>wCuC; zoVp7ZU%p6}eTi<{WuaM$_|y^^m32YlMW=`-{j69mcGLtt)GWu^gBx1+oDRO~Al~XC zrfT{s!vcFaF-?O|JVXoiT)%j}cFAm6EpuhH%1C0j{4A!C%6gWIV77;x%^;O&P!iRG zl2jR0?!wZkneSL9t*(nFskghi!1y!4ToiIpaY0!+^#;+1{JS!Blye%@8cobGE8VU- zRhP>ppAI#N10?X7U`XJ}T#u@p2G_QhB}&NGvxBKQCFay)Gz>{J`9)PfK}-uhh!o5^fc%HSWgo8 zMfty;K7U%e|8@VxL;jCVJQV_76-!@#68OJJc8~_z-8uz)W%≥G6qgI{kZn^};wl zwaNXSZ_Q2po?Ry6CV;OfpU?lkQwyw$tnE!n-xt@elQ8dKz-LOxIaP_h%0qX*4mZ`CG#z|Ohg0h9$>P+oBAx)`E<88*h5vmny*kwspkK3 z=IcuXy6FCw^8N46_Fp{Y|J=w^*r;rTe4FyqEAV~w4JBeq8Tq&39=*$Nd5_R6XV@tX zZzT?V`EmeDLT!g`oi0@pRumJH`4QefUYs*kmDR(3{6{CRDZ~56F%7J_S7r4ln44l( zn2l7CgyTX@eX$h0U$_Q;bQ>o4u^^9hvCtjnBwi(*bFV=SURk>o@kVbFNYj1n$ z6^#?>!fN>ZxyP^kUfJgJip~&;Twts-Gj%029CC8|mj813wW>k!kjJa-3ARS~{k5iY zKPkyJk|kb~?>o#CR?+?{nV2e?yJZ%pik5DlfvKXgRkANtG*_&UYi(_bm+JC$RiFec znN5CKfY*r(3wr&4qTquK9A3u&D)zm8)x4#uGo0n!6Sq!rR@KAGyJ|hj&8nJOabK-5 ztyxuj8_H?6+SY2$hks{#tI+M{HdnSuni4>m<$Ora)|85*pZoMhrPA-@!1|C$|F2{z z{_9v?A2R7bWYT}gq(3GPnbiNYEHL+Dws^>-{*X!iA(Q$;CiRC*>Wxi)bqM;9Nqrs4 z`a>r5Oz!NIpjpf*@sLRUA(49eJS0+oNTjZxzholy`cTNS4fG)|dai}9B`{UIs(LsImr1${`0ez!@{+i#pZO6a{*X6@y&fLEmjtrbmb5G%(B zD%#IX(f(PFB-&Mrs2NL zOscbRf2FA|AD)V*G5^bY(tj_^|NU&RSIqym|K#~Y{+CTWEuycso05YoinjLb;D3?K zF!lVTZEo<&;>)E5&o|MS8N4=%vN|#NvU$PtZ7oa-UQxU*$#wxu%OwS0T=`RroZxF2 z?TQ(}+c$jI3BmItSvnthzIv;4;LTM&Z7LhMrc7UtgjCJ{CDx_hxRvFA#r*$CG5_O> 
z{lNqO-^A16V8Xh&`(Kl%F^QSoS zT}%0=d&n=X_kVAS!x@=Gl;Jz*W6}PXdp^Ja_n*Fa*#8@OdJ`f#Tk!6;;Uux?%z|%0}Iv$vNaoUIywsjv{ zTW}PK^h>96J|$d>H!z}9aKY3}!Cfpq?Dr=`Op{U1r?WJq-u&wY)W5{cQ=`~{0LLMn z&!pB*BiT?!>C>gZOkec~-2vkdf`m(`i7AHnKjCnebuK5}018vz>-J>{mrgCZCsLAXoZsZJFF0yW3&tVL5EO4mXCy^a3?|}om zDdI4~I0AvW&?tFL*`*%C@sCHx|1TUx6NWk81s^lvnHlWDI81OP0@d#cqcbd~IN=g@ zg2u9Y1&{B4J~_?LKrXM^JsJy77*=PSIgu>2<2ei#_Ii8!fA4sDBKy6)y}tux`qz`* zz#CzKp2+Vfze3FD6_LWW+&D?B?CvB|V@RR^vv4k#h(u$C(v4=YN};4(Kn1Q&=bWav zD&J~YDVWHgMpjm$J6z1>o_TRG%r-*-<2_Fm&dOHoqjTAliQm%;%QXp7K%HV2G#2&D z9BtraZr90$#_;|GF*L(+<^R~;iqjuEz_H${ih!tQvacYfNLZ}2t>OTJglRykY3Xh{ zoqxhN-<*^9n{S5jhHwFL#%7+}?2Zcs4%wC7&A&vX4)Tfp)DLio1y-M3Fgs{phPbJd z6)yvq;TgTc9PIMCaJ7L+91!Q!CrH>GnIVE+piotC@WCK-3PauLn1&&}R9Zci6cd)Nv?i9lPUj};5!yUA@CtLEkyx$$m-5I_>4_~}vvUL9G7l_S zelySQN2MTeF$w7inMmtXJRw}L`9(dne^kZcm12Q978{tUw(45m_PK$Jh=jF+ZmL;? zfOAhmb$dk!q;uD1K1JHOlFlMPjG@f{mMZ2_H1RX0Dyi3Ip4;PquI_*#Z(6Er^2X5FI5q6i zE6f-PFo%n@^3LsNbh~-1Wh?w=B7%YdF(*A15JKCR#(|?Rf(&M(y3iU*kI?1dkb^)> zIT3UL4Dt|AqUsm$+`vWd7<79mI1mtM(xzS>0s?Ic&zqkcxcGE_@Z*Q$gSW3gFNFjS z1hQjO3vl2l<2128W_AD14O|?JkEu9enBzz+0fPbr8pQ$I*IA5xGM+NF(iS#y182&-Bqa{n5JAZScOI=N4Ja03^DC5th*qNWRh z>}tq{mTurUnT;@$nz*XKR~q`7o*TH>yPaUHAdvS_FO-Os=a-JbRz7u)N|f$V0$6Pg&Js27yaFx5jq~hjh|{!blPY!3e&>0gM@)DQZ^) z*&3e}A>afsEN(;7fpofG$g?6HC>;G=LusN;bsR0d=k8JI*gzO!|ek>pyu>4C6PPzH4z_11=-M|^f z)&@LBv#@XIz9t)*Nzr~C?xzoltqW9s~qf0>V^>(*OXmqwP=xC zbOWzd0XTMRt!*l|d{(3bc?h5|q}Q)M3e2J^Sl+-Z8a)I z{4z}dpVA}@RQQBy?9Mf)73F~f1myZQ`Haq|#!q0P6$-N#)J%KDNi8=I_%T%nvY4#0 z!LFjFga8{Mf5FPTeU)+G^!tOuEN#r@F<@mJP!M1=#66aTc!9s=22N$6`&sJR$GFgM zQ&U2qkUMt~1kQwt&H+vW?D>d6oJGRfbg;Lb$!(t%aUh?%cYz1h+6U6Pei4x*D|$|g z5NPzE)pgIl76=s9M^L{?D_|`Z? zlci+l8~F737Sk_6Af`ddAX3qnV<|N{zl6bC-AG0sV!AhfJ)oa5Mu9m&Fm3*mFQ*L*u!*dn~f~Kfr~@!H9I<4K{KkznN#I6Iw-I} zDGiyb%4{$*lrIHIW+K!tw4u;cSr4Sv#0~Sgi`vKojzlEkUvI4L=CK}17&AhZ)jUL;AJ^DRZ>j~qQr$I7Yngeh zh6A}==Ozk7B`L3lzzL&QB*0SlOJk!>Y!l+wEXVn((^_U8E8;-;GO>D-p^l_NaPsS+ z3ar(Y^s(=4L-ZmVtck+4p2uo_!U>7(DiTHWm3v@{O>TQYV%J8IXy39jIBdj? 
zeghZk7Wmsr#kCj8RpP)|G71P&!qVJ&O$-cDxS$Q|M>lZM?XF-guYf@9nWGyx%N;PH zVLwf_r&x;Ntx$M!|24 z2>dScaclQUEe?=>*N~MPI3xd7K0`(P4Eo>qa|0LO{zz8k1@)I3T;%%Fw+;Ze*DpwI zePfWIv9k8qwr$(CZQJI~j&0kvZQJIKZSGj#>^b+xt-9Zzyj9(u4&F*CX*>zPom%46 zwr_KI_W+32I}3c6T&eOGctnLp4?$nUMo$2uxxUd%d=}Kt1hYo>m~^9 zGZMCJEdIOQS4RZ0fKob{#jcVr)glQ(rBZ@H(rEbLw+Z?$1=db9eeQ7G!sC2p3R65_ z+E|LnP)_OWFDaY(Xz+CIL`{)i*+=x6d=Q+dy5p)yZhCBU3*uipX#amnS+(&de}wX6 z!#wQJ(_HY|f88%>#GOHW%mhxY3PS=96mz+hrPlTf6=eClWXdwA;>=*(^ky~@4lyBXTe!ODCAAyeUDt?c7ybg30u==+Ucc_8^}!jJLDg)>j2pB_ zl0yU}Y8-+%`3+rEZrD5hAA&@wFzrzQaAW&d5~;q*2+$1HFMw9mT|3ZT6DxQB~2MxpO@L*P5GEWE-j6}`#Wp`d(kpH$_FteUL zOJmA5J@9cfxI#cM39%%SB53p9DdBK3qk(qH3vbJ9|D&kOqfLk|OA7LSPI zK~-9GDH4#A1mqwsp@9@Vh+n#e-8~e=%Ziv1v_I=Ez#s(ybh=0&vrg?s9)WsRJ_G=PSf%x&PeQK84ON@66Q1@K-K=V@^T@h)0Vmtp@Htfjo9aST|lc#_rG$Iylg) z`yqe}LM8tnS^1x1)Ue>M=ws=kix$>O!Z_2Vx!DASwXQQ8QN6g{x!!flmNn}VtxJc zkbUuj)^i}+XCyjni3)PAGg|!#iaqMD`T?qnQ1&Bzi*Dm~?WRJ#9#Ki4aKg16)oWl$ zP;*LO7Vxj?#b0!be@fMF1B4zu3Bj%RK(BO0MfDVTKcgFiI{W-p55m3zy7GQRCyzj_ zL;Jc%5VzpzEl#+4wDlaUX|$kmEmFgZIgzDH5beK$hR?h5bcs@GnUSS2fgZy(dS+y3 zP!Y;{KVB=RhLl1Cy#LDsgQoBX>A{3ui zYu}&^6zKd5tSM5Q^z}|P?>8+@@Z8U<3)G$R;_oKdObM4&ocRe2QK5?o#DPRIU|4zK zmG$S-rJAPd67S|)?^TT+*q=?EAKlJNEzMWIPUBTgnAv_>YzE7oXO_cnM!~iu3y-0P zUh>w6ZPi!<3CaTkqw;nS+6uD);e|P$#*!}whW+-tOv%cK$~+zfAOJ#NTcj=Y1dQ%!70~~GAv2_A7n9;Z z)JYibZo12IjJ2#Vcd9-3MV$91;m zgl}7WcY{R;Llia)8*KDATs<-7mNO%sBB5fSnoJ(qG`w|f} z!!p5;96+1#A7Pt6nn<+C8I2%^ia@BNukf1#1EM6exztRhi5v7kuyWFnhP**V{5e6S zn4@Gk=0QPJ6V@q;Xt0B1#H_GnRZ=ST!{SwIqO;o|D{OOR@GN4oA&4~7L2G9E-D`^^ z+5@0nG{84QQrP1PYA~g3lag)1f>anoXp11&x%@07cR>GhjDcwp1Ul#;S55H^kR;#i z`9m}yTc8Q*f^q3(_F2@GVdx(+dg1s*Nv~#&gE)u6I84ehXl4TVH4sqpe|BH%{Ggu- zy>@zYWO-i1^M}&L^KWkb`*Z1;1$ci&gJ!C**c0EPFBa`Ny-~YJ#j)i(R5>e>?;Vl} z6ykj*&nl6CGG+deSm0B;*`?v_2{|eV6w?1UCsO_doZp9o@7Me9_ALca|B1iaa{A)f zvklEJKjwHkXgBmi^ojP!}Hnvm2a@P5HwSLPQY?^EXzT8 zPX#@wrB0LP!`f9k5G;)YNFY$@XV=l>H50JT2o^@ckpvF*JdVRvNpv6?ntMbLo7nO? 
zjMT-@{$^@z5J1l(MCwygXF>wF2p{22j%te=uwAmVYI2n54)Z4sr)eSn$KdGRuiFQ$ z7&FzmtoH%I6N2?nxNoZYD0r#xfQ2uMH0nJH}g{VL>zJ*gXW|n7wB>zw1S? z>fWV$Gps>6X#fX8QRZr>@fGgu2TCxO0`>H~f%;u5G__K5^#r~3ki)zOTm8RW*}%>^ zVLE#bJNiMCYwCNfUYAN=F@Vt#Arb{UMP|%b?L{uzok@u+p{`C>-k2c zwXs%p)t~jU|Cj=RNWO#YdpODO*mq#|d@Tz?l5V}^ZlC3#eQmh;-CQ1xsa#CjOd3eG5zThpT@$2u z#yiEN1NX3sL#n14*zV9j{uFn4nC=gddSk87&#<_Ms5%t zJ9_>yh&AFwjOwDq(auegVos7g$^`mZ%!1ZZG@NZUwW@!;UyV4 z%kQ!EZ6%jVjjpZpW1j}@F9=I$Hy_>CjRue{yjhU#C=eu!cV0szf6bJNBn~4+k|3ux z@ys6M@D)%Knf^`(Oe?V- z6>KI4sa!#HKQBCx9w$Y60{Ci+CyBOW-vR~`c8~PO6ev@;yDgVjC3a^dW?E{niTS6B z*_U=oMI+7BmPeaRSV_D%;lOY!#ogYk$EFPkbL5jO=nz`lui8K^7n`BUS!9{MY`BI2 zw>~nni`v&a@)G`O3y6*Bp4>?JnC&ZDJ;M<_kAOEf*<~IoZ9r7?n`0#cN|1<~qqT$o zVwf^^!QBIx6G1AN*GYETKY+6&Az>Va*)D*PWw^kEaY7U~4rN8(L5lwCO)?CeV=YkM zvgV3nnL+GdNr<`F*Eb($(T7-;R2_3)Sdzq^hTU_R0x7IOXPT7LD%rT6b0nuLAwuw_ zq8@J|C}q&DY(1pudl(54Y}KN)v}c9J#$PqUK*Sja8XkQ?-1Qs3h~rJm`-qF|{xuPO z2USS8+sL8}q=u?1H%0ACiwTi3Sg!OTFrt&LwW0FD!5K?5R<)A&OrWVtXVVu+7|%JZ zt}13tXJ*T?62P((%xUi2rbtYYcpi(x+$wIFLX4wmwXT}qf;|c!X*`!RJszPxA8Ci% zyf$_=Hb%eK9}+FcZu%!JvTI&P`tBP!{h(aR#KT&?>1nrF4LHOAKH^K6)nZOEb^rpO z$Z$iSUVDUx)aO-G!-eTkG7tou!A#{0jim3IvJS|DQh-F_r2V6M zwXijptg*KOQ5N;oo|qovRS{E!R>5=nZNeG!dM92!cZO?ebD_YA!;V_!TW9xYH*P4= zdSEzP(t6D7{trZ81TEBLGc}IMQr}61?_=tACenwN*C8KqVO8$4*mKH#BkF()zK3Ls zlIY~|*%v3&Sy`>4zLngBRKdn+X3yq=Udi#}aic>ixcC8zTEgRnLgZ$XB~gU7d+S&y zyj%flSQE}T%+Jm3cE({O8T9(D@|)Qbtj&o#s$!m6%+8dwC*GV zktszsp|2DXD>x;}q-Y<$@g+3Q<%AWZQNW87IzjyAM*3CwBIeuaA_Y@xBBfRJ>~T30 zj3|#-1fOhT_+~fqlxIR32nxrRtSfqXB6B zJ_$RB))ijqZ5?nN`T(F#qtn}h!d*U-vBw;KeA#U2aq3AXzg<^M%K|3lxp35nbaE%X z*-~u#YXvyKcC!lCZ$8tuRH3V*Z3703w1Bl-rLbF6vs-xy*=6r;gU|J<8-a(wpb`+X z9T^GbhyYU=ph3Zw?(OZb9cP5z-4Qyf{wV&496Xv{2T+{O{<`#tmSW9**@`SSPsrJ{ zLZWOX<4cr|I04qHF@9cAw@}nNMFqu4uArI#Wp#O_^-dUff{g0I0N4(4F`m5R3)7y5K_%61{<#95fP z8dse-C|XXmDU>)Dr(!B@V_q)n-C%ySew?(E^+_cO5tb6jq`CU=6}Cs`oQ`>54-Qe| zOuL3jMtNOk>0bG776_Yr?pw&DbB&||eFhe#X?ZedQQAsXaue*4QI=Y+QO0GYb|lzD 
z{n)H{;vRNAU(cTD!pW2XzPq`T_&Ju#zCGPvJXh0aZnkD~JF{o(6P6DJ4R)=TSPEUR zLXxuCo`s3n3%S~@qA_u0lgf$;p3ddhqIUwNGKIJA}sqMo>|Ik*ke!CdjhbVZ#p=tlC$qd><`n+N0ax>y3`Mc z$4mT45?gX(=@d8NMS2u5!beYquPT^I!Hk`>kNm}d=XZ0Fl%Y0;=w!~Ck;cN5Sl95;e|TEuUZce!T7TYhi?AVZXg; zk1?4ab=rBT)YQ4>-3SZ7gqskRDMiQ9aYrKwWLt*)}U6 zxJL1BmCmTeEJ*oq|KA!IHK@Za=z5idbYkwf^Rc`fyLLc|PQmWUu*)cpZUv+tfYD8U zd;ONjt2kPFsQ&m|u!kYOKNkI-Zf;ZEk<0PZ-a2{?ih#=bAhG31Kj4@X=8QTYuOm+> zRZQcS!fJZNAd2u0N&*b#snYWI1Vig;CGJcyj<~`zN)aMrP+alUAX`Y^QP?i(Wp|P$%OpMVK8W5;%nXP`0du9NE)GK_QF-q!nAy{0MFbyS zkRO07kqiRXy$C&1@7<$W5jlg_rjFwVPGiyd$f}lVL87Y}l{y-K8h)*p+{IOUJ^ZZh zuM?s)E8==>E&uDFJ!&E3oXB>EEb0UyX$_2}aWqoaa!h^X)foQ)F#$n>^!SjqDsb@l zZq~jH*{d(+OWp!DAh2?bAWAb44di6SeNthCZZr~Rq<-i2Wmz@lZ7(@{^f8QE8ng#p zW(`_%$`Z3p?`T*It?2zU#Y1^Qk+0&R7?s@T6DhEyD0+ee#yeE{ya{1?gz(?Dfmz8# z-w9P3yo+n(Ou4_TxLQ)80ltb=2<$Mr2Xa+I~QUp*BIeX>%AY=)i01nQZ?Ao@Lo^%%J z{=G4VhXiO!mXM)mI8tRVJWJU5q;3h;V)}fqBAEPkyd7OpD=b@{3#uwVQ|$6kg?J@N z!Vrnsq6v4kPR0dBm&Ub!AVN&|p!8LJ41y!AMqr)UM+WY2^zLO6%UP`W#~!wVw>mC} zHK;lKR_Lk?+W1hBM?KLG&^_BJcmso5uE@(_FJhJ=#^c+{J_vN9 z_q0FWk8(dFFA)M%Jq;2k++kbLKVh}#SB>Nb&M&5gfTS@Hk_sbamrLW|HL6xq{psL8 zrz7a}Pr1)3Re1~Dqlbki5!IDgpJ?sW_F(FQbmb>%#xT;UDaLig3gX7 zm_zhRAAl9zuWo5tLd%mL*Ii zTZo8uD2WmtdbF$gwwBbSlu1Ym4X>{i#e{ym>BPtPD+H3nC8SPHiY|~B$+GW%E&V+6 zm{P5!fs+f^1o!Sof_-FPoavVO5JgbPnlUzRRE86Xh}kL^WJlXJ-(7Y(RPXzk^v0y5f~{_hQB`_mOTRG; zQZNxMu!0Sjv%=^dt*qyRsV&h&T85-yc=~QqJYJi{qpx2>gxSr4tw?U~$$?5P;6FiK z%4-`Ho6WH5Qi!J>71X$TDbH*>2?4|XS((u!%)B^c=Zb{~bTv}{fP!MGu8{c?Q zb`Q56oN6IHKp3wfrx-cCx;!Szb5Z_*z@!i{N++Ou@t45TTEo2EhAPu3<@+}FoCGPQ zefnZs^L)EJOYH48&;_>mjYNm={>R{`CnE7Zl1KX%jGA8fcbx!c(vrWMYkP)zQA=pnjXiwjbtC}lp=>a#4`!zaGQ|uG{<=!jZ9~r2!8DeFOemUdg)R6NQ(8GL--#07heaJT zZ)E}xU%p&xo=;tM-?{vrUwz-~_cpfuv4pt!2zuq0k|QKZuXd_1tqp5eLMn(5dVA?( zR5x6Ae1I{ID7$0iX*V4?1_!@Y{Ih>kKs_w^dmE$v!wR2>VYkG&ROsbwmMmLB^nE=h znY>t8-#E8PcMHj;_4CRmoH~BGF=iRmge-;Cm0zf@x}E)1(_5kC^!Lf2t0nTZenVoQ zn%3YKkBsfXqRA5TYo!<0MWfHm%nG{2z`N7oP)?=6*{{VW_48_3UDhe>b1nc@VkCGAm3H}`8+stY=%d}(Qz6_jy~p`>`Bsfkon^$!+Lm>^_E{H68Vyx 
zmAEgy`Yx)zN=vEYZZw@u8Zt8BThnIluBxufwYd6)bmE`vESH@ul^s+y(WPOm05gEK zW^J7nK2*~a46_m1Hlm^9T*PZkT@cEAGy6mz$`ms@M%*lZq%B)D1rvK0)aLf55QgI7 zG@UK-gAEb5`3H}uNq1dLf{5aDl(%JfOW27pc>xtW*^qcM$74=?)^GJREL28zsv3*t znS3P=z?;=g#~2so+kL$<5TIaz$IywLb1=u(&0W*Uh<9dKA&pGUf3Ce>NnRZGWK7&0 zr(rMZYI9pA&90;3(AlOTq_O6`2uhv~|1_v&sExQjW>TCqhQWdd=HpyG>^HorIp){6 zWF7jz;%}~4agOHgTAh(7EO!gF>VRVjY&XM6Hq$9)<`_F&9uOs)R-#Nd)T!o4BR_2( zk!ZyfMl%*FZ2@F{hz?LE&aXYG%uT1ov{1RV(tKDobnL&nPPX-@{jIuOJ))7W=;O~TJEsW39dILn5Pr$mqv!xytA8p_Cn z$YErTJEAo^61}FK48P97!Di%uQK_@I`Jh5fc2t8ihDZeKK=f^dgcc9%AHI;BSjpD_ zPF~6ET4Z6JRjhDjeAKY&s+wO;He~gtdt%n+Q~(;`&LuH;t#ZHox7AY*tx>^9+F>tQ^bs%KIYXEt!m67?8@ zPnlFv+xe9XzRV#r!C~3DU7tzmmE$&W8v{+vN5_A;6=&MYm@yKtzVnL*D>BcAVRiyO z#D}IhbJj{{0KMF<%V0>mAXYPsa6yvz5*J5GCnm4YjOPa54A^h-8mz3#Q<6jn%$s$N z6rCY$)k=C+ZYG$7*NG@Wy&P9~4CE$RZce+$c=pkU00!t4Y9sZ8b$n<(O< zCU$E3f|h~@CYCR8ZgyKbhWuK95Ex&LWi(Ni@wjJlnpu46x zA5Nv+DrQFWtdW^1PqON(7&!W7JCs~!rcllj=9scgqVw`RiBJOO{b?>SBr8VXQCyjJ zA|n&wxwzDiR--@_mRQo7t=MJo)-wri5S|ob6=-=}$^&`8fl6QIvP4S7fS9F3gE<0B zaoDsWlJ-zF2es;LxKGS!HX?A@S5H+(<1kGDSJmAoLbvBb=Qf?WS^q$(Wy`w$>8sj{AI7Fkpc$g{tMvg+1Dk? 
zwS9o(f`-(F>$suksErkKw`ft}-q@9f9UE@I1Syh3aCg-P3H^Z*U9*7Lh1pku({_&7 zdR)G_Ss&u+rY^?;)z~JV#}rX0OOE1LwDix_DLv7aBpI%{e?UxQW&ujys{*6yTS}r_ z>MIBV#SqU3t$vw~Ouu!!Bl33fxZXY5Ux>HiQzX7yQ4o4~&DG=vN+Xz*TcBKAF!Ihw zQ80ukvZgT^q{tBiOP$e|{S=K_Tz6~`^hmo@T`g@hD1eTpU>U+43MB%aT!CdUBUf+m zn_5QYxYE)%W-}%CqUNIWVQ8P4s5uYZJAO3US{&6 zB&L&+MUE!LcPH-_6TZYZbLR$i{+g#aR=Z^R3eb8MTdFbZ)@1Z@QDrG;f)mzRbUlND zU8PeL_OY{+#!`iRX^PE_&AiEL8XKWifTV$Yt|e*3vdl{S+IW0_mTZ3C54N}5%G|47 zdVZhFHek_cGxIKvCNPX1HT=yP0JexSmbDwEukP9=1sFA&(Kwtxqf~DOb)^vzenci} zC8@BaO~u3DCL`mbgX2=iRZ?U;*(Y&lOPy!+c@@%**q4Xc_W8paz5OmZtLro;`Q_zK zI0wi3`U;a93(^H#IbJ06li~|ZtwpR;E>OTdryWzU>?&}OhxO{~uofuXQegBLjj_Xh z1aT>zz0eA(s_#D1Da2J`-z^UUdugkVW&4v?Eb*;TD$GPro^wEG5u!kn_;)Pc?_>a< zqgRxj7Oxe;=}(9SUa)h4qWpB{G)ul`Xs5mEl6|_Hg3;P5T!CivgFDJ`(&p%?a7_}1 zOIuZNO^fPzM{atS+6uqSJb#-?BqPR5P%}m*e;4I%0hh;?($vTwX?NtDT)qdPXXKW~ zf-$7@Zb>Q+Dxu*pj-9$(R2~~U=LO}iMUPJ8a}O=0qtYC?*Yp>&C*8K42Q<=uhlVRy zrL%R`^OAOt>gSOP8iYzg)unet-?LXpBkuH3m&>Tu6szpP?|N!Htoyd7e*I||Z(j~z zu;`@y39Het%uMb_>fjM7?efUi>W|5G_&^3=FjgJrne~TXs)DOROi3qC7^vnm9WSDZ zzL5xh&fI7}Wpwjm<~BC%r~770kVizfq%j%&li>4ydr|(8@k##9b1+;MllUhwq<76y zj)WWJ1c|09_YkT$5VZH6<1=&2>Z@JCAxnyy%!vOprQGrnRzh0oG1l6rk%Lu3=MvC0 zCG<^|QbSY6u%|@UQK@5(tJq2}l_>I|qPCGo7{DMOx|Bbrk_K{G9kupd*ZZ&N>2a`I zS|(Wpe0W(|yp``^>sr{V#u5Z$?)z+|Z(Vd*;=b%eNkuM3Ot&|Y#iPt>VYl&sZM3E6 zQj3{XGw7U_W;X*cY@uj|y5stzj4#Ov_XTZy8(FC;ns+Nd$R&D2s`lVh_@EBV48#~L#5^rt62d9Q|$?;?a{0h@ML`HyC zzv@d~Uh}5+ap7{R8G8yW&+mn&9kYb=+qZZ(L**k>yp5{rW?n)#-W)T8arG8^qWbiFBNoD5#^qgz(G&~=6n zGQ(g!1Gn3e7pqv1BEYukurR=fO3C_8@7%9CU|Gk5NNJi)R}nYr64W*Ncc>aieE(Jv z7o83&H?E1G_yf!!J$+4<(*Z)%>@-F+;_RXy@X*<*rn1AnFg>0?K%pE#}f7ib_y=zf1Y{)?Mo;i}mhG z8{+PZSi#d+l?g6iLDpI0{8b3lRyi*MWa~u=wZ;eT9kVtwPDQ9mnx$Cioh+X*Ns2m< zZQeRLQh)a9m)f za`)Xws%gbZiT|g$tpc<*aeM(P>Q$)eiOzJNhSs<91vbQ-zxf?DYmSY>1U80#6JE@TF(-M&v@&qWi81s|?3dPt=kqDMhweKps)u}Tg&7>Ws5Ni0u|RfYsWRO< zqm&+xEkmZ!t-Y7J8Ra`3wB@22IdyFKBJrOWy#Dky@0Lc~jcETZt}05}Ru7`w>u+() zUH%L=f!yQKTmDPMM^AD;n`2)cmKXe+kdvYBUkUJc{9)gMAMpFW2;YG(_>%`_+Fzm7 
z8MpXB-vl4vd;AdJcT`H+n{hM)z6f7||BEd60{?$Wv8(6Amg(C6>W=omdr~;2-r^@A zOK|%CrwIBs_~Fn7{L{h#p`7&}ZK}Edvqt{gnxn;h9|r-%_y1S@Pm6Qr7xLKO#`0_M z59?FkzgBmBb+U3{!@loO@c(BBL*E2nqx}#3SxZ7deh2_X)A*tQaf5E$2mpucVoO~M zhiL2+kpKiB0E7S#fC3NzLI4Q=9F}tLhBgqpODk-7oOA>DFB2isFE7Ne+obW z3_t;iNP>S_;0930|9vHXmim9j1NmRII9e^0E$?%u|h^o?!%MEu|It}d;Cwe zr=Ly$K_Ci*0SJM_smG;CgT$?VFn+){E!^95xqpm;eT^D8@Y&q+Z+CeuqHVf^0F%j(U4{YJ^vHMhl!r8cihljOOC{SBNk1peIr z8mr9P1XIUb^M(DYe}X<3S^W4Nwen5=+9mh3YP{HM;Jf_iK8sFlW(PO<#s0|@0<{GZ z^=W3EssqCXD@R`_xSJpqq^+1iw4DyqMsd&?Lo*PsuDHp~4Ti`P%Y2R_cm`+>6y7@^ zz2`nRS5;B5kV*6SIHC^6d!0TK!Xyg9gg#N=IAWLGKBaXhOyFt1l{bgd|8gNDyz`x$ zN_HNk7uyjSGEQ3pySdqDKmYM50)0;tia2!HthfB+6ID!5AfaOyL9pc9+t>DCeb3q% zS;B4Fv7RHA#?+D48sX8)Y(in+d8u}Q@OG$SUCCzk@-sQ9QzM;*Oy#JIqM63S_XyH) zzoUC2%ana3r_FB2mAK~5;DY;R*-VNJ9(*Th?oFU&E}n%rYA02(@swT|xM?y8cAdE3N-EgiEXX+*YCTwj*bs0SU4r2&91QcjzSuvwG*H zc6^d@USmEbKx+eIDl#e>{Od*%amTJJJtgRk>JiIVY1YUDrR~9IKByEuq+?R^W(f*P z8>dmf0v0#mJU}?u3j!7aw_D<@JOtd!$F83?7tvDFRxEaaUl0fh3Bg|DJwJ|!8xbI+ zez|k4sU2cmoL><5O&Lkj9R96nL-EHVB2BRV!9G~jy8*{(|Mwd|=r_R#B=HC&qgNJz2nhKMLZZVvtVgupv`0gPL%fHf z4-%tTEuNbD-F5Fa`uB3A5tDwQj^xom_r_dw^C zL-*8>ddG^?+#CS`^4|a9jMNZ4CkX`d3E>w8;l83Nvd8vAaeB*)+F;sZ&m{@2X!*VZ zU5;Y z-D~%t#x4HKeqb2r1Vgegfy!V$3cL4*IeQ$G9znC%t9I1Iu(g{zN7&j;oe8*HLty*q zCLi5o)~Kk1RnOQtPLF0Hma#8wYC+%K==K47zm04%M);Oep{2R@-Lck0rHDzL#8~E+ zMMiYBO+qmxeHI!s8i}dDcBDTNed!Z-T5SWQ9eQQ$P4~7!Ca`VxUKd~Q&og^wMQV4C z*$el_dmUOM-I3PYUlsSCC6u3#vegJ~=pRT}wPi=oG-%@_l^#m7pzu@O&vMUNG_w^e z(%kPMk$K(8#LczAG$B`b^`;OuSYTfYbDDh}JK4Eg?it_r;E-;!a>heJht%65kD((G zg+(>zcVf~XD_)LCI^?Jdmw&<3SYoR2rfe4d^wjaKvDfUxX+GP$ZR~sb(SCt$vx6xm ze^#!)+C*RKJRn`_#nCvGej#*87?Oag<`hRmhD--t)d^|5+yQo zztC#a`401#XiteAPmALOcBe#jo+=^oQYl;D! S{O8%t@3PQ **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. + +| Parameter | Description | Default | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `1` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node 
configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations 
for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| 
`master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. | `/run.sh` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | +| 
`master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `stretch` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. 
| `/run.sh` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| 
`sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `stretch` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether 
a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install --name my-release \ + --set password=secretpassword \ + stable/redis +``` + +The above command sets the Redis server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Number of slaves: +```diff +- cluster.slaveCount: 2 ++ cluster.slaveCount: 3 +``` + +- Enable NetworkPolicy: +```diff +- networkPolicy.enabled: false ++ networkPolicy.enabled: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - Redis Master service: Points to the master, where read-write operations can be performed + - Redis Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain an extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed: + + - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accessing Redis Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar): + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. 
+ +### Using password file +To use a password file for Redis you need to create a secret containing the password. + +> *NOTE*: It is important that the file with the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Host Kernel Settings +Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. +To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge pages. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. 
If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install --set persistence.existingClaim=PVC_NAME stable/redis +``` + +## NetworkPolicy + +To enable network policy for Redis, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to Redis. This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. 
This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: +* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +* Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the Redis Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + + - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install stable/redis --set persistence.existingClaim= + ``` + + - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install stable/redis --name + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. 
This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + + - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) + - `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. 
+ +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis StatefulSet before upgrading: +```bash +$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` +And edit the Redis slave (and metrics if enabled) deployment: +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Notable changes + +### 9.0.0 +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### 7.0.0 +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. + +This version also allows enabling Redis Sentinel containers inside of the Redis Pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). 
diff --git a/chart/charts/redis/ci/default-values.yaml b/chart/charts/redis/ci/default-values.yaml new file mode 100755 index 0000000..fc2ba60 --- /dev/null +++ b/chart/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/chart/charts/redis/ci/dev-values.yaml b/chart/charts/redis/ci/dev-values.yaml new file mode 100755 index 0000000..be01913 --- /dev/null +++ b/chart/charts/redis/ci/dev-values.yaml @@ -0,0 +1,9 @@ +master: + persistence: + enabled: false + +cluster: + enabled: true + slaveCount: 1 + +usePassword: false diff --git a/chart/charts/redis/ci/extra-flags-values.yaml b/chart/charts/redis/ci/extra-flags-values.yaml new file mode 100755 index 0000000..71132f7 --- /dev/null +++ b/chart/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/chart/charts/redis/ci/insecure-sentinel-values.yaml b/chart/charts/redis/ci/insecure-sentinel-values.yaml new file mode 100755 index 0000000..4ca1a93 --- /dev/null +++ b/chart/charts/redis/ci/insecure-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a 
imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: false + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). 
+ ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. 
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/chart/charts/redis/ci/production-sentinel-values.yaml b/chart/charts/redis/ci/production-sentinel-values.yaml new file mode 100755 index 0000000..57df7dc --- /dev/null +++ b/chart/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: 
http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). 
+ ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. 
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/chart/charts/redis/ci/production-values.yaml b/chart/charts/redis/ci/production-values.yaml new file mode 100755 index 0000000..7b535c9 --- /dev/null +++ b/chart/charts/redis/ci/production-values.yaml @@ -0,0 +1,525 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: 
http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). 
+ ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. 
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/chart/charts/redis/ci/redis-lib-values.yaml b/chart/charts/redis/ci/redis-lib-values.yaml new file mode 100755 index 0000000..e03382b --- /dev/null +++ b/chart/charts/redis/ci/redis-lib-values.yaml @@ -0,0 +1,13 @@ +## Redis library image +## ref: https://hub.docker.com/r/library/redis/ +## +image: + registry: docker.io + repository: redis + tag: '5.0.5' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/chart/charts/redis/ci/redisgraph-module-values.yaml b/chart/charts/redis/ci/redisgraph-module-values.yaml new file mode 100755 index 0000000..8096020 --- /dev/null +++ b/chart/charts/redis/ci/redisgraph-module-values.yaml @@ -0,0 +1,10 @@ +image: + registry: docker.io + repository: redislabs/redisgraph + tag: '1.0.0' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/chart/charts/redis/templates/NOTES.txt b/chart/charts/redis/templates/NOTES.txt new file mode 100755 index 0000000..4298d70 --- /dev/null +++ 
b/chart/charts/redis/templates/NOTES.txt @@ -0,0 +1,104 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . 
}}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis server: + +1. Run a Redis pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash + +2. Connect using the Redis CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. 
+{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.nodePort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} diff --git a/chart/charts/redis/templates/_helpers.tpl b/chart/charts/redis/templates/_helpers.tpl new file mode 100755 index 0000000..8c1df0d --- /dev/null +++ b/chart/charts/redis/templates/_helpers.tpl @@ -0,0 +1,355 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis secret. 
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "stretch" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/chart/charts/redis/templates/configmap.yaml b/chart/charts/redis/templates/configmap.yaml new file mode 100755 index 0000000..d17ec26 --- /dev/null +++ b/chart/charts/redis/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{ tpl .Values.configmap . | indent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{ tpl .Values.master.configmap . | indent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . 
}} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{ tpl .Values.slave.configmap . | indent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{ tpl .Values.sentinel.configmap . | indent 4 }} +{{- end }} +{{- end }} diff --git a/chart/charts/redis/templates/headless-svc.yaml b/chart/charts/redis/templates/headless-svc.yaml new file mode 100755 index 0000000..909cbce --- /dev/null +++ b/chart/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis +{{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel +{{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/chart/charts/redis/templates/health-configmap.yaml b/chart/charts/redis/templates/health-configmap.yaml new file mode 100755 index 0000000..35c61b5 --- /dev/null +++ b/chart/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + 
timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_SENTINEL_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? 
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/chart/charts/redis/templates/metrics-prometheus.yaml b/chart/charts/redis/templates/metrics-prometheus.yaml new file mode 100755 index 0000000..3f33454 --- /dev/null +++ b/chart/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,30 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/chart/charts/redis/templates/metrics-svc.yaml b/chart/charts/redis/templates/metrics-svc.yaml new file mode 100755 index 0000000..ef39725 --- /dev/null +++ b/chart/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/chart/charts/redis/templates/networkpolicy.yaml b/chart/charts/redis/templates/networkpolicy.yaml new file mode 100755 index 0000000..30f09f2 --- /dev/null +++ b/chart/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,79 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + {{- if .Values.metrics.enabled }} + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: metrics + {{- end }} + {{- if .Values.cluster.enabled }} + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + {{- end }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/chart/charts/redis/templates/psp.yaml b/chart/charts/redis/templates/psp.yaml new file mode 100755 index 0000000..28ae22a --- /dev/null +++ b/chart/charts/redis/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/chart/charts/redis/templates/redis-master-statefulset.yaml b/chart/charts/redis/templates/redis-master-statefulset.yaml new file mode 100755 index 0000000..3c2f183 --- /dev/null +++ b/chart/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,410 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master +{{- if .Values.master.podLabels }} +{{ toYaml .Values.master.podLabels | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . 
| sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: "{{ .Values.master.schedulerName }}" + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: "{{ template "redis.image" . 
}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.command }} + {{ .Values.master.command }} ${ARGS[@]} + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . 
}}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + echo "Getting information about current running sentinels" + # Get information from existing sentinels + existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . }} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) + echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc/ + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel-tmp-conf + emptyDir: {} + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} diff --git a/chart/charts/redis/templates/redis-master-svc.yaml b/chart/charts/redis/templates/redis-master-svc.yaml new file mode 100755 index 0000000..3a98e66 --- /dev/null +++ b/chart/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{ toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/chart/charts/redis/templates/redis-role.yaml b/chart/charts/redis/templates/redis-role.yaml new file mode 100755 index 0000000..71f75ef --- /dev/null +++ b/chart/charts/redis/templates/redis-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{ toYaml .Values.rbac.role.rules | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/chart/charts/redis/templates/redis-rolebinding.yaml b/chart/charts/redis/templates/redis-rolebinding.yaml new file mode 100755 index 0000000..aceb258 --- /dev/null +++ b/chart/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . 
}} +{{- end -}} diff --git a/chart/charts/redis/templates/redis-serviceaccount.yaml b/chart/charts/redis/templates/redis-serviceaccount.yaml new file mode 100755 index 0000000..f027176 --- /dev/null +++ b/chart/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/chart/charts/redis/templates/redis-slave-statefulset.yaml b/chart/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100755 index 0000000..f28c545 --- /dev/null +++ b/chart/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,428 @@ +{{- if .Values.cluster.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: +{{ toYaml .Values.slave.updateStrategy | indent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} +{{ toYaml .Values.slave.podLabels | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . 
| sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} +{{ toYaml .Values.slave.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: "{{ .Values.slave.schedulerName }}" + {{- end }} + {{- with .Values.slave.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.command }} + {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ 
.Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.slave.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! 
-f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . 
}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/chart/charts/redis/templates/redis-slave-svc.yaml b/chart/charts/redis/templates/redis-slave-svc.yaml new file mode 100755 index 0000000..052ecea --- /dev/null +++ b/chart/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{ toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/chart/charts/redis/templates/redis-with-sentinel-svc.yaml b/chart/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100755 index 0000000..984de21 --- /dev/null +++ b/chart/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{ toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: +{{ toYaml .Values.sentinel.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} +{{- end }} diff --git a/chart/charts/redis/templates/secret.yaml b/chart/charts/redis/templates/secret.yaml new file mode 100755 index 0000000..ead9c61 --- /dev/null +++ b/chart/charts/redis/templates/secret.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/chart/charts/redis/values-production.yaml b/chart/charts/redis/values-production.yaml new file mode 100755 index 0000000..7fa2d6d --- /dev/null +++ b/chart/charts/redis/values-production.yaml @@ -0,0 +1,583 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-9-r50 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-9-r44 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.3.5-debian-9-r23 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/chart/charts/redis/values.schema.json b/chart/charts/redis/values.schema.json new file mode 100755 index 0000000..0c91011 --- /dev/null +++ b/chart/charts/redis/values.schema.json @@ -0,0 +1,152 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + 
"form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "master.persistence.enabled" + } + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "slave.persistence.enabled" + } + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final 
destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/chart/charts/redis/values.yaml b/chart/charts/redis/values.yaml new file mode 100755 index 0000000..e9efcca --- /dev/null +++ b/chart/charts/redis/values.yaml @@ -0,0 +1,583 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-9-r50 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-9-r44 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.3.5-debian-9-r23 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false -- GitLab From b33ee7efc6d495ae692b3ac0d1b25343e549cb36 Mon Sep 17 00:00:00 2001 From: Kevin Wilder Date: Wed, 23 Dec 2020 10:15:45 -0700 Subject: [PATCH 2/8] refactor requirements to point locally --- chart/requirements.lock | 21 --------------------- chart/requirements.yaml | 12 ++++++------ 2 files changed, 6 insertions(+), 27 deletions(-) delete mode 100644 chart/requirements.lock diff --git a/chart/requirements.lock b/chart/requirements.lock deleted file mode 100644 index 3121d69..0000000 --- a/chart/requirements.lock +++ /dev/null @@ -1,21 +0,0 @@ -dependencies: -- name: cert-manager - repository: https://charts.jetstack.io/ - version: 0.10.1 -- name: prometheus - repository: 
https://kubernetes-charts.storage.googleapis.com/ - version: 10.0.0 -- name: postgresql - repository: https://charts.bitnami.com/bitnami - version: 8.9.4 -- name: gitlab-runner - repository: https://charts.gitlab.io/ - version: 0.18.1 -- name: grafana - repository: https://kubernetes-charts.storage.googleapis.com/ - version: 4.0.1 -- name: redis - repository: https://charts.bitnami.com/bitnami - version: 10.3.4 -digest: sha256:359c29c27577a352c6315a621e222ed8c9f2485ac0120518b4ce7e3233eadfca -generated: "2020-12-07T17:03:30.441720026-07:00" diff --git a/chart/requirements.yaml b/chart/requirements.yaml index 7277c0c..b77916b 100644 --- a/chart/requirements.yaml +++ b/chart/requirements.yaml @@ -1,26 +1,26 @@ dependencies: - name: cert-manager version: 0.10.1 - repository: https://charts.jetstack.io/ + repository: file://./charts/cert-manager condition: certmanager.install alias: certmanager - name: prometheus version: 10.0.0 - repository: https://kubernetes-charts.storage.googleapis.com/ + repository: file://./charts/prometheus condition: prometheus.install - name: postgresql version: 8.9.4 - repository: https://charts.bitnami.com/bitnami + repository: file://./charts/postgresql condition: postgresql.install - name: gitlab-runner version: 0.18.1 - repository: https://charts.gitlab.io/ + repository: file://./charts/gitlab-runner condition: gitlab-runner.install - name: grafana version: 4.0.1 - repository: https://kubernetes-charts.storage.googleapis.com/ + repository: file://./charts/grafana condition: global.grafana.enabled - name: redis version: 10.3.4 - repository: https://charts.bitnami.com/bitnami + repository: file://./charts/redis condition: redis.install -- GitLab From 21a90d8a6be9f1d15aad36aa0db1fa6a7b96b82b Mon Sep 17 00:00:00 2001 From: "kevin.wilder" Date: Wed, 23 Dec 2020 18:13:47 +0000 Subject: [PATCH 3/8] Revert "refactor requirements to point locally" This reverts commit b33ee7efc6d495ae692b3ac0d1b25343e549cb36 --- chart/requirements.lock | 21 
+++++++++++++++++++++ chart/requirements.yaml | 12 ++++++------ 2 files changed, 27 insertions(+), 6 deletions(-) create mode 100644 chart/requirements.lock diff --git a/chart/requirements.lock b/chart/requirements.lock new file mode 100644 index 0000000..3121d69 --- /dev/null +++ b/chart/requirements.lock @@ -0,0 +1,21 @@ +dependencies: +- name: cert-manager + repository: https://charts.jetstack.io/ + version: 0.10.1 +- name: prometheus + repository: https://kubernetes-charts.storage.googleapis.com/ + version: 10.0.0 +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 8.9.4 +- name: gitlab-runner + repository: https://charts.gitlab.io/ + version: 0.18.1 +- name: grafana + repository: https://kubernetes-charts.storage.googleapis.com/ + version: 4.0.1 +- name: redis + repository: https://charts.bitnami.com/bitnami + version: 10.3.4 +digest: sha256:359c29c27577a352c6315a621e222ed8c9f2485ac0120518b4ce7e3233eadfca +generated: "2020-12-07T17:03:30.441720026-07:00" diff --git a/chart/requirements.yaml b/chart/requirements.yaml index b77916b..7277c0c 100644 --- a/chart/requirements.yaml +++ b/chart/requirements.yaml @@ -1,26 +1,26 @@ dependencies: - name: cert-manager version: 0.10.1 - repository: file://./charts/cert-manager + repository: https://charts.jetstack.io/ condition: certmanager.install alias: certmanager - name: prometheus version: 10.0.0 - repository: file://./charts/prometheus + repository: https://kubernetes-charts.storage.googleapis.com/ condition: prometheus.install - name: postgresql version: 8.9.4 - repository: file://./charts/postgresql + repository: https://charts.bitnami.com/bitnami condition: postgresql.install - name: gitlab-runner version: 0.18.1 - repository: file://./charts/gitlab-runner + repository: https://charts.gitlab.io/ condition: gitlab-runner.install - name: grafana version: 4.0.1 - repository: file://./charts/grafana + repository: https://kubernetes-charts.storage.googleapis.com/ condition: 
global.grafana.enabled - name: redis version: 10.3.4 - repository: file://./charts/redis + repository: https://charts.bitnami.com/bitnami condition: redis.install -- GitLab From b322fa4466b810064a611a05601d86e4acd83fef Mon Sep 17 00:00:00 2001 From: "kevin.wilder" Date: Wed, 23 Dec 2020 18:44:29 +0000 Subject: [PATCH 4/8] Revert "explode subchart archives" This reverts commit 9c641830b17dd61fb684642a53e2e7592a5d1c6b --- chart/charts/cert-manager-v0.10.1.tgz | Bin 0 -> 11396 bytes chart/charts/cert-manager/.helmignore | 21 - chart/charts/cert-manager/Chart.yaml | 16 - chart/charts/cert-manager/OWNERS | 8 - chart/charts/cert-manager/README.md | 154 -- .../cert-manager/cainjector/.helmignore | 21 - .../charts/cert-manager/cainjector/Chart.yaml | 17 - .../cainjector/templates/NOTES.txt | 0 .../cainjector/templates/_helpers.tpl | 32 - .../cainjector/templates/deployment.yaml | 75 - .../cainjector/templates/rbac.yaml | 50 - .../cainjector/templates/serviceaccount.yaml | 14 - .../cert-manager/cainjector/values.yaml | 42 - .../charts/cainjector/.helmignore | 21 - .../cert-manager/charts/cainjector/Chart.yaml | 16 - .../charts/cainjector/templates/NOTES.txt | 0 .../charts/cainjector/templates/_helpers.tpl | 32 - .../cainjector/templates/deployment.yaml | 75 - .../charts/cainjector/templates/rbac.yaml | 50 - .../cainjector/templates/serviceaccount.yaml | 14 - .../charts/cainjector/values.yaml | 42 - chart/charts/cert-manager/requirements.yaml | 6 - chart/charts/cert-manager/templates/NOTES.txt | 15 - .../cert-manager/templates/_helpers.tpl | 92 -- .../cert-manager/templates/deployment.yaml | 135 -- chart/charts/cert-manager/templates/rbac.yaml | 420 ----- .../cert-manager/templates/service.yaml | 22 - .../templates/serviceaccount.yaml | 16 - .../templates/servicemonitor.yaml | 35 - .../templates/webhook-apiservice.yaml | 22 - .../templates/webhook-deployment.yaml | 82 - .../templates/webhook-mutating-webhook.yaml | 39 - .../cert-manager/templates/webhook-rbac.yaml | 76 - 
.../templates/webhook-service.yaml | 24 - .../templates/webhook-serviceaccount.yaml | 16 - .../templates/webhook-validating-webhook.yaml | 48 - chart/charts/cert-manager/values.yaml | 172 -- chart/charts/gitlab-runner-0.18.1.tgz | Bin 0 -> 14037 bytes chart/charts/gitlab-runner/.gitlab-ci.yml | 66 - .../gitlab-runner/.gitlab/changelog.yml | 36 - chart/charts/gitlab-runner/.helmignore | 24 - chart/charts/gitlab-runner/CHANGELOG.md | 183 -- chart/charts/gitlab-runner/CONTRIBUTING.md | 16 - chart/charts/gitlab-runner/Chart.yaml | 16 - chart/charts/gitlab-runner/LICENSE | 22 - chart/charts/gitlab-runner/Makefile | 20 - chart/charts/gitlab-runner/NOTICE | 30 - chart/charts/gitlab-runner/README.md | 3 - .../charts/gitlab-runner/templates/NOTES.txt | 14 - .../charts/gitlab-runner/templates/_cache.tpl | 28 - .../gitlab-runner/templates/_env_vars.tpl | 95 -- .../gitlab-runner/templates/_helpers.tpl | 78 - .../gitlab-runner/templates/configmap.yaml | 129 -- .../gitlab-runner/templates/deployment.yaml | 160 -- chart/charts/gitlab-runner/templates/hpa.yaml | 16 - .../gitlab-runner/templates/role-binding.yaml | 19 - .../charts/gitlab-runner/templates/role.yaml | 23 - .../gitlab-runner/templates/secrets.yaml | 15 - .../templates/service-account.yaml | 15 - chart/charts/gitlab-runner/values.yaml | 389 ----- chart/charts/grafana-4.0.1.tgz | Bin 0 -> 18232 bytes chart/charts/grafana/.helmignore | 23 - chart/charts/grafana/Chart.yaml | 19 - chart/charts/grafana/README.md | 305 ---- chart/charts/grafana/ci/default-values.yaml | 1 - .../ci/with-dashboard-json-values.yaml | 53 - .../grafana/ci/with-dashboard-values.yaml | 19 - .../grafana/dashboards/custom-dashboard.json | 1 - chart/charts/grafana/templates/NOTES.txt | 37 - chart/charts/grafana/templates/_helpers.tpl | 51 - chart/charts/grafana/templates/_pod.tpl | 360 ---- .../charts/grafana/templates/clusterrole.yaml | 28 - .../grafana/templates/clusterrolebinding.yaml | 23 - .../configmap-dashboard-provider.yaml | 27 - 
chart/charts/grafana/templates/configmap.yaml | 72 - .../templates/dashboards-json-configmap.yaml | 38 - .../charts/grafana/templates/deployment.yaml | 49 - .../grafana/templates/headless-service.yaml | 22 - chart/charts/grafana/templates/ingress.yaml | 41 - .../templates/poddisruptionbudget.yaml | 25 - .../grafana/templates/podsecuritypolicy.yaml | 55 - chart/charts/grafana/templates/pvc.yaml | 29 - chart/charts/grafana/templates/role.yaml | 35 - .../charts/grafana/templates/rolebinding.yaml | 30 - .../charts/grafana/templates/secret-env.yaml | 17 - chart/charts/grafana/templates/secret.yaml | 23 - chart/charts/grafana/templates/service.yaml | 50 - .../grafana/templates/serviceaccount.yaml | 16 - .../charts/grafana/templates/statefulset.yaml | 49 - .../templates/tests/test-configmap.yaml | 20 - .../tests/test-podsecuritypolicy.yaml | 32 - .../grafana/templates/tests/test-role.yaml | 17 - .../templates/tests/test-rolebinding.yaml | 20 - .../templates/tests/test-serviceaccount.yaml | 12 - .../charts/grafana/templates/tests/test.yaml | 67 - chart/charts/grafana/values.yaml | 464 ------ chart/charts/postgresql-8.9.4.tgz | Bin 0 -> 33145 bytes chart/charts/postgresql/.helmignore | 21 - chart/charts/postgresql/Chart.yaml | 23 - chart/charts/postgresql/README.md | 580 ------- .../postgresql/ci/commonAnnotations.yaml | 4 - .../charts/postgresql/ci/default-values.yaml | 1 - .../ci/shmvolume-disabled-values.yaml | 2 - chart/charts/postgresql/files/README.md | 1 - .../charts/postgresql/files/conf.d/README.md | 4 - .../docker-entrypoint-initdb.d/README.md | 3 - chart/charts/postgresql/templates/NOTES.txt | 60 - .../charts/postgresql/templates/_helpers.tpl | 452 ----- .../postgresql/templates/configmap.yaml | 29 - .../templates/extended-config-configmap.yaml | 24 - .../templates/initialization-configmap.yaml | 27 - .../templates/metrics-configmap.yaml | 16 - .../postgresql/templates/metrics-svc.yaml | 29 - .../postgresql/templates/networkpolicy.yaml | 41 - 
.../templates/podsecuritypolicy.yaml | 40 - .../postgresql/templates/prometheusrule.yaml | 26 - chart/charts/postgresql/templates/role.yaml | 22 - .../postgresql/templates/rolebinding.yaml | 22 - .../charts/postgresql/templates/secrets.yaml | 26 - .../postgresql/templates/serviceaccount.yaml | 14 - .../postgresql/templates/servicemonitor.yaml | 37 - .../templates/statefulset-slaves.yaml | 302 ---- .../postgresql/templates/statefulset.yaml | 457 ----- .../postgresql/templates/svc-headless.yaml | 22 - .../charts/postgresql/templates/svc-read.yaml | 46 - chart/charts/postgresql/templates/svc.yaml | 44 - .../charts/postgresql/values-production.yaml | 556 ------- chart/charts/postgresql/values.schema.json | 103 -- chart/charts/postgresql/values.yaml | 562 ------- chart/charts/prometheus-10.0.0.tgz | Bin 0 -> 26549 bytes chart/charts/prometheus/.helmignore | 23 - chart/charts/prometheus/Chart.yaml | 20 - chart/charts/prometheus/README.md | 476 ------ chart/charts/prometheus/templates/NOTES.txt | 112 -- .../charts/prometheus/templates/_helpers.tpl | 276 ---- .../templates/alertmanager-clusterrole.yaml | 21 - .../alertmanager-clusterrolebinding.yaml | 16 - .../templates/alertmanager-configmap.yaml | 14 - .../templates/alertmanager-deployment.yaml | 134 -- .../templates/alertmanager-ingress.yaml | 38 - .../templates/alertmanager-networkpolicy.yaml | 19 - .../templates/alertmanager-pdb.yaml | 13 - .../alertmanager-podsecuritypolicy.yaml | 48 - .../templates/alertmanager-pvc.yaml | 32 - .../alertmanager-service-headless.yaml | 30 - .../templates/alertmanager-service.yaml | 52 - .../alertmanager-serviceaccount.yaml | 8 - .../templates/alertmanager-statefulset.yaml | 150 -- .../kube-state-metrics-clusterrole.yaml | 87 - ...kube-state-metrics-clusterrolebinding.yaml | 16 - .../kube-state-metrics-deployment.yaml | 68 - .../kube-state-metrics-networkpolicy.yaml | 19 - .../templates/kube-state-metrics-pdb.yaml | 13 - .../kube-state-metrics-podsecuritypolicy.yaml | 42 - 
.../kube-state-metrics-serviceaccount.yaml | 8 - .../templates/kube-state-metrics-svc.yaml | 40 - .../templates/node-exporter-daemonset.yaml | 116 -- .../node-exporter-podsecuritypolicy.yaml | 55 - .../templates/node-exporter-role.yaml | 17 - .../templates/node-exporter-rolebinding.yaml | 19 - .../templates/node-exporter-service.yaml | 40 - .../node-exporter-serviceaccount.yaml | 8 - .../templates/pushgateway-clusterrole.yaml | 21 - .../pushgateway-clusterrolebinding.yaml | 16 - .../templates/pushgateway-deployment.yaml | 97 -- .../templates/pushgateway-ingress.yaml | 35 - .../templates/pushgateway-networkpolicy.yaml | 19 - .../prometheus/templates/pushgateway-pdb.yaml | 13 - .../pushgateway-podsecuritypolicy.yaml | 44 - .../prometheus/templates/pushgateway-pvc.yaml | 30 - .../templates/pushgateway-service.yaml | 40 - .../templates/pushgateway-serviceaccount.yaml | 8 - .../templates/server-clusterrole.yaml | 47 - .../templates/server-clusterrolebinding.yaml | 16 - .../templates/server-configmap.yaml | 73 - .../templates/server-deployment.yaml | 212 --- .../prometheus/templates/server-ingress.yaml | 40 - .../templates/server-networkpolicy.yaml | 17 - .../prometheus/templates/server-pdb.yaml | 13 - .../templates/server-podsecuritypolicy.yaml | 53 - .../prometheus/templates/server-pvc.yaml | 34 - .../templates/server-service-headless.yaml | 26 - .../prometheus/templates/server-service.yaml | 50 - .../templates/server-serviceaccount.yaml | 10 - .../templates/server-statefulset.yaml | 220 --- .../prometheus/templates/server-vpa.yaml | 24 - chart/charts/prometheus/values.yaml | 1468 ----------------- chart/charts/redis-10.3.4.tgz | Bin 0 -> 29578 bytes chart/charts/redis/.helmignore | 3 - chart/charts/redis/Chart.yaml | 21 - chart/charts/redis/README.md | 490 ------ chart/charts/redis/ci/default-values.yaml | 1 - chart/charts/redis/ci/dev-values.yaml | 9 - chart/charts/redis/ci/extra-flags-values.yaml | 11 - .../redis/ci/insecure-sentinel-values.yaml | 524 ------ 
.../redis/ci/production-sentinel-values.yaml | 524 ------ chart/charts/redis/ci/production-values.yaml | 525 ------ chart/charts/redis/ci/redis-lib-values.yaml | 13 - .../redis/ci/redisgraph-module-values.yaml | 10 - chart/charts/redis/templates/NOTES.txt | 104 -- chart/charts/redis/templates/_helpers.tpl | 355 ---- chart/charts/redis/templates/configmap.yaml | 52 - .../charts/redis/templates/headless-svc.yaml | 24 - .../redis/templates/health-configmap.yaml | 134 -- .../redis/templates/metrics-prometheus.yaml | 30 - chart/charts/redis/templates/metrics-svc.yaml | 30 - .../charts/redis/templates/networkpolicy.yaml | 79 - chart/charts/redis/templates/psp.yaml | 42 - .../templates/redis-master-statefulset.yaml | 410 ----- .../redis/templates/redis-master-svc.yaml | 39 - chart/charts/redis/templates/redis-role.yaml | 21 - .../redis/templates/redis-rolebinding.yaml | 18 - .../redis/templates/redis-serviceaccount.yaml | 11 - .../templates/redis-slave-statefulset.yaml | 428 ----- .../redis/templates/redis-slave-svc.yaml | 40 - .../templates/redis-with-sentinel-svc.yaml | 40 - chart/charts/redis/templates/secret.yaml | 14 - chart/charts/redis/values-production.yaml | 583 ------- chart/charts/redis/values.schema.json | 152 -- chart/charts/redis/values.yaml | 583 ------- 220 files changed, 19019 deletions(-) create mode 100644 chart/charts/cert-manager-v0.10.1.tgz delete mode 100755 chart/charts/cert-manager/.helmignore delete mode 100755 chart/charts/cert-manager/Chart.yaml delete mode 100755 chart/charts/cert-manager/OWNERS delete mode 100755 chart/charts/cert-manager/README.md delete mode 100755 chart/charts/cert-manager/cainjector/.helmignore delete mode 100755 chart/charts/cert-manager/cainjector/Chart.yaml delete mode 100755 chart/charts/cert-manager/cainjector/templates/NOTES.txt delete mode 100755 chart/charts/cert-manager/cainjector/templates/_helpers.tpl delete mode 100755 chart/charts/cert-manager/cainjector/templates/deployment.yaml delete mode 100755 
chart/charts/cert-manager/cainjector/templates/rbac.yaml delete mode 100755 chart/charts/cert-manager/cainjector/templates/serviceaccount.yaml delete mode 100755 chart/charts/cert-manager/cainjector/values.yaml delete mode 100755 chart/charts/cert-manager/charts/cainjector/.helmignore delete mode 100755 chart/charts/cert-manager/charts/cainjector/Chart.yaml delete mode 100755 chart/charts/cert-manager/charts/cainjector/templates/NOTES.txt delete mode 100755 chart/charts/cert-manager/charts/cainjector/templates/_helpers.tpl delete mode 100755 chart/charts/cert-manager/charts/cainjector/templates/deployment.yaml delete mode 100755 chart/charts/cert-manager/charts/cainjector/templates/rbac.yaml delete mode 100755 chart/charts/cert-manager/charts/cainjector/templates/serviceaccount.yaml delete mode 100755 chart/charts/cert-manager/charts/cainjector/values.yaml delete mode 100755 chart/charts/cert-manager/requirements.yaml delete mode 100755 chart/charts/cert-manager/templates/NOTES.txt delete mode 100755 chart/charts/cert-manager/templates/_helpers.tpl delete mode 100755 chart/charts/cert-manager/templates/deployment.yaml delete mode 100755 chart/charts/cert-manager/templates/rbac.yaml delete mode 100755 chart/charts/cert-manager/templates/service.yaml delete mode 100755 chart/charts/cert-manager/templates/serviceaccount.yaml delete mode 100755 chart/charts/cert-manager/templates/servicemonitor.yaml delete mode 100755 chart/charts/cert-manager/templates/webhook-apiservice.yaml delete mode 100755 chart/charts/cert-manager/templates/webhook-deployment.yaml delete mode 100755 chart/charts/cert-manager/templates/webhook-mutating-webhook.yaml delete mode 100755 chart/charts/cert-manager/templates/webhook-rbac.yaml delete mode 100755 chart/charts/cert-manager/templates/webhook-service.yaml delete mode 100755 chart/charts/cert-manager/templates/webhook-serviceaccount.yaml delete mode 100755 chart/charts/cert-manager/templates/webhook-validating-webhook.yaml delete mode 100755 
chart/charts/cert-manager/values.yaml create mode 100644 chart/charts/gitlab-runner-0.18.1.tgz delete mode 100755 chart/charts/gitlab-runner/.gitlab-ci.yml delete mode 100755 chart/charts/gitlab-runner/.gitlab/changelog.yml delete mode 100755 chart/charts/gitlab-runner/.helmignore delete mode 100755 chart/charts/gitlab-runner/CHANGELOG.md delete mode 100755 chart/charts/gitlab-runner/CONTRIBUTING.md delete mode 100755 chart/charts/gitlab-runner/Chart.yaml delete mode 100755 chart/charts/gitlab-runner/LICENSE delete mode 100755 chart/charts/gitlab-runner/Makefile delete mode 100755 chart/charts/gitlab-runner/NOTICE delete mode 100755 chart/charts/gitlab-runner/README.md delete mode 100755 chart/charts/gitlab-runner/templates/NOTES.txt delete mode 100755 chart/charts/gitlab-runner/templates/_cache.tpl delete mode 100755 chart/charts/gitlab-runner/templates/_env_vars.tpl delete mode 100755 chart/charts/gitlab-runner/templates/_helpers.tpl delete mode 100755 chart/charts/gitlab-runner/templates/configmap.yaml delete mode 100755 chart/charts/gitlab-runner/templates/deployment.yaml delete mode 100755 chart/charts/gitlab-runner/templates/hpa.yaml delete mode 100755 chart/charts/gitlab-runner/templates/role-binding.yaml delete mode 100755 chart/charts/gitlab-runner/templates/role.yaml delete mode 100755 chart/charts/gitlab-runner/templates/secrets.yaml delete mode 100755 chart/charts/gitlab-runner/templates/service-account.yaml delete mode 100755 chart/charts/gitlab-runner/values.yaml create mode 100644 chart/charts/grafana-4.0.1.tgz delete mode 100755 chart/charts/grafana/.helmignore delete mode 100755 chart/charts/grafana/Chart.yaml delete mode 100755 chart/charts/grafana/README.md delete mode 100755 chart/charts/grafana/ci/default-values.yaml delete mode 100755 chart/charts/grafana/ci/with-dashboard-json-values.yaml delete mode 100755 chart/charts/grafana/ci/with-dashboard-values.yaml delete mode 100755 chart/charts/grafana/dashboards/custom-dashboard.json delete mode 
100755 chart/charts/grafana/templates/NOTES.txt delete mode 100755 chart/charts/grafana/templates/_helpers.tpl delete mode 100755 chart/charts/grafana/templates/_pod.tpl delete mode 100755 chart/charts/grafana/templates/clusterrole.yaml delete mode 100755 chart/charts/grafana/templates/clusterrolebinding.yaml delete mode 100755 chart/charts/grafana/templates/configmap-dashboard-provider.yaml delete mode 100755 chart/charts/grafana/templates/configmap.yaml delete mode 100755 chart/charts/grafana/templates/dashboards-json-configmap.yaml delete mode 100755 chart/charts/grafana/templates/deployment.yaml delete mode 100755 chart/charts/grafana/templates/headless-service.yaml delete mode 100755 chart/charts/grafana/templates/ingress.yaml delete mode 100755 chart/charts/grafana/templates/poddisruptionbudget.yaml delete mode 100755 chart/charts/grafana/templates/podsecuritypolicy.yaml delete mode 100755 chart/charts/grafana/templates/pvc.yaml delete mode 100755 chart/charts/grafana/templates/role.yaml delete mode 100755 chart/charts/grafana/templates/rolebinding.yaml delete mode 100755 chart/charts/grafana/templates/secret-env.yaml delete mode 100755 chart/charts/grafana/templates/secret.yaml delete mode 100755 chart/charts/grafana/templates/service.yaml delete mode 100755 chart/charts/grafana/templates/serviceaccount.yaml delete mode 100755 chart/charts/grafana/templates/statefulset.yaml delete mode 100755 chart/charts/grafana/templates/tests/test-configmap.yaml delete mode 100755 chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml delete mode 100755 chart/charts/grafana/templates/tests/test-role.yaml delete mode 100755 chart/charts/grafana/templates/tests/test-rolebinding.yaml delete mode 100755 chart/charts/grafana/templates/tests/test-serviceaccount.yaml delete mode 100755 chart/charts/grafana/templates/tests/test.yaml delete mode 100755 chart/charts/grafana/values.yaml create mode 100644 chart/charts/postgresql-8.9.4.tgz delete mode 100755 
chart/charts/postgresql/.helmignore delete mode 100755 chart/charts/postgresql/Chart.yaml delete mode 100755 chart/charts/postgresql/README.md delete mode 100755 chart/charts/postgresql/ci/commonAnnotations.yaml delete mode 100755 chart/charts/postgresql/ci/default-values.yaml delete mode 100755 chart/charts/postgresql/ci/shmvolume-disabled-values.yaml delete mode 100755 chart/charts/postgresql/files/README.md delete mode 100755 chart/charts/postgresql/files/conf.d/README.md delete mode 100755 chart/charts/postgresql/files/docker-entrypoint-initdb.d/README.md delete mode 100755 chart/charts/postgresql/templates/NOTES.txt delete mode 100755 chart/charts/postgresql/templates/_helpers.tpl delete mode 100755 chart/charts/postgresql/templates/configmap.yaml delete mode 100755 chart/charts/postgresql/templates/extended-config-configmap.yaml delete mode 100755 chart/charts/postgresql/templates/initialization-configmap.yaml delete mode 100755 chart/charts/postgresql/templates/metrics-configmap.yaml delete mode 100755 chart/charts/postgresql/templates/metrics-svc.yaml delete mode 100755 chart/charts/postgresql/templates/networkpolicy.yaml delete mode 100755 chart/charts/postgresql/templates/podsecuritypolicy.yaml delete mode 100755 chart/charts/postgresql/templates/prometheusrule.yaml delete mode 100755 chart/charts/postgresql/templates/role.yaml delete mode 100755 chart/charts/postgresql/templates/rolebinding.yaml delete mode 100755 chart/charts/postgresql/templates/secrets.yaml delete mode 100755 chart/charts/postgresql/templates/serviceaccount.yaml delete mode 100755 chart/charts/postgresql/templates/servicemonitor.yaml delete mode 100755 chart/charts/postgresql/templates/statefulset-slaves.yaml delete mode 100755 chart/charts/postgresql/templates/statefulset.yaml delete mode 100755 chart/charts/postgresql/templates/svc-headless.yaml delete mode 100755 chart/charts/postgresql/templates/svc-read.yaml delete mode 100755 chart/charts/postgresql/templates/svc.yaml delete 
mode 100755 chart/charts/postgresql/values-production.yaml delete mode 100755 chart/charts/postgresql/values.schema.json delete mode 100755 chart/charts/postgresql/values.yaml create mode 100644 chart/charts/prometheus-10.0.0.tgz delete mode 100755 chart/charts/prometheus/.helmignore delete mode 100755 chart/charts/prometheus/Chart.yaml delete mode 100755 chart/charts/prometheus/README.md delete mode 100755 chart/charts/prometheus/templates/NOTES.txt delete mode 100755 chart/charts/prometheus/templates/_helpers.tpl delete mode 100755 chart/charts/prometheus/templates/alertmanager-clusterrole.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-configmap.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-deployment.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-ingress.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-networkpolicy.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-pdb.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-pvc.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-service-headless.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-service.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-serviceaccount.yaml delete mode 100755 chart/charts/prometheus/templates/alertmanager-statefulset.yaml delete mode 100755 chart/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml delete mode 100755 chart/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml delete mode 100755 chart/charts/prometheus/templates/kube-state-metrics-deployment.yaml delete mode 100755 chart/charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml delete mode 100755 
chart/charts/prometheus/templates/kube-state-metrics-pdb.yaml delete mode 100755 chart/charts/prometheus/templates/kube-state-metrics-podsecuritypolicy.yaml delete mode 100755 chart/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml delete mode 100755 chart/charts/prometheus/templates/kube-state-metrics-svc.yaml delete mode 100755 chart/charts/prometheus/templates/node-exporter-daemonset.yaml delete mode 100755 chart/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml delete mode 100755 chart/charts/prometheus/templates/node-exporter-role.yaml delete mode 100755 chart/charts/prometheus/templates/node-exporter-rolebinding.yaml delete mode 100755 chart/charts/prometheus/templates/node-exporter-service.yaml delete mode 100755 chart/charts/prometheus/templates/node-exporter-serviceaccount.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-clusterrole.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-deployment.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-ingress.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-networkpolicy.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-pdb.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-pvc.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-service.yaml delete mode 100755 chart/charts/prometheus/templates/pushgateway-serviceaccount.yaml delete mode 100755 chart/charts/prometheus/templates/server-clusterrole.yaml delete mode 100755 chart/charts/prometheus/templates/server-clusterrolebinding.yaml delete mode 100755 chart/charts/prometheus/templates/server-configmap.yaml delete mode 100755 chart/charts/prometheus/templates/server-deployment.yaml delete mode 100755 
chart/charts/prometheus/templates/server-ingress.yaml delete mode 100755 chart/charts/prometheus/templates/server-networkpolicy.yaml delete mode 100755 chart/charts/prometheus/templates/server-pdb.yaml delete mode 100755 chart/charts/prometheus/templates/server-podsecuritypolicy.yaml delete mode 100755 chart/charts/prometheus/templates/server-pvc.yaml delete mode 100755 chart/charts/prometheus/templates/server-service-headless.yaml delete mode 100755 chart/charts/prometheus/templates/server-service.yaml delete mode 100755 chart/charts/prometheus/templates/server-serviceaccount.yaml delete mode 100755 chart/charts/prometheus/templates/server-statefulset.yaml delete mode 100755 chart/charts/prometheus/templates/server-vpa.yaml delete mode 100755 chart/charts/prometheus/values.yaml create mode 100644 chart/charts/redis-10.3.4.tgz delete mode 100755 chart/charts/redis/.helmignore delete mode 100755 chart/charts/redis/Chart.yaml delete mode 100755 chart/charts/redis/README.md delete mode 100755 chart/charts/redis/ci/default-values.yaml delete mode 100755 chart/charts/redis/ci/dev-values.yaml delete mode 100755 chart/charts/redis/ci/extra-flags-values.yaml delete mode 100755 chart/charts/redis/ci/insecure-sentinel-values.yaml delete mode 100755 chart/charts/redis/ci/production-sentinel-values.yaml delete mode 100755 chart/charts/redis/ci/production-values.yaml delete mode 100755 chart/charts/redis/ci/redis-lib-values.yaml delete mode 100755 chart/charts/redis/ci/redisgraph-module-values.yaml delete mode 100755 chart/charts/redis/templates/NOTES.txt delete mode 100755 chart/charts/redis/templates/_helpers.tpl delete mode 100755 chart/charts/redis/templates/configmap.yaml delete mode 100755 chart/charts/redis/templates/headless-svc.yaml delete mode 100755 chart/charts/redis/templates/health-configmap.yaml delete mode 100755 chart/charts/redis/templates/metrics-prometheus.yaml delete mode 100755 chart/charts/redis/templates/metrics-svc.yaml delete mode 100755 
chart/charts/redis/templates/networkpolicy.yaml delete mode 100755 chart/charts/redis/templates/psp.yaml delete mode 100755 chart/charts/redis/templates/redis-master-statefulset.yaml delete mode 100755 chart/charts/redis/templates/redis-master-svc.yaml delete mode 100755 chart/charts/redis/templates/redis-role.yaml delete mode 100755 chart/charts/redis/templates/redis-rolebinding.yaml delete mode 100755 chart/charts/redis/templates/redis-serviceaccount.yaml delete mode 100755 chart/charts/redis/templates/redis-slave-statefulset.yaml delete mode 100755 chart/charts/redis/templates/redis-slave-svc.yaml delete mode 100755 chart/charts/redis/templates/redis-with-sentinel-svc.yaml delete mode 100755 chart/charts/redis/templates/secret.yaml delete mode 100755 chart/charts/redis/values-production.yaml delete mode 100755 chart/charts/redis/values.schema.json delete mode 100755 chart/charts/redis/values.yaml diff --git a/chart/charts/cert-manager-v0.10.1.tgz b/chart/charts/cert-manager-v0.10.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..baa0065c4d285510981e410b5c7e4d44bf0403d3 GIT binary patch literal 11396 zcmY*3HF}`q6VWilvH9emXza=_2gnVVpU@@QD(Q)Qs&}NP*dZOQ@61)v@`Kk z`DM>1X=-B&cIo5kyvgxw@O513u`r$-#|)f3Dypb|S{ThFx*4n=PfU{X)WyyebtfZ* zhXK<*U)`Qz$us3&!$wZrT?*@P1&}CXwMg#nc~RYN2Zd!gh+r1Ln{^#f8~x#tFua}O z$n)*l@$+@a|GIhs9{B;og0}2oe!~KP13$m|_I9(nKa6j`J+`bzg;MQ7)Y-oiw5WdH zRKL#$e8IEITd)Od_VAp#!^Xz7jY!fhPLRD)Aq(G1)G`le%%h?h(uhumb!`WS zTe6&cv0Z*fLgS2foMS~|Cog^%}hsBR#_aDc*O| z^0eQ3g3Mv{cA2Fiz`gEh$+unIc8#O~sK{`>TwYIwJSB9Z!Un{{?3;+Zn9;#X4PvM|NU=7S+w|AM%0Qa?(l4MB9LXvne zv3#Lo2#g7{)un(y06YY`oPV%IU>i*TEx?>1@l`X}R5|$B`}{6f@FVht7e8i<+;BCf z>_)n}FkMhV*aZu~X=V}%<%XAw%COp5y33QV%_9@ItK)7Bce6dQ^5Ggy#!q3)9L$UW zNX6=9&n`61ABTDL;Etua)wpq|5t0Xr7kWwSK6%S7XT8Bq@7D)6$+&wug8<10FN|aPRquB*X@dwUmVffe?-@!FlfhknfB;6<|z4&W|8PLyhO6`jloI{+E(bN|Z3l z+)Juh(r1tar_Vty@Q1FKu4euiDlaApM;dJ|R3F~Aa8Ba>i92d6b0lX%_z_LVHNEUy zk;&8O(c*jDXGd~41c1Sw>k`X%*ofqMMlJ;=XYyZoXUicyGkq(Q!YKZQvv%-zq^an3 
z#}e~&Xv*l|d(*MsvyZ7gP1*0m5xBgP;qXa?crl@zg9f^@P1Y=`V_b+7UY=sk_ASvi z0A{TFdt@`puO%f9yu;t|FeWK*(3B^^0;pjnaiI8%rFJJ`(@S#DNu&gE%n}1)pLCM_ z;b=xIN&MpxQGVWkqvPU3UuPA>N8MxULo>6ggm%|e!{s(gN|w9o)sAHkjfITqEmv4(38zLMQdL1ra>(9`hZsx(l*k%-hyLij zwTCwhwKP1GiyDQB`}RCqK(iG+)7HC(F+iugQdyc6=+Y4TX^^5m zt3lSJXi!CzDpZ79rdV=Zb1dD%Np3!r{TfDtvHLnZ^hRq)*NdoZJJxR&NlormicR?; z=K=JvQX|j*;qo|~Kqq|{K*GDvHuxSVk;q&^V=@kRq+`V3F@Dhr!0XQ^E{R4Ci;TOv z>Lx`;oBoS(vMQ5|%9Wb=7~{Z^621whQga(~%5_Gt5O|rL@vL(l^9C|wi!ikZLXNp5 zord=urDt%;%YGLA`~x8syhN6I<10!=cq%|Yn2I!UtM(lr$*>g8oWe_Itj3?BB`^!OJu7MeSiR_ol~i!-8>$ER zpw-61b%FJPKfVX-1%{e0`Z;>}!W&16_A|*Io=zrf2eAf|Sw3dg)2{uakAtQVB6gB2 z-kQ|4ocQ}Ms?r>;8LKJm7!DYN3ykiQK9%2tuKMSSV|MuatIn9`_~U)XU7mtex?wGe zn;bCJP3irf5&i9mQDUS1f4;N&z#rwYF&O}p z^00rw!fHeUhSXFGg%m9sV`zAhuuQ&)CLllaK39spe%5}-Dg}aN2vre3F5bO(*E8gp z@6nwPu;z}bGv*hu3&5QStwEr3yXCAJGSF!;OiF9=8|S(qy8eFc$LJ{i?eg^UsO|jV z?w6IZ=T9Q%6qg(o@PSSf8gq1widGyPbK>dk&aW3n|3W-NwnXye6$dw!8iB#+ZOLs0 z5bm?r`^?Sz&Jz!9O1Q|$Q=Ahk*Qcm6Z4gFICod@4*)H3ggjS^Z;)gd zsdc%SkM9$)5h)e_rAxN2DYES+Wmkqy@-{MloD?~G>{{Ycqi+}}oUrHhr6ZkNqPxNSFGjWu`FmmsqaXTGxH zT=$`vRdE8DX+M~t&-=HTzUwc$4$yBLg}%QTq{w8^h2A@%RO7>VR659Z_C3q0Jbm;8p{Qb>OryF~HQy8>iAJ#6Zd&6QFVHz?}iq2el) z3DXeSNahX`&LPYc+G8IgS&+!*feVRuzijt`CY7z(idxKRZaLM1DNBIj6)X}$Zlx<+ zDMXftB5^1-!(C;CvIa?+Z&aq0i79S~9mm1X4vWfzP^XTP>N zLtAG4lm>wO()WL)hSTg%#X+Rqf`Wqj9?(GogqOsPA0UR@IO5+5hT{DNMsvpK24kdt za7T?E%Dlm)T{C$^n&Vt;5fm+$#X0s3IQHXh^MA#xGaL>oBB^R0EBUZARgU^uoRKRe27(SWSy<56PwFS)k1VKgvmQxzK zvA3(Y`qC35qYEC?ufI~rwK1frbTDwTceo#(?^ye$QuMVbVnA#6=!lM zVP>yBeOu@vBKMWRO~Up4n=$!lIF#t*l4DsQ>xFv{=X&cpme#Hk%?hx2R0J)i-_=st^*ha~>Daa~NmT zwXCM1gU^{r8I$f_G`sDbf2=D6eWpca(|ZIl+<`W)ZirH^dIkoTK|uNMm`Rf$f+6Oc zDVXUL?nJ+Y0)Gsn0B`bHCg;$vSLO!^tK|(rOXF0O-H|x4ZQ z`LyRn`0Lk8*J)ugwx5wggKFk%t{d-pQLaQzsL}e91za4I%xxj1#(3%3+zaq?OX6uj z1~1`VcMVme@z`|(c5A${nDtK(R6CZ!OQ&!QM&}R)!39q6^q;2*_iqT*acdA}N7mkC zPf1*&TAwr`tfrSoO&^pvDwERmxKhV+mdxawIET$|MooF#@ZL0J?Jup4iZ8?0YBWMfBe8f*`WH=zX*Y=3foxmH!&CATr2ej1%!ohU(N;vt 
zq1D&~^5_}ot2m`OD>jZ;XU@A9OX)NzMv}%36G5%M4s5j8EB)=YUd!$W%r7Y71BQF- zr&CXy9y-4E-PzPt%uRYu+a2-DxuOlw-oJcx)9AJ64}B|4F=w6Qc}CQz^7HxFz_pq% zyfqo8)vv|r|Ej(xw4W}!?>|0C^ZkWY+VH*((Qjz?%vbO3?1;~4O$pArlFZ^%x}9m+ z_*+*d%~#nN-#(}{tal94iDxoV#|Eo+Yom+jyRU0xk3Ki@%`>(F-+&;S;LfU}0F6#ikGq577|5Yk0ATY1y4f$|d|bTOW*~_5 zk*p`v3k{n%3$_-VJ$sIO``7^nyIUX zzR#_Kq-FUjeScoEI8@@Q!1vB~tb+<@=lrD@YSBkwB4;C@)of8`98_!>!?a6kjK)L4 zq|D5c>WiB@@|*WfXp87N+bGs)*@2~`LOhr!0ZRExL#bbOX?<@zisrb1!N$YRVW%rAXl9zt0N+qe zNfo+hDYlZyfpL9!gOeVRQ1_crk^z2v4Zc1#ofeg+BSnh%IjHA_WfWWNZaa1yCRz2+ zahYA`bbUB#fglME`tT()V;d9pe1-AE<>GD%cIK~s#U8&RcGb!s)9IG+>~H<%l^3ZV z*4vZ9&lx#}zIEM=q@0#ErKyYB99`SoerQ44$pPD`3|R%^kfupo3@w#TRaHV*SEaLP z^<0w+OcwmD&@jpd))Q+LYfYG#m|7>g2Q#wMFgYR%W`&*`8)x=~Wu2i$zpc>qqnRYt zDE(OIaQj%1{_}vZPoH2j-NVYg7(_9o!(+s}eaH3?Xz%JdFlR5U>P zuKXS_1s+g(FVvsxRA98O@-=T=*-yEENsXWpT+`s4?0K)3^NGDebx~>==9QcPAK_K# zAM4c-bhiuadpLm&`yF<0lMwbX_w|K)brZ|<{VFbxINA*b8Tr}m_uW#_hy|$wqs)3L z;tm_xXxOEkyRf_OhuzMnV=YM6H2^c;Zn+R9P_@`YK_QZssu&z^2S>MGTh+`bwKbEw z;b8~#6NiH}FYZ%RtPt)J5`PZQ*NMP}Zb|8~k`+Zop{`@qisq&1Aoa?GJ+fN$`FnGF zL33NPV-#sMJ7_s`p-R*DBEyovskJ9*G;LkvqR7$wBL-#_a8&ZQ*5HWHzg%VACBlQ{ z-%q07Va>D_Timt^XDC9@PQZ#$IhFD8I(l}gmgmG})YTmz!@lk35=8w!INlzz)-2bAYhwe3M- z{#fuHJo+k@%G3!ecOXS;gFN3Q#vR2|JaOXB3J>52^)lBmHDy56JP?HZEd2p`>oe#%0=eH+dRtGB;>BxKenYOYjGS?HW(J+i z$iL;75e{LB$t^u}PIgdq*T(+TGU#a$w)`zzcC?NH)SLt@JTA11gWOSG#@|5?K|#-7 zJv^Rj6H47qf%o-I2K-4D^G`nqk}Eud>OVY+Wo(V4**2JqW5tIa`ziE#^q!2=_FV_X zNq!cj{FvbQbN{5_m~qd@IOVY{oM+_(ZxQM|~7k{HRIyn`(S|$&w^FQ&xe&P`Tw@ zV|%yGUSM!KWyN!s;>k683Cvp_r8Wn+Zk8=HLj&j;Nb%OQvBPI`1j^y1NKHR(lOOV2 zyBA?L^blU`;);>DkT7S=;)Io(t1Lc$_6-uPJlxmQ#je(WvMV+y_7i(kZyPA#Hs?eC zjO5$%8YN=I&vdyszK-Rt-q5DP_)V3hi{T^O<8Y^v!RmZ4PK%zv1Rut9`T|5QT45^5 z3u6K})YF=TuI+h#HmVdE37uIHT2t9`4)oLjm%Fd{WL`mP*Wfz+Ae3i!py^l7$~9dqC@O?=yBIRKpo`(e6dMs(0XDKQn}B zJG>>fm!fnqo9>4Ve13M#o1-~kfgQw3`TL!Y<#kd)Y(J&{rM60hYE$>nD{~AvkslCb zwd{W7Zm_Czy}t1TxMR_`^-$tOB3{=q>lO*2@2e*sdT#A5UgCJ+jn}t_@c}DBO>Bdf zz?qgap%r^9%{UQnCasd)TKuRK*5|LbrjnX#k9fSkDob|JfsC@j<8&$8-OplbyY&w} 
zeZFU3X^CH4Qkyy`rrT@S9c=lCjx30rP~a>(cJz?WpzA=JD(9!>$plP9nIfZnA4u4GY5S20a+Qz^n)w^eLr2#OiTu*J<9l}~KR{rgkq3ZVES z7aNCd-_I)Jot;nS@ZX*XXp@3oUf#n@L3v-kzrWn6W)SZEnrQdk+}wJ@y;j%Oe%@P3haEP1 zPSJujKXIb7Tg$uUnO$w&GsV|B{fFXL! zpoCjJp`sAgK=W&M32_5`nggy>kXbWVS|+MSKN(h7Y$Fy-4@>zQ{a)&C{Uy_F`ZWtX zyVsR059o<9BW89ywq|HmW3IDXoliMxeCUclO-#*qbaS8Gko`^m9}w~L#^=9l zXL%E(P5<8V1Z?%`iG%*teO8h^o?W$7kizKdg=7g-&uSQxcDNxnkJaX?JJpN<*<-_Y zMuSjjzIK9yIzY~xGoc>>O~_iz4kJdpdLmWYPe7Chx;+ppCaRw@iofXz2(}~T0d!IY zdfDP_asL7-hjLfJ3SNNEqW-kfnN_ee3iaG`*a`Wkuq?kpJ$m<3xS35xi091j37CqU zW-HyzzyH}pM!c?38*cC93jx!IvJ>*Rz5xFf(2%GPNyX2P7q`UEo;?pI@Y6nw5Y(;b z7Z&(&BMl=;iDv4NWlNU;`V11{0*!1x1i683UdT{L@=+vOtNQK2QFpf={TO1*w*WH_S-aYr zt>2*gBE*_P17tQOcGI*U!$=V5d({g*j$eB^ilI3~fD%JVve&a$Lht_8g&fp5Wzfo& zdwb!IroyLy4|C4rA!uIUjqd!&UwpJf+6XrZ%=yGJO@Cy1;TletOQC}_I;Rvgvlftt zO2)Q#dp`v4-p*X8B+sq8Z@jp0g!}naLF)a0c?x=(DW$TXZrL7Yc_+S1*%yHAfCbL{ zb916#hnNdlv2YHB(?!wDdUzNd(C5STli+w5beP#9MsJV@SOaTlO_MAVQ5oUU?NYjK z9GhmW6av0>vI)C9T<5=iUNzUHf4Recb_r>Iz2u>2{hQMq)&|Le5Xkt05cPvrxd8l) zKc*2n6bD>I-Vhoj5IV^N8zR?^=>8< z_-;5A5>5qasF3Amc?!9jd{8|Kvsv)_pokEYx)qG%6G?PPVP`F=w=M)Z5I*v5DEjZO zc+>&7xZ>*|d!zni-I2Am0M?R8%>D?rAA~EpQ9>_oJ1Qh81~hPof`>MLYb8eBB+#J* zAE~NtM+G$gC`CM&57(xpc+;W617_h$9<_8w?Kz}P%%Mc40TC+fMLH9bDjF=eicn-o z#+jpIaVk{Xa!u^mU)TIjh9&LKQQivt8Bex$&Ajv9ebo8`AiuuwS`k zi7(_<$A^?-AK*3Ia8%9nx&zWc6dGPCn|ave^NEBAO%QglJ!aBaUd+(O{Ys{K{jxT5H zt?pNpeApg|?kvn^ZxyN;C6{PHbYj|W6xa`^AeroFla*&>qiDm^zp8E>5n?j?(^)}L zTL{@S-_LYI_Ev)wvRlZIaMzqE)7mLs?uQ4aL7(GcohKbRV`jX?{pck?D)ljv<{V!c z+X`8R{o!FuW9yneISvYzTfU_%+2JpfCi75%^6?)w!$eC;bUv)df`iIYe&~JmPV>ym zgwYYst+IF*D#zNzJ!yM{sH7+@`fWj)i*e>{E{1SbuBLZ}LT|(yJUEvSa3R67lw>a_ zCx$E9IEI{*@xyBooJlGRHWO1Aa`GQ8oP(P={@F z`(b!$)>=hwu~hDXTeMEJ@FRc4sxonc#%7oDaR4*5#&}RMJcQg_i>&p&c?c6D^{8+b z_@q1X24zkS^$Nwv^nx3K#WmjJvZpXj<*)!-x$~@FzGyly_eS9z$fv#5@D_&2D}BO zYsz)LB7?$N0#4e^k7ZAO{=TFC?|v$;Ogp>4-@?l$IRs~j#zQz2jU}2SOf(ug>)j2@ zs=2w4@!Wca;*oWi)aV43QzQk-j~;nDdR5~!B&U2I;E#n80*9sRtlpo3=7MvBh;*hd zb?yzv;UQGkcl))otC@(p*hTg@kp! 
zid#wZPoxp%BbvUUX+p}tLOZAoHGBjG2qV?@=-3jmKc%k@Mr#e=G;vW{{0_>wH1%5QyjrBhhWbd!*lrUu0 zbwV*>WZxq)yv)Gj5GSF_Drqb65aH(b6;RJ z56nXsmfZxgn%WcUj#NATxZr-HxKd>&?iQ0UUz4C+@rf9so66KjxoNuN`)NAH*i;zd z=E#~r&-gFy5Md+kSH<=VE09)ITr8%TlGXPC{(Bm236QXzBZ5*PHKsi?TlaYa-!rvg zYb4U3$-A8`s?$Zy8T5>CoRWV%_#jLa8ML)DVotFuV4yB76rB;=`O$J(S9VyzXley7>x=}mUJ-gr`c2o6v zrU$a?;Y9_$&EGuCPi+-D(xVDa@9IPg>k9}(4>l8_JL0geh#|;d=_CVjcrcZ`cSqiV zX-;=fJhz83Rb48LEM*_KoOtjmcyu|j0W@KsBxaA|^djv@ z%U1qp9By(gtgGH=w-0jp5{>YWXBTq2sot~{uZY|kk|8f%G}Lmm!fg|_9BmnG-@K)h zdeh4P)`9Bu(K%ep^cl10|1?HSveY9Lz9VHuNp(h};H9c2B2^m+s8IDTH_*U;sx2id z=QJ#?LCl(Zm8!N##n_A|Q}-2@v5c<6GzVyHgx1wZ^;%b5m5638oljKVh$|?=M@I`8 zov`M7Cvm0o8Qgn4y^%VE)vzaubFeH`hE(2gf@#9$!Q9}EltcMGA0N4{MznWCk%qc? zU{g^SE$PW8B*b!m|2=9c3k!bnT$LzB3v4BEkZTd4c%8Kwxo&wnPp&4U&yQqGjYkir zE7~CnHPd1RLPMe(JN}6pP_wz1UT`2y+f<}erdxE35KjSB)v`9?+&h|e+)=gcJ5+q( zM7N7D1vP+fp#g(v5Hut<=Z zXV`0oL;E+|pN9Ug8m>lW@m!P@W%S*7>bh;z>+*-5$NVxo5%0daHNX~cZbdqEld@fo z<%clF5{9MzxwD*YRCj|_m!Hk&9r+eGPINT-*esz0m)d!kbw7!y=uPOWKUC+!N$*rx zPg`PJrHKMl^ly@Vh=#{xjT;!8NWQaXWF5cQQh!3Oiq&%3*Y~fTuqx0B5nmO^{Q{&b z0pk7*D#;*zYKZ>IdztvkgAEI#|J)ffo}V*E)Q^2mBad6yXlQgc^(_pNo~YqtT{6_@ z^Cy@>tPTjA?;W$>J9mfO>P{LM*L!;l_&|-NoB37@W0p0^4hv#G4r49WBu&O@BrA;{ zv^vsK_wbd=us`Y^=PzsRHwoSXQ~3Kn;bb+RrVFzPbm}3Zc%qpwagOVqq@&ZA_FsO9cwxDW9vnoza0>@eHKW z4Q(;?zkE)`%q5e{B|RRN%FQwKB|)Idp*MabgNzw6eV$+~`Teeg!YBEjC(HLFg45O< zX$17Gp@YR6GF#(p$zPA!fey7q_{~ zV=i>7diGM{1}V7-Hf#;cWyoBSceNOU*MakYG8}6b(zoJt1(FNvy{mPiXuOC)TyDRk zYkkjBD7lsnXi0PZF7_&we|2P zt%%81l??brSLpV&4^61tXcyS#HRT>X?;NZ5($|5W zpHXOmAlTm>RiExSGgfIGwjOI*UEmpF9rP;A=QjQ8nSL~dOfpnfrRX8tMPe+M{+H%tF|-VU@wGt#vZrT68JRO5r7fB|ahSLLuX#sm5Bul`#< zikmj*pEkfI63^>yZoUw#iVrt%uzAo-d8%tJIf1@ihWm1fR7geAj*v~j$|3^MF?^o! 
zg6TcNtvZUk#barFY4UD30v2EEvceNf6Cc7fJoM^jt)rQ5NFAviTYZcqp_@*JWkBED z_>CJX2OB-VwWVg_(uUepS+noTg>Zx)<$HgxV?C?Fzr37WKsV?8H&w(JwKbh{zJ{Hj z-ijk5oG0*EyCjbfoc3gFBIJUFU=@^+u#V*Bn?8pwk#^@UJP$+4yynR=yr z`IaO9FGkg+G*>9Hy5S>6SXz5h-Cj7;{`fC@w}IbZE|e*6I(Ba`$!k4sfxJn+rTXP> zspel@Uacg47KHReC)GQZ$DeP%S-I;9_Af0W8(v@->EJOseANLPkK4lpxhr*8Wu zOk?qCoW0sDXuIlPaA9lf`74gc(=EULG&atdE=4Hrw4#@mnG{U(hlE6!t&4Qy5?&G* z%I{Qh5*ZJ+`Oeuz3T+Y-nDP#~6UaRSeeDgblAdlj)2aDCg2YB=5 z+w%nKN-AvvX#s2g+XaB^DUduezSJW?TUW%_O&=in+>{l+Af{c43w;a_4`9{)#qZ|O z^1yH7b%*1tB?e@B;0MZ{8NdH8=DvsW%iaB@HU;E<-+0%9vIYM4zli?@{x|Gj7zQ=p z^;|W~HPnB$2;ttM18eX>9XzKf$p6GghFp%XmkKt7|69ZHR&DJUp<`oeEIz+ Pa15fwv48?|0|)yb2V9@j literal 0 HcmV?d00001 diff --git a/chart/charts/cert-manager/.helmignore b/chart/charts/cert-manager/.helmignore deleted file mode 100755 index f0c1319..0000000 --- a/chart/charts/cert-manager/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/chart/charts/cert-manager/Chart.yaml b/chart/charts/cert-manager/Chart.yaml deleted file mode 100755 index a1cb6bf..0000000 --- a/chart/charts/cert-manager/Chart.yaml +++ /dev/null @@ -1,16 +0,0 @@ -appVersion: v0.10.1 -description: A Helm chart for cert-manager -home: https://github.com/jetstack/cert-manager -icon: https://raw.githubusercontent.com/jetstack/cert-manager/master/logo/logo.png -keywords: -- cert-manager -- kube-lego -- letsencrypt -- tls -maintainers: -- email: james@jetstack.io - name: munnerz -name: cert-manager -sources: -- https://github.com/jetstack/cert-manager -version: v0.10.1 diff --git a/chart/charts/cert-manager/OWNERS b/chart/charts/cert-manager/OWNERS deleted file mode 100755 index 68d3a41..0000000 --- a/chart/charts/cert-manager/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -approvers: -- munnerz -- simonswine -- kragniz -reviewers: -- munnerz -- unguiculus -- kragniz diff --git a/chart/charts/cert-manager/README.md b/chart/charts/cert-manager/README.md deleted file mode 100755 index e892bdf..0000000 --- a/chart/charts/cert-manager/README.md +++ /dev/null @@ -1,154 +0,0 @@ -# cert-manager - -cert-manager is a Kubernetes addon to automate the management and issuance of -TLS certificates from various issuing sources. - -It will ensure certificates are valid and up to date periodically, and attempt -to renew certificates at an appropriate time before expiry. - -## Prerequisites - -- Kubernetes 1.7+ - -## Installing the Chart - -Full installation instructions, including details on how to configure extra -functionality in cert-manager can be found in the [getting started docs](https://docs.cert-manager.io/en/latest/getting-started/). 
- -To install the chart with the release name `my-release`: - -```console -## IMPORTANT: you MUST install the cert-manager CRDs **before** installing the -## cert-manager Helm chart -$ kubectl apply \ - -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.10/deploy/manifests/00-crds.yaml - -## If you are installing on openshift : -$ oc create \ - -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.10/deploy/manifests/00-crds.yaml - -## IMPORTANT: if the cert-manager namespace **already exists**, you MUST ensure -## it has an additional label on it in order for the deployment to succeed -$ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation="true" - -## For openshift: -$ oc label namespace cert-manager certmanager.k8s.io/disable-validation=true - -## Add the Jetstack Helm repository -$ helm repo add jetstack https://charts.jetstack.io - - -## Install the cert-manager helm chart -$ helm install --name my-release --namespace cert-manager jetstack/cert-manager -``` - -In order to begin issuing certificates, you will need to set up a ClusterIssuer -or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer). - -More information on the different types of issuers and how to configure them -can be found in our documentation: - -https://docs.cert-manager.io/en/latest/tasks/issuers/index.html - -For information on how to configure cert-manager to automatically provision -Certificates for Ingress resources, take a look at the `ingress-shim` -documentation: - -https://docs.cert-manager.io/en/latest/tasks/issuing-certificates/ingress-shim.html - -> **Tip**: List all releases using `helm list` - -## Upgrading the Chart - -Special considerations may be required when upgrading the Helm chart, and these -are documented in our full [upgrading guide](https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html). -Please check here before perform upgrades! 
- -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -$ helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Configuration - -The following table lists the configurable parameters of the cert-manager chart and their default values. - -| Parameter | Description | Default | -| --------- | ----------- | ------- | -| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` | -| `global.rbac.create` | If `true`, create and use RBAC resources (includes sub-charts) | `true` | -| `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` | -| `image.tag` | Image tag | `v0.10.1` | -| `image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `replicaCount` | Number of cert-manager replicas | `1` | -| `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod -| `leaderElection.Namespace` | Override the namespace used to store the ConfigMap for leader election | Same namespace as cert-manager pod -| `extraArgs` | Optional flags for cert-manager | `[]` | -| `extraEnv` | Optional environment variables for cert-manager | `[]` | -| `serviceAccount.create` | If `true`, create a new service account | `true` | -| `serviceAccount.name` | Service account to be used. 
If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | | -| `resources` | CPU/memory resource requests/limits | | -| `securityContext.enabled` | Enable security context | `false` | -| `securityContext.fsGroup` | Group ID for the container | `1001` | -| `securityContext.runAsUser` | User ID for the container | `1001` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `affinity` | Node affinity for pod assignment | `{}` | -| `tolerations` | Node tolerations for pod assignment | `[]` | -| `ingressShim.defaultIssuerName` | Optional default issuer to use for ingress resources | | -| `ingressShim.defaultIssuerKind` | Optional default issuer kind to use for ingress resources | | -| `ingressShim.defaultACMEChallengeType` | Optional default challenge type to use for ingresses using ACME issuers | | -| `ingressShim.defaultACMEDNS01ChallengeProvider` | Optional default DNS01 challenge provider to use for ingresses using ACME issuers with DNS01 | | -| `prometheus.enabled` | Enable Prometheus monitoring | `true` | -| `prometheus.servicemonitor.enabled` | Enable Prometheus Operator ServiceMonitor monitoring | `false` -| `prometheus.servicemonitor.namespace` | Define namespace where to deploy the ServiceMonitor resource | (namespace where you are deploying) | -| `prometheus.servicemonitor.prometheusInstance` | Prometheus Instance definition | `default` | -| `prometheus.servicemonitor.targetPort` | Prometheus scrape port | `9402` | -| `prometheus.servicemonitor.path` | Prometheus scrape path | `/metrics` | -| `prometheus.servicemonitor.interval` | Prometheus scrape interval | `60s` | -| `prometheus.servicemonitor.labels` | Add custom labels to ServiceMonitor | | -| `prometheus.servicemonitor.scrapeTimeout` | Prometheus scrape timeout | `30s` | -| `podAnnotations` | Annotations to add to the cert-manager pod | `{}` | -| `podDnsPolicy` | Optional cert-manager pod [DNS 
policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy) | | -| `podDnsConfig` | Optional cert-manager pod [DNS configurations](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) | | -| `podLabels` | Labels to add to the cert-manager pod | `{}` | -| `priorityClassName`| Priority class name for cert-manager and webhook pods | `""` | -| `http_proxy` | Value of the `HTTP_PROXY` environment variable in the cert-manager pod | | -| `https_proxy` | Value of the `HTTPS_PROXY` environment variable in the cert-manager pod | | -| `no_proxy` | Value of the `NO_PROXY` environment variable in the cert-manager pod | | -| `webhook.enabled` | Toggles whether the validating webhook component should be installed | `true` | -| `webhook.replicaCount` | Number of cert-manager webhook replicas | `1` | -| `webhook.podAnnotations` | Annotations to add to the webhook pods | `{}` | -| `webhook.extraArgs` | Optional flags for cert-manager webhook component | `[]` | -| `webhook.resources` | CPU/memory resource requests/limits for the webhook pods | | -| `webhook.nodeSelector` | Node labels for webhook pod assignment | `{}` | -| `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` | -| `webhook.image.tag` | Webhook image tag | `v0.10.1` | -| `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` | -| `webhook.injectAPIServerCA` | if true, the apiserver's CABundle will be automatically injected into the ValidatingWebhookConfiguration resource | `true` | -| `cainjector.enabled` | Toggles whether the cainjector component should be installed (required for the webhook component to work) | `true` | -| `cainjector.replicaCount` | Number of cert-manager cainjector replicas | `1` | -| `cainjector.podAnnotations` | Annotations to add to the cainjector pods | `{}` | -| `cainjector.extraArgs` | Optional flags for cert-manager cainjector component | `[]` | -| 
`cainjector.resources` | CPU/memory resource requests/limits for the cainjector pods | | -| `cainjector.nodeSelector` | Node labels for cainjector pod assignment | `{}` | -| `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` | -| `cainjector.image.tag` | cainjector image tag | `v0.10.1` | -| `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` | - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. - -Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, - -```console -$ helm install --name my-release -f values.yaml . -``` -> **Tip**: You can use the default [values.yaml](values.yaml) - -## Contributing - -This chart is maintained at [github.com/jetstack/cert-manager](https://github.com/jetstack/cert-manager/tree/master/deploy/charts/cert-manager). diff --git a/chart/charts/cert-manager/cainjector/.helmignore b/chart/charts/cert-manager/cainjector/.helmignore deleted file mode 100755 index f0c1319..0000000 --- a/chart/charts/cert-manager/cainjector/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/chart/charts/cert-manager/cainjector/Chart.yaml b/chart/charts/cert-manager/cainjector/Chart.yaml deleted file mode 100755 index a0451ea..0000000 --- a/chart/charts/cert-manager/cainjector/Chart.yaml +++ /dev/null @@ -1,17 +0,0 @@ -name: cainjector -apiVersion: v1 -# The version and appVersion fields are set automatically by the release tool -version: v0.1.0 -appVersion: v0.1.0 -description: A Helm chart for deploying the cert-manager cainjector component -home: https://github.com/jetstack/cert-manager -sources: - - https://github.com/jetstack/cert-manager -keywords: - - cert-manager - - kube-lego - - letsencrypt - - tls -maintainers: - - name: munnerz - email: james@jetstack.io diff --git a/chart/charts/cert-manager/cainjector/templates/NOTES.txt b/chart/charts/cert-manager/cainjector/templates/NOTES.txt deleted file mode 100755 index e69de29..0000000 diff --git a/chart/charts/cert-manager/cainjector/templates/_helpers.tpl b/chart/charts/cert-manager/cainjector/templates/_helpers.tpl deleted file mode 100755 index f7465cb..0000000 --- a/chart/charts/cert-manager/cainjector/templates/_helpers.tpl +++ /dev/null @@ -1,32 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "cainjector.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "cainjector.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cainjector.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/chart/charts/cert-manager/cainjector/templates/deployment.yaml b/chart/charts/cert-manager/cainjector/templates/deployment.yaml deleted file mode 100755 index 0d3e918..0000000 --- a/chart/charts/cert-manager/cainjector/templates/deployment.yaml +++ /dev/null @@ -1,75 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "cainjector.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- with .Values.strategy }} - strategy: - {{- . | toYaml | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} - annotations: - {{- if .Values.podAnnotations }} -{{ toYaml .Values.podAnnotations | indent 8 }} - {{- end }} - spec: - serviceAccountName: {{ include "cainjector.fullname" . }} - {{- if .Values.global.priorityClassName }} - priorityClassName: {{ .Values.global.priorityClassName | quote }} - {{- end }} - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: - {{- if .Values.global.logLevel }} - - --v={{ .Values.global.logLevel }} - {{- end }} - {{- if .Values.global.leaderElection.namespace }} - - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} - {{- else }} - - --leader-election-namespace=$(POD_NAMESPACE) - {{- end }} - {{- if .Values.extraArgs }} -{{ toYaml .Values.extraArgs | indent 10 }} - {{- end }} - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: -{{ toYaml .Values.resources | indent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} - {{- end }} diff --git a/chart/charts/cert-manager/cainjector/templates/rbac.yaml b/chart/charts/cert-manager/cainjector/templates/rbac.yaml deleted file mode 100755 index 6487404..0000000 --- a/chart/charts/cert-manager/cainjector/templates/rbac.yaml +++ /dev/null @@ -1,50 +0,0 @@ -{{- if .Values.global.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cainjector.fullname" . }} - labels: - app: {{ template "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["configmaps", "events"] - verbs: ["get", "create", "update", "patch"] - - apiGroups: ["admissionregistration.k8s.io"] - resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["apiregistration.k8s.io"] - resources: ["apiservices"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "list", "watch", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cainjector.fullname" . }} - labels: - app: {{ template "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cainjector.fullname" . }} -subjects: - - name: {{ include "cainjector.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount -{{- end -}} diff --git a/chart/charts/cert-manager/cainjector/templates/serviceaccount.yaml b/chart/charts/cert-manager/cainjector/templates/serviceaccount.yaml deleted file mode 100755 index 67f186f..0000000 --- a/chart/charts/cert-manager/cainjector/templates/serviceaccount.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "cainjector.fullname" . 
}} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} -{{- end }} diff --git a/chart/charts/cert-manager/cainjector/values.yaml b/chart/charts/cert-manager/cainjector/values.yaml deleted file mode 100755 index 29769d1..0000000 --- a/chart/charts/cert-manager/cainjector/values.yaml +++ /dev/null @@ -1,42 +0,0 @@ -global: - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - # - name: "image-pull-secret" - - # Optional priority class to be used for the cert-manager pods - priorityClassName: "" - rbac: - create: true - - leaderElection: - # Override the namespace used to store the ConfigMap for leader election - namespace: "" - -replicaCount: 1 - -strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - -podAnnotations: {} - -# Optional additional arguments for cainjector -extraArgs: [] - -resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - -nodeSelector: {} - -image: - repository: quay.io/jetstack/cert-manager-cainjector - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - pullPolicy: IfNotPresent diff --git a/chart/charts/cert-manager/charts/cainjector/.helmignore b/chart/charts/cert-manager/charts/cainjector/.helmignore deleted file mode 100755 index f0c1319..0000000 --- a/chart/charts/cert-manager/charts/cainjector/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. 
-# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/chart/charts/cert-manager/charts/cainjector/Chart.yaml b/chart/charts/cert-manager/charts/cainjector/Chart.yaml deleted file mode 100755 index a5f0cff..0000000 --- a/chart/charts/cert-manager/charts/cainjector/Chart.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -appVersion: v0.10.1 -description: A Helm chart for deploying the cert-manager cainjector component -home: https://github.com/jetstack/cert-manager -keywords: -- cert-manager -- kube-lego -- letsencrypt -- tls -maintainers: -- email: james@jetstack.io - name: munnerz -name: cainjector -sources: -- https://github.com/jetstack/cert-manager -version: v0.10.1 diff --git a/chart/charts/cert-manager/charts/cainjector/templates/NOTES.txt b/chart/charts/cert-manager/charts/cainjector/templates/NOTES.txt deleted file mode 100755 index e69de29..0000000 diff --git a/chart/charts/cert-manager/charts/cainjector/templates/_helpers.tpl b/chart/charts/cert-manager/charts/cainjector/templates/_helpers.tpl deleted file mode 100755 index f7465cb..0000000 --- a/chart/charts/cert-manager/charts/cainjector/templates/_helpers.tpl +++ /dev/null @@ -1,32 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "cainjector.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "cainjector.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cainjector.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/chart/charts/cert-manager/charts/cainjector/templates/deployment.yaml b/chart/charts/cert-manager/charts/cainjector/templates/deployment.yaml deleted file mode 100755 index 0d3e918..0000000 --- a/chart/charts/cert-manager/charts/cainjector/templates/deployment.yaml +++ /dev/null @@ -1,75 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "cainjector.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- with .Values.strategy }} - strategy: - {{- . | toYaml | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} - annotations: - {{- if .Values.podAnnotations }} -{{ toYaml .Values.podAnnotations | indent 8 }} - {{- end }} - spec: - serviceAccountName: {{ include "cainjector.fullname" . }} - {{- if .Values.global.priorityClassName }} - priorityClassName: {{ .Values.global.priorityClassName | quote }} - {{- end }} - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: - {{- if .Values.global.logLevel }} - - --v={{ .Values.global.logLevel }} - {{- end }} - {{- if .Values.global.leaderElection.namespace }} - - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} - {{- else }} - - --leader-election-namespace=$(POD_NAMESPACE) - {{- end }} - {{- if .Values.extraArgs }} -{{ toYaml .Values.extraArgs | indent 10 }} - {{- end }} - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: -{{ toYaml .Values.resources | indent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} - {{- end }} diff --git a/chart/charts/cert-manager/charts/cainjector/templates/rbac.yaml b/chart/charts/cert-manager/charts/cainjector/templates/rbac.yaml deleted file mode 100755 index 6487404..0000000 --- a/chart/charts/cert-manager/charts/cainjector/templates/rbac.yaml +++ /dev/null @@ -1,50 +0,0 @@ -{{- if .Values.global.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cainjector.fullname" . }} - labels: - app: {{ template "cainjector.name" . 
}} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["configmaps", "events"] - verbs: ["get", "create", "update", "patch"] - - apiGroups: ["admissionregistration.k8s.io"] - resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["apiregistration.k8s.io"] - resources: ["apiservices"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "list", "watch", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cainjector.fullname" . }} - labels: - app: {{ template "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cainjector.fullname" . }} -subjects: - - name: {{ include "cainjector.fullname" . 
}} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount -{{- end -}} diff --git a/chart/charts/cert-manager/charts/cainjector/templates/serviceaccount.yaml b/chart/charts/cert-manager/charts/cainjector/templates/serviceaccount.yaml deleted file mode 100755 index 67f186f..0000000 --- a/chart/charts/cert-manager/charts/cainjector/templates/serviceaccount.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "cainjector.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "cainjector.chart" . }} -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} -{{- end }} diff --git a/chart/charts/cert-manager/charts/cainjector/values.yaml b/chart/charts/cert-manager/charts/cainjector/values.yaml deleted file mode 100755 index 29769d1..0000000 --- a/chart/charts/cert-manager/charts/cainjector/values.yaml +++ /dev/null @@ -1,42 +0,0 @@ -global: - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - # - name: "image-pull-secret" - - # Optional priority class to be used for the cert-manager pods - priorityClassName: "" - rbac: - create: true - - leaderElection: - # Override the namespace used to store the ConfigMap for leader election - namespace: "" - -replicaCount: 1 - -strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - -podAnnotations: {} - -# Optional additional arguments for cainjector -extraArgs: [] - -resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - -nodeSelector: {} - -image: - repository: 
quay.io/jetstack/cert-manager-cainjector - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - pullPolicy: IfNotPresent diff --git a/chart/charts/cert-manager/requirements.yaml b/chart/charts/cert-manager/requirements.yaml deleted file mode 100755 index b117e63..0000000 --- a/chart/charts/cert-manager/requirements.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# requirements.yaml -dependencies: -- name: cainjector - version: "v0.1.0" - repository: "file://cainjector" - condition: cainjector.enabled diff --git a/chart/charts/cert-manager/templates/NOTES.txt b/chart/charts/cert-manager/templates/NOTES.txt deleted file mode 100755 index 7edd135..0000000 --- a/chart/charts/cert-manager/templates/NOTES.txt +++ /dev/null @@ -1,15 +0,0 @@ -cert-manager has been deployed successfully! - -In order to begin issuing certificates, you will need to set up a ClusterIssuer -or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer). - -More information on the different types of issuers and how to configure them -can be found in our documentation: - -https://docs.cert-manager.io/en/latest/reference/issuers.html - -For information on how to configure cert-manager to automatically provision -Certificates for Ingress resources, take a look at the `ingress-shim` -documentation: - -https://docs.cert-manager.io/en/latest/reference/ingress-shim.html diff --git a/chart/charts/cert-manager/templates/_helpers.tpl b/chart/charts/cert-manager/templates/_helpers.tpl deleted file mode 100755 index b116334..0000000 --- a/chart/charts/cert-manager/templates/_helpers.tpl +++ /dev/null @@ -1,92 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "cert-manager.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. 
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "cert-manager.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cert-manager.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "cert-manager.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "cert-manager.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Expand the name of the chart. -Manually fix the 'app' and 'name' labels to 'webhook' to maintain -compatibility with the v0.9 deployment selector. -*/}} -{{- define "webhook.name" -}} -{{- printf "webhook" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "webhook.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- printf "%s-webhook" .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-webhook" .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-webhook" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "webhook.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{- define "webhook.selfSignedIssuer" -}} -{{ printf "%s-selfsign" (include "webhook.fullname" .) }} -{{- end -}} - -{{- define "webhook.rootCAIssuer" -}} -{{ printf "%s-ca" (include "webhook.fullname" .) }} -{{- end -}} - -{{- define "webhook.rootCACertificate" -}} -{{ printf "%s-ca" (include "webhook.fullname" .) }} -{{- end -}} - -{{- define "webhook.servingCertificate" -}} -{{ printf "%s-tls" (include "webhook.fullname" .) }} -{{- end -}} diff --git a/chart/charts/cert-manager/templates/deployment.yaml b/chart/charts/cert-manager/templates/deployment.yaml deleted file mode 100755 index c3804e9..0000000 --- a/chart/charts/cert-manager/templates/deployment.yaml +++ /dev/null @@ -1,135 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "cert-manager.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- with .Values.strategy }} - strategy: - {{- . | toYaml | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -{{- if .Values.podLabels }} -{{ toYaml .Values.podLabels | indent 8 }} -{{- end }} - annotations: - {{- if .Values.podAnnotations }} -{{ toYaml .Values.podAnnotations | indent 8 }} - {{- end }} - {{- if and .Values.prometheus.enabled (not .Values.prometheus.servicemonitor.enabled) }} - prometheus.io/path: "/metrics" - prometheus.io/scrape: 'true' - prometheus.io/port: '9402' - {{- end }} - spec: - serviceAccountName: {{ template "cert-manager.serviceAccountName" . }} - {{- if .Values.global.priorityClassName }} - priorityClassName: {{ .Values.global.priorityClassName | quote }} - {{- end }} - {{- if .Values.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.securityContext.fsGroup }} - runAsUser: {{ .Values.securityContext.runAsUser }} - {{- end }} - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: - {{- if .Values.global.logLevel }} - - --v={{ .Values.global.logLevel }} - {{- end }} - {{- if .Values.clusterResourceNamespace }} - - --cluster-resource-namespace={{ .Values.clusterResourceNamespace }} - {{- else }} - - --cluster-resource-namespace=$(POD_NAMESPACE) - {{- end }} - {{- if .Values.global.leaderElection.namespace }} - - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} - {{- else }} - - --leader-election-namespace=$(POD_NAMESPACE) - {{- end }} - {{- if .Values.extraArgs }} -{{ toYaml 
.Values.extraArgs | indent 10 }} - {{- end }} - {{- with .Values.ingressShim }} - {{- if .defaultIssuerName }} - - --default-issuer-name={{ .defaultIssuerName }} - {{- end }} - {{- if .defaultIssuerKind }} - - --default-issuer-kind={{ .defaultIssuerKind }} - {{- end }} - {{- if .defaultACMEChallengeType }} - - --default-acme-issuer-challenge-type={{ .defaultACMEChallengeType }} - {{- end }} - {{- if .defaultACMEDNS01ChallengeProvider }} - - --default-acme-issuer-dns01-provider-name={{ .defaultACMEDNS01ChallengeProvider }} - {{- end }} - {{- end }} - - --webhook-namespace=$(POD_NAMESPACE) - - --webhook-ca-secret={{ include "webhook.rootCACertificate" . }} - - --webhook-serving-secret={{ include "webhook.servingCertificate" . }} - - --webhook-dns-names={{ include "webhook.fullname" . }},{{ include "webhook.fullname" . }}.{{ .Release.Namespace }},{{ include "webhook.fullname" . }}.{{ .Release.Namespace }}.svc - ports: - - containerPort: 9402 - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- if .Values.extraEnv }} -{{ toYaml .Values.extraEnv | indent 10 }} - {{- end }} - {{- if .Values.http_proxy }} - - name: HTTP_PROXY - value: {{ .Values.http_proxy }} - {{- end }} - {{- if .Values.https_proxy }} - - name: HTTPS_PROXY - value: {{ .Values.https_proxy }} - {{- end }} - {{- if .Values.no_proxy }} - - name: NO_PROXY - value: {{ .Values.no_proxy }} - {{- end }} - resources: -{{ toYaml .Values.resources | indent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . 
| indent 8 }} - {{- end }} -{{- if .Values.podDnsPolicy }} - dnsPolicy: {{ .Values.podDnsPolicy }} -{{- end }} -{{- if .Values.podDnsConfig }} - dnsConfig: -{{ toYaml .Values.podDnsConfig | indent 8 }} -{{- end }} diff --git a/chart/charts/cert-manager/templates/rbac.yaml b/chart/charts/cert-manager/templates/rbac.yaml deleted file mode 100755 index 694eee4..0000000 --- a/chart/charts/cert-manager/templates/rbac.yaml +++ /dev/null @@ -1,420 +0,0 @@ -{{- if .Values.global.rbac.create -}} - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-leaderelection - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -rules: - # Used for leader election by the controller - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "create", "update", "patch"] - ---- - -# Issuer controller role -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-issuers - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . 
}} -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["issuers", "issuers/status"] - verbs: ["update"] - - apiGroups: ["certmanager.k8s.io"] - resources: ["issuers"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] - ---- - -# ClusterIssuer controller role -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["clusterissuers", "clusterissuers/status"] - verbs: ["update"] - - apiGroups: ["certmanager.k8s.io"] - resources: ["clusterissuers"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] - ---- - -# Certificates controller role -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-certificates - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . 
}} -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] - verbs: ["update"] - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates", "certificaterequests", "clusterissuers", "issuers", "orders"] - verbs: ["get", "list", "watch"] - # We require these rules to support users with the OwnerReferencesPermissionEnforcement - # admission controller enabled: - # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates/finalizers"] - verbs: ["update"] - - apiGroups: ["certmanager.k8s.io"] - resources: ["orders"] - verbs: ["create", "delete"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] - ---- - -# Orders controller role -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-orders - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . 
}} -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["orders", "orders/status"] - verbs: ["update"] - - apiGroups: ["certmanager.k8s.io"] - resources: ["orders", "clusterissuers", "issuers", "challenges"] - verbs: ["get", "list", "watch"] - - apiGroups: ["certmanager.k8s.io"] - resources: ["challenges"] - verbs: ["create", "delete"] - # We require these rules to support users with the OwnerReferencesPermissionEnforcement - # admission controller enabled: - # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["certmanager.k8s.io"] - resources: ["orders/finalizers"] - verbs: ["update"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] - ---- - -# Challenges controller role -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-challenges - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . 
}} -rules: - # Use to update challenge resource status - - apiGroups: ["certmanager.k8s.io"] - resources: ["challenges", "challenges/status"] - verbs: ["update"] - # Used to watch challenges, issuer and clusterissuer resources - - apiGroups: ["certmanager.k8s.io"] - resources: ["challenges", "issuers", "clusterissuers"] - verbs: ["get", "list", "watch"] - # Need to be able to retrieve ACME account private key to complete challenges - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - # Used to create events - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] - # HTTP01 rules - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: ["extensions"] - resources: ["ingresses"] - verbs: ["get", "list", "watch", "create", "delete", "update"] -{{- if .Values.global.isOpenshift }} - # We require the ability to specify a custom hostname when we are creating - # new ingress resources. - # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 - - apiGroups: ["route.openshift.io"] - resources: ["routes/custom-host"] - verbs: ["create"] -{{- end }} - # We require these rules to support users with the OwnerReferencesPermissionEnforcement - # admission controller enabled: - # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["certmanager.k8s.io"] - resources: ["challenges/finalizers"] - verbs: ["update"] - # DNS01 rules (duplicated above) - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - ---- - -# ingress-shim controller role -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim - labels: - app: {{ template "cert-manager.name" . 
}} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates", "certificaterequests"] - verbs: ["create", "update", "delete"] - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["ingresses"] - verbs: ["get", "list", "watch"] - # We require these rules to support users with the OwnerReferencesPermissionEnforcement - # admission controller enabled: - # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["extensions"] - resources: ["ingresses/finalizers"] - verbs: ["update"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cert-manager.fullname" . }}-leaderelection - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cert-manager.fullname" . }}-leaderelection -subjects: - - name: {{ template "cert-manager.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-issuers - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cert-manager.fullname" . }}-controller-issuers -subjects: - - name: {{ template "cert-manager.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers -subjects: - - name: {{ template "cert-manager.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-certificates - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cert-manager.fullname" . }}-controller-certificates -subjects: - - name: {{ template "cert-manager.serviceAccountName" . 
}} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-orders - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cert-manager.fullname" . }}-controller-orders -subjects: - - name: {{ template "cert-manager.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-challenges - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cert-manager.fullname" . }}-controller-challenges -subjects: - - name: {{ template "cert-manager.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . 
}} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim -subjects: - - name: {{ template "cert-manager.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} - kind: ServiceAccount - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-view - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} - rbac.authorization.k8s.io/aggregate-to-view: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates", "certificaterequests", "issuers"] - verbs: ["get", "list", "watch"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-edit - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . 
}} - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: - - apiGroups: ["certmanager.k8s.io"] - resources: ["certificates", "certificaterequests", "issuers"] - verbs: ["create", "delete", "deletecollection", "patch", "update"] - -{{- end }} diff --git a/chart/charts/cert-manager/templates/service.yaml b/chart/charts/cert-manager/templates/service.yaml deleted file mode 100755 index 734fdef..0000000 --- a/chart/charts/cert-manager/templates/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.prometheus.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "cert-manager.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -spec: - type: ClusterIP - ports: - - protocol: TCP - port: 9402 - targetPort: 9402 - selector: - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} diff --git a/chart/charts/cert-manager/templates/serviceaccount.yaml b/chart/charts/cert-manager/templates/serviceaccount.yaml deleted file mode 100755 index cad1456..0000000 --- a/chart/charts/cert-manager/templates/serviceaccount.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} -{{- end }} -metadata: - name: {{ template "cert-manager.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} -{{- end }} diff --git a/chart/charts/cert-manager/templates/servicemonitor.yaml b/chart/charts/cert-manager/templates/servicemonitor.yaml deleted file mode 100755 index ea92a65..0000000 --- a/chart/charts/cert-manager/templates/servicemonitor.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if and .Values.prometheus.enabled .Values.prometheus.servicemonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "cert-manager.fullname" . }} -{{- if .Values.prometheus.servicemonitor.namespace }} - namespace: {{ .Values.prometheus.servicemonitor.namespace }} -{{- else }} - namespace: {{ .Release.Namespace | quote }} -{{- end }} - labels: - app: {{ template "cert-manager.name" . }} - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ template "cert-manager.chart" . }} - prometheus: {{ .Values.prometheus.servicemonitor.prometheusInstance }} -{{- if .Values.prometheus.servicemonitor.labels }} -{{ toYaml .Values.prometheus.servicemonitor.labels | indent 4}} -{{- end }} -spec: - jobLabel: {{ template "cert-manager.fullname" . }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "cert-manager.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - namespaceSelector: - matchNames: - - {{ .Release.Namespace }} - endpoints: - - targetPort: {{ .Values.prometheus.servicemonitor.targetPort }} - path: {{ .Values.prometheus.servicemonitor.path }} - interval: {{ .Values.prometheus.servicemonitor.interval }} - scrapeTimeout: {{ .Values.prometheus.servicemonitor.scrapeTimeout }} -{{- end }} diff --git a/chart/charts/cert-manager/templates/webhook-apiservice.yaml b/chart/charts/cert-manager/templates/webhook-apiservice.yaml deleted file mode 100755 index ed46424..0000000 --- a/chart/charts/cert-manager/templates/webhook-apiservice.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.webhook.enabled -}} -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.webhook.certmanager.k8s.io - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . }} - annotations: - certmanager.k8s.io/inject-ca-from-secret: "{{ .Release.Namespace }}/{{ include "webhook.servingCertificate" . }}" -spec: - group: webhook.certmanager.k8s.io - groupPriorityMinimum: 1000 - versionPriority: 15 - service: - name: {{ include "webhook.fullname" . }} - namespace: "{{ .Release.Namespace }}" - version: v1beta1 -{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-deployment.yaml b/chart/charts/cert-manager/templates/webhook-deployment.yaml deleted file mode 100755 index a686f56..0000000 --- a/chart/charts/cert-manager/templates/webhook-deployment.yaml +++ /dev/null @@ -1,82 +0,0 @@ -{{- if .Values.webhook.enabled -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "webhook.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . }} -spec: - replicas: {{ .Values.webhook.replicaCount }} - selector: - matchLabels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- with .Values.webhook.strategy }} - strategy: - {{- . | toYaml | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . }} - annotations: - {{- if .Values.webhook.podAnnotations }} -{{ toYaml .Values.webhook.podAnnotations | indent 8 }} - {{- end }} - spec: - serviceAccountName: {{ include "webhook.fullname" . }} - {{- if .Values.global.priorityClassName }} - priorityClassName: {{ .Values.global.priorityClassName | quote }} - {{- end }} - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.webhook.image.repository }}:{{ default .Chart.AppVersion .Values.webhook.image.tag }}" - imagePullPolicy: {{ .Values.webhook.image.pullPolicy }} - args: - {{- if .Values.global.logLevel }} - - --v={{ .Values.global.logLevel }} - {{- end }} - - --secure-port=6443 - - --tls-cert-file=/certs/tls.crt - - --tls-private-key-file=/certs/tls.key - {{- if .Values.webhook.extraArgs }} -{{ toYaml .Values.webhook.extraArgs | indent 10 }} - {{- end }} - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: -{{ toYaml .Values.webhook.resources | indent 12 }} - volumeMounts: - - name: certs - mountPath: /certs - volumes: - - name: certs - secret: - secretName: {{ include "webhook.servingCertificate" . 
}} - {{- with .Values.webhook.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.webhook.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.webhook.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} - {{- end }} -{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-mutating-webhook.yaml b/chart/charts/cert-manager/templates/webhook-mutating-webhook.yaml deleted file mode 100755 index a89d75d..0000000 --- a/chart/charts/cert-manager/templates/webhook-mutating-webhook.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.webhook.enabled -}} -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - name: {{ include "webhook.fullname" . }} - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . 
}} - annotations: -{{- if .Values.webhook.injectAPIServerCA }} - certmanager.k8s.io/inject-apiserver-ca: "true" -{{- end }} -webhooks: - - name: webhook.certmanager.k8s.io - rules: - - apiGroups: - - "certmanager.k8s.io" - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - certificates - - issuers - - clusterissuers - - orders - - challenges - - certificaterequests - failurePolicy: Fail - clientConfig: - service: - name: kubernetes - namespace: default - path: /apis/webhook.certmanager.k8s.io/v1beta1/mutations -{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-rbac.yaml b/chart/charts/cert-manager/templates/webhook-rbac.yaml deleted file mode 100755 index 428882d..0000000 --- a/chart/charts/cert-manager/templates/webhook-rbac.yaml +++ /dev/null @@ -1,76 +0,0 @@ -{{- if .Values.webhook.enabled -}} -{{- if .Values.global.rbac.create -}} -### Webhook ### ---- -# apiserver gets the auth-delegator role to delegate auth decisions to -# the core apiserver -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ include "webhook.fullname" . }}:auth-delegator - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- apiGroup: "" - kind: ServiceAccount - name: {{ include "webhook.fullname" . }} - namespace: {{ .Release.Namespace }} - ---- - -# apiserver gets the ability to read authentication. This allows it to -# read the specific configmap that has the requestheader-* entries to -# api agg -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: {{ include "webhook.fullname" . 
}}:webhook-authentication-reader - namespace: kube-system - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- apiGroup: "" - kind: ServiceAccount - name: {{ include "webhook.fullname" . }} - namespace: {{ .Release.Namespace }} - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ include "webhook.fullname" . }}:webhook-requester - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . }} -rules: -- apiGroups: - - admission.certmanager.k8s.io - resources: - - certificates - - certificaterequests - - issuers - - clusterissuers - verbs: - - create -{{- end -}} -{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-service.yaml b/chart/charts/cert-manager/templates/webhook-service.yaml deleted file mode 100755 index a1c32b4..0000000 --- a/chart/charts/cert-manager/templates/webhook-service.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.webhook.enabled -}} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "webhook.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . }} -spec: - type: ClusterIP - ports: - - name: https - port: 443 - targetPort: 6443 - selector: - app: {{ include "webhook.name" . 
}} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-serviceaccount.yaml b/chart/charts/cert-manager/templates/webhook-serviceaccount.yaml deleted file mode 100755 index 7b41731..0000000 --- a/chart/charts/cert-manager/templates/webhook-serviceaccount.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.webhook.enabled -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "webhook.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . }} -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} -{{- end -}} -{{- end -}} diff --git a/chart/charts/cert-manager/templates/webhook-validating-webhook.yaml b/chart/charts/cert-manager/templates/webhook-validating-webhook.yaml deleted file mode 100755 index 523d45f..0000000 --- a/chart/charts/cert-manager/templates/webhook-validating-webhook.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if .Values.webhook.enabled -}} -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: {{ include "webhook.fullname" . }} - labels: - app: {{ include "webhook.name" . }} - app.kubernetes.io/name: {{ include "webhook.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "webhook.chart" . 
}} - annotations: -{{- if .Values.webhook.injectAPIServerCA }} - certmanager.k8s.io/inject-apiserver-ca: "true" -{{- end }} -webhooks: - - name: webhook.certmanager.k8s.io - namespaceSelector: - matchExpressions: - - key: "certmanager.k8s.io/disable-validation" - operator: "NotIn" - values: - - "true" - - key: "name" - operator: "NotIn" - values: - - {{ .Release.Namespace }} - rules: - - apiGroups: - - "certmanager.k8s.io" - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - certificates - - issuers - - clusterissuers - - certificaterequests - failurePolicy: Fail - sideEffects: None - clientConfig: - service: - name: kubernetes - namespace: default - path: /apis/webhook.certmanager.k8s.io/v1beta1/validations -{{- end -}} diff --git a/chart/charts/cert-manager/values.yaml b/chart/charts/cert-manager/values.yaml deleted file mode 100755 index 672fcc4..0000000 --- a/chart/charts/cert-manager/values.yaml +++ /dev/null @@ -1,172 +0,0 @@ -# Default values for cert-manager. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -global: - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - isOpenshift: false - # - name: "image-pull-secret" - - # Optional priority class to be used for the cert-manager pods - priorityClassName: "" - rbac: - create: true - - logLevel: 2 - - leaderElection: - # Override the namespace used to store the ConfigMap for leader election - namespace: "" - -replicaCount: 1 - -strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - - -image: - repository: quay.io/jetstack/cert-manager-controller - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. 
- # tag: canary - pullPolicy: IfNotPresent - -# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer -# resources. By default, the same namespace as cert-manager is deployed within is -# used. This namespace will not be automatically created by the Helm chart. -clusterResourceNamespace: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: - -# Optional additional arguments -extraArgs: [] - # Use this flag to set a namespace that cert-manager will use to store - # supporting resources required for each ClusterIssuer (default is kube-system) - # - --cluster-resource-namespace=kube-system - # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted - # - --enable-certificate-owner-ref=true - -extraEnv: [] -# - name: SOME_VAR -# value: 'some value' - -resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - -# Pod Security Context -# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -securityContext: - enabled: false - fsGroup: 1001 - runAsUser: 1001 - -podAnnotations: {} - -podLabels: {} -# Optional DNS settings, useful if you have a public and private DNS zone for -# the same domain on Route 53. What follows is an example of ensuring -# cert-manager can access an ingress or DNS TXT records at all times. -# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for -# the cluster to work. 
-# podDnsPolicy: "None" -# podDnsConfig: -# nameservers: -# - "1.1.1.1" -# - "8.8.8.8" - -nodeSelector: {} - -ingressShim: {} - # defaultIssuerName: "" - # defaultIssuerKind: "" - # defaultACMEChallengeType: "" - # defaultACMEDNS01ChallengeProvider: "" - -prometheus: - enabled: true - servicemonitor: - enabled: false - prometheusInstance: default - targetPort: 9402 - path: /metrics - interval: 60s - scrapeTimeout: 30s - labels: {} - -webhook: - enabled: true - replicaCount: 1 - - strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - - podAnnotations: {} - - # Optional additional arguments for webhook - extraArgs: [] - - resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - - nodeSelector: {} - - image: - repository: quay.io/jetstack/cert-manager-webhook - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - pullPolicy: IfNotPresent - - # If true, the apiserver's cabundle will be automatically injected into the - # webhook's ValidatingWebhookConfiguration resource by the CA injector. 
- # in future this will default to false, as the apiserver can use the loopback - # configuration caBundle to talk to itself in kubernetes 1.11+ - # see https://github.com/kubernetes/kubernetes/pull/62649 - injectAPIServerCA: true - -cainjector: - enabled: true - -# Use these variables to configure the HTTP_PROXY environment variables -# http_proxy: "http://proxy:8080" -# http_proxy: "http://proxy:8080" -# no_proxy: 127.0.0.1,localhost - -# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core -# for example: -# affinity: -# nodeAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# nodeSelectorTerms: -# - matchExpressions: -# - key: foo.bar.com/role -# operator: In -# values: -# - master -affinity: {} - -# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core -# for example: -# tolerations: -# - key: foo.bar.com/role -# operator: Equal -# value: master -# effect: NoSchedule -tolerations: [] diff --git a/chart/charts/gitlab-runner-0.18.1.tgz b/chart/charts/gitlab-runner-0.18.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..13b217137a6de10003849304d0c5ab04a0c90686 GIT binary patch literal 14037 zcmV;`HY&*Dc zVQyr3R8em|NM&qo0PMZ%e%m;*I5=O2Pf?b0#&(ZM-F%DA`kYo|CDCbH@=9_tIqCe= z5D7_WQv{nJZEMomciCsz7uz3hyos`8$K#&)kNF|7NT5(vC{z^+1z<`-7mdvz@;n@r zTQd}d)&kAlKRw}7sZ=V*hlk?-N~MzjzjAn3`BSxibbMT`)vCN)wSIhf^e3o1B@(Ni zh=nNlQ{}tc$`ki5@*tmlzyTxFYrsv_K)xUUtk$inRW%&U?11>8c=wKk=V%OlAua~7 z`MX&d`m9kdOHs0HIxk1QOOeCMW|=Kmi05TLpub`}ETbC~qM%$mten*AWhr$dmrkkW zdsE{YFK%hzu!dp6l!S%}HZe?au|xmdBPCsx(? 
zqx*T)lO^m1xe;bhtpM&H|MlA8(LrwfAJl3unJ1wJZU^|e1>Ss%Az_DPEO-~oU2vq$f_8;v4M|mGFbqU6)z*#y`(O&7yRQ+ z)4fFtwgY4$KM&EA)3oEF5VLS+AF#_XFdRZiVW%_c((p3C413{DhLT3Pd>xH(;NcK6 zi%_9O$`QkXIgN;e%K|zpZ~Fl@{m6BRH#OxuPE`)I2BcdOWDI9mck2KLH#mT+{yFU4 zl5hroKtpO%cMrlD3gH%c0^I;l2@7!m;S7Uh5TGd{9t)RxVTIDO>3ohnhn4&7=GjHt znuT+BJHv8Qw<0@7e!B|Xm@NwEf<}SqP_FSs12Iv3Q9nT38o`hXS)u_T$KjIc;aerf zjFS}RxnRL?Mi|T^Nz)iZfWv^`8|+9C+)il7sUu_#)j6F_c&k+XYDBPa;>)00I6j!Ys7X?0&~fT;hOGXMk<$Ib8FK{tY5KWbjk0zhf$0 z>P-QAG@8!Tu!xcJsgt2QM!;P>ybHG(nR>`&#=Yn^png9Nm$5GzTx5 zk8!{?u}wWY3IgnfVz4}p1mxUsCFGcy*OHQOhRvx<$H+AqwzWo0nr%Q;3e6dw5Dzon zUrb6A%;+u9Nr*k(JN69Q*IG1WpL1h1j)?0#AKn~^ju+zK2DuHWS8`0wNbU{;mrkc# z&A9jmyB0KW5OLAi#ULj>fCAhA2aluaJ`it0_u&==-ag9@p@R3LC=OS^N zBYn>C%}dZK;wChhqi_{jGG5$@nW$mVh#9Q@~Oj0`yl==<2!8gA54r$R{A2! z-{4>@%1&{}zq*8p|8G%f&-fn_69GlPa4^2#QKrcXi6E45fj!awnNh9@DgZD zToNw$=#e3f@*H_+ij%U8>i{wE1!&uixcxP6xt65?P|sZmaKuY=Cuf%|_mMcbY1^1F z%{d9?N?FJWbzOSPMI@u0gBxOFXxcW7yin?JGLaoa7&wG+8FHlJCJf$VcdktHF2h)K zgeBRuC)yK?WfH7{DioleODe45sg~!_PzaB-HaUdYJ~s&5RtqJQD$F8#28aQ^W)_;G zzf%w0GAXds#U={87@Z;k=Y?f8Q3qeMvYf|>iI|*+NoiY{iW(D1oJ`(;^zc^@+)VIn zz}IiNkxRS~PXpnQTn2QG!x@ek%(>@oGr)Jk)&!f5%Q^K(NVx}D-p&k0lTJ)N6cu2T z!x5z5z)TID{r?E z4`UHeNQQ@)&FUEg2Ov!d+;Sh&qaNl`sD?duG{e=rJ{m_}7&WR#R^=ckSG*<-V9x#M zc*ZR4Fmz$zjX6 zDLKJGg0E)w9=_qqnoLZOhNds4`1^oFAi@$;M>Y84NR4od$5^^iWC9)yf#J|9Ku5<9 z_(U|NWLdF8qTFQJ%c_IrBvV?VRRisNEr}AK*p$R{D1=%XMNUdJ$Jg5#A6*$|oXu$D zI-Er=wXdbGi4w!2Le+9O>`O&zX}Oitmw?Z_Vl*TaJ$#mWHAGVeh%suDWD1g?V?m2M z_9nwVk}84QAkKPT(YjZKt&Blzi%6ZxAtZS6U#Y&CB6xdg26Z|kk zYWl>!HaXFzB09&ZAo8xf5KX5mx^=Zs2@4%Wo`|wxn|J3pKtefM83iZ=>Ft9^#90#K z7Br0s{_&>__S{iHe9D}78?h{M zO5~7`c$*w=&98*^tlPwutMG>SSmaYyIgeHBJExhJa?!-_Bp&QKX(hBYiI#X01_2hVO?hK+nMR5>G#3sZuSMo~KQ+_amlmG) zA&Y|px@!`dPlhcAz;y=-Lp=9Y$d7Z%<5PZxZOY__FhdM}jaaCIYt|EVG4D(2xQ-jY2;P z&&iyG4S|3*&s?g=I11wcEdv|bGvZAPkW1mEB#s8tTnJ_C(s6l?xC0^GogC^>NSTdX zY|Wi*Cuu##Io*0}CHVMHhk;og#)x|(F$69ks;U~>Z%H7n9@kK51rm8&WpMxF9rkd5 zT;4Gp023o_P)~j<24;+AONbY@?{f6+usRY`q@DCY-jVOF@nQ%50z0xS|5d=&+`M7z 
z(kUx5Y)1hJ7iF>J#mqUMGjJfLglt~pltZ`;uYk+09lA>XHF{G{Be6ka2|vs70H(=Z zEz>0-XToWW@M-B2jRHCC`w1^fD{I6=$r{oXf5^DK=0hn^l{*RJ? zcdfzwA|QFA7BB4pJJ<^eav4mBiyJG%WJ>9jPa=KaCAL)Lsj3;_5eopRf=g{YK({?g zb8tT5sh#zZ;S-I(7EVb#dD%?%?k9aozBq-ay!90oWC5$}BkqAwQ>^XX--f7SiP+TB z%4S9s)W{@qkw;J8uJK}}OxUbQcL>@wETKR6n-sjAf3yGyHH^(C6ah`?N^O!MI<12` zXRB3EjxVy>rF`*(nn-X%gk1mx#Jt{+?`65+!p99@_4K<-6wVq@lD#vMOfIDh3}z@u zd)Ly*sOKACb-7C0beup$2TFQBPBmBNr?1aV> z{)`xDWg&x5i4cM9N6I0qRVwq`%iawRZUYkH3v}1yb0jVt2T8d(p3`8V>mMFnkmV1B zFb=D=xUB6*eHDUfKsAGeeqRWqmd7InrFN1-<$g#4bL~>gFzu!cRq@faEM3W}Rpb}b zB$0fNT^|P<<2Vz=p9@9#O$9PbMHxz_M5u9hax^+RFkgLbbw>TGZnxbZ_1hmhBB`pB zB{i93X_r*I!gpZ-$vufRzsoIiJ28&r{7QFcSnbZ^}2e?BHwp0_ZpEazq1B>u2!o1P_5LyRUJqL{7k~R%bM5_dm4zqpSXT0@QY?hpz&+ zk&7~U)WP7QF?MChwbc<3SD!i}o=B!1C-1v#rUWBm+Pt+?$eRBgU{|c9%N2}e+(PQ& zAhmMy4?vL1a%dvYF_Xq}SPl~`3J~?Fvx0}Ia2a*ZMUjH~`PC_1d+n+eLGyxDDc#=E z`jGTE;}s(8-H?C^{=p3j2p1%^_@d+c#QRkS7*%-1kC$gqfa9E{+nF?(BEJHw;0Iws zw23zrO9%|7+TfLa#P|?E415%zISz5a_JLT~f?rG%dpA((HZR(zADZV^?b0v%3O=(J9IL0o$-4lq2aH?AM(3vAiO3l6)XiK!K zK%-(nyd)(^-#FO1lytKqk1-Y^?A>VP#(C2K!5kyE@~8Eo){_s)3e9U-k>aXfwh{$9 zVehgEF^Ie-yNdN`4yQPMCtl#;ex1wB8+#anHl6#CSOQb4@B+isn!K>9+X}X3m$Z(F zCFSpIKZ;FmywI~$L)+1~hOoID=jLQWJZ}7W_j4VP(nr$nJ%zg5MyeU1p^z66Q@`Pa z>%Zd_;BHQ%KOC_XvpEJP6DnFL#qDilNojM|wC~W|cX30+e5DlYF(lIu;>8PLwjvLx z2h68ooXyC9Q4x``k*DyL3raIQ*V-q*)5t{u+|uA02SPD3F?_99QOi~HEK#y}14`Wf z%XHXtwfrSb4A=iM-YwNUhFvZVB*2**y#G3ds&%yY9erDDX|}hGFl}Exf64N+UqtpC zEo6o)lKAgKIEz>&rWI2oR|ttTlXA_Qmy=vh<(1^SOsw3b`y03&(o(NFMW7=)z=UhO zXh7-yjwq?YM177O7ot-8jUO4Zu2Rk12oRSN;HC>D;i+BKmxq11Azjoec5Zj6HTI}NU4Rjry;Uk5F~a~k3t zIQ>$n@|1HW{d43vaun&o;8U?#q`tNp-};L>VYGY zIGh|bk!RdVW&B1u5;KWB0Uo&|TI!c0HToiY;; z#`erdac|6t*T*6%#HZcz6}n5klC#KEHWU}&P{af;)g%Ex_?KF4L6z>Nb`+`@;Yg?M ziDDrNrZ{Zg-~dhW2RXt;Oz4l^^Ob&b8>jHx7fEpCZg1EgSm9mx1Z}L@|68qB^ZS1f z4-V@u`~RNf`TErq+u1zZjU24QP)Z)Gc!O36Eco`#_>|gB)XyZoc=T58K8kk`sm){Q zJxlTSm?SuY!UlZ(x@r3yTd~K^{PxY*e$q#5Yiahoo$kAaSU7Qrlgf8m0P>RkYJ4s# 
zmi=~S*quwdTz3a-_aW&=mR806CawSAP{B+qi;_1!TuW@o!sk9c@@96>On1M|Bnvql^6T}9M5X| zuU5LScVlun$f}~;O7>b4lb^`NfX;i}_7af$^q5*KFR~zUYQ+D2)gBB-tzNfv)$g~v ztxtu}t4qL_Hx%8|OdP^Z%s>*qEhVMf?>d8FyU*>}Pr?b{T$%t4V3$uXcV~$Ggco^S zpd_~ymG<_Q&{W%&xIWICAPzk>MNU48EV1~z=M)e$Y`zkS6)Gi;A8ntvNL&(C`w(=2~bqXpS(Yw);eU-bH)9z|&?9qa*( zJPEz`rw?wxdZjBL&Sss=ws{**)xq~Q zKfL2q((I=FrkZ(7x9OHP@3j<759DErs)zCKab1=r?O`o0yFwc@wB#KXITlyv=iIyK ztZ<+Uz~_z@$cGq;{I7y6;d_IQMHQ@EKJiLV>1-roH=W@pK0O$=|27=;uezh=VASn( zNBv%JxFV{P35+}ttBB#?5g1(!+WmVy+_h~k1$e%P3Xw$p2r}OFdsml_rbD*+G=xkZ zLB!if6EN8X`P84QVv=Gaw7{&!SAdddHImgno&@(*xB<_&S)!GyRtT=@8O z&e~gWvFtiqKUjlNvR(*iGs*8pvAmGYa?Eg&u;?a za77aak!QnEUHnPrgJ?1#cTh4DV0vvrVj1t+)oqAVHsQAjx#IX4K)x^Vw~UWiLKo#j z-Ug?I!5GU^MH!vrbds?=-jZO~k=rCh8oSfAgPH%qO1aDt33@}kRHv@& zV0i+afXJm%pAw7I`Jt*SWGRtyzqS`d*)$G$V$zI|f9vzge}@43F0wI{{#t_4sPqWI zRRvh*LPVSXhY?Yi`=%8&YkNYj#pbYI zy=a>pX{*bwEx3w|Ws-)&&Ws=1gCC zS`$^{l*zN&@Lh-l54mz^u=S&&I5{9EQTMFJXYt!ue8fz0U*!#};nqxU2oNWKWD*hO zj25w!>KD%HIs!$#RG!PL)I9`^QxMz=7El_y7(2w9mS9&X1}NQ>mJ!>7UHn@CzDf1H zl!kdFk#3yEy5_!%3b+zVW0%0u1S@#DE99?2PNxoB*DFrc6C09PNd0v@TRIy&GfHUy zyS#sLOIuJ{u9m2orEhx*NWmgmA*8&f|Fy&1v$$lmDwDSP@cDtE~AeD;Kekx$*6C@@A9mMt@4<7uI zv%=snf607^j|77+U;dNB@s#?LM-V3|Z_QS#Js6CBYJWO?wQHlW0QTOBs)Kf`-yRlK zja!#hC&XBRfBJB#f`g(~Apfq{dw1R*H7_sEJ2I1S)atj-+TCHNc|JHTZ3zQ#-)|Fe>H@p(PD?#~^^lt(w$m+x8wEeD;md|zhT*^2>e1GZ-rOdMb8 zFh|!|pBV(a(Rt*CL>vaROFVUwk2sbdP!!HYk#uo!9w-*&#TM9~y}i}wUiLd5n!~md zK8;EN>M1VlX?PQ|Ml4qz_PL_pWhPKx_V}>(Q@fkz;!23?Mo8u$(iLMlPT9Jz>EhP5 zfXxzprTrf>Ie{$KAPL1@y9CBI3_?B;ZzaY4$z61&e$Gh#S6&2HJ%J3&nImIl}2 zAXk0O$7zAimu!as1T#*!W=kdPLOC61mdpODA*%mrh6GRtZ@)o)-~s zLX)|XzRDpTK)uQz9^Q-0%Cc(-~Zq^4@8u+w;5 zJFM+2LyV7;)0|D<=iKM?zDJ)(^3i82|;sv&z0+H2*e=v*((_Vf*58I5@3Wq)3T7 zh4^&$)h@%oK^01`z82Q_R@(bdaHtket^`V%YsA!{weadI`37_XE8!>*8G+SCIN7Si z8I@gMSy~)b$G~g8OTkW(2lavSk?U$EicU{;a<08pS7%Xpb&VJM@QMp+<8(R0EPZ~! 
zDr8dUOwfSi!C|IQY4BIPfrn#Pgiq=?pL`_{O^Z5$8ji&JH(mlu#Xk0yA?1i(W@-BLB*j(Pie#yUClhRk-__@ePs^*7 zYH#~0{Cw(|>X?$%{he6`d>VPoZs5Ncz4VbwPb!n^xQD%)EMojc9s~2+H=(G_$8d^F zpdfmZBnGEWr!OssX;WXbcm|PYM(WF{IHLc%F^SKaFTf)?06&9;auS89JS_!yGD?7K zgwUq>BTHIgqec6IW8;dHgc6l0aaM@&gCQ>oc>b^_GY!C00Jz+7Bvy(SENh}d^;E!b z)AXok3Kt03A-PcpopKW%-XQn%)qfkQX$QReD&KR#uRwk#F%B@g)+X3h6$mf8%Gxf0 zNV%1XV+p`qjD0u+@zbH6LQ|9$u7YsaOqbl?RgMOkKsjQ8c~r7R`e7qKx01{b zb1>8%=pWYAjAw6IzxkQsKda(@$zJpC4*;%-|JCcq`STx+E5|SKzh`;U@jvAItQ-dc z&teJqH;e)@U#J{4eR{kpfGIZf$IZZg4EG+z!40{%At%!o z=6(;W&@cV^Rr%!HtAirfbF~di`G}1aJFtDtqIt5yk>QP6+k0i{^;YW*VsdYIDYq!X zogVld*bLPUk61L%GF`R8^o?JewxKGlZDF%1J%}_0eN2)?Xqnjo@^J%pxC^+mqL8?a z5q6T<{|X;rqfTAn(m%du_23kLFCTxYBse+qG8Hh%6}RG2_pml!M;))3t`VQOc~4UX z)f8Rvn>82urhwmDGHHT%B#aNgjt7BG@=RX}pu{#__S|8!oqgC*BA?uUxAP?~tWK_8 ztyt8`dPcD<-91;FPkv7xxrU?;T#bck1rkf!Sow$|o`by*PSWshs2k1kg}8Gv0jznY zz$Aez{fJAH0|oSU{y?Y1w6_1LU8K5>?|}9cH0QkbbxL>{M>-Z z-y@x#G-%Nhr@)C@kvD*!#ck7@p6^pk$(ggp*RK%LPiXEY^ECZ2k%Vd`saRXm za^)#}`?je?{`ysB+kN|1O6QPsSOwdxWT^~U_R5j`V5i^@9=k%IiW@FSE#V{9D;l!t zEDb%QEA4Y5s+KBJsT*JZVTyxKEj`TGWkAQckrL{3Ldk{*CzI6_!cvbD;r98{_DS&&5#|s zq?cFpB%b@>;*138K{CeDncI4O9uPv9FF9rgI;KWK7&pUMCOKb#S1MvD9}l5pid9zM+rm5aS=-otkx%+aqjNa69hXJ)#KyUJrlbf33(e!%tN}X;PvC> zW~IdNhlF^}mSKWExqW4BNI?|?$iO%?vV>VBEE>3hD?Nf@iZFZJ3g z^JVKK$8>9taHn&0$7k>-VJLkKM1fg}OE0^Y_or|aFWqM@fGB7#Q|qBkhD9s$>Dv(s z8cf%DXnU!H>y!4@m#%Yfdw+D?{h#Gowf-~n(Gz5T%V*90&&qMVzI6Wg(aZU- z&+_EQoR7YJGct=p>Vg5{P0Kg6F%FSlAbL*&@^?PTaWARU)b|hs-?v(n+*H1F^0Yh) z>)W?@aYnQ^6sPR>@kFmMEw5+s_e*P9kJ9#;>jVmqlH=nhRZ3M$UpcV4T2e|?)3*{7 z=s8|qUC0pYxms$g&!#GF)%Xvni_J0d9O6yCR}rik|J8bZY5li$RDBu$&++85YEqF+ zGp7l}Hxz|4KJMjZH}ZCmWX-rJOzK7~#KA}6V0HWQw{NAEeA%Ziej{b9l!}h#UrUSq z@3#oe$DmSd$R}!rX=N`MTFE`46Y|{8gq|PBY!;1w#dgRVhN%!5WTM;OX%&_@s?9>_ zUt`1FKiPAi{(mY5aGn0=%0B1+9UUIL=>O+<{&4+&kLvk1)cBE$h2qruo;BceNvw$! 
zbyO0R7FeV%+Wh?WEBs1{2c>-|t*-PXePHksjM5Kr%ds&iLgNMpV}*kF^WNrl;pL`v z-Q{Lq6#GByS*8EeX-MDO#yb69sV$xVUVpj&<$0du$c8niC*;1%cX<;L8v+@beUJvv zMaTao^|!n@u;-)SA{_4|Um1nJ|1izv9{3%`M+exmvl$iXQ2hN6S3`MGu>Y>lYW**y z+C*~F_FZ;hZTz=-l-2*WN~QV||9O_@0senVxo76{X(hjua=pREy5$78f{ft}vxPIh z>c7XF;rn!VVa`n1p7Abo>C{?0K?Go(|5vRY<^8`(?dAN>XL*#fFk&|4t_uxkI56US zx;=brPB03^PX8{x1@(g&N#?ydA$P*z6Te_W?!;&D_DwX6U(6BlLhK>W#tpbgei#4_ zwIi{yF%*jCEd3LtEOH0}xd!y))$r=IIPH#4nNzqLW(GUM1o>|Y1;fm?YM6?4p#w%@ zZ;wc{HB(w4Uk5<3Ee|SPMdK6~&LWyFs*z60=Ez7F9!Jxp-16rT+cS^Y$OQ+F!=z-E z2N{6KzGht66c?6}A2uWYNY^rfVgya(>GIeg!MSFO(I%~j|8Y%n_O^z#S%2n{84Pl#D)nA(dn z43~I<4kjHY@flRwmqbBaY|V>-Wf-a+IQRy;)aU9uRa-+X06;tvf@#_=!Cq)m&s`+n zd}LpX6F3ck+Zmsb&yg1)ms>B-T_{BxAkUr&lZNkXtbxxF3q@Wp3PLhLwoVQAQ8-h7 z#$1%#_`wRNe-|I{olgQhB3=WIorJbAmTvhHQJD?^7}Q5s{qqK}dZS!^^)8``|Zn~EUEK5(}gWox6mB@oqFh&ak-#GBC5SG!XZf`fT`V+^M0Qf~^s1H%rO0hCzz=5xh-{j>Guue)Yo z^S{6Rb??8;a=Em(qkwS<9))C%Phagid@l6Y|6uxK0%!L|PmMG{lcTX${<&(t`kDaw z#(!!2zLiU05|9VP@>%r~*yM!Q23S92e(FPH*TPuZs?(WtX?Pi6hP`n4Yty|&3nro7 z$Z1&uHa^_YkH#*s%d!V=nK;$i2hqt_{qt{S;yL(ERMbUNZ+`z>=kJ@aYdfmMf98gb zRo+YbI>0^^xh;39G?=DH5-tF5no;1wtFKx@zooHc3(wXdVsyn}BpOpQz{rtIlqWP26EW{xF$O~8PhD}19z>e zvN=CYQ>9bGWlvm2q?3Kk2@U2@5<$aLu5kaS_eLHJS6V6+;K^yMn{ohRJ@UErzS);o z5`Pf@FZ;d!X}9E?Wtgdo{q}jgIcSgi?YFXKt9NnH8S>xx$^}W~$S38SgRkZRC%!l>FvIfl&&?Y-?tiwj|ED#=&;MIq+be&x`Tp)F=l^k8B~y!gB#(zlb;0%9&ZlHVkKk0gHUG z3l7)}yDm&!I+llF5O2EA)e#EG4HkY&>OJxtV;ej?mDBoNKfn`mhaI^CWoyrZp64!r zdZH#r349!IcMV&Hbv76cLKcX!-Om0zT~c5&=;?S6smQrYE^kv1+$`I9+}nG z-vP5Ct7Zd#h6!)U9VGk3rP`8v*W^jKY7g92tzL!QH#qPd_z?wY9I-vagn?9J;y4j9 zSqN1}ojBwM`s6m)s#RaZZi_?z6iqOJAMM4&!S-UZ7ZZD&F!1Pm7+6U$@Hk=Mp;@aa zn3*uFVM6=Pr`#>XVMx3w6FdBpv*hB09(h|46Pqjb9y@V2h`2|V!B(wSf!(Hy?~v!< z04{NegE{g%_|e68cX!z3JZn0RR{x?Q)hcwUgCTu%=T`OP1a|)yFCzGa;m5%&a>)WO zzJo87q@bQIh%07@2ObSE@8tu-giGQWVqCRjXEE_YB<-2l|vT>cNi={}qTI3V0F}PYIz-g8=b4ljEjt+}?+86sX!NETE9C4LBoxm@-*$OxDIQT`ehI6K> zhLuY^(XqUfCp5Tb(o7?8$Yg@WQOSb);S71Xb*>V}_QWuuqb5*Ect|cP>6w*e-d3%8 zykdN>m5Z8HnjF-EOxN+7A-iYjS98MCyrD*8)U6 
z0a2CglVd44>Py_6Cd5>2%z>@yYc4$EaD>bzQ9jb+bBJUhC_ZC>lb;OgnrycE+N{Qy z^Jag^E@%J3TessN3UB#PmCP*Y@M6hvrhTnOs;}!DPjrp*r%)xmId(BG_=T zO=yT&_d!pZ#jxyOb-V5UsCPBIyc)`zl!cd_z0>skd^GG_w0l=W*gC8fsPNc)7Lj;z z7$a1X7LFrSF>5EQ#X-_?kTPVIgqDg~tEQ^vFLBa!#b{wzMNAxhDuJ9+gjB~oWNbxV z^0rkyt|cfXS+32?4zD4@CW7I7fK`udUj$GfW9&3EWopPxScSueB6Cj<0PGjfG*F#oPXbyM&^Unred}j1j{QP*06py@L(5 zs5SCUGqUCqtX;t?J=XV!h7(M_%Y9^tz)E5{r^*)K`xxqI44Oj@d*#6K3V$1nY<=MT5?T4kMmYxMEfb zTh#+$59jIt-|YBHoy!c=m-@eYpe4@3x8PEF14tq(W~@^2OC} zjvzGyJN8nlNAajWN#x@sttnX)<2MbEjVF;SE(YTqi*?j-Qu$a{=6o3k!uv*^J)>d* ze=PyoOx*(LP$3g_p>4$_b>g~eK1y*Y_8CAxxtiOm3Oo2Fn)^~zihTg9!a4R@<8_qa zb!65KBvGOnaRG@JA=h2V_=e~;?M`weQ>#JoIt>?J-W?zL3_z=P&R$zykxC}uQ5e*< zvxXRp<`_E73qb~_qMAlQ-KnO$@KJpGguReb!`rK_C%YoZuAgYK4->Kvb$8qgqAIP- zR<(AdVI0Itp1Z>b8^Pd)VXIoLCon207@Ofyu_|U@*I=t!IZ;xCq@NNGC2gZC)x?QV z`3s`w19F`7K>^~k&{d3)sVBXRlh-1BA!VmiK1lj>`K^P4RDP#3Q-VJhzqZNQ~+k{?Zuyt~{ zo>Q-dP$vFd;?5-L77RvWfXD%x&) zdRnLKq3X)4ce!<}8_}nM>}L-05b)CnX%vaGu$mnizEvSdh_A@W1avdD{iMFWJ5IFR zo*XN>78r?cZN>8vFoA?S43HaO{l5T^`Q z;?W2X6Q3z6rbz)4)Vt%JJihz$Fs#s?n- zXpXr*k@zdT8^lb&FyS)5J_<0zF;s49&{P`yt>Z&MFtV@31=O0ul8NyIg z(YEH!o?&owa{0H^3weKpGZb=?#Z}Ugh!JztK67WjoY=X%;!Wc#hrKk16y(>_`KuRNM3Zexx9uT_ro`oCT| zc-jB+EYEO;g;%Ro2kc&ShI@w5qW(gtG}yKGxHorV3YRXeZjDPE%n6fz3=`^gywKWq zA0}M)(~0oHrg&dOCA2MtMXR$`6V!kshl{zy*%6+SSf#*KV zsY53Ge=Nun`D1LhpPHBLbM>v1lduJ4u~O!`hE08fnN*DlcKgacaNf*Gs8}TCYPT~w z&r(8|F^K|?u$eS~4h2T{R~wAP#5N3r_cj{yIhq}Juu_x)1>OOE(yOr!Me@u~wXccW zHiN&?cj+3z>I!%xtA3(f8)_^H!9+lThwF8l4l0D66+b8&gzX`k&wr`tNeI_q@b z!JDfgpM!x2g}2XO*aP060@Y~`IIxR$pL^)T=9|uWXZUH~c-t9vIn1}cJ~ZL7*&lXV zSLe+>Twe7rdxJJKyJyhtbvxa+{Z98?`=Z?)TF~i2w+HPH?d}i;@0;i6yeXr3HGJRe zbNryyyZqGeyn8={_r3G8c7FhG+Hl@!zBzBprn;Y?b>8e;?88~}qWP{ZYW0Ti+kJx< zli0z>_ig@)x7X}Kvo-AWx||GQvYM^oJ`8*PVO;fNXVBh+*fC`>CN{zO-!uNa4e!)ixi=MyDNBp# z#-D#Is0pT88#9RmNA=q4qvO}`v+?IYn>t+eYIg~{Jz0W_6mQQIS5KH~Ga-~@((w;g z=D(7xBcn|6-uS|8xT;vpZBa6vuxA(rWHg{R7!5yN3dX(Kl^L6nNZl|Q{0?CNJD(eo z@8h8HWrzRm(p&NOUNN|{-pB5FuX#4Q>Yu|Yl=K9Rg)~4@Y{?tkkxy89+8G~GL0z^a 
zIOTf;r%1{aMSCWiJ4XklLOglLtNat*@vnB%c<=G4V4<}97;kpaymCk&xK&V`mzQbw z>>NtDCQ4vZ{&J~SiPxIVsRMt#TT$Q{$A4D;%Lw*Ux3O;icTmrt|5iJ$zv%yGc|MYG z1_7RmK)9Scs5zc?+tsw;Ws>+ZNqm_kzDyDyI!QE5(=guBKt##I83`QlQ6SQ*=RWmB z&|R#P`ZN$@SS+m;=%IZGXOv+hRga5$E}+aSQn7A<+;|1R59keXWE5W%xb<+LB+o#; zICFdK;sm9CpUG|3{>Li;Puj-1{a^KirTJgIT6?ko&+%;M5-{ImcP^Ktxgj7-EuzcZ z7zQX;Tx4>4HkPAyL8AbEO7U!Ire<#~Dj37`Kz00960CX-WI H00031q6S0p literal 0 HcmV?d00001 diff --git a/chart/charts/gitlab-runner/.gitlab-ci.yml b/chart/charts/gitlab-runner/.gitlab-ci.yml deleted file mode 100755 index 8c677e6..0000000 --- a/chart/charts/gitlab-runner/.gitlab-ci.yml +++ /dev/null @@ -1,66 +0,0 @@ -default: - image: registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-charts-build-base - tags: - - gitlab-org - -variables: - GIT_CLONE_PATH: $CI_BUILDS_DIR/gitlab-runner - -stages: -- test -- release - -lint: - stage: test - script: - - helm lint . - -release development: - stage: release - script: - - helm init --client-only - - helm package . - when: manual - only: - - branches - except: - - master - artifacts: - paths: - - gitlab-runner*.tgz - expire_in: 7d - -release beta: - stage: release - variables: - S3_URL: s3://${S3_BUCKET}${S3_PATH} - REPO_URL: https://${S3_BUCKET}.s3.amazonaws.com${S3_PATH} - script: - - apk add --no-cache py-pip - - pip install awscli - - helm init --client-only - - 'beta_info=$(git describe --long | sed -r "s/v[0-9\.]+(-rc[0-9]+)?-//")' - - 'build_time=$(date +%s)' - - 'sed -r "s/(version: [0-9\.]+-beta)/\1-${build_time}-${beta_info}/" -i Chart.yaml' - - 'sed -r "s/appVersion: .*/appVersion: bleeding/" -i Chart.yaml' - - 'sed -r "s/imagePullPolicy: IfNotPresent/imagePullPolicy: Always/" -i values.yaml' - - mkdir -p public/ - - aws s3 cp ${S3_URL}/index.yaml public/index.yaml || true - - (cd public; helm package ../) - - helm repo index public --merge public/index.yaml --url ${REPO_URL} - - aws s3 sync public ${S3_URL} --acl public-read - - 'echo "To install repository run: helm repo add 
gitlab-runner-beta ${REPO_URL} && helm repo update"' - only: - - master@gitlab-org/charts/gitlab-runner - -release stable: - stage: release - script: - - curl --request POST - --form "token=$CI_JOB_TOKEN" - --form ref=master - --form "variables[CHART_NAME]=$CI_PROJECT_NAME" - --form "variables[RELEASE_REF]=$CI_COMMIT_REF_NAME" - https://gitlab.com/api/v4/projects/2860651/trigger/pipeline - only: - - /\Av[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?\Z/@gitlab-org/charts/gitlab-runner diff --git a/chart/charts/gitlab-runner/.gitlab/changelog.yml b/chart/charts/gitlab-runner/.gitlab/changelog.yml deleted file mode 100755 index 3d069ab..0000000 --- a/chart/charts/gitlab-runner/.gitlab/changelog.yml +++ /dev/null @@ -1,36 +0,0 @@ -default_scope: other -names: - new-feature: New features - security-fix: Security fixes - fix: Bug fixes - maintenance: Maintenance - documentation: Documentation changes - other: Other changes -order: -- new-feature -- security-fix -- fix -- maintenance -- documentation -- other -label_matchers: -- labels: - - documentation - scope: documentation -- labels: - - feature - scope: new-feature -- labels: - - security - scope: security-fix -- labels: - - bug - scope: fix -- labels: - - technical debt - scope: maintenance -- labels: - - backstage - scope: maintenance -authorship_labels: -- Community contribution diff --git a/chart/charts/gitlab-runner/.helmignore b/chart/charts/gitlab-runner/.helmignore deleted file mode 100755 index 73d4b16..0000000 --- a/chart/charts/gitlab-runner/.helmignore +++ /dev/null @@ -1,24 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj - -gitlab-runner*.tgz -scripts/ diff --git a/chart/charts/gitlab-runner/CHANGELOG.md b/chart/charts/gitlab-runner/CHANGELOG.md deleted file mode 100755 index cb66a09..0000000 --- a/chart/charts/gitlab-runner/CHANGELOG.md +++ /dev/null @@ -1,183 +0,0 @@ -## v0.18.1 (2020-07-01) - -### Maintenance - -- Update GitLab Runner version to 13.1.1 - -## v0.18.0 (2020-06-19) - -### Maintenance - -- Update GitLab Runner version to 13.1.0 - -### Other changes - -- Fix unregister when using token secret !231 (Bernd @arabus) -- Support specifying pod security context. !219 (Chen Yufei @cyfdecyf) - -## v0.17.1 (2020-06-01) - -### Maintenance - -- Update GitLab Runner version to 13.0.1 - -## v0.17.0 (2020-05-20) - -### New features - -- Expose settings for kubernetes resource limits and requests overwrites !220 (Alexander Petermann @lexxxel) -- Add support for setting Node Tolerations !188 (Zeyu Ye @Shuliyey) - -### Maintenance - -- Update GitLab Runner version to 13.0.0 -- Update package name in note !234 -- Pin CI jobs to gitlab-org runners !222 - -## v0.16.0 (2020-04-22) - -### New features - -- Add Service Account annotation support !211 (David Rosson @davidrosson) - -### Bug fixes - -- Support correct spelling of GCS secret !214 (Arthur Wiebe @arthur65) - -### Maintenance - -- Remove dependency of `gitlab-runner-builder` runner !221 -- Fix linting for forks with a different name than "gitlab-runner" !218 -- Install gitlab-changelog installation !217 - -### Other changes - -- Update GitLab Runner version to 12.10.1 -- Change listen address to not force IPv6 !213 (Fábio Matavelli @fabiomatavelli) - -## v0.15.0 (2020-03-20) - -### Maintenance - -- Update GitLab Runner version to 12.9.0 -- Update changelog generator configuration !212 -- Replace changelog entries generation script !209 - -### Other 
changes - -- Fix values.yaml typo !210 (Brian Choy @bycEEE) - -## v0.14.0 (2020-02-22) - -- Update GitLab Runner version to 12.8.0 - -## v0.13.0 (2020-01-20) - -- Add podLabels to the deployment !198 -- Mount custom-certs in configure init container !202 - -## v0.12.0 (2019-12-22) - -- Add `apiVersion: v1` to chart.yaml !195 -- Add documentation to protected Runners !193 -- Make securityContext configurable !199 -- Update GitLab Runner version to 12.6.0 - -## v0.11.0 (2019-11-20) - -- Variables for RUNNER_OUTPUT_LIMIT, and KUBERNETES_POLL_TIMEOUT !50 -- Add support for register protected Runners !185 - -## v0.10.1 (2019-10-28) - -- Update GitLab Runner to 12.4.1 - -## v0.10.0 (2019-10-21) - -- Updated GitLab Runner to 12.4.0 -- Use updated project path to release helm chart !172 -- Update resources API to stable verson !167 -- Add support for specifying log format !170 -- Use the cache.secret template to check if the secretName is set !166 -- Drop need for helm force update for now !181 -- Fix image version detection for old helm versions !173 - -## v0.9.0 (2019-09-20) - -- Use updated project path to release helm chart !172 -- Enabling horizontal pod auto-scaling based on custom metrics !127 -- Change base image used for CI jobs !156 -- Remove DJ as a listed chart maintainer !160 -- Release beta version on master using Bleeding Edge image !155 -- Update definition of 'release beta' CI jobs !164 -- Fix certs path in the comment in values file !148 -- Implement support for run-untagged option !140 -- Use new location for helm charts repo !162 -- Follow-up to adding run-untagged support !165 - -## v0.8.0 (2019-08-22) - -- Add suport for graceful stop !150 - -## v0.7.0 (2019-07-22) - -- Fix broken anchor link for gcs cache docs !135 -- Allow user to set rbac roles !112 -- Bump used Runner version to 12.1.0 !149 - -## v0.6.0 (2019-06-24) - -- Allow to manually build the package for development branches !120 -- When configuring cache: if no S3 secret assume IAM role 
!111 -- Allow to define request_concurrency value !121 -- Bump used Runner version to 12.0.0 !138 - -## v0.5.0 (2019-05-22) - -- Bump used Runner version to 11.11.0 !126 - -## v0.4.1 (2019-04-24) - -- Bump used Runner version to 11.10.1 !113 - -## v0.4.0 (2019-04-22) - -- Bump used Runner version to 11.10.0-rc2 !108 -- Fix a typo in values.yaml !101 -- Add pod labels for jobs !98 -- add hostAliases for pod assignment !89 -- Configurable deployment annotations !44 -- Add pod annotations for jobs !97 -- Bump used Runner version to 11.10.0-rc1 !107 - -## v0.3.0 (2019-03-22) - -- Change mount of secret with S3 distributed cache credentials !64 -- Add environment variables to runner !48 -- Replace S3_CACHE_INSECURE with CACHE_S3_INSECURE !90 -- Update values.yaml to remove invalid anchor in comments !85 -- Bump used Runner version to 11.9.0 !102 - -## v0.2.0 (2019-02-22) - -- Fix the error caused by unset 'locked' value !79 -- Create LICENSE file !76 -- Add CONTRIBUTING.md file !81 -- Add plain MIT text into LICENSE and add NOTICE !80 -- Fix incorrect custom secret documentation !71 -- Add affinity, nodeSelector and tolerations for pod assignment !56 -- Ignore scripts directory when buildin helm chart !83 -- Bump used Runner version to 11.8.0-rc1 !87 -- Fix year in Changelog - it's already 2019 !84 - -## v0.1.45 (2019-01-22) - -- Trigger release only for tagged versions !72 -- Fixes typos in values.yaml comments !60 -- Update chart to bring closer to helm standard template !43 -- Add nodeSelector config parameter for CI job pods !19 -- Prepare CHANGELOG management !75 -- Track app version in Chart.yaml !74 -- Fix the error caused by unset 'locked' value !79 -- Bump used Runner version to 11.7.0 !82 - diff --git a/chart/charts/gitlab-runner/CONTRIBUTING.md b/chart/charts/gitlab-runner/CONTRIBUTING.md deleted file mode 100755 index 1e55f92..0000000 --- a/chart/charts/gitlab-runner/CONTRIBUTING.md +++ /dev/null @@ -1,16 +0,0 @@ -## Developer Certificate of Origin + License 
- -By contributing to GitLab B.V., You accept and agree to the following terms and -conditions for Your present and future Contributions submitted to GitLab B.V. -Except for the license granted herein to GitLab B.V. and recipients of software -distributed by GitLab B.V., You reserve all right, title, and interest in and to -Your Contributions. All Contributions are subject to the following DCO + License -terms. - -[DCO + License](https://gitlab.com/gitlab-org/dco/blob/master/README.md) - -All Documentation content that resides under the [docs/ directory](/docs) of this -repository is licensed under Creative Commons: -[CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/). - -_This notice should stay as the first item in the CONTRIBUTING.md file._ diff --git a/chart/charts/gitlab-runner/Chart.yaml b/chart/charts/gitlab-runner/Chart.yaml deleted file mode 100755 index efd54a7..0000000 --- a/chart/charts/gitlab-runner/Chart.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -appVersion: 13.1.1 -description: GitLab Runner -icon: https://gitlab.com/uploads/-/system/project/avatar/250833/runner_logo.png -keywords: -- git -- ci -- deploy -maintainers: -- email: support@gitlab.com - name: GitLab Inc. -name: gitlab-runner -sources: -- https://hub.docker.com/r/gitlab/gitlab-runner/ -- https://docs.gitlab.com/runner/ -version: 0.18.1 diff --git a/chart/charts/gitlab-runner/LICENSE b/chart/charts/gitlab-runner/LICENSE deleted file mode 100755 index df96b29..0000000 --- a/chart/charts/gitlab-runner/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018-2019 GitLab B.V. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- diff --git a/chart/charts/gitlab-runner/Makefile b/chart/charts/gitlab-runner/Makefile deleted file mode 100755 index d83671c..0000000 --- a/chart/charts/gitlab-runner/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -GITLAB_CHANGELOG_VERSION ?= master -GITLAB_CHANGELOG = .tmp/gitlab-changelog-$(GITLAB_CHANGELOG_VERSION) - -.PHONY: generate_changelog -generate_changelog: export CHANGELOG_RELEASE ?= dev -generate_changelog: $(GITLAB_CHANGELOG) - # Generating new changelog entries - @$(GITLAB_CHANGELOG) -project-id 6329679 \ - -release $(CHANGELOG_RELEASE) \ - -starting-point-matcher "v[0-9]*.[0-9]*.[0-9]*" \ - -config-file .gitlab/changelog.yml \ - -changelog-file CHANGELOG.md - -$(GITLAB_CHANGELOG): OS_TYPE ?= $(shell uname -s | tr '[:upper:]' '[:lower:]') -$(GITLAB_CHANGELOG): DOWNLOAD_URL = "https://storage.googleapis.com/gitlab-runner-tools/gitlab-changelog/$(GITLAB_CHANGELOG_VERSION)/gitlab-changelog-$(OS_TYPE)-amd64" -$(GITLAB_CHANGELOG): - # Installing $(DOWNLOAD_URL) as $(GITLAB_CHANGELOG) - @mkdir -p $(shell dirname $(GITLAB_CHANGELOG)) - @curl -sL "$(DOWNLOAD_URL)" -o "$(GITLAB_CHANGELOG)" - @chmod +x "$(GITLAB_CHANGELOG)" diff --git a/chart/charts/gitlab-runner/NOTICE b/chart/charts/gitlab-runner/NOTICE deleted file mode 100755 index aa3eb4d..0000000 --- a/chart/charts/gitlab-runner/NOTICE +++ /dev/null @@ -1,30 +0,0 @@ -With regard to the GitLab Software: - -The MIT License (MIT) - -Copyright (c) 2018-2019 GitLab B.V. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ---- - -For all third party components incorporated into the GitLab Software, those -components are licensed under the original license provided by the owner of the -applicable component. - diff --git a/chart/charts/gitlab-runner/README.md b/chart/charts/gitlab-runner/README.md deleted file mode 100755 index a05c351..0000000 --- a/chart/charts/gitlab-runner/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# GitLab Runner Helm Chart - -This chart deploys a GitLab Runner instance into your Kubernetes cluster. For more information, please review [our documentation](http://docs.gitlab.com/ee/install/kubernetes/gitlab_runner_chart.html). \ No newline at end of file diff --git a/chart/charts/gitlab-runner/templates/NOTES.txt b/chart/charts/gitlab-runner/templates/NOTES.txt deleted file mode 100755 index 467a281..0000000 --- a/chart/charts/gitlab-runner/templates/NOTES.txt +++ /dev/null @@ -1,14 +0,0 @@ -{{- if include "gitlab-runner.gitlabUrl" . 
}} -Your GitLab Runner should now be registered against the GitLab instance reachable at: {{ include "gitlab-runner.gitlabUrl" . }} -{{- else -}} -############################################################################## -## WARNING: You did not specify an gitlabUrl in your 'helm install' call. ## -############################################################################## - -This deployment will be incomplete until you provide the URL that your -GitLab instance is reachable at: - - helm upgrade {{ .Release.Name }} \ - --set gitlabUrl=http://gitlab.your-domain.com,runnerRegistrationToken=your-registration-token \ - gitlab/gitlab-runner -{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/_cache.tpl b/chart/charts/gitlab-runner/templates/_cache.tpl deleted file mode 100755 index 80b001c..0000000 --- a/chart/charts/gitlab-runner/templates/_cache.tpl +++ /dev/null @@ -1,28 +0,0 @@ -{{- define "gitlab-runner.cache" }} -{{- if .Values.runners.cache.cacheType }} -- name: CACHE_TYPE - value: {{ default "" .Values.runners.cache.cacheType | quote }} -- name: CACHE_PATH - value: {{ coalesce .Values.runners.cache.cachePath .Values.runners.cache.s3CachePath | default "" | quote }} -{{- if .Values.runners.cache.cacheShared }} -- name: CACHE_SHARED - value: "true" -{{- end }} -{{- if eq .Values.runners.cache.cacheType "s3" }} -- name: CACHE_S3_SERVER_ADDRESS - value: {{ include "gitlab-runner.cache.s3ServerAddress" . 
}} -- name: CACHE_S3_BUCKET_NAME - value: {{ default "" .Values.runners.cache.s3BucketName | quote }} -- name: CACHE_S3_BUCKET_LOCATION - value: {{ default "" .Values.runners.cache.s3BucketLocation | quote }} -{{- if .Values.runners.cache.s3CacheInsecure }} -- name: CACHE_S3_INSECURE - value: "true" -{{- end }} -{{- end }} -{{- if eq .Values.runners.cache.cacheType "gcs" }} -- name: CACHE_GCS_BUCKET_NAME - value: {{ default "" .Values.runners.cache.gcsBucketName | quote }} -{{- end }} -{{- end }} -{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/_env_vars.tpl b/chart/charts/gitlab-runner/templates/_env_vars.tpl deleted file mode 100755 index d8c83f2..0000000 --- a/chart/charts/gitlab-runner/templates/_env_vars.tpl +++ /dev/null @@ -1,95 +0,0 @@ -{{- define "gitlab-runner.runner-env-vars" }} -- name: CI_SERVER_URL - value: {{ include "gitlab-runner.gitlabUrl" . }} -- name: CLONE_URL - value: {{ default "" .Values.runners.cloneUrl | quote }} -- name: RUNNER_REQUEST_CONCURRENCY - value: {{ default 1 .Values.runners.requestConcurrency | quote }} -- name: RUNNER_EXECUTOR - value: "kubernetes" -- name: REGISTER_LOCKED - {{ if or (not (hasKey .Values.runners "locked")) .Values.runners.locked -}} - value: "true" - {{- else -}} - value: "false" - {{- end }} -- name: RUNNER_TAG_LIST - value: {{ default "" .Values.runners.tags | quote }} -- name: RUNNER_OUTPUT_LIMIT - value: {{ default "" .Values.runners.outputLimit | quote }} -- name: KUBERNETES_IMAGE - value: {{ .Values.runners.image | quote }} -{{ if .Values.runners.privileged }} -- name: KUBERNETES_PRIVILEGED - value: "true" -{{ end }} -- name: KUBERNETES_NAMESPACE - value: {{ default .Release.Namespace .Values.runners.namespace | quote }} -- name: KUBERNETES_POLL_TIMEOUT - value: {{ default "" .Values.runners.pollTimeout | quote }} -- name: KUBERNETES_CPU_LIMIT - value: {{ default "" .Values.runners.builds.cpuLimit | quote }} -- name: KUBERNETES_CPU_LIMIT_OVERWRITE_MAX_ALLOWED - value: {{ default "" 
.Values.runners.builds.cpuLimitOverwriteMaxAllowed | quote }} -- name: KUBERNETES_MEMORY_LIMIT - value: {{ default "" .Values.runners.builds.memoryLimit | quote }} -- name: KUBERNETES_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED - value: {{ default "" .Values.runners.builds.memoryLimitOverwriteMaxAllowed | quote }} -- name: KUBERNETES_CPU_REQUEST - value: {{ default "" .Values.runners.builds.cpuRequests | quote }} -- name: KUBERNETES_CPU_REQUEST_OVERWRITE_MAX_ALLOWED - value: {{ default "" .Values.runners.builds.cpuRequestsOverwriteMaxAllowed | quote }} -- name: KUBERNETES_MEMORY_REQUEST - value: {{ default "" .Values.runners.builds.memoryRequests| quote }} -- name: KUBERNETES_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED - value: {{ default "" .Values.runners.builds.memoryRequestsOverwriteMaxAllowed | quote }} -- name: KUBERNETES_SERVICE_ACCOUNT - value: {{ default "" .Values.runners.serviceAccountName | quote }} -- name: KUBERNETES_SERVICE_CPU_LIMIT - value: {{ default "" .Values.runners.services.cpuLimit | quote }} -- name: KUBERNETES_SERVICE_MEMORY_LIMIT - value: {{ default "" .Values.runners.services.memoryLimit | quote }} -- name: KUBERNETES_SERVICE_CPU_REQUEST - value: {{ default "" .Values.runners.services.cpuRequests | quote }} -- name: KUBERNETES_SERVICE_MEMORY_REQUEST - value: {{ default "" .Values.runners.services.memoryRequests | quote }} -- name: KUBERNETES_HELPER_CPU_LIMIT - value: {{ default "" .Values.runners.helpers.cpuLimit | quote }} -- name: KUBERNETES_HELPER_MEMORY_LIMIT - value: {{ default "" .Values.runners.helpers.memoryLimit | quote }} -- name: KUBERNETES_HELPER_CPU_REQUEST - value: {{ default "" .Values.runners.helpers.cpuRequests | quote }} -- name: KUBERNETES_HELPER_MEMORY_REQUEST - value: {{ default "" .Values.runners.helpers.memoryRequests | quote }} -- name: KUBERNETES_HELPER_IMAGE - value: {{ default "" .Values.runners.helpers.image | quote }} -- name: KUBERNETES_PULL_POLICY - value: {{ default "" .Values.runners.imagePullPolicy | quote }} -{{- if 
.Values.runners.pod_security_context }} -{{- if .Values.runners.pod_security_context.run_as_non_root }} -- name: KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_NON_ROOT - value: "true" -{{- end }} -{{- if .Values.runners.pod_security_context.run_as_user }} -- name: KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_USER - value: {{ .Values.runners.pod_security_context.run_as_user | quote }} -{{- end }} -{{- if .Values.runners.pod_security_context.run_as_group }} -- name: KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_GROUP - value: {{ .Values.runners.pod_security_context.run_as_group | quote }} -{{- end }} -{{- if .Values.runners.pod_security_context.fs_group }} -- name: KUBERNETES_POD_SECURITY_CONTEXT_FS_GROUP - value: {{ .Values.runners.pod_security_context.fs_group | quote }} -{{- end }} -{{- end }} -{{- if .Values.runners.cache -}} -{{ include "gitlab-runner.cache" . }} -{{- end }} -{{- if .Values.envVars -}} -{{ range .Values.envVars }} -- name: {{ .name }} - value: {{ .value | quote }} -{{- end }} -{{- end }} -{{- end }} diff --git a/chart/charts/gitlab-runner/templates/_helpers.tpl b/chart/charts/gitlab-runner/templates/_helpers.tpl deleted file mode 100755 index b9f1c51..0000000 --- a/chart/charts/gitlab-runner/templates/_helpers.tpl +++ /dev/null @@ -1,78 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "gitlab-runner.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} -{{- define "gitlab-runner.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "gitlab-runner.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Define the name of the secret containing the tokens -*/}} -{{- define "gitlab-runner.secret" -}} -{{- default (include "gitlab-runner.fullname" .) .Values.runners.secret | quote -}} -{{- end -}} - -{{/* -Define the name of the s3 cache secret -*/}} -{{- define "gitlab-runner.cache.secret" -}} -{{- if .Values.runners.cache.secretName -}} -{{- .Values.runners.cache.secretName | quote -}} -{{- end -}} -{{- end -}} - -{{/* -Template for outputing the gitlabUrl -*/}} -{{- define "gitlab-runner.gitlabUrl" -}} -{{- .Values.gitlabUrl | quote -}} -{{- end -}} - -{{/* -Template runners.cache.s3ServerAddress in order to allow overrides from external charts. 
-*/}} -{{- define "gitlab-runner.cache.s3ServerAddress" }} -{{- default "" .Values.runners.cache.s3ServerAddress | quote -}} -{{- end -}} - -{{/* -Define the image, using .Chart.AppVersion and GitLab Runner image as a default value -*/}} -{{- define "gitlab-runner.image" }} -{{- $appVersion := ternary "bleeding" (print "v" .Chart.AppVersion) (eq .Chart.AppVersion "bleeding") -}} -{{- $image := printf "gitlab/gitlab-runner:alpine-%s" $appVersion -}} -{{- default $image .Values.image }} -{{- end -}} - -{{/* -Unregister runners on pod stop -*/}} -{{- define "gitlab-runner.unregisterRunners" -}} -{{- if or (and (hasKey .Values "unregisterRunners") .Values.unregisterRunners) (and (not (hasKey .Values "unregisterRunners")) .Values.runnerRegistrationToken) -}} -lifecycle: - preStop: - exec: - command: ["/entrypoint", "unregister", "--all-runners"] -{{- end -}} -{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/configmap.yaml b/chart/charts/gitlab-runner/templates/configmap.yaml deleted file mode 100755 index ed1230a..0000000 --- a/chart/charts/gitlab-runner/templates/configmap.yaml +++ /dev/null @@ -1,129 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "gitlab-runner.fullname" . }} - labels: - app: {{ include "gitlab-runner.fullname" . }} - chart: {{ include "gitlab-runner.chart" . 
}} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -data: - entrypoint: | - #!/bin/bash - set -e - mkdir -p /home/gitlab-runner/.gitlab-runner/ - cp /scripts/config.toml /home/gitlab-runner/.gitlab-runner/ - - # Register the runner - if [[ -f /secrets/accesskey && -f /secrets/secretkey ]]; then - export CACHE_S3_ACCESS_KEY=$(cat /secrets/accesskey) - export CACHE_S3_SECRET_KEY=$(cat /secrets/secretkey) - fi - - if [[ -f /secrets/gcs-applicaton-credentials-file ]]; then - export GOOGLE_APPLICATION_CREDENTIALS="/secrets/gcs-applicaton-credentials-file" - elif [[ -f /secrets/gcs-application-credentials-file ]]; then - export GOOGLE_APPLICATION_CREDENTIALS="/secrets/gcs-application-credentials-file" - else - if [[ -f /secrets/gcs-access-id && -f /secrets/gcs-private-key ]]; then - export CACHE_GCS_ACCESS_ID=$(cat /secrets/gcs-access-id) - # echo -e used to make private key multiline (in google json auth key private key is oneline with \n) - export CACHE_GCS_PRIVATE_KEY=$(echo -e $(cat /secrets/gcs-private-key)) - fi - fi - - if [[ -f /secrets/runner-registration-token ]]; then - export REGISTRATION_TOKEN=$(cat /secrets/runner-registration-token) - fi - - if [[ -f /secrets/runner-token ]]; then - export CI_SERVER_TOKEN=$(cat /secrets/runner-token) - fi - - if ! 
sh /scripts/register-the-runner; then - exit 1 - fi - - # Start the runner - exec /entrypoint run --user=gitlab-runner \ - --working-directory=/home/gitlab-runner - - config.toml: | - concurrent = {{ .Values.concurrent }} - check_interval = {{ .Values.checkInterval }} - log_level = {{ default "info" .Values.logLevel | quote }} - {{- if .Values.logFormat }} - log_format = {{ .Values.logFormat | quote }} - {{- end }} - {{- if .Values.metrics.enabled }} - listen_address = ':9252' - {{- end }} - configure: | - set -e - cp /init-secrets/* /secrets - register-the-runner: | - #!/bin/bash - MAX_REGISTER_ATTEMPTS=30 - - for i in $(seq 1 "${MAX_REGISTER_ATTEMPTS}"); do - echo "Registration attempt ${i} of ${MAX_REGISTER_ATTEMPTS}" - /entrypoint register \ - {{- range .Values.runners.imagePullSecrets }} - --kubernetes-image-pull-secrets {{ . | quote }} \ - {{- end }} - {{- range $key, $val := .Values.runners.nodeSelector }} - --kubernetes-node-selector {{ $key | quote }}:{{ $val | quote }} \ - {{- end }} - {{- range .Values.runners.nodeTolerations }} - {{- $keyValue := .key }} - {{- if eq (.operator | default "Equal") "Equal" }} - {{- $keyValue = print $keyValue "=" (.value | default "" ) }} - {{- end }} - --kubernetes-node-tolerations {{ $keyValue }}:{{ .effect | quote }} \ - {{- end }} - {{- range $key, $value := .Values.runners.podLabels }} - --kubernetes-pod-labels {{ $key | quote }}:{{ $value | quote }} \ - {{- end }} - {{- range $key, $val := .Values.runners.podAnnotations }} - --kubernetes-pod-annotations {{ $key | quote }}:{{ $val | quote }} \ - {{- end }} - {{- range $key, $value := .Values.runners.env }} - --env {{ $key | quote -}} = {{- $value | quote }} \ - {{- end }} - {{- if and (hasKey .Values.runners "runUntagged") .Values.runners.runUntagged }} - --run-untagged=true \ - {{- end }} - {{- if and (hasKey .Values.runners "protected") .Values.runners.protected }} - --access-level="ref_protected" \ - {{- end }} - {{- if .Values.runners.pod_security_context }} - {{- 
if .Values.runners.pod_security_context.supplemental_groups }} - {{- range $gid := .Values.runners.pod_security_context.supplemental_groups }} - --kubernetes-pod-security-context-supplemental-groups {{ $gid | quote }} \ - {{- end }} - {{- end }} - {{- end }} - --non-interactive - - retval=$? - - if [ ${retval} = 0 ]; then - break - elif [ ${i} = ${MAX_REGISTER_ATTEMPTS} ]; then - exit 1 - fi - - sleep 5 - done - - exit 0 - - check-live: | - #!/bin/bash - if /usr/bin/pgrep -f .*register-the-runner; then - exit 0 - elif /usr/bin/pgrep gitlab.*runner; then - exit 0 - else - exit 1 - fi diff --git a/chart/charts/gitlab-runner/templates/deployment.yaml b/chart/charts/gitlab-runner/templates/deployment.yaml deleted file mode 100755 index c7c6007..0000000 --- a/chart/charts/gitlab-runner/templates/deployment.yaml +++ /dev/null @@ -1,160 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "gitlab-runner.fullname" . }} - labels: - app: {{ include "gitlab-runner.fullname" . }} - chart: {{ include "gitlab-runner.chart" . }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - replicas: 1 - selector: - matchLabels: - app: {{ include "gitlab-runner.fullname" . }} - template: - metadata: - labels: - app: {{ include "gitlab-runner.fullname" . }} - chart: {{ include "gitlab-runner.chart" . }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" - {{- range $key, $value := .Values.podLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - annotations: - checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} - {{- if .Values.metrics.enabled }} - prometheus.io/scrape: 'true' - prometheus.io/port: '9252' - {{- end }} - {{- range $key, $value := .Values.podAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - spec: - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - {{- if .Values.securityContext.fsGroup }} - fsGroup: {{ .Values.securityContext.fsGroup }} - {{- end}} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} - initContainers: - - name: configure - command: ['sh', '/config/configure'] - image: {{ include "gitlab-runner.image" . }} - imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} - env: - {{ include "gitlab-runner.runner-env-vars" . | indent 8 }} - volumeMounts: - - name: runner-secrets - mountPath: /secrets - readOnly: false - - name: scripts - mountPath: /config - readOnly: true - - name: init-runner-secrets - mountPath: /init-secrets - readOnly: true - {{- if .Values.certsSecretName }} - - name: custom-certs - readOnly: true - mountPath: /home/gitlab-runner/.gitlab-runner/certs/ - {{- end }} - resources: -{{ toYaml .Values.resources | indent 10 }} - serviceAccountName: {{ if .Values.rbac.create }}{{ include "gitlab-runner.fullname" . }}{{ else }}"{{ .Values.rbac.serviceAccountName }}"{{ end }} - containers: - - name: {{ include "gitlab-runner.fullname" . }} - image: {{ include "gitlab-runner.image" . }} - imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} - {{- include "gitlab-runner.unregisterRunners" . | nindent 8 }} - command: ["/bin/bash", "/scripts/entrypoint"] - env: - {{ include "gitlab-runner.runner-env-vars" . 
| indent 8 }} - livenessProbe: - exec: - command: ["/bin/bash", "/scripts/check-live"] - initialDelaySeconds: 60 - timeoutSeconds: 1 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - exec: - command: ["/usr/bin/pgrep","gitlab.*runner"] - initialDelaySeconds: 10 - timeoutSeconds: 1 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - ports: - - name: metrics - containerPort: 9252 - volumeMounts: - - name: runner-secrets - mountPath: /secrets - - name: etc-gitlab-runner - mountPath: /home/gitlab-runner/.gitlab-runner - - name: scripts - mountPath: /scripts - {{- if .Values.certsSecretName }} - - name: custom-certs - readOnly: true - mountPath: /home/gitlab-runner/.gitlab-runner/certs/ - {{- end }} - resources: -{{ toYaml .Values.resources | indent 10 }} - volumes: - - name: runner-secrets - emptyDir: - medium: "Memory" - - name: etc-gitlab-runner - emptyDir: - medium: "Memory" - - name: init-runner-secrets - projected: - sources: - {{- if and .Values.runners.cache .Values.runners.cache.cacheType }} - {{- if and (include "gitlab-runner.cache.secret" .) (eq .Values.runners.cache.cacheType "s3") }} - - secret: - name: {{ include "gitlab-runner.cache.secret" . }} - {{- end }} - {{- if eq .Values.runners.cache.cacheType "gcs"}} - - secret: - # Outdated default secret "s3access" kept for compatibilty with older installs using it. - # Will be removed in next major release: https://gitlab.com/gitlab-org/charts/gitlab-runner/merge_requests/177 - name: {{ default "s3access" (include "gitlab-runner.cache.secret" .) }} - {{- end }} - {{- end }} - - secret: - name: {{ include "gitlab-runner.secret" . }} - items: - - key: runner-registration-token - path: runner-registration-token - - key: runner-token - path: runner-token - {{- if .Values.certsSecretName }} - - name: custom-certs - secret: - secretName: {{ .Values.certsSecretName }} - {{- end }} - - name: scripts - configMap: - name: {{ include "gitlab-runner.fullname" . 
}} - {{- if .Values.affinity }} - affinity: -{{ toYaml .Values.affinity | indent 8 }} - {{- end }} - {{- if .Values.nodeSelector }} - nodeSelector: -{{ toYaml .Values.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.tolerations }} - tolerations: -{{ toYaml .Values.tolerations | indent 8 }} - {{- end }} - {{- if .Values.hostAliases }} - hostAliases: -{{ toYaml .Values.hostAliases | indent 8 }} - {{- end }} diff --git a/chart/charts/gitlab-runner/templates/hpa.yaml b/chart/charts/gitlab-runner/templates/hpa.yaml deleted file mode 100755 index bce03c0..0000000 --- a/chart/charts/gitlab-runner/templates/hpa.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.hpa}} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "gitlab-runner.fullname" . }} - namespace: {{ .Release.Namespace }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "gitlab-runner.fullname" . }} - minReplicas: {{ default 1 .Values.hpa.minReplicas }} - maxReplicas: {{ default 1 .Values.hpa.maxReplicas }} - metrics: -{{ toYaml .Values.hpa.metrics | indent 2 }} -{{- end}} diff --git a/chart/charts/gitlab-runner/templates/role-binding.yaml b/chart/charts/gitlab-runner/templates/role-binding.yaml deleted file mode 100755 index 5810043..0000000 --- a/chart/charts/gitlab-runner/templates/role-binding.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ if .Values.rbac.clusterWideAccess }}"ClusterRoleBinding"{{ else }}"RoleBinding"{{ end }} -metadata: - name: {{ include "gitlab-runner.fullname" . }} - labels: - app: {{ include "gitlab-runner.fullname" . }} - chart: {{ include "gitlab-runner.chart" . }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: {{ if .Values.rbac.clusterWideAccess }}"ClusterRole"{{ else }}"Role"{{ end }} - name: {{ include "gitlab-runner.fullname" . 
}} -subjects: -- kind: ServiceAccount - name: {{ include "gitlab-runner.fullname" . }} - namespace: "{{ .Release.Namespace }}" -{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/role.yaml b/chart/charts/gitlab-runner/templates/role.yaml deleted file mode 100755 index 502ef2c..0000000 --- a/chart/charts/gitlab-runner/templates/role.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ if .Values.rbac.clusterWideAccess }}"ClusterRole"{{ else }}"Role"{{ end }} -metadata: - name: {{ include "gitlab-runner.fullname" . }} - labels: - app: {{ include "gitlab-runner.fullname" . }} - chart: {{ include "gitlab-runner.chart" . }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -rules: -- apiGroups: [""] - {{- if .Values.rbac.resources }} - resources: [{{ join ", " .Values.rbac.resources }}] - {{- else }} - resources: ["*"] - {{- end }} - {{- if .Values.rbac.verbs }} - verbs: [{{ join ", " .Values.rbac.verbs }}] - {{- else }} - verbs: ["*"] - {{- end }} -{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/secrets.yaml b/chart/charts/gitlab-runner/templates/secrets.yaml deleted file mode 100755 index e3374f4..0000000 --- a/chart/charts/gitlab-runner/templates/secrets.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if or .Values.runnerRegistrationToken .Values.runnerToken -}} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "gitlab-runner.secret" . }} - labels: - app: {{ include "gitlab-runner.fullname" . }} - chart: {{ include "gitlab-runner.chart" . 
}} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -data: - runner-registration-token: {{ default "" .Values.runnerRegistrationToken | b64enc | quote }} - runner-token: {{ default "" .Values.runnerToken | b64enc | quote }} -{{- end -}} diff --git a/chart/charts/gitlab-runner/templates/service-account.yaml b/chart/charts/gitlab-runner/templates/service-account.yaml deleted file mode 100755 index 1ccea4e..0000000 --- a/chart/charts/gitlab-runner/templates/service-account.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{- range $key, $value := .Values.rbac.serviceAccountAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - name: {{ include "gitlab-runner.fullname" . }} - labels: - app: {{ include "gitlab-runner.fullname" . }} - chart: {{ include "gitlab-runner.chart" . }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -{{- end -}} diff --git a/chart/charts/gitlab-runner/values.yaml b/chart/charts/gitlab-runner/values.yaml deleted file mode 100755 index 5fac332..0000000 --- a/chart/charts/gitlab-runner/values.yaml +++ /dev/null @@ -1,389 +0,0 @@ -## GitLab Runner Image -## -## By default it's using gitlab/gitlab-runner:alpine-v{VERSION} -## where {VERSION} is taken from Chart.yaml from appVersion field -## -## ref: https://hub.docker.com/r/gitlab/gitlab-runner/tags/ -## -# image: gitlab/gitlab-runner:alpine-v11.6.0 - -## Specify a imagePullPolicy -## 'Always' if imageTag is 'latest', else set to 'IfNotPresent' -## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images -## -imagePullPolicy: IfNotPresent - -## The GitLab Server URL (with protocol) that want to register the runner against -## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-register -## -# gitlabUrl: http://gitlab.your-domain.com/ - -## The Registration Token for adding new Runners to the GitLab Server. 
This must -## be retrieved from your GitLab Instance. -## ref: https://docs.gitlab.com/ce/ci/runners/README.html -## -# runnerRegistrationToken: "" - -## The Runner Token for adding new Runners to the GitLab Server. This must -## be retrieved from your GitLab Instance. It is token of already registered runner. -## ref: (we don't yet have docs for that, but we want to use existing token) -## -# runnerToken: "" -# -## Unregister all runners before termination -## -## Updating the runner's chart version or configuration will cause the runner container -## to be terminated and created again. This may cause your Gitlab instance to reference -## non-existant runners. Un-registering the runner before termination mitigates this issue. -## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-unregister -## -# unregisterRunners: true - -## When stopping the runner, give it time to wait for its jobs to terminate. -## -## Updating the runner's chart version or configuration will cause the runner container -## to be terminated with a graceful stop request. terminationGracePeriodSeconds -## instructs Kubernetes to wait long enough for the runner pod to terminate gracefully. 
-## ref: https://docs.gitlab.com/runner/commands/#signals -terminationGracePeriodSeconds: 3600 - -## Set the certsSecretName in order to pass custom certficates for GitLab Runner to use -## Provide resource name for a Kubernetes Secret Object in the same namespace, -## this is used to populate the /home/gitlab-runner/.gitlab-runner/certs/ directory -## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates -## -# certsSecretName: - -## Configure the maximum number of concurrent jobs -## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section -## -concurrent: 10 - -## Defines in seconds how often to check GitLab for a new builds -## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section -## -checkInterval: 30 - -## Configure GitLab Runner's logging level. Available values are: debug, info, warn, error, fatal, panic -## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section -## -# logLevel: - -## Configure GitLab Runner's logging format. Available values are: runner, text, json -## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section -## -# logFormat: - -## For RBAC support: -rbac: - create: false - ## Define specific rbac permissions. 
- # resources: ["pods", "pods/exec", "secrets"] - # verbs: ["get", "list", "watch", "create", "patch", "delete"] - - ## Run the gitlab-bastion container with the ability to deploy/manage containers of jobs - ## cluster-wide or only within namespace - clusterWideAccess: false - - ## Use the following Kubernetes Service Account name if RBAC is disabled in this Helm chart (see rbac.create) - ## - # serviceAccountName: default - - ## Specify annotations for Service Accounts, useful for annotations such as eks.amazonaws.com/role-arn - ## - ## ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html - ## - # serviceAccountAnnotations: {} - -## Configure integrated Prometheus metrics exporter -## ref: https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server -metrics: - enabled: true - -## Configuration for the Pods that that the runner launches for each new job -## -runners: - ## Default container image to use for builds when none is specified - ## - image: ubuntu:16.04 - - ## Specify one or more imagePullSecrets - ## - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # imagePullSecrets: [] - - ## Specify the image pull policy: never, if-not-present, always. The cluster default will be used if not set. - ## - # imagePullPolicy: "" - - ## Defines number of concurrent requests for new job from GitLab - ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section - ## - # requestConcurrency: 1 - - ## Specify whether the runner should be locked to a specific project: true, false. Defaults to true. - ## - # locked: true - - ## Specify the tags associated with the runner. Comma-separated list of tags. - ## - ## ref: https://docs.gitlab.com/ce/ci/runners/#using-tags - ## - # tags: "" - - ## Specify if jobs without tags should be run. - ## If not specified, Runner will default to true if no tags were specified. 
In other case it will - ## default to false. - ## - ## ref: https://docs.gitlab.com/ce/ci/runners/#allowing-runners-with-tags-to-pick-jobs-without-tags - ## - # runUntagged: true - - ## Specify whether the runner should only run protected branches. - ## Defaults to False. - ## - ## ref: https://docs.gitlab.com/ee/ci/runners/#protected-runners - ## - # protected: true - - ## Run all containers with the privileged flag enabled - ## This will allow the docker:dind image to run if you need to run Docker - ## commands. Please read the docs before turning this on: - ## ref: https://docs.gitlab.com/runner/executors/kubernetes.html#using-docker-dind - ## - privileged: false - - ## The name of the secret containing runner-token and runner-registration-token - # secret: gitlab-runner - - ## Namespace to run Kubernetes jobs in (defaults to the same namespace of this release) - ## - # namespace: - - ## The amount of time, in seconds, that needs to pass before the runner will - ## timeout attempting to connect to the container it has just created. - ## ref: https://docs.gitlab.com/runner/executors/kubernetes.html - pollTimeout: 180 - - ## Set maximum build log size in kilobytes, by default set to 4096 (4MB) - ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section - outputLimit: 4096 - - ## Distributed runners caching - ## ref: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/configuration/autoscale.md#distributed-runners-caching - ## - ## If you want to use s3 based distributing caching: - ## First of all you need to uncomment General settings and S3 settings sections. 
- ## - ## Create a secret 's3access' containing 'accesskey' & 'secretkey' - ## ref: https://aws.amazon.com/blogs/security/wheres-my-secret-access-key/ - ## - ## $ kubectl create secret generic s3access \ - ## --from-literal=accesskey="YourAccessKey" \ - ## --from-literal=secretkey="YourSecretKey" - ## ref: https://kubernetes.io/docs/concepts/configuration/secret/ - ## - ## If you want to use gcs based distributing caching: - ## First of all you need to uncomment General settings and GCS settings sections. - ## - ## Access using credentials file: - ## Create a secret 'google-application-credentials' containing your application credentials file. - ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runnerscachegcs-section - ## You could configure - ## $ kubectl create secret generic google-application-credentials \ - ## --from-file=gcs-application-credentials-file=./path-to-your-google-application-credentials-file.json - ## ref: https://kubernetes.io/docs/concepts/configuration/secret/ - ## - ## Access using access-id and private-key: - ## Create a secret 'gcsaccess' containing 'gcs-access-id' & 'gcs-private-key'. 
- ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-cache-gcs-section - ## You could configure - ## $ kubectl create secret generic gcsaccess \ - ## --from-literal=gcs-access-id="YourAccessID" \ - ## --from-literal=gcs-private-key="YourPrivateKey" - ## ref: https://kubernetes.io/docs/concepts/configuration/secret/ - cache: {} - ## General settings - # cacheType: s3 - # cachePath: "gitlab_runner" - # cacheShared: true - - ## S3 settings - # s3ServerAddress: s3.amazonaws.com - # s3BucketName: - # s3BucketLocation: - # s3CacheInsecure: false - # secretName: s3access - - ## GCS settings - # gcsBucketName: - ## Use this line for access using access-id and private-key - # secretName: gcsaccess - ## Use this line for access using google-application-credentials file - # secretName: google-application-credentials - - ## Build Container specific configuration - ## - builds: {} - # cpuLimit: 200m - # cpuLimitOverwriteMaxAllowed: 400m - # memoryLimit: 256Mi - # memoryLimitOverwriteMaxAllowed: 512Mi - # cpuRequests: 100m - # cpuRequestsOverwriteMaxAllowed: 200m - # memoryRequests: 128Mi - # memoryRequestsOverwriteMaxAllowed: 256Mi - - ## Service Container specific configuration - ## - services: {} - # cpuLimit: 200m - # memoryLimit: 256Mi - # cpuRequests: 100m - # memoryRequests: 128Mi - - ## Helper Container specific configuration - ## - helpers: {} - # cpuLimit: 200m - # memoryLimit: 256Mi - # cpuRequests: 100m - # memoryRequests: 128Mi - # image: "gitlab/gitlab-runner-helper:x86_64-${CI_RUNNER_REVISION}" - - ## Helper container security context configuration - ## Refer to https://docs.gitlab.com/runner/executors/kubernetes.html#using-security-context - # pod_security_context: - # run_as_non_root: true - # run_as_user: 100 - # run_as_group: 100 - # fs_group: 65533 - # supplemental_groups: [101, 102] - - ## Service Account to be used for runners - ## - # serviceAccountName: - - ## If Gitlab is not reachable through $CI_SERVER_URL - ## 
- # cloneUrl: - - ## Specify node labels for CI job pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - # nodeSelector: {} - - ## Specify node tolerations for CI job pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - # nodeTolerations: {} - - ## Specify pod labels for CI job pods - ## - # podLabels: {} - - ## Specify annotations for job pods, useful for annotations such as iam.amazonaws.com/role - # podAnnotations: {} - - ## Configure environment variables that will be injected to the pods that are created while - ## the build is running. These variables are passed as parameters, i.e. `--env "NAME=VALUE"`, - ## to `gitlab-runner register` command. - ## - ## Note that `envVars` (see below) are only present in the runner pod, not the pods that are - ## created for each build. - ## - ## ref: https://docs.gitlab.com/runner/commands/#gitlab-runner-register - ## - # env: - # NAME: VALUE - - -## Configure securitycontext -## ref: http://kubernetes.io/docs/user-guide/security-context/ -## -securityContext: - fsGroup: 65533 - runAsUser: 100 - - -## Configure resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: {} - # limits: - # memory: 256Mi - # cpu: 200m - # requests: - # memory: 128Mi - # cpu: 100m - -## Affinity for pod assignment -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Node labels for pod assignment -## Ref: https://kubernetes.io/docs/user-guide/node-selection/ -## -nodeSelector: {} - # Example: The gitlab runner manager should not run on spot instances so you can assign - # them to the regular worker nodes only. 
- # node-role.kubernetes.io/worker: "true" - -## List of node taints to tolerate (requires Kubernetes >= 1.6) -## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - # Example: Regular worker nodes may have a taint, thus you need to tolerate the taint - # when you assign the gitlab runner manager with nodeSelector or affinity to the nodes. - # - key: "node-role.kubernetes.io/worker" - # operator: "Exists" - -## Configure environment variables that will be present when the registration command runs -## This provides further control over the registration process and the config.toml file -## ref: `gitlab-runner register --help` -## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html -## -# envVars: -# - name: RUNNER_EXECUTOR -# value: kubernetes - -## list of hosts and IPs that will be injected into the pod's hosts file -hostAliases: [] - # Example: - # - ip: "127.0.0.1" - # hostnames: - # - "foo.local" - # - "bar.local" - # - ip: "10.1.2.3" - # hostnames: - # - "foo.remote" - # - "bar.remote" - -## Annotations to be added to manager pod -## -podAnnotations: {} - # Example: - # iam.amazonaws.com/role: - -## Labels to be added to manager pod -## -podLabels: {} - # Example: - # owner.team: - -## HPA support for custom metrics: -## This section enables runners to autoscale based on defined custom metrics. 
-## In order to use this functionality, Need to enable a custom metrics API server by -## implementing "custom.metrics.k8s.io" using supported third party adapter -## Example: https://github.com/directxman12/k8s-prometheus-adapter -## -#hpa: {} - # minReplicas: 1 - # maxReplicas: 10 - # metrics: - # - type: Pods - # pods: - # metricName: gitlab_runner_jobs - # targetAverageValue: 400m diff --git a/chart/charts/grafana-4.0.1.tgz b/chart/charts/grafana-4.0.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..2bfeda8dc27d0a02cec153702785f4d61477b1b1 GIT binary patch literal 18232 zcmV)@K!Lv>iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMYcciT3$IC}opr`SVhPU2pZk{!qCPv@j-+&IbjG!I|fY1gbw zel|ox62>IK0-zi<_WkU4;YETkNw!s|(__w>#v<|9*x1-FY-~&z8lx2TUrrGd!5k&= zf2{o(3;8Z zKV!xTO@}bs?x8Fze(nXk!A>v2JY*yj>ecBKLyS>G(g_GkV;EBgzvh_D-jpVI2vZ?4KJ51mK?W%nJrc@J?H7hF zg9#DSe3Wy{LYfMkiXfzktyBD)ATBWLXZa{5VV@+EeoQCyOfZ!43^AOFv)#eqOOU0L z-bFsb#gPB+cJO^L@CUsFkyId(V#bF(A8>+5JcQp+I^_gU|2C1oW!F7`6iM*yr?N1M z^YDV;zlDT{v{IG{{i!UQAj*O`ZtE7DH4?`vcQ=2^ba%49;Z1Au>!Ebp2ANX zQhI{KF=oY3AOG-TC)nNz29J9)`))TF1lzrDO_C4KJwBF7W+=`v|9u3&V*dYh@ch|M zo&P`I*?!>v_wle13Wq&_kYOb75CqF{4oX40C;nU|Hzy|n$GV+GeJS*P;Fl_l)`EG{U$o^K1y^M3o($~`_ z$81JIydQ=%Pese<6`M8xo?pXRi1-k;d%cYfIKddsr7Q_W z4EReHBa%W!qlj>pE9p7PqX`xP&v1CI(deM)>POYp5WZeZz!Q@0&k%`6F_zHEH|Wd96v=nU zyzCr8gtM5=6DesWJQig^uh%09nkaNJoKa2$W%EKiDfA8ifhJa)1jzC@ zKB6%R=R-Igzo+7eVUAPLlgo4iJ}3f4aXbe}Z)7}I_+SjpfsR4-c$CNS2@V++JkYhw zFGzAOV1yw-X|CGP3p)amf|%lxS($pPVbED>H1Z|kD$TGjhvF-U{EU$q64+;WLbzb_ zzG`1_o~(p+XjqXC$^2NC{0YzXM~UWMPhc)yGn8PtP-JmjBLW|gaR|ZWC(7hepyMa;eF1@ zBn1RVj8KK`7;!G=7X+0ufdd%>$D+3ZS-r@x*Nf<7DtY7q;?ogDEaHc4>kMTXqZuPe z;C`61*pfgjQ79I!v}O;Q)MlDWHa6ghMzCKBJP9d{oEMdXZ_xS4KM;+qH}}+g(2n~M=)kI0h$X=B3<6_J9zSz zqUc8yqcp_K*4|V+lrmEvh|mpO5-~OGqNs1wkHe#~GMr$c3KhW2Gpx*9`BJV;37-6j zfru)qs`V#1iE!Vvsd=&aQA)o8nx2(H-z!Oj*aV9sS1d^)c3c<3TwH< zm>er0xdgtgiB10^B69tI0ebJNjfozEk+C@B++fBHLz-}f2>c-pWdof2Ce7x-8=_ikincLH 
zQA~bQ?ql6Vbx_$Xl++_M%h4cNRoL^?Z~TRDLDGrB(NM}(B3@4EWqN=F8fvk1IM!p@ zf(Qu&^imT0lw@#8;uun@j3KG@2|xn4KuqMBuBsNAT3lRfEQ}^tOmWVArL-`kU9mj1 z(hLwA5y23fOUc5liBmOEbn6vh-6by9K0M(V!wARpab)BuMLNBMkTp4s&Vkm$VL}c_9}HDpU#t#u1LBA*NAuK2zQeoX*Z=YY9rx1e;ND zcv2CoBb5GzNEp*RQadf9m`?g6{TB``i|0N`1(o8I`>Nsorh!`ikn+uAc^dGk2Z&47 zLsIReFt9%6a6rQg%*G_fy$w6gp%P*qpL+U_zG>tS?K(0>oYb#zIwOoKT?J-{5y>|s z_d!B4tPxfd4>T3j@jNPv1-c#WAL^gMMV2lDz>yTid9$2-ytn=y}-Cqyi5)KO$!4<=D}?| zZ%Q}Q^*UTKx^-U?f0rZ10ASrnjv-?P=C?Fb_YW<^h*mBZVx&^U3`}b_9RpJ(jM~yN zup|}z%EubO`ejuyFhKaB{H0woatSGs9_JI1a+uQ`E~Pq6QZ7&&YXU-2)F7+p8cnkCK4)<;5=VJz=ux)3 zRx4fWLmD95$OMNZOB|Mt#XDgR#h3}>+SF!~A5Kx4DyN+^c1RhU=HtgzA~7nCdLqK5SUiwuSDk8$&3wfX5qiVYonyUA;7+meqAfs?HTIUxrl3F) zX$~uu@x$q>A)HbvdCM}Iy{dUZF{j2FD)l62;=ZAPS+bDIWMk%?o-)h|{BJ2R>@Tei zAr2RK9#m$$G)wFXu`-h;*e{Be4!(bJiTJD1;Rlb7_kTJ2pOX*o?LJJ-1x?!O%DEgm z5clgv1KGMM?s7_jTp!~k#X{1~PVt6ey*S)=B@?f(ZT(q!CEGwO*tSHxv*7!s3ECH! z3m4K_WBv$nytjMvHvQ4wuG=oaXm8g9vN;0a7>_YieiK2Z${_Xm*wE=alvT(ePcijHCm(e?b@OwB}21eVSGgjCvEIYAoV?_6K=oL ziL=iRP$+VZ+U85IVBtp8EZEmndp0PZ3lbX0!YK|f&c>9T>t5?)xm459phxu)`wJX$25-Vr8M2mw9&$PmN+eE z)l9#h&oLsg+LsfE32FVyrc?vUkcuOe1%f8AG|@>qLotc8hc6e?Kz_B0zP>N7e8Uo= zGfpPyS)NtC=;=Sx?6HFIqQ@GkDpWM+^b{vqtjwy>T!Ajk3rVzRQygbffQu}y$SV!2 zyMrn*R4nyERN4JNJJ!Gq@-C#lAzSlhYd32}*643k5n%D(;?F&=`j*b+{2|$0$w*tg zRmK&dVwI;@&3+B%vTaEimWW6SNHN!QS=g5ocOG=Pwl?5gx2C{R8_Js310CxI)Xbk( zod8@=R@%+hjMfnMEGZQ|*q>Lv6Xr%aK8xvOA_w-Objt$3r%&1_;h$|-oZ8uZ0bVj! 
zvt*1G&&p`Z+L|GEOQI;Se$7jtXfU<4{-UL`I97cM`4OoYe=5cI*IUJX(a7*(EO{E$Xju8u|YK}`oR!@kpW`v|BK#K5cK%{K^ zgM{Xin1HL&cTQ5{K6m>g99W*FI^ac1FS$5vTZiH?*G6$hxpsm}k~DDu`)}Xs2q^`w z6s&PvcBnD0KfbpwD`+UJmkNW_q!DAm3v=G=}5_IKG@C9Rj}cF3iKMEfP$+H@Q(wpA6I zRvT8Y;e@fJT`!E5RFY|H`qth0G>zvl!c#OOG`CGv>XrPV;%vP=2XR>VDlEhnzr~`l zRsUS8rv3bfwL`2S!TVB;Itj!Vv9>cNOU{4p?(S{Z&j0Lg?>&7u|8pPDb^vd%fH}<> zJpTF>f}gcBr-%uF>+8o#n=BMaX?q$19!k3c?3s}X7FLn<{c;h=R*$h%?Ql>KC0t+2 z4~6~eSv`B4$FZ#IK_JWcR5>IwB&ILCuYp{gK{X!W+Jh_59y##+2!}Mn@Lzz#Ded)k z0wufORJZnwPoW4Lo)M%i9R~p>kTOv3udjioHMhgT`x7lL+^AuNN3&P1k+Z@egDwmO zd=>_#H4|s~^(&BZ0i=zWwce3y6;}JYBMy42h*!o8J|4e)!Z#%^9rnI{^?{*uf*TF1 z;7`*mSi4AqE8od#IF0n5fMbqL$u}l6+M@mAv0_>?!ijLGN0#A55C^Yp;PWYt8aySDm3oX;B$}~ zK(4(as&|#C-fV(*cvJ&UHqCDZA`v)I&~br8E*G)c>yq!xk897u5KgI`(MJZ7;iCd$ z#gta??*`H@o&#--9|}m&90aY4#-XXl6>c;OS48CHDzo>vGJ7Ylj(J7p5wb!{S5-gAsAhMxi9aunDf=?;GW6Cb2QT50US0A^iu*2fd-qQfy(g{gR z*L5LCmE_<8XRuwI9Fd|{8x|eXxTjhtW3|&_3pmlQt}xhG_hVWFfM52H-ygnzGc2Qe zrM}Cp5fJ_5FMm->R;jo#m0Q+ zpWj+xTl3S@|J`kb_4Khs|9`r@TR;E*Y_R+MLI1yx=j+%0cQ7N#5I7df4IugYi$tO& zoZ=yT*H`?k|6T9Zm&`IL*G-~=!-AeE=bN45I@k8gugZ%S=!8RxXImyFOMaMP#-!eQ z1%l;i2zyV}pCmcS$7AvZJilyAE=gZ=#Qt}^m(~YYcinYfYWqKbx=>39q z>#DqveaKO87~v4*9IMy}_^BF03S)v}$5;5-SufwmL1rK(^t;@AfEV}VgyrnA6GD3>~d{qabFKYX$JtU7QO zn9!|M#dra0E|Qfcx29}m1U@Am7dzTWM5SESQtbOT>av+=%*POHTN z<4L!c+}E!KaXm4cORcK%W>}erRBrI#eOQoR}+Z_wX!wTJ*n+M(fl4{$r{BxAXkz_Vc>`cW3aB|LI=X&Ilujhzy{R_3So@+xjIpeV17t$s|3fJTB0a=4DSMsH3*lQ_NB;fQPD2Y33 zT>(iWoC?@!cugz4HIjkIGR+Jju4ACJO0$P2K-ofHf&J!eH5Q}M+g5>lx}sjKGq_98} zMLI?MZb-pp8q%Of8rHIGi_VUGdACr^#h+XBx&mjA7I_9~49MJu&oEh;CQ ztcQ?#pUp|#VlTHz`PJbTMavbiXNNpl~44Le*GA4hfkf)rFVR zO*rH(qV_87{=8au&Vs2BqD`^m*Fp|jh_NHzPd7n!RaliMyXrRy(;>M2h@^c!HB_L| zAiyW^{7}+LauE>*ezwY?3;56f1k+eG6xM=O#{Y12LiV!>8ko5oRgW(4d{#`-JN0R$Wm~Cn z)L5%(fmE^i?$znvKOEG_`|aV$X|3Sf{U2YwZCeJ5kfS4Z%JpL#d;Q_p&Yu)+N zoJQ+y9ltvH@bUQNt43=hDvI^8!bH|WVw!~ae*fL8lcW8Xo|a&t*|H0kvmQF(tOjs4 zAUpZ#@aXLH?aA5CuZ|C2|6>*S>MeFav{01Wwr(xNE32S9^Sp#Jb`ypHzPWH)lur|q 
zP#aQP!ButhQgyQ@-lck5%SPE6NzqoZ%J7_?t1yH$)wz|(9XWSfX61(4ej`?O0}fOS z_=XHjLT5Z)XDBK9ph3jh@7E{jOdYM@%|JTgR7Bu|8$_iFy&wh3@vX$uT z3VzM0z||#RWe1K|-vHNU!7i~`Z)T;os{ge)wz+m{ra_0US1T zmeKri?UaSaykl&ZM*$u=Wn8nft7gqiebg*S#_)tPcrr!&Cp@=hy?=3TtiF>!!_jP%E+k)*`{lnjkIO>sNEhv_M%cu*%{r z4)(1Lt)+{qJsV0Za$8HTpl*x~orI2LqLI~8;b+_}vyMPHG=!bRFGd@_yk{5}@v z0-5}c{aFhjb2E|sAytLdhEYx5zn+2AdNv7fo9(s)OTVvmRm+0geS)N@kufT0NaG=# zzC7yg)XAQ|puZx~dwa`{rQH%WjMG{0=JnahtCt^-4^RJbwtw*M@cr4xlUK)0G`E+l znsBcfO@_rQo0jhQQ!wtd<14IQWsz1)jGwZjQve#9HPY^%m%_ zD`W0x|K#MC561_qV9%EQcfz2PLZFGoBBQ`^Oco?=5)`(f>+t=_>HgcdXGd>8zBzn< zvKURp%^(Xgwd|}#69aD6oE5wvDwjlc0$M5I-RaSKSI@$p+hS~KV0)>Vck{jrigXCp)UUHwG1`XH4xX@*3$0Xw6M1XVA(SAaXM=Qrgq6z z(@$3^(Bh-3U-Q*$p<1G~i3NVbF8^TLe#1(84XyhOO{1~mZFaAokOeANQMer~-nyu? zX|HRLAB(FSZrPS|Cx!g(UQw}HizaFRu-9s~m3KV#ZpyuH;xy}09=p2_icgKb?j<21 zu4dcexTpfv;i|YDY97fI+0kN~7k0^znl2W}{}`o0ana_8UEi5KF0j1W6}?&uP?E)_ zKMmYXN8EG)H{8FPOAZ&gkUPYho37g7=nLx!+p#Q~?+O03^}9Mv>SeRgt3~v=PUG_G4{h6;t1&cmDgkygK+=W5O`Lrr)wn?Z zokz$85_p`)91A_t4QW?QNjm9OXSi+r*qFj9x6}d$>Z+T=U2djEKX+}r;$6Bui#Hek zxiNL?4ejZ6AuUAxN^YSpkyB+Ob=FY+E?2Cf+jlC}rYz*VXhFnEuAwgIZnEU6>uV)I z+MGLGpe&q%Kj-P(-~VZz|1ily7>)6ot^;3k{&#zCr=I`k+4E<+59dGb<7pAwY=lC4 z1nr6I^im&+BngocUl$~eiW@19X^eYr&UjS=ifP zoue#U8n>hc$rKrvYqO|1IT@6OpE~(DY@~Lu1*Vu0p~5WO5+~-cW2;qF-uk5*soW6W zY76Z)Y=;}jzH(h!`Q{wzSRj_4v=-|=%%LW&_11MY<4oPZelrkO6vWlb+&%9zY=GxU zn`^%p2!zfoE=sQ928eFjQ?_`2ha@aGrAosF0gL|{oTI=Tgd-M67%Wgu3}&r|9`r@SL6TBpKcEx`2T%8 zYv=zzYMQy77`PQTG*tS(SBmglT@YqmYI;UaDkmpRu#4u3YsaeI@q0gHa!1o)0smi<_h)(j z=ciBW`9GdLeaQcOKhF(V`c2|!R^em+7D7S~eo@>~ap%_8ukbHQQt-CCwvrVo;oklQ zR}11&q@4L;n=1!OQc^^?d@7?;KG!#2ag38K{m;)RRwoc_tV*$YD?h^ZHM}T($)e2- zQ|HemvFJEpIUd6N^6Qk;R}*rnhV$)6rU3GMZGTHRYGSClFJ(}Z6E&M!P^f*oQa{7 z0J7H*GiJ)zEKCiv z}G15_!~Si9A|viactwMU)q#;gi+n-BDXMUB!KA z$&?>n(>#sjbg7%T;NFfdw?UzA6g6b0)7&Z#FGce!~vClc;Oq|2T{3Ji)10 zmjh@C|9`sOxc}?fv*$Yx{Qo{4twE|)yyQNS%~fJ|H|ty;Wf|`mtp1>&tq0FiJz!ix z46TPr%(u>6hu>vWIm6*lMb{`hi&tFR_N8u|7speS(Tqemee2-=1~ae(uZ?k*yy`?S 
zB=E$XE#~G|eXN41!0*n@8w;Qri9@0QRie7#ux>iAj7D$U2%-f3`v&j|BzK-|*Xr7S zo+o{KS-4#ECgj~Dlz$Z1tL1|q5y$GRv{%~-@HSQde2R9S?eRQuM^|*wZau8t=y4XWTqx~v}See8uTPS=f^`jFi zdAMvOZP&KGwZMCLDo<1Yn_?8jm~-FeFSxT7xXk|F89c4&e|vjRANK$5I9`1j+m#1ch)-RMUZhDh!_Psg2z38}XS;MFgN0bTI zA>4BDtPhV&ze=@r3R`g0o@-H9oy3qud0ScMt6!5b_=~`)-jthdkFY>i<{av}?J}nv z`=8nYVez-f5m76%xGgZw#!YGz4w z19K%lklP#0T^g=n+Wk-fxIJh!_I1xi++k%ARSE%&P8>irsi`NrOzy{a5oL7FtS<2VjOnq{W^d7Zyn1t{hSS^-+MDYjhL0TYXW2eW zXpwjgWf_vc%bq1gtTYamr-CH7+Ve{7wGX7Vy%tYr#>kAsc!Hy$4tJ9CBOyGpA71eg z#Yi6xvn3e*H75*52aIMfQHDk&CW2ryof~jqa)@y(V9aO&2MVuS%KW9dfP?*)XCHok zb$ony@Cqn{mw*58%X?!qzW(sbdy@s$hu0^sP7e>vkDm_TzBPZIygGeTygzw$`qBMy z^m6~m{CfZD^z6sO_XlSuua18{G?)MA*T?(6*ed@xIeYo{;}0KB&5L(Gz5ig_+y8iQ zczX8B@!{#KvWM5Y2PbQz{P4I);`P|RvFVwL7a61f!lA%X@kVc17C$3;nO-6m?H?VM zmD=)6$*ou3^%Hw1Cc0ZjzR@%o~lICB! zzvo#N;{>My#U?jNSJ5%or`5Z@+XU#xlnND1Ip+c=7T9m1owUgRS-3{u?|-ad|2^BQ z+kd+|gNOaUdwK44|4)U8b?y5dwXOU=wkmNS)~1$a)$k1mv93e)Mjq9c;}e~oj7f@O z@*8HhkMhOJU05-U!Vq)*jz(6PG+nHf@s-6(XRC6v*6uZ_`ftlL&w3Vq%`q>pcrZCN zI}?R+@*A$UZ)$X0!tPeB6@=Oex^KPS>ra#auQBnTd6wCKJG*uJZ};i5y$Ak(A5WXg zZ*o7?ch44OadPcJmGkEh&A0E*1pGZhH@-G9@71mLCyjJoW!A2abbxEj8d#oM?>z(1GH%W>3;H|x!y| z$OkpxcT)qLl#8pRNwkR>Z<>+O$u8NMTk>|9C0Ci_Hu7}DY+L5Z8xQbiX(s>R_S{XM zCjZx}fsfPKJ)Qp?G|vBTKil1VIRAey&)20lgmnbz%g?ur(bxBb{b@XOI<);mhJMYl zbIQJcioSNsT=OC}H%F5>x(-(`+S|ox=z62tk~6LG{I_$iY)v$_O#ge@IRCS|_x!>C ze=pAsPAsmS5&X%S06=-YY{g%!UVr53ZtmvtZfwJ=;z#CQOQWzJWKfh z)4}uI#`<&mQ>yeLU+wg866S>NQSmm&T%y-&&c~kI2}H>pmi*a=SR&^0ptnC10G# z^fuv%3gKxjXfs0Ak8w8dz&4eNbEUeTbIi2mhWuLLuU~5(Vlsx#0lcLs`Vqw_4U2oL zn@Pgm8f_xBm^7y`3r+Q1YrAdL+JxhxdMCJ@!^pfit}O-^lrTcr?WE3L@k+p3%IgW^(a$j&n~r|MJ$UkDriXKA)LNE zGVcUp6D*Fp8Yn*-rXbfHN}~k}?>&vw*(tg1q|(%}_)=4BbJMw_-@cTvbXCxBbi~-N z=T!H%mR%3e-JeeV4{49>I@G}B{=Y{4kKLW;5BlGIJnbCZxa;js(2`Ugb|YYo_rH9@ zt(h7Me(y_R*04cx7dyCR{g+8v?q~n^>F%KA{?~{6kN5KYVeKXF^a|1gRh2-Tsl-wJ{t5$BHxW&B8$HuRNF)5>(GzoZ6ZjPR z5uWwaJdVLX1pgAezkZcnoXL{c9z4ofylEd|9g2_?EiKd@CV2c@_WdOKY$0aI2qfSj6$#+cVw`*Wm<%m 
zOa(SkL3>IB_v^xh2u{J)+zZZkC#QrDMM4Sn%SjEPe%s^{>PL}SLjBU_*n@Ps-LsJY zujwf8a{d2VWB=zN{_}pGJM;gWoD#b80pUNX1TZIL+s?mgdHPMyzAB`DeqgYF%4aG6 zU&B$*W&D3<@T`9S=iovAzn|w@@&7v=5dE`?f%`epsj;{2W1Xj%i+hb+czD+FEad-d z3I$uj{|9^9dv*T*?Ab&9?|XS}%>V!QMTq^W8R4S!e_!|i?mmCsxc_(e;r!3NJa^jv zJEGCu6u5uGJ-wDtA8(4|B;eEjl+p`t2wHaLDi&fz9$e_RzoB^(Rz@t5NF281-5!w3luf`Xtl<`5~zwCN`U z{}}o?XZ@IlDDKB(gumdh4}Dd*FRS)VaqR(rC#R5?kR|f>k-FAL*C`q|_%iS6cOXfG z1X&)(I@rmC;HcL}<=c*HjoK?$PuNuEb%h+FDUI-ni5@ia(0Oy0?4)QsmK0^Kd$zys z(xRX-W^PQS^Wp~Wh8;BC!b>a@{5`21K6x(L$^Up;;Q1=in-TRoI>GzAfF>JMDJwdG zSLHf(-`aH7#Rk@L0uIF4Zw(~%AfC+TEM>Q6|J8CDn5Iqy*#^)?{I6U9^E zNyhM)e8G`s!++TfK=KNpsj4YKf($c=Ns5DBaBy;VA}GVX4R}eDgr@NG%M&Shc`uj{ z(O3V~;P-;jZ>+EWYhO$!efb~zlh4wA+0Y1u7kLI_5@X)`F5s70@4H}xE_&YuBFTE+ z{eQg;_!%)ma}I|GuXrz*@sLKi7nqIcUO*y@`no)$|LUo09Zvo&n1fya$FKGe-n|Nv z=y&L2@%n$Zx3k??|AXy^_|JQJHsFn>0QftO6Ht~@ulF4srd*&n)|q85@d%1we902@ z#TD?$)cyR#L@ZiqOWCBq+3Rg=!0FrnIynBXUhn+;Jftb7G44GADQke}9)JsJWZJ5- zpsHv9kiAI|3}C0UsHali%D0+<$e+e^Q$iiileuqRoDUZP;QOk4S-cCRk16Q_Dz~5d z5stCI>a){ggtM5=6P$`xP)wz=g6{KFW%X5>959^F8RlSz{!?)^jx3VV6sLj%;+%#A z3CmVXB*`{(H{8WlphuQX7*aEfQm{uR!04+|Fd#_rB_!xyl)+5f)xF-nd7(QnujMak zSUicz1%}ynuoFB5{|A`u1cP9(S;<+9gan0QARJKIBdHt-5@Zx(7%_}4BowB-6k`Qo zg3=ttfI_92EAThql)|oT9-i!{5j+jHcY^Iri9f;%xXevHTod{aFJMO&8)yXRNakGY z`@P;393iP$3(R2Ia|H*Ohm5ET@0OO<<@KFGH+`;pSN`hHP0HS;yi>Dx1?Tq6*!lAD zzt58qW3b*`IN?y zH&nMfpMzOMk_)de!YeqoMOPZ(Z3lRLuHdTq`nmzD5=6~I*a4SU#EE)C*nPeuv82OO z_3w7N&`XFBs(P?T#=0Q80_Aj}V>mKZK-E#QjdLt#$)YCKe91V@56ADRIAWONbOGTmgUfhDyXnh;%UrIo1g@_O`Q2i; z+*CpfTe))eL{m#;cQUooCgaiJPQI4YmuI~qT$Cu3s zwSc}XpO8AT0*t2WZwpzyylwlYz*Xn5$RE`yXb)4J8_PwJg0G|ex#Zxtz*q$?n@nl} zzFO$Z)@q0AE_}5mM_zkdS2pqhDi@@6x4xeBsP{7$%I%jq7c?7Op?)JW8Ey312Pn6^1wyIMQy|1#tE6 zU?^C)rj&Qf#tyh-9kn&y0oSL`YoPltf}tZMrd`05aH*mSSxPC)7|w7S$v^Q7v$+D- z>^K=WK>!~)9_O)km)lPyoxrD16Nhn9XjjbsFDJ17_Q#?_sTDIC$CzpN86Woh<$cFK zClek_h?wRhb(iub_fb6Z&B*+)sNJ9BB*K03pTYu{S+DPDge?~tQzp$2W05Eu zs!{$ z;vZ6bG{%~O(}An(WLXEg(uuw&ZpD7wPIx%QQ66KqQ2lLaz!%?hD5}B}DyEo$n4%QS 
zb?ckASLw6`-%K{vh2f7JLwP;82@Q|~=Ylc}5fZ3U{!SW7J$cyz4fs@=Q*;bY6?k%< z&BAk_49=aTXVPw7BV3&Pwg|2h@>>r(y{qzKY-!D?qL6g^%_S#3#whxQ5rIFX zAzqm+H{n{OonN(I*xjk8-;ooyUq{{2UKd>M9o99tK7m&%UQ*f0W}%-kDzMU@JZMK@ zlfLrNQKy1_1uCQp5b%5i5n+Xsp+RmIt~+@krO=wv%k%&Vw1~4_!Qoh^1=xa!rjLbG zkFZ&W^is;0Dail{P?1o123IPaYb|gs@RMGtkWoLBWqk!qA@p>#T+L^C1?Quj&qwsj ztvoSpaJBkLm%}B{q*DG|#ns=$_qYVERzK--xT-$WyLv_|a4pcon{Y|pq@jYte}YlM zZ_Oxfs^{#-%A+Q*qv5DXfR6HaT8}uLtycF6UMWiyr!&H+R%d635$&DH5a(EFdv?3x zt{QGSd(CK~-P^0_g{bQDQ+XAiBxZ|1Z_meb&rPp}d`Q5tD&Qd$#sY%5{7)Z{fxI)1i$yI?A5ow(lHz-5Hf+TnU>lMF5ZZ4q2W6mT8FTLD+X z^~yv7FT&U&xNIbG197*9D~t09S!TLxxQ=vDv#Mf>NV|G^(TeeNs3fBr-W_NWKRMKoO z+{Qbi(TZD(>w)X-N+{ECt+=xN-U6}Y%|cWbkE z@p`nLac$}@Z5_@p$R(LD67?^>=f3G^I0&|borO^oO}LI0L@m|fS{Al+N2A=rwRBso zgsUjJxb-`RM;N$Dq1)k+D#H*|;JS`Pf=d!%;25!Rsy2HZ*)0t=6h*wBs~}R|cS76~ zX?t24tqZL#xQd$wQg<<2!N-*RnwQe3LjI;o6#t2WPTM&T*RejN|im}4|(k_%&mC2C?_>6#ygNOTj3~mXASmX+%9=z8SI#l}2`>&m(r{hix#jo; zTnd7A;AF#cC>kjFYX!hcxH81~C1uef*lq=vZCJx)e=UJc`yyJd)#`A=-E1CJ!?&79 z2d)(+vKwC-f>kC`mGqZQrvujt6L~whsuQW2)rwyB|n#Au0?vfimZE$G~ zqg^+BskIM#?7*a{l!%b(>UnE>X{V3FIWk}xQnV|mock>uC4syg>$1^Nweg<8{X5_=z)dsDuE&}7xxFaN>tlDT`dzQ^#lp)@sIm29Y;iU z{#3Jt$XR%VK{v})LR{7Oahbom&?=3!Y}HzG+Nr3be>)k9b$qF7@2ivkX^34n*Z6$}*mP3-HtkR*~$f46n zxz;VrJKz#%5_D7;1iQh_Pwch8wcxaN3tUT1Y2PgzE25WaOi^^;dVbp?p(HD63((Qz zU54o>$AWFucYPftx!(a~olfM{7nEeN(pwv$Gb(+{93^qjZ+>+1)R|xneH*cp@NR}! zKPDr4e!K6MGZCaiWB>8iLY469Ve!*9WqlQ`Y*Lxp1bq75ry{qG-l;FKr&7%@3`qVT z#`qEvET*)wuOC%5`_2Am06*fGUaE?mCXx{;)M+OG{*uKgHL4;~^``XFswfzRQ`66| z#A?}%M5YnbI>Ddppqz4>^zUDsrb8!V=T|40py%kS-mvF<34*}+rE-B~DhH2jPHi89 z#7M1%+q57eR%t9wN z0mpbe1bdpTxDlPY*Hia*OzBCA#bGpro#K+;8PN%b?VHjgGK8a|PxVpdR&63EXK~p- zLzm_RowU>>f51|urAc4@?w2B>pCBp3T_9_twkzky{gYRFyOovW1i^%MZ^fK6P|=S? 
zK$vn=MHF*&O*O+JXDR&8$%pr5D&o0XG>)Q5Y%w|CKO&Czb^)g$jc}wV{Rs{x0ldMC z5wY3ZS4o<(B6HCcaTsBoR)Z>Y7OUkKs!Uf@YW;JX7cL(JqrF{&D;=5%_}3i8yx0an zV_`hJvMHPjHZQ@8a7eYj%>iXuOr%bN$5iqM8-t-h7Zf7GFM{6TSi81#Lquao8XHu( zt;)_74^v;;Ix(YDZI>OYLZa+Uv3HHH0tv&Od9iYC}(TAN9YQ+<+>#*mZxzP#R7#?|;>Q`6RmTtLFP(&|O4^=>B;oyE%7 zf&x_aiD)V!d=b86zkl6%EqgAJWtOGJlkDNIJFjIQ z@*mra#hO~4c2~Q)OWe~mhMlFca7$VnyIXSQ*3a9?ZvnanMfo~AQW@H8)5K*oG8;5y z@k=D+NR;2SxJ{eG6Kdmp`KE2gde#k!1!f#*F7)1RinU+6hjdhkl%|m~1QZzEduxI6 zDrrJ?Du^kS)L%j*1^6W+Lg2Kgg^J$JQNDb2=%`{`1n^R&W#e|;)tX9ZN=zJes<*-H zrfSdKy=P|Q6-Q>TAPRcDePCExH#m*-_9Ed{WuU3klS&U-w6-Ntf`H*D5A8;7lE;Eb zMXs{VByrWNwj}2)XAGy32}=%>F`8i(&mkdloYik$u*x$jodV}fupvi)9YM1uypCvH=NqnPEW5L3#zklV16T{uP%p1b)j&w`6|INq?hy2ztE}XW-6c04;;@6&uiQGc8h-t}((Pl8e~wQpvG+ZhKW%+#-P$ zNrTEDX&vucs9D!G55R>(W!sb^MVVZ=j37}`1;gANHWCyPlqxe^&S7OCTbS%tB8reo zQ!!?=CDCa_Lms%|q*N|sFn3hM5T687z$DXeIR zjw|!+Mo5eb=C6@#$#I0Zw!8G#gR**gMS@1c8-}te5v7$z*+g0ti$PE+WHA#)-;OO0EbBJkuy+bop9kBnaOGS21W;bx;#-PBOGQuaww@q?TaAW@+A z9<0}0P(4+!EpWwIke*i68`zB@$Mu?M8MdX`;>cxI8rY${| zg$tfpul2DVR;Q-LLcQiu6FaRCD?#h*;ldQ72s3-ZgZ%um{Mu+OLF~fFXGJIOl6i-| z=;ZKjM8x}S*B^3^)zhJX4youB`mG z_WXR4n}X6P{PU%}NGen>BTDy&^irU2#N2h2s|7{o|npT9%On zV?C2f0)FdU=>HoZ>tGH^-z+*``w-T3`P&Nje{4S+)bjuDKHqt`|Knbs4S0*SUyg9C z*Xd9JxsFqwGjne4Vx;NB2Eecq~mv7sjNoAagGf+}m%$s1Cx?6Ii=dJnGiv$_^+2;Q5n0_8rP zr;=QIl>*n!uj(Kwn~ycPJ!zUY#?~AifG2sbI8@t&b>)$Qo}!`G!`WDhA^mnB&}6ql3uhB zafVVHx4hTeM?aUlzyGKDSR)S|;W5K}dPY)#*$inMdC$1F7|P#hs_;_7iQR`k$dhEg zWF!7C3jgu{Qu@oqsyp$Wo6z5yaj)s=mjCNB1D5Ikd(Z0fe{W~-aR2kYJa>@)HhyDU zj<*rRvAuZtG_#**&$D@3WHr$q=oTk4=s1+JZs|vT>s zr<}uwhC`WKT;yQwxSVsP3M9v$Q+hX?mo#&(j8@RM$AdXRD9$*3FMiX~Yir!kUHVwI v{s)7d`uczVaR2B1JYTQBH3SdO!}IVwJP*&ec>ezY00960rCStT0Hgr`<4W;@ literal 0 HcmV?d00001 diff --git a/chart/charts/grafana/.helmignore b/chart/charts/grafana/.helmignore deleted file mode 100755 index 8cade13..0000000 --- a/chart/charts/grafana/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. 
-# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.vscode -.project -.idea/ -*.tmproj -OWNERS diff --git a/chart/charts/grafana/Chart.yaml b/chart/charts/grafana/Chart.yaml deleted file mode 100755 index 59b0f9d..0000000 --- a/chart/charts/grafana/Chart.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -appVersion: 6.4.2 -description: The leading tool for querying and visualizing time series and metrics. -engine: gotpl -home: https://grafana.net -icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png -kubeVersion: ^1.8.0-0 -maintainers: -- email: zanhsieh@gmail.com - name: zanhsieh -- email: rluckie@cisco.com - name: rtluckie -- email: maor.friedman@redhat.com - name: maorfr -name: grafana -sources: -- https://github.com/grafana/grafana -tillerVersion: '>=2.12.0' -version: 4.0.1 diff --git a/chart/charts/grafana/README.md b/chart/charts/grafana/README.md deleted file mode 100755 index fcc14de..0000000 --- a/chart/charts/grafana/README.md +++ /dev/null @@ -1,305 +0,0 @@ -# Grafana Helm Chart - -* Installs the web dashboarding system [Grafana](http://grafana.org/) - -## TL;DR; - -```console -$ helm install stable/grafana -``` - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```console -$ helm install --name my-release stable/grafana -``` - -## Uninstalling the Chart - -To uninstall/delete the my-release deployment: - -```console -$ helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Upgrading an existing Release to a new major version - -A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an -incompatible breaking change needing manual actions. 
- -### To 4.0.0 (And 3.12.1) - -This version requires Helm >= 2.12.0. - -## Configuration - -| Parameter | Description | Default | -|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| -| `replicas` | Number of nodes | `1` | -| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | -| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | -| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | -| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | -| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| -| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "fsGroup": 472}` | -| `priorityClassName` | Name of Priority Class to assign pods | `nil` | -| `image.repository` | Image repository | `grafana/grafana` | -| `image.tag` | Image tag (`Must be >= 5.0.0`) | `6.3.5` | -| `image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Image pull secrets | `{}` | -| `service.type` | Kubernetes service type | `ClusterIP` | -| `service.port` | Kubernetes port where service is exposed | `80` | -| `service.portName` | Name of the port on the service | `service` | -| `service.targetPort` | Internal service is port | `3000` | -| `service.nodePort` | Kubernetes service nodePort | `nil` | -| `service.annotations` | Service annotations | `{}` | -| `service.labels` | Custom labels | `{}` | -| `ingress.enabled` | Enables Ingress | `false` | -| `ingress.annotations` | Ingress annotations | `{}` | -| `ingress.labels` | Custom labels | `{}` | -| `ingress.path` | Ingress accepted path | `/` | -| `ingress.hosts` | Ingress accepted hostnames | `[]` | -| `ingress.extraPaths` | Ingress extra paths to 
prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). | `[]` | -| `ingress.tls` | Ingress TLS configuration | `[]` | -| `resources` | CPU/Memory resource requests/limits | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Toleration labels for pod assignment | `[]` | -| `affinity` | Affinity settings for pod assignment | `{}` | -| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | -| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` | -| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | -| `persistence.enabled` | Use persistent volume to store data | `false` | -| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `false` | -| `persistence.size` | Size of persistent volume claim | `10Gi` | -| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` | -| `persistence.storageClassName` | Type of persistent volume claim | `nil` | -| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | -| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | -| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | -| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` | -| `initChownData.enabled` | If false, don't reset data ownership at startup | true | -| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | -| `initChownData.image.tag` | init-chown-data container image tag | `latest` | -| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | -| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | -| `schedulerName` | Alternate scheduler name | `nil` | -| `env` | Extra environment 
variables passed to pods | `{}` | -| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment | `""` | -| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret | `{}` | -| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | -| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | -| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` | -| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | -| `plugins` | Plugins to be loaded along with Grafana | `[]` | -| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | -| `notifiers` | Configure grafana notifiers | `{}` | -| `dashboardProviders` | Configure grafana dashboard providers | `{}` | -| `dashboards` | Dashboards to import | `{}` | -| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | -| `grafana.ini` | Grafana's primary configuration | `{}` | -| `ldap_enabled` | Enable LDAP authentication | `false` | -| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. 
| `""` | -| `ldap.config ` | Grafana's LDAP configuration | `""` | -| `annotations` | Deployment annotations | `{}` | -| `labels` | Deployment labels | `{}` | -| `podAnnotations` | Pod annotations | `{}` | -| `podLabels` | Pod labels | `{}` | -| `podPortName` | Name of the grafana port on the pod | `grafana` | -| `sidecar.image` | Sidecar image | `kiwigrid/k8s-sidecar:0.1.20` | -| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | -| `sidecar.resources` | Sidecar resources | `{}` | -| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | -| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | -| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | -| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | -| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | -| `sidecar.dashboards.provider.type` | Provider type | `file` | -| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | -| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | -| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | -| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | -| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | `nil` | -| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | -| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | -| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | -| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | -| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | -| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | -| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` | -| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | -| `admin.passwordKey` | The key in the existing admin secret containing the password. 
| `"admin-password"` | -| `serviceAccount.annotations` | ServiceAccount annotations | -| `serviceAccount.create` | Create service account | `true` | -| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | -| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | -| `rbac.create` | Create and use RBAC resources | `true` | -| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | -| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` | -| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | -| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | -| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | -| `command` | Define command to be executed by grafana container at startup | `nil` | -| `testFramework.enabled` | Whether to create test-related resources | `true` | -| `testFramework.image` | `test-framework` image repository. | `dduportal/bats` | -| `testFramework.tag` | `test-framework` image tag. | `0.4.0` | -| `testFramework.securityContext` | `test-framework` securityContext | `{}` | -| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | - - -### Example of extraVolumeMounts - -```yaml -- extraVolumeMounts: - - name: plugins - mountPath: /var/lib/grafana/plugins - subPath: configs/grafana/plugins - existingClaim: existing-grafana-claim - readOnly: false -``` - -## Import dashboards - -There are a few methods to import dashboards to Grafana. 
Below are some examples and explanations as to how to use each method: - -```yaml -dashboards: - default: - some-dashboard: - json: | - { - "annotations": - - ... - # Complete json file here - ... - - "title": "Some Dashboard", - "uid": "abcd1234", - "version": 1 - } - custom-dashboard: - # This is a path to a file inside the dashboards directory inside the chart directory - file: dashboards/custom-dashboard.json - prometheus-stats: - # Ref: https://grafana.com/dashboards/2 - gnetId: 2 - revision: 2 - datasource: Prometheus - local-dashboard: - url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json -``` - -## BASE64 dashboards - -Dashboards could be storaged in a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) -A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before save the file to disk. -If this entry is not set or is equals to false not decoding is applied to the file before saving it to disk. - -### Gerrit use case: -Gerrit API for download files has the following schema: https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content where {project-name} and -{file-id} usualy has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard -the url value is https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content - -## Sidecar for dashboards - -If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana -pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with -a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written -to a folder and accessed by grafana. 
Changes to the configmaps are monitored and the imported -dashboards are deleted/updated. - -A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside -one configmap is currently not properly mirrored in grafana. - -Example dashboard config: -``` -apiVersion: v1 -kind: ConfigMap -metadata: - name: sample-grafana-dashboard - labels: - grafana_dashboard: "1" -data: - k8s-dashboard.json: |- - [...] -``` - -## Sidecar for datasources - -If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana -pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and -filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in -those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, -the data sources in grafana can be imported. The secrets must be created before `helm install` so -that the datasources init container can list the secrets. - -Secrets are recommended over configmaps for this usecase because datasources usually contain private -data like usernames and passwords. Secrets are the more appropriate cluster ressource to manage those. - -Example datasource config adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): -``` -apiVersion: v1 -kind: Secret -metadata: - name: sample-grafana-datasource - labels: - grafana_datasource: "1" -type: Opaque -stringData: - datasource.yaml: |- - # config file version - apiVersion: 1 - - # list of datasources that should be deleted from the database - deleteDatasources: - - name: Graphite - orgId: 1 - - # list of datasources to insert/update depending - # whats available in the database - datasources: - # name of the datasource. Required - - name: Graphite - # datasource type. Required - type: graphite - # access mode. proxy or direct (Server or Browser in the UI). 
Required - access: proxy - # org id. will default to orgId 1 if not specified - orgId: 1 - # url - url: http://localhost:8080 - # database password, if used - password: - # database user, if used - user: - # database name, if used - database: - # enable/disable basic auth - basicAuth: - # basic auth username - basicAuthUser: - # basic auth password - basicAuthPassword: - # enable/disable with credentials headers - withCredentials: - # mark as default datasource. Max one per org - isDefault: - # fields that will be converted to json and stored in json_data - jsonData: - graphiteVersion: "1.1" - tlsAuth: true - tlsAuthWithCACert: true - # json object of data that will be encrypted. - secureJsonData: - tlsCACert: "..." - tlsClientCert: "..." - tlsClientKey: "..." - version: 1 - # allow users to edit datasources from the UI. - editable: false - -``` diff --git a/chart/charts/grafana/ci/default-values.yaml b/chart/charts/grafana/ci/default-values.yaml deleted file mode 100755 index fc2ba60..0000000 --- a/chart/charts/grafana/ci/default-values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/chart/charts/grafana/ci/with-dashboard-json-values.yaml b/chart/charts/grafana/ci/with-dashboard-json-values.yaml deleted file mode 100755 index e0c4e41..0000000 --- a/chart/charts/grafana/ci/with-dashboard-json-values.yaml +++ /dev/null @@ -1,53 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - # An empty but valid dashboard - json: | - { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "6.3.5" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "links": [], - "panels": [], - "schemaVersion": 19, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": ["5s"] - }, - "timezone": "", - "title": "Dummy Dashboard", - "uid": "IdcYQooWk", - "version": 1 - } - datasource: Prometheus diff --git a/chart/charts/grafana/ci/with-dashboard-values.yaml b/chart/charts/grafana/ci/with-dashboard-values.yaml deleted file mode 100755 index 7b662c5..0000000 --- a/chart/charts/grafana/ci/with-dashboard-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - gnetId: 10000 - revision: 1 - datasource: Prometheus -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'my-provider' - orgId: 1 - folder: '' - type: file - updateIntervalSeconds: 10 - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards/my-provider diff --git a/chart/charts/grafana/dashboards/custom-dashboard.json b/chart/charts/grafana/dashboards/custom-dashboard.json deleted file mode 100755 index 9e26dfe..0000000 --- a/chart/charts/grafana/dashboards/custom-dashboard.json +++ 
/dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/chart/charts/grafana/templates/NOTES.txt b/chart/charts/grafana/templates/NOTES.txt deleted file mode 100755 index 1193aa0..0000000 --- a/chart/charts/grafana/templates/NOTES.txt +++ /dev/null @@ -1,37 +0,0 @@ -1. Get your '{{ .Values.adminUser }}' user password by running: - - kubectl get secret --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo - -2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: - - {{ template "grafana.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local -{{ if .Values.ingress.enabled }} - From outside the cluster, the server URL(s) are: -{{- range .Values.ingress.hosts }} - http://{{ . }} -{{- end }} -{{ else }} - Get the Grafana URL to visit by running these commands in the same shell: -{{ if contains "NodePort" .Values.service.type -}} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{ else if contains "LoadBalancer" .Values.service.type -}} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "grafana.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - http://$SERVICE_IP:{{ .Values.service.port -}} -{{ else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 3000 -{{- end }} -{{- end }} - -3. Login with the password from step 1 and the username: {{ .Values.adminUser }} - -{{- if not .Values.persistence.enabled }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the Grafana pod is terminated. ##### -################################################################################# -{{- end }} diff --git a/chart/charts/grafana/templates/_helpers.tpl b/chart/charts/grafana/templates/_helpers.tpl deleted file mode 100755 index f6880cd..0000000 --- a/chart/charts/grafana/templates/_helpers.tpl +++ /dev/null @@ -1,51 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "grafana.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "grafana.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. 
-*/}} -{{- define "grafana.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create the name of the service account -*/}} -{{- define "grafana.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{- define "grafana.serviceAccountNameTest" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} -{{- else -}} - {{ default "default" .Values.serviceAccount.nameTest }} -{{- end -}} -{{- end -}} diff --git a/chart/charts/grafana/templates/_pod.tpl b/chart/charts/grafana/templates/_pod.tpl deleted file mode 100755 index 8018dd7..0000000 --- a/chart/charts/grafana/templates/_pod.tpl +++ /dev/null @@ -1,360 +0,0 @@ -{{- define "grafana.pod" -}} -{{- if .Values.schedulerName }} -schedulerName: "{{ .Values.schedulerName }}" -{{- end }} -serviceAccountName: {{ template "grafana.serviceAccountName" . 
}} -{{- if .Values.schedulerName }} -schedulerName: "{{ .Values.schedulerName }}" -{{- end }} -{{- if .Values.securityContext }} -securityContext: -{{ toYaml .Values.securityContext | indent 2 }} -{{- end }} -{{- if .Values.priorityClassName }} -priorityClassName: {{ .Values.priorityClassName }} -{{- end }} -{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.extraInitContainers) }} -initContainers: -{{- end }} -{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} - - name: init-chown-data - image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" - imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} - securityContext: - runAsUser: 0 - command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }}", "/var/lib/grafana"] - resources: -{{ toYaml .Values.initChownData.resources | indent 6 }} - volumeMounts: - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} -{{- end }} -{{- if .Values.dashboards }} - - name: download-dashboards - image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" - imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} - command: ["/bin/sh"] - args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ] - env: -{{- range $key, $value := .Values.downloadDashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" -{{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/download_dashboards.sh" - subPath: download_dashboards.sh - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ 
.readOnly }} - {{- end }} -{{- end }} -{{- if .Values.sidecar.datasources.enabled }} - - name: {{ template "grafana.name" . }}-sc-datasources - image: "{{ .Values.sidecar.image }}" - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: METHOD - value: LIST - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: "both" - {{- if .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.datasources.searchNamespace }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} -{{- if .Values.extraInitContainers }} -{{ toYaml .Values.extraInitContainers | indent 2 }} -{{- end }} -{{- if .Values.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end}} -{{- end }} -containers: -{{- if .Values.sidecar.dashboards.enabled }} - - name: {{ template "grafana.name" . }}-sc-dashboard - image: "{{ .Values.sidecar.image }}" - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: LABEL - value: "{{ .Values.sidecar.dashboards.label }}" - - name: FOLDER - value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }}" - - name: RESOURCE - value: "both" - {{- if .Values.sidecar.dashboards.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.dashboards.searchNamespace }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} - volumeMounts: - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} -{{- end}} - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.command }} - command: - {{- range .Values.command }} - - {{ . }} - {{- end }} - {{- end}} - volumeMounts: - - name: config - mountPath: "/etc/grafana/grafana.ini" - subPath: grafana.ini - {{- if .Values.ldap.enabled }} - - name: ldap - mountPath: "/etc/grafana/ldap.toml" - subPath: ldap.toml - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} -{{- if .Values.dashboards }} -{{- range $provider, $dashboards := .Values.dashboards }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} - - name: dashboards-{{ $provider }} - mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" - subPath: "{{ $key }}.json" -{{- end }} -{{- end }} -{{- end }} -{{- end -}} -{{- if .Values.dashboardsConfigMaps }} -{{- range keys .Values.dashboardsConfigMaps }} - - name: dashboards-{{ . }} - mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" -{{- end }} -{{- end }} -{{- if .Values.datasources }} - - name: config - mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" - subPath: datasources.yaml -{{- end }} -{{- if .Values.notifiers }} - - name: config - mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" - subPath: notifiers.yaml -{{- end }} -{{- if .Values.dashboardProviders }} - - name: config - mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" - subPath: dashboardproviders.yaml -{{- end }} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} - - name: sc-dashboard-provider - mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" - subPath: provider.yaml -{{- end}} -{{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - {{- end }} - ports: - - name: {{ .Values.service.portName }} - containerPort: {{ .Values.service.port }} - protocol: TCP - - name: {{ .Values.podPortName }} - containerPort: 3000 - protocol: TCP - env: - {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} - - name: GF_SECURITY_ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) 
}} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if not .Values.env.GF_SECURITY_ADMIN_PASSWORD }} - - name: GF_SECURITY_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if .Values.plugins }} - - name: GF_INSTALL_PLUGINS - valueFrom: - configMapKeyRef: - name: {{ template "grafana.fullname" . }} - key: plugins - {{- end }} - {{- if .Values.smtp.existingSecret }} - - name: GF_SMTP_USER - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.userKey | default "user" }} - - name: GF_SMTP_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.passwordKey | default "password" }} - {{- end }} -{{- range $key, $value := .Values.env }} - - name: "{{ $key }}" - value: "{{ $value }}" -{{- end }} - {{- if .Values.envFromSecret }} - envFrom: - - secretRef: - name: {{ .Values.envFromSecret }} - {{- end }} - {{- if .Values.envRenderSecret }} - envFrom: - - secretRef: - name: {{ template "grafana.fullname" . }}-env - {{- end }} - livenessProbe: -{{ toYaml .Values.livenessProbe | indent 6 }} - readinessProbe: -{{ toYaml .Values.readinessProbe | indent 6 }} - resources: -{{ toYaml .Values.resources | indent 6 }} -{{- if .Values.extraContainers }} -{{ toYaml .Values.extraContainers | indent 2}} -{{- end }} -{{- with .Values.nodeSelector }} -nodeSelector: -{{ toYaml . | indent 2 }} -{{- end }} -{{- with .Values.affinity }} -affinity: -{{ toYaml . | indent 2 }} -{{- end }} -{{- with .Values.tolerations }} -tolerations: -{{ toYaml . | indent 2 }} -{{- end }} -volumes: - - name: config - configMap: - name: {{ template "grafana.fullname" . 
}} -{{- range .Values.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} -{{- end }} - {{- if .Values.dashboards }} - {{- range keys .Values.dashboards }} - - name: dashboards-{{ . }} - configMap: - name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} - {{- end }} - {{- end }} - {{- if .Values.dashboardsConfigMaps }} - {{ $root := . }} - {{- range $provider, $name := .Values.dashboardsConfigMaps }} - - name: dashboards-{{ $provider }} - configMap: - name: {{ tpl $name $root }} - {{- end }} - {{- end }} - {{- if .Values.ldap.enabled }} - - name: ldap - secret: - {{- if .Values.ldap.existingSecret }} - secretName: {{ .Values.ldap.existingSecret }} - {{- else }} - secretName: {{ template "grafana.fullname" . }} - {{- end }} - items: - - key: ldap-toml - path: ldap.toml - {{- end }} -{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} - - name: storage - persistentVolumeClaim: - claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} -{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} -# nothing -{{- else }} - - name: storage - emptyDir: {} -{{- end -}} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - emptyDir: {} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-provider - configMap: - name: {{ template "grafana.fullname" . 
}}-config-dashboards -{{- end }} -{{- end }} -{{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - emptyDir: {} -{{- end -}} -{{- range .Values.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - defaultMode: {{ .defaultMode }} -{{- end }} -{{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - persistentVolumeClaim: - claimName: {{ .existingClaim }} -{{- end }} -{{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - emptyDir: {} -{{- end -}} -{{- end }} diff --git a/chart/charts/grafana/templates/clusterrole.yaml b/chart/charts/grafana/templates/clusterrole.yaml deleted file mode 100755 index d141280..0000000 --- a/chart/charts/grafana/templates/clusterrole.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} - name: {{ template "grafana.fullname" . }}-clusterrole -{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }} -rules: -{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} -- apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] -{{- end}} -{{- with .Values.rbac.extraClusterRoleRules }} -{{ toYaml . 
| indent 0 }} -{{- end}} -{{- else }} -rules: [] -{{- end}} -{{- end}} diff --git a/chart/charts/grafana/templates/clusterrolebinding.yaml b/chart/charts/grafana/templates/clusterrolebinding.yaml deleted file mode 100755 index 0ffe9ff..0000000 --- a/chart/charts/grafana/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "grafana.fullname" . }}-clusterrolebinding - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -subjects: - - kind: ServiceAccount - name: {{ template "grafana.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ template "grafana.fullname" . }}-clusterrole - apiGroup: rbac.authorization.k8s.io -{{- end -}} diff --git a/chart/charts/grafana/templates/configmap-dashboard-provider.yaml b/chart/charts/grafana/templates/configmap-dashboard-provider.yaml deleted file mode 100755 index c65e415..0000000 --- a/chart/charts/grafana/templates/configmap-dashboard-provider.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if .Values.sidecar.dashboards.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} - name: {{ template "grafana.fullname" . 
}}-config-dashboards - namespace: {{ .Release.Namespace }} -data: - provider.yaml: |- - apiVersion: 1 - providers: - - name: '{{ .Values.sidecar.dashboards.provider.name }}' - orgId: {{ .Values.sidecar.dashboards.provider.orgid }} - folder: '{{ .Values.sidecar.dashboards.provider.folder }}' - type: {{ .Values.sidecar.dashboards.provider.type }} - disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} - options: - path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} -{{- end}} diff --git a/chart/charts/grafana/templates/configmap.yaml b/chart/charts/grafana/templates/configmap.yaml deleted file mode 100755 index d24d0c8..0000000 --- a/chart/charts/grafana/templates/configmap.yaml +++ /dev/null @@ -1,72 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -data: -{{- if .Values.plugins }} - plugins: {{ join "," .Values.plugins }} -{{- end }} - grafana.ini: | -{{- range $key, $value := index .Values "grafana.ini" }} - [{{ $key }}] - {{- range $elem, $elemVal := $value }} - {{ $elem }} = {{ $elemVal }} - {{- end }} -{{- end }} - -{{- if .Values.datasources }} -{{ $root := . 
}} - {{- range $key, $value := .Values.datasources }} - {{ $key }}: | -{{ tpl (toYaml $value | indent 4) $root }} - {{- end -}} -{{- end -}} - -{{- if .Values.notifiers }} - {{- range $key, $value := .Values.notifiers }} - {{ $key }}: | -{{ toYaml $value | indent 4 }} - {{- end -}} -{{- end -}} - -{{- if .Values.dashboardProviders }} - {{- range $key, $value := .Values.dashboardProviders }} - {{ $key }}: | -{{ toYaml $value | indent 4 }} - {{- end -}} -{{- end -}} - -{{- if .Values.dashboards }} - download_dashboards.sh: | - #!/usr/bin/env sh - set -euf - {{- if .Values.dashboardProviders }} - {{- range $key, $value := .Values.dashboardProviders }} - {{- range $value.providers }} - mkdir -p {{ .options.path }} - {{- end }} - {{- end }} - {{- end }} - - {{- range $provider, $dashboards := .Values.dashboards }} - {{- range $key, $value := $dashboards }} - {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} - curl -sk \ - --connect-timeout 60 \ - --max-time 60 \ - {{- if not $value.b64content }} - -H "Accept: application/json" \ - -H "Content-Type: application/json;charset=UTF-8" \ - {{- end }} - {{- if $value.url -}}{{ $value.url }}{{- else -}} https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download{{- end -}}{{ if $value.datasource }}| sed 's|\"datasource\":[^,]*|\"datasource\": \"{{ $value.datasource }}\"|g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \ - > /var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json - {{- end -}} - {{- end }} - {{- end }} -{{- end }} diff --git a/chart/charts/grafana/templates/dashboards-json-configmap.yaml b/chart/charts/grafana/templates/dashboards-json-configmap.yaml deleted file mode 100755 index 8fb1396..0000000 --- a/chart/charts/grafana/templates/dashboards-json-configmap.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- if .Values.dashboards }} -{{ $files := .Files }} -{{- range $provider, $dashboards := 
.Values.dashboards }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} - namespace: {{ $.Release.Namespace }} - labels: - app: {{ template "grafana.name" $ }} - chart: {{ template "grafana.chart" $ }} - release: {{ $.Release.Name }} - heritage: {{ $.Release.Service }} - dashboard-provider: {{ $provider }} -{{- if $dashboards }} -data: -{{- $dashboardFound := false }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} -{{- $dashboardFound = true }} -{{ print $key | indent 2 }}.json: -{{- if hasKey $value "json" }} - |- -{{ $value.json | indent 6 }} -{{- end }} -{{- if hasKey $value "file" }} -{{ toYaml ( $files.Get $value.file ) | indent 4}} -{{- end }} -{{- end }} -{{- end }} -{{- if not $dashboardFound }} - {} -{{- end }} -{{- end }} ---- -{{- end }} - -{{- end }} diff --git a/chart/charts/grafana/templates/deployment.yaml b/chart/charts/grafana/templates/deployment.yaml deleted file mode 100755 index ce94d84..0000000 --- a/chart/charts/grafana/templates/deployment.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc")) }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- if .Values.labels }} -{{ toYaml .Values.labels | indent 4 }} -{{- end }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ template "grafana.name" . }} - release: {{ .Release.Name }} -{{- with .Values.deploymentStrategy }} - strategy: -{{ toYaml . | trim | indent 4 }} -{{- end }} - template: - metadata: - labels: - app: {{ template "grafana.name" . 
}} - release: {{ .Release.Name }} -{{- with .Values.podLabels }} -{{ toYaml . | indent 8 }} -{{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} -{{- if not .Values.admin.existingSecret }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} -{{- end }} -{{- with .Values.podAnnotations }} -{{ toYaml . | indent 8 }} -{{- end }} - spec: - {{- include "grafana.pod" . | nindent 6 }} -{{- end }} \ No newline at end of file diff --git a/chart/charts/grafana/templates/headless-service.yaml b/chart/charts/grafana/templates/headless-service.yaml deleted file mode 100755 index c0c182a..0000000 --- a/chart/charts/grafana/templates/headless-service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "grafana.fullname" . }}-headless - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - clusterIP: None - selector: - app: {{ template "grafana.name" . }} - release: {{ .Release.Name }} - type: ClusterIP -{{- end }} diff --git a/chart/charts/grafana/templates/ingress.yaml b/chart/charts/grafana/templates/ingress.yaml deleted file mode 100755 index 6077c10..0000000 --- a/chart/charts/grafana/templates/ingress.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "grafana.fullname" . 
-}} -{{- $servicePort := .Values.service.port -}} -{{- $ingressPath := .Values.ingress.path -}} -{{- $extraPaths := .Values.ingress.extraPaths -}} -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: {{ $fullName }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- if .Values.ingress.labels }} -{{ toYaml .Values.ingress.labels | indent 4 }} -{{- end }} -{{- with .Values.ingress.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: -{{- if .Values.ingress.tls }} - tls: -{{ toYaml .Values.ingress.tls | indent 4 }} -{{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ . }} - http: - paths: -{{ if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: {{ $ingressPath }} - backend: - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} -{{- end }} diff --git a/chart/charts/grafana/templates/poddisruptionbudget.yaml b/chart/charts/grafana/templates/poddisruptionbudget.yaml deleted file mode 100755 index 60f58bf..0000000 --- a/chart/charts/grafana/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.podDisruptionBudget }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "grafana.name" . }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- if .Values.labels }} -{{ toYaml .Values.labels | indent 4 }} -{{- end }} -spec: -{{- if .Values.podDisruptionBudget.minAvailble }} - minAvailable: {{ .Values.podDisruptionBudget.minAvailble }} -{{- end }} -{{- if .Values.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} -{{- end }} - selector: - matchLabels: - app: {{ template "grafana.name" . 
}} - release: {{ .Release.Name }} -{{- end }} diff --git a/chart/charts/grafana/templates/podsecuritypolicy.yaml b/chart/charts/grafana/templates/podsecuritypolicy.yaml deleted file mode 100755 index a1d87c8..0000000 --- a/chart/charts/grafana/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,55 +0,0 @@ -{{- if .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - {{- if .Values.rbac.pspUseAppArmor }} - apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' - apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - {{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - # Default set from Docker, without DAC_OVERRIDE or CHOWN - - FOWNER - - FSETID - - KILL - - SETGID - - SETUID - - SETPCAP - - NET_BIND_SERVICE - - NET_RAW - - SYS_CHROOT - - MKNOD - - AUDIT_WRITE - - SETFCAP - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'RunAsAny' - fsGroup: - rule: 'RunAsAny' - readOnlyRootFilesystem: false -{{- end }} diff --git a/chart/charts/grafana/templates/pvc.yaml b/chart/charts/grafana/templates/pvc.yaml deleted file mode 100755 index 780de6c..0000000 --- a/chart/charts/grafana/templates/pvc.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq 
.Values.persistence.type "pvc")}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - {{- with .Values.persistence.annotations }} - annotations: -{{ toYaml . | indent 4 }} - {{- end }} - {{- with .Values.persistence.finalizers }} - finalizers: -{{ toYaml . | indent 4 }} - {{- end }} -spec: - accessModes: - {{- range .Values.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.persistence.size | quote }} - storageClassName: {{ .Values.persistence.storageClassName }} -{{- end -}} diff --git a/chart/charts/grafana/templates/role.yaml b/chart/charts/grafana/templates/role.yaml deleted file mode 100755 index 2653f6c..0000000 --- a/chart/charts/grafana/templates/role.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }} -rules: -{{- if .Values.rbac.pspEnabled }} -- apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ template "grafana.fullname" . 
}}] -{{- end }} -{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} -- apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] -{{- end }} -{{- with .Values.rbac.extraRoleRules }} -{{ toYaml . | indent 0 }} -{{- end}} -{{- else }} -rules: [] -{{- end }} -{{- end }} diff --git a/chart/charts/grafana/templates/rolebinding.yaml b/chart/charts/grafana/templates/rolebinding.yaml deleted file mode 100755 index 680be28..0000000 --- a/chart/charts/grafana/templates/rolebinding.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "grafana.fullname" . }} -subjects: -- kind: ServiceAccount - name: {{ template "grafana.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- if .Values.rbac.namespaced }} -roleRef: - kind: Role - name: {{ template "grafana.fullname" . }} - apiGroup: rbac.authorization.k8s.io -{{- end }} -{{- end -}} diff --git a/chart/charts/grafana/templates/secret-env.yaml b/chart/charts/grafana/templates/secret-env.yaml deleted file mode 100755 index 36c14b5..0000000 --- a/chart/charts/grafana/templates/secret-env.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.envRenderSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "grafana.fullname" . }}-env - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . 
}} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -type: Opaque -data: -{{- range $key, $val := .Values.envRenderSecret }} - {{ $key }}: {{ $val | b64enc | quote }} -{{- end -}} -{{- end }} diff --git a/chart/charts/grafana/templates/secret.yaml b/chart/charts/grafana/templates/secret.yaml deleted file mode 100755 index 4f02fa3..0000000 --- a/chart/charts/grafana/templates/secret.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if not .Values.admin.existingSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -type: Opaque -data: - admin-user: {{ .Values.adminUser | b64enc | quote }} - {{- if .Values.adminPassword }} - admin-password: {{ .Values.adminPassword | b64enc | quote }} - {{- else }} - admin-password: {{ randAlphaNum 40 | b64enc | quote }} - {{- end }} - {{- if not .Values.ldap.existingSecret }} - ldap-toml: {{ .Values.ldap.config | b64enc | quote }} - {{- end }} -{{- end }} diff --git a/chart/charts/grafana/templates/service.yaml b/chart/charts/grafana/templates/service.yaml deleted file mode 100755 index f18df46..0000000 --- a/chart/charts/grafana/templates/service.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- if .Values.service.labels }} -{{ toYaml .Values.service.labels | indent 4 }} -{{- end }} -{{- with .Values.service.annotations }} - annotations: -{{ toYaml . 
| indent 4 }} -{{- end }} -spec: -{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} - type: ClusterIP - {{- if .Values.service.clusterIP }} - clusterIP: {{ .Values.service.clusterIP }} - {{end}} -{{- else if eq .Values.service.type "LoadBalancer" }} - type: {{ .Values.service.type }} - {{- if .Values.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} - {{- end }} - {{- if .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: -{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} - {{- end -}} -{{- else }} - type: {{ .Values.service.type }} -{{- end }} -{{- if .Values.service.externalIPs }} - externalIPs: -{{ toYaml .Values.service.externalIPs | indent 4 }} -{{- end }} - ports: - - name: {{ .Values.service.portName }} - port: {{ .Values.service.port }} - protocol: TCP - targetPort: {{ .Values.service.targetPort }} -{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} - nodePort: {{.Values.service.nodePort}} -{{ end }} - selector: - app: {{ template "grafana.name" . }} - release: {{ .Release.Name }} diff --git a/chart/charts/grafana/templates/serviceaccount.yaml b/chart/charts/grafana/templates/serviceaccount.yaml deleted file mode 100755 index 37a8e6a..0000000 --- a/chart/charts/grafana/templates/serviceaccount.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app: {{ template "grafana.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -{{- with .Values.serviceAccount.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} - name: {{ template "grafana.serviceAccountName" . 
}} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/chart/charts/grafana/templates/statefulset.yaml b/chart/charts/grafana/templates/statefulset.yaml deleted file mode 100755 index ebe3c2c..0000000 --- a/chart/charts/grafana/templates/statefulset.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ template "grafana.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ template "grafana.name" . }} - release: {{ .Release.Name }} - serviceName: {{ template "grafana.fullname" . }}-headless - template: - metadata: - labels: - app: {{ template "grafana.name" . }} - release: {{ .Release.Name }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} -{{- if not .Values.admin.existingSecret }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} -{{- end }} -{{- with .Values.podAnnotations }} -{{ toYaml . | indent 8 }} -{{- end }} - spec: - {{- include "grafana.pod" . 
| nindent 6 }} - volumeClaimTemplates: - - metadata: - name: storage - spec: - accessModes: {{ .Values.persistence.accessModes }} - storageClassName: {{ .Values.persistence.storageClassName }} - resources: - requests: - storage: {{ .Values.persistence.size }} -{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-configmap.yaml b/chart/charts/grafana/templates/tests/test-configmap.yaml deleted file mode 100755 index bf5bde3..0000000 --- a/chart/charts/grafana/templates/tests/test-configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.testFramework.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "grafana.fullname" . }}-test - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - heritage: "{{ .Release.Service }}" - release: "{{ .Release.Name }}" -data: - run.sh: |- - @test "Test Health" { - url="http://{{ template "grafana.fullname" . }}/api/health" - - code=$(curl -s -o /dev/null -I -w "%{http_code}" $url) - [ "$code" == "200" ] - } -{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml b/chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml deleted file mode 100755 index 662d4a2..0000000 --- a/chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "grafana.fullname" . }}-test - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . 
}} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - allowPrivilegeEscalation: true - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - fsGroup: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - runAsUser: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - projected - - secret -{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-role.yaml b/chart/charts/grafana/templates/tests/test-role.yaml deleted file mode 100755 index 9d34fbd..0000000 --- a/chart/charts/grafana/templates/tests/test-role.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ template "grafana.fullname" . }}-test - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ template "grafana.fullname" . }}-test] -{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-rolebinding.yaml b/chart/charts/grafana/templates/tests/test-rolebinding.yaml deleted file mode 100755 index 0a11db2..0000000 --- a/chart/charts/grafana/templates/tests/test-rolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "grafana.fullname" . }}-test - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "grafana.name" . 
}} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "grafana.fullname" . }}-test -subjects: -- kind: ServiceAccount - name: {{ template "grafana.serviceAccountNameTest" . }} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/chart/charts/grafana/templates/tests/test-serviceaccount.yaml b/chart/charts/grafana/templates/tests/test-serviceaccount.yaml deleted file mode 100755 index e6a46f9..0000000 --- a/chart/charts/grafana/templates/tests/test-serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app: {{ template "grafana.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - name: {{ template "grafana.serviceAccountNameTest" . }} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/chart/charts/grafana/templates/tests/test.yaml b/chart/charts/grafana/templates/tests/test.yaml deleted file mode 100755 index e0e4883..0000000 --- a/chart/charts/grafana/templates/tests/test.yaml +++ /dev/null @@ -1,67 +0,0 @@ -{{- if .Values.testFramework.enabled }} -apiVersion: v1 -kind: Pod -metadata: - name: {{ template "grafana.fullname" . }}-test - labels: - app: {{ template "grafana.fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - heritage: "{{ .Release.Service }}" - release: "{{ .Release.Name }}" - annotations: - "helm.sh/hook": test-success - namespace: {{ .Release.Namespace }} -spec: - serviceAccountName: {{ template "grafana.serviceAccountNameTest" . 
}} - {{- if .Values.testFramework.securityContext }} - securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }} - {{- end }} - initContainers: - - name: test-framework - image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" - command: - - "bash" - - "-c" - - | - set -ex - # copy bats to tools dir - cp -R /usr/local/libexec/ /tools/bats/ - volumeMounts: - - mountPath: /tools - name: tools - {{- if .Values.image.pullSecrets }} - imagePullSecrets: - {{- range .Values.image.pullSecrets }} - - name: {{ . }} - {{- end}} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 4 }} - {{- end }} - {{- with .Values.affinity }} - affinity: -{{ toYaml . | indent 4 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 4 }} - {{- end }} - containers: - - name: {{ .Release.Name }}-test - image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" - command: ["/tools/bats/bats", "-t", "/tests/run.sh"] - volumeMounts: - - mountPath: /tests - name: tests - readOnly: true - - mountPath: /tools - name: tools - volumes: - - name: tests - configMap: - name: {{ template "grafana.fullname" . 
}}-test - - name: tools - emptyDir: {} - restartPolicy: Never -{{- end }} diff --git a/chart/charts/grafana/values.yaml b/chart/charts/grafana/values.yaml deleted file mode 100755 index 45cd9fc..0000000 --- a/chart/charts/grafana/values.yaml +++ /dev/null @@ -1,464 +0,0 @@ -rbac: - create: true - pspEnabled: true - pspUseAppArmor: true - namespaced: false - extraRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] - extraClusterRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] -serviceAccount: - create: true - name: - nameTest: -# annotations: - -replicas: 1 - -## See `kubectl explain poddisruptionbudget.spec` for more -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ -podDisruptionBudget: {} -# minAvailble: 1 -# maxUnavailable: 1 - -## See `kubectl explain deployment.spec.strategy` for more -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy -deploymentStrategy: - type: RollingUpdate - -readinessProbe: - httpGet: - path: /api/health - port: 3000 - -livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 60 - timeoutSeconds: 30 - failureThreshold: 10 - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: "default-scheduler" - -image: - repository: grafana/grafana - tag: 6.4.2 - pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistrKeySecretName - -testFramework: - enabled: true - image: "dduportal/bats" - tag: "0.4.0" - securityContext: {} - -securityContext: - runAsUser: 472 - fsGroup: 472 - - -extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # subPath: certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - -extraEmptyDirMounts: [] - # - name: provisioning-notifiers - # mountPath: /etc/grafana/provisioning/notifiers - - -## Assign a PriorityClassName to pods if set -# priorityClassName: - -downloadDashboardsImage: - repository: appropriate/curl - tag: latest - pullPolicy: IfNotPresent - -downloadDashboards: - env: {} - -## Pod Annotations -# podAnnotations: {} - -## Pod Labels -# podLabels: {} - -podPortName: grafana - -## Deployment annotations -# annotations: {} - -## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). -## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. -## ref: http://kubernetes.io/docs/user-guide/services/ -## -service: - type: ClusterIP - port: 80 - targetPort: 3000 - # targetPort: 4181 To be used with a proxy extraContainer - annotations: {} - labels: {} - portName: service - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - labels: {} - path: / - hosts: - - chart-example.local - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi - -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -# -nodeSelector: {} - -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -extraInitContainers: [] - -## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod -extraContainers: | -# - name: proxy -# image: quay.io/gambol99/keycloak-proxy:latest -# args: -# - -provider=github -# - -client-id= -# - -client-secret= -# - -github-org= -# - -email-domain=* -# - -cookie-secret= -# - -http-address=http://0.0.0.0:4181 -# - -upstream-url=http://127.0.0.1:3000 -# ports: -# - name: proxy-web -# containerPort: 4181 - -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -persistence: - type: pvc - enabled: false - # storageClassName: default - accessModes: - - ReadWriteOnce - size: 10Gi - # annotations: {} - finalizers: - - kubernetes.io/pvc-protection - # subPath: "" - # existingClaim: - -initChownData: - ## If false, data ownership will not be reset at startup - ## This allows the prometheus-server to be run with an arbitrary user - ## - enabled: true - - ## initChownData container image - ## - image: - repository: busybox - tag: "1.30" - pullPolicy: IfNotPresent - - ## initChownData resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 
100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - -# Administrator credentials when not using an existing secret (see below) -adminUser: admin -# adminPassword: strongpassword - -# Use an existing secret for the admin user. -admin: - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - -## Define command to be executed at startup by grafana container -## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) -## Default is "run.sh" as defined in grafana's Dockerfile -# command: -# - "sh" -# - "/run.sh" - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: - -## Extra environment variables that will be pass onto deployment pods -env: {} - -## The name of a secret in the same kubernetes namespace which contain values to be added to the environment -## This can be useful for auth tokens, etc -envFromSecret: "" - -## Sensible environment variables that will be rendered as new secret object -## This can be useful for auth tokens, etc -envRenderSecret: {} - -## Additional grafana server secret mounts -# Defines additional mounts with secrets. Secrets must be manually created in the namespace. -extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # secretName: grafana-secret-files - # readOnly: true - -## Additional grafana server volume mounts -# Defines additional volume mounts. -extraVolumeMounts: [] - # - name: extra-volume - # mountPath: /mnt/volume - # readOnly: true - # existingClaim: volume-claim - -## Pass the plugins you want installed as a list. 
-## -plugins: [] - # - digrich-bubblechart-panel - # - grafana-clock-panel - -## Configure grafana datasources -## ref: http://docs.grafana.org/administration/provisioning/#datasources -## -datasources: {} -# datasources.yaml: -# apiVersion: 1 -# datasources: -# - name: Prometheus -# type: prometheus -# url: http://prometheus-prometheus-server -# access: proxy -# isDefault: true - -## Configure notifiers -## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels -## -notifiers: {} -# notifiers.yaml: -# notifiers: -# - name: email-notifier -# type: email -# uid: email1 -# # either: -# org_id: 1 -# # or -# org_name: Main Org. -# is_default: true -# settings: -# addresses: an_email_address@example.com -# delete_notifiers: - -## Configure grafana dashboard providers -## ref: http://docs.grafana.org/administration/provisioning/#dashboards -## -## `path` must be /var/lib/grafana/dashboards/ -## -dashboardProviders: {} -# dashboardproviders.yaml: -# apiVersion: 1 -# providers: -# - name: 'default' -# orgId: 1 -# folder: '' -# type: file -# disableDeletion: false -# editable: true -# options: -# path: /var/lib/grafana/dashboards/default - -## Configure grafana dashboard to import -## NOTE: To use dashboards you must also enable/configure dashboardProviders -## ref: https://grafana.com/dashboards -## -## dashboards per provider, use provider name as key. -## -dashboards: {} - # default: - # some-dashboard: - # json: | - # $RAW_JSON - # custom-dashboard: - # file: dashboards/custom-dashboard.json - # prometheus-stats: - # gnetId: 2 - # revision: 2 - # datasource: Prometheus - # local-dashboard: - # url: https://example.com/repository/test.json - # local-dashboard-base64: - # url: https://example.com/repository/test-b64.json - # b64content: true - -## Reference to external ConfigMap per provider. Use provider name as key and ConfiMap name as value. 
-## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. -## ConfigMap data example: -## -## data: -## example-dashboard.json: | -## RAW_JSON -## -dashboardsConfigMaps: {} -# default: "" - -## Grafana's primary configuration -## NOTE: values in map will be converted to ini format -## ref: http://docs.grafana.org/installation/configuration/ -## -grafana.ini: - paths: - data: /var/lib/grafana/data - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net -## LDAP Authentication can be enabled with the following values on grafana.ini -## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - -## Grafana's LDAP configuration -## Templated by the template in _helpers.tpl -## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled -## ref: http://docs.grafana.org/installation/configuration/#auth-ldap -## ref: http://docs.grafana.org/installation/ldap/#configuration -ldap: - enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. - existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - -## Grafana's SMTP configuration -## NOTE: To enable, grafana.ini must be configured with smtp.enabled -## ref: http://docs.grafana.org/installation/configuration/#smtp -smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. 
- existingSecret: "" - userKey: "user" - passwordKey: "password" - -## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders -## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards -sidecar: - image: kiwigrid/k8s-sidecar:0.1.20 - imagePullPolicy: IfNotPresent - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - # skipTlsVerify Set to true to skip tls verification for kube api calls - # skipTlsVerify: true - dashboards: - enabled: false - # label that the configmaps with dashboards are marked with - label: grafana_dashboard - # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) - folder: /tmp/dashboards - # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead - defaultFolderName: null - # If specified, the sidecar will search for dashboard config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # provider configuration that lets grafana manage the dashboards - provider: - # name of the provider, should be unique - name: sidecarProvider - # orgid as configured in grafana - orgid: 1 - # folder in which the dashboards should be imported in grafana - folder: '' - # type of the provider - type: file - # disableDelete to activate a import-only behaviour - disableDelete: false - datasources: - enabled: false - # label that the configmaps with datasources are marked with - label: grafana_datasource - # If specified, the sidecar will search for datasource config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. 
- # It's also possible to specify ALL to search in all namespaces - searchNamespace: null diff --git a/chart/charts/postgresql-8.9.4.tgz b/chart/charts/postgresql-8.9.4.tgz new file mode 100644 index 0000000000000000000000000000000000000000..bdd82702d40e02e0dfe3efb27638401b7b2b3ce8 GIT binary patch literal 33145 zcmV*GKxw}piwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POwyciXnID2|_>^;gVQdT#92ljvXI6N$+0S z{aO$SN!TF?769$2alU{1XW>PHFNu=nM|RD%cVm%w3PP%b76^FPA=-V`y> zo1rNDkM%y=+uPeOpFdasZf|dw|J~Vn_T)cyp1yegbbI^R)2A>0V|(Z6v+bS#fbBKI zQTwD^Aod^I*Y2x4xNqb^3Hg8-Co~?w#ZCt$N%nJRr}whA-3c)F8A*hCr)UJml);H6 zoU{LVvjr#yI{Fv(g~xD+WJwf4fCL&Ljv+!Zn&1e>0{Dyz9CZLT-k$EiIy&2gOCqMS zK8}(p;)DZ@0e==a=43=dB4&W%K)&Px#Q|ag_k0>9AwjW^dmS84NQ?(Cp&|)8QySp` zOod4Jpx?i|yzJ%V-J@*M?+{;(!j>2j5u=Fod>Zu;=UDJQ7s&s_`_8f;(Zu=bsRG{4 z&i3b>otM2No^(Fp*(GHGA9Os(3pzhKUWSa02l^YsNl1K!tBwbLm~w$xCqg6^h{Tww z)^LPKH~^p8m;cv+xC0O)Io?Tn5H9zCj)`~*9)ls2{X^Be_;ve$KFr9XPvvI@P}vK;}H4<3RBE)Kmg3=|IeOn zKiw|#{}<1mJn;Y9cpg21*C8FD5cbtV1`?qOh6FJbVSyR%JbDBtAx0c;K}-b(F+~Ez zlyJBtVF>gBGe!dW9#<-F0g3%E4M;qJ04F#OaO@Mzw8Wruoml7`_K_8As(Kpox6paTxy8^D*Zoksx0 zCI%4A?BB;I!g&d&$$@$c;5mi=j%WI7R*MVD&;;*=h;!NOne)3NM{5|lBU;S>4M&w_ z7#`>HOY&sRe-&$H8FO}4wLlZzmt$5<$sx0BXr?Zq>TNwLcf(6GV_O0O9aB zL;`d1cnfgIF>owY@x$>k6(|!$Z|A%wx<=?&FT#2=2=cQ;tJbY#CSB% zC{VZ@WXj~8aF50Ue>NQYmFN5o!Ex559=ZM5wOIVp%YQ`|Mj4i^pV)5@)lYl~jSuAA*_op~WL(H}S_a;5)N)7N+ zw^z~Q8dn_(BrBA>&od?hXCX#uC`b}wFY8AudXMss}^$LF$ z3V<4PWs*Uv5FG#`Nd~a9y}i>ZAXB}`A!ZlE$FS@BQYKv=;QT$;O?yZ;-S3zermI?c zCb^9i&~&V4nqd^o0DmT2$OUIw0Mleo>P_l{CT_Lt1HxsaTyYpl!ctfA%T+boCNx$n zM%!(w(`ck#3#9lo_HiP3KT%cs5-Hi-nRlIpCvwzUVz}zko_fQ9(GV+Ea8^K-EOLtf zNQvCdDcj)LNr#P)U(=dn_o~Hks%Su#I!`r6O%Ugolm+Ik{VA7ah%WG{ZjcXPC)ZW^ zEcU02#x&)M3lael4e$VHyajYT-U5arD)2cA zK1*XkBJ61r^Da@?n~ErW^!ojt_uK9p@6Ex7gEtcF9JoCN+W@+BJnri5eql7yk~)n> zT0T>=A~) z74h2v*}+-fNRD{313b+eg+0x!K;qZFlSJH;T+#{@H4eEvEGKi}0!5WX=}k^6ilad^ zJ9plhg^_KV71zw@1~yWT1Gtn@d6>_gWHu6_p_NhDx8Y`GMJ;3h?2ke^>PJX%>(hhX 
z{iB0k6g<)kI-YpQv&LXg>5Vk@#)NUs<-xj>9ap4xdNXWRn$3khpDI-vYu;+9B$9!B>~p$bNo;u{cl13*d`iV*A*Hh?-y znuJ0@wre+JQV91Z6$_g)+dOQIv#upl4q&bP!*tA94$%DgT$ZZ}kh%n!*6O zBgDEINqL>@FZlSof+kZ)q*ga3lOvSa?Sv*j1Dphk7hC&)7~eZG1`4y=v{oD@xllY|1&SZyVzSaBBKmun3Bg0iYk*`)aR+CtywLaMPP z9Z2avW$f*_C&3h1`trCDQT6yTxucl<;CX`9wd`rN2o|8 zaOJ5Z6s48}3iba5OSw?*r<9BAwJN0ElnO+%+$Ukim5Q|U)05uLiywR2z3u*!XAlL? zwRp@psWNWNTi#l8epfvYYoK_wy{Hbc^QhQ#2EkwfKurrK`yi{9S~J{T`*m=a3MBg> zG)7EW8%+VZ!bVs!FhBy7_=XXsqRp(^CdxBz77C69n<-1Pr-AAPg*Df)z@F)L^hsMZ zJC|?Ib>lrg9l(`2f`do@t3M)fpHJ-z>`y6l_bG51VKF7~q}Q#(X^X)&&UFFCGR!69 zYo)JMpRgfWnxxmS0{)EsRG4$&62X}l%HWgzDk{|05DtM|I3$+JFpUFkJ4t8&F~)(yp0&WEYs39|>i`{>ku|e71$sasERxwf$rJgTz7MM`kn` zsKycVr{_ormUXAzjEP*!_MJb)QY%$IM9AIELt%HK%~r)xnzsXO7dW9@@9a;# z&IN(DJax^8#8l>9jD;i^5>M>A{v+9><P zV^}?h%LYX_43#JtTF6wV`b?|{WSte~M0*;@orXpi7&j=;eo&7d!Qt3ixVH*EQ`+?V zU+$~FneygLy#D*amiz?& zTOYs*g{E%%8xz5o9fn@R%}e ziCC$_BMc*&#(^a-Y=ko5fa^K;7I3CwW#w5bv&TWE&&;jG0Pml%HDW9Tav1o6C|d!_Cf~^ zo{q9jjdf4RSHI>Af@ZJ~Iq(bDpvZIQ1R7&F`Jix?9h+DQoI%FZk%p_=RbZ5S54)b! z>a-j?q5(Dz86KQs6#T}Bz;9zE$T;~E58%hwq(-}te!o`<~SDpM;a)PYp4k3og$cnO!GXY%0cJ7`fR3F)NzSyx-!?Y0bf8{=M|+q|O1*6p-7;yen|JlV6Jc=-Dw? 
zSG!EudGhp`V%-Mjd?wzyuKKk*cP&a+i*2^; zs>+r!aZ1AH`Upn(jZ-FfXSy|WZmL+3<34AGYIaTe0%8Sk9c83jvm!Uh6Bz#Aa>>4nul*@L3yPtu02iLdJkU zCn52PaF?Hr26v`faPULE|Aw9Z+ z2VXwn*+A9X0-8v%Ev5QlyrpeClH1zme*dauP$Ov8c#yM7?Q^6}ky@z>$pwz(I+LWs zFbatJl`uNOtqcZjy})fpUe<}HJLx}iDyuh}9b5QB{;o5+N$rN9a36SOXBWz8!*EYQBYOO4=|5h{Dac4%3 zYk5RFXzq4xY}-4rr*d0Sjs;nW+!6YOflI=c z5`@NFFhq=;QB7HOz<9$szCV_L@^Cv@eqet|)`~(b$?(^7f*lw7H)xU*8mzrQ5Lc4H_jTu8?ck7lyI#KU)*j=z5G%F)0hj+vJLBbHG7T~<6iBEy^L>W5wP36A4_rfKUE@0OTp_8@uZb_ z(tX7zUAGJ4U~E>!e9m^sXo`7^!)&6??L3Cmo= zI;sX{&WmNO5wzuq%r(HYqSmdopOrTY02Q^pswvOWmFu{o}Xq4$gYwvsh0b^Yef0Y(IZd&j0v)XXhdRcS&za(HR$@b}+$I&M@1igQyN4&Z_yZ5nM-=;cSF%wB#NP6N89QtdF1btp`n6+Wlp@f=*&IF$8&VtF zz<<<~wXwK4{N2q>)x|ps6XQ*sIr>Zan-`Q?gP9$z8NH3DSJPQBfG)|D1{@nc{^;qN#GYhTC zqJ6)1;YvBuzjy|o7hs9#MDx&dpAa(_9})?M1T$+%rrff$yqN7F zsUEc*nk?6HshDF2&tnmIo)9zei2Q{m-xfbVKE@ZAJ#L2~Pwy-nyH;NrL|>8W9nyR?u49d!Sl={fagKN z{|LeJR6JZuYm4_@VYHc1$T9(xm}E7q+tBFT0t)Ox0A&PwaWI%{ANWShLP=>eyO2k0>frD|-Ns3zGW6Z%ILyEotI$Vd0z| zoPIdmt64j(J8sMFy}ZdPWj$hCIJP&{?Dph;%xreQ+U3`p zx>qIc97UGCTWmk>Kiy~No(njvB?>t>Z%o-GVnI8~;1__}lL3@!|37EWSczTn(_mzQ6%& zI0!6*V92Km!Pl>w*>P>|Rzs9&f(u6DNjL+BrJka*AqWcSf)bmAiRF-RATc0F7!6XL zHO2|F>2-b}@t9^d?tuEd=Ux+$yWjVx^wJZwuQD5`ur1H@;>lh-r(|I|@7m&1d}{4~ z=TjUenDL%S)^QcUeEZ+iXHTD(?SC(}x1T@Q|8C>?@}>VhT##s>{Qbrx#8RgG97(kJ zQ#^q0`(M9yWU0=<=fuS3YV({q*-|FLUdNR4j4w)oQEB<%{w*N>`NX7&QfRaS#A?*Byb?d6U zkbTI3a2R19r5r1_Lil@j#APg!1cw1v4kgNwS2G&58`~J^gapQVsS>^=*x#IwnOwTs z0Xg!gocKE{(^#Mt#-wbrKfY7L8vNXTQXDul;i6k9=oUGwg||xsm@pEHF?9cvd;cj# zT%k4H?=oc4{P(v=JvvgShKN?5*6*?;^iJ)K#j~)vm}(bT&>Lnv2dJxu+r17Ex$Uh= zH=0;Zu}GP=cA_L+i#rmR>5wn zktyI=9jSF!3YInl8Rz^JhY>vK?d$+g6J-w53nL55(+MiI_ncJCvkaM8GGnEbXQB7N zeVX8{&L|aN8vjYr!js<4HUt#&__45^0TElkDO_UZx2Y1^L`kTxoKx(Tj6FryA)OGv z*Vzp@wch=Yh4Sx5AomHOB7MumL-gm&I?x)IK$XlsHr^-soH?w`Tu_vq2iAP-6)!0= zMQ!n%Jp2F22Z}=H)h)t{wH7Z)hugm75#>qOR4_NZ7kdjG*x}r9z1Lh?tFVtztgr6 zsV#Thruf1InVHDCS#(!EA%#YkTASsXjVm(iUSDR|)c3=}d{~%ww=iFqeP*>s>U^V} z>$TcS%_v@~1GU5I!iGq~_KkeE*{E`qO4g&X)Zc>E8{4Qm{CE5^HkV+I<_2G6J&Wn+ 
zU)UEm1x23dbG|`UmC>VEd3IVNwH7;S80`tKnCd6b`>)S(=8m**C&N+gVC(N~s6Bir zP(wq#I;tp{tqV2RLmw7Z_18%(g){f!iAA`py`*9>>gubtz?`jk6DX`KG2KUm5@&hY z)U)&N6`8A=$QI_|mNMAr{X#;_d#^(}g08-CqeRu+&H7ULEiK*6nvGNOE|ctgUEna?6Pnq=>WGp7lhI%b%Q`Yp%aMru18&GDr?2`B@v;B6M zs1bR!pjthBndvlj+=+D1Y7<_pfYGLX#lFsSf2j5nnMP-Zs5~ndALltq7rJ+uwTovOBokzu+V}71UK)&p01(!pa)lW zxz}>+-1^)zp>p^$_92;^6Rz*$*52NAb=`Q(EBGU&B6t06(VJUk>bjS=^6CDPJBF_X z=oKNzr?wDfu-HFb!~GGwG4yF7>aIu?QPS^a+6E5bceuJTRceYCi?&m*T{&F5I%KXj z!MXq78y2^7grqmua^;S{VpNhV4gaBhnC4X5EYvI8DXO=w{ImPdZnx4`z5v>`Q)5Y+ zI~q$hbM0vE*-?IvN^`?=k=4{f6=SR6Z9evv#o?0Ub~;(12JWl?=?B;P(+wjS49 z#LeM+D#RpbL{sEk2btf3_Fko?>NJQEk%S7|E^Mf4KB0S|e`cdVV%ih5C#G1xg04v( z)s_3UuV3eK*u44Dqwv=COMwQS-Nn)LMpUm@o&PulNYnGsq5UYgX zmx#sco&^#s;b*pW5$2N8^vw*uEI3AFfe&suRxLZoGL3N<{H#)E*MHHsJv;Aem9=6g zy)ZyY!6Io#iUclh>r@XuJC-PkHuYJOVG{RcBLlhau$KPTC^!GpswNSyXk7Rn90 za>f=@tg`b!K;y^4=rL1la%&fE>X3DpAd=?!bJmla??%C9!wJ+ChbGNUl!#feRLa85 z&F0Vg0@h6(&zJ)^$cDcKPC3PA{-JL^R@xmFFrs1#LtA{P;2b)2hG5cybLB=tA`@x z7;yS(ch9kU+t-59We7kmt%k`?cw)|>ib*`!2>IqiW%lwqTi}#aSZsipRE9=kpe(>l z-*$GU(gs{o_G!*!%)3rShlZ$oRI;3n=G`MXNtBU4Nj%Z-2=IiV@# zGVMj$_NpSSCMspFU^AGimaQV_t|Kk)=TjU1n;l&BW+=LT1n~U$-|Z()oz$=gt6Rn%E?X%m!|6=yo6a!|?A;n@6C!6{}$s2s#@$uo1SQK4|fQ6am<$6WmL zwR+E1L#~O1yT@wd&h8O5;R<3B2RIh+EChlMLJAUtVu zKxrb_ull;NLdyeVsMdD(?>^z#mQ4`vc;+c&wQ61^M3K$F74R5+!oGHu{@G1DUuT}r zMe*X5IR|fUxutMcJ^a_`${WNA_cL+EzsXZ8|4Uvn-Zk}s1@eFA`OdSV z{C~3Z^x1>_zm>~ULr`F~Titl<4uLn7PyK-E)w=JY4 z!{6r$n@kGmCg;+g28f!(6D@>vZ#|Ot8x~Nt{J-cgc3t+rkpI7Y_N2o9pFVrw|F`k9 z;QuZ6zy2bwp<^Ow=KfgNSq2T^kk_z=sk~g}obG<7cq`_@S zgL!gbE=_4#Qqo~WV#H?qnOre!JV9c*1>dQQbR0hZ<%`N!uD`?AuL_yp+3vA0AFg2Z z;u(&87p?UieX&4mvpBvkCFs1Oq$?~H(~dstRPI1?nrS7KYWcst!D>x*z(xFj=jrp3 z{qN<=XD=W4|7|=qCo9$q(H?k|-xqE$TPw?U9J$SkeqhtLq_l&%n9jP(s}-*iXqe4h zDyZkAcTL<6&%$Re|K}I}I!=HV>i zVHRJy<+dHihr40(nb%xBJGU8RoFSE);|ccVF!gQRjyb>FhRsS3h3j#g%cg286z_M~ zY%E;7quZzn#e2Kw0asVOfF|VzuF27cMofiB9Ez2XzB>G;ogZJC7Xq;f7ANX;sXTWs zA>mKoI1{kk6tu>wxmU*C@4Xv_ 
z^b$xsqG=pB7a?-PtsT~#zi5WaiHs_Q9<;m@EU!0aG%9vm!k2w&@RobDFL6WI0J`dq zuud^rp?M*HPD0`nk=^>jEqukGhwQGH9Q^7Iuyxa!b!z8Hv73@wvO3b1BRc!EUeZn# zC1-hvE^w!I43;<(Mk6eyIOV{7h7!DhHkAf(hg;@a`CoPYyixn#%kupnPqufSKG^?m z{|oxWBW#@l*)c)QE}pfs=5~<5@lai)Wtqwd2vSnrEZ9lZ>E$Bqip67$d`vFd&L(g zhx_>pmd3mMJ;z3ZuvBvG$44m_r}E1&jZZ065OVy6#OdckIaOvio~iF?Vghkd_?po) z;q^^w@YS(Y#@#)LNIZa@PA?$p6-jLP93L`IvPxc5sitUjKq_kPZ_3) zG4(S8;z^;A)x|>PytDzz*tzep<0!hdc`v?sSssFUnxS3Esv`1hI_YPM7bxVZ6?{0=3= zD8BH~iOcN%Zg9wH%6zQ8miL1M+hP}(jZ~48<5Ep!Fv@k@)f3{OS|9`Om+|JXM|G(1Y@E`{MO~e2V@hKil96K_8>6x-JVh$~Ic&bv2tNHYx z;6AeJ`!tP3#~wYXp0i6w7o=;2&GPj(rn#Pb=~8KlfhHDM$+U< z`lF_7b^%>Bp6=!*l)~mc`=@8G0x6#>IcM|wPv=U*UTxq;#d|C7Y+)a*nqF{ib-7ok z%wh=Y?`~{0P`hv&6JQlyN^Sn!_OMxoSr#b*+gy1uY;OLYyJl|>&w{5x{wqF*j6QYK z=Rey|UzE>(o<4tw|G$;T_0Vllrwv0Xg>csB-18KKg1+x63g58gUsA{URuqN5@^|y_RJI@N||Asn_40Wt_{(n|zq<-tT$N*HwRU3eufBe5x z-G3K9fwrit_Y-InOw`yyMf!ZBo&}k|9F7AO{GT}sBz18>?Qu?Gf!T%YVs78GA=ql| zioVXY^E~#R z@_E0WDR{p8f4cK>d%NQQ{qo`b?^d367OpmtUQL%DIPTzQrpQ^XvAEkZ1V&M0_gfae z6<;(pq+XcG*V-H}<;<8X#vk8N^l`(OQ5|@#91yMSHL~V-ndtK=MnQ-crsYbV|fiSd)4 z;4d0AN{y132Dln&=)SEBFs!LtL{&Sb)-agyn8ZXl*Y(>s&0S}8R_|8O5VJBRI4_$! 
zcWbWFzrZXav1;HoLq0yijHoay8V4>c^NJOiVao;=;>CCf?rE7(JU~*yy)l`8$3x*D3BKGvXUzqXY*pmwP)fF<^IKcIy|(6N@0DmSDm=SczT#QrMo z*DTN)E?KQa9cN5bM?n@SdEP~>x_Et9az%4D4!Olrj-PIg!6vViYg#RWwZY!;T8zi` zpg$sUpHE9~J^yp*%~dJ9y6lWxGw+4&TyZx_cdIBp9=5-=t38~@snW(-9s3M^&Q&^G}s!Gf*yEQw6{WNc#4hLMyi^K>+~s`*~7*g%zKwXZ^tO{1z3 znKP-`>`k|M`O2rFLcXYarGLMoC@v1O zthHLeVnwZ`TITMVD_U5-ZLY!I?W$kPZmnQ7)^i<3gX4>`cBrTJ)#1D2-J`?v{exHU zUzb*xvN)PqWHDkhljjJ!DxE?3Zu$U^(n(X&9^2o^+p~ACPY=%i=gs+tw{PAb9h~nU zp4Ru4sP!O5G$J#`)J`O(|`g9cP&w)~b&y+3QhY6}nB)c7cC6ISE!98{+qTj%Fr zNVt&13ZS0so}K;n_H_UJm%}$r!|&5X^hZR*C?d|qWi|k#zre4!N+xw`V=E}@0Q{wp z*33Q{w!h<q%<4@ zpd|adDAyV~JK9x!{rdLo-5e&jjEHOQWFgCGbh6hBXZ~Wi(cpkeZ>ui7{EzjnyGqEy zii@rS-#uP&l`*$fv={5&OHb>XXY-nq_zlcm>nST`ByXTS1PH5ELh#eGPk$9J@9b-ny=kMxIpe{5V{5Tl${sLl4J=x*P_W% zTdNXfshMj~WvQiWk!7i^73gxF&2tsyWRbKjS_&5^C->ckSqs6pXdRw2thHQxn|-dm z^0oynm2R$`GjTQKBQRF84gYY>?3<%u%csUsi`Kf&N?r(Qd-~|e9hsh?f9u?ltsRs?~7L0JJrndH?$h& zwKv zR$LV1PyB{NL=0f(HE98z8_vBzqBltM3`Y2}6gtYBke3TDVx7FqmufptlK=^PCKwWU zGSi5x++I~IiYv$)T5yZHEvWBOxA*U67pON9Nacm={0xB9?%{|A zWig|{sU28er3jpk9;>r>*N|;~dRy4C{*hu{-brvlS>~8trR_jZZnT4t`2e26V;8(Wqk4C<6Ev-%4$W_i(?Rj|l|uc7sYfoIG2_l*aj z8QippK&#tu)YWc3NmVfBM4`>;-ibq7z3YXMXmbE4#-gnN!$UAyOK|Qm7;Q0_PDqv^ zjE8Wvhj6q7hrbWuX!kb4>meNNAsp=?9PJ?-?S6-&ee)g%_s*B2<&L2tAT6)r#-+`v zv&_D>1z0PGrY){tf#4xF?IAX;wAyYsHf{Z3W496jS7y^0Sdte6qg|;UEExRx;ql?S z{a5F^r?1Z%aXz<$uW)VWmK{+qhq6W&QUY2)gye~186)xUz`A9QH+eTq(0r?Py`4EQb zJnIU>bhqD5bYLOH;w<*3jK(zOdo+rOfG!=6yPJzJc=nIuy0BmcE0!a=;-j> z94c>0?^x-(rOcMXx&VQh&&4v#?LJPUvmAmf&Ez$suWvHab^bIz1dBcdi?*beHiW#i zpt*VB@(_`FV-cyVhh0|ivj3`)Y?g_tTtQD$mXL$^qPEz4ubf(^oUA>^7iY6=(7-$# z{m+~8gX7&-Zw}7i?C+i|-tEpX??N}GIcSCZ=Gv{cLY2h;O32R74o*MJVFQJxIc68F zHbkmrx#@1#uH0lVv76U$@BMnvLiTF7{VA@OwPoaqaN&;H0`TrlgEG?HsbM#9&=zZ_ z2frNtzm|AocuYPojk))~wC;hYW2c9f9lY8-J2>AzZr6k43;V}yVSjaayx+cQ5(m|$ zEf>^EQj~~UwN`$c<3eUwfb1I(7=`O7uR*702fL?xzn<^Dd)JzIAm&fckr1spi0R{( z!#D5RvnbWen1rG|r+R<-rd{hP3mfFYJS(jgG9M7*dq)*(X_YX%JfNnQEOMf7&I(Gs 
z)Z~VENK)~Tq~ak-#Y2*cha?pbNh%(aR5&x<7+f1r{}W~&js=^kJLo)p9-;znzwqf6 z|09(D?@j*v>crmaLT5fBiQsv3ZSChK9mP+%ppoj&sDd0`;~2Tc~|*D%|#CK4I{ZJ_o$}!Yu%*UIZ4S<-`R~F z41QLT@Py$2x`#1P7GMT~f*ygL2bj?mVvGa%i6_`6;~5~xn%SuLmQdfE(2Ge(M=0zS z_Fv`lo88sV4fc26?Vg{#eSf-laQ^=Eu+h|#FcOO~bh9nu;6H=@fAat4Jd9|7e+~$j zqgRkLxSN}es!0VF0#JCYi%a$o4k8l$6XN8IxP?l zZC;m@)@PAuXmKsX^pHlgNd?pPfg_`@%r_#`k#Z5virTN&)Y~|m4M4Eewdf%>;RE>C z^~~nH_eYA^%##bMy9Hgd>n`-H^6_+kUp(A72Pch-vT|(l)6S1CoB2sjS}VjCR4ccN zGpLNUDgWTNu=A|hzC7geyYIRDzNLhg_rz%2&S1RKtQJ99el{1_k(aU|uR5_cFf2We zs_kb{PQ|(!ZIUV$x_8}u%f^}3a(1DNU5PSGGv98u&N`3k8uM`Nx6gzN5>Hgt$Xe)& z#^bE6_>gCt98@p5Z`FsIG5PYi-~$5R)!6ow>0te-NZqCv;cug{Z+C#ka`cdl^C1~0 ztTzMa-9GjCKQH_&f!-PtfX>hVxxMpr`)MWr=gy0V{GYe+xcNUXc5F6Jog`|BOrFiz z7r*gbo{O_>RR@XYeTsG52%L&)g=W3m25yJatF)n^)@P-7LW^npc^5%k8R{YAtyI-o{5SU3dNLM_GvijyAaf3;>N|R^ zgWgi1LXXSoy=zi@0V>NNW0hG8mhlyaQZ4qSIZOQ7HEPJRn=9u1?_=$Fc*;+c{=X*G zUp));|LwB=|9t1k!~HL}@+_m-TPNO|QteCauT{4%wS9}q{hXd}L%T0^zjpP$)czgn z_YbQ5gKGbv+CQlFw_(!$_BDHRZ;_+_^`JcHUGQP=3_+`@hzBfXjI^Xw1lytuT|92k22gC?XIUMdE@J=sb^k3K)ogN7=>g#fh z{_9)Z96Rg(^k8@Y=%5z`>+WOj`hT*s^WQhGPh%A3fqfB*(F8|QP2e*wa0DA~PxoIP zoo(s`k7B@4GDVzlpfM=#Tuw$LBx0tt2O0wxC=L({xaZR-35irpdY#TMl)5BC$WW=_rSs86mb|~9D~4I=or;Y%06irj$a=h z|35g4C5reK_?QXLj9?4KVTxlBsD4ivjj)*FR8g{^iR@nCe?9GOdm}8+Q~CYm10;-I5V@Vk z%t~iDW(h+Y2bhI3IYlHMGsFc;eUUP3=u1(foj`@I&O4e>seGwXrBEV&TB=n!dWSg~ z4+nKCWeIYBC;QTQYs}1YhdC+KPO*z1OMqsi_BOmi#Dkn}3?EMrLlKtq|NF+HB>S-m z9ILejA>o3nX7aBfrbt-Yw5{R*f|O}=s-fv_I-S44_us!G$@kw6;0@sd_oe$rBOm-919MJ zbLtZ$?9$9hLQhnvDmav3C_6)=?sQDUkY1{RqEU#sgYj&_MqvT{W)(R!koPO5n6PZh zFSJ#o)477`lR;ARt*-pFo_(yt10+qY`I!srzQM%2`kgCpxo6q(cllfw)~n7H3{9E= zmC)f7PY4%mHf)}c*Qz+|D{%^kvKBB??K(5P{^tsYF$o)ovbes3ow-REhIx<9y3Nx) z&-86P=PWyW7*fanQ*vTsYdtt|hSLlGJVX;SLx1wXPg-jfk zRM1k6mGb8x&<5G@0tQPR+z|F|oOW!mVLk1jAR=*wfCdW6nz(fex*_ZsTj~jAqTF@? 
zJ4z9Dabi!SD`5 zfKxNRGwhh+;LGxQBR-QmIzQisN|t?>&su0l=k;QX^g!FA!*}kpDyXreO+YCyVPBu`^d%*&feKR=vY2^S@`0 z<;trHEeAX5dez0~&9Otqps!@rwj$tDnuY<4FmOEAY;tSQ6%5r)va3ohg&oON-x@t` zVMAA7ZXp*G62t^SVK@V;211ReRqlzP4{?B-Ys@c+T#*bXSR3aGX5w`yj%$(Vb{MTN z{FH6*0Qpy*qNgrUcq4z zExNW>z{EXK64#;)He;toKWvYkyH^q&Z7?D+3Giq+9L>c}t$x@ZJ9n=nO6;gZxD}Vy zTi{r>y!zh8$@RA5~fcJyVV zZFZrDCvvOHFz1rN#)gt4cJyLX4fR$AX^qeNu#?oB&eZp){Ndv?8ewMLd>pjM1?JN@ z#=cr>clxZ6cGQKhtt=Rt$Ea$2XxWWK(AmA9nEY8Ec0|}tXUeg&lVRrFn=`<%&t^B` zg|qsz#+9X9=UdTEZr`!C8LHg^rJAVtkX8sOi>wpc{;@1HB5|-Xs4F~cTv_Tu;N`G` zl6#GuwaTpQj@t#+uU#vW<%m@r2cQWeF)!iBhO#ih{^pGw3y9 zN8RP!20Q!5Qu|Zmx6DLPt@7NnXT8|T?ku0Xvh04oNGSIaQES0YP0F=-yHK-7cWSNN z{S7~B*`vGIN%?}!R*iSly?xBs&KC}->>`jSnRZ581?nb*ing$%=l}! z7FTd=HlSmj^@!UwcJGeSC3beMZ8l`V@myWAu?lwLG&;*i zUPDAK?PNrw4nozj_!M2>0`A<#;g||MfIXG}14bAo3?rcrQsa3vyyD(}Emn6IDmlC6 z?_*SF(YUfHiNPtVwhldb&+#}7m83_(1&Vzf6hs@xLXdcJYfJD7hSBWYfpbYzDeSa0 zDVrWw^mXrRMckOl1Txz8Cp;@SfoI{KUd~i6r^KHEv4L-n-c=WWptm+e$O-`& zk1;c$mRVuBsB132u^p+^kU3G*Ij9%wJCv}ft$InQIE&Mu)I{rBQhq>6yL2{c7-+1t zO^s`P3`RI$`4`HLjjZR#9a!VmqJcW|VE(Ga9JoEsi9XN)ypl?4>4#{iubfI7neRyK?320spbB;%APmMd4DR3Zx`JU%UWp-Wa4G7w7(pCr zFXrFOwFtq`L448j9Ar11fvN&Hz95XoDi6X1Vnl~q&u9vl%H=gApRo4$i+z57lLt!)XDPTu#Sk=BnnI;*EIWp-Z=v{l_!TPaNxGA$KV(v7J1&Od- zS9#ipYmDP&o;72~z2~%%c5Iqu~UBua3k&H7p!J^SKL%6g?olo@;cbiwxqXp zG2o?H(L3YnbZxF8$<{(UwO0f;wONZD+rk1g-S%dFJ+Y|n9kGkrx`KV89$usnJy$+S=?he zXUqM4u3-55+0!R06Hfse(2D;klOunD%(T+dGK^kIKe^OuW@z{w|`Sc@)tU7E^F||Gl z##V8s*|gR}JJaZc3bt$$GM9Pc;V2QaePRPq6$|PI_@d9Jk@}$%P3sLsh%WF3--Hty zZ17vd*cA)~OILQpD`Ur>Ml_g@BV`u^Bux0JtN<@xzO=-Y;b%jqC$~GCZC$^N9iEQn;YRI3(@~yQ z#IPHKpA2XB15sBn>~^nv4qCv@qBMS2ioKkf^nH6-`aVZvHmJJ$XYDHs`99|Shz3pW zE*d+#su;*}_ZBx=8$87*_>B>P-^M;(@<6>xJLJzr*^pe?ImLgZn2URgp(_~v_?oPf zb{6eKtGiHjTyx)qxq8elboKiBEUR>ZVA~zKiUZki3?69+C60>m#1cfacBFlbz%pN$8yK8 zAld2_?3!}-A4BWJj-VlCiaV z%q4=woOSn(lGZ7-6n3;h>xiatgKtj(J7$xmOd0nMH#dfz`5TNfcJ3iwEUfFp4krQj zQSEXn^tc-Y5*HRj#f4?CvpPiUeAbGcR{j}RI(FIK(1)DB#=<$NWYb6bAm`4WwV1?8 
z*qQHvFb6xC1Hyg44BQZQ{JDNGbFhBC`zbw6b6A+khirLW*lB0M`0JT440hT@m@H$$IFk*p4&1e$HEu&&#!$31 zVca!l;AXHh--6NJgmDkhvli?K%pwviz4tXkK0d*W(BKUFG!80kNX4|t4;hPzF%C4( zG3{WO$Y%8YaCiBvDHpAAz-iOY73`h7?;qiavRUT315A5{@qS1ma&Ll-uRy~&fZ>zp zFOJA?O9HQcZPz*>m+R};lH$Ef3q0VBkGHL*gckmMFI-Pc&wAnmO4u3m*NmnO!L(PP zKDrlG<#$xwv*!wiJKNi)`cA&Y%pE^v%FKVfvVF!tsOep8veW;E5=ybu)X8~0Sx%F2MP(zKO|C5R;EJ5sLdr+ z%A|V7&rQ(I=OiRP5t6;|1o@a34gs%V*NszG)>d0UCs@(d!x?b(T4jLN4Z)bv$R;C> zCpsdRq4Ajb;M;4uZDj6xuycLWX_4=cmx!CiPA{2U#VrQ%rd4@ z9*UDV(p?rLI=V!b!vUbNzznA7La9rs?_(U`VC_+(SMcI`^{d!Xz;eDEc1)$k=rIMy zX;?&&LECztE7-YSeQm)&0cooaUYWeEjy71Ve~;9`wMg_5cIFGMlFZ8L-bbOe7QR!$ z&U~Tumy=l~?93Nhe>s^|!p?l5^_P=bCG5->T7Nm26=A`M&)Z$peg$U&F#$-9Kf+>) zQ!Y1Gl-;$9Y4usl9^GQ6)oJKoHqYLIb3WVSnLt8rs@|_?tVX?=ziNQ@)07z`?p^ZdzC(li;ED{UMF3OhoD>ze(4eaM09FFawoX-B` zP=viVffwsYcBF{xW}lnHjvmoFz3Uq0JsRwo?btHNF;#8}H*3aDF5OPku*uF-5LXyk4$c!p`-$av4!~!Isy`l}p%Z;>tNQzMNuKC78_RiBIhrmGvjM<>yL= zwx9?*KqC&0v+(@-bFjnYSZl0bAJvE7H>N@)e9-R~)y;tV+1K7c@H3znYz!9Cb(6$V?aa?OF3iqJ`UXv%9iu{kk8|nhh8&9CPP6 zVMVyBgp(_hZHsUABG=f!`ygW`JZG@KXzTX0>v1`pe3!R!&ZTIhmCv0TTF3 zFeLC~aaOeVsyJ{{{vO$L%Fh)HzlfRgxHzR@NaBfxz1z|frZ1V`Nl)9m+XNxQo<6!d zp#hxPefs?j(pt+NosE#+8a)|yPHBkMZENz6S0oPP0@*N8Tl!Yxld}`Kwk|3AbXN(M zuZf*b$J+XI<|V{Xge61Ji5w&g8G0Va0zTo{&*}{2<5u?P??XUrlBy(ng2Hf$!w5*s z1q#Dq4}PJ{9<*$AI>X^mav&Pw&UfIJglOh59Ad;V{Idg~`^yiV>_yT)Yg=Aoes1L@ z_K`p%#PQG3%nQulswsf#y-^a7PUqbe0~*l_$rhtXU8<+dgvHzOvy1PCS=6iTUJbYi z(Xi*(2E!Q;d&55m1Q}d0if{l!2h?G&)7cGGyr5*`;cN>K{A2g%jk;)FAEw%Svw40| znZxDnv~hdrn&_*~0=S&YCJd(Sl=@S|#7gtyjZNRZ8H&P2?EVeD|Nb3GzW;sz|DdS? 
zD&_iCbn89v(P`)R4d=&Zr}OBMxg*j8IoWWrfr9xRC z1)4bLiUIX`&w$(WY1Gei|MaKy(i60w#sOwrpg8cDZqSo$cuvX9PUld0I&i^gJPBvO zu%sUxn|o;FC`kbXAhC9`4N?`;2{^tWjH=9&J$N@I9B|B)PBWt^GgfM~q%xbfKSlAR z9CS{``Ejj46Xk!QB!{{2+D*cu?f#w4D~$ks_HO4CL5wdoP3RURDv983dL%M7#V#=8 zaunuDvzUa9e{nb)kx#+Cs2X>ZJ7fiS6eM z3*fWZpE4TLlq+6K1Vl8zjXmYX_f+Wtbm@59E%#VgEbdkZXh){ms3QC+lDe`3Rtp@* zX>^v?$cMUJf~%?APKJ_Zw|lqmk~4okx0Mh1yuPE(DhX$5ofv{si)1S!d&`W6o@enh zgmH)_`f|fSDg^R-oS`wMa&eJZGbu+z=KQJ$yCJ95KISe*%G~~(MaZZ0GS(v6u!lW$ z*hc=8?CB`K3|FU=%6!cgyR|cdgW{^~bawSsmx3k`;tL!q?>44PH7IjErlWshUl`BH z0FUK%Ofy^CiM~wAC37dhlC~D`?iGky&Z z7!Y4q`S$kFtM1ocr*jx**)|O#I0mY` z)OilQo=z;ZU!G2zV)TEBL9NWn#Wk&)Rc@w&a0qCOVKkF%iENOC8d<9avOT5(TQ*3{ zN-i){ajDq{p~|CI!EaWs%6n*gwAWIG6v_R7N=Kpz+PUZ6)%AuYiW_!1hhv49K;?+h z{E^!2oI~D?Q>{o+(wItBWhukQOwyl22oC=3qL9c}8=0X7#=xGh5Bati^yjUM&buk) z*wkEhWds3WRkP9=TMntY{f|?iMGGwgM`L00B zc7|lLE7Ed_jtveDUBLJ^D^=2N}jj(x0cB_Hk9F(b(Rgo?ULsuhBaF>{}hL^F5N3|bEkW1uX+CN&WDQ=vy`>MNaV22sj|o_4Jh5@iU_ zO(8I*p=@NR$=twN-aXJLOP|oIPCH1a7+I4M>tfRSN++W{5dp>>&&F5 z&X~K>U7Hh8vouz-V>D1@;n7!?xNLtz$zK$^?N|pg${?hZiJU3Bx!lZ$9Z*Te7X<0S z=h?G0LvrWuj6_K|GkYMjLNk{&Y4JnHS}@uk%Q0i6sR`E7=|W z&!7WfVUmA)%49lFy?cszc=ClZ@2a2fr~K_n<^nu`-S864xca7)#)10NHY{IdE#{D) zs94vXCr_W%18sNC9kn62hfph{fFwZU(yGZ=b3B@4W@^Qjj8&o%pDZ=gkGZX)Ay8ve z@1(NYaiI7nY{*vNr=1^PZrU7}vSq3A8a0@xzOb*%ZP`xLv?G_7+^p%(N?JRt`_qry z?luko)evR=98$zNE)LQp|`{A$Gm8b zEJeVmc~DX;6c#fY2FdiDQPg7MlF=0N7>C)`>D*R-q_sIO#+ROf+PfggL!L&kCuQxX zc6+62*R3r2hGo9zYV$t{=?H~zffzxf5OXE^RB@k5=syGZaf}$DdmXdEcSy}cz&oabyYq~tF8V6fSw0^2>wHDyI1qtt?i>w zC~Gk9azTwbFYkMei@F*$FE2Vj)S%^uDy}N8I(0KphUu#2m8|H%e-<4KU*Pd4fWS@X=akywf>j`>#Yv9rc_<;2>j5w=x{I#~_Kcuf2>6g}9L zo0)7oWgJqqUz0Wey2`#*2FkKSQk|Y;JFmJ#Tjb9xUB#8!I9;OIRs}k16hRk=gzAWo z#A+XA&H=LpphO{@V8#g-_~v%vTu_E9t`7kc$SMc9KE%POXu}#ivD`9=sU1hAk@h;B zY!CwUB}T9pA`)p0Pgy|a`u_je`|{?tactlDTb}|W-5T2|QH!@EdN*B9vE6Q0+p(YR zB==Uj%Z5ZqVuvDm3DS-xlkfiOEC4}@vYo`8zEfq@^f=tG8~_IgXZana5|Y-i>Tw!t z?URi|Mjp$}(vzWu)5Dkk5C(1f(iyb3!? 
zpPaqh69hTz2O5VdDMq4Wk8(0hmGoKaz=j8r2wX$OLf1({4pljqWZ zxw+dLzufA+?Cf@ZH40?pz3irOoK18$3`WkZ*R$5~PXC2>ytA{rVUFsSNz;)!)4iF= zEKeiY&CC6%Xmj>S{q2HQ=-kl}_=V+I@c ze2FM%MU$zDR7%Ed7b||1c-5iJhhB0}Hj+>-iW6|0XGxx63QyM^nAZy}x_vgIjunW8 z*{xEMSl)e%LO1fohkax};uHuN$Z(~Z@yc!+ybCoN9;y&31uR!%oub$PSn|Fw+$fP+ z3mGlkhpyw5l?vKCpN&KjOrtneKB+TsKJH5C(JU_U&dV6tj z-YzcpUpp6P!;6der|0|5t)n+bm;0};hUbSD%Xi-okN1zxk>+QI`@_p&M+{k&o2t1C zB2@j=HXZ#B$eVC%6nCozwZo&ba?nw!6M3*v03~?k@~mcGSvIZ1xTJYxb70~0t|qi0 z1uFTmak?y(&I3*AR+cO{?IS6$Bhud5oSN4iF%%!wLX1?TCP79Y1-k0Kr3-Zvw1y?a zB5rx4V5z5aoT0-9?p7L>Ih==lqo$1`mk0ZS&eC9%lca?0o5|EK1w-pBl>uxv7)*7V zsOVxAOfsM}FcW*Djb|A+OQ`^DQ>=r4JJr`jrKY;FwQG z1192GLEY6M%&(d<&*El#MFcppvbLVhNw({ZXY<%EKP9csKApYT)exohJJ$OV@r9q$ z*ckqe%foi3*XgxceVD{r2P3lMTL(Mms8P2%m*@Fwr2~-1*gj)+#Yo+nj|(&sqd z`^YiS;s#g+Sb1gGal`T2=yi4*f7~66tvl+i)^v~jLGEJqg z8jYiDO^ZmuUL#?0#?~B0Mo~^QWGyx9z07fAm{~9qIhBSMKXjaB&n&`u0PY6#DY?Q1 z5xKvlGV*d2Dn}6IL$@+%7N8a0+GW{r>jY&ot zNj)M}p8?H#;y{fP2s#v~;@FQgLg(V zJc>bH#2NSk7)GOUma0kc$!shjJfP5ND+*GPq;VFHV@leMO6CPUKGIo$0@n>3`oC00cdO6EkAstIPZG2APflG~$r-swZrhrVy64ivSI z_lIZYJ}~#a(1wCW+o?TVA_<)8~W9 z#~|{>`o2AoZ?IDzoyMicLU1d3N8kP$L_Y3BhwpyvkdK?0?uYMw6|^UtkTgl|)LDS8 z;R`WJl?1knV_odAYs~E8iMR>WEh_!kt_jP&$qS~*hfw34$8rv^bF4b@aDH~gflR|d zrKY>Mx{-EU!Vi38C2rU%lW`oU#B13{6u7MpqOlSqkda}M;hLDsY)n&_t)NGds5!*L zQty(|r<>$q*zG^xe9R~JP5&C#MjokR zImskgL(NL6#sra;DxHVq_aD`b14R#@P&b!<#FkuQl+P36^pESRGJAJBJDum)ZG6O^ zwM**2FvNY^+3ajf@gAB8+2n5g{8>UT`9R`VX9Zg{5Bt5g=#w(JzX`L>l)Bm3>4+oR zD{#92A zblR1pIM2F~y4BtOiru19A_zSnYv83 zHg6T|5rdMI&f*l#Gvq~r5k5JwNZ1CDXz&?@Fz{v>P0jY~xRhw6D4s;6X`Hf@UnVuk z^-TXTp6aq<fPVOw`_{l!WuHbe_p!z0ul5{QV^>2`h^WrX$1zE%WskLT zff%bqrOo3^xhV{tw=S|cNy=?47xV1d@?aV@@ZVur@1FtBo;8H{(EnZhJiHtp4@41n z$)4pYnwfZu3wc*02W$Rv|KM_X1fO0STSReigH5(@NF>DCWtV)|aX1Yj2k=$ub2k9d z?{~I3Y+*BQ{ODi}JcU7F=fN~(agmyYVi8Jmq0c+1BjZpi2E%Y7M{r?mlEt!vQ69XrK=ub(WB5@ ztnu}eMU5^CZt zso_?!R&vK&QYKy-LU52Do88-R+nU{zG)`0+E-L1)YlkP1BX|eF>)7?kx@@{~bJ0Lp zWzxS1*s^C%vB7+<{6Jag< 
zPG=;RsAo-*;xwD7^foZd45z}>pnT)rO^OoW0X1jWTHPu-v(1xi0cRdYPwy~xxT^nR zczk^N{_5c5?DD^D*{>GlbWf&$QoC8DgLBY`siROvA6-3$v68x>$*9UEkBA!;#tF2d z@DJQ_mWGv?ki0=w(ytk{ND*HJGuPy2Z3{U8cAc??EDlxb=02&$cqOr~JB=f@Qqjp= zMkH~tde~t*9Edf^!;msp&s3NINz^}yOspe0_zX$E&Q5`;mJ zzs6Q)i|gVbkSdDOhjSFyYr4id9tr}pH#<;{)8V_zD!u3w=ibK>UFzOlWcf{}%+dp9 zg`4&yV%dOMYnDe}_d5U8`LAm+N@Ya(Bd+^+r?fB8CW9Z4?~u+^0wS&O#3`z4gcCPO zu*~b%7YFC>j`j`^@b{Wd1{PLf{rdEHzhGe-X6+W*K}QrO9D}73P)RkE#jH4DZzisX zuiuQBH5WIffW~G(P#H|I%AD4S*3#m?y zYTLO`;mw3Q35EC8ek%5dm&4b?3x4vG=w|bz3rCYY=~yBU(PbgcqY8?Mvo;8ZW1eIj z;3w_mW2<8^7j?N8wArm(5^Uh5)DnbD0QF(;DV{yEV6IqUK?hDNHzI*`@K1smz;S{W zTc26#CJs??rgbDMMwT$WwWnJQ4*>?SL~t02)ztmZdg?CA!6;FnmD-6n2;;E~oiT57 zcuqmOVklhEc)bMwKmwy>1RI57*9anp03|9)W;O*`$1^pTg@GD5ma!3)tR9mf)tSpX zk!w1#IFhn+4=Jr}=2c7hX`B=-Dbe`lG!9kqg7NNS z@~^!B;#PX4>DL zevk5Vlsn^~3+y3|hDGi>3_bYXI(ofc@A=LS{_XX8)xW*|*4E$pTe~}3z25fL*7Lvh z`n%iPy}yax<8<-vUk)<)-+JF&SG9BhCcmW-A2b9CE*(AV&f@svKs1w7c`U}XVXyOy zt)}v-cdLTwEE|ZX*KanSmgv93ue|;FgmTH;ud z3~5j?ND<;XX~<0M9f8QK#V8L#Ukf>vz!1dhDE}aG@#Z>D%;10Nue|>CY);-y9{bMw zkHHG=TmQYS?ehBH1exGz{r`xcb^e2Wzk*rzSGVoy_vind^-mZY=Lf_6lY`FOf4n;G z-TybM^8a>k>uLS}kl$XMEQCSUOqa~zGV;4|S_AZDAX75}P+yzL_W7xBj<# zn-%%LzuDV5_FraEORR+b)7k&YSN%oRa9B0NFYyi!_ zVpV+OatIthKu;o^je(!raAJZwfAt9bwLfS5*TX$OLLGOn|Lv{a?IrzxZ|iCQ{~^Ct zOB@p321m?s(r6htH&etw%opb0Z{%DRZ#YE` z@U9S^^YX81K~L>h?5e&OO0A*T3+Ec+3|0o~8jq7!OQdQt5MK=e4Sgo;x`wa`=m)~) zeaE|oBM#~uRD{UnbRhbD^wDFWiGI&ZU-lY8v|2Xxw5-D6(PNJ0M~P~U@bT6}Fe!ko zMWDr6C^MzAwYE^9Rs<0fi#0cm_8Jvu$xx5zkZYBC(>(B17c0=+R+1_&$tgPs-NTY# z3OLApZdFv%X@R$AQMn@_(^M{CPpeLa4pW1p8V!KyTt+!z_Mvs&UqV^vRQh!%RE1qU zUK0C7niC8`V#vZI4Q^znJTur`)7zFv+=&Y}?wYu!JLIvG<_ZQ4T`bT)gZxNhjGiED zTo=%jd}WEy;wVEr3oTv`FMh$5`DS=>Bz!f>3GgzA5CFzkBZn4u#VPRH9#*WoEhyfL zBX1=2%(LY-1l3`*ny`v4vXs415}AUO5H1Qv=E(5I8?o?JTt}uimqBDbv5Z?6ytn}? 
zttDI!#zCb@Sn&EDR`cl0<}uj~FTpa)alk^q zjoo}Z!};@Bj_vUh+qsYN5YH&h@FM3O0~ydu2Hr-AF{U0dX?ZZ(g7BdY5HfPA6<`Te z7{}Z>jx6B@8^PEN7+m1=kLd@*4bmc9xUsY2G3OCslnQ7?;A^oCgzT9x8b)B=;9U+5opZS;;sp%)!`EY5aNO%C zh-k}!#!w|m>@N*=3EF}j+V2Fv;OSc_h8db=i=szp30R~Byd%FtKMz&PDHoa`0e)mfm}Ra6WsiT0mT%1E<(zI*i>!r(3SDZ~0}s8!B+BnH8yt;>!!|(eWT-;}eagWmjb{l+ytE%13fZ4$Q98YMV2|>;}xEmjI zE)pY1ILZg+Hm-WMtCQcb%?nrkl(;rR0|1|l>W7gxy$29w97ki7WV%ZzS6v{A9Xr0) zNOS_drVYcDmi9OoB6^GhC%7n)3SK#{WY=t@BRQ@qt(359d0yl=1>mKtocO8nNaefc z5*B4Ob(xh^S0yeJanu&^WYXqn$19m6;lf^#3K_kl>VGxJ2-}BugHIfwf$_I>kFvP>ry|j8BNG1@AY<(Mah0Z_z4e7 zn#Obb^zvW;@_7MK^HZ6;4Ix(NN5eACooLiO#`XsCntL#XF%Zq{CfE;H9EOdR16pBD z1}HQ+r64;K^Tm~OkC#OOQ41{7alt2q0oalqm0*v9zAP9<-6c#I3%( zMavoWkjBs1L!l8B^{LLhG>^nOoT8#QQ-9xRxapP*G=c$!8C3g>zTWG&ALofm@zU|h z4kq{p9xVd>XZMwDCtfyF2ruTDu_$%6u{;=q!a=^$KfCJxy5{1(x{T4YG!%B?kqGAO z4ItUV%jOozx&!kB9-ScXFGW(rGqQYDQ%^!otH9zKEf{gr$e)9-hMzkjx<6{l-$hY_ z#JfaM6UhB1G!ojdgp)N8b!s#O&4MNj&1noIaNlRGM=fF>yzXRH8_?)ysXT~o@S?~G z@$=~^-4G-v3`BDz(-aBFKgvlk% zDx4tmM?$7k{U;IK9rz=xZI~jUM(K3VMkq}FV}QXc=xSLiBQ3`!Ec=Hw_q_~Zs#ND{ zIcKHQ8I}%KXLq|VUmFy*I%OM9%=)}VNuJfa_F&dzw$g=4cMGtDB`V&BBcL-7Vk;sL zia%BAmGadsk*afCckZ-a<;+iX&;O^Lb=N@MmvtvAQ7i4T3Bs5%fr}iqXief6HlVmIF5X6k$)qld;$nW z6)13}X)4DG4J;2*vRSnVv>w>9?j|w)U$^VW4SdjjJzFHu zWB4;Lx`LxKRCL)K)C2J*j+7<9&i2-3qiko+oYc{*TQli@_lkT3=3ci2C3?^vXmDX`H7y))>(AJ;Qbr3m3s`~HuUWZ1mkV5`tXJ!Z z1L&i)evrmQ*fDiEMW81`VYjVP#3cgd>105?ID&Qah(s|ZqsZfpxB&%qLZ?|rXNE@5 zSW&c=P%`f!zynn2h(X{vnxh((`Z$bp-(&aJ6b`HER)%4>l`5U3!8r4{m`R*sr%a!W zI=Q}Y7ubh$1g;^IyC?d+4*d^g4>l3YG+P4OS=SrD?cfiv@i@;YuK~}NI1+j`7jrd7 zjObQNye=@YXrDF7U`Pe-lWB&*ffnq^TG*dhh}X#Eu_kfd_0>&R&*qfJVt4!GnsilY z)yQV3Kls3vAqY^d%O3CgVK7Q%y0C|91{LgX_jfl8HUk#uG+@f;J_ zN9tB&^JHQyU4&6Fk8jwzfQEu%cd6b2jG~TfG+>G6&!3B8yg=j>C769)FAPLt zAoYW28mGz!YFsZH;cTY@Ev_m5g=ra9N(QlVBkQy{%h`n32TG6=RI4S9CPu^EE;&q* z>i0k0TR-vpY+Y@7&ALyTO#|s?O)th)iR&DC7(`Pp3nW$2udh3P-5Vgqa5YgLhHVkW zOI4wHR|J~$I9g?GF`mV-u4cowwW_ou#QHSZgi*pANSI+rroN!lg%F3oQhFru53;^y 
za{LipOdR<3Iy(x;=7kHuTt;%L{2KDZa>Im9Y;JdmfA((AIujTl0$US!N3iG5|W)1E8ozfXfqokqBk z>ENGgAYL2>HPQu4Pr|vn$Woc9X*Gcp>}H5{eSvwbeZ&}NI78>rvV!qpVmk2pG!D_` z?VxIRThu5|Q>y}{8|7vWtmJtfqj%0*LhE9eDyIV{v=Ns)VVy+B=y>9*0y?kl;^qwHhuMEYz}o%_gIcLUF1urL8yiol(5N2 z-OF-#68p#Q8%gXBmtW%6ly)xW%)I1nG!{CKS`*6g=yzZ-s4sqh}gN9*cw zn5bLU6A%4P9*WHE`&g(&%U3qpijx5WK``e*MFPFozk3|dlN_C#Suuq!kM@e^@{3$CP?v+)7Pu05DQ6{*+ySLp zHQt19{3z$nv?X`MX%sHR5#{5wsWb@`DpSU_FN$xMl@hERwkIAgdmj6g>=NDVI^-A4 zr)bJNNAct3oSB@Vhe|7@%`CjxOzK!V#1soCEjiDiDg}T4uuo{wzS+6!$_zKRT#&s>tHVU<@n>*B~MzEA%BGjiO48{REyrWaTHvAqM zkLs6b>gXOme_@g_UCv^kvmsJ;1jb5r26#;u9{&~5hBhP1O3sh8|C%*Quk(zoY{6XVwzyU7-i_~=ly(FJ%b?`c3nnr(#5NW~2VE@L zTT%)bS8-MqLyIiU(7T_LaVf1zg}I@NchJb-&LXh+NZAnzQC+Y6j>Do4O6+;{SXwTM z6zBc7;GSuW-dQP{;Qi9O2cHsHjV8Q?KJdink7_Z%@7jzd3$mNOe94kRf` zW-7;7pvS3%&zew2v-TA>biVArcup@=Z9}#lCvg<;s;aT03dgKg5_OFqJ>|3(b1H?l z26%?HsvF?ZTRo1(64dTX8Vnd3W;=W2n097+aGn!K8!4ke{cbfVefA(d=t`Hjah$4H zcMD2;hF!GCx6TYTIb&1WA9rM+UtdpXhs@2U2{R*T7ucsT7dQ7+i!={`KXbZy>C2?Z zHjhpuXy&)kYA0?g#v&^(1!CrmOodxBhmJewDm;OT$TxU@V6K3(%22^H{3V|%$D#dL zPK%e2ou&hkOs^b=^{Y@$KXdu7nx8)x0i(^D0MDlAh%a9}UWLssqARv`dp+?JdehsA zCcvQkbN5zgcMnJU*6K#3cq)W_JfPSG`+RA2+V2RqoYM-s-uEyWAJ&Jeyd^1=ceH@- zfe!yc^Z3h`CcHh1^K@NrbSmx7=W_`%vQwDboUbjI#+8p8-xz{wdhA)YX@gOFofnl5 z_?JsTos~H9r_j!&(9V6zitRW-u)8m>in}@6Z5roFM=G=fb!7&s1w}?v1bpGLyqiFJ z(uC$r0#Z|Og(t;c2+dBWSH5*cr&~h&Pp)8cG%pU05B4sF-|;1$fyM9w$=()w!;1s) z{+EL{P{3GbGK{C4nbfwhD;topZa;5}wY}3fZw~e@*EYmuyW8Wj)Cqsqaz=oP2VVVll1e=X9Xdar1oV2^bS$EURbUdmsbhQ zD@0}?mPPpgC2jjt@qd=^pE{HcrSpf5qerUa9{jiM-R9kA#Y&F~PI-CLFUCPl-_b3}-7AV~uh|f5eo68}7_L)0dj#_YRf)dc|;68huw;_>2q!OZV@d z0C$b+{bO4u<_R_cL-v)qUV5Y>0GbKbZ5$xOJB>xL(v3K_N zkqFUS^@1V9yh4XY-M?2Y8haRi$g-)M-Of@f4%YiBlR>EOXICr-ty%)3I1ZJJR **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -$ helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. 
The following table lists the configurable parameters of the PostgreSQL chart and their default values.
`fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | -| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | -| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | -| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | -| `ldap.enabled` | Enable LDAP support | `false` | -| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | -| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | -| `ldap.server` | IP address or name of the LDAP server. | `nil` | -| `ldap.port` | Port number on the LDAP server to connect to | `nil` | -| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
`postgresql-ldap-password` which will be used to authenticate on LDAP.
| `nil` | -| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) | _random 10 character alphanumeric string_ | -| `postgresqlUsername` | PostgreSQL admin user | `postgres` | -| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ | -| `postgresqlDatabase` | PostgreSQL database | `nil` | -| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | -| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | -| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | -| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | -| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | -| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | -| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | -| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | -| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | -| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | -| `initdbScripts` | Dictionary of initdb scripts | `nil` | -| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | -| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | -| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | -| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | -| `service.type` | Kubernetes Service type | `ClusterIP` | -| `service.port` | PostgreSQL port | `5432` | -| `service.nodePort` | Kubernetes Service nodePort | `nil` | -| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | -| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | -| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | -| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | -| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | -| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | -| `persistence.enabled` | Enable persistence using PVC | `true` | -| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | -| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | -| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | -| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | -| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | -| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | -| `persistence.annotations` | Annotations for the PVC | `{}` | -| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | -| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | -| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | -| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | -| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | -| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | -| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | -| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | -| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | -| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | -| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | -| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | -| `master.sidecars` | Add additional containers to the pod | `[]` | -| `master.service.type` | Allows using a different service type for Master | `nil` | -| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | -| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | -| `slave.nodeSelector` | Node labels 
`serviceAccount.name`
`true` | -| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | -| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | -| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | -| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | -| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | -| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | -| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | -| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | -| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | -| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | -| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
loadBalancerIP if the PostgreSQL metrics service type is `LoadBalancer`
| `[]` | -| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | -| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | -| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | -| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | -| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | -| `metrics.customMetrics` | Additional custom metrics | `nil` | -| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | -| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | -| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | -| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | -| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | -| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | -| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | -| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | -| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | -| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | -| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | -| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | -| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | -| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | -| `psp.create` | Create Pod Security Policy | `false` | -| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | - - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, - -```console -$ helm install my-release \ - --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ - bitnami/postgresql -``` - -The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. - -Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, - -```console -$ helm install my-release -f values.yaml bitnami/postgresql -``` - -> **Tip**: You can use the default [values.yaml](values.yaml) - -## Configuration and installation details - -### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) - -It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. - -Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. - -### Production configuration and horizontal scaling - -This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
This Helm chart also supports customizing the whole configuration file.
This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. - -Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. - -In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. - -### Allow settings to be loaded from files other than the default `postgresql.conf` - -If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. -Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. - -Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. - -### Initialize a fresh instance - -The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. - -Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. - -In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
- -### Use of global variables - -In more complex scenarios, we may have the following tree of dependencies - -``` - +--------------+ - | | - +------------+ Chart 1 +-----------+ - | | | | - | --------+------+ | - | | | - | | | - | | | - | | | - v v v -+-------+------+ +--------+------+ +--------+------+ -| | | | | | -| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | -| | | | | | -+--------------+ +---------------+ +---------------+ -``` - -The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: - -``` -postgresql.postgresqlPassword=testtest -subchart1.postgresql.postgresqlPassword=testtest -subchart2.postgresql.postgresqlPassword=testtest -postgresql.postgresqlDatabase=db1 -subchart1.postgresql.postgresqlDatabase=db1 -subchart2.postgresql.postgresqlDatabase=db1 -``` - -If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: - -``` -global.postgresql.postgresqlPassword=testtest -global.postgresql.postgresqlDatabase=db1 -``` - -This way, the credentials will be available in all of the subcharts. - -## Persistence - -The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. - -Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. -See the [Parameters](#parameters) section to configure the PVC or to disable persistence. 
If you need to use that data, please convert it to SQL and import it after `helm install` has finished.
A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. -- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false - -### Deploy chart using Docker Official PostgreSQL Image - -From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. -Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. - -``` -image.repository=postgres -image.tag=10.6 -postgresqlDataDir=/data/pgdata -persistence.mountPath=/data/ -``` - -## Upgrade - -It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: - -```bash -$ helm upgrade my-release stable/postgresql \ - --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ - --set replication.password=[REPLICATION_PASSWORD] -``` - -> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. - -## 8.0.0 - -Prefixes the port names with their protocols to comply with Istio conventions. - -If you depend on the port names in your setup, make sure to update them to reflect this change. - -## 7.1.0 - -Adds support for LDAP configuration. - -## 7.0.0 - -Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. 
Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. - -In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. - -This major version bump signifies this change. - -## 6.5.7 - -In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: - -- protobuf -- protobuf-c -- json-c -- geos -- proj - -## 5.0.0 - -In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). - -For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: - -```console -Welcome to the Bitnami postgresql container -Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql -Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues -Send us your feedback at containers@bitnami.com - -INFO ==> ** Starting PostgreSQL setup ** -NFO ==> Validating settings in POSTGRESQL_* env vars.. -INFO ==> Initializing PostgreSQL database... -INFO ==> postgresql.conf file not detected. Generating it... -INFO ==> pg_hba.conf file not detected. Generating it... -INFO ==> Deploying PostgreSQL with persisted data... 
-INFO ==> Configuring replication parameters -INFO ==> Loading custom scripts... -INFO ==> Enabling remote connections -INFO ==> Stopping PostgreSQL... -INFO ==> ** PostgreSQL setup finished! ** - -INFO ==> ** Starting PostgreSQL ** - [1] FATAL: database files are incompatible with server - [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. -``` - -In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one. - -### 4.0.0 - -This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. - -IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error - -``` -The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development -``` - -### 3.0.0 - -This releases make it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. -It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. - -#### Breaking changes - -- `affinty` has been renamed to `master.affinity` and `slave.affinity`. -- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. 
-- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. - -### 2.0.0 - -In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: - - - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running - -```console -$ kubectl get svc -``` - -- Install (not upgrade) the new version - -```console -$ helm repo update -$ helm install my-release bitnami/postgresql -``` - -- Connect to the new pod (you can obtain the name by running `kubectl get pods`): - -```console -$ kubectl exec -it NAME bash -``` - -- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: - -```console -$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql -``` - -After run above command you should be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). -This operation could take some time depending on the database size. - -- Once you have the backup file, you can restore it with a command like the one below: - -```console -$ psql -U postgres DATABASE_NAME < /tmp/backup.sql -``` - -In this case, you are accessing to the local postgresql, so the password should be the new one (you can find it in NOTES.txt). - -If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. 
- -```console -$ psql -U postgres -postgres=# drop database DATABASE_NAME; -postgres=# create database DATABASE_NAME; -postgres=# create user USER_NAME; -postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; -postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; -postgres=# alter database DATABASE_NAME owner to USER_NAME; -``` diff --git a/chart/charts/postgresql/ci/commonAnnotations.yaml b/chart/charts/postgresql/ci/commonAnnotations.yaml deleted file mode 100755 index a936299..0000000 --- a/chart/charts/postgresql/ci/commonAnnotations.yaml +++ /dev/null @@ -1,4 +0,0 @@ -commonAnnotations: - helm.sh/hook: "pre-install, pre-upgrade" - helm.sh/hook-weight: "-1" - diff --git a/chart/charts/postgresql/ci/default-values.yaml b/chart/charts/postgresql/ci/default-values.yaml deleted file mode 100755 index fc2ba60..0000000 --- a/chart/charts/postgresql/ci/default-values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/chart/charts/postgresql/ci/shmvolume-disabled-values.yaml b/chart/charts/postgresql/ci/shmvolume-disabled-values.yaml deleted file mode 100755 index 347d3b4..0000000 --- a/chart/charts/postgresql/ci/shmvolume-disabled-values.yaml +++ /dev/null @@ -1,2 +0,0 @@ -shmVolume: - enabled: false diff --git a/chart/charts/postgresql/files/README.md b/chart/charts/postgresql/files/README.md deleted file mode 100755 index 1813a2f..0000000 --- a/chart/charts/postgresql/files/README.md +++ /dev/null @@ -1 +0,0 @@ -Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. 
diff --git a/chart/charts/postgresql/files/conf.d/README.md b/chart/charts/postgresql/files/conf.d/README.md deleted file mode 100755 index 184c187..0000000 --- a/chart/charts/postgresql/files/conf.d/README.md +++ /dev/null @@ -1,4 +0,0 @@ -If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. -These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. - -More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/chart/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/chart/charts/postgresql/files/docker-entrypoint-initdb.d/README.md deleted file mode 100755 index cba3809..0000000 --- a/chart/charts/postgresql/files/docker-entrypoint-initdb.d/README.md +++ /dev/null @@ -1,3 +0,0 @@ -You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. - -More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/chart/charts/postgresql/templates/NOTES.txt b/chart/charts/postgresql/templates/NOTES.txt deleted file mode 100755 index 3b5e6c6..0000000 --- a/chart/charts/postgresql/templates/NOTES.txt +++ /dev/null @@ -1,60 +0,0 @@ -** Please be patient while the chart is being deployed ** - -PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: - - {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection -{{- if .Values.replication.enabled }} - {{ template "postgresql.fullname" . 
}}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection -{{- end }} - -{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} - -To get the password for "postgres" run: - - export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) -{{- end }} - -To get the password for "{{ template "postgresql.username" . }}" run: - - export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) - -To connect to your database run the following command: - - kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} - --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} - -{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} -Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. 
-{{- end }} - -To connect to your database from outside the cluster execute the following commands: - -{{- if contains "NodePort" .Values.service.type }} - - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) - {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} - -{{- else if contains "LoadBalancer" .Values.service.type }} - - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} - -{{- else if contains "ClusterIP" .Values.service.type }} - - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & - {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . 
}} - -{{- end }} - -{{- include "postgresql.validateValues" . -}} - -{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} - -WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. -+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ - -{{- end }} diff --git a/chart/charts/postgresql/templates/_helpers.tpl b/chart/charts/postgresql/templates/_helpers.tpl deleted file mode 100755 index e13caad..0000000 --- a/chart/charts/postgresql/templates/_helpers.tpl +++ /dev/null @@ -1,452 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "postgresql.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "postgresql.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} -{{- define "postgresql.master.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} -{{- if .Values.replication.enabled -}} -{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for networkpolicy. -*/}} -{{- define "postgresql.networkPolicy.apiVersion" -}} -{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} -"extensions/v1beta1" -{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} -"networking.k8s.io/v1" -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "postgresql.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Return the proper PostgreSQL image name -*/}} -{{- define "postgresql.image" -}} -{{- $registryName := .Values.image.registry -}} -{{- $repositoryName := .Values.image.repository -}} -{{- $tag := .Values.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
-Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return PostgreSQL postgres user password -*/}} -{{- define "postgresql.postgres.password" -}} -{{- if .Values.global.postgresql.postgresqlPostgresPassword }} - {{- .Values.global.postgresql.postgresqlPostgresPassword -}} -{{- else if .Values.postgresqlPostgresPassword -}} - {{- .Values.postgresqlPostgresPassword -}} -{{- else -}} - {{- randAlphaNum 10 -}} -{{- end -}} -{{- end -}} - -{{/* -Return PostgreSQL password -*/}} -{{- define "postgresql.password" -}} -{{- if .Values.global.postgresql.postgresqlPassword }} - {{- .Values.global.postgresql.postgresqlPassword -}} -{{- else if .Values.postgresqlPassword -}} - {{- .Values.postgresqlPassword -}} -{{- else -}} - {{- randAlphaNum 10 -}} -{{- end -}} -{{- end -}} - -{{/* -Return PostgreSQL replication password -*/}} -{{- define "postgresql.replication.password" -}} -{{- if .Values.global.postgresql.replicationPassword }} - {{- .Values.global.postgresql.replicationPassword -}} -{{- else if .Values.replication.password -}} - {{- .Values.replication.password -}} -{{- else -}} - {{- randAlphaNum 10 -}} -{{- end -}} -{{- end -}} - -{{/* -Return PostgreSQL username -*/}} -{{- define "postgresql.username" -}} -{{- if .Values.global.postgresql.postgresqlUsername }} - {{- .Values.global.postgresql.postgresqlUsername -}} -{{- else -}} - {{- .Values.postgresqlUsername -}} -{{- end -}} -{{- end -}} - - -{{/* -Return PostgreSQL replication username -*/}} -{{- define "postgresql.replication.username" -}} -{{- if .Values.global.postgresql.replicationUser }} - {{- .Values.global.postgresql.replicationUser -}} 
-{{- else -}} - {{- .Values.replication.user -}} -{{- end -}} -{{- end -}} - -{{/* -Return PostgreSQL port -*/}} -{{- define "postgresql.port" -}} -{{- if .Values.global.postgresql.servicePort }} - {{- .Values.global.postgresql.servicePort -}} -{{- else -}} - {{- .Values.service.port -}} -{{- end -}} -{{- end -}} - -{{/* -Return PostgreSQL created database -*/}} -{{- define "postgresql.database" -}} -{{- if .Values.global.postgresql.postgresqlDatabase }} - {{- .Values.global.postgresql.postgresqlDatabase -}} -{{- else if .Values.postgresqlDatabase -}} - {{- .Values.postgresqlDatabase -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper image name to change the volume permissions -*/}} -{{- define "postgresql.volumePermissions.image" -}} -{{- $registryName := .Values.volumePermissions.image.registry -}} -{{- $repositoryName := .Values.volumePermissions.image.repository -}} -{{- $tag := .Values.volumePermissions.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
-Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper PostgreSQL metrics image name -*/}} -{{- define "postgresql.metrics.image" -}} -{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} -{{- $repositoryName := .Values.metrics.image.repository -}} -{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Get the password secret. -*/}} -{{- define "postgresql.secretName" -}} -{{- if .Values.global.postgresql.existingSecret }} - {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} -{{- else if .Values.existingSecret -}} - {{- printf "%s" (tpl .Values.existingSecret $) -}} -{{- else -}} - {{- printf "%s" (include "postgresql.fullname" .) 
-}} -{{- end -}} -{{- end -}} - -{{/* -Return true if a secret object should be created -*/}} -{{- define "postgresql.createSecret" -}} -{{- if .Values.global.postgresql.existingSecret }} -{{- else if .Values.existingSecret -}} -{{- else -}} - {{- true -}} -{{- end -}} -{{- end -}} - -{{/* -Get the configuration ConfigMap name. -*/}} -{{- define "postgresql.configurationCM" -}} -{{- if .Values.configurationConfigMap -}} -{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} -{{- else -}} -{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} -{{- end -}} -{{- end -}} - -{{/* -Get the extended configuration ConfigMap name. -*/}} -{{- define "postgresql.extendedConfigurationCM" -}} -{{- if .Values.extendedConfConfigMap -}} -{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} -{{- else -}} -{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} -{{- end -}} -{{- end -}} - -{{/* -Return true if a configmap should be mounted with PostgreSQL configuration -*/}} -{{- define "postgresql.mountConfigurationCM" -}} -{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} - {{- true -}} -{{- end -}} -{{- end -}} - -{{/* -Get the initialization scripts ConfigMap name. -*/}} -{{- define "postgresql.initdbScriptsCM" -}} -{{- if .Values.initdbScriptsConfigMap -}} -{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} -{{- else -}} -{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} -{{- end -}} -{{- end -}} - -{{/* -Get the initialization scripts Secret name. -*/}} -{{- define "postgresql.initdbScriptsSecret" -}} -{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} -{{- end -}} - -{{/* -Get the metrics ConfigMap name. -*/}} -{{- define "postgresql.metricsCM" -}} -{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "postgresql.imagePullSecrets" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. -Also, we can not use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.global.imagePullSecrets }} - - name: {{ . }} -{{- end }} -{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.metrics.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.metrics.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- end -}} - -{{/* -Get the readiness probe command -*/}} -{{- define "postgresql.readinessProbeCommand" -}} -- | -{{- if (include "postgresql.database" .) }} - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} -{{- else }} - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} -{{- end }} -{{- if contains "bitnami/" .Values.image.repository }} - [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] -{{- end -}} -{{- end -}} - -{{/* -Return the proper Storage Class -*/}} -{{- define "postgresql.storageClass" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. -*/}} -{{- if .Values.global -}} - {{- if .Values.global.storageClass -}} - {{- if (eq "-" .Values.global.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.global.storageClass -}} - {{- end -}} - {{- else -}} - {{- if .Values.persistence.storageClass -}} - {{- if (eq "-" .Values.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- else -}} - {{- if .Values.persistence.storageClass -}} - {{- if (eq "-" .Values.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} - {{- end -}} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Renders a value that contains template. -Usage: -{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} -*/}} -{{- define "postgresql.tplValue" -}} - {{- if typeIs "string" .value }} - {{- tpl .value .context }} - {{- else }} - {{- tpl (.value | toYaml) .context }} - {{- end }} -{{- end -}} - -{{/* -Return the appropriate apiVersion for statefulset. -*/}} -{{- define "postgresql.statefulset.apiVersion" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "apps/v1beta2" -}} -{{- else -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Compile all warnings into a single message, and call fail. 
-*/}} -{{- define "postgresql.validateValues" -}} -{{- $messages := list -}} -{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} -{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} -{{- $messages := without $messages "" -}} -{{- $message := join "\n" $messages -}} - -{{- if $message -}} -{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} -{{- end -}} -{{- end -}} - -{{/* -Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap -*/}} -{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} -{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} -postgresql: ldap.url, ldap.server - You cannot set both `ldap.url` and `ldap.server` at the same time. - Please provide a unique way to configure LDAP. - More info at https://www.postgresql.org/docs/current/auth-ldap.html -{{- end -}} -{{- end -}} - -{{/* -Validate values of Postgresql - If PSP is enabled RBAC should be enabled too -*/}} -{{- define "postgresql.validateValues.psp" -}} -{{- if and .Values.psp.create (not .Values.rbac.create) }} -postgresql: psp.create, rbac.create - RBAC should be enabled if PSP is enabled in order for PSP to work. - More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for podsecuritypolicy. 
-*/}} -{{- define "podsecuritypolicy.apiVersion" -}} -{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "extensions/v1beta1" -}} -{{- else -}} -{{- print "policy/v1beta1" -}} -{{- end -}} -{{- end -}} \ No newline at end of file diff --git a/chart/charts/postgresql/templates/configmap.yaml b/chart/charts/postgresql/templates/configmap.yaml deleted file mode 100755 index 18ca98e..0000000 --- a/chart/charts/postgresql/templates/configmap.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "postgresql.fullname" . }}-configuration - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: -{{- if (.Files.Glob "files/postgresql.conf") }} -{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} -{{- else if .Values.postgresqlConfiguration }} - postgresql.conf: | -{{- range $key, $value := default dict .Values.postgresqlConfiguration }} - {{ $key | snakecase }}={{ $value }} -{{- end }} -{{- end }} -{{- if (.Files.Glob "files/pg_hba.conf") }} -{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} -{{- else if .Values.pgHbaConfiguration }} - pg_hba.conf: | -{{ .Values.pgHbaConfiguration | indent 4 }} -{{- end }} -{{ end }} diff --git a/chart/charts/postgresql/templates/extended-config-configmap.yaml b/chart/charts/postgresql/templates/extended-config-configmap.yaml deleted file mode 100755 index 04fc917..0000000 --- a/chart/charts/postgresql/templates/extended-config-configmap.yaml +++ /dev/null @@ -1,24 
+0,0 @@ -{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "postgresql.fullname" . }}-extended-configuration - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: -{{- with .Files.Glob "files/conf.d/*.conf" }} -{{ .AsConfig | indent 2 }} -{{- end }} -{{ with .Values.postgresqlExtendedConf }} - override.conf: | -{{- range $key, $value := . }} - {{ $key | snakecase }}={{ $value }} -{{- end }} -{{- end }} -{{- end }} diff --git a/chart/charts/postgresql/templates/initialization-configmap.yaml b/chart/charts/postgresql/templates/initialization-configmap.yaml deleted file mode 100755 index 3c489bd..0000000 --- a/chart/charts/postgresql/templates/initialization-configmap.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "postgresql.fullname" . }}-init-scripts - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} -binaryData: -{{- range $path, $bytes := . 
}} - {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} -{{- end }} -{{- end }} -data: -{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} -{{ .AsConfig | indent 2 }} -{{- end }} -{{- with .Values.initdbScripts }} -{{ toYaml . | indent 2 }} -{{- end }} -{{- end }} diff --git a/chart/charts/postgresql/templates/metrics-configmap.yaml b/chart/charts/postgresql/templates/metrics-configmap.yaml deleted file mode 100755 index c812292..0000000 --- a/chart/charts/postgresql/templates/metrics-configmap.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "postgresql.metricsCM" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: - custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} -{{- end }} diff --git a/chart/charts/postgresql/templates/metrics-svc.yaml b/chart/charts/postgresql/templates/metrics-svc.yaml deleted file mode 100755 index 69f1a8d..0000000 --- a/chart/charts/postgresql/templates/metrics-svc.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if .Values.metrics.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "postgresql.fullname" . }}-metrics - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . 
}} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- toYaml .Values.metrics.service.annotations | nindent 4 }} -spec: - type: {{ .Values.metrics.service.type }} - {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} - {{- end }} - ports: - - name: http-metrics - port: 9187 - targetPort: http-metrics - selector: - app: {{ template "postgresql.name" . }} - release: {{ .Release.Name }} - role: master -{{- end }} diff --git a/chart/charts/postgresql/templates/networkpolicy.yaml b/chart/charts/postgresql/templates/networkpolicy.yaml deleted file mode 100755 index 340cb58..0000000 --- a/chart/charts/postgresql/templates/networkpolicy.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.networkPolicy.enabled }} -kind: NetworkPolicy -apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} -metadata: - name: {{ template "postgresql.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - podSelector: - matchLabels: - app: {{ template "postgresql.name" . }} - release: {{ .Release.Name | quote }} - ingress: - # Allow inbound connections - - ports: - - port: {{ template "postgresql.port" . }} - {{- if not .Values.networkPolicy.allowExternal }} - from: - - podSelector: - matchLabels: - {{ template "postgresql.fullname" . 
}}-client: "true" - {{- if .Values.networkPolicy.explicitNamespacesSelector }} - namespaceSelector: -{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} - {{- end }} - - podSelector: - matchLabels: - app: {{ template "postgresql.name" . }} - release: {{ .Release.Name | quote }} - role: slave - {{- end }} - # Allow prometheus scrapes - - ports: - - port: 9187 -{{- end }} diff --git a/chart/charts/postgresql/templates/podsecuritypolicy.yaml b/chart/charts/postgresql/templates/podsecuritypolicy.yaml deleted file mode 100755 index 6b15374..0000000 --- a/chart/charts/postgresql/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.psp.create }} -apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "postgresql.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - privileged: false - volumes: - - 'configMap' - - 'secret' - - 'persistentVolumeClaim' - - 'emptyDir' - - 'projected' - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} diff --git a/chart/charts/postgresql/templates/prometheusrule.yaml b/chart/charts/postgresql/templates/prometheusrule.yaml deleted file mode 100755 index 917b3ea..0000000 --- a/chart/charts/postgresql/templates/prometheusrule.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} -apiVersion: 
monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ template "postgresql.fullname" . }} -{{- with .Values.metrics.prometheusRule.namespace }} - namespace: {{ . }} -{{- end }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- with .Values.metrics.prometheusRule.additionalLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: -{{- with .Values.metrics.prometheusRule.rules }} - groups: - - name: {{ template "postgresql.name" $ }} - rules: {{ tpl (toYaml .) $ | nindent 8 }} -{{- end }} -{{- end }} diff --git a/chart/charts/postgresql/templates/role.yaml b/chart/charts/postgresql/templates/role.yaml deleted file mode 100755 index c99842a..0000000 --- a/chart/charts/postgresql/templates/role.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.rbac.create }} -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "postgresql.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -rules: - {{- if .Values.psp.create }} - - apiGroups: ["extensions"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: - - {{ template "postgresql.fullname" . 
}} - {{- end }} -{{- end }} diff --git a/chart/charts/postgresql/templates/rolebinding.yaml b/chart/charts/postgresql/templates/rolebinding.yaml deleted file mode 100755 index b61bee2..0000000 --- a/chart/charts/postgresql/templates/rolebinding.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.rbac.create }} -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "postgresql.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -roleRef: - kind: Role - name: {{ template "postgresql.fullname" . }} - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/chart/charts/postgresql/templates/secrets.yaml b/chart/charts/postgresql/templates/secrets.yaml deleted file mode 100755 index 12a2b7c..0000000 --- a/chart/charts/postgresql/templates/secrets.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if (include "postgresql.createSecret" .) }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "postgresql.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -type: Opaque -data: - {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} - postgresql-postgres-password: {{ include "postgresql.postgres.password" . 
| b64enc | quote }} - {{- end }} - postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} - {{- if .Values.replication.enabled }} - postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} - {{- end }} - {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} - postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} - {{- end }} -{{- end -}} diff --git a/chart/charts/postgresql/templates/serviceaccount.yaml b/chart/charts/postgresql/templates/serviceaccount.yaml deleted file mode 100755 index 7583136..0000000 --- a/chart/charts/postgresql/templates/serviceaccount.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - name: {{ template "postgresql.fullname" . }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -{{- end }} diff --git a/chart/charts/postgresql/templates/servicemonitor.yaml b/chart/charts/postgresql/templates/servicemonitor.yaml deleted file mode 100755 index ec7df64..0000000 --- a/chart/charts/postgresql/templates/servicemonitor.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "postgresql.fullname" . }} - {{- if .Values.metrics.serviceMonitor.namespace }} - namespace: {{ .Values.metrics.serviceMonitor.namespace }} - {{- end }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . 
}} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.metrics.serviceMonitor.additionalLabels }} - {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - -spec: - endpoints: - - port: http-metrics - {{- if .Values.metrics.serviceMonitor.interval }} - interval: {{ .Values.metrics.serviceMonitor.interval }} - {{- end }} - {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} - {{- end }} - namespaceSelector: - matchNames: - - {{ .Release.Namespace }} - selector: - matchLabels: - app: {{ template "postgresql.name" . }} - release: {{ .Release.Name }} -{{- end }} diff --git a/chart/charts/postgresql/templates/statefulset-slaves.yaml b/chart/charts/postgresql/templates/statefulset-slaves.yaml deleted file mode 100755 index 179841f..0000000 --- a/chart/charts/postgresql/templates/statefulset-slaves.yaml +++ /dev/null @@ -1,302 +0,0 @@ -{{- if .Values.replication.enabled }} -apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} -kind: StatefulSet -metadata: - name: "{{ template "postgresql.fullname" . }}-slave" - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} -{{- with .Values.slave.labels }} -{{ toYaml . | indent 4 }} -{{- end }} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- with .Values.slave.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - serviceName: {{ template "postgresql.fullname" . 
}}-headless - replicas: {{ .Values.replication.slaveReplicas }} - selector: - matchLabels: - app: {{ template "postgresql.name" . }} - release: {{ .Release.Name | quote }} - role: slave - template: - metadata: - name: {{ template "postgresql.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - role: slave -{{- with .Values.slave.podLabels }} -{{ toYaml . | indent 8 }} -{{- end }} -{{- with .Values.slave.podAnnotations }} - annotations: -{{ toYaml . | indent 8 }} -{{- end }} - spec: - {{- if .Values.schedulerName }} - schedulerName: "{{ .Values.schedulerName }}" - {{- end }} -{{- include "postgresql.imagePullSecrets" . | indent 6 }} - {{- if .Values.slave.nodeSelector }} - nodeSelector: -{{ toYaml .Values.slave.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.slave.affinity }} - affinity: -{{ toYaml .Values.slave.affinity | indent 8 }} - {{- end }} - {{- if .Values.slave.tolerations }} - tolerations: -{{ toYaml .Values.slave.tolerations | indent 8 }} - {{- end }} - {{- if .Values.terminationGracePeriodSeconds }} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} - {{- end }} - {{- if .Values.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.securityContext.fsGroup }} - {{- end }} - {{- if .Values.serviceAccount.enabled }} - serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} - {{- end }} - {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} - initContainers: - {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} - - name: init-chmod-data - image: {{ template "postgresql.volumePermissions.image" . 
}} - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - {{- if .Values.resources }} - resources: {{- toYaml .Values.resources | nindent 12 }} - {{- end }} - command: - - /bin/sh - - -cx - - | - {{- if .Values.persistence.enabled }} - mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} - chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} - find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ - {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} - xargs chown -R `id -u`:`id -G | cut -d " " -f2` - {{- else }} - xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} - {{- end }} - {{- end }} - {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} - chmod -R 777 /dev/shm - {{- end }} - {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} - securityContext: - {{- else }} - securityContext: - runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} - {{- end }} - volumeMounts: - {{ if .Values.persistence.enabled }} - - name: data - mountPath: {{ .Values.persistence.mountPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} - {{- if .Values.shmVolume.enabled }} - - name: dshm - mountPath: /dev/shm - {{- end }} - {{- end }} - {{- if .Values.slave.extraInitContainers }} -{{ tpl .Values.slave.extraInitContainers . | indent 8 }} - {{- end }} - {{- end }} - {{- if .Values.slave.priorityClassName }} - priorityClassName: {{ .Values.slave.priorityClassName }} - {{- end }} - containers: - - name: {{ template "postgresql.fullname" . 
}} - image: {{ template "postgresql.image" . }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - {{- if .Values.resources }} - resources: {{- toYaml .Values.resources | nindent 12 }} - {{- end }} - {{- if .Values.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - {{- end }} - env: - - name: BITNAMI_DEBUG - value: {{ ternary "true" "false" .Values.image.debug | quote }} - - name: POSTGRESQL_VOLUME_DIR - value: "{{ .Values.persistence.mountPath }}" - - name: POSTGRESQL_PORT_NUMBER - value: "{{ template "postgresql.port" . }}" - {{- if .Values.persistence.mountPath }} - - name: PGDATA - value: {{ .Values.postgresqlDataDir | quote }} - {{- end }} - - name: POSTGRES_REPLICATION_MODE - value: "slave" - - name: POSTGRES_REPLICATION_USER - value: {{ include "postgresql.replication.username" . | quote }} - {{- if .Values.usePasswordFile }} - - name: POSTGRES_REPLICATION_PASSWORD_FILE - value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" - {{- else }} - - name: POSTGRES_REPLICATION_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "postgresql.secretName" . }} - key: postgresql-replication-password - {{- end }} - - name: POSTGRES_CLUSTER_APP_NAME - value: {{ .Values.replication.applicationName }} - - name: POSTGRES_MASTER_HOST - value: {{ template "postgresql.fullname" . }} - - name: POSTGRES_MASTER_PORT_NUMBER - value: {{ include "postgresql.port" . | quote }} - {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} - {{- if .Values.usePasswordFile }} - - name: POSTGRES_POSTGRES_PASSWORD_FILE - value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" - {{- else }} - - name: POSTGRES_POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "postgresql.secretName" . 
}} - key: postgresql-postgres-password - {{- end }} - {{- end }} - {{- if .Values.usePasswordFile }} - - name: POSTGRES_PASSWORD_FILE - value: "/opt/bitnami/postgresql/secrets/postgresql-password" - {{- else }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "postgresql.secretName" . }} - key: postgresql-password - {{- end }} - ports: - - name: tcp-postgresql - containerPort: {{ template "postgresql.port" . }} - {{- if .Values.livenessProbe.enabled }} - livenessProbe: - exec: - command: - - /bin/sh - - -c - {{- if (include "postgresql.database" .) }} - - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} - {{- else }} - - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} - {{- end }} - initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: - exec: - command: - - /bin/sh - - -c - - -e - {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} - initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.readinessProbe.failureThreshold }} - {{- end }} - volumeMounts: - {{- if .Values.usePasswordFile }} - - name: postgresql-password - mountPath: /opt/bitnami/postgresql/secrets/ - {{- end }} - {{- if .Values.shmVolume.enabled }} - - name: dshm - mountPath: /dev/shm - {{- end }} - {{- if .Values.persistence.enabled }} - - name: data - mountPath: {{ .Values.persistence.mountPath }} - subPath: {{ .Values.persistence.subPath }} - {{ end }} - {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} - - name: postgresql-extended-config - mountPath: /bitnami/postgresql/conf/conf.d/ - {{- end }} - {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} - - name: postgresql-config - mountPath: /bitnami/postgresql/conf - {{- end }} - {{- if .Values.slave.extraVolumeMounts }} - {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} - {{- end }} -{{- if .Values.slave.sidecars }} -{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} -{{- end }} - volumes: - {{- if .Values.usePasswordFile }} - - name: postgresql-password - secret: - secretName: {{ template "postgresql.secretName" . }} - {{- end }} - {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} - - name: postgresql-config - configMap: - name: {{ template "postgresql.configurationCM" . 
}} - {{- end }} - {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} - - name: postgresql-extended-config - configMap: - name: {{ template "postgresql.extendedConfigurationCM" . }} - {{- end }} - {{- if .Values.shmVolume.enabled }} - - name: dshm - emptyDir: - medium: Memory - sizeLimit: 1Gi - {{- end }} - {{- if not .Values.persistence.enabled }} - - name: data - emptyDir: {} - {{- end }} - {{- if .Values.slave.extraVolumes }} - {{- toYaml .Values.slave.extraVolumes | nindent 8 }} - {{- end }} - updateStrategy: - type: {{ .Values.updateStrategy.type }} - {{- if (eq "Recreate" .Values.updateStrategy.type) }} - rollingUpdate: null - {{- end }} -{{- if .Values.persistence.enabled }} - volumeClaimTemplates: - - metadata: - name: data - {{- with .Values.persistence.annotations }} - annotations: - {{- range $key, $value := . }} - {{ $key }}: {{ $value }} - {{- end }} - {{- end }} - spec: - accessModes: - {{- range .Values.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.persistence.size | quote }} - {{ include "postgresql.storageClass" . }} -{{- end }} -{{- end }} diff --git a/chart/charts/postgresql/templates/statefulset.yaml b/chart/charts/postgresql/templates/statefulset.yaml deleted file mode 100755 index 9eb1cad..0000000 --- a/chart/charts/postgresql/templates/statefulset.yaml +++ /dev/null @@ -1,457 +0,0 @@ -apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} -kind: StatefulSet -metadata: - name: {{ template "postgresql.master.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- with .Values.master.labels }} - {{- toYaml . 
| nindent 4 }} - {{- end }} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- with .Values.slave.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - serviceName: {{ template "postgresql.fullname" . }}-headless - replicas: 1 - updateStrategy: - type: {{ .Values.updateStrategy.type }} - {{- if (eq "Recreate" .Values.updateStrategy.type) }} - rollingUpdate: null - {{- end }} - selector: - matchLabels: - app: {{ template "postgresql.name" . }} - release: {{ .Release.Name | quote }} - role: master - template: - metadata: - name: {{ template "postgresql.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - role: master - {{- with .Values.master.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.master.podAnnotations }} - annotations: {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- if .Values.schedulerName }} - schedulerName: "{{ .Values.schedulerName }}" - {{- end }} -{{- include "postgresql.imagePullSecrets" . | indent 6 }} - {{- if .Values.master.nodeSelector }} - nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} - {{- end }} - {{- if .Values.master.affinity }} - affinity: {{- toYaml .Values.master.affinity | nindent 8 }} - {{- end }} - {{- if .Values.master.tolerations }} - tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} - {{- end }} - {{- if .Values.terminationGracePeriodSeconds }} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} - {{- end }} - {{- if .Values.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.securityContext.fsGroup }} - {{- end }} - {{- if .Values.serviceAccount.enabled }} - serviceAccountName: {{ default (include "postgresql.fullname" . 
) .Values.serviceAccount.name }} - {{- end }} - {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} - initContainers: - {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} - - name: init-chmod-data - image: {{ template "postgresql.volumePermissions.image" . }} - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - {{- if .Values.resources }} - resources: {{- toYaml .Values.resources | nindent 12 }} - {{- end }} - command: - - /bin/sh - - -cx - - | - {{- if .Values.persistence.enabled }} - mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} - chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} - find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ - {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} - xargs chown -R `id -u`:`id -G | cut -d " " -f2` - {{- else }} - xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} - {{- end }} - {{- end }} - {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} - chmod -R 777 /dev/shm - {{- end }} - {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} - securityContext: - {{- else }} - securityContext: - runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} - {{- end }} - volumeMounts: - {{- if .Values.persistence.enabled }} - - name: data - mountPath: {{ .Values.persistence.mountPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} - {{- if .Values.shmVolume.enabled }} - - name: dshm - mountPath: /dev/shm - {{- end }} - {{- end }} - {{- if .Values.master.extraInitContainers }} - {{- tpl .Values.master.extraInitContainers . | nindent 8 }} - {{- end }} - {{- end }} - {{- if .Values.master.priorityClassName }} - priorityClassName: {{ .Values.master.priorityClassName }} - {{- end }} - containers: - - name: {{ template "postgresql.fullname" . }} - image: {{ template "postgresql.image" . }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - {{- if .Values.resources }} - resources: {{- toYaml .Values.resources | nindent 12 }} - {{- end }} - {{- if .Values.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - {{- end }} - env: - - name: BITNAMI_DEBUG - value: {{ ternary "true" "false" .Values.image.debug | quote }} - - name: POSTGRESQL_PORT_NUMBER - value: "{{ template "postgresql.port" . 
}}" - - name: POSTGRESQL_VOLUME_DIR - value: "{{ .Values.persistence.mountPath }}" - {{- if .Values.postgresqlInitdbArgs }} - - name: POSTGRES_INITDB_ARGS - value: {{ .Values.postgresqlInitdbArgs | quote }} - {{- end }} - {{- if .Values.postgresqlInitdbWalDir }} - - name: POSTGRES_INITDB_WALDIR - value: {{ .Values.postgresqlInitdbWalDir | quote }} - {{- end }} - {{- if .Values.initdbUser }} - - name: POSTGRESQL_INITSCRIPTS_USERNAME - value: {{ .Values.initdbUser }} - {{- end }} - {{- if .Values.initdbPassword }} - - name: POSTGRESQL_INITSCRIPTS_PASSWORD - value: {{ .Values.initdbPassword }} - {{- end }} - {{- if .Values.persistence.mountPath }} - - name: PGDATA - value: {{ .Values.postgresqlDataDir | quote }} - {{- end }} - {{- if .Values.replication.enabled }} - - name: POSTGRES_REPLICATION_MODE - value: "master" - - name: POSTGRES_REPLICATION_USER - value: {{ include "postgresql.replication.username" . | quote }} - {{- if .Values.usePasswordFile }} - - name: POSTGRES_REPLICATION_PASSWORD_FILE - value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" - {{- else }} - - name: POSTGRES_REPLICATION_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "postgresql.secretName" . 
}} - key: postgresql-replication-password - {{- end }} - {{- if not (eq .Values.replication.synchronousCommit "off")}} - - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE - value: {{ .Values.replication.synchronousCommit | quote }} - - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS - value: {{ .Values.replication.numSynchronousReplicas | quote }} - {{- end }} - - name: POSTGRES_CLUSTER_APP_NAME - value: {{ .Values.replication.applicationName }} - {{- end }} - {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} - {{- if .Values.usePasswordFile }} - - name: POSTGRES_POSTGRES_PASSWORD_FILE - value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" - {{- else }} - - name: POSTGRES_POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "postgresql.secretName" . }} - key: postgresql-postgres-password - {{- end }} - {{- end }} - - name: POSTGRES_USER - value: {{ include "postgresql.username" . | quote }} - {{- if .Values.usePasswordFile }} - - name: POSTGRES_PASSWORD_FILE - value: "/opt/bitnami/postgresql/secrets/postgresql-password" - {{- else }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "postgresql.secretName" . }} - key: postgresql-password - {{- end }} - {{- if (include "postgresql.database" .) }} - - name: POSTGRES_DB - value: {{ (include "postgresql.database" .) 
| quote }} - {{- end }} - {{- if .Values.extraEnv }} - {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} - {{- end }} - - name: POSTGRESQL_ENABLE_LDAP - value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} - {{- if .Values.ldap.enabled }} - - name: POSTGRESQL_LDAP_SERVER - value: {{ .Values.ldap.server }} - - name: POSTGRESQL_LDAP_PORT - value: {{ .Values.ldap.port | quote }} - - name: POSTGRESQL_LDAP_SCHEME - value: {{ .Values.ldap.scheme }} - {{- if .Values.ldap.tls }} - - name: POSTGRESQL_LDAP_TLS - value: "1" - {{- end}} - - name: POSTGRESQL_LDAP_PREFIX - value: {{ .Values.ldap.prefix | quote }} - - name: POSTGRESQL_LDAP_SUFFIX - value: {{ .Values.ldap.suffix | quote}} - - name: POSTGRESQL_LDAP_BASE_DN - value: {{ .Values.ldap.baseDN }} - - name: POSTGRESQL_LDAP_BIND_DN - value: {{ .Values.ldap.bindDN }} - {{- if (not (empty .Values.ldap.bind_password)) }} - - name: POSTGRESQL_LDAP_BIND_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "postgresql.secretName" . }} - key: postgresql-ldap-password - {{- end}} - - name: POSTGRESQL_LDAP_SEARCH_ATTR - value: {{ .Values.ldap.search_attr }} - - name: POSTGRESQL_LDAP_SEARCH_FILTER - value: {{ .Values.ldap.search_filter }} - - name: POSTGRESQL_LDAP_URL - value: {{ .Values.ldap.url }} - {{- end}} - {{- if .Values.extraEnvVarsCM }} - envFrom: - - configMapRef: - name: {{ tpl .Values.extraEnvVarsCM . }} - {{- end }} - ports: - - name: tcp-postgresql - containerPort: {{ template "postgresql.port" . }} - {{- if .Values.livenessProbe.enabled }} - livenessProbe: - exec: - command: - - /bin/sh - - -c - {{- if (include "postgresql.database" .) }} - - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} - {{- else }} - - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} - {{- end }} - initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: - exec: - command: - - /bin/sh - - -c - - -e - {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} - initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.readinessProbe.failureThreshold }} - {{- end }} - volumeMounts: - {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d/ - {{- end }} - {{- if .Values.initdbScriptsSecret }} - - name: custom-init-scripts-secret - mountPath: /docker-entrypoint-initdb.d/secret - {{- end }} - {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} - - name: postgresql-extended-config - mountPath: /bitnami/postgresql/conf/conf.d/ - {{- end }} - {{- if .Values.usePasswordFile }} - - name: postgresql-password - mountPath: /opt/bitnami/postgresql/secrets/ - {{- end }} - {{- if .Values.shmVolume.enabled }} - - name: dshm - mountPath: /dev/shm - {{- end }} - {{- if .Values.persistence.enabled }} - - name: data - mountPath: {{ .Values.persistence.mountPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} - {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} - - 
name: postgresql-config - mountPath: /bitnami/postgresql/conf - {{- end }} - {{- if .Values.master.extraVolumeMounts }} - {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} - {{- end }} -{{- if .Values.master.sidecars }} -{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} -{{- end }} -{{- if .Values.metrics.enabled }} - - name: metrics - image: {{ template "postgresql.metrics.image" . }} - imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} - {{- if .Values.metrics.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.metrics.securityContext.runAsUser }} - {{- end }} - env: - {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} - - name: DATA_SOURCE_URI - value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }} - {{- if .Values.usePasswordFile }} - - name: DATA_SOURCE_PASS_FILE - value: "/opt/bitnami/postgresql/secrets/postgresql-password" - {{- else }} - - name: DATA_SOURCE_PASS - valueFrom: - secretKeyRef: - name: {{ template "postgresql.secretName" . }} - key: postgresql-password - {{- end }} - - name: DATA_SOURCE_USER - value: {{ template "postgresql.username" . 
}} - {{- if .Values.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: / - port: http-metrics - initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: - httpGet: - path: / - port: http-metrics - initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} - {{- end }} - volumeMounts: - {{- if .Values.usePasswordFile }} - - name: postgresql-password - mountPath: /opt/bitnami/postgresql/secrets/ - {{- end }} - {{- if .Values.metrics.customMetrics }} - - name: custom-metrics - mountPath: /conf - readOnly: true - args: ["--extend.query-path", "/conf/custom-metrics.yaml"] - {{- end }} - ports: - - name: http-metrics - containerPort: 9187 - {{- if .Values.metrics.resources }} - resources: {{- toYaml .Values.metrics.resources | nindent 12 }} - {{- end }} -{{- end }} - volumes: - {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} - - name: postgresql-config - configMap: - name: {{ template "postgresql.configurationCM" . }} - {{- end }} - {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} - - name: postgresql-extended-config - configMap: - name: {{ template "postgresql.extendedConfigurationCM" . 
}} - {{- end }} - {{- if .Values.usePasswordFile }} - - name: postgresql-password - secret: - secretName: {{ template "postgresql.secretName" . }} - {{- end }} - {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} - - name: custom-init-scripts - configMap: - name: {{ template "postgresql.initdbScriptsCM" . }} - {{- end }} - {{- if .Values.initdbScriptsSecret }} - - name: custom-init-scripts-secret - secret: - secretName: {{ template "postgresql.initdbScriptsSecret" . }} - {{- end }} - {{- if .Values.master.extraVolumes }} - {{- toYaml .Values.master.extraVolumes | nindent 8 }} - {{- end }} - {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} - - name: custom-metrics - configMap: - name: {{ template "postgresql.metricsCM" . }} - {{- end }} - {{- if .Values.shmVolume.enabled }} - - name: dshm - emptyDir: - medium: Memory - sizeLimit: 1Gi - {{- end }} -{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} - - name: data - persistentVolumeClaim: -{{- with .Values.persistence.existingClaim }} - claimName: {{ tpl . $ }} -{{- end }} -{{- else if not .Values.persistence.enabled }} - - name: data - emptyDir: {} -{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} - volumeClaimTemplates: - - metadata: - name: data - {{- with .Values.persistence.annotations }} - annotations: - {{- range $key, $value := . }} - {{ $key }}: {{ $value }} - {{- end }} - {{- end }} - spec: - accessModes: - {{- range .Values.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.persistence.size | quote }} - {{ include "postgresql.storageClass" . 
}} -{{- end }} diff --git a/chart/charts/postgresql/templates/svc-headless.yaml b/chart/charts/postgresql/templates/svc-headless.yaml deleted file mode 100755 index 6f31bc8..0000000 --- a/chart/charts/postgresql/templates/svc-headless.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "postgresql.fullname" . }}-headless - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-postgresql - port: {{ template "postgresql.port" . }} - targetPort: tcp-postgresql - selector: - app: {{ template "postgresql.name" . }} - release: {{ .Release.Name | quote }} diff --git a/chart/charts/postgresql/templates/svc-read.yaml b/chart/charts/postgresql/templates/svc-read.yaml deleted file mode 100755 index 754445a..0000000 --- a/chart/charts/postgresql/templates/svc-read.yaml +++ /dev/null @@ -1,46 +0,0 @@ -{{- if .Values.replication.enabled }} -{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} -{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} -{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} -{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} -{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} -{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "postgresql.fullname" . 
}}-read - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . }} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- if $serviceAnnotations }} - {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} - {{- end }} -spec: - type: {{ $serviceType }} - {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} - loadBalancerIP: {{ $serviceLoadBalancerIP }} - {{- end }} - {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} - {{- end }} - {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} - clusterIP: {{ $serviceClusterIP }} - {{- end }} - ports: - - name: tcp-postgresql - port: {{ template "postgresql.port" . }} - targetPort: tcp-postgresql - {{- if $serviceNodePort }} - nodePort: {{ $serviceNodePort }} - {{- end }} - selector: - app: {{ template "postgresql.name" . 
}} - release: {{ .Release.Name | quote }} - role: slave -{{- end }} diff --git a/chart/charts/postgresql/templates/svc.yaml b/chart/charts/postgresql/templates/svc.yaml deleted file mode 100755 index d24b2a6..0000000 --- a/chart/charts/postgresql/templates/svc.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} -{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} -{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} -{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} -{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} -{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "postgresql.fullname" . }} - labels: - app: {{ template "postgresql.name" . }} - chart: {{ template "postgresql.chart" . 
}} - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- if $serviceAnnotations }} - {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} - {{- end }} -spec: - type: {{ $serviceType }} - {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} - loadBalancerIP: {{ $serviceLoadBalancerIP }} - {{- end }} - {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} - {{- end }} - {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} - clusterIP: {{ $serviceClusterIP }} - {{- end }} - ports: - - name: tcp-postgresql - port: {{ template "postgresql.port" . }} - targetPort: tcp-postgresql - {{- if $serviceNodePort }} - nodePort: {{ $serviceNodePort }} - {{- end }} - selector: - app: {{ template "postgresql.name" . 
}} - release: {{ .Release.Name | quote }} - role: master diff --git a/chart/charts/postgresql/values-production.yaml b/chart/charts/postgresql/values-production.yaml deleted file mode 100755 index 01e6039..0000000 --- a/chart/charts/postgresql/values-production.yaml +++ /dev/null @@ -1,556 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -global: - postgresql: {} -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - -## Bitnami PostgreSQL image version -## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ -## -image: - registry: docker.io - repository: bitnami/postgresql - tag: 11.7.0-debian-10-r90 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - - ## Set to true if you would like to see extra information on logs - ## It turns BASH and NAMI debugging in minideb - ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging - debug: false - -## String to partially override postgresql.fullname template (will maintain the release name) -## -# nameOverride: - -## String to fully override postgresql.fullname template -## -# fullnameOverride: - -## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: buster - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## Init container Security Context - ## Note: the chown of the data folder is done to securityContext.runAsUser - ## and not the below volumePermissions.securityContext.runAsUser - ## When runAsUser is set to special value "auto", init container will try to chwon the - ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` - ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). - ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with - ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false - ## - securityContext: - runAsUser: 0 - -## Use an alternate scheduler, e.g. 
"stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: - -## Pod Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -## -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - -## Pod Service Account -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -serviceAccount: - enabled: false - ## Name of an already existing service account. Setting this value disables the automatic service account creation. - # name: - -## Pod Security Policy -## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ -psp: - create: false - -## Creates role for ServiceAccount -## Required for PSP -rbac: - create: false - -replication: - enabled: true - user: repl_user - password: repl_password - slaveReplicas: 2 - ## Set synchronous commit mode: on, off, remote_apply, remote_write and local - ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL - synchronousCommit: "on" - ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication - ## NOTE: It cannot be > slaveReplicas - numSynchronousReplicas: 1 - ## Replication Cluster application name. Useful for defining multiple replication policies - applicationName: my_application - -## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
-# postgresqlPostgresPassword: - -## PostgreSQL user (has superuser privileges if username is `postgres`) -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run -postgresqlUsername: postgres - -## PostgreSQL password -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run -## -# postgresqlPassword: - -## PostgreSQL password using existing secret -## existingSecret: secret - -## Mount PostgreSQL secret as a file instead of passing environment variable -# usePasswordFile: false - -## Create a database -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run -## -# postgresqlDatabase: - -## PostgreSQL data dir -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -postgresqlDataDir: /bitnami/postgresql/data - -## An array to add extra environment variables -## For example: -## extraEnv: -## - name: FOO -## value: "bar" -## -# extraEnv: -extraEnv: [] - -## Name of a ConfigMap containing extra env vars -## -# extraEnvVarsCM: - -## Specify extra initdb args -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -# postgresqlInitdbArgs: - -## Specify a custom location for the PostgreSQL transaction log -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -# postgresqlInitdbWalDir: - -## PostgreSQL configuration -## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
-## {"sharedBuffers": "500MB"} -## Alternatively, you can put your postgresql.conf under the files/ directory -## ref: https://www.postgresql.org/docs/current/static/runtime-config.html -## -# postgresqlConfiguration: - -## PostgreSQL extended configuration -## As above, but _appended_ to the main configuration -## Alternatively, you can put your *.conf under the files/conf.d/ directory -## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf -## -# postgresqlExtendedConf: - -## PostgreSQL client authentication configuration -## Specify content for pg_hba.conf -## Default: do not create pg_hba.conf -## Alternatively, you can put your pg_hba.conf under the files/ directory -# pgHbaConfiguration: |- -# local all all trust -# host all all localhost trust -# host mydatabase mysuser 192.168.0.0/24 md5 - -## ConfigMap with PostgreSQL configuration -## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration -# configurationConfigMap: - -## ConfigMap with PostgreSQL extended configuration -# extendedConfConfigMap: - -## initdb scripts -## Specify dictionary of scripts to be run at first boot -## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory -## -# initdbScripts: -# my_init_script.sh: | -# #!/bin/sh -# echo "Do something." - -## Specify the PostgreSQL username and password to execute the initdb scripts -# initdbUser: -# initdbPassword: - -## ConfigMap with scripts to be run at first boot -## NOTE: This will override initdbScripts -# initdbScriptsConfigMap: - -## Secret with scripts to be run at first boot (in case it contains sensitive information) -## NOTE: This can work along initdbScripts or initdbScriptsConfigMap -# initdbScriptsSecret: - -## Optional duration in seconds the pod needs to terminate gracefully. 
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods -## -# terminationGracePeriodSeconds: 30 - -## LDAP configuration -## -ldap: - enabled: false - url: "" - server: "" - port: "" - prefix: "" - suffix: "" - baseDN: "" - bindDN: "" - bind_password: - search_attr: "" - search_filter: "" - scheme: "" - tls: false - -## PostgreSQL service configuration -service: - ## PosgresSQL service type - type: ClusterIP - # clusterIP: None - port: 5432 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. Evaluated as a template. - ## - annotations: {} - ## Set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - # loadBalancerIP: - - ## Load Balancer sources. Evaluated as a template. - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - # loadBalancerSourceRanges: - # - 10.10.10.0/24 - -## Start master and slave(s) pod(s) without limitations on shm memory. -## By default docker and containerd (and possibly other container runtimes) -## limit `/dev/shm` to `64M` (see e.g. the -## [docker issue](https://github.com/docker-library/postgres/issues/416) and the -## [containerd issue](https://github.com/containerd/containerd/issues/3654), -## which could be not enough if PostgreSQL uses parallel workers heavily. -## -shmVolume: - ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove - ## this limitation. - ## - enabled: true - ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
- ## This option is ingored if `volumePermissions.enabled` is `false` - ## - chmod: - enabled: true - -## PostgreSQL data Persistent Volume Storage Class -## If defined, storageClassName: -## If set to "-", storageClassName: "", which disables dynamic provisioning -## If undefined (the default) or set to null, no storageClassName spec is -## set, choosing the default provisioner. (gp2 on AWS, standard on -## GKE, AWS & OpenStack) -## -persistence: - enabled: true - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart - ## - # existingClaim: - - ## The path the volume will be mounted at, useful when using different - ## PostgreSQL images. - ## - mountPath: /bitnami/postgresql - - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - ## - subPath: "" - - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - annotations: {} - -## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies -updateStrategy: - type: RollingUpdate - -## -## PostgreSQL Master parameters -## -master: - ## Node, affinity, tolerations, and priorityclass settings for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption - nodeSelector: {} - affinity: {} - tolerations: [] - labels: {} - annotations: {} - podLabels: {} - podAnnotations: {} - priorityClassName: "" - ## Additional PostgreSQL Master 
Volume mounts - ## - extraVolumeMounts: [] - ## Additional PostgreSQL Master Volumes - ## - extraVolumes: [] - ## Add sidecars to the pod - ## - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - sidecars: [] - - ## Override the service configuration for master - ## - service: {} - # type: - # nodePort: - # clusterIP: - -## -## PostgreSQL Slave parameters -## -slave: - ## Node, affinity, tolerations, and priorityclass settings for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption - nodeSelector: {} - affinity: {} - tolerations: [] - labels: {} - annotations: {} - podLabels: {} - podAnnotations: {} - priorityClassName: "" - ## Extra init containers - ## Example - ## - ## extraInitContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] - extraInitContainers: [] - ## Additional PostgreSQL Slave Volume mounts - ## - extraVolumeMounts: [] - ## Additional PostgreSQL Slave Volumes - ## - extraVolumes: [] - ## Add sidecars to the pod - ## - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - sidecars: [] - - ## Override the service configuration for slave - ## - service: {} - # type: - # nodePort: - # clusterIP: - -## Configure resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - requests: - memory: 256Mi - cpu: 250m - -## Add annotations to all the deployed resources -## -commonAnnotiations: 
{} - -networkPolicy: - ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. - ## - enabled: false - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port PostgreSQL is listening - ## on. When true, PostgreSQL will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - - ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the DB. - ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. - ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} - explicitNamespacesSelector: {} - -## Configure extra options for liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) -livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -## Configure metrics exporter -## -metrics: - enabled: true - # resources: {} - service: - type: ClusterIP - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9187" - loadBalancerIP: - serviceMonitor: - enabled: false - additionalLabels: {} - # namespace: monitoring - # interval: 30s - # scrapeTimeout: 10s - ## Custom PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend 
on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - prometheusRule: - enabled: false - additionalLabels: {} - namespace: "" - ## These are just examples rules, please adapt them to your needs. - ## Make sure to constraint the rules to the current postgresql service. - ## rules: - ## - alert: HugeReplicationLag - ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 - ## for: 1m - ## labels: - ## severity: critical - ## annotations: - ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). - ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). - rules: [] - - image: - registry: docker.io - repository: bitnami/postgres-exporter - tag: 0.8.0-debian-10-r99 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## Define additional custom metrics - ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file - # customMetrics: - # pg_database: - # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" - # metrics: - # - name: - # usage: "LABEL" - # description: "Name of the database" - # - size_bytes: - # usage: "GAUGE" - # description: "Size of the database in bytes" - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - enabled: false - runAsUser: 1001 - ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## Configure extra options for liveness and readiness probes - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 diff --git a/chart/charts/postgresql/values.schema.json b/chart/charts/postgresql/values.schema.json deleted file mode 100755 index ac2de6e..0000000 --- a/chart/charts/postgresql/values.schema.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "$schema": "http://json-schema.org/schema#", - "type": "object", - "properties": { - "postgresqlUsername": { - "type": "string", - "title": "Admin user", - "form": true - }, - "postgresqlPassword": { - "type": "string", - "title": "Password", - "form": true - }, - "persistence": { - "type": "object", - "properties": { - "size": { - "type": "string", - "title": "Persistent Volume Size", - "form": true, - "render": "slider", - "sliderMin": 1, - "sliderMax": 100, - "sliderUnit": "Gi" - } - } - }, - "resources": { - "type": "object", - "title": "Required Resources", - "description": "Configure resource requests", - "form": true, - "properties": { - "requests": { - "type": "object", - "properties": { - "memory": { - "type": "string", - "form": true, - "render": "slider", - "title": "Memory Request", - "sliderMin": 10, - "sliderMax": 2048, - "sliderUnit": "Mi" - }, - "cpu": { - "type": "string", - "form": true, - "render": "slider", - "title": "CPU Request", - "sliderMin": 10, - "sliderMax": 2000, - "sliderUnit": "m" - } - } - } - } - }, - "replication": { - "type": "object", - "form": true, - "title": "Replication Details", - "properties": { - "enabled": { - "type": "boolean", - "title": "Enable Replication", - "form": true - }, - "slaveReplicas": { - "type": "integer", - "title": 
"Slave Replicas", - "form": true, - "hidden": { - "condition": false, - "value": "replication.enabled" - } - } - } - }, - "volumePermissions": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "form": true, - "title": "Enable Init Containers", - "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" - } - } - }, - "metrics": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "title": "Configure metrics exporter", - "form": true - } - } - } - } -} diff --git a/chart/charts/postgresql/values.yaml b/chart/charts/postgresql/values.yaml deleted file mode 100755 index 8c766f9..0000000 --- a/chart/charts/postgresql/values.yaml +++ /dev/null @@ -1,562 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -global: - postgresql: {} -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - -## Bitnami PostgreSQL image version -## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ -## -image: - registry: docker.io - repository: bitnami/postgresql - tag: 11.7.0-debian-10-r90 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - - ## Set to true if you would like to see extra information on logs - ## It turns BASH and NAMI debugging in minideb - ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging - debug: false - -## String to partially override postgresql.fullname template (will maintain the release name) -## -# nameOverride: - -## String to fully override postgresql.fullname template -## -# fullnameOverride: - -## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: buster - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## Init container Security Context - ## Note: the chown of the data folder is done to securityContext.runAsUser - ## and not the below volumePermissions.securityContext.runAsUser - ## When runAsUser is set to special value "auto", init container will try to chwon the - ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` - ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). - ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with - ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false - ## - securityContext: - runAsUser: 0 - -## Use an alternate scheduler, e.g. 
"stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: - - -## Pod Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -## -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - -## Pod Service Account -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -serviceAccount: - enabled: false - ## Name of an already existing service account. Setting this value disables the automatic service account creation. - # name: - -## Pod Security Policy -## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ -psp: - create: false - -## Creates role for ServiceAccount -## Required for PSP -rbac: - create: false - -replication: - enabled: false - user: repl_user - password: repl_password - slaveReplicas: 1 - ## Set synchronous commit mode: on, off, remote_apply, remote_write and local - ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL - synchronousCommit: "off" - ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication - ## NOTE: It cannot be > slaveReplicas - numSynchronousReplicas: 0 - ## Replication Cluster application name. Useful for defining multiple replication policies - applicationName: my_application - -## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
-# postgresqlPostgresPassword: - -## PostgreSQL user (has superuser privileges if username is `postgres`) -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run -postgresqlUsername: postgres - -## PostgreSQL password -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run -## -# postgresqlPassword: - -## PostgreSQL password using existing secret -## existingSecret: secret - -## Mount PostgreSQL secret as a file instead of passing environment variable -# usePasswordFile: false - -## Create a database -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run -## -# postgresqlDatabase: - -## PostgreSQL data dir -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -postgresqlDataDir: /bitnami/postgresql/data - -## An array to add extra environment variables -## For example: -## extraEnv: -## - name: FOO -## value: "bar" -## -# extraEnv: -extraEnv: [] - -## Name of a ConfigMap containing extra env vars -## -# extraEnvVarsCM: - -## Specify extra initdb args -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -# postgresqlInitdbArgs: - -## Specify a custom location for the PostgreSQL transaction log -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -# postgresqlInitdbWalDir: - -## PostgreSQL configuration -## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
-## {"sharedBuffers": "500MB"} -## Alternatively, you can put your postgresql.conf under the files/ directory -## ref: https://www.postgresql.org/docs/current/static/runtime-config.html -## -# postgresqlConfiguration: - -## PostgreSQL extended configuration -## As above, but _appended_ to the main configuration -## Alternatively, you can put your *.conf under the files/conf.d/ directory -## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf -## -# postgresqlExtendedConf: - -## PostgreSQL client authentication configuration -## Specify content for pg_hba.conf -## Default: do not create pg_hba.conf -## Alternatively, you can put your pg_hba.conf under the files/ directory -# pgHbaConfiguration: |- -# local all all trust -# host all all localhost trust -# host mydatabase mysuser 192.168.0.0/24 md5 - -## ConfigMap with PostgreSQL configuration -## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration -# configurationConfigMap: - -## ConfigMap with PostgreSQL extended configuration -# extendedConfConfigMap: - -## initdb scripts -## Specify dictionary of scripts to be run at first boot -## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory -## -# initdbScripts: -# my_init_script.sh: | -# #!/bin/sh -# echo "Do something." - -## ConfigMap with scripts to be run at first boot -## NOTE: This will override initdbScripts -# initdbScriptsConfigMap: - -## Secret with scripts to be run at first boot (in case it contains sensitive information) -## NOTE: This can work along initdbScripts or initdbScriptsConfigMap -# initdbScriptsSecret: - -## Specify the PostgreSQL username and password to execute the initdb scripts -# initdbUser: -# initdbPassword: - -## Optional duration in seconds the pod needs to terminate gracefully. 
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods -## -# terminationGracePeriodSeconds: 30 - -## LDAP configuration -## -ldap: - enabled: false - url: "" - server: "" - port: "" - prefix: "" - suffix: "" - baseDN: "" - bindDN: "" - bind_password: - search_attr: "" - search_filter: "" - scheme: "" - tls: false - -## PostgreSQL service configuration -service: - ## PosgresSQL service type - type: ClusterIP - # clusterIP: None - port: 5432 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. Evaluated as a template. - ## - annotations: {} - ## Set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - # loadBalancerIP: - - ## Load Balancer sources. Evaluated as a template. - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - # loadBalancerSourceRanges: - # - 10.10.10.0/24 - -## Start master and slave(s) pod(s) without limitations on shm memory. -## By default docker and containerd (and possibly other container runtimes) -## limit `/dev/shm` to `64M` (see e.g. the -## [docker issue](https://github.com/docker-library/postgres/issues/416) and the -## [containerd issue](https://github.com/containerd/containerd/issues/3654), -## which could be not enough if PostgreSQL uses parallel workers heavily. -## -shmVolume: - ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove - ## this limitation. - ## - enabled: true - ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
- ## This option is ingored if `volumePermissions.enabled` is `false` - ## - chmod: - enabled: true - -## PostgreSQL data Persistent Volume Storage Class -## If defined, storageClassName: -## If set to "-", storageClassName: "", which disables dynamic provisioning -## If undefined (the default) or set to null, no storageClassName spec is -## set, choosing the default provisioner. (gp2 on AWS, standard on -## GKE, AWS & OpenStack) -## -persistence: - enabled: true - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart - ## - # existingClaim: - - ## The path the volume will be mounted at, useful when using different - ## PostgreSQL images. - ## - mountPath: /bitnami/postgresql - - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - ## - subPath: "" - - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - annotations: {} - -## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies -updateStrategy: - type: RollingUpdate - -## -## PostgreSQL Master parameters -## -master: - ## Node, affinity, tolerations, and priorityclass settings for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption - nodeSelector: {} - affinity: {} - tolerations: [] - labels: {} - annotations: {} - podLabels: {} - podAnnotations: {} - priorityClassName: "" - ## Extra init containers - ## Example 
- ## - ## extraInitContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] - extraInitContainers: [] - - ## Additional PostgreSQL Master Volume mounts - ## - extraVolumeMounts: [] - ## Additional PostgreSQL Master Volumes - ## - extraVolumes: [] - ## Add sidecars to the pod - ## - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - sidecars: [] - - ## Override the service configuration for master - ## - service: {} - # type: - # nodePort: - # clusterIP: - -## -## PostgreSQL Slave parameters -## -slave: - ## Node, affinity, tolerations, and priorityclass settings for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption - nodeSelector: {} - affinity: {} - tolerations: [] - labels: {} - annotations: {} - podLabels: {} - podAnnotations: {} - priorityClassName: "" - extraInitContainers: | - # - name: do-something - # image: busybox - # command: ['do', 'something'] - ## Additional PostgreSQL Slave Volume mounts - ## - extraVolumeMounts: [] - ## Additional PostgreSQL Slave Volumes - ## - extraVolumes: [] - ## Add sidecars to the pod - ## - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - sidecars: [] - - ## Override the service configuration for slave - ## - service: {} - # type: - # nodePort: - # clusterIP: - -## Configure resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - requests: - 
memory: 256Mi - cpu: 250m - -## Add annotations to all the deployed resources -## -commonAnnotiations: {} - -networkPolicy: - ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. - ## - enabled: false - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port PostgreSQL is listening - ## on. When true, PostgreSQL will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - - ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the DB. - ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. - ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} - explicitNamespacesSelector: {} - -## Configure extra options for liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) -livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -## Configure metrics exporter -## -metrics: - enabled: false - # resources: {} - service: - type: ClusterIP - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9187" - loadBalancerIP: - serviceMonitor: - enabled: false - additionalLabels: {} - # namespace: monitoring - # interval: 30s - # scrapeTimeout: 10s - ## Custom 
PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - prometheusRule: - enabled: false - additionalLabels: {} - namespace: "" - ## These are just examples rules, please adapt them to your needs. - ## Make sure to constraint the rules to the current postgresql service. - ## rules: - ## - alert: HugeReplicationLag - ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 - ## for: 1m - ## labels: - ## severity: critical - ## annotations: - ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). - ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). - rules: [] - - image: - registry: docker.io - repository: bitnami/postgres-exporter - tag: 0.8.0-debian-10-r99 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## Define additional custom metrics - ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file - # customMetrics: - # pg_database: - # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" - # metrics: - # - name: - # usage: "LABEL" - # description: "Name of the database" - # - size_bytes: - # usage: "GAUGE" - # description: "Size of the database in bytes" - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - enabled: false - runAsUser: 1001 - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## Configure extra options for liveness and readiness probes - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 diff --git a/chart/charts/prometheus-10.0.0.tgz b/chart/charts/prometheus-10.0.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..a5599382e1279ec3ddcd0b547f9c504c553fea73 GIT binary patch literal 26549 zcmZ5{Q*>rsux)Ic9ouFn={V`wwr$(CZQHhO+qU)H{?EDNKHNRlsK>R(#+p@g)+~ZZ zC^Vq|E&vq}jlQ@7qoKGoyOcW@n*oaoqmd$;xrQPayR3=|yR@p6g}#lEyOO-^Z*gNQ zYoM!cbJtDI6vi!Gv0f0v)y442!s(bq>gl3Wd$RJ2tnmXwie*!ol3%#QM9}CFdcb-3 z&55tAy^y%jaeD>B5s4`Lq!`dzT2`+*T3clKMlo-2#Z>?&2&!AUJ>HK=liYqiMmH}n zS^xnY#4sQJ3^(D}4?@qK68h<952Qn-1)A-v%(%(!;t z>5#k$7y-vI&I0c-tOJ+$$S^4h4lDwK*wNAc`9ZO6(j1o)q*1&BEP3_|C*TYryy!l( 
z!L$-Xsh?Y=E~IN0ps;)a{?8Ol0A`daTw!`?)BBPA$WwG<7g)Hwlpn-ch|xhYkq=*9 zueL(KrrQ}7L?HqYut;1X@*b;5wv6C_BU%yK<1kgYT^*8$9NZ`}jF=t>NeDMSZ>XYE zhUh#O4CR5<(mJHJ^u^23s7->XeCqhDXZFkwpIZnyXG+&T4CEOplsJx~dZ<5iJ|W>_ z&=BQG6~)8=>j9iyLT~SAA8B*}_!~(ua*iI_#76hbkI!iE1lSCFH(D<&J~{*G06q{{ zJ~yV*MKZA>{4hAWS53H{kbGntdlK4jGoV!}7js{NuCy{P_q``ftwTx>rhUCApcw2> z`5-~{!5nF}LzEar%{%pRIOD;Rj0~Mn_~n)4%RZ$Pex+MVNhs8UK|v~1h<5!5ea0fZV;&NN^?A&fGh5d$4PB*GE7 zHPBnEj3VT|dTJ=_1mCSookDv=@1w90#)}fx4oEvZVe_ycppgQniVS10JM5!|1@xjc zC~P{FWiUQCfe0YfDa^2P%uUH=1VYqij0Qg&ex*#=gLy^xE#Jhld9JuaO77R2t70~v zS^ksibG}HO4ACuU4Ced_`lC?!UV?ZaQI>(Xv(f%ovFlT?Vb!z*HL1v}*0aotPnLX( z?NR>=Yb+4T;pE7mgjiTpl-yC(6+9KTN^$(OzaS(L77-(0c8WsY;Hzn~e#AairIyT$ zvguoKj##B~NX*T%LM(R)803s`h4f5{Y`Rn#5IX>J4j~x}W!P=#7^KBqKf!&phjpkY zWW*1g=qsMTqZgMuIzKWg?%ErB+P1K0{ddxMba)^SNkgc--r%SLHAuyr!h0SelnJbw zc{DQc930&&XhijO!iKs0uC_KFz0~`IHm#UgX11)1%35q>quNZHY+9{~LqVhf?^v@i>lHI7QL)t-8nIW`kOF8T#Bg#j;i3!&jQP{x2R!J^68o+*M9_s;%2P5WCYT7y2N<+- z&Z8H=u2*!vcgjfyoLCTbW;mnm7KNBRmZ_wdO3jpc_`z;Ul`3d*Q z1jm=hiD%5<2V8`~YABytfTW0a=a8-__%6P;Ha%aS+uF)7yIqs9@Yj*SbH`07i!F2c zh+?j0DBRh7sA{H6y;WyPy7OemWj80Elo*!kSC-?S)e_h~%6U%sH zNn#SAoEuXfeqEnV_o|dC1EC0~`=LMTN+iw5gO(~6L~i*QbO3{uVguz7`jsLNIl@Nq zeMKCY<=hRkNOfnei&q$fUiW6v`7zpurO+rxyNxUP3f0McZ&TIYTkmwCKNBs?~ z;S#a1y&lka+WH>tFLflWju=lH0)(%V`=p}F2qR6QLI6c+g&g*ETC&G+Ey;gVfV1GG z2{q?BzT&m6+h(KB534Nst7F)j2~I-5TiL4=WcKDFg+87qi>X9?BfbfeF%2 z_viB+^d9>Qy-PyQYexTWzb_};8!|f{E3WwDhypJZM55l&Td@G6f}RW}y3aRd=i9s4 zpSEA!gcKb4s-0xNe|dxd^juA%B#8cqsBQSnuHYrwUq8tJwRj~V_=c%@-(fFJf~~aZ zd#0wo2~UH0iYlEia$wF84ZSK|-Rfh=m2nf&Rvu0Lq0+BFEd@)3x8Fa@-(Z!^3L1Dc ziUg~IS-u@F4NCC13_gOmg-sMTAwVvov(L_4Ah{wqUD1gQ4|JwGLYfjA^Z5^6|9kka zImh+IU^9G4s>v-}pyG;Thuoy)sc8LpB?c-y3Y03OiVi7`<>lKdXqsJ}H`!Ggf+As! 
zJwIF_8v=6ZB1b_ExRn2LztN%`CheX)#L^eAljU#4e8@s02u~Xp#)vUC7+I=xvt?LZ zf6v`rO@xRN9*iX9Hq3-Q(1H_9gn2j{1cxX$nlRRAuAD-=A~ID_cLG+4LdT(Dm{Q7c zJD}W(zcfLDLc%2uREmA3j1WS0xkO+il*Fr$Cejq);fRC1!LY5a&Ux6^$q;Jrf#Cs!XLKNU933$idPD{T#GLUV*KRKw&ijri+2pO={0>JwJSfw;a zA0UwmXH6J?jwPXmeH{uTbL;I2C37e@nv444#n;#Kae4YYTrEHD@J^{z;%ushP0N7$ zy!034b-nBFb#60Hs0qvyP#I)NRAxfW+Doj?xlJMAG98y$>YaqPUqu&3Dp?2Z!dTRXC*x_XF(*4Fu$$60As8nVhn#EN_7#&)rFvZL*lSCrP`F1`hI8-~U76&@ZI zhQ3d(lJb!RX_msie_%>34AJx2>pC}tSD4VRETHi}%YO`)laN_El-oGvuh1`kd*7k3 zP;GW`q9+O>qY8#NuAf(oIhBHmFk8SeEIdyxpUHoIt|wEgRTlm$^XJIYIZd*RB#T!`Rq&O#hf7Ufit&eN zj5$WDa8vZiGgCC*pQhh6CGv>eTwfX#S!#O^-|<3WMGt&jiWiH;ovIppcgJ_K$5l6U z^3)n$YI?z1kVP|j0Nh>v&Y!+ZHxIiVwbg&B_CI=%6I6)P0wG1|37tdSL&|mBrhy(k zkKo1>6-yolT8kKG$IbRvRK0J4)q!`LZCzs3A=}+HHR~aIzzi?m;9T*#_13}c-50SC zqdKGy>piT1OQ%QPNrGXSw1D%&+@@(BuUW3X*K9uB>NLg!N?Kftw;{DVv2WQTArEdE zikFQeZDm^Bj};0n?h92)?bfMqT_!?omDb~synAnRjDEue);T2a~58OUwGyWQ+B+6846;dz=8E8NRGnEWCokY*llCk zTeBMOJio&4)lez(a!(p6f3lRx`n;LQ6ci^K&r@zA2WpSBHGyT7lWTK6qy$V8DxXX& zXh_OdISv!iXM!bdbMoq~SVm~Cb{*OD9D2L&{L*fZOB^3DBUZQPa`4aP zqw4bzW~>g^&&s=H9gmTEXcdU@cfl#m`129b`DlJm#^zW&O`r<5)3Hov0O!$)-i7z@ zc%j*nh1a_GEF%A{zv$y@a-Ik-$E0y+1VSY)7t!hk**k?|ich*W2==HT95T~@sNDq{&pOUF4_6C~Z60%AfEeHf0 zyxPLHfH&e6fOYHa09(Wrz?7JLWMp7GcQ*Xz{SmuV9hT&LvJcE3zKiYoWfE-0bXK>! z{q6Yb{jrrn2T;&^EV#%ohlaeIk|qqhr&BI{EU&&;V*arG{@A!mQJa&wVr0m#$V^~- zZD|IG#t1+*>(;bn?f1-pJp=i67#KlOWBF!v zZVvE08H)hq_C$s=yw+eD4e756sozQ@FhUSlh{EYKx{6csm@qc5#LVEY0QZN7zSuv& zO++sTOO(|DygyB(FGvSokpQn8u09P6;EP~WknOZ$okim9%CgOXvBBs&!yfK)(l-r{ZJ-;G8w*y>>_P7r< zwh(bE&N1XX^OLd~aPVjF#5Iv>Vam(4%LYk5Or{eq00z_nmLnX=7sGIUQok|6M77?F z8)`rcNyIGPFn2<0$VuQL&XC3hg7ZY}f^!(URd5On9x8gSb!0I}d9|hr@m7saVLyT? 
zmA2Tm(UX0NxEG2=#KWfR4QrbbUypK&9Zo$8C}R_oLB1sso`s5Axfj-0Aw&1@<>v6fxF9dXu`u?`MyYvC#&2` zPYeGC@1S=$`A`Vd!Xcr@H2%2_B@|Fja#ivLW}Y&I>?=!0GrAxct>_$@P4@&N{UmrV z4nPsMs05FDZE<$C813f-h!bmOZ?fx7>!CrDuo~>;9$BpToXN{x&+{N9OHLToZ?`z; z-3OhmzWOu+fde%wLUWk-VUu}+2d75v^Y*9<|J|KE?!6l(hkx- zYQMc6F870Q-t}{$Kly5QgN8^0;Eh3y+MZx9A*1^`rHW%>2b4(Fwv(M<+uqJsKRt_s z+_j86WGdRzFDj`R0(!${SWS+Qjhea~-Qm3yWU#5<-so;;Bv}*>N;YUHz!%|*K`N)~ zzb_@eb4EE!9#+;zjGsSE2$C3l7O#XXB2x!ms#v#SIwQYlz38qon`+7FeTK7EwT?En zx3^DPI;Nc|@A2=$sP~#8iyId8f1sVyb|vS~JUMom=w>jXU#Lb3FG*}SqZ11P^)bQU z;meWnWWJthkbLmIII9%U&{Ezq*e#~n*^3pg>C{)1awU4q2Qx76wW)f=VcV>wEUfb1 zX4aOQ&Nitr*y#G$Pch7ajZ8%vwP`Xi6dz8TbIqAJTg|u@sX|O=znSHxjFa}hy(z38 zXE_CnpO(6ZwLW*vb$Kt#K-Itk5iuG2F{z?O_i|R49pKK{1DKQc6&st0>O9*|6_m_#^MT_MK8bzqF&<+`@a@p~`(3n9my)<VJuPeUJl0DQI zXO50I;!sce-c@v#0lv8gJT!|!bb1>mw=UBqR&z`h*M*S=YTplMm0QQe5J-#((N;c!y1!t0QQS;+&`Yuanu_ z;h692DF0{WudgrcDMC=gAOoa?OEqdj2hC_h#!-7};dD5*F9Q!&D!-J3=#EPEE<&ocuCMrWo6y9*Qp21iQL|E<8341_6_uHv8dEBg9{w< z#D$~~NMEiwNgBC^p4gn_Te9h_3bKUCm|hrO#rVt1MMAvc@>AGtv7TiVLXeaWRR_Gk zjwJIqdm-i8e-AwoVNFFzD1}j=3WM{9Qsw5S9SS^)v}O^d_cw7DD7TudB&z^3ciwhD zh5Lu2cm(Ua;L`1BPY^q%X@Nu>?LNe4URZm+$K`H<5|5JHy8KQN5Bbq|arY;U5XuG> z7BimvyTrYve!*4D7DpsArKN3xL=Q}coKQ&Dd**}X%^zL}{^loFlCV_IB=s4`grevE zBcBSQW5i6VKyqrlH+6#Ymmdr|A5TlN!?m0trUv>uZ+p^?H#&|DBjuieWuFXe$`Vb) zAnnIqLB8#D-TBqQsgtqaqv_VU!8+NuS1;d}hMMQt%85Ir>PhN#QCJ%3P@u@N>&!|v zY(*(sdJuBY)xK$o0+-Fw0C8N4&<_>10Qbu?bgxD5#xTm?V9nCx{iNs0g ztM+|v{H}JDG?b>fp49|s6l2JX*1~+txgq*Iz+0H9Yzm=7~zg-GfT9%)naw|$}&3N%Ue6H+88rF zUokNqhxUV;=^AlAA@jcEImlt(dvK>1hRVhidbx-*QE)}sTiL{A=SQz-zX-U-0CrFA4|#ZXAIPGds9;Yf4$D`@oFt=NtyCC}rxaF->Wz}8?im(WvWa0MH~^8CfXn?1F_?9SfO<2|Jop$3?rpO0C_{H!Na z+2xSxl9|?CKuk*Bwmtrsa6a7-+dL>2^}Vifon~UL&E<{nP+16sMy?~BYk=2{>3iK< zz)S=~K$lBzzXp%zj=`hq(Sv_%cjA&F8)y&KOUFhsXzIG*FqQKQQ;{XQY7UVHI03>Lth}$iZ<%U`S7R!xv=I zO~9YOtuE1FKg;*1F+ALN_8kdI?>KOigaWML5bM*v-ONA~{`x9?qa4c*{iQz#aIdk> z7@XI{*wYO^A!`%NV2741;;Xp&$@O?ZRH@)h%hl=I)zlHUr5YW(-qyfuvbwSFZK^g&R#+s>{ 
zfz#qrW+CP_&=4v4l^(pUV#>wuKMumpN0`t?3BS$}FB{a3u|>z##EH_O^udyHiLtz} z5@Y*}PQP%9NVs%FITh}8Q{tj=+#04MtP3hZN@5?;vjkvHXAII38PQ?Sy7ve%LkakQ zUZQJyFlc|1y!I|TfOCamUR-yCX4dOW!l$iEuMnBSJ>4*7t`pOgn_^mv$34V=OwOkS z1w}g*8=*AME2d68cP@lqvdc1lWK}LYhbczBp^oM&nV6H1(}<^Auh7c@>^V8RthkHA zYHA%ACB)m99>|0v8NBhzhxcnDpP$DV$QEqH)ofka)sj~teteb>y3k5ENW^gEvS)hO zb?O~z+c6rL<3bX>%=sVb1O4o&hsB7S1y5SQLWunKQ2kJ2%4X_`^ZD33y?2MqAIzhJO!cH{Sw)_cT!6mus^W|`?;0GuZoOhj_ z621AEwrqD+de!G}!OASmMCwNo=TJPh=s!3vdu)4Grzg%v_wUqbM^Wp4Ej??tBoPb0 zi|B_PjnV`NA(bqI=2*@`kk@)&Wd(hbcfiK<^M6pdU@)6wW?4Q*Io31P{wkH!rBcMU z5OROtO&1(d_t#;aKA?^9#QI8+_7Y&F=PkImaGrC72Rgga2u8 zC)pnSUT<&n`?`H58n$i0N?Cz*}EUKn|07Tn`IAa62y?IYioTJbu_C@riV9VWSpAL^NZ?!pEDd zV{aEALJ^Qt`n(69W{?iJBQQSS8?#z(|CTo@>H^NGpQto}p2x_ZC#u=;&c;-|t}!jo zH#nyL(^E&R&?(`9gr?MKIW(%JyCGpeL%mxz=;N;|3rMp4k7z?5Em?lc)z+Proqn`Qn^ z1O5O{VwQsADf?9_GY;cTlO+fmNT@SJo)PVw6Xd>t{WDqLrDOB!*7+fv*VpIeGO{$M-D>+oL7L~$=XOM@6(4HY%8==i^WCeK z^_**>u%)TqK-q{$=|$l3+x_EYefTm_bryDzteNiGga*WuQ6fhvT9Z!=ky*2q;y6Zf z;OG5O=U4`1lm$CqIZ4TLlyD11c7F3FXwFhx=q~%5Jq{sWM6%-N>djqOIVg@8e`oip zkO&0m$KB!OWB5{6)Ey5L9cE0;;QO-|(79ExU`0p_N5xJhL@*W?Dah?icw)MS&78iE zLVP%!QP)Iy<=-(H(l)N6DiiaYLnaT*QV?b!95p0AI_#itJ0S0%sg=oo*uSG|?jTB| z`|QBlTm<-}Xzpsn81wdMM;^MwMBmHNL29tei)BG-I?G0^r2Nm`3uz_>sWbF@$@Z_G z1BIBt6U-#%7>x;Tgr41=p~IpN3%b`eB#!k<7e&%C4--0th^SAI(=k*!PT1{2;)*dT zcY7hPaaql)n&Zj2JQ# zf2;-&4%`IKv~t26e4MK?RDbManHw18>UwcEv1`kJr zKX(tsM#VZmcddCjIKFTD(Q{&pLYKkB#F1ht9GPhQ;iCIqz1#Uec3pFPrHlq-vu8%q zcpzBB-D?Xcot?Y%5uMLYz>B<%Zp*8v2I)-=*s=;aMuZ51F0J3jp`OK3ZXf2((T#RXp?9Z?3?iu!y?(#e{7(&(#Y)LM0jCA(rf-1~y~76lbzaC&?!MHT5yFw39Q7=NysQ2upp zVa|H4RzWl`-(e3~mZxjNQYigW32E#YzQ}jFurkE5j&B^M+!$_U4oiktiPe{g=s6}@ z;y9oal}+86=zHI&_{FmMqvtr7%wm!@&T{b92IO1Bs8EGN9-j2aBiomF_{G?rKCM^J zoHp`ea13u97%6!Gu8>km=8+jWaiKzz;O$c*V19zloH-|NFHa|GJhoMIq3+4x#E{%8 zojDm?Bb@mF%F@S+qLa;IK?kVmZVI6vCW6M>sEyk;RkV(BjccwaQgd)=vgqP=;DL$` z_fz95vMN_aP~zI+A*D)7n>=-uMwv);W&S_$3$ z31@Z9H%slTU(W;8HFL`wmd#SLNV4S(A$1@lz-fAgnYluiVG7=cI*m 
zX>SL#iaIh%2n$JrZuv+NWhAEMmk>=yT#Gj83pZCbOx*1d(r(;L(22d=%7%1@Zuf)U z{ky~AHNySG^tsQ0+s5ZzV3*bIo*M-ztNHL|RQu>#?QNAmm)yYpy*fRBJ0~w5g3C+prW-4IfW6wSr7hnA6P#5+ zQm(V8fKMP*E^-8b?7HM}F`bv*iEob+57>IdN0fMpsb!}yCOb?ADc7cvN+(I(%nO7m z=HCInlf&*AWp>zxiK)lWw(Tv$uX5~5lt)AT`kOMZ+chR@@*sl2Gv&qbi=|%X$Se4a zlO~DzXeq_z=H!sNyR#lI3djwhDVj|u*j>2jn`27#!9QXjc6i=kl^6fR@1c|j2pvTv zxIP20=P*bCd=+Re5E_6}VtmE?VYoKHVeTEp&XS>4IOfU{E`aH*Ol_o%4?PZ7myk8@ z;H1GsLpGP-JRtp;JGD)PU9R?Vy)@>0vE7{!648qRq96SK?1E}}fIzXqP@a$8=u*1x zUIxi6vBw~WFYsk%cW_ikF*@6x>h zd#H7u4Oth(?f}iyT^ba>k8gfnuOwfVQ$YW`%+Kb;q6c6!NEOWZAg)jXC!54ZGWFGd zxuv46DsEHK`pZ;3oTu$kJpjZXR0tV{GarUv(sQV2-z?am|B`g{F@7MC7&|>%+4K7x zMYi}xLlGr+8~+f^fmOSV`DAshfK?e?OQUelG9St4ur#zkqXvc828bFZNA2FDNgQsg ziTNKa=oz{F05Lna1ed!Jn})2YiK?Soa4kJ2fwQ2MAQy~Ss<2&m{qpagu(Jrh@XS`I zoe+z0j-_a35o5|vW5^MWV``NxmSy{X@V|eO&g>wvnAT|3|2*kz-bAXMo_O{@T^Mdd z(S6sxVjp0nP-4FLXuy*(&V{kgDv>X8L}G1DF%p6Glk;M8cu1n9aCtIfYrd*Ws{-NUoPp{V!P~l5%mvACX z4GNOEjO$QO1S`o8g9Hd7@x~p|1 z=iiH16z&E-NF&T;QdT*L^85X!2j@6&zHpR(ZM;-forJ<^!fW~z_DEt_Nsdop9{uqatsiMLtX@%aWUQL! zkoO(@{y$kX8*@Ww|apXjDQ?y?_UF`(3CPD@_!Wbf2{|*M9h6IVyai) z7J3StH7gb7)4D++*Ra~72ye36;7G#?m!1t$ZRNVTVXORDzvkU8y(bxqAjN5y3zu~r zV8XwnlL3p$Fnu-`h18W%QV#-EBXuG4o@%Q3w*_)!*LB=v6US#5lHtuiw&9+p|hmGCYveq+!w=yRGNo@m9K$j)k`fx&9YME+B-a17WX@FSx63)5BHb9 z17>r7>laW%d$UDN>FI;ROBe5@ZwjP^jsx3!*6bLEo}q^{1@&=n_2xXbo(ueJ4wo^C z!lS5Ei{_E1xy7i~FD*hH48$`a)#`B2bvrozvwU&pNQQa9*QnVK!p`q-)u#(kpx6 zs_|_mNq49S3d3l$wMBg#%rIgT5=7c2SYVX=+-P$m68Va2%t7nzeJ{A&d_#W`R%#B7 z(Y!xtHsCGrr5c3tbYi2BvP8u-SW)xaL7uV~;G`spaB{i3X{I?VTB8-QX9IEBnIu{e zNE^=R8;%X>={;Fd3M&jBeh8*GqWm89Ga0BM`h;5FAj+-N-A!-Y+eo8#DdaPTlLSFw zddyAoI33M7X^zI{zY85h))2h-g9Sk#hlk;;x}u}3Xkfe3ph2{2E@O_m(=oN5;R41H z87Ow`>Q|=Ztl#`76(`FAaNUkojR(oN;tv%Q8~y|w9%g^fs^l2I_g?DTu>7#EelP!= zuSEAII&QW<>1OZQCQM_W5NU(7*x%NUr!#%#s{un! 
z{!}p#lkaUPP5+_TcZ+V^)!kV0MTmIcCE&-qIRMxmn=?+jhhR&i+K0SR!I!oNV^Sh{ zanJ^lAC(?|xTa;z?vJ|Kr(l9FFTK}0htTmVr(o-%Kb5h`hU0#v!vU^wc1cal%}Gt%fATzgbn|_R z9Vil&sCovFGYag$$Qcv(ivQV`p3C3wvcMS=_8w5szT*ud227JkU5W@vLUX#j)~U1< zocp`4ex2sHA1*(e>PG+Yc;?qo-e9B2zab7BCEMO`!M1bAJ>PgG~UhT12SB|bW z39|R592XPo;`P3t^1eTKDrfUC_Zrw?D&hi?23Ku&CEv*ziu6nn=^C>CsQ>WHAVtn7 zOdA|?p_#mCkz+vJIpzH2zhz*9!2iDn zphE;8)RL^JmW{IX5f;HVr*JjKm=g~L*PRJmAB{Ue<(|tiV6gBH6JYfaAF#Vn*uKKN zgWoda((G9{!D9gHKqd;0*_!;zsFaWWC=C6twq>oeh$3wD%J6~Bdv}>rf35i1fJ4*P zX;*_-koj-q4beXx2x`fXy&%K7X{s_{TnGbFl3SD zxgJb)Fw+Z{#`e$sbZw?+1DR0k$n{zhWuBc&K>NP!*Gs_}UkgT0mEIZey*(Y5Ge~fS z$#8!%ZhU|7(+&f$5L`Zbt9dQ&@2U6uCVidnqEb?j*w3tvtA`zNiEO4t4a)h%()EX^ zzZaV=J6al$;*f)+aG<0-E|A@%4VLV`ETSa#qaKA>f``%|n{H*tV+02BgA)CON$y_C z@6Qh6WRUrtZA(&c+?w-riP(8W!cJ0kD{vi$OrQmq&LfTumxB^%FQkGh8FX$B>NuyD zKYQ7}dw+lS78as>6X0Fn__jPQSw~7II(5gb6dwE{)XlN8EZSJKXdSSQJC%Vob&3`d zZB9GN!>Hu`CbByl04)-?QAnv*C?$kR}KQscbXC`6Ey7Jbf(Ew zgmbF)@ooUx{E|dS+8!K5jHc4Qai+6ez=0^HI%#t~6DyW{Rax%ag4W9oU-dCRKC(AD zQ{A5*TwbrYIy1og+5F6pBh75XDWC;f@#07XmH%;a$kBq}>0~xf%c>smyq}h@4BEGR z3)mmrpF;(B|2ugPTOB{Lra~-egthjigYE4z^p%w(Hx6cPK|!@gqg)Z~;kxspxTob% zF88|{!@iqdM}94*@!EUs+Iw7;(f%pY;UdM2#2r2Tc9M%}2 zSYQT_YrUGk`IJ4_4acMCAD#C<_#n`NPvZ#&{jM1ZUYLQixt^6u>?FsRW&A_ox5xA+ zx$Z7Zfd5_W=#R63XY@zAD?&daq<-;mT=lT;u8pwotzgWZalU%VVV=GLVeYQndfONA zXEhEKeyyVS*7Ob9)5?7LnNXps4LRz|hsCXNRiA&8-%~8AG1eJ)w6VR}FL5Jqbl?h+*X@itR`Bb(m=M(UiZ#>G@qZQ}qTgZfNAr}KsV^gpjE zIXr}b>I_gHPlG+{;{b*h&)0|kR&;`1XO?x_7xTY>)p3w*0YEHdY~)LC=wScMk1LJr zzL}=Pr!7~$BPtHpT0*{A;7J!*8Jo9w>saK9G?%u89@*vrN>YROJ<3WSnHh}+7)Sql zp0ZsfHlW{8?0B$##j&kMu$YdKQtQ6TJX@2foYlqJm}Xlg&t_SnHRumbVWL7cL}g;c z!{3Z-C4urf9`g(~wT5?^jdE5^lop_l5Tg9mmO0Bg(=RJBlo<{=)|XHHJ0Z(YPQ>?0 zroi^d63XM_?gTE$-)Y9t{E~AkOn=S6TG(AG(@K{>)Nu=szy4NKt`avWE_?IHZa(gE ztNYh#PGy*FhfmDovDFsU# z{>t-jr*SdpC2falUWE1$k=6X@mBGk^_>XL{QjYj~zM0kiH#DR{NztQ+kjEf0W9b0g zjFTUI0%{TbQ^Do0aSLE;0AjlW4E7H$eBa*70D7}ePc|n3jn)P7xsYRG3@Ov#O{7sg z$%tj-*@$j{P5RE6f9vKwKvwgoR|c(*{2j1gnD7laEc};%CK@AtO0huel=f2&foyIX 
zyxTKpY~{=6Jpi@w;8hYozE5v;Za@6LZm#%SAF3Jvnz+!-h-kO%m6~*FUNi!lEHCDp zlf_f~?C88b*~Rjk^i43tyEQJ&zOKgK#)0FlAWQ6dUe9oJm`Xm-oR-9m^?MIAEiRV8 zCp7o!RAXH&Q0AD5-QR36|9VpQQ#AFx-DxK}KLm+L&huhS9bqmkyEV$-eh@!^xB2*BJ$0%Uy}#Jdom(RWHcGXGzInT3aAC0hHWSU4bTPK$CvT(sdUBLRwR{l{ z)hr64)U0IMNfy^B_?)=V+o~GaHbZ=Xi+a$PW8}|>yzS$oU@iKCaIm!5T-?#BuCJ-> zrEAE2D^wkpQx_N{GUzPH5v64ZodMCjjveayz)G>(a_A6yj*!|Nrw&P%6vPgT zE!=v~>VU~q8?FE!PqO}$_Q`Sl4{Wfbj52j6_oc(;U1I@v{Ib4c%$TY(1TA!Ax|*sg z2`ZTDiBqek%;_4_hN>C@BkWQtbGKzbKsDmsf*=0Nmy?%z%@>}4pKs$GV12v}2cSUP zLYxOHLAIUDh0|+SM0gmIN+v^$U>4q#Gc3kxbQR$?LqGg0lGi-Q67c%^V3+^ubGb&% zfA^C^ky1rlB*-Y)@%W=N5vRx!Nv4S}(PE?J5;CZJxtQ?7fd4q}gL4izJopCO-QDlU z{Ma783!e6JZ&=4$>za+#MtGbeH!wEE7#pEIx9t;$YUzB?Hf;3&Gs`TQf-;jqZ@hK z6HcA&Ce6|ftv3zio6!Or8J3@>23(5QDU2%u28QGtG)+w@?k$$D3Jr5F1&s;LEiKvR zrrFyw7!3=|n~~1r)Som?2$r(|Rfq2+Sx^80ub0{b0PpInx8;%6^M~fchdbDI{nn9% zeDxY$wReH8d+Bf+ATeR0I5y9wRh9fE%`g3t+y5^kOxkgBZd?Pbd2_yPr+>2lew!S& zFoW0mdpSbyT5z9k{OJW)m!_}(o}2?HGXVd4RfPpVr+52<+F$>QN!^s6+Ki9$mi0bs zGOvb=F3{QJBRkc*3?LB?mQya+N9Pur#l4kdF7%0hT?!())RXKdVu5PAswZ(ml z*x6wtWIwywZqjPRU_bNG#-P6twKYDyB&Te>@U-UGnRwo)ncH*Mrtc^^bjB92?7cM6 zH4kO})Gl5XFH_nd*LqU<^~KpFDf|XA{C{-{?*Gv#;%76Td)7b+q(}R58f{)1danJu zEm?2m6C9SLi_E`w{;Z=a@#Jl9`7QVVW7?nFKXCnFvlzyZ>amw@D>l;22T*}m+*OP* z|G%=q==6J#D~BY(0bm-T7uK8Kj`XeWK~7C=?fT0i>sI?_^UPCMtUK6lm^0$NcA;}* zSuCS;-96X--&CNp^7sIj`p#PNQ*T)4bwjtR-{``9!kkkyNL6iL?WVpW4HvblpxTP; zdi>~$onlR3BZ5q&TaxOsE@bR6bE&&!OB})K#$s~LPV#0O)T24r{T3OxzgzNnMz#HC z820GB5(ro>f9v-Au?M!Y?$6u_zP6)zr=ohZ2hKA;_xchyf*$-w?%<&QJiybAbL-vL z_E=p3aEoXEG*R~~46@~_L{)d}E|;Ze73`uTnX4yVUZU6}M)I9)&8}jv^q`(*Oey|H z=>uI=dG;eMm9}E=p>k*7_b9A#>jwAA>RiJ}y)>i4^dxmj{pLc)xut6^Y)fq*%YZ|_ z%#4vF2~+U@BlDL5#Wde`WFe?h*s8~mVVqf-$UG>=rZs8#-NJZ{I-f%H{fLqf30##k zh+=5i|BuF#O#BaxkBMM*>M=KR&C*ulK30p|p~~J=$KWy(D%QtC)Cclbl}a&q(0^wqI2#x`ujJePdk!IHUAll=}Z5{`&2s6$P+&YHI{+Pbm8K ze}F6Y+N5T!l6qO=Bt)NhSPObAv(_fdKK|%(eCRg*n5wk^c>jT)&1w7l%R{eCsKXNE 
zD-BY#vEgTPtxu`or+Y1~O4LQ`MD!tj<`q7)Z|fxaXEX2gmv8h??1%Z$KT(!|_(xkInN9|-3!jCG~&9Kbpt1@5om+$qN?(`QxpX%pu&#vte zur<*b0FeE>)8!M2a&~j@zN^(_u``>m>=@H!JFbk^O?+$>XjfjU9f`PZ+pl2i&CqLt ze?QI+I41UK2Ce6Y!Hs-v_7ZAMiEbZ9s`M^nFfQArUsA2ST%Hiu<;${V_nXo0OQo|<1<}cF6LI3%&$f~eLD=bp!6Ip@8lpVQmw@!F}J<*4Mh@$xZ+&pJ8Y&gg8YrO$6x26W-8H|ufSY2NpRO{K_3_}q{oRxHU@QywGS%|6HA3PL z_1|)o#9zOfk#b`XSplC+1u@b+c>>dtZ+059A+6eZV+mk6ZC+`BHMa&ewM>S~y_?T_ zjaLtq6L0M)XU3&N>3Kk5g48a_u9K3ev7}nPew?ZrlnY`GV>q>Tyq+xi4ad__bVgmH7||GU#4cEPp-71)1Sz5yoP0p> zO7LXO3|!-4QK6dNoxQYOjuXgd7fEityJU5b*ECg2r#!-4ppMiRnsI>l7Z-w`Tv#)c z3EIC|!%fi?+1Po-@4Pq+lvVTxgp#EiPe`afU%({sBTy6cPHpB*V-k>gLShti4?gzv zY^v4#lKvv^;RPn**SKO6y*5FNx{aZOvjx&NYH+c_ zrUC|QxGTmA_SfEEnBbZv@jQB6D|$i{uNix}r01=gGkJD<5o7)|i;0|9!Or`d73qUgUY4GPxw%Np8WSCa z+5VEL4bMwfwYd61RQrIqaO!94t%>LKPJiOwgO2DaI=c4*umR~8(ogrMMY%juZ)K-NG4rc(f;UQ(>M4#c+dycXQcKbor*0)#sgl62+#aC# zs*WMVSN7U&-aozo^kpn4;wTnUk8jgz$Dtl);W#hZ<9Lpq_+&;eC@*pfzFNG!366a5 z)K}l4E8&(vKJ!3mawT^mUZb&SlaUDZB0Q#$V#E~5x6aVI#6?!37EX5Ju7bmAse~;I z{2akbvfhTC`saTP87k=M?0y(ie96_y1MIEs+w5(4ncdkhllow-c>5+yeG4{U3}SCZ zt9K*Od!yC+cAkyv3#(Fb42z-dn_#_F<%?h@5JxFRZ)X&-8HvJXKtC616d|%y`l2oY z@w`yOg}7|yiq^B|O9rv-{5i<3H+>GMHD=F&vx&)b0M^f4t>l-jg)cAgEt_Lu9R^jA zT%ih1bzX&P24jM?Gzy=0Q?P8?%@u@f&q?^J9s-s0;+vS~3jNEk*4{6+Sg&(&YWf0X zRv=WN(83`KAoZz&M1=|+X{nM|p?9<4EGf5gc~L4rtcb}f z*-$)HS8RQ`94N&~E%f3X(K(JKlD|+Gpi@M#bhW4wSZXWea4R#TRPpf4Zp~%=u_q5o zou!-3eBvZH@MRYC|-lSQK8Cjl}KuxG%}S9RcDZCUu9#1S2ptzk2HgO zhH0~+#FUh=QC{LXE{WCI2j4Pps_sV*fEypv>3XbcJn{X@Y^@@Gfv;e`*L{{+t4fmf z(#1^Ia~EEIFf-q4u0Ph-w63pZZ6ZB6tyrofErr;X1=ua1T_t$dFAHVh=3=GZW#p)r z|K8T(*!&3IjyX`B{m%wcKgp-T{{Qf(nEz?`c(}9w-_Fz8{-=1UaqG#Ss$G<-@;+sv z_|hq#ik-TRY)^$YmW_i{Q~j3hlv?ZNo#A_#MMP3<=e`k+YW}8u(4%^LsG1Z~1@v~= zqqdMesxanFiK9Hl+vbeAHPhHX#Z$-sH<0>CJ`MbT@c6J4|L@`9&i;2R&(i$gp8@JF zB!Kesht2T7O{9HlX?R=6_*9i|X|S32>UH~n1F3)H)3pB|JuJ%qj}9O0;(u@DskAKq zf~kMDv5{*M{?v@YMsxktpt|uiKb24I{$D@!k9?ZW|K<3?hb;b&x)C}1u0FN=fBn=y z@@eA#hl4>${(rp7|GAatHvE5=G&QJiWNVp(s@hvoQGdJngKpQixFKs$We?jX463wC 
zZI>&kP4!Ks32KRI%M3wP&{j_lv<$=zUGkt7^=dk@f)J4H z_DmYEG6TIPFGyPwWqOX|st)}{yq1!F;y!nf6mOA^>yA@h)mD#PXPU8WFt(ecPrh3~{ZpBev zE{j`Z?UfqdSnjr^+PJl}Z8dh3%d+Tdk1GW`CSd`vc~gf`v6jV5U#t4B zi!#K|RN%9smyA`S=f+u0NZ8E{yqo=mJmHj@vC-k!tThlZl(~VdEf-bK9z#sJpUgb?(YA$@~m|J zw=)3h8oABDZRGyjgu1&fV&6A?#aH{(?thD|-Cq8`!{MN8|2sI|<^SHwbDR1N8Oe5?_9ErcRFY-_77M8(*x4-3NY+LwMs=a5xYhIcJ2aYDjZGIabL z#oUDGtJh3=s%ZIh7j?LP9k0HYSByiv$#50TcecBr$1Q>$@8S#Z;tTKM3-96!-}Ufd zyYRxh@WQ+B!rKim{Kew?`WN5r#x~Ah?xG9pXCu*tcaeLmMrIcmcNZ6T7Z>*yhv3f~ z7x#9e;w~Q+cm24yyTG_hAg~LJyLnBccPH!uW$E-M zGkV%R*;8l#b(O$v#D5q*8kFL{9SwHzAGh+XYX5bGD8~VePEt--0HYeGA*hSsc-s5$ zgEWg*(c%uZW!SG8)+q^da9_kmv~1dRmB7qGO8GaOp-CIFwStR)Js^A9FiuhYvUOaM z$~$$-0jO;-R?_?eA6YGCR+@y6BYpcQ;ixx4Qt%y1Jvu=i#%*#n=z zA1UFwpGLm08?&xSA{=1kJqml;EY-7`jxgiklM#$Q0Z#F}#vK&g-1NU+ZUcPZ_kpO` zVzTg;+y55*@6Av&$J3ZlwAnVA?EjAsj|=qw=;86s{(mdaJ~)M(BN{Wn2~e{Su4gC) z;}l0SqE-Tf4{(ZDud@%{&M=cP!U*LIuo;RXFpbDqS$g7l`T$TAL5{DG)P`8^VH|e$ zL5!yId;{*Q`!^JVYs_ch-w%2~DzHf`YYLP=f+&b^jC!5kv-68{PAKZ^gQsLZCo%Z- z={X27Wu4v>^S=C7q2K9^|3&-qU-M!%?TdfRPj(geb3kJld`J^8!4YDe?|bYz>3rWC z!w;SBdwiaBzW@I^``}kdF-aMC@$5P4^b$&bM*;8jaEM@Em80bMPUp=(Uq3%P|C(fI zYyY1;KY8}*d2b$Wx{ccXKO77WOZh(zAMND7?L7P7G+P0k&inkwZ}(@MCv4QW>_&v> zcJKf|@cwB;(h$5>8~rJnCn*=C;%Ab^q1@kj^&k!=vLevi<^jMAK%j!`G3lJCjZ49t z#F!H*K&j6?@PdPYL=g%&19QYF4j7nFGS@1sl%h}zW(+_MruYiQ0LL8BD;Ti{0A0Z- z6{i{*O%R}u2~}Q)>;VWdOCq>nDrg{LX~dZ%R3Lz{5DcbMM1kP#oCy(tkvS6WhFFp? 
z%m5inb{-1iIY!d(uGi`8?}N85|Kr)&e{?#RmzUburt=*T3>M&+aTrB_aUlR$qbthF zx-Vi*NtgzrkxoaF++YSN2V+7w;}j-h{MyVF2s3#QkXTgu@60__RT}mCj>qbOaTf5j{xG3`DTo$hqrl5EVxTQw9Cikdcg#USNda5-Oe^^40<-QahnS1!nlS53!T z(IT18#iCGy$*dhC22~(o_0bF;R$q+NX;!P;96cbwzIWdJc7NabeE=AeD?kdYYKbP0 z!a3rIil30nRXRrYP6$)Ed2}~bW1Y_5!S~<4#mV>IkAO^XB1S;>o9TJJl*1#MQfu&C zjH@OiHJ|z+iV#Qgvpw?Gs`E~Qu9nv*4>d(|a)lT$lWq?%nI|MhvDoj7kpM$(gou23 zsOgGi1#6Xz)%u3-o$_U{r2EVH^-DBGXVXAPUjsHYk*7u&xyk| zjv^2vMT^v-OSFtN0n0EGp9MxSx&{+M=a7Rjnh3Fv&(K1ysWh2V80yh_Cx3|QlI$iF zT}e?$wW3i1^|Ykpi!a?A_J(4R@GJqgbKnd{}gywqkpcBMjcnm6opcT zKp?g$nE;x`u|NmfRn@6V>^dPpe)u8Zh?2hq6egk$`Y2$E!NW0tG2t^rvy#^{93hMD zF+n0!Hz$Mw1cRBiw0lLuq*1tZ4|tbgo-#4SfFUkM;ZiV(%U-8*B4^O7xZ~d7fjyn6 z*qiQ3?J4ow)F2a(uMxrWB-21<+(XErnP4#nn91;(V*5aj#pugDV}TfoCSGt+Qmp3r z9L1qpPI)&4>ozUuP4z&$kqb_ZunNza6BxBH2?y7Ne&~U7q~`s-)v{RdZ8w8dQF3cP z?CWiKAZJOSN)_*vzgT+xy`8M>tO+YkP`mVd-3#2Qzpjws)yl zYVRR?^yIWXkbEcvm-&Y-iNebsco73aLqzS}N$mQ9p#E#{HDbNM$RoJif+D<%7R0mdXm&-Kps z7D!Syn?jDR;lle}^-_H@2FUVPcDK`uNC2bU%zOkM9}Z@wW}z2M;SQ{U%zI=~mB}BH z@eO-gZ|CkSv&Jle$08Pg(}XH1kcbyb?zmFQrUjfwitxRMwxmAf=ItNaJjN(R3ds zxPLX2cFQ>+-InnT3z zVrHaIU005D0OL+b0Mz27Q8izRW~O=-`nnn-X(}a8WgW2`xtKeOCWs=j?24q;8EGwP zKg}!_;1r6PE!GN2kjlXJlFo~szNbiD7hEWSOL8Cwi^8vc7xIDZ)cd^a7c9JmMt$%P zdYz8Vd&D$YgOaaDu^b-AS(E~cpar%$f+qZOOvS>1EF=43FWR>?j`0Hk4n@GEui6`d zJs}9~c?#%Uhx)YdDE0^!o2vOb!;%g##H{=%0mQu{6Z^$ksrS@WB zWXukVgNTT(;TVJ@6?9PEu`osBp4tmW9l0`r0>~4vb==HWmw0WRJvCoCEyzAVZ(K|> zlBl^*r*mp*gHPa@q)6&iAb!cBovwMnC(!MBeswM#2_X}&KYv+3ea#X%jvHr%n+>(Y3DzoGc0~FThV~8vD)gIh-~E=5Q(?UJZJOgWkXo zCwCBR1Sg8>yj}wAi^*%kPbp$3=0(_<6O91+R?=JX0p%(ZtPX zFErb3unwv!r@R6ry!XEq;dxDC1x#z>IWTh$NOTP$15pGfX%s1i7iQq{=Ekwhl~$JK zam0>4_&ru!N#3ah|Z1UlmwNeZm+CC{q+8Dit|m_DU0_F!`C1 zxf(aKHe{d31VC+NX=DL;a<>ExQH~`rKzjA)IaOrtq%hkPJbmy*x`(4|ir<#bSQBwF zl>JpATSTBfoI9YvE5Qlmt(%gz&_W&^a9n_=>q{gvocn5cdqvVG1o;wKaWjBBgxDDo zof$im&=r-ILm`h}Re&uqqC~O`OxIx2vnf3}sjGqIdo>GDQUg-vk*)+LzRG3i-`wR? 
z<8lCXW%*X+l2%#*l%QBnwmP;uEsB?veuuHm{Sr(h~~m+zL~lVPg!q@`79j^0R&#NE{w)q-J@=h)$U7GXJ3El=X2HH z*XTOPh)v;;#~xmQ%MF- zkQA;YW%U(-`|>V?B4%r_|NdmydsKm_aT*aoB_6pGi3f|CJe^*_$d{P2avwG)z!Yt~ zf*?hI0un69XPg9y^7)h?rQ6A|g6Ut&-B1Tna;nh@0Qlora zvid*)04lvuh4ffwKV9mIR8N!}P*uQO+gxA1KSMD5C&e7Si33zx7YBMgi?Ovua4P;AZ?#j%s+MBm+Q?i0e`X+5oK7q@=DB9Q(!~ZI;yoyGonffPi zdH4%%hHF`Nw}pSM?~;@`_$P3g#tfC^?Z!MmZQIjTfHur)*_?Jq6^)#y;}BCxQKbs2 zO{1LX$?~rRC*qmxh^v4QQhw->dXz?@ib7Ur)Rf&wP=F^3MQp9+v?QsH?h1f)Cc622 z2|g8?vcu0^N`pg&DGEWH&c}$BhREPtCX8h(QiqJCOi(Nenf6_kS1 zm4KDQ*!ekf<}Or*K*C(}YaEEcUqOWDxaw?O@FEs4pDIcAeYQIT_01=O6UKRBLm8h0-Ry3~Y`uQ%VXtMHb2wt@HJ z)K6(yIRQgynhcCg|MKEgP+uW@R~>4YH?4x)2w7c8yQOd~{t_j@SQiEN@x)yFqwt^} z^{U(ma&KeC1!;P53ZA}rcE(IbVIy<3Xt=s_bvvzjgE4>Td&!B^E0Bf5uUaM#jLPfs zHHpzBAQW6ajWCM28f^8tqG7&BjXw#{X5G0U<)vq@q$a3QP`T%| zPXOuLcjtZQ(&xi0z-I5eDpsLKUt274S%&6Q+txt#scwJA^EnO|EW3ee(V*7jHnx=O za*w1cw<(BZE1>ln-O4bI1qEhJGiB9iLzKoQ&`PcaM#5As95s*&p3p5Y*IK%Q2DUP^ zqj#KJvNExqpU%j2{0wrK%XCUWkmA?_5R&*F7r+o#(lfahJZ6Ry0J)I%C{GjVu9Bi6 zM6*G}+ioCUcO{7O{wS0v;pW>M)lhwVW1O!i5mC z0zwgC{bd@~i=st~hByv;KT6_Q0nl99<0=l2Ny75PQaqID#vX2Z5TlzO1sN7Fhxp4w*pM^|58ya!l) zo~Z<1IL6lWqW&;j*}#Smw5{5?twU{PKOk2Ys)tuE2Dx)l zaYn3apezlQ!MVZMY5`~Q)e6VAGSK>iVF_LTsn7u{+J24rHK8AzrRc(ve4EXn@~cso z^s%aCnWI#!RN1{3&z1y#`r=tL=q8!7)QnHubRaGzYI=H61+=WJUmey)rKCpS>q$sO z@V6-bHZ)}ol4%A{)c62X6zH3v^4c*rsUCYDr`-*T+-`q-f@NXNRLhhOC`X=jK`bLm zIxszAWHDB%7*k8S!?B#s3TUkaJQw$6WIJzbez~qdRO>`KFVuoK=f}BTlJmlqR8>ni zexQ?MElt)z%~fjyDN2@sxRPWY zta>@p8Tbk@(ht2XL^@d37^K?pTG*rdz!oU052DW+)dwoG9Q6Sy8;*KAxsu(e-QG80 zHtHK1Z8ho}+eV{NXJ0o+%60=4aDKWVK+rOM{S2na;+MtMLgKbub`o^J1oi;7r zH^E*{y0@X1OknNqHe0|th_*3+weIEZmRg&_dPk_i6t<$xm5pNS+jiIgl5ICo?`FH$ z<=U&WkgaXkF_2xZ%^FkL+9r*q>~gKSHna5&6ijB9Yp7^Eo13%NHtIE=U9RPV8Ewmk zYs_fNF-_Xq(mE~r&b|&Ytv>m>6xurY78zcPYjB6!wL7BPtrYFzeBk_NP)C5QwuTv-*{z*#lepy9$0Z!^ns~9DcS(JkVU483}PM= z7npByL4iF{Vs8ZY&WMb2rDCb<0kP8P7zRDnSoLZ1?5C5b?%`4&4~rpyY(t;&2G0WI zxQ7uJR$O-eH2$xpv!?ox*Y%w+>)=$MGqXxX;QEBw4Aq=74|~Jo-eD1pd&X>rR)5lD 
z#ax~OD3JFCV#}4|+?d!X(%Di62gsZJ`5Z`wnJ2dEyRlC4CZ`b@!>Fg!Xcs0Np=kae z_?WGFHLallF- zb&y0%;s#tzG({astbkzEX})eA%V&C4%saXSw2919DyY?-OyVgwU^l`O6fA-W?fq7O z|02db&lskT(wN&yS3wzJ8V54~#Sk+bBHQ`g5Jpx=DtWn1|$s zm}v{EVU=};KCG5j7y7sKSFbcIj16y^sYb0KD`#BvDGbXkRHzMEDRJStS~upVB)qKh zE;np!!z#5Ju{89lH$$ni0};zBD;0FYz49GnRP^vbWk6rUX|9+D%zmkXh;u$jdIu zJA{NU8|yii<^hz`pW&@fqLcaw7%!tN09f=CbTEkPpNy-iGN$ns2PC2ixLT z+j8;uG*_d2Z5(_9M}|5q48Pq*li}X#ke3ThP?&&1Py!G5w&4gof^n7WeqAfxXapOA zUC_$5fa+LvXY1Y4|CeFZ8ow2=tWKC^EqwsNucs$~jDJS~FP-2Ft;xlHa35hAo7T#o z1_;$UUo?;TgFMtH9Fr^KY^;t0#eMQ6Jv)lJtkM9ocK0Wq%~i2%YA;Ycs+a0(lmf)U z4N5ge_4gsB$HBbfGkt&iGg~pi-P+qhgB$qk0LyE z;i1KkOH<`iwm&F0-$)_q2m@O(j)4oZZ3F{v2lG3kYLqK)ii+sxEcQ&Xb}v6>NQJfq zoQflJptM%@1aSh0NnfUg1QRD$|76}HaSbB6o1pb9fx)a)%W>M`CpU8A>P$pp+MUN=KQ4t{6ba zmgf{ic`w@npt`?phG%j|U8t;95PVc`niFUi=MPa5M7sXIhjMcq?X ztQqSDr=o$qJsmRXOr?XU1@~T-1Uhqs)KseCn@kZQ+qiJEQ!sS^`A7f6X7&nu?Xp<* zzj&@9+P${F=kg8s)ae*~i_Dvqr4O+3Yh8A`LMZxx7Ek1@;rj>KpTAvZh5<5XmYuxs zvIl-9)J)!krE)BK694_LOfaEPX+uAr96cJSR1+uqCIDZdXz>6*@INQ7UMkxWnbU#E z0l+$`l9k;-N-FX&YGqZ>5^TS+8Hr@BU7gMhGcgJr2WbRpv=9wt*NJ%wE=69(C?fGx z4hG3VYUYt?^7KK~Gfc9F`%ZuNWk#XSzJW6A%C^);i_XZzCMvm;Trj4%`UOB&eH{T9 zOKqusT?Bu)9Qeal;N_fcautZ5p7F3=JN?j!OZSyQoSS3hj0iFg;}D3yw4UE~$to&& zV9LoHN>x#$&W^^OO{G66im8{`{P$#f$~lZvS(PE4CJH67j!bGqlMO4UNVc)x6HULQ zU4tfI_BqcIP|Q(1%MEDCj2D(kguTZ{pd=dI+*mo-K7Y<6FG2JyR2u6k*~+AL%+ewx z6m_$UuR=LP~ZC;RV=l5{%L{L>0WSj#t!U{hd_2bwBLDt=#P zMvE6J{}q#ZAeCCD90>99AX~}uR!NrA!|0}w0SO6vPS(+i{cYOv*_#N1Mrca%u4_|}fXw^9BhEMsKJ>F<2L}&8qR5%MitWT-ngK%&<|vrK7_)hSZsZx# z-o8A404A6+uBns38v!UYYy8#58zd0ZoWCo@B0kTEVF#m7O1k3;Z>;Kn@+)Jj3Jl@EWK8|~z$ zzH^hn9AW((QK&S8flOq(@%T&dM@`f|_xQuO#M4BEIv-tzzO%n&L5OycV+!1~3`9hR z?oe<9C;T%SjIYE$!Ul=G72}$BpsnK67w&W*xtqPnWSc0ivs)sjE5-KgKl<#i+VOZlUC^0If4`ApUA`^q6W@!Ixf_EMm(Ez|2v?hJE$0 Z$tIg@vdKTmcK`qY|Np_;&LIF^0|3aB8({zd literal 0 HcmV?d00001 diff --git a/chart/charts/prometheus/.helmignore b/chart/charts/prometheus/.helmignore deleted file mode 100755 index 825c007..0000000 --- 
a/chart/charts/prometheus/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj - -OWNERS diff --git a/chart/charts/prometheus/Chart.yaml b/chart/charts/prometheus/Chart.yaml deleted file mode 100755 index 0b153ed..0000000 --- a/chart/charts/prometheus/Chart.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -appVersion: 2.15.2 -description: Prometheus is a monitoring system and time series database. -engine: gotpl -home: https://prometheus.io/ -icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png -maintainers: -- email: gianrubio@gmail.com - name: gianrubio -- email: zanhsieh@gmail.com - name: zanhsieh -name: prometheus -sources: -- https://github.com/prometheus/alertmanager -- https://github.com/prometheus/prometheus -- https://github.com/prometheus/pushgateway -- https://github.com/prometheus/node_exporter -- https://github.com/kubernetes/kube-state-metrics -tillerVersion: '>=2.8.0' -version: 10.0.0 diff --git a/chart/charts/prometheus/README.md b/chart/charts/prometheus/README.md deleted file mode 100755 index 9f84bf0..0000000 --- a/chart/charts/prometheus/README.md +++ /dev/null @@ -1,476 +0,0 @@ -# Prometheus - -[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. 
- -## TL;DR; - -```console -$ helm install stable/prometheus -``` - -## Introduction - -This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -## Prerequisites - -- Kubernetes 1.3+ with Beta APIs enabled - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```console -$ helm install --name my-release stable/prometheus -``` - -The command deploys Prometheus on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. - -> **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -$ helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Prometheus 2.x - -Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/) - -Users of this chart will need to update their alerting rules to the new format before they can upgrade. - -## Upgrading from previous chart versions. - -Version 9.0 adds a new option to enable or disable the Prometheus Server. -This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. -To install the server `server.enabled` must be set to `true`. - -As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. 
See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. - -### Example migration - -Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: - -1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: - - ``` - alertmanager: - enabled: false - alertmanagerFiles: - alertmanager.yml: "" - kubeStateMetrics: - enabled: false - nodeExporter: - enabled: false - pushgateway: - enabled: false - server: - extraArgs: - storage.local.retention: 720h - serverFiles: - alerts: "" - prometheus.yml: "" - rules: "" - ``` - -1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. - - ``` - prometheus.yml: - ... - remote_read: - - url: http://prometheus-old/api/v1/read - ... - ``` - - Old data will be available when you query the new prometheus instance. - -## Scraping Pod Metrics via Annotations - -This chart uses a default configuration that causes prometheus -to scrape a variety of kubernetes resource types, provided they have the correct annotations. -In this section we describe how to configure pods to be scraped; -for information on how other resource types can be scraped you can -do a `helm template` to get the kubernetes resource definitions, -and then reference the prometheus configuration in the ConfigMap against the prometheus documentation -for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) -and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). 
- -In order to get prometheus to scrape pods, you must add annotations to the the pods as below: - -``` -metadata: - annotations: - prometheus.io/scrape: "true" - prometheus.io/path: /metrics - prometheus.io/port: "8080" -spec: -... -``` - -You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. -`prometheus.io/port` should be set to the port that your pod serves metrics from. -Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be -enclosed in double quotes. - -## Configuration - -The following table lists the configurable parameters of the Prometheus chart and their default values. - -Parameter | Description | Default ---------- | ----------- | ------- -`alertmanager.enabled` | If true, create alertmanager | `true` -`alertmanager.name` | alertmanager container name | `alertmanager` -`alertmanager.image.repository` | alertmanager container image repository | `prom/alertmanager` -`alertmanager.image.tag` | alertmanager container image tag | `v0.20.0` -`alertmanager.image.pullPolicy` | alertmanager container image pull policy | `IfNotPresent` -`alertmanager.prefixURL` | The prefix slug at which the server can be accessed | `` -`alertmanager.baseURL` | The external url at which the server can be accessed | `"http://localhost:9093"` -`alertmanager.extraArgs` | Additional alertmanager container arguments | `{}` -`alertmanager.extraSecretMounts` | Additional alertmanager Secret mounts | `[]` -`alertmanager.configMapOverrideName` | Prometheus alertmanager ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}` and setting this value will prevent the default alertmanager ConfigMap from being generated | `""` -`alertmanager.configFromSecret` | The name of a secret in the same kubernetes namespace which contains the Alertmanager config, setting this value will prevent the default alertmanager ConfigMap from being generated | `""` -`alertmanager.configFileName` | The 
configuration file name to be loaded to alertmanager. Must match the key within configuration loaded from ConfigMap/Secret. | `alertmanager.yml` -`alertmanager.ingress.enabled` | If true, alertmanager Ingress will be created | `false` -`alertmanager.ingress.annotations` | alertmanager Ingress annotations | `{}` -`alertmanager.ingress.extraLabels` | alertmanager Ingress additional labels | `{}` -`alertmanager.ingress.hosts` | alertmanager Ingress hostnames | `[]` -`alertmanager.ingress.extraPaths` | Ingress extra paths to prepend to every alertmanager host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` -`alertmanager.ingress.tls` | alertmanager Ingress TLS configuration (YAML) | `[]` -`alertmanager.nodeSelector` | node labels for alertmanager pod assignment | `{}` -`alertmanager.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` -`alertmanager.affinity` | pod affinity | `{}` -`alertmanager.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` -`alertmanager.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` -`alertmanager.schedulerName` | alertmanager alternate scheduler name | `nil` -`alertmanager.persistentVolume.enabled` | If true, alertmanager will create a Persistent Volume Claim | `true` -`alertmanager.persistentVolume.accessModes` | alertmanager data Persistent Volume access modes | `[ReadWriteOnce]` -`alertmanager.persistentVolume.annotations` | Annotations for alertmanager Persistent Volume Claim | `{}` -`alertmanager.persistentVolume.existingClaim` | alertmanager data Persistent Volume existing claim name | `""` -`alertmanager.persistentVolume.mountPath` | alertmanager data Persistent Volume mount root path | `/data` -`alertmanager.persistentVolume.size` | alertmanager data Persistent Volume size | `2Gi` 
-`alertmanager.persistentVolume.storageClass` | alertmanager data Persistent Volume Storage Class | `unset` -`alertmanager.persistentVolume.volumeBindingMode` | alertmanager data Persistent Volume Binding Mode | `unset` -`alertmanager.persistentVolume.subPath` | Subdirectory of alertmanager data Persistent Volume to mount | `""` -`alertmanager.podAnnotations` | annotations to be added to alertmanager pods | `{}` -`alertmanager.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | -`alertmanager.replicaCount` | desired number of alertmanager pods | `1` -`alertmanager.statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false` -`alertmanager.statefulSet.podManagementPolicy` | podManagementPolicy of alertmanager pods | `OrderedReady` -`alertmanager.statefulSet.headless.annotations` | annotations for alertmanager headless service | `{}` -`alertmanager.statefulSet.headless.labels` | labels for alertmanager headless service | `{}` -`alertmanager.statefulSet.headless.enableMeshPeer` | If true, enable the mesh peer endpoint for the headless service | `{}` -`alertmanager.statefulSet.headless.servicePort` | alertmanager headless service port | `80` -`alertmanager.priorityClassName` | alertmanager priorityClassName | `nil` -`alertmanager.resources` | alertmanager pod resource requests & limits | `{}` -`alertmanager.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Alert Manager containers | `{}` -`alertmanager.service.annotations` | annotations for alertmanager service | `{}` -`alertmanager.service.clusterIP` | internal alertmanager cluster service IP | `""` -`alertmanager.service.externalIPs` | alertmanager service external IP addresses | `[]` -`alertmanager.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` -`alertmanager.service.loadBalancerSourceRanges` | list of IP CIDRs allowed 
access to load balancer (if supported) | `[]` -`alertmanager.service.servicePort` | alertmanager service port | `80` -`alertmanager.service.sessionAffinity` | Session Affinity for alertmanager service, can be `None` or `ClientIP` | `None` -`alertmanager.service.type` | type of alertmanager service to create | `ClusterIP` -`alertmanagerFiles.alertmanager.yml` | Prometheus alertmanager configuration | example configuration -`configmapReload.name` | configmap-reload container name | `configmap-reload` -`configmapReload.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload` -`configmapReload.image.tag` | configmap-reload container image tag | `v0.3.0` -`configmapReload.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent` -`configmapReload.extraArgs` | Additional configmap-reload container arguments | `{}` -`configmapReload.extraVolumeDirs` | Additional configmap-reload volume directories | `{}` -`configmapReload.extraConfigmapMounts` | Additional configmap-reload configMap mounts | `[]` -`configmapReload.resources` | configmap-reload pod resource requests & limits | `{}` -`initChownData.enabled` | If false, don't reset data ownership at startup | true -`initChownData.name` | init-chown-data container name | `init-chown-data` -`initChownData.image.repository` | init-chown-data container image repository | `busybox` -`initChownData.image.tag` | init-chown-data container image tag | `latest` -`initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` -`initChownData.resources` | init-chown-data pod resource requests & limits | `{}` -`kubeStateMetrics.enabled` | If true, create kube-state-metrics | `true` -`kubeStateMetrics.name` | kube-state-metrics container name | `kube-state-metrics` -`kubeStateMetrics.image.repository` | kube-state-metrics container image repository| `quay.io/coreos/kube-state-metrics` -`kubeStateMetrics.image.tag` | kube-state-metrics container 
image tag | `v1.9.0` -`kubeStateMetrics.image.pullPolicy` | kube-state-metrics container image pull policy | `IfNotPresent` -`kubeStateMetrics.args` | kube-state-metrics container arguments | `{}` -`kubeStateMetrics.nodeSelector` | node labels for kube-state-metrics pod assignment | `{}` -`kubeStateMetrics.podAnnotations` | annotations to be added to kube-state-metrics pods | `{}` -`kubeStateMetrics.deploymentAnnotations` | annotations to be added to kube-state-metrics deployment | `{}` -`kubeStateMetrics.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | -`kubeStateMetrics.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` -`kubeStateMetrics.replicaCount` | desired number of kube-state-metrics pods | `1` -`kubeStateMetrics.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` -`kubeStateMetrics.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` -`kubeStateMetrics.priorityClassName` | kube-state-metrics priorityClassName | `nil` -`kubeStateMetrics.resources` | kube-state-metrics resource requests and limits (YAML) | `{}` -`kubeStateMetrics.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for kube-state-metrics containers | `{}` -`kubeStateMetrics.service.annotations` | annotations for kube-state-metrics service | `{prometheus.io/scrape: "true"}` -`kubeStateMetrics.service.clusterIP` | internal kube-state-metrics cluster service IP | `None` -`kubeStateMetrics.service.externalIPs` | kube-state-metrics service external IP addresses | `[]` -`kubeStateMetrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` -`kubeStateMetrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` -`kubeStateMetrics.service.servicePort` | kube-state-metrics service port | `80` -`kubeStateMetrics.service.type` | 
type of kube-state-metrics service to create | `ClusterIP` -`nodeExporter.enabled` | If true, create node-exporter | `true` -`nodeExporter.name` | node-exporter container name | `node-exporter` -`nodeExporter.image.repository` | node-exporter container image repository| `prom/node-exporter` -`nodeExporter.image.tag` | node-exporter container image tag | `v0.18.1` -`nodeExporter.image.pullPolicy` | node-exporter container image pull policy | `IfNotPresent` -`nodeExporter.extraArgs` | Additional node-exporter container arguments | `{}` -`nodeExporter.extraHostPathMounts` | Additional node-exporter hostPath mounts | `[]` -`nodeExporter.extraConfigmapMounts` | Additional node-exporter configMap mounts | `[]` -`nodeExporter.hostNetwork` | If true, node-exporter pods share the host network namespace | `true` -`nodeExporter.hostPID` | If true, node-exporter pods share the host PID namespace | `true` -`nodeExporter.nodeSelector` | node labels for node-exporter pod assignment | `{}` -`nodeExporter.podAnnotations` | annotations to be added to node-exporter pods | `{}` -`nodeExporter.pod.labels` | labels to be added to node-exporter pods | `{}` -`nodeExporter.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` -`nodeExporter.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` -`nodeExporter.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | -`nodeExporter.podSecurityPolicy.enabled` | Specify if a Pod Security Policy for node-exporter must be created | `false` -`nodeExporter.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` -`nodeExporter.priorityClassName` | node-exporter priorityClassName | `nil` -`nodeExporter.resources` | node-exporter resource requests and limits (YAML) | `{}` -`nodeExporter.securityContext` | securityContext for containers in pod | `{}` -`nodeExporter.service.annotations` | annotations for node-exporter service | 
`{prometheus.io/scrape: "true"}` -`nodeExporter.service.clusterIP` | internal node-exporter cluster service IP | `None` -`nodeExporter.service.externalIPs` | node-exporter service external IP addresses | `[]` -`nodeExporter.service.hostPort` | node-exporter service host port | `9100` -`nodeExporter.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` -`nodeExporter.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` -`nodeExporter.service.servicePort` | node-exporter service port | `9100` -`nodeExporter.service.type` | type of node-exporter service to create | `ClusterIP` -`podSecurityPolicy.enabled` | If true, create & use pod security policies resources | `false` -`pushgateway.enabled` | If true, create pushgateway | `true` -`pushgateway.name` | pushgateway container name | `pushgateway` -`pushgateway.image.repository` | pushgateway container image repository | `prom/pushgateway` -`pushgateway.image.tag` | pushgateway container image tag | `v1.0.1` -`pushgateway.image.pullPolicy` | pushgateway container image pull policy | `IfNotPresent` -`pushgateway.extraArgs` | Additional pushgateway container arguments | `{}` -`pushgateway.ingress.enabled` | If true, pushgateway Ingress will be created | `false` -`pushgateway.ingress.annotations` | pushgateway Ingress annotations | `{}` -`pushgateway.ingress.hosts` | pushgateway Ingress hostnames | `[]` -`pushgateway.ingress.extraPaths` | Ingress extra paths to prepend to every pushgateway host configuration. 
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` -`pushgateway.ingress.tls` | pushgateway Ingress TLS configuration (YAML) | `[]` -`pushgateway.nodeSelector` | node labels for pushgateway pod assignment | `{}` -`pushgateway.podAnnotations` | annotations to be added to pushgateway pods | `{}` -`pushgateway.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | -`pushgateway.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` -`pushgateway.replicaCount` | desired number of pushgateway pods | `1` -`pushgateway.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` -`pushgateway.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` -`pushgateway.schedulerName` | pushgateway alternate scheduler name | `nil` -`pushgateway.persistentVolume.enabled` | If true, Prometheus pushgateway will create a Persistent Volume Claim | `false` -`pushgateway.persistentVolume.accessModes` | Prometheus pushgateway data Persistent Volume access modes | `[ReadWriteOnce]` -`pushgateway.persistentVolume.annotations` | Prometheus pushgateway data Persistent Volume annotations | `{}` -`pushgateway.persistentVolume.existingClaim` | Prometheus pushgateway data Persistent Volume existing claim name | `""` -`pushgateway.persistentVolume.mountPath` | Prometheus pushgateway data Persistent Volume mount root path | `/data` -`pushgateway.persistentVolume.size` | Prometheus pushgateway data Persistent Volume size | `2Gi` -`pushgateway.persistentVolume.storageClass` | Prometheus pushgateway data Persistent Volume Storage Class | `unset` -`pushgateway.persistentVolume.volumeBindingMode` | Prometheus pushgateway data Persistent Volume Binding Mode | `unset` -`pushgateway.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""` 
-`pushgateway.priorityClassName` | pushgateway priorityClassName | `nil` -`pushgateway.resources` | pushgateway pod resource requests & limits | `{}` -`pushgateway.service.annotations` | annotations for pushgateway service | `{}` -`pushgateway.service.clusterIP` | internal pushgateway cluster service IP | `""` -`pushgateway.service.externalIPs` | pushgateway service external IP addresses | `[]` -`pushgateway.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` -`pushgateway.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` -`pushgateway.service.servicePort` | pushgateway service port | `9091` -`pushgateway.service.type` | type of pushgateway service to create | `ClusterIP` -`pushgateway.strategy.type` | Deployment strategy | `{ "type": "RollingUpdate" }` -`rbac.create` | If true, create & use RBAC resources | `true` -`server.enabled` | If false, Prometheus server will not be created | `true` -`server.name` | Prometheus server container name | `server` -`server.image.repository` | Prometheus server container image repository | `prom/prometheus` -`server.image.tag` | Prometheus server container image tag | `v2.15.2` -`server.image.pullPolicy` | Prometheus server container image pull policy | `IfNotPresent` -`server.configPath` | Path to a prometheus server config file on the container FS | `/etc/config/prometheus.yml` -`server.global.scrape_interval` | How frequently to scrape targets by default | `1m` -`server.global.scrape_timeout` | How long until a scrape request times out | `10s` -`server.global.evaluation_interval` | How frequently to evaluate rules | `1m` -`server.extraArgs` | Additional Prometheus server container arguments | `{}` -`server.extraFlags` | Additional Prometheus server container flags | `["web.enable-lifecycle"]` -`server.extraInitContainers` | Init containers to launch alongside the server | `[]` -`server.prefixURL` | The prefix slug at which the server can 
be accessed | `` -`server.baseURL` | The external url at which the server can be accessed | `` -`server.env` | Prometheus server environment variables | `[]` -`server.extraHostPathMounts` | Additional Prometheus server hostPath mounts | `[]` -`server.extraConfigmapMounts` | Additional Prometheus server configMap mounts | `[]` -`server.extraSecretMounts` | Additional Prometheus server Secret mounts | `[]` -`server.extraVolumeMounts` | Additional Prometheus server Volume mounts | `[]` -`server.extraVolumes` | Additional Prometheus server Volumes | `[]` -`server.configMapOverrideName` | Prometheus server ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.server.configMapOverrideName}}` and setting this value will prevent the default server ConfigMap from being generated | `""` -`server.ingress.enabled` | If true, Prometheus server Ingress will be created | `false` -`server.ingress.annotations` | Prometheus server Ingress annotations | `[]` -`server.ingress.extraLabels` | Prometheus server Ingress additional labels | `{}` -`server.ingress.hosts` | Prometheus server Ingress hostnames | `[]` -`server.ingress.extraPaths` | Ingress extra paths to prepend to every Prometheus server host configuration. 
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` -`server.ingress.tls` | Prometheus server Ingress TLS configuration (YAML) | `[]` -`server.nodeSelector` | node labels for Prometheus server pod assignment | `{}` -`server.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` -`server.affinity` | pod affinity | `{}` -`server.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` -`server.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` -`server.priorityClassName` | Prometheus server priorityClassName | `nil` -`server.schedulerName` | Prometheus server alternate scheduler name | `nil` -`server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim | `true` -`server.persistentVolume.accessModes` | Prometheus server data Persistent Volume access modes | `[ReadWriteOnce]` -`server.persistentVolume.annotations` | Prometheus server data Persistent Volume annotations | `{}` -`server.persistentVolume.existingClaim` | Prometheus server data Persistent Volume existing claim name | `""` -`server.persistentVolume.mountPath` | Prometheus server data Persistent Volume mount root path | `/data` -`server.persistentVolume.size` | Prometheus server data Persistent Volume size | `8Gi` -`server.persistentVolume.storageClass` | Prometheus server data Persistent Volume Storage Class | `unset` -`server.persistentVolume.volumeBindingMode` | Prometheus server data Persistent Volume Binding Mode | `unset` -`server.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""` -`server.emptyDir.sizeLimit` | emptyDir sizeLimit if a Persistent Volume is not used | `""` -`server.podAnnotations` | annotations to be added to Prometheus server pods | `{}` -`server.podLabels` | labels to be added to Prometheus server pods | 
`{}` -`server.alertmanagers` | Prometheus AlertManager configuration for the Prometheus server | `{}` -`server.deploymentAnnotations` | annotations to be added to Prometheus server deployment | `{}` -`server.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | -`server.replicaCount` | desired number of Prometheus server pods | `1` -`server.statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false` -`server.statefulSet.annotations` | annotations to be added to Prometheus server stateful set | `{}` -`server.statefulSet.labels` | labels to be added to Prometheus server stateful set | `{}` -`server.statefulSet.podManagementPolicy` | podManagementPolicy of server pods | `OrderedReady` -`server.statefulSet.headless.annotations` | annotations for Prometheus server headless service | `{}` -`server.statefulSet.headless.labels` | labels for Prometheus server headless service | `{}` -`server.statefulSet.headless.servicePort` | Prometheus server headless service port | `80` -`server.resources` | Prometheus server resource requests and limits | `{}` -`server.verticalAutoscaler.enabled` | If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) | `false` -`server.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for server containers | `{}` -`server.service.annotations` | annotations for Prometheus server service | `{}` -`server.service.clusterIP` | internal Prometheus server cluster service IP | `""` -`server.service.externalIPs` | Prometheus server service external IP addresses | `[]` -`server.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` -`server.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` -`server.service.nodePort` | Port to be used as the service NodePort (ignored 
if `server.service.type` is not `NodePort`) | `0` -`server.service.servicePort` | Prometheus server service port | `80` -`server.service.sessionAffinity` | Session Affinity for server service, can be `None` or `ClientIP` | `None` -`server.service.type` | type of Prometheus server service to create | `ClusterIP` -`server.service.statefulsetReplica.enabled` | If true, send the traffic from the service to only one replica of the replicaset | `false` -`server.service.statefulsetReplica.replica` | Which replica to send the traffice to | `0` -`server.sidecarContainers` | array of snippets with your sidecar containers for prometheus server | `""` -`serviceAccounts.alertmanager.create` | If true, create the alertmanager service account | `true` -`serviceAccounts.alertmanager.name` | name of the alertmanager service account to use or create | `{{ prometheus.alertmanager.fullname }}` -`serviceAccounts.kubeStateMetrics.create` | If true, create the kubeStateMetrics service account | `true` -`serviceAccounts.kubeStateMetrics.name` | name of the kubeStateMetrics service account to use or create | `{{ prometheus.kubeStateMetrics.fullname }}` -`serviceAccounts.nodeExporter.create` | If true, create the nodeExporter service account | `true` -`serviceAccounts.nodeExporter.name` | name of the nodeExporter service account to use or create | `{{ prometheus.nodeExporter.fullname }}` -`serviceAccounts.pushgateway.create` | If true, create the pushgateway service account | `true` -`serviceAccounts.pushgateway.name` | name of the pushgateway service account to use or create | `{{ prometheus.pushgateway.fullname }}` -`serviceAccounts.server.create` | If true, create the server service account | `true` -`serviceAccounts.server.name` | name of the server service account to use or create | `{{ prometheus.server.fullname }}` -`server.terminationGracePeriodSeconds` | Prometheus server Pod termination grace period | `300` -`server.retention` | (optional) Prometheus data retention | `"15d"` 
-`serverFiles.alerts` | (Deprecated) Prometheus server alerts configuration | `{}` -`serverFiles.rules` | (Deprecated) Prometheus server rules configuration | `{}` -`serverFiles.alerting_rules.yml` | Prometheus server alerts configuration | `{}` -`serverFiles.recording_rules.yml` | Prometheus server rules configuration | `{}` -`serverFiles.prometheus.yml` | Prometheus server scrape configuration | example configuration -`extraScrapeConfigs` | Prometheus server additional scrape configuration | "" -`alertRelabelConfigs` | Prometheus server [alert relabeling configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs) for H/A prometheus | "" -`networkPolicy.enabled` | Enable NetworkPolicy | `false` | - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, - -```console -$ helm install stable/prometheus --name my-release \ - --set server.terminationGracePeriodSeconds=360 -``` - -Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, - -```console -$ helm install stable/prometheus --name my-release -f values.yaml -``` - -> **Tip**: You can use the default [values.yaml](values.yaml) - -Note that you have multiple yaml files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example, - -```yaml -# values.yaml -# ... - -# service1-alert.yaml -serverFiles: - alerts: - service1: - - alert: anAlert - # ... - -# service2-alert.yaml -serverFiles: - alerts: - service2: - - alert: anAlert - # ... -``` - -```console -$ helm install stable/prometheus --name my-release -f values.yaml -f service1-alert.yaml -f service2-alert.yaml -``` - -### RBAC Configuration -Roles and RoleBindings resources will be created automatically for `server` and `kubeStateMetrics` services. 
- -To manually setup RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account. - -> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own. - -### ConfigMap Files -AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. - -Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. - -### Ingress TLS -If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism. - -To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. 
Then create a TLS secret in the namespace: - -```console -kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key -``` - -Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: - -```yaml -server: - ingress: - ## If true, Prometheus server Ingress will be created - ## - enabled: true - - ## Prometheus server Ingress hostnames - ## Must be provided if Ingress is enabled - ## - hosts: - - prometheus.domain.com - - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: - - secretName: prometheus-server-tls - hosts: - - prometheus.domain.com -``` - -### NetworkPolicy - -Enabling Network Policy for Prometheus will secure connections to Alert Manager -and Kube State Metrics by only accepting connections from Prometheus Server. -All inbound connections to Prometheus Server are still allowed. - -To enable network policy for Prometheus, install a networking plugin that -implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. - -If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need -to manually create a networkpolicy which allows it. diff --git a/chart/charts/prometheus/templates/NOTES.txt b/chart/charts/prometheus/templates/NOTES.txt deleted file mode 100755 index 0e8868f..0000000 --- a/chart/charts/prometheus/templates/NOTES.txt +++ /dev/null @@ -1,112 +0,0 @@ -{{- if .Values.server.enabled -}} -The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: -{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if .Values.server.ingress.enabled -}} -From outside the cluster, the server URL(s) are: -{{- range .Values.server.ingress.hosts }} -http://{{ . 
}} -{{- end }} -{{- else }} -Get the Prometheus server URL by running these commands in the same shell: -{{- if contains "NodePort" .Values.server.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.server.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} -{{- else if contains "ClusterIP" .Values.server.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 -{{- end }} -{{- end }} - -{{- if .Values.server.persistentVolume.enabled }} -{{- else }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the Server pod is terminated. 
##### -################################################################################# -{{- end }} -{{- end }} - -{{ if .Values.alertmanager.enabled }} -The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster: -{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if .Values.alertmanager.ingress.enabled -}} -From outside the cluster, the alertmanager URL(s) are: -{{- range .Values.alertmanager.ingress.hosts }} -http://{{ . }} -{{- end }} -{{- else }} -Get the Alertmanager URL by running these commands in the same shell: -{{- if contains "NodePort" .Values.alertmanager.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} -{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . 
}},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 -{{- end }} -{{- end }} - -{{- if .Values.alertmanager.persistentVolume.enabled }} -{{- else }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the AlertManager pod is terminated. ##### -################################################################################# -{{- end }} -{{- end }} - -{{- if .Values.nodeExporter.podSecurityPolicy.enabled }} -{{- else }} -################################################################################# -###### WARNING: Pod Security Policy has been moved to a global property. ##### -###### use .Values.podSecurityPolicy.enabled with pod-based ##### -###### annotations ##### -###### (e.g. .Values.nodeExporter.podSecurityPolicy.annotations) ##### -################################################################################# -{{- end }} - -{{ if .Values.pushgateway.enabled }} -The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster: -{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if .Values.pushgateway.ingress.enabled -}} -From outside the cluster, the pushgateway URL(s) are: -{{- range .Values.pushgateway.ingress.hosts }} -http://{{ . }} -{{- end }} -{{- else }} -Get the PushGateway URL by running these commands in the same shell: -{{- if contains "NodePort" .Values.pushgateway.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . 
}}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }} -{{- else if contains "ClusterIP" .Values.pushgateway.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 -{{- end }} -{{- end }} -{{- end }} - -For more information on running Prometheus, visit: -https://prometheus.io/ diff --git a/chart/charts/prometheus/templates/_helpers.tpl b/chart/charts/prometheus/templates/_helpers.tpl deleted file mode 100755 index 295aa01..0000000 --- a/chart/charts/prometheus/templates/_helpers.tpl +++ /dev/null @@ -1,276 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "prometheus.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "prometheus.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create unified labels for prometheus components -*/}} -{{- define "prometheus.common.matchLabels" -}} -app: {{ template "prometheus.name" . 
}} -release: {{ .Release.Name }} -{{- end -}} - -{{- define "prometheus.common.metaLabels" -}} -chart: {{ template "prometheus.chart" . }} -heritage: {{ .Release.Service }} -{{- end -}} - -{{- define "prometheus.alertmanager.labels" -}} -{{ include "prometheus.alertmanager.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.alertmanager.matchLabels" -}} -component: {{ .Values.alertmanager.name | quote }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{- define "prometheus.kubeStateMetrics.labels" -}} -{{ include "prometheus.kubeStateMetrics.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.kubeStateMetrics.matchLabels" -}} -component: {{ .Values.kubeStateMetrics.name | quote }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{- define "prometheus.nodeExporter.labels" -}} -{{ include "prometheus.nodeExporter.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.nodeExporter.matchLabels" -}} -component: {{ .Values.nodeExporter.name | quote }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{- define "prometheus.pushgateway.labels" -}} -{{ include "prometheus.pushgateway.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.pushgateway.matchLabels" -}} -component: {{ .Values.pushgateway.name | quote }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{- define "prometheus.server.labels" -}} -{{ include "prometheus.server.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.server.matchLabels" -}} -component: {{ .Values.server.name | quote }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{/* -Create a default fully qualified app name. 
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified alertmanager name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} - -{{- define "prometheus.alertmanager.fullname" -}} -{{- if .Values.alertmanager.fullnameOverride -}} -{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified kube-state-metrics name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} -{{- define "prometheus.kubeStateMetrics.fullname" -}} -{{- if .Values.kubeStateMetrics.fullnameOverride -}} -{{- .Values.kubeStateMetrics.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified node-exporter name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.nodeExporter.fullname" -}} -{{- if .Values.nodeExporter.fullnameOverride -}} -{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified Prometheus server name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.server.fullname" -}} -{{- if .Values.server.fullnameOverride -}} -{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified pushgateway name. 
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.pushgateway.fullname" -}} -{{- if .Values.pushgateway.fullnameOverride -}} -{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for deployment. -*/}} -{{- define "prometheus.deployment.apiVersion" -}} -{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "extensions/v1beta1" -}} -{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} -{{/* -Return the appropriate apiVersion for daemonset. -*/}} -{{- define "prometheus.daemonset.apiVersion" -}} -{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "extensions/v1beta1" -}} -{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} -{{/* -Return the appropriate apiVersion for networkpolicy. -*/}} -{{- define "prometheus.networkPolicy.apiVersion" -}} -{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "extensions/v1beta1" -}} -{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} -{{/* -Return the appropriate apiVersion for podsecuritypolicy. 
-*/}} -{{- define "prometheus.podSecurityPolicy.apiVersion" -}} -{{- if semverCompare ">=1.3-0, <1.10-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "extensions/v1beta1" -}} -{{- else if semverCompare "^1.10-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "policy/v1beta1" -}} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the alertmanager component -*/}} -{{- define "prometheus.serviceAccountName.alertmanager" -}} -{{- if .Values.serviceAccounts.alertmanager.create -}} - {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.alertmanager.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the kubeStateMetrics component -*/}} -{{- define "prometheus.serviceAccountName.kubeStateMetrics" -}} -{{- if .Values.serviceAccounts.kubeStateMetrics.create -}} - {{ default (include "prometheus.kubeStateMetrics.fullname" .) .Values.serviceAccounts.kubeStateMetrics.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.kubeStateMetrics.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the nodeExporter component -*/}} -{{- define "prometheus.serviceAccountName.nodeExporter" -}} -{{- if .Values.serviceAccounts.nodeExporter.create -}} - {{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.nodeExporter.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the pushgateway component -*/}} -{{- define "prometheus.serviceAccountName.pushgateway" -}} -{{- if .Values.serviceAccounts.pushgateway.create -}} - {{ default (include "prometheus.pushgateway.fullname" .) 
.Values.serviceAccounts.pushgateway.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.pushgateway.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the server component -*/}} -{{- define "prometheus.serviceAccountName.server" -}} -{{- if .Values.serviceAccounts.server.create -}} - {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.server.name }} -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-clusterrole.yaml b/chart/charts/prometheus/templates/alertmanager-clusterrole.yaml deleted file mode 100755 index 3cfc133..0000000 --- a/chart/charts/prometheus/templates/alertmanager-clusterrole.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . }} -rules: -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.alertmanager.fullname" . }} -{{- else }} - [] -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml b/chart/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml deleted file mode 100755 index 925afcd..0000000 --- a/chart/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . 
}} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.alertmanager" . }} - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "prometheus.alertmanager.fullname" . }} -{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-configmap.yaml b/chart/charts/prometheus/templates/alertmanager-configmap.yaml deleted file mode 100755 index f2d78e2..0000000 --- a/chart/charts/prometheus/templates/alertmanager-configmap.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . }} -data: -{{- $root := . -}} -{{- range $key, $value := .Values.alertmanagerFiles }} - {{ $key }}: | -{{ toYaml $value | default "{}" | indent 4 }} -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-deployment.yaml b/chart/charts/prometheus/templates/alertmanager-deployment.yaml deleted file mode 100755 index 5202407..0000000 --- a/chart/charts/prometheus/templates/alertmanager-deployment.yaml +++ /dev/null @@ -1,134 +0,0 @@ -{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}} -apiVersion: {{ template "prometheus.deployment.apiVersion" . }} -kind: Deployment -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . }} -spec: - selector: - matchLabels: - {{- include "prometheus.alertmanager.matchLabels" . 
| nindent 6 }} - replicas: {{ .Values.alertmanager.replicaCount }} - {{- if .Values.server.strategy }} - strategy: -{{ toYaml .Values.server.strategy | indent 4 }} - {{- end }} - template: - metadata: - {{- if .Values.alertmanager.podAnnotations }} - annotations: -{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }} - {{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 8 }} - spec: -{{- if .Values.alertmanager.schedulerName }} - schedulerName: "{{ .Values.alertmanager.schedulerName }}" -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} -{{- if .Values.alertmanager.priorityClassName }} - priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" -{{- end }} - containers: - - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} - image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" - imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" - env: - {{- range $key, $value := .Values.alertmanager.extraEnv }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - - name: POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - args: - - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }} - - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} - - --cluster.advertise-address=$(POD_IP):6783 - {{- range $key, $value := .Values.alertmanager.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.alertmanager.baseURL }} - - --web.external-url={{ .Values.alertmanager.baseURL }} - {{- end }} - - ports: - - containerPort: 9093 - readinessProbe: - httpGet: - path: {{ .Values.alertmanager.prefixURL }}/#/status - port: 9093 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: -{{ toYaml .Values.alertmanager.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: "{{ 
.Values.alertmanager.persistentVolume.mountPath }}" - subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" - {{- range .Values.alertmanager.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - - - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.name }} - image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" - args: - - --volume-dir=/etc/config - - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload - resources: -{{ toYaml .Values.configmapReload.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{ toYaml .Values.imagePullSecrets | indent 2 }} - {{- end }} - {{- if .Values.alertmanager.nodeSelector }} - nodeSelector: -{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.securityContext }} - securityContext: -{{ toYaml .Values.alertmanager.securityContext | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.tolerations }} - tolerations: -{{ toYaml .Values.alertmanager.tolerations | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.affinity }} - affinity: -{{ toYaml .Values.alertmanager.affinity | indent 8 }} - {{- end }} - volumes: - - name: config-volume - {{- if empty .Values.alertmanager.configFromSecret }} - configMap: - name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} - {{- else }} - secret: - secretName: {{ .Values.alertmanager.configFromSecret }} - {{- end }} - {{- range .Values.alertmanager.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- end }} - - name: storage-volume - {{- if .Values.alertmanager.persistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} - {{- else }} - emptyDir: {} - {{- end -}} -{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-ingress.yaml b/chart/charts/prometheus/templates/alertmanager-ingress.yaml deleted file mode 100755 index a6a9b29..0000000 --- a/chart/charts/prometheus/templates/alertmanager-ingress.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} -{{- $releaseName := .Release.Name -}} -{{- $serviceName := include "prometheus.alertmanager.fullname" . }} -{{- $servicePort := .Values.alertmanager.service.servicePort -}} -{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}} -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: -{{- if .Values.alertmanager.ingress.annotations }} - annotations: -{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} - {{ $key }}: {{ $value }} -{{- end }} - name: {{ template "prometheus.alertmanager.fullname" . }} -spec: - rules: - {{- range .Values.alertmanager.ingress.hosts }} - {{- $url := splitList "/" . 
}} - - host: {{ first $url }} - http: - paths: -{{ if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: /{{ rest $url | join "/" }} - backend: - serviceName: {{ $serviceName }} - servicePort: {{ $servicePort }} - {{- end -}} -{{- if .Values.alertmanager.ingress.tls }} - tls: -{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} - {{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-networkpolicy.yaml b/chart/charts/prometheus/templates/alertmanager-networkpolicy.yaml deleted file mode 100755 index 0bcbd27..0000000 --- a/chart/charts/prometheus/templates/alertmanager-networkpolicy.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} -apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} -kind: NetworkPolicy -metadata: - name: {{ template "prometheus.alertmanager.fullname" . }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} - ingress: - - from: - - podSelector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 12 }} - - ports: - - port: 9093 -{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-pdb.yaml b/chart/charts/prometheus/templates/alertmanager-pdb.yaml deleted file mode 100755 index c38df77..0000000 --- a/chart/charts/prometheus/templates/alertmanager-pdb.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.alertmanager.podDisruptionBudget.enabled }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "prometheus.alertmanager.fullname" . }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -spec: - maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} - selector: - matchLabels: - {{- include "prometheus.alertmanager.labels" . 
| nindent 6 }} -{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml deleted file mode 100755 index 70f8033..0000000 --- a/chart/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if .Values.rbac.create }} -{{- if .Values.podSecurityPolicy.enabled }} -apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.alertmanager.fullname" . }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - annotations: -{{- if .Values.alertmanager.podSecurityPolicy.annotations }} -{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }} -{{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - 'configMap' - - 'persistentVolumeClaim' - - 'emptyDir' - - 'secret' - allowedHostPaths: - - pathPrefix: /etc - readOnly: true - - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }} - hostNetwork: false - hostPID: false - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. 
- - min: 1 - max: 65535 - readOnlyRootFilesystem: true -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-pvc.yaml b/chart/charts/prometheus/templates/alertmanager-pvc.yaml deleted file mode 100755 index 400aba5..0000000 --- a/chart/charts/prometheus/templates/alertmanager-pvc.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if not .Values.alertmanager.statefulSet.enabled -}} -{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} -{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - {{- if .Values.alertmanager.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} - {{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . }} -spec: - accessModes: -{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} -{{- if .Values.alertmanager.persistentVolume.storageClass }} -{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} - storageClassName: "" -{{- else }} - storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" -{{- end }} -{{- end }} -{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }} - volumeBindingModeName: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}" -{{- end }} - resources: - requests: - storage: "{{ .Values.alertmanager.persistentVolume.size }}" -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-service-headless.yaml b/chart/charts/prometheus/templates/alertmanager-service-headless.yaml deleted file mode 100755 index 8d619e8..0000000 --- a/chart/charts/prometheus/templates/alertmanager-service-headless.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} -apiVersion: v1 -kind: 
Service -metadata: -{{- if .Values.alertmanager.statefulSet.headless.annotations }} - annotations: -{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -{{- if .Values.alertmanager.statefulSet.headless.labels }} -{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.alertmanager.fullname" . }}-headless -spec: - clusterIP: None - ports: - - name: http - port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} - protocol: TCP - targetPort: 9093 -{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} - - name: meshpeer - port: 6783 - protocol: TCP - targetPort: 6783 -{{- end }} - selector: - {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} -{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-service.yaml b/chart/charts/prometheus/templates/alertmanager-service.yaml deleted file mode 100755 index 7919643..0000000 --- a/chart/charts/prometheus/templates/alertmanager-service.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- if .Values.alertmanager.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.alertmanager.service.annotations }} - annotations: -{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -{{- if .Values.alertmanager.service.labels }} -{{ toYaml .Values.alertmanager.service.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.alertmanager.fullname" . 
}} -spec: -{{- if .Values.alertmanager.service.clusterIP }} - clusterIP: {{ .Values.alertmanager.service.clusterIP }} -{{- end }} -{{- if .Values.alertmanager.service.externalIPs }} - externalIPs: -{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.alertmanager.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} -{{- end }} -{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: http - port: {{ .Values.alertmanager.service.servicePort }} - protocol: TCP - targetPort: 9093 - {{- if .Values.alertmanager.service.nodePort }} - nodePort: {{ .Values.alertmanager.service.nodePort }} - {{- end }} -{{- if .Values.alertmanager.service.enableMeshPeer }} - - name: meshpeer - port: 6783 - protocol: TCP - targetPort: 6783 -{{- end }} - selector: - {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} -{{- if .Values.alertmanager.service.sessionAffinity }} - sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }} -{{- end }} - type: "{{ .Values.alertmanager.service.type }}" -{{- end }} diff --git a/chart/charts/prometheus/templates/alertmanager-serviceaccount.yaml b/chart/charts/prometheus/templates/alertmanager-serviceaccount.yaml deleted file mode 100755 index 4ff4558..0000000 --- a/chart/charts/prometheus/templates/alertmanager-serviceaccount.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.alertmanager" . 
}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/alertmanager-statefulset.yaml b/chart/charts/prometheus/templates/alertmanager-statefulset.yaml deleted file mode 100755 index 811d678..0000000 --- a/chart/charts/prometheus/templates/alertmanager-statefulset.yaml +++ /dev/null @@ -1,150 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . }} -spec: - serviceName: {{ template "prometheus.alertmanager.fullname" . }}-headless - selector: - matchLabels: - {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} - replicas: {{ .Values.alertmanager.replicaCount }} - podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }} - template: - metadata: - {{- if .Values.alertmanager.podAnnotations }} - annotations: -{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }} - {{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 8 }} - spec: -{{- if .Values.alertmanager.affinity }} - affinity: -{{ toYaml .Values.alertmanager.affinity | indent 8 }} -{{- end }} -{{- if .Values.alertmanager.schedulerName }} - schedulerName: "{{ .Values.alertmanager.schedulerName }}" -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} -{{- if .Values.alertmanager.priorityClassName }} - priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" -{{- end }} - containers: - - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} - image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" - imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" - env: - {{- range $key, $value := .Values.alertmanager.extraEnv }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - - name: POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - args: - - --config.file=/etc/config/alertmanager.yml - - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} - - --cluster.advertise-address=$(POD_IP):6783 - {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} - - --cluster.listen-address=0.0.0.0:6783 - {{- range $n := until (.Values.alertmanager.replicaCount | int) }} - - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783 - {{- end }} - {{- end }} - {{- range $key, $value := .Values.alertmanager.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.alertmanager.baseURL }} - - --web.external-url={{ .Values.alertmanager.baseURL }} - {{- end }} - - ports: - - containerPort: 9093 - readinessProbe: - httpGet: - path: {{ .Values.alertmanager.prefixURL }}/#/status - port: 9093 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: -{{ toYaml .Values.alertmanager.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" - subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" - {{- range .Values.alertmanager.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.name }} - image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" - args: - - --volume-dir=/etc/config - - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload - resources: -{{ toYaml .Values.configmapReload.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{ toYaml .Values.imagePullSecrets | indent 2 }} - {{- end }} - {{- if .Values.alertmanager.nodeSelector }} - nodeSelector: -{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.securityContext }} - securityContext: -{{ toYaml .Values.alertmanager.securityContext | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.tolerations }} - tolerations: -{{ toYaml .Values.alertmanager.tolerations | indent 8 }} - {{- end }} - volumes: - - name: config-volume - configMap: - name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} - {{- range .Values.alertmanager.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- end }} -{{- if .Values.alertmanager.persistentVolume.enabled }} - volumeClaimTemplates: - - metadata: - name: storage-volume - {{- if .Values.alertmanager.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }} - {{- end }} - spec: - accessModes: -{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }} - resources: - requests: - storage: "{{ .Values.alertmanager.persistentVolume.size }}" - {{- if .Values.server.persistentVolume.storageClass }} - {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} - storageClassName: "" - {{- else }} - storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" - {{- end }} - {{- end }} -{{- else }} - - name: storage-volume - emptyDir: {} -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml b/chart/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml deleted file mode 100755 index 9f5be97..0000000 --- a/chart/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml +++ /dev/null @@ -1,87 +0,0 @@ -{{- if and .Values.kubeStateMetrics.enabled .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} - name: {{ template "prometheus.kubeStateMetrics.fullname" . }} -rules: -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.kubeStateMetrics.fullname" . 
}} -{{- end }} - - apiGroups: - - "" - resources: - - namespaces - - nodes - - persistentvolumeclaims - - pods - - services - - resourcequotas - - replicationcontrollers - - limitranges - - persistentvolumeclaims - - persistentvolumes - - endpoints - - secrets - - configmaps - verbs: - - list - - watch - - apiGroups: - - extensions - resources: - - daemonsets - - deployments - - ingresses - - replicasets - verbs: - - list - - watch - - apiGroups: - - apps - resources: - - daemonsets - - deployments - - statefulsets - verbs: - - get - - list - - watch - - apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - list - - watch - - apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - list - - watch - - apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - list - - watch - - apiGroups: - - certificates.k8s.io - resources: - - certificatesigningrequests - verbs: - - list - - watch -{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml b/chart/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml deleted file mode 100755 index 5e3b275..0000000 --- a/chart/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.kubeStateMetrics.enabled .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} - name: {{ template "prometheus.kubeStateMetrics.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "prometheus.kubeStateMetrics.fullname" . 
}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-deployment.yaml b/chart/charts/prometheus/templates/kube-state-metrics-deployment.yaml deleted file mode 100755 index eaeda96..0000000 --- a/chart/charts/prometheus/templates/kube-state-metrics-deployment.yaml +++ /dev/null @@ -1,68 +0,0 @@ -{{- if .Values.kubeStateMetrics.enabled -}} -apiVersion: {{ template "prometheus.deployment.apiVersion" . }} -kind: Deployment -metadata: -{{- if .Values.kubeStateMetrics.deploymentAnnotations }} - annotations: -{{ toYaml .Values.kubeStateMetrics.deploymentAnnotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} - name: {{ template "prometheus.kubeStateMetrics.fullname" . }} -spec: - selector: - matchLabels: - {{- include "prometheus.kubeStateMetrics.matchLabels" . | nindent 6 }} - replicas: {{ .Values.kubeStateMetrics.replicaCount }} - template: - metadata: - {{- if .Values.kubeStateMetrics.podAnnotations }} - annotations: -{{ toYaml .Values.kubeStateMetrics.podAnnotations | indent 8 }} - {{- end }} - labels: - {{- include "prometheus.kubeStateMetrics.labels" . | nindent 8 }} -{{- if .Values.kubeStateMetrics.pod.labels }} -{{ toYaml .Values.kubeStateMetrics.pod.labels | indent 8 }} -{{- end }} - spec: - serviceAccountName: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} -{{- if .Values.kubeStateMetrics.priorityClassName }} - priorityClassName: "{{ .Values.kubeStateMetrics.priorityClassName }}" -{{- end }} - containers: - - name: {{ template "prometheus.name" . 
}}-{{ .Values.kubeStateMetrics.name }} - image: "{{ .Values.kubeStateMetrics.image.repository }}:{{ .Values.kubeStateMetrics.image.tag }}" - imagePullPolicy: "{{ .Values.kubeStateMetrics.image.pullPolicy }}" - {{- if .Values.kubeStateMetrics.args }} - args: - {{- range $key, $value := .Values.kubeStateMetrics.args }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- end }} - ports: - - name: metrics - containerPort: 8080 - resources: -{{ toYaml .Values.kubeStateMetrics.resources | indent 12 }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{ toYaml .Values.imagePullSecrets | indent 2 }} - {{- end }} - {{- if .Values.kubeStateMetrics.nodeSelector }} - nodeSelector: -{{ toYaml .Values.kubeStateMetrics.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.kubeStateMetrics.securityContext }} - securityContext: -{{ toYaml .Values.kubeStateMetrics.securityContext | indent 8 }} - {{- end }} - {{- if .Values.kubeStateMetrics.tolerations }} - tolerations: -{{ toYaml .Values.kubeStateMetrics.tolerations | indent 8 }} - {{- end }} - {{- if .Values.kubeStateMetrics.affinity }} - affinity: -{{ toYaml .Values.kubeStateMetrics.affinity | indent 8 }} - {{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml b/chart/charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml deleted file mode 100755 index 56893ce..0000000 --- a/chart/charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.kubeStateMetrics.enabled .Values.networkPolicy.enabled -}} -apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} -kind: NetworkPolicy -metadata: - name: {{ template "prometheus.kubeStateMetrics.fullname" . }} - labels: - {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "prometheus.kubeStateMetrics.matchLabels" . 
| nindent 6 }} - ingress: - - from: - - podSelector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 10 }} - - ports: - - port: 8080 -{{- end -}} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-pdb.yaml b/chart/charts/prometheus/templates/kube-state-metrics-pdb.yaml deleted file mode 100755 index 3f3411d..0000000 --- a/chart/charts/prometheus/templates/kube-state-metrics-pdb.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.kubeStateMetrics.podDisruptionBudget.enabled }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "prometheus.kubeStateMetrics.fullname" . }} - labels: - {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} -spec: - maxUnavailable: {{ .Values.kubeStateMetrics.podDisruptionBudget.maxUnavailable }} - selector: - matchLabels: - {{- include "prometheus.kubeStateMetrics.labels" . | nindent 6 }} -{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/kube-state-metrics-podsecuritypolicy.yaml deleted file mode 100755 index d1afcb8..0000000 --- a/chart/charts/prometheus/templates/kube-state-metrics-podsecuritypolicy.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{{- if .Values.rbac.create }} -{{- if .Values.podSecurityPolicy.enabled }} -apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.kubeStateMetrics.fullname" . }} - labels: - {{- include "prometheus.kubeStateMetrics.labels" . 
| nindent 4 }} - annotations: -{{- if .Values.kubeStateMetrics.podSecurityPolicy.annotations }} -{{ toYaml .Values.kubeStateMetrics.podSecurityPolicy.annotations | indent 4 }} -{{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - 'secret' - allowedHostPaths: [] - hostNetwork: false - hostPID: false - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: true -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml b/chart/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml deleted file mode 100755 index 5f97480..0000000 --- a/chart/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if and .Values.kubeStateMetrics.enabled .Values.serviceAccounts.kubeStateMetrics.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} -{{- end -}} diff --git a/chart/charts/prometheus/templates/kube-state-metrics-svc.yaml b/chart/charts/prometheus/templates/kube-state-metrics-svc.yaml deleted file mode 100755 index 717d85f..0000000 --- a/chart/charts/prometheus/templates/kube-state-metrics-svc.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if and .Values.kubeStateMetrics.enabled .Values.kubeStateMetrics.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.kubeStateMetrics.service.annotations }} - annotations: -{{ toYaml .Values.kubeStateMetrics.service.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.kubeStateMetrics.labels" . 
| nindent 4 }} -{{- if .Values.kubeStateMetrics.service.labels }} -{{ toYaml .Values.kubeStateMetrics.service.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.kubeStateMetrics.fullname" . }} -spec: -{{- if .Values.kubeStateMetrics.service.clusterIP }} - clusterIP: {{ .Values.kubeStateMetrics.service.clusterIP }} -{{- end }} -{{- if .Values.kubeStateMetrics.service.externalIPs }} - externalIPs: -{{ toYaml .Values.kubeStateMetrics.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.kubeStateMetrics.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.kubeStateMetrics.service.loadBalancerIP }} -{{- end }} -{{- if .Values.kubeStateMetrics.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.kubeStateMetrics.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: http - port: {{ .Values.kubeStateMetrics.service.servicePort }} - protocol: TCP - targetPort: 8080 - selector: - {{- include "prometheus.kubeStateMetrics.matchLabels" . | nindent 4 }} - type: "{{ .Values.kubeStateMetrics.service.type }}" -{{- end }} diff --git a/chart/charts/prometheus/templates/node-exporter-daemonset.yaml b/chart/charts/prometheus/templates/node-exporter-daemonset.yaml deleted file mode 100755 index 478f10a..0000000 --- a/chart/charts/prometheus/templates/node-exporter-daemonset.yaml +++ /dev/null @@ -1,116 +0,0 @@ -{{- if .Values.nodeExporter.enabled -}} -apiVersion: {{ template "prometheus.daemonset.apiVersion" . }} -kind: DaemonSet -metadata: -{{- if .Values.nodeExporter.deploymentAnnotations }} - annotations: -{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} - name: {{ template "prometheus.nodeExporter.fullname" . }} -spec: - selector: - matchLabels: - {{- include "prometheus.nodeExporter.matchLabels" . 
| nindent 6 }} - {{- if .Values.nodeExporter.updateStrategy }} - updateStrategy: -{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} - {{- end }} - template: - metadata: - {{- if .Values.nodeExporter.podAnnotations }} - annotations: -{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} - {{- end }} - labels: - {{- include "prometheus.nodeExporter.labels" . | nindent 8 }} -{{- if .Values.nodeExporter.pod.labels }} -{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }} -{{- end }} - spec: - serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }} -{{- if .Values.nodeExporter.priorityClassName }} - priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}" -{{- end }} - containers: - - name: {{ template "prometheus.name" . }}-{{ .Values.nodeExporter.name }} - image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" - imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" - args: - - --path.procfs=/host/proc - - --path.sysfs=/host/sys - {{- range $key, $value := .Values.nodeExporter.extraArgs }} - {{- if $value }} - - --{{ $key }}={{ $value }} - {{- else }} - - --{{ $key }} - {{- end }} - {{- end }} - ports: - - name: metrics - containerPort: 9100 - hostPort: {{ .Values.nodeExporter.service.hostPort }} - resources: -{{ toYaml .Values.nodeExporter.resources | indent 12 }} - volumeMounts: - - name: proc - mountPath: /host/proc - readOnly: true - - name: sys - mountPath: /host/sys - readOnly: true - {{- range .Values.nodeExporter.extraHostPathMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- if .mountPropagation }} - mountPropagation: {{ .mountPropagation }} - {{- end }} - {{- end }} - {{- range .Values.nodeExporter.extraConfigmapMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{ toYaml .Values.imagePullSecrets | indent 2 }} 
- {{- end }} - {{- if .Values.nodeExporter.hostNetwork }} - hostNetwork: true - {{- end }} - {{- if .Values.nodeExporter.hostPID }} - hostPID: true - {{- end }} - {{- if .Values.nodeExporter.tolerations }} - tolerations: -{{ toYaml .Values.nodeExporter.tolerations | indent 8 }} - {{- end }} - {{- if .Values.nodeExporter.nodeSelector }} - nodeSelector: -{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.nodeExporter.securityContext }} - securityContext: -{{ toYaml .Values.nodeExporter.securityContext | indent 8 }} - {{- end }} - volumes: - - name: proc - hostPath: - path: /proc - - name: sys - hostPath: - path: /sys - {{- range .Values.nodeExporter.extraHostPathMounts }} - - name: {{ .name }} - hostPath: - path: {{ .hostPath }} - {{- end }} - {{- range .Values.nodeExporter.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - -{{- end -}} diff --git a/chart/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml deleted file mode 100755 index 825794b..0000000 --- a/chart/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml +++ /dev/null @@ -1,55 +0,0 @@ -{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} -{{- if .Values.podSecurityPolicy.enabled }} -apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.nodeExporter.fullname" . }} - labels: - {{- include "prometheus.nodeExporter.labels" . 
| nindent 4 }} - annotations: -{{- if .Values.nodeExporter.podSecurityPolicy.annotations }} -{{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }} -{{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - 'configMap' - - 'hostPath' - - 'secret' - allowedHostPaths: - - pathPrefix: /proc - readOnly: true - - pathPrefix: /sys - readOnly: true - {{- range .Values.nodeExporter.extraHostPathMounts }} - - pathPrefix: {{ .hostPath }} - readOnly: {{ .readOnly }} - {{- end }} - hostNetwork: {{ .Values.nodeExporter.hostNetwork }} - hostPID: {{ .Values.nodeExporter.hostPID }} - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - hostPorts: - - min: 1 - max: 65535 -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/node-exporter-role.yaml b/chart/charts/prometheus/templates/node-exporter-role.yaml deleted file mode 100755 index 49a6874..0000000 --- a/chart/charts/prometheus/templates/node-exporter-role.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} -{{- if or (default .Values.nodeExporter.podSecurityPolicy.enabled false) (.Values.podSecurityPolicy.enabled) }} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: {{ template "prometheus.nodeExporter.fullname" . }} - labels: - {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} - namespace: {{ .Release.Namespace }} -rules: -- apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "prometheus.nodeExporter.fullname" . 
}} -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/node-exporter-rolebinding.yaml b/chart/charts/prometheus/templates/node-exporter-rolebinding.yaml deleted file mode 100755 index e56e5ff..0000000 --- a/chart/charts/prometheus/templates/node-exporter-rolebinding.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} -{{- if .Values.podSecurityPolicy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "prometheus.nodeExporter.fullname" . }} - labels: - {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: {{ template "prometheus.nodeExporter.fullname" . }} - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} - namespace: {{ .Release.Namespace }} -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/node-exporter-service.yaml b/chart/charts/prometheus/templates/node-exporter-service.yaml deleted file mode 100755 index 55c683b..0000000 --- a/chart/charts/prometheus/templates/node-exporter-service.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.nodeExporter.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.nodeExporter.service.annotations }} - annotations: -{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} -{{- if .Values.nodeExporter.service.labels }} -{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.nodeExporter.fullname" . 
}} -spec: -{{- if .Values.nodeExporter.service.clusterIP }} - clusterIP: {{ .Values.nodeExporter.service.clusterIP }} -{{- end }} -{{- if .Values.nodeExporter.service.externalIPs }} - externalIPs: -{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.nodeExporter.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} -{{- end }} -{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: metrics - port: {{ .Values.nodeExporter.service.servicePort }} - protocol: TCP - targetPort: 9100 - selector: - {{- include "prometheus.nodeExporter.matchLabels" . | nindent 4 }} - type: "{{ .Values.nodeExporter.service.type }}" -{{- end -}} diff --git a/chart/charts/prometheus/templates/node-exporter-serviceaccount.yaml b/chart/charts/prometheus/templates/node-exporter-serviceaccount.yaml deleted file mode 100755 index a922b23..0000000 --- a/chart/charts/prometheus/templates/node-exporter-serviceaccount.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} -{{- end -}} diff --git a/chart/charts/prometheus/templates/pushgateway-clusterrole.yaml b/chart/charts/prometheus/templates/pushgateway-clusterrole.yaml deleted file mode 100755 index f4393c9..0000000 --- a/chart/charts/prometheus/templates/pushgateway-clusterrole.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - {{- include "prometheus.pushgateway.labels" . 
| nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -rules: -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.pushgateway.fullname" . }} -{{- else }} - [] -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml b/chart/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml deleted file mode 100755 index bcbaccb..0000000 --- a/chart/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.pushgateway" . }} - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "prometheus.pushgateway.fullname" . }} -{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-deployment.yaml b/chart/charts/prometheus/templates/pushgateway-deployment.yaml deleted file mode 100755 index bddbc06..0000000 --- a/chart/charts/prometheus/templates/pushgateway-deployment.yaml +++ /dev/null @@ -1,97 +0,0 @@ -{{- if .Values.pushgateway.enabled -}} -apiVersion: {{ template "prometheus.deployment.apiVersion" . }} -kind: Deployment -metadata: - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -spec: - selector: - {{- if .Values.schedulerName }} - schedulerName: "{{ .Values.schedulerName }}" - {{- end }} - matchLabels: - {{- include "prometheus.pushgateway.matchLabels" . 
| nindent 6 }} - replicas: {{ .Values.pushgateway.replicaCount }} - {{- if .Values.pushgateway.strategy }} - strategy: -{{ toYaml .Values.pushgateway.strategy | indent 4 }} - {{- end }} - template: - metadata: - {{- if .Values.pushgateway.podAnnotations }} - annotations: -{{ toYaml .Values.pushgateway.podAnnotations | indent 8 }} - {{- end }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 8 }} - spec: - serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }} -{{- if .Values.pushgateway.priorityClassName }} - priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" -{{- end }} - containers: - - name: {{ template "prometheus.name" . }}-{{ .Values.pushgateway.name }} - image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" - imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" - args: - {{- range $key, $value := .Values.pushgateway.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - ports: - - containerPort: 9091 - livenessProbe: - httpGet: - {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} - path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy - {{- else }} - path: /-/healthy - {{- end }} - port: 9091 - initialDelaySeconds: 10 - timeoutSeconds: 10 - readinessProbe: - httpGet: - {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} - path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready - {{- else }} - path: /-/ready - {{- end }} - port: 9091 - initialDelaySeconds: 10 - timeoutSeconds: 10 - resources: -{{ toYaml .Values.pushgateway.resources | indent 12 }} - {{- if .Values.pushgateway.persistentVolume.enabled }} - volumeMounts: - - name: storage-volume - mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" - subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{ toYaml 
.Values.imagePullSecrets | indent 2 }} - {{- end }} - {{- if .Values.pushgateway.nodeSelector }} - nodeSelector: -{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.pushgateway.securityContext }} - securityContext: -{{ toYaml .Values.pushgateway.securityContext | indent 8 }} - {{- end }} - {{- if .Values.pushgateway.tolerations }} - tolerations: -{{ toYaml .Values.pushgateway.tolerations | indent 8 }} - {{- end }} - {{- if .Values.pushgateway.affinity }} - affinity: -{{ toYaml .Values.pushgateway.affinity | indent 8 }} - {{- end }} - {{- if .Values.pushgateway.persistentVolume.enabled }} - volumes: - - name: storage-volume - persistentVolumeClaim: - claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} - {{- end -}} -{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-ingress.yaml b/chart/charts/prometheus/templates/pushgateway-ingress.yaml deleted file mode 100755 index 42315a9..0000000 --- a/chart/charts/prometheus/templates/pushgateway-ingress.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} -{{- $releaseName := .Release.Name -}} -{{- $serviceName := include "prometheus.pushgateway.fullname" . }} -{{- $servicePort := .Values.pushgateway.service.servicePort -}} -{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}} -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: -{{- if .Values.pushgateway.ingress.annotations }} - annotations: -{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} -{{- end }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -spec: - rules: - {{- range .Values.pushgateway.ingress.hosts }} - {{- $url := splitList "/" . 
}} - - host: {{ first $url }} - http: - paths: -{{ if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: /{{ rest $url | join "/" }} - backend: - serviceName: {{ $serviceName }} - servicePort: {{ $servicePort }} - {{- end -}} -{{- if .Values.pushgateway.ingress.tls }} - tls: -{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} - {{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/pushgateway-networkpolicy.yaml b/chart/charts/prometheus/templates/pushgateway-networkpolicy.yaml deleted file mode 100755 index e8f6ab8..0000000 --- a/chart/charts/prometheus/templates/pushgateway-networkpolicy.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}} -apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} -kind: NetworkPolicy -metadata: - name: {{ template "prometheus.pushgateway.fullname" . }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} - ingress: - - from: - - podSelector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 12 }} - - ports: - - port: 9091 -{{- end -}} diff --git a/chart/charts/prometheus/templates/pushgateway-pdb.yaml b/chart/charts/prometheus/templates/pushgateway-pdb.yaml deleted file mode 100755 index e9910a5..0000000 --- a/chart/charts/prometheus/templates/pushgateway-pdb.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.pushgateway.podDisruptionBudget.enabled }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "prometheus.pushgateway.fullname" . }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} -spec: - maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }} - selector: - matchLabels: - {{- include "prometheus.pushgateway.labels" . 
| nindent 6 }} -{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml deleted file mode 100755 index dd3829d..0000000 --- a/chart/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{- if .Values.rbac.create }} -{{- if .Values.podSecurityPolicy.enabled }} -apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.pushgateway.fullname" . }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - annotations: -{{- if .Values.pushgateway.podSecurityPolicy.annotations }} -{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }} -{{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - 'persistentVolumeClaim' - - 'secret' - allowedHostPaths: - - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }} - hostNetwork: false - hostPID: false - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. 
- - min: 1 - max: 65535 - readOnlyRootFilesystem: true -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-pvc.yaml b/chart/charts/prometheus/templates/pushgateway-pvc.yaml deleted file mode 100755 index ba16a37..0000000 --- a/chart/charts/prometheus/templates/pushgateway-pvc.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if .Values.pushgateway.persistentVolume.enabled -}} -{{- if not .Values.pushgateway.persistentVolume.existingClaim -}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - {{- if .Values.pushgateway.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} - {{- end }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -spec: - accessModes: -{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} -{{- if .Values.pushgateway.persistentVolume.storageClass }} -{{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} - storageClassName: "" -{{- else }} - storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" -{{- end }} -{{- end }} -{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }} - volumeBindingModeName: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}" -{{- end }} - resources: - requests: - storage: "{{ .Values.pushgateway.persistentVolume.size }}" -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/pushgateway-service.yaml b/chart/charts/prometheus/templates/pushgateway-service.yaml deleted file mode 100755 index e84771d..0000000 --- a/chart/charts/prometheus/templates/pushgateway-service.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.pushgateway.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.pushgateway.service.annotations }} - annotations: -{{ toYaml .Values.pushgateway.service.annotations | indent 4}} -{{- end }} - labels: - {{- include 
"prometheus.pushgateway.labels" . | nindent 4 }} -{{- if .Values.pushgateway.service.labels }} -{{ toYaml .Values.pushgateway.service.labels | indent 4}} -{{- end }} - name: {{ template "prometheus.pushgateway.fullname" . }} -spec: -{{- if .Values.pushgateway.service.clusterIP }} - clusterIP: {{ .Values.pushgateway.service.clusterIP }} -{{- end }} -{{- if .Values.pushgateway.service.externalIPs }} - externalIPs: -{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.pushgateway.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} -{{- end }} -{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: http - port: {{ .Values.pushgateway.service.servicePort }} - protocol: TCP - targetPort: 9091 - selector: - {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }} - type: "{{ .Values.pushgateway.service.type }}" -{{- end }} diff --git a/chart/charts/prometheus/templates/pushgateway-serviceaccount.yaml b/chart/charts/prometheus/templates/pushgateway-serviceaccount.yaml deleted file mode 100755 index 1596a28..0000000 --- a/chart/charts/prometheus/templates/pushgateway-serviceaccount.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.pushgateway" . 
}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/server-clusterrole.yaml b/chart/charts/prometheus/templates/server-clusterrole.yaml deleted file mode 100755 index c0c0585..0000000 --- a/chart/charts/prometheus/templates/server-clusterrole.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- if and .Values.server.enabled .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -rules: -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.server.fullname" . }} -{{- end }} - - apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - nodes/metrics - - services - - endpoints - - pods - - ingresses - - configmaps - verbs: - - get - - list - - watch - - apiGroups: - - "extensions" - resources: - - ingresses/status - - ingresses - verbs: - - get - - list - - watch - - nonResourceURLs: - - "/metrics" - verbs: - - get -{{- end }} diff --git a/chart/charts/prometheus/templates/server-clusterrolebinding.yaml b/chart/charts/prometheus/templates/server-clusterrolebinding.yaml deleted file mode 100755 index 1196ce3..0000000 --- a/chart/charts/prometheus/templates/server-clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.server.enabled .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.server" . }} - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "prometheus.server.fullname" . 
}} -{{- end }} diff --git a/chart/charts/prometheus/templates/server-configmap.yaml b/chart/charts/prometheus/templates/server-configmap.yaml deleted file mode 100755 index 2e8c4a7..0000000 --- a/chart/charts/prometheus/templates/server-configmap.yaml +++ /dev/null @@ -1,73 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if (empty .Values.server.configMapOverrideName) -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -data: -{{- $root := . -}} -{{- range $key, $value := .Values.serverFiles }} - {{ $key }}: | -{{- if eq $key "prometheus.yml" }} - global: -{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} -{{- end }} -{{- if eq $key "alerts" }} -{{- if and (not (empty $value)) (empty $value.groups) }} - groups: -{{- range $ruleKey, $ruleValue := $value }} - - name: {{ $ruleKey -}}.rules - rules: -{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} -{{- end }} -{{- else }} -{{ toYaml $value | indent 4 }} -{{- end }} -{{- else }} -{{ toYaml $value | default "{}" | indent 4 }} -{{- end }} -{{- if eq $key "prometheus.yml" -}} -{{- if $root.Values.extraScrapeConfigs }} -{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} -{{- end -}} -{{- if $root.Values.alertmanager.enabled }} - alerting: -{{- if $root.Values.alertRelabelConfigs }} -{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} -{{- end }} - alertmanagers: -{{- if $root.Values.server.alertmanagers }} -{{ toYaml $root.Values.server.alertmanagers | indent 8 }} -{{- else }} - - kubernetes_sd_configs: - - role: pod - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - {{- if $root.Values.alertmanager.prefixURL }} - path_prefix: {{ $root.Values.alertmanager.prefixURL }} - {{- end }} - relabel_configs: - - source_labels: [__meta_kubernetes_namespace] 
- regex: {{ $root.Release.Namespace }} - action: keep - - source_labels: [__meta_kubernetes_pod_label_app] - regex: {{ template "prometheus.name" $root }} - action: keep - - source_labels: [__meta_kubernetes_pod_label_component] - regex: alertmanager - action: keep - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe] - regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }} - action: keep - - source_labels: [__meta_kubernetes_pod_container_port_number] - regex: - action: drop -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/server-deployment.yaml b/chart/charts/prometheus/templates/server-deployment.yaml deleted file mode 100755 index 5440461..0000000 --- a/chart/charts/prometheus/templates/server-deployment.yaml +++ /dev/null @@ -1,212 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if not .Values.server.statefulSet.enabled -}} -apiVersion: {{ template "prometheus.deployment.apiVersion" . }} -kind: Deployment -metadata: -{{- if .Values.server.deploymentAnnotations }} - annotations: -{{ toYaml .Values.server.deploymentAnnotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -spec: - selector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 6 }} - replicas: {{ .Values.server.replicaCount }} - {{- if .Values.server.strategy }} - strategy: -{{ toYaml .Values.server.strategy | indent 4 }} - {{- end }} - template: - metadata: - {{- if .Values.server.podAnnotations }} - annotations: -{{ toYaml .Values.server.podAnnotations | indent 8 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . 
| nindent 8 }} - {{- if .Values.server.podLabels}} - {{ toYaml .Values.server.podLabels | nindent 8 }} - {{- end}} - spec: -{{- if .Values.server.priorityClassName }} - priorityClassName: "{{ .Values.server.priorityClassName }}" -{{- end }} -{{- if .Values.server.schedulerName }} - schedulerName: "{{ .Values.server.schedulerName }}" -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} - {{- if .Values.server.extraInitContainers }} - initContainers: -{{ toYaml .Values.server.extraInitContainers | indent 8 }} - {{- end }} - containers: - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.name }} - image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" - args: - - --volume-dir=/etc/config - - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload - {{- range $key, $value := .Values.configmapReload.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- range .Values.configmapReload.extraVolumeDirs }} - - --volume-dir={{ . }} - {{- end }} - resources: -{{ toYaml .Values.configmapReload.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- range .Values.configmapReload.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - - - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} - image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" - imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" - {{- if .Values.server.env }} - env: -{{ toYaml .Values.server.env | indent 12}} - {{- end }} - args: - {{- if .Values.server.retention }} - - --storage.tsdb.retention.time={{ .Values.server.retention }} - {{- end }} - - --config.file={{ .Values.server.configPath }} - - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} - - --web.console.libraries=/etc/prometheus/console_libraries - - --web.console.templates=/etc/prometheus/consoles - {{- range .Values.server.extraFlags }} - - --{{ . }} - {{- end }} - {{- if .Values.server.baseURL }} - - --web.external-url={{ .Values.server.baseURL }} - {{- end }} - - {{- range $key, $value := .Values.server.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - ports: - - containerPort: 9090 - readinessProbe: - httpGet: - path: {{ .Values.server.prefixURL }}/-/ready - port: 9090 - initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} - timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} - failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} - successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} - livenessProbe: - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} - timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} - failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} - successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} - resources: -{{ toYaml .Values.server.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: {{ .Values.server.persistentVolume.mountPath }} - subPath: "{{ .Values.server.persistentVolume.subPath }}" - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - 
mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- if .Values.server.extraVolumeMounts }} - {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- if .Values.server.sidecarContainers }} - {{- toYaml .Values.server.sidecarContainers | nindent 8 }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{ toYaml .Values.imagePullSecrets | indent 2 }} - {{- end }} - {{- if .Values.server.nodeSelector }} - nodeSelector: -{{ toYaml .Values.server.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.server.securityContext }} - securityContext: -{{ toYaml .Values.server.securityContext | indent 8 }} - {{- end }} - {{- if .Values.server.tolerations }} - tolerations: -{{ toYaml .Values.server.tolerations | indent 8 }} - {{- end }} - {{- if .Values.server.affinity }} - affinity: -{{ toYaml .Values.server.affinity | indent 8 }} - {{- end }} - terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} - volumes: - - name: config-volume - configMap: - name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} - - name: storage-volume - {{- if .Values.server.persistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . 
}}{{- end }} - {{- else }} - emptyDir: - {{- if .Values.server.emptyDir.sizeLimit }} - sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} - {{- else }} - {} - {{- end -}} - {{- end -}} -{{- if .Values.server.extraVolumes }} -{{ toYaml .Values.server.extraVolumes | indent 8}} -{{- end }} - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - hostPath: - path: {{ .hostPath }} - {{- end }} - {{- range .Values.configmapReload.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- end }} - {{- range .Values.configmapReload.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/server-ingress.yaml b/chart/charts/prometheus/templates/server-ingress.yaml deleted file mode 100755 index 0a3cb69..0000000 --- a/chart/charts/prometheus/templates/server-ingress.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.server.ingress.enabled -}} -{{- $releaseName := .Release.Name -}} -{{- $serviceName := include "prometheus.server.fullname" . }} -{{- $servicePort := .Values.server.service.servicePort -}} -{{- $extraPaths := .Values.server.ingress.extraPaths -}} -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: -{{- if .Values.server.ingress.annotations }} - annotations: -{{ toYaml .Values.server.ingress.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -{{- range $key, $value := .Values.server.ingress.extraLabels }} - {{ $key }}: {{ $value }} -{{- end }} - name: {{ template "prometheus.server.fullname" . 
}} -spec: - rules: - {{- range .Values.server.ingress.hosts }} - {{- $url := splitList "/" . }} - - host: {{ first $url }} - http: - paths: -{{ if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: /{{ rest $url | join "/" }} - backend: - serviceName: {{ $serviceName }} - servicePort: {{ $servicePort }} - {{- end -}} -{{- if .Values.server.ingress.tls }} - tls: -{{ toYaml .Values.server.ingress.tls | indent 4 }} - {{- end -}} -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/server-networkpolicy.yaml b/chart/charts/prometheus/templates/server-networkpolicy.yaml deleted file mode 100755 index 9e10129..0000000 --- a/chart/charts/prometheus/templates/server-networkpolicy.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.networkPolicy.enabled }} -apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} -kind: NetworkPolicy -metadata: - name: {{ template "prometheus.server.fullname" . }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 6 }} - ingress: - - ports: - - port: 9090 -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/server-pdb.yaml b/chart/charts/prometheus/templates/server-pdb.yaml deleted file mode 100755 index b2447fd..0000000 --- a/chart/charts/prometheus/templates/server-pdb.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.server.podDisruptionBudget.enabled }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "prometheus.server.fullname" . }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -spec: - maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }} - selector: - matchLabels: - {{- include "prometheus.server.labels" . 
| nindent 6 }} -{{- end }} diff --git a/chart/charts/prometheus/templates/server-podsecuritypolicy.yaml b/chart/charts/prometheus/templates/server-podsecuritypolicy.yaml deleted file mode 100755 index a0e15a3..0000000 --- a/chart/charts/prometheus/templates/server-podsecuritypolicy.yaml +++ /dev/null @@ -1,53 +0,0 @@ -{{- if .Values.rbac.create }} -{{- if .Values.podSecurityPolicy.enabled }} -apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.server.fullname" . }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - annotations: -{{- if .Values.server.podSecurityPolicy.annotations }} -{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }} -{{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - allowedCapabilities: - - 'CHOWN' - volumes: - - 'configMap' - - 'persistentVolumeClaim' - - 'emptyDir' - - 'secret' - - 'hostPath' - allowedHostPaths: - - pathPrefix: /etc - readOnly: true - - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} - {{- range .Values.server.extraHostPathMounts }} - - pathPrefix: {{ .hostPath }} - readOnly: {{ .readOnly }} - {{- end }} - hostNetwork: false - hostPID: false - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. 
- - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/server-pvc.yaml b/chart/charts/prometheus/templates/server-pvc.yaml deleted file mode 100755 index 9d1cb37..0000000 --- a/chart/charts/prometheus/templates/server-pvc.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if not .Values.server.statefulSet.enabled -}} -{{- if .Values.server.persistentVolume.enabled -}} -{{- if not .Values.server.persistentVolume.existingClaim -}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - {{- if .Values.server.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -spec: - accessModes: -{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} -{{- if .Values.server.persistentVolume.storageClass }} -{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} - storageClassName: "" -{{- else }} - storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" -{{- end }} -{{- end }} -{{- if .Values.server.persistentVolume.volumeBindingMode }} - volumeBindingModeName: "{{ .Values.server.persistentVolume.volumeBindingMode }}" -{{- end }} - resources: - requests: - storage: "{{ .Values.server.persistentVolume.size }}" -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/server-service-headless.yaml b/chart/charts/prometheus/templates/server-service-headless.yaml deleted file mode 100755 index 3edc58c..0000000 --- a/chart/charts/prometheus/templates/server-service-headless.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.server.statefulSet.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.server.statefulSet.headless.annotations }} - annotations: -{{ toYaml 
.Values.server.statefulSet.headless.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -{{- if .Values.server.statefulSet.headless.labels }} -{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.server.fullname" . }}-headless -spec: - clusterIP: None - ports: - - name: http - port: {{ .Values.server.statefulSet.headless.servicePort }} - protocol: TCP - targetPort: 9090 - selector: - {{- include "prometheus.server.matchLabels" . | nindent 4 }} -{{- end -}} -{{- end -}} diff --git a/chart/charts/prometheus/templates/server-service.yaml b/chart/charts/prometheus/templates/server-service.yaml deleted file mode 100755 index a0c88ee..0000000 --- a/chart/charts/prometheus/templates/server-service.yaml +++ /dev/null @@ -1,50 +0,0 @@ -{{- if .Values.server.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.server.service.annotations }} - annotations: -{{ toYaml .Values.server.service.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -{{- if .Values.server.service.labels }} -{{ toYaml .Values.server.service.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.server.fullname" . 
}} -spec: -{{- if .Values.server.service.clusterIP }} - clusterIP: {{ .Values.server.service.clusterIP }} -{{- end }} -{{- if .Values.server.service.externalIPs }} - externalIPs: -{{ toYaml .Values.server.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.server.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} -{{- end }} -{{- if .Values.server.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: http - port: {{ .Values.server.service.servicePort }} - protocol: TCP - targetPort: 9090 - {{- if .Values.server.service.nodePort }} - nodePort: {{ .Values.server.service.nodePort }} - {{- end }} - selector: - {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} - statefulset.kubernetes.io/pod-name: {{ .Release.Name }}-{{ .Values.server.name }}-{{ .Values.server.service.statefulsetReplica.replica }} - {{- else -}} - {{- include "prometheus.server.matchLabels" . | nindent 4 }} -{{- if .Values.server.service.sessionAffinity }} - sessionAffinity: {{ .Values.server.service.sessionAffinity }} -{{- end }} - {{- end }} - type: "{{ .Values.server.service.type }}" -{{- end -}} diff --git a/chart/charts/prometheus/templates/server-serviceaccount.yaml b/chart/charts/prometheus/templates/server-serviceaccount.yaml deleted file mode 100755 index 68c6412..0000000 --- a/chart/charts/prometheus/templates/server-serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.serviceAccounts.server.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.server" . 
}} -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/server-statefulset.yaml b/chart/charts/prometheus/templates/server-statefulset.yaml deleted file mode 100755 index 4569fef..0000000 --- a/chart/charts/prometheus/templates/server-statefulset.yaml +++ /dev/null @@ -1,220 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.server.statefulSet.enabled -}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: -{{- if .Values.server.statefulSet.annotations }} - annotations: -{{ toYaml .Values.server.statefulSet.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - {{- if .Values.server.statefulSet.labels}} - {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} - {{- end}} - name: {{ template "prometheus.server.fullname" . }} -spec: - serviceName: {{ template "prometheus.server.fullname" . }}-headless - selector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 6 }} - replicas: {{ .Values.server.replicaCount }} - podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} - template: - metadata: - {{- if .Values.server.podAnnotations }} - annotations: -{{ toYaml .Values.server.podAnnotations | indent 8 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 8 }} - {{- if .Values.server.statefulSet.labels}} - {{ toYaml .Values.server.statefulSet.labels | nindent 8 }} - {{- end}} - spec: -{{- if .Values.server.affinity }} - affinity: -{{ toYaml .Values.server.affinity | indent 8 }} -{{- end }} -{{- if .Values.server.priorityClassName }} - priorityClassName: "{{ .Values.server.priorityClassName }}" -{{- end }} -{{- if .Values.server.schedulerName }} - schedulerName: "{{ .Values.server.schedulerName }}" -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} - containers: - - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }}-{{ .Values.configmapReload.name }} - image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" - args: - - --volume-dir=/etc/config - - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload - {{- range $key, $value := .Values.configmapReload.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- range .Values.configmapReload.extraVolumeDirs }} - - --volume-dir={{ . }} - {{- end }} - resources: -{{ toYaml .Values.configmapReload.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- range .Values.configmapReload.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} - image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" - imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" - {{- if .Values.server.env }} - env: -{{ toYaml .Values.server.env | indent 12}} - {{- end }} - args: - {{- if .Values.server.retention }} - - --storage.tsdb.retention.time={{ .Values.server.retention }} - {{- end }} - - --config.file={{ .Values.server.configPath }} - - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} - - --web.console.libraries=/etc/prometheus/console_libraries - - --web.console.templates=/etc/prometheus/consoles - {{- range .Values.server.extraFlags }} - - --{{ . 
}} - {{- end }} - {{- range $key, $value := .Values.server.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.server.baseURL }} - - --web.external-url={{ .Values.server.baseURL }} - {{- end }} - ports: - - containerPort: 9090 - readinessProbe: - httpGet: - path: {{ .Values.server.prefixURL }}/-/ready - port: 9090 - initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} - timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} - livenessProbe: - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} - timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} - resources: -{{ toYaml .Values.server.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: {{ .Values.server.persistentVolume.mountPath }} - subPath: "{{ .Values.server.persistentVolume.subPath }}" - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- if .Values.server.extraVolumeMounts }} - {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- if .Values.server.sidecarContainers }} - {{- toYaml .Values.server.sidecarContainers | nindent 8 }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{ toYaml .Values.imagePullSecrets | indent 2 }} - {{- end }} - {{- if .Values.server.nodeSelector }} - nodeSelector: -{{ toYaml .Values.server.nodeSelector | indent 8 }} - {{- end }} - {{- if 
.Values.server.securityContext }} - securityContext: -{{ toYaml .Values.server.securityContext | indent 8 }} - {{- end }} - {{- if .Values.server.tolerations }} - tolerations: -{{ toYaml .Values.server.tolerations | indent 8 }} - {{- end }} - {{- if .Values.server.affinity }} - affinity: -{{ toYaml .Values.server.affinity | indent 8 }} - {{- end }} - terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} - volumes: - - name: config-volume - configMap: - name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - hostPath: - path: {{ .hostPath }} - {{- end }} - {{- range .Values.configmapReload.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- end }} - {{- range .Values.configmapReload.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} -{{- if .Values.server.extraVolumes }} -{{ toYaml .Values.server.extraVolumes | indent 8}} -{{- end }} -{{- if .Values.server.persistentVolume.enabled }} - volumeClaimTemplates: - - metadata: - name: storage-volume - {{- if .Values.server.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} - {{- end }} - spec: - accessModes: -{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} - resources: - requests: - storage: "{{ .Values.server.persistentVolume.size }}" - {{- if .Values.server.persistentVolume.storageClass }} - {{- if (eq "-" 
.Values.server.persistentVolume.storageClass) }} - storageClassName: "" - {{- else }} - storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" - {{- end }} - {{- end }} -{{- else }} - - name: storage-volume - emptyDir: {} -{{- end }} -{{- end }} -{{- end }} diff --git a/chart/charts/prometheus/templates/server-vpa.yaml b/chart/charts/prometheus/templates/server-vpa.yaml deleted file mode 100755 index ef3604e..0000000 --- a/chart/charts/prometheus/templates/server-vpa.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.server.verticalAutoscaler.enabled -}} -apiVersion: autoscaling.k8s.io/v1beta2 -kind: VerticalPodAutoscaler -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }}-vpa -spec: - targetRef: -{{- if .Values.server.statefulSet.enabled }} - apiVersion: "apps/v1" - kind: StatefulSet -{{- else }} - apiVersion: "extensions/v1beta1" - kind: Deployment -{{- end }} - name: {{ template "prometheus.server.fullname" . }} - updatePolicy: - updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} - resourcePolicy: - containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} -{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}} -{{- end -}} {{/* .Values.server.enabled */}} diff --git a/chart/charts/prometheus/values.yaml b/chart/charts/prometheus/values.yaml deleted file mode 100755 index dfe4a13..0000000 --- a/chart/charts/prometheus/values.yaml +++ /dev/null @@ -1,1468 +0,0 @@ -rbac: - create: true - -podSecurityPolicy: - enabled: false - -imagePullSecrets: -# - name: "image-pull-secret" - -## Define serviceAccount names for components. Defaults to component's fully qualified name. 
-## -serviceAccounts: - alertmanager: - create: true - name: - kubeStateMetrics: - create: true - name: - nodeExporter: - create: true - name: - pushgateway: - create: true - name: - server: - create: true - name: - -alertmanager: - ## If false, alertmanager will not be installed - ## - enabled: true - - ## alertmanager container name - ## - name: alertmanager - - ## alertmanager container image - ## - image: - repository: prom/alertmanager - tag: v0.20.0 - pullPolicy: IfNotPresent - - ## alertmanager priorityClassName - ## - priorityClassName: "" - - ## Additional alertmanager container arguments - ## - extraArgs: {} - - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" - - ## External URL which can access alertmanager - baseURL: "http://localhost:9093" - - ## Additional alertmanager container environment variable - ## For instance to add a http_proxy - ## - extraEnv: {} - - ## Additional alertmanager Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
- extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: alertmanager-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config - ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configFromSecret: "" - - ## The configuration file name to be loaded to alertmanager - ## Must match the key within configuration loaded from ConfigMap/Secret - ## - configFileName: alertmanager.yml - - ingress: - ## If true, alertmanager Ingress will be created - ## - enabled: false - - ## alertmanager Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## alertmanager Ingress additional labels - ## - extraLabels: {} - - ## alertmanager Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - alertmanager.domain.com - # - domain.com/alertmanager - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## alertmanager Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-alerts-tls - # hosts: - # - alertmanager.domain.com - - ## Alertmanager Deployment Strategy type - # strategy: - # type: Recreate - - ## Node tolerations for alertmanager scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for alertmanager pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Pod affinity - ## - affinity: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, alertmanager will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## alertmanager data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## alertmanager data Persistent Volume Claim annotations - ## - annotations: {} - - ## alertmanager data Persistent Volume existing claim name - ## Requires alertmanager.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## alertmanager data Persistent Volume mount root path - ## - mountPath: /data - - ## alertmanager data Persistent Volume size - ## - size: 2Gi - - ## alertmanager data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## alertmanager data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. 
- ## - # volumeBindingMode: "" - - ## Subdirectory of alertmanager data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - ## Annotations to be added to alertmanager pods - ## - podAnnotations: {} - ## Tell prometheus to use a specific set of alertmanager pods - ## instead of all alertmanager pods found in the same namespace - ## Useful if you deploy multiple releases within the same namespace - ## - ## prometheus.io/probe: alertmanager-teamA - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. 
- ## This allows to scale replicas to more than 1 pod - ## - enabled: false - - podManagementPolicy: OrderedReady - - ## Alertmanager headless service to use for the statefulset - ## - headless: - annotations: {} - labels: {} - - ## Enabling peer mesh service end points for enabling the HA alert manager - ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md - # enableMeshPeer : true - - servicePort: 80 - - ## alertmanager resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 10m - # memory: 32Mi - # requests: - # cpu: 10m - # memory: 32Mi - - ## Security context to be added to alertmanager pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 - - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Enabling peer mesh service end points for enabling the HA alert manager - ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md - # enableMeshPeer : true - - ## List of IP addresses at which the alertmanager service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - # nodePort: 30000 - sessionAffinity: None - type: ClusterIP - -## Monitors ConfigMap changes and POSTs to a URL -## Ref: https://github.com/jimmidyson/configmap-reload -## -configmapReload: - ## configmap-reload container name - ## - name: configmap-reload - - ## configmap-reload container image - ## - image: - repository: jimmidyson/configmap-reload - tag: v0.3.0 - pullPolicy: IfNotPresent - - ## Additional configmap-reload container arguments - ## - extraArgs: {} - ## Additional configmap-reload volume directories - ## - extraVolumeDirs: [] - - - ## Additional configmap-reload mounts - ## - extraConfigmapMounts: [] - # - name: prometheus-alerts - # mountPath: /etc/alerts.d - # subPath: "" - # 
configMap: prometheus-alerts - # readOnly: true - - - ## configmap-reload resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - -kubeStateMetrics: - ## If false, kube-state-metrics will not be installed - ## - enabled: true - - ## kube-state-metrics container name - ## - name: kube-state-metrics - - ## kube-state-metrics container image - ## - image: - repository: quay.io/coreos/kube-state-metrics - tag: v1.9.0 - pullPolicy: IfNotPresent - - ## kube-state-metrics priorityClassName - ## - priorityClassName: "" - - ## kube-state-metrics container arguments - ## - args: {} - - ## Node tolerations for kube-state-metrics scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for kube-state-metrics pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to kube-state-metrics pods - ## - podAnnotations: {} - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - pod: - labels: {} - - replicaCount: 1 - - ## PodDisruptionBudget settings - ## ref: 
https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## kube-state-metrics resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 10m - # memory: 16Mi - # requests: - # cpu: 10m - # memory: 16Mi - - ## Security context to be added to kube-state-metrics pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - - service: - annotations: - prometheus.io/scrape: "true" - labels: {} - - # Exposed as a headless service: - # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services - clusterIP: None - - ## List of IP addresses at which the kube-state-metrics service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - type: ClusterIP - -nodeExporter: - ## If false, node-exporter will not be installed - ## - enabled: true - - ## If true, node-exporter pods share the host network namespace - ## - hostNetwork: true - - ## If true, node-exporter pods share the host PID namespace - ## - hostPID: true - - ## node-exporter container name - ## - name: node-exporter - - ## node-exporter container image - ## - image: - repository: prom/node-exporter - tag: v0.18.1 - pullPolicy: IfNotPresent - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # 
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## node-exporter priorityClassName - ## - priorityClassName: "" - - ## Custom Update Strategy - ## - updateStrategy: - type: RollingUpdate - - ## Additional node-exporter container arguments - ## - extraArgs: {} - - ## Additional node-exporter hostPath mounts - ## - extraHostPathMounts: [] - # - name: textfile-dir - # mountPath: /srv/txt_collector - # hostPath: /var/lib/node-exporter - # readOnly: true - # mountPropagation: HostToContainer - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /prometheus - # configMap: certs-configmap - # readOnly: true - - ## Node tolerations for node-exporter scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for node-exporter pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to node-exporter pods - ## - podAnnotations: {} - - ## Labels to be added to node-exporter pods - ## - pod: - labels: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## node-exporter resource limits & requests - ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 200m - # memory: 50Mi - # requests: - # cpu: 100m - # memory: 30Mi - - ## Security context to be added to node-exporter pods - ## - securityContext: {} - # runAsUser: 0 - - service: - annotations: - prometheus.io/scrape: "true" - labels: {} - - # Exposed as a headless service: - # 
https://kubernetes.io/docs/concepts/services-networking/service/#headless-services - clusterIP: None - - ## List of IP addresses at which the node-exporter service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - hostPort: 9100 - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 9100 - type: ClusterIP - -server: - ## Prometheus server container name - ## - enabled: true - name: server - sidecarContainers: - - ## Prometheus server container image - ## - image: - repository: prom/prometheus - tag: v2.15.2 - pullPolicy: IfNotPresent - - ## prometheus server priorityClassName - ## - priorityClassName: "" - - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" - - ## External URL which can access alertmanager - ## Maybe same with Ingress host name - baseURL: "" - - ## Additional server container environment variables - ## - ## You specify this manually like you would a raw deployment manifest. - ## This means you can bind in environment variables from secrets. - ## - ## e.g. static environment variable: - ## - name: DEMO_GREETING - ## value: "Hello from the environment" - ## - ## e.g. secret environment variable: - ## - name: USERNAME - ## valueFrom: - ## secretKeyRef: - ## name: mysecret - ## key: username - env: [] - - extraFlags: - - web.enable-lifecycle - ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as - ## deleting time series. This is disabled by default. 
- # - web.enable-admin-api - ## - ## storage.tsdb.no-lockfile flag controls BD locking - # - storage.tsdb.no-lockfile - ## - ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) - # - storage.tsdb.wal-compression - - ## Path to a configuration file on prometheus server container FS - configPath: /etc/config/prometheus.yml - - global: - ## How frequently to scrape targets by default - ## - scrape_interval: 1m - ## How long until a scrape request times out - ## - scrape_timeout: 10s - ## How frequently to evaluate rules - ## - evaluation_interval: 1m - - ## Additional Prometheus server container arguments - ## - extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ## Additional Prometheus server Volume mounts - ## - extraVolumeMounts: [] - - ## Additional Prometheus server Volumes - ## - extraVolumes: [] - - ## Additional Prometheus server hostPath mounts - ## - extraHostPathMounts: [] - # - name: certs-dir - # mountPath: /etc/kubernetes/certs - # subPath: "" - # hostPath: /etc/kubernetes/certs - # readOnly: true - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /prometheus - # subPath: "" - # configMap: certs-configmap - # readOnly: true - - ## Additional Prometheus server Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
- extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: prom-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/server-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ingress: - ## If true, Prometheus server Ingress will be created - ## - enabled: false - - ## Prometheus server Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## Prometheus server Ingress additional labels - ## - extraLabels: {} - - ## Prometheus server Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - prometheus.domain.com - # - domain.com/prometheus - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-server-tls - # hosts: - # - prometheus.domain.com - - ## Server Deployment Strategy type - # strategy: - # type: Recreate - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for Prometheus server pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Pod affinity - ## - affinity: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, Prometheus server will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## Prometheus server data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## Prometheus server data Persistent Volume annotations - ## - annotations: {} - - ## Prometheus server data Persistent Volume existing claim name - ## Requires server.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## Prometheus server data Persistent Volume mount root path - ## - mountPath: /data - - ## Prometheus server data Persistent Volume size - ## - size: 8Gi - - ## Prometheus server data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## Prometheus server data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. 
- ## - # volumeBindingMode: "" - - ## Subdirectory of Prometheus server data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - emptyDir: - sizeLimit: "" - - ## Annotations to be added to Prometheus server pods - ## - podAnnotations: {} - # iam.amazonaws.com/role: prometheus - - ## Labels to be added to Prometheus server pods - ## - podLabels: {} - - ## Prometheus AlertManager configuration - ## - alertmanagers: [] - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. 
- ## This allows to scale replicas to more than 1 pod - ## - enabled: false - - annotations: {} - labels: {} - podManagementPolicy: OrderedReady - - ## Alertmanager headless service to use for the statefulset - ## - headless: - annotations: {} - labels: {} - servicePort: 80 - - ## Prometheus server readiness and liveness probe initial delay and timeout - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## - readinessProbeInitialDelay: 30 - readinessProbeTimeout: 30 - readinessProbeFailureThreshold: 3 - readinessProbeSuccessThreshold: 1 - livenessProbeInitialDelay: 30 - livenessProbeTimeout: 30 - livenessProbeFailureThreshold: 3 - livenessProbeSuccessThreshold: 1 - - ## Prometheus server resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 500m - # memory: 512Mi - - ## Vertical Pod Autoscaler config - ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler - verticalAutoscaler: - ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) - enabled: false - # updateMode: "Auto" - # containerPolicies: - # - containerName: 'prometheus-server' - - ## Security context to be added to server pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 - - service: - annotations: {} - labels: {} - clusterIP: "" - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - sessionAffinity: None - type: ClusterIP - - ## If using a statefulSet (statefulSet.enabled=true), configure the - ## service to connect to a specific replica to have a consistent view - ## of the data. 
- statefulsetReplica: - enabled: false - replica: 0 - - ## Prometheus server pod termination grace period - ## - terminationGracePeriodSeconds: 300 - - ## Prometheus data retention period (default if not specified is 15 days) - ## - retention: "15d" - -pushgateway: - ## If false, pushgateway will not be installed - ## - enabled: true - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## pushgateway container name - ## - name: pushgateway - - ## pushgateway container image - ## - image: - repository: prom/pushgateway - tag: v1.0.1 - pullPolicy: IfNotPresent - - ## pushgateway priorityClassName - ## - priorityClassName: "" - - ## Additional pushgateway container arguments - ## - ## for example: persistence.file: /data/pushgateway.data - extraArgs: {} - - ingress: - ## If true, pushgateway Ingress will be created - ## - enabled: false - - ## pushgateway Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## pushgateway Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - pushgateway.domain.com - # - domain.com/pushgateway - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## pushgateway Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-alerts-tls - # hosts: - # - pushgateway.domain.com - - ## Node tolerations for pushgateway scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for pushgateway pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to pushgateway pods - ## - podAnnotations: {} - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - replicaCount: 1 - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## pushgateway resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 10m - # memory: 32Mi - # requests: - # cpu: 10m - # memory: 32Mi - - ## Security context to be added to push-gateway pods - ## - 
securityContext: - runAsUser: 65534 - runAsNonRoot: true - - service: - annotations: - prometheus.io/probe: pushgateway - labels: {} - clusterIP: "" - - ## List of IP addresses at which the pushgateway service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 9091 - type: ClusterIP - - ## pushgateway Deployment Strategy type - # strategy: - # type: Recreate - - persistentVolume: - ## If true, pushgateway will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: false - - ## pushgateway data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## pushgateway data Persistent Volume Claim annotations - ## - annotations: {} - - ## pushgateway data Persistent Volume existing claim name - ## Requires pushgateway.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## pushgateway data Persistent Volume mount root path - ## - mountPath: /data - - ## pushgateway data Persistent Volume size - ## - size: 2Gi - - ## pushgateway data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## pushgateway data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. 
- ## - # volumeBindingMode: "" - - ## Subdirectory of pushgateway data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - -## alertmanager ConfigMap entries -## -alertmanagerFiles: - alertmanager.yml: - global: {} - # slack_api_url: '' - - receivers: - - name: default-receiver - # slack_configs: - # - channel: '@you' - # send_resolved: true - - route: - group_wait: 10s - group_interval: 5m - receiver: default-receiver - repeat_interval: 3h - -## Prometheus server ConfigMap entries -## -serverFiles: - - ## Alerts configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ - alerting_rules.yml: {} - # groups: - # - name: Instances - # rules: - # - alert: InstanceDown - # expr: up == 0 - # for: 5m - # labels: - # severity: page - # annotations: - # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' - # summary: 'Instance {{ $labels.instance }} down' - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml - alerts: {} - - ## Records configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ - recording_rules.yml: {} - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml - rules: {} - - prometheus.yml: - rule_files: - - /etc/config/recording_rules.yml - - /etc/config/alerting_rules.yml - ## Below two files are DEPRECATED will be removed from this default values file - - /etc/config/rules - - /etc/config/alerts - - scrape_configs: - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - # A scrape configuration for running Prometheus on a Kubernetes cluster. - # This uses separate scrape configs for cluster components (i.e. API server, node) - # and services to allow each to use different authentication configs. 
- # - # Kubernetes labels will be added as Prometheus labels on metrics via the - # `labelmap` relabeling action. - - # Scrape config for API servers. - # - # Kubernetes exposes API servers as endpoints to the default/kubernetes - # service so this uses `endpoints` role and uses relabelling to only keep - # the endpoints associated with the default/kubernetes service using the - # default named port `https`. This works for single API server deployments as - # well as HA API server deployments. - - job_name: 'kubernetes-apiservers' - - kubernetes_sd_configs: - - role: endpoints - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - # Keep only the default/kubernetes service endpoints for the https port. This - # will add targets for each API server which Kubernetes adds an endpoint to - # the default/kubernetes service. 
- relabel_configs: - - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] - action: keep - regex: default;kubernetes;https - - - job_name: 'kubernetes-nodes' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics - - - - job_name: 'kubernetes-nodes-cadvisor' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. 
This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - # This configuration will work only on kubelet 1.7.3+ - # As the scrape endpoints for cAdvisor have changed - # if you are using older version you need to change the replacement to - # replacement: /api/v1/nodes/$1:4194/proxy/metrics - # more info here https://github.com/coreos/prometheus-operator/issues/633 - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - - # Scrape config for service endpoints. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape`: Only scrape services that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. 
- # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - - job_name: 'kubernetes-service-endpoints' - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: kubernetes_node - - - job_name: 'prometheus-pushgateway' - honor_labels: true - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: pushgateway - - # Example scrape config for probing services via the Blackbox Exporter. 
- # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/probe`: Only probe services that have a value of `true` - - job_name: 'kubernetes-services' - - metrics_path: /probe - params: - module: [http_2xx] - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: true - - source_labels: [__address__] - target_label: __param_target - - target_label: __address__ - replacement: blackbox - - source_labels: [__param_target] - target_label: instance - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - target_label: kubernetes_name - - # Example scrape config for pods - # - # The relabeling allows the actual pod scrape endpoint to be configured via the - # following annotations: - # - # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. 
- - job_name: 'kubernetes-pods' - - kubernetes_sd_configs: - - role: pod - - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name - -# adds additional scrape configs to prometheus.yml -# must be a string so you have to add a | after extraScrapeConfigs: -# example adds prometheus-blackbox-exporter scrape config -extraScrapeConfigs: - # - job_name: 'prometheus-blackbox-exporter' - # metrics_path: /probe - # params: - # module: [http_2xx] - # static_configs: - # - targets: - # - https://example.com - # relabel_configs: - # - source_labels: [__address__] - # target_label: __param_target - # - source_labels: [__param_target] - # target_label: instance - # - target_label: __address__ - # replacement: prometheus-blackbox-exporter:9115 - -# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager -# useful in H/A prometheus with different external labels but the same alerts -alertRelabelConfigs: - # alert_relabel_configs: - # - source_labels: [dc] - # regex: (.+)\d+ - # target_label: dc - -networkPolicy: - ## Enable creation of NetworkPolicy resources. 
- ## - enabled: false diff --git a/chart/charts/redis-10.3.4.tgz b/chart/charts/redis-10.3.4.tgz new file mode 100644 index 0000000000000000000000000000000000000000..2b6741caec685fbee65ecb44f9536281b8d2148d GIT binary patch literal 29578 zcmV)hK%>7OiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POvHciT3$I1JC<`V{*o=bYFbQ<9xqt2vqX=Q?TYw~6D&cH8Hy zv@3>4NWz#RSOB!6iSyl`g&PT8MOm_(X3Uw@SR}3+8~ct8h6BR;hf~BvZ;oc+-|pZU z3}@_cXq*kdo0dYVOAb~mnT(HC!3B$lKyTS}OiF_>E;^3nQ zd>RQvB3X_^6TS;m#HX0=LP)sayTGyFfT936WdaAXMJ#x)gQE$F@DL_c#9?PjXLtxx zA>v`buW-FbXum^zIUD(!5BvQQ5fPe^o=<0e#AP#mE|7o8b&WomP4r)$dhPA+4?gZc zdD@GkN#_#JuPF=ou;YO$)A8gS6gWDbn(PR1+?gSw=7X6kif4#~Lpw>#`2TeV(E*5% z9QXJ7ol_nMjQIaIz+5&rLrjpUH)d&t+=q^Sr$?n1P4#W3Jt1P6jMS`6=k{MOp#CLh zp00Xj5n^x9d(wN_xo?zxc-Hu63|HvN?}`C5)BmSWp6%}!>Ho8*g9rM*iDzpIehldd zh44!4c_1@1!4M;cW>{dxJ6l_D5@N*hE<{vd5K|;TObLf;5{5vpFk>W;?{TT}E|AC% zlYm4M2yl#}07pKNRNSZ0m`oDG7!t1QPxMeU_1xNm!-O#$2|!nfglH6|C29sc)NfAl zgmA&;ia)3~CrKEdVV_|kC)Clc3_Du@`5Hr*&F$aEXoj;AP7Asofaetc8PE0Cw3g(B zXo3$z#JOzt%=z5`O_qo7>2pU;)KGdjm2b_o4BeHV86IcsN)l~NpcU$&Ia7^L^){xQ2rA#_TIc}Oj7Xq~{+Jt2OFpv2lQ>qq+LJ@{P!xC!^BDVr zTwzmmOhW91B)Zh2=z#3gh$3EqM+8OQcOH8-(Bn9Zu}{WxK-n6d(2)3Z{rMFhqa+ku zqS)hu@EXneV<6*fHd0J^93p|ac)SZZd)FHTC>7L#xc?*T%9Jrlg1c)&- z2RbexWY0iD|AJY<1&lDvP?V@Pe1?(00gy=TGAS4E82PwYv6>YV6Nq2(e!645md-%7{b1){R=mpg2VKdT^dn-+*vM7G;4M zQvE8KShlMKYvN-iCd;-p8UT&5E>r=yCSnS5Z>pUl+e$l^t!2HFhzlw6^NuZ$o7EVF z9BZ69#eXJ*N!rZ06p#TWVk+zSNTN2-DBlke9Ealyd`yLQL1W_czOB&#$u&+)^C?72 zl?^T0nNQN)lK_Yp8T2_){x+*PwlXp zi16CU@Hz=`I>82Fd|PsSdxCuTBsraTi7YQ7%wUMj85UBc(Ldxah=?T7hg0ldo|74- zi5SAO4#1xYWywquH-La%M+akp*;^8ZgkzsZ0UyG1DK2*a#)yQH?xywkzSSS3co&8^ zJex;8AHtq|MVX|S&!4>bPH*IcARu~;%;vG6(bQ&4S)SZPG%&olncC=P>}J=$W+)!& zs;?E{1Iki5%mos}&-yuf#Z~7R`BO;+t$8M4=V-@aIfa^I_R@~$gwZQ0JELe0D5J|Lil{)E zfLu?BKZO~ZORmVYsA%O{A4T$Q!j);jHmc--B5+)INNGXJc}(_2qj0`v_}I3+kfOjF znIX7GkPP?xQepu-Ns47)-?(4RENe z`*9r3d+^&7M^=1ORd&^KNX><7O+(Jpr;K6WsJnhha3nOa9L{AX0!Wc9tC~psv=TB` 
z-T)kt$QUMQWCkEf=dLNHT9l2%Lh`~$ioA-AF*;LNs%egHVY@g)2rw5U(t?ESe5aSG zwI~ef_3MusH4MQv5{dj$?v5(prve}6J^24i=}-bp&tH;(^t;yPa^Dmjx2X|Hv%!#P z!Z7E@XK#`4r*B-uOFx~^p!l_8F#N#xl^$A;FK4-e^$4SX<^+jzBjieHS%S9El(qo& zQkf>A8X$7=MrkG7EGg0r5uQp&0VG^$?^6tAP2IRgIvqAbe(mf|zdtz4m`~feN&Zd4 zjDwseAy#;*iz!Fgm`X&@n_Ct(8idh=KGMg~?}WqU-$v^43RPC?^N^@qKI=T8wszA7KaQxqjL z%%r-a&0i$H^PDl*?N_ozx3>LiV0&VUJ?-!H>^wXgd1J!3@K_Q_?iQyL?{+Pm@JGVc zYH_8E!Zw*ilwql0;44Ct3@kgHt+ce(T!+eqVP|rQ=MpMP?huUND;#Lbd}>T$vWm9v z<(siB{EX+Calh4M(CGMx!0fxcDizSBty_}EZ;$$Ty;qc_6PmH*{-m$O7{lE_7HiNRq`%uMY%wn|7MoYmXaO8iGOi#547 z<^WZ82;DwQq8^`imoi3}ox2`Wgzm@@w{sI2hiJ0w#bT+xhFIWmo*8jQvX&%$ z(L~!=E4b|u7(Fcy!qwqr&r%+ ze;>Sg({T~S>>vkUB)8VuiXse-GSkj_lWI})N(y)Qx^Gk{PYTo?9sLXYg7JgVeI&A@ogHbJ`zIMgd`pip;Iq%XBTM?iB=0`y>$r_{!B7t%q9eDjH!p`Bhsv z43kg8J9>upk51G`y4?)X1>$U?T}|sqG{uZau0^@r1Ad5@ z4nc7zE61+vh}KlDhTlr&aOc}f;@HYrd6Tg*m|-!+35VAo)qxBKeRzunBfj=y>pv?F)~tHm1e^Xe8%L8N?Dd}Xo;im)_Md37 z^Kc%b-FXPT2h8Guh3^KOlv@Qs$>EqvnocztMb?W0tWa;X6n?-me?ItA>|c79c&?7G z{*33lv-vwQ#q4K1Hvt^BO4fY|77GLkS8&j{m+lV<4em1(OFA!EZnvUG#t>l~n4r0>?8^{ly2X@P$;QGF@-Nf4*NkvjCX+;=)P+oJK-#L(%db!FcW4i)wQyk9v{uD96`+{NI z*8wh)R*phk1*0{2+?ke7Sds+WKCnF4ZJ_x%rxbE2MB^s4n^d5qw z3|{u4X^Sl*S{F=rHKw^=cq#_I$=PTm1{uNzC=F+|N1u*Pc5MqP*7=%n3?Y^(z$iq~ zWrcyqntc{ni;P+IC?dj1oZ_DTm!Gv9e1h4GaITv=oT6x=!#e48gjs4wHPLoPbXIb6 zOoKVD$KJK2wP*m+uG`6raQRtTz+)$?uXjyj_$dI;uuGPN274=slEa9D|M`5 zuB~A3$_BN)`rg%CcU3p8{)|F~Q82e?D_51v-PzSq$!9vSTJEw#yJqseHJrwnjVYVi z0BlXxBxhHeXCTqQ;2JThpYffJZtuu$ozkXCjjzhI z**>L3wyO$zs&Yp?DPs*p!>q9|uRFYUA@Wlaa2_9c*P z7^MkBRT)v@Vpk2PUCP61c_`$*Gx{v|n4B@@v<&YbzdL_@))ODa8vAI@|FpNi|E!q* z>8bqukpF1|&o|$g>`oG|V*U>t?1l_CSGkt7Qoph48K_a;h4ASI8a=WE@l)Cx6 zM_uT_=g&Ra|EW%=)H_aX8lOM+_?6#t2A4aYEXOmZOnD>qLEuzm5XaA-g69R;rvcu{&H0?#WAc&>sM%#~O)bJ+4^EQ^ zkaMs&WkV7T5{`9lVVy`o!^2LSCDEbX8iZ{BQXE(6LEEo5HVUc8vc@6KNj;Yg@ZY=JH@AQ4EYsZI`<=)E$8$2qa=%5S+s*Nb8PIERyGuTOtHI;=p;)_t~MLS9zj z(>9D49oYGm-*3CW>U5rAoi6m>W$^Cpzff`Ydj;U;>a7aSY6hlq z#HTYl{tEp6OrAuKrhfiuV5xQyj*a@t%m+b`?ak{r}0}>0nT_|382JaR1Ln 
zo=>0p-@p}_4OQ&tn1ooFsAm!^{uB@4o4(qx{ck$2KgKp2xSPvOxd?h4Q_eF%zIHQf zQsrgeRNzuiUpS?LYEzNa$)pZ9AXpOl@cfDTlg!SN@tAysu9r0?DTt>@NPcwMX%UAB z&t-pyZRNur&7DF@WZkJ7yegA*7&8)yF?9cnd;cY=k~<0gx4Ap1W{8J6l^sgKpp(u2 znV?XGZvl#9HPT+^H>?4m$_ohwITj8h?4yKZ2;3Ac_5{nP6J3M+Y@O>y8@xIwaCGn(bj-3V^v<+{#{I-pO&dD&#%%I8!+2R&qQ#8*A?E(gQ3aRIWAM?=l;q?r)8Kr&uIRn?O+z3>Kd=b#vk zNJs?1yeHAvw9@;Lh~genK!R?T{IGwuH^Kt#H32mpD2XP$%WqYvz|~$e$QFS~MS&?G zm0|-b09A@L=-DO~fokvRZDFKWtr;#E99M&ei!`@}M@!^c6(a30rV+#1KuDjC&fVT1 z_i!C~Gg5U*{=3`djkk4 z=FwweI|Cwifm67~DrQ<8W}d|%HVHgcEVrlDF{BgX_c{k5r^;vTBiW2{3IHe3BvdM# zWt}1V_Z+Ysw2oUNTm>PZ$Q(h^PKKe1a9smk5r3Wbr4c=`xQ@+AdyQ)-m;r1%jc-UtT z`|L~FXW7Pr?KCN=Ib5K=)hb4&RO?-rE5FpHE7n}N=kgUEw%o&(`_i`@$sK4hr%}8( zJ7lt8GnNjewBD9AHJ93;^=&NNs&!Q#HtoZvy}wOstaL>)r;V{ixjQTB_GJE2jpI5a zSiV@#+;x#nGPPW77kng+7ig!hH8uKb;HB+$%^%%Xjnu;IN0W*wjY`vjVzrh*H;$|V zEcwLy73Q+;qM|iswr(>Z7b8b$qOGnISO5p-W`P_yBqgeuXZ35Rp(TJ;RjU$y`M|y< zj3fDrE9!tzZrGd;*w!i33j``(X~(67jV1GQtnMOf)vIW>x+S+HLs4)L##3~h%wTWO z9#ZC1og%)qNTbRuWb)1)w@2ihU7nq`TTTbg)vM+3Y1ihsU3JF~eExyYe@Vi*xl~>u zC!`=T6j#M&G@5dqL!sS!MP_}A&l<2y*kY3L8>64Bm z`BV|dNyb1~!7lEco2@Y~RF!ZkT-EnFttVc7)|C|(&Ntsi{e+fpt8VJcx6~3yi`TYD zSRU5->tAP=%!9gK_c@C1MSfkXjxJ@)(W!QUU<@G07V_I%jgQR1#ScbaBDQ&*`6SYTh^0JfbN zzLH5M9~zhq;PdC5U5HIM2vuq@k~~CG_w*1_#b#J83(?!n-i2{3+?!OqTf%=`kl#G!E<4LcXf9H5n+c{m7FTO)Qb)TV$mIjM zEKE5SK@@_lPA!|;`7(*+t>(T?TDimk)&%y=BkhuNT$WJY_xX$aev0RR(rksjIhw8M z1W@z&pTU!7FP<09|2*3tym&bOvxz4Q!XCoaUgwfT!O+~z@)pIN85Sr&0u4Km8DN@^ zBzDrItvab|99LG#RRDe3q_Utngp-z3w^*Txu?5*96ZXO>Q^*`?8J3P$$^v#mrhOWX zRcg)xaZPuEZBwPhWuBBiLV|;Qn!=$v_a288)q8LQBnqTd^0Z*O0;8^GgOD8sgjJkw zbxEMQaH1^CIE^)q+#DKhoM*RX z7-)oOnWtEQ%EIcamt`cHV_u5ePz@}Ev%dMH8O+6|+Cy2_Yz5@nqPKx@aTt|JSJJno zb?ElREY`W*MkETr+~%c|z11b{iuL(py*@TD+@wsetf0~A42yP|eROee&|6$|ict_^ z&TpUas{U|wc^-czlqIv$)B}1Qd1x%hJR@O9xW0|46(~vtWMc=it+#Gd#V*H)g!BqC z&s+d?yS{8I=cbWjFEL^$3~}i3dF0=6KDLcB)kFivF5bXtvbp9xTyAi|Ha36WH}@~g ze-^tviNtFX0T;-BdwcsO`S1DO{)7CtiKkNjGs4tuq`#Cr-nAImDB-n|6M2k%Ik@uu 
z2fqD#2*)(S<~B)lGol8B8dS=&hIKSul@N$cuuwOGntH8RXs#>ImZ|Qul2MlW%^Jyx zO!+G)TCP+h{O=p9AD*^P1^o}j)LW|=U;+Jqu|Fv0|9<}T=|leSjXdku|2>UDe;qSG zOrpt$^s>|sA@xz%`b)HC;U zn8utDshU$zz?xDn(gA=<=w=#xcY5x8i0$Q8I=L4a!+YpHvUBUg|9J`BlXu5IcHxhI zfSBUQ)WrUjx&`g)k3`r@W{F)#dmrz^GJXlm2><=x6(8TcJ9u?;{G&~jz}00?=>?id zAdUr)$lM-sD{N>bU@3&jQl>tj%Ic_c_WJnz==k-UO>cd~I8K+6_t5oaYIEw7xDTIx zc=yZks}DyfFZc9p)Ww16dzCQLeql*Og7nkrFULo(?6?2ZC-IG}d1DKi0g*9mr(L|< z+etqF9G$#;wD%84zYhLkK3TBZxYTWDEcqsMKF_^>q%WM3S#!mP^vLrqS)beQQEJ+_ zjOcZgY6gofF8{^#|GT?zbh4|)whNC2c80pw!Xyhe+5*S5L0RkBfNu}Z&R?G{-iL*9 zU&3}Q3~7m7nQ!2ycW397+tYUN?)3b_@h@+`e|>si?D=8M|7vS~Z@j!(LjQoGpnyfP zJog!i#fN~fm)%F(0b%L@YxmKhyVLF1wm;~Ev@f;Oq5CMS)UQznx^lU1ZgRCB{r?tL zPsi6#UrqLr^PyUlz03KQgyyBDTi(-4&Gx=E;Q#EWX#X+CM4Ve^SGNEyvj0DSQMCWO z7`%A?VE@^~^Qq!m5qngqe7BnYlD`EzEBN_y$2BNrcUt%KrP}IbK+(-$TDfJcNip=G zdW4bVpEb>!d$-*{i&k}W%=im;aX@w+eS+c9C7$oXBOQ-hkus?;Z|fGw8d5F5=g+D? z)6Nb2nNX4I%|OTcS8x=lTa}IfMEfpGpJ|=NOK5L3w>@RIoG)I&V%-M!#|#O7`o>w- z)(g9E!BRvzEBB}Z2=s5SHYfZYL@w6R|24XQ7t{aeFN*%(XM+d-??#@Qz314;*6tzB zw*Fs)de+mXo}Pi5Sx3ZHXfkQ?5pU?lj9uBa8{y%;8{7KROyZ7vY{%_W-;K0gFw6mrS+#$4S9rDwtt$q*mEO}IEsq67X| zzm){oAu%%&6@)Ym&_fyt{898wlP;m0p^xpWOeq&fCx_V>{$2Xmv~I z^6i(P<&+DPu1WhwgG_0xaAOrqrH8D)g~}ua?iC6}eXsa_QQxIvwVfMaJ1#i~tk}Qg z$;=$IQvkD9Qf%vsC?ECC4a9rjBaTmym_oPuU|x4e9W>W@MtPPeAlFk`rG00&=tWBk z?Yl5HV>Gv4izEkm|7R1X9OPV`?us>74VFip#UrXN+hRzXLaAZpFE1&lop;g>JZ~@p z=cU4uk+4u8!(OYeEUADj=wGLk&e@5~t6>Ek>uOg4vW=0bd{xg7x)o%v>PjC;1M3XS z1_2qNYP2@HS=MpslkT@g1KlRny(VH>Yy^$RBqGAO9m2jzb6IR#Fz>*lrS?gotB3}v z#WgGIzO7o+s>&-2LeLO1H#`EA4=8=vFa)>i3M1hD6bDI&S$@rPuPfKPSh04kv$V;> zoXxwP2}M7OfK;Qqxn9P_a8q6q9BsbI(Sa;+yZ%^Z$z7HZHIO3LFtD_qTwzT%>5sAw zJm`-|)E^-}b>4Zt^W(-Djgw$;d+}H#W{Djb?Bwm-3C9ctfskw_C zy*oYs+}$bT^MZ3sy2Zb%CXG2SR!M@pMa;;zp4HN@MTwX$tW=#$G;1nmR43E-W;DQE z=ti_$zqk#u>PEQgLU$o}Yv!`B1qYFufNhH5kAk5eLNwv!?Od5eudad_p~~!*QRRi8 zseM_Ma!@CaD-^9S29G|;QN91af0TMGt+35q!>%xsIASYyo5zZyE0>yU-l*~G^!3S` zqr-#qqj$$2-oAVF+G$96AsyAca@IAwq1`dY@^Et0vpkk6Y|`qJwb!Lr=XIJOS>0)| 
zzaN~a4E^Z7e#Y}tJkEb&a>AgpRkawJtKN~`*XF(cI+`&g;AEkP)seE$;7V9oXlqrpEHt+|UKSd>AH*!5zg{n9GR$eheB5LQ zNXukafW3j+h<2X7l>&MNIZu;d*+)t8CrC_(cCIUk4SB9#(@VY3(1z9NE&?P_b@R*W zpgbA1sc4E-jYu1%TTj+eooXg0X0caLmYSQCB7ENJ;~I&JF_&#(ZuM|-xgDX-E|b6Q zYZ=o_;hwPnSR2h>-sF*{=~1YdOFP`&sE{?{S{L$-E!B0d%eHod%3MS$?}>!c;xt5s zCG5gK|GBGohN@lYDrs986}&HJ~d~t z>WTi{nSN@zSV7OOY4LW7c1>fq*0gKdTS3*XY4Lt^Z72WfN>E&E07{#@LlaP4AL~HQ zwU~kGT3a1G*JlcfqQ8>@2c7@00>BDXX1K+Lj6tK2qeaM0cfVcvy<1G>zA_ zT%y_j%m$vQKG|`f*!~3ox=PJOky9-_KRQ|W6t$x@l47|`_y^PTP zqvMd77A}QByB$&n(TXD7mm(1@ibZb3dpjA4|DG<{Lgvr~TZ<(q=tE22=@aHWJ@DK(so;|BZ^jeO-fS;Q~OYT-oMU`bG86xm3mq5k4 zwP2%GPhnE!xJxTjqZh5R6nQwi+`ECxmj=|$C}qaY>>=_Ye9GZk&c<8JOHn5>wCuC; zoVp7ZU%p6}eTi<{WuaM$_|y^^m32YlMW=`-{j69mcGLtt)GWu^gBx1+oDRO~Al~XC zrfT{s!vcFaF-?O|JVXoiT)%j}cFAm6EpuhH%1C0j{4A!C%6gWIV77;x%^;O&P!iRG zl2jR0?!wZkneSL9t*(nFskghi!1y!4ToiIpaY0!+^#;+1{JS!Blye%@8cobGE8VU- zRhP>ppAI#N10?X7U`XJ}T#u@p2G_QhB}&NGvxBKQCFay)Gz>{J`9)PfK}-uhh!o5^fc%HSWgo8 zMfty;K7U%e|8@VxL;jCVJQV_76-!@#68OJJc8~_z-8uz)W%≥G6qgI{kZn^};wl zwaNXSZ_Q2po?Ry6CV;OfpU?lkQwyw$tnE!n-xt@elQ8dKz-LOxIaP_h%0qX*4mZ`CG#z|Ohg0h9$>P+oBAx)`E<88*h5vmny*kwspkK3 z=IcuXy6FCw^8N46_Fp{Y|J=w^*r;rTe4FyqEAV~w4JBeq8Tq&39=*$Nd5_R6XV@tX zZzT?V`EmeDLT!g`oi0@pRumJH`4QefUYs*kmDR(3{6{CRDZ~56F%7J_S7r4ln44l( zn2l7CgyTX@eX$h0U$_Q;bQ>o4u^^9hvCtjnBwi(*bFV=SURk>o@kVbFNYj1n$ z6^#?>!fN>ZxyP^kUfJgJip~&;Twts-Gj%029CC8|mj813wW>k!kjJa-3ARS~{k5iY zKPkyJk|kb~?>o#CR?+?{nV2e?yJZ%pik5DlfvKXgRkANtG*_&UYi(_bm+JC$RiFec znN5CKfY*r(3wr&4qTquK9A3u&D)zm8)x4#uGo0n!6Sq!rR@KAGyJ|hj&8nJOabK-5 ztyxuj8_H?6+SY2$hks{#tI+M{HdnSuni4>m<$Ora)|85*pZoMhrPA-@!1|C$|F2{z z{_9v?A2R7bWYT}gq(3GPnbiNYEHL+Dws^>-{*X!iA(Q$;CiRC*>Wxi)bqM;9Nqrs4 z`a>r5Oz!NIpjpf*@sLRUA(49eJS0+oNTjZxzholy`cTNS4fG)|dai}9B`{UIs(LsImr1${`0ez!@{+i#pZO6a{*X6@y&fLEmjtrbmb5G%(B zD%#IX(f(PFB-&Mrs2NL zOscbRf2FA|AD)V*G5^bY(tj_^|NU&RSIqym|K#~Y{+CTWEuycso05YoinjLb;D3?K zF!lVTZEo<&;>)E5&o|MS8N4=%vN|#NvU$PtZ7oa-UQxU*$#wxu%OwS0T=`RroZxF2 z?TQ(}+c$jI3BmItSvnthzIv;4;LTM&Z7LhMrc7UtgjCJ{CDx_hxRvFA#r*$CG5_O> 
z{lNqO-^A16V8Xh&`(Kl%F^QSoS zT}%0=d&n=X_kVAS!x@=Gl;Jz*W6}PXdp^Ja_n*Fa*#8@OdJ`f#Tk!6;;Uux?%z|%0}Iv$vNaoUIywsjv{ zTW}PK^h>96J|$d>H!z}9aKY3}!Cfpq?Dr=`Op{U1r?WJq-u&wY)W5{cQ=`~{0LLMn z&!pB*BiT?!>C>gZOkec~-2vkdf`m(`i7AHnKjCnebuK5}018vz>-J>{mrgCZCsLAXoZsZJFF0yW3&tVL5EO4mXCy^a3?|}om zDdI4~I0AvW&?tFL*`*%C@sCHx|1TUx6NWk81s^lvnHlWDI81OP0@d#cqcbd~IN=g@ zg2u9Y1&{B4J~_?LKrXM^JsJy77*=PSIgu>2<2ei#_Ii8!fA4sDBKy6)y}tux`qz`* zz#CzKp2+Vfze3FD6_LWW+&D?B?CvB|V@RR^vv4k#h(u$C(v4=YN};4(Kn1Q&=bWav zD&J~YDVWHgMpjm$J6z1>o_TRG%r-*-<2_Fm&dOHoqjTAliQm%;%QXp7K%HV2G#2&D z9BtraZr90$#_;|GF*L(+<^R~;iqjuEz_H${ih!tQvacYfNLZ}2t>OTJglRykY3Xh{ zoqxhN-<*^9n{S5jhHwFL#%7+}?2Zcs4%wC7&A&vX4)Tfp)DLio1y-M3Fgs{phPbJd z6)yvq;TgTc9PIMCaJ7L+91!Q!CrH>GnIVE+piotC@WCK-3PauLn1&&}R9Zci6cd)Nv?i9lPUj};5!yUA@CtLEkyx$$m-5I_>4_~}vvUL9G7l_S zelySQN2MTeF$w7inMmtXJRw}L`9(dne^kZcm12Q978{tUw(45m_PK$Jh=jF+ZmL;? zfOAhmb$dk!q;uD1K1JHOlFlMPjG@f{mMZ2_H1RX0Dyi3Ip4;PquI_*#Z(6Er^2X5FI5q6i zE6f-PFo%n@^3LsNbh~-1Wh?w=B7%YdF(*A15JKCR#(|?Rf(&M(y3iU*kI?1dkb^)> zIT3UL4Dt|AqUsm$+`vWd7<79mI1mtM(xzS>0s?Ic&zqkcxcGE_@Z*Q$gSW3gFNFjS z1hQjO3vl2l<2128W_AD14O|?JkEu9enBzz+0fPbr8pQ$I*IA5xGM+NF(iS#y182&-Bqa{n5JAZScOI=N4Ja03^DC5th*qNWRh z>}tq{mTurUnT;@$nz*XKR~q`7o*TH>yPaUHAdvS_FO-Os=a-JbRz7u)N|f$V0$6Pg&Js27yaFx5jq~hjh|{!blPY!3e&>0gM@)DQZ^) z*&3e}A>afsEN(;7fpofG$g?6HC>;G=LusN;bsR0d=k8JI*gzO!|ek>pyu>4C6PPzH4z_11=-M|^f z)&@LBv#@XIz9t)*Nzr~C?xzoltqW9s~qf0>V^>(*OXmqwP=xC zbOWzd0XTMRt!*l|d{(3bc?h5|q}Q)M3e2J^Sl+-Z8a)I z{4z}dpVA}@RQQBy?9Mf)73F~f1myZQ`Haq|#!q0P6$-N#)J%KDNi8=I_%T%nvY4#0 z!LFjFga8{Mf5FPTeU)+G^!tOuEN#r@F<@mJP!M1=#66aTc!9s=22N$6`&sJR$GFgM zQ&U2qkUMt~1kQwt&H+vW?D>d6oJGRfbg;Lb$!(t%aUh?%cYz1h+6U6Pei4x*D|$|g z5NPzE)pgIl76=s9M^L{?D_|`Z? zlci+l8~F737Sk_6Af`ddAX3qnV<|N{zl6bC-AG0sV!AhfJ)oa5Mu9m&Fm3*mFQ*L*u!*dn~f~Kfr~@!H9I<4K{KkznN#I6Iw-I} zDGiyb%4{$*lrIHIW+K!tw4u;cSr4Sv#0~Sgi`vKojzlEkUvI4L=CK}17&AhZ)jUL;AJ^DRZ>j~qQr$I7Yngeh zh6A}==Ozk7B`L3lzzL&QB*0SlOJk!>Y!l+wEXVn((^_U8E8;-;GO>D-p^l_NaPsS+ z3ar(Y^s(=4L-ZmVtck+4p2uo_!U>7(DiTHWm3v@{O>TQYV%J8IXy39jIBdj? 
zeghZk7Wmsr#kCj8RpP)|G71P&!qVJ&O$-cDxS$Q|M>lZM?XF-guYf@9nWGyx%N;PH zVLwf_r&x;Ntx$M!|24 z2>dScaclQUEe?=>*N~MPI3xd7K0`(P4Eo>qa|0LO{zz8k1@)I3T;%%Fw+;Ze*DpwI zePfWIv9k8qwr$(CZQJI~j&0kvZQJIKZSGj#>^b+xt-9Zzyj9(u4&F*CX*>zPom%46 zwr_KI_W+32I}3c6T&eOGctnLp4?$nUMo$2uxxUd%d=}Kt1hYo>m~^9 zGZMCJEdIOQS4RZ0fKob{#jcVr)glQ(rBZ@H(rEbLw+Z?$1=db9eeQ7G!sC2p3R65_ z+E|LnP)_OWFDaY(Xz+CIL`{)i*+=x6d=Q+dy5p)yZhCBU3*uipX#amnS+(&de}wX6 z!#wQJ(_HY|f88%>#GOHW%mhxY3PS=96mz+hrPlTf6=eClWXdwA;>=*(^ky~@4lyBXTe!ODCAAyeUDt?c7ybg30u==+Ucc_8^}!jJLDg)>j2pB_ zl0yU}Y8-+%`3+rEZrD5hAA&@wFzrzQaAW&d5~;q*2+$1HFMw9mT|3ZT6DxQB~2MxpO@L*P5GEWE-j6}`#Wp`d(kpH$_FteUL zOJmA5J@9cfxI#cM39%%SB53p9DdBK3qk(qH3vbJ9|D&kOqfLk|OA7LSPI zK~-9GDH4#A1mqwsp@9@Vh+n#e-8~e=%Ziv1v_I=Ez#s(ybh=0&vrg?s9)WsRJ_G=PSf%x&PeQK84ON@66Q1@K-K=V@^T@h)0Vmtp@Htfjo9aST|lc#_rG$Iylg) z`yqe}LM8tnS^1x1)Ue>M=ws=kix$>O!Z_2Vx!DASwXQQ8QN6g{x!!flmNn}VtxJc zkbUuj)^i}+XCyjni3)PAGg|!#iaqMD`T?qnQ1&Bzi*Dm~?WRJ#9#Ki4aKg16)oWl$ zP;*LO7Vxj?#b0!be@fMF1B4zu3Bj%RK(BO0MfDVTKcgFiI{W-p55m3zy7GQRCyzj_ zL;Jc%5VzpzEl#+4wDlaUX|$kmEmFgZIgzDH5beK$hR?h5bcs@GnUSS2fgZy(dS+y3 zP!Y;{KVB=RhLl1Cy#LDsgQoBX>A{3ui zYu}&^6zKd5tSM5Q^z}|P?>8+@@Z8U<3)G$R;_oKdObM4&ocRe2QK5?o#DPRIU|4zK zmG$S-rJAPd67S|)?^TT+*q=?EAKlJNEzMWIPUBTgnAv_>YzE7oXO_cnM!~iu3y-0P zUh>w6ZPi!<3CaTkqw;nS+6uD);e|P$#*!}whW+-tOv%cK$~+zfAOJ#NTcj=Y1dQ%!70~~GAv2_A7n9;Z z)JYibZo12IjJ2#Vcd9-3MV$91;m zgl}7WcY{R;Llia)8*KDATs<-7mNO%sBB5fSnoJ(qG`w|f} z!!p5;96+1#A7Pt6nn<+C8I2%^ia@BNukf1#1EM6exztRhi5v7kuyWFnhP**V{5e6S zn4@Gk=0QPJ6V@q;Xt0B1#H_GnRZ=ST!{SwIqO;o|D{OOR@GN4oA&4~7L2G9E-D`^^ z+5@0nG{84QQrP1PYA~g3lag)1f>anoXp11&x%@07cR>GhjDcwp1Ul#;S55H^kR;#i z`9m}yTc8Q*f^q3(_F2@GVdx(+dg1s*Nv~#&gE)u6I84ehXl4TVH4sqpe|BH%{Ggu- zy>@zYWO-i1^M}&L^KWkb`*Z1;1$ci&gJ!C**c0EPFBa`Ny-~YJ#j)i(R5>e>?;Vl} z6ykj*&nl6CGG+deSm0B;*`?v_2{|eV6w?1UCsO_doZp9o@7Me9_ALca|B1iaa{A)f zvklEJKjwHkXgBmi^ojP!}Hnvm2a@P5HwSLPQY?^EXzT8 zPX#@wrB0LP!`f9k5G;)YNFY$@XV=l>H50JT2o^@ckpvF*JdVRvNpv6?ntMbLo7nO? 
zjMT-@{$^@z5J1l(MCwygXF>wF2p{22j%te=uwAmVYI2n54)Z4sr)eSn$KdGRuiFQ$ z7&FzmtoH%I6N2?nxNoZYD0r#xfQ2uMH0nJH}g{VL>zJ*gXW|n7wB>zw1S? z>fWV$Gps>6X#fX8QRZr>@fGgu2TCxO0`>H~f%;u5G__K5^#r~3ki)zOTm8RW*}%>^ zVLE#bJNiMCYwCNfUYAN=F@Vt#Arb{UMP|%b?L{uzok@u+p{`C>-k2c zwXs%p)t~jU|Cj=RNWO#YdpODO*mq#|d@Tz?l5V}^ZlC3#eQmh;-CQ1xsa#CjOd3eG5zThpT@$2u z#yiEN1NX3sL#n14*zV9j{uFn4nC=gddSk87&#<_Ms5%t zJ9_>yh&AFwjOwDq(auegVos7g$^`mZ%!1ZZG@NZUwW@!;UyV4 z%kQ!EZ6%jVjjpZpW1j}@F9=I$Hy_>CjRue{yjhU#C=eu!cV0szf6bJNBn~4+k|3ux z@ys6M@D)%Knf^`(Oe?V- z6>KI4sa!#HKQBCx9w$Y60{Ci+CyBOW-vR~`c8~PO6ev@;yDgVjC3a^dW?E{niTS6B z*_U=oMI+7BmPeaRSV_D%;lOY!#ogYk$EFPkbL5jO=nz`lui8K^7n`BUS!9{MY`BI2 zw>~nni`v&a@)G`O3y6*Bp4>?JnC&ZDJ;M<_kAOEf*<~IoZ9r7?n`0#cN|1<~qqT$o zVwf^^!QBIx6G1AN*GYETKY+6&Az>Va*)D*PWw^kEaY7U~4rN8(L5lwCO)?CeV=YkM zvgV3nnL+GdNr<`F*Eb($(T7-;R2_3)Sdzq^hTU_R0x7IOXPT7LD%rT6b0nuLAwuw_ zq8@J|C}q&DY(1pudl(54Y}KN)v}c9J#$PqUK*Sja8XkQ?-1Qs3h~rJm`-qF|{xuPO z2USS8+sL8}q=u?1H%0ACiwTi3Sg!OTFrt&LwW0FD!5K?5R<)A&OrWVtXVVu+7|%JZ zt}13tXJ*T?62P((%xUi2rbtYYcpi(x+$wIFLX4wmwXT}qf;|c!X*`!RJszPxA8Ci% zyf$_=Hb%eK9}+FcZu%!JvTI&P`tBP!{h(aR#KT&?>1nrF4LHOAKH^K6)nZOEb^rpO z$Z$iSUVDUx)aO-G!-eTkG7tou!A#{0jim3IvJS|DQh-F_r2V6M zwXijptg*KOQ5N;oo|qovRS{E!R>5=nZNeG!dM92!cZO?ebD_YA!;V_!TW9xYH*P4= zdSEzP(t6D7{trZ81TEBLGc}IMQr}61?_=tACenwN*C8KqVO8$4*mKH#BkF()zK3Ls zlIY~|*%v3&Sy`>4zLngBRKdn+X3yq=Udi#}aic>ixcC8zTEgRnLgZ$XB~gU7d+S&y zyj%flSQE}T%+Jm3cE({O8T9(D@|)Qbtj&o#s$!m6%+8dwC*GV zktszsp|2DXD>x;}q-Y<$@g+3Q<%AWZQNW87IzjyAM*3CwBIeuaA_Y@xBBfRJ>~T30 zj3|#-1fOhT_+~fqlxIR32nxrRtSfqXB6B zJ_$RB))ijqZ5?nN`T(F#qtn}h!d*U-vBw;KeA#U2aq3AXzg<^M%K|3lxp35nbaE%X z*-~u#YXvyKcC!lCZ$8tuRH3V*Z3703w1Bl-rLbF6vs-xy*=6r;gU|J<8-a(wpb`+X z9T^GbhyYU=ph3Zw?(OZb9cP5z-4Qyf{wV&496Xv{2T+{O{<`#tmSW9**@`SSPsrJ{ zLZWOX<4cr|I04qHF@9cAw@}nNMFqu4uArI#Wp#O_^-dUff{g0I0N4(4F`m5R3)7y5K_%61{<#95fP z8dse-C|XXmDU>)Dr(!B@V_q)n-C%ySew?(E^+_cO5tb6jq`CU=6}Cs`oQ`>54-Qe| zOuL3jMtNOk>0bG776_Yr?pw&DbB&||eFhe#X?ZedQQAsXaue*4QI=Y+QO0GYb|lzD 
z{n)H{;vRNAU(cTD!pW2XzPq`T_&Ju#zCGPvJXh0aZnkD~JF{o(6P6DJ4R)=TSPEUR zLXxuCo`s3n3%S~@qA_u0lgf$;p3ddhqIUwNGKIJA}sqMo>|Ik*ke!CdjhbVZ#p=tlC$qd><`n+N0ax>y3`Mc z$4mT45?gX(=@d8NMS2u5!beYquPT^I!Hk`>kNm}d=XZ0Fl%Y0;=w!~Ck;cN5Sl95;e|TEuUZce!T7TYhi?AVZXg; zk1?4ab=rBT)YQ4>-3SZ7gqskRDMiQ9aYrKwWLt*)}U6 zxJL1BmCmTeEJ*oq|KA!IHK@Za=z5idbYkwf^Rc`fyLLc|PQmWUu*)cpZUv+tfYD8U zd;ONjt2kPFsQ&m|u!kYOKNkI-Zf;ZEk<0PZ-a2{?ih#=bAhG31Kj4@X=8QTYuOm+> zRZQcS!fJZNAd2u0N&*b#snYWI1Vig;CGJcyj<~`zN)aMrP+alUAX`Y^QP?i(Wp|P$%OpMVK8W5;%nXP`0du9NE)GK_QF-q!nAy{0MFbyS zkRO07kqiRXy$C&1@7<$W5jlg_rjFwVPGiyd$f}lVL87Y}l{y-K8h)*p+{IOUJ^ZZh zuM?s)E8==>E&uDFJ!&E3oXB>EEb0UyX$_2}aWqoaa!h^X)foQ)F#$n>^!SjqDsb@l zZq~jH*{d(+OWp!DAh2?bAWAb44di6SeNthCZZr~Rq<-i2Wmz@lZ7(@{^f8QE8ng#p zW(`_%$`Z3p?`T*It?2zU#Y1^Qk+0&R7?s@T6DhEyD0+ee#yeE{ya{1?gz(?Dfmz8# z-w9P3yo+n(Ou4_TxLQ)80ltb=2<$Mr2Xa+I~QUp*BIeX>%AY=)i01nQZ?Ao@Lo^%%J z{=G4VhXiO!mXM)mI8tRVJWJU5q;3h;V)}fqBAEPkyd7OpD=b@{3#uwVQ|$6kg?J@N z!Vrnsq6v4kPR0dBm&Ub!AVN&|p!8LJ41y!AMqr)UM+WY2^zLO6%UP`W#~!wVw>mC} zHK;lKR_Lk?+W1hBM?KLG&^_BJcmso5uE@(_FJhJ=#^c+{J_vN9 z_q0FWk8(dFFA)M%Jq;2k++kbLKVh}#SB>Nb&M&5gfTS@Hk_sbamrLW|HL6xq{psL8 zrz7a}Pr1)3Re1~Dqlbki5!IDgpJ?sW_F(FQbmb>%#xT;UDaLig3gX7 zm_zhRAAl9zuWo5tLd%mL*Ii zTZo8uD2WmtdbF$gwwBbSlu1Ym4X>{i#e{ym>BPtPD+H3nC8SPHiY|~B$+GW%E&V+6 zm{P5!fs+f^1o!Sof_-FPoavVO5JgbPnlUzRRE86Xh}kL^WJlXJ-(7Y(RPXzk^v0y5f~{_hQB`_mOTRG; zQZNxMu!0Sjv%=^dt*qyRsV&h&T85-yc=~QqJYJi{qpx2>gxSr4tw?U~$$?5P;6FiK z%4-`Ho6WH5Qi!J>71X$TDbH*>2?4|XS((u!%)B^c=Zb{~bTv}{fP!MGu8{c?Q zb`Q56oN6IHKp3wfrx-cCx;!Szb5Z_*z@!i{N++Ou@t45TTEo2EhAPu3<@+}FoCGPQ zefnZs^L)EJOYH48&;_>mjYNm={>R{`CnE7Zl1KX%jGA8fcbx!c(vrWMYkP)zQA=pnjXiwjbtC}lp=>a#4`!zaGQ|uG{<=!jZ9~r2!8DeFOemUdg)R6NQ(8GL--#07heaJT zZ)E}xU%p&xo=;tM-?{vrUwz-~_cpfuv4pt!2zuq0k|QKZuXd_1tqp5eLMn(5dVA?( zR5x6Ae1I{ID7$0iX*V4?1_!@Y{Ih>kKs_w^dmE$v!wR2>VYkG&ROsbwmMmLB^nE=h znY>t8-#E8PcMHj;_4CRmoH~BGF=iRmge-;Cm0zf@x}E)1(_5kC^!Lf2t0nTZenVoQ zn%3YKkBsfXqRA5TYo!<0MWfHm%nG{2z`N7oP)?=6*{{VW_48_3UDhe>b1nc@VkCGAm3H}`8+stY=%d}(Qz6_jy~p`>`Bsfkon^$!+Lm>^_E{H68Vyx 
zmAEgy`Yx)zN=vEYZZw@u8Zt8BThnIluBxufwYd6)bmE`vESH@ul^s+y(WPOm05gEK zW^J7nK2*~a46_m1Hlm^9T*PZkT@cEAGy6mz$`ms@M%*lZq%B)D1rvK0)aLf55QgI7 zG@UK-gAEb5`3H}uNq1dLf{5aDl(%JfOW27pc>xtW*^qcM$74=?)^GJREL28zsv3*t znS3P=z?;=g#~2so+kL$<5TIaz$IywLb1=u(&0W*Uh<9dKA&pGUf3Ce>NnRZGWK7&0 zr(rMZYI9pA&90;3(AlOTq_O6`2uhv~|1_v&sExQjW>TCqhQWdd=HpyG>^HorIp){6 zWF7jz;%}~4agOHgTAh(7EO!gF>VRVjY&XM6Hq$9)<`_F&9uOs)R-#Nd)T!o4BR_2( zk!ZyfMl%*FZ2@F{hz?LE&aXYG%uT1ov{1RV(tKDobnL&nPPX-@{jIuOJ))7W=;O~TJEsW39dILn5Pr$mqv!xytA8p_Cn z$YErTJEAo^61}FK48P97!Di%uQK_@I`Jh5fc2t8ihDZeKK=f^dgcc9%AHI;BSjpD_ zPF~6ET4Z6JRjhDjeAKY&s+wO;He~gtdt%n+Q~(;`&LuH;t#ZHox7AY*tx>^9+F>tQ^bs%KIYXEt!m67?8@ zPnlFv+xe9XzRV#r!C~3DU7tzmmE$&W8v{+vN5_A;6=&MYm@yKtzVnL*D>BcAVRiyO z#D}IhbJj{{0KMF<%V0>mAXYPsa6yvz5*J5GCnm4YjOPa54A^h-8mz3#Q<6jn%$s$N z6rCY$)k=C+ZYG$7*NG@Wy&P9~4CE$RZce+$c=pkU00!t4Y9sZ8b$n<(O< zCU$E3f|h~@CYCR8ZgyKbhWuK95Ex&LWi(Ni@wjJlnpu46x zA5Nv+DrQFWtdW^1PqON(7&!W7JCs~!rcllj=9scgqVw`RiBJOO{b?>SBr8VXQCyjJ zA|n&wxwzDiR--@_mRQo7t=MJo)-wri5S|ob6=-=}$^&`8fl6QIvP4S7fS9F3gE<0B zaoDsWlJ-zF2es;LxKGS!HX?A@S5H+(<1kGDSJmAoLbvBb=Qf?WS^q$(Wy`w$>8sj{AI7Fkpc$g{tMvg+1Dk? 
zwS9o(f`-(F>$suksErkKw`ft}-q@9f9UE@I1Syh3aCg-P3H^Z*U9*7Lh1pku({_&7 zdR)G_Ss&u+rY^?;)z~JV#}rX0OOE1LwDix_DLv7aBpI%{e?UxQW&ujys{*6yTS}r_ z>MIBV#SqU3t$vw~Ouu!!Bl33fxZXY5Ux>HiQzX7yQ4o4~&DG=vN+Xz*TcBKAF!Ihw zQ80ukvZgT^q{tBiOP$e|{S=K_Tz6~`^hmo@T`g@hD1eTpU>U+43MB%aT!CdUBUf+m zn_5QYxYE)%W-}%CqUNIWVQ8P4s5uYZJAO3US{&6 zB&L&+MUE!LcPH-_6TZYZbLR$i{+g#aR=Z^R3eb8MTdFbZ)@1Z@QDrG;f)mzRbUlND zU8PeL_OY{+#!`iRX^PE_&AiEL8XKWifTV$Yt|e*3vdl{S+IW0_mTZ3C54N}5%G|47 zdVZhFHek_cGxIKvCNPX1HT=yP0JexSmbDwEukP9=1sFA&(Kwtxqf~DOb)^vzenci} zC8@BaO~u3DCL`mbgX2=iRZ?U;*(Y&lOPy!+c@@%**q4Xc_W8paz5OmZtLro;`Q_zK zI0wi3`U;a93(^H#IbJ06li~|ZtwpR;E>OTdryWzU>?&}OhxO{~uofuXQegBLjj_Xh z1aT>zz0eA(s_#D1Da2J`-z^UUdugkVW&4v?Eb*;TD$GPro^wEG5u!kn_;)Pc?_>a< zqgRxj7Oxe;=}(9SUa)h4qWpB{G)ul`Xs5mEl6|_Hg3;P5T!CivgFDJ`(&p%?a7_}1 zOIuZNO^fPzM{atS+6uqSJb#-?BqPR5P%}m*e;4I%0hh;?($vTwX?NtDT)qdPXXKW~ zf-$7@Zb>Q+Dxu*pj-9$(R2~~U=LO}iMUPJ8a}O=0qtYC?*Yp>&C*8K42Q<=uhlVRy zrL%R`^OAOt>gSOP8iYzg)unet-?LXpBkuH3m&>Tu6szpP?|N!Htoyd7e*I||Z(j~z zu;`@y39Het%uMb_>fjM7?efUi>W|5G_&^3=FjgJrne~TXs)DOROi3qC7^vnm9WSDZ zzL5xh&fI7}Wpwjm<~BC%r~770kVizfq%j%&li>4ydr|(8@k##9b1+;MllUhwq<76y zj)WWJ1c|09_YkT$5VZH6<1=&2>Z@JCAxnyy%!vOprQGrnRzh0oG1l6rk%Lu3=MvC0 zCG<^|QbSY6u%|@UQK@5(tJq2}l_>I|qPCGo7{DMOx|Bbrk_K{G9kupd*ZZ&N>2a`I zS|(Wpe0W(|yp``^>sr{V#u5Z$?)z+|Z(Vd*;=b%eNkuM3Ot&|Y#iPt>VYl&sZM3E6 zQj3{XGw7U_W;X*cY@uj|y5stzj4#Ov_XTZy8(FC;ns+Nd$R&D2s`lVh_@EBV48#~L#5^rt62d9Q|$?;?a{0h@ML`HyC zzv@d~Uh}5+ap7{R8G8yW&+mn&9kYb=+qZZ(L**k>yp5{rW?n)#-W)T8arG8^qWbiFBNoD5#^qgz(G&~=6n zGQ(g!1Gn3e7pqv1BEYukurR=fO3C_8@7%9CU|Gk5NNJi)R}nYr64W*Ncc>aieE(Jv z7o83&H?E1G_yf!!J$+4<(*Z)%>@-F+;_RXy@X*<*rn1AnFg>0?K%pE#}f7ib_y=zf1Y{)?Mo;i}mhG z8{+PZSi#d+l?g6iLDpI0{8b3lRyi*MWa~u=wZ;eT9kVtwPDQ9mnx$Cioh+X*Ns2m< zZQeRLQh)a9m)f za`)Xws%gbZiT|g$tpc<*aeM(P>Q$)eiOzJNhSs<91vbQ-zxf?DYmSY>1U80#6JE@TF(-M&v@&qWi81s|?3dPt=kqDMhweKps)u}Tg&7>Ws5Ni0u|RfYsWRO< zqm&+xEkmZ!t-Y7J8Ra`3wB@22IdyFKBJrOWy#Dky@0Lc~jcETZt}05}Ru7`w>u+() zUH%L=f!yQKTmDPMM^AD;n`2)cmKXe+kdvYBUkUJc{9)gMAMpFW2;YG(_>%`_+Fzm7 
z8MpXB-vl4vd;AdJcT`H+n{hM)z6f7||BEd60{?$Wv8(6Amg(C6>W=omdr~;2-r^@A zOK|%CrwIBs_~Fn7{L{h#p`7&}ZK}Edvqt{gnxn;h9|r-%_y1S@Pm6Qr7xLKO#`0_M z59?FkzgBmBb+U3{!@loO@c(BBL*E2nqx}#3SxZ7deh2_X)A*tQaf5E$2mpucVoO~M zhiL2+kpKiB0E7S#fC3NzLI4Q=9F}tLhBgqpODk-7oOA>DFB2isFE7Ne+obW z3_t;iNP>S_;0930|9vHXmim9j1NmRII9e^0E$?%u|h^o?!%MEu|It}d;Cwe zr=Ly$K_Ci*0SJM_smG;CgT$?VFn+){E!^95xqpm;eT^D8@Y&q+Z+CeuqHVf^0F%j(U4{YJ^vHMhl!r8cihljOOC{SBNk1peIr z8mr9P1XIUb^M(DYe}X<3S^W4Nwen5=+9mh3YP{HM;Jf_iK8sFlW(PO<#s0|@0<{GZ z^=W3EssqCXD@R`_xSJpqq^+1iw4DyqMsd&?Lo*PsuDHp~4Ti`P%Y2R_cm`+>6y7@^ zz2`nRS5;B5kV*6SIHC^6d!0TK!Xyg9gg#N=IAWLGKBaXhOyFt1l{bgd|8gNDyz`x$ zN_HNk7uyjSGEQ3pySdqDKmYM50)0;tia2!HthfB+6ID!5AfaOyL9pc9+t>DCeb3q% zS;B4Fv7RHA#?+D48sX8)Y(in+d8u}Q@OG$SUCCzk@-sQ9QzM;*Oy#JIqM63S_XyH) zzoUC2%ana3r_FB2mAK~5;DY;R*-VNJ9(*Th?oFU&E}n%rYA02(@swT|xM?y8cAdE3N-EgiEXX+*YCTwj*bs0SU4r2&91QcjzSuvwG*H zc6^d@USmEbKx+eIDl#e>{Od*%amTJJJtgRk>JiIVY1YUDrR~9IKByEuq+?R^W(f*P z8>dmf0v0#mJU}?u3j!7aw_D<@JOtd!$F83?7tvDFRxEaaUl0fh3Bg|DJwJ|!8xbI+ zez|k4sU2cmoL><5O&Lkj9R96nL-EHVB2BRV!9G~jy8*{(|Mwd|=r_R#B=HC&qgNJz2nhKMLZZVvtVgupv`0gPL%fHf z4-%tTEuNbD-F5Fa`uB3A5tDwQj^xom_r_dw^C zL-*8>ddG^?+#CS`^4|a9jMNZ4CkX`d3E>w8;l83Nvd8vAaeB*)+F;sZ&m{@2X!*VZ zU5;Y z-D~%t#x4HKeqb2r1Vgegfy!V$3cL4*IeQ$G9znC%t9I1Iu(g{zN7&j;oe8*HLty*q zCLi5o)~Kk1RnOQtPLF0Hma#8wYC+%K==K47zm04%M);Oep{2R@-Lck0rHDzL#8~E+ zMMiYBO+qmxeHI!s8i}dDcBDTNed!Z-T5SWQ9eQQ$P4~7!Ca`VxUKd~Q&og^wMQV4C z*$el_dmUOM-I3PYUlsSCC6u3#vegJ~=pRT}wPi=oG-%@_l^#m7pzu@O&vMUNG_w^e z(%kPMk$K(8#LczAG$B`b^`;OuSYTfYbDDh}JK4Eg?it_r;E-;!a>heJht%65kD((G zg+(>zcVf~XD_)LCI^?Jdmw&<3SYoR2rfe4d^wjaKvDfUxX+GP$ZR~sb(SCt$vx6xm ze^#!)+C*RKJRn`_#nCvGej#*87?Oag<`hRmhD--t)d^|5+yQo zztC#a`401#XiteAPmALOcBe#jo+=^oQYl;D! S{O8%t@3PQ **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```bash -$ helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. 
- -## Parameters - -The following table lists the configurable parameters of the Redis chart and their default values. - -| Parameter | Description | Default | -| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | -| `global.imageRegistry` | Global Docker image registry | `nil` | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | -| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | -| `global.redis.password` | Redis password (overrides `password`) | `nil` | -| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | -| `image.registry` | Redis Image registry | `docker.io` | -| `image.repository` | Redis Image name | `bitnami/redis` | -| `image.tag` | Redis Image tag | `{TAG_NAME}` | -| `image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | -| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | -| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | -| `cluster.enabled` | Use master-slave topology | `true` | -| `cluster.slaveCount` | Number of slaves | `1` | -| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | -| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | -| `usePassword` | Use password | `true` | -| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | -| `password` | Redis password (ignored if existingSecret set) | Randomly generated | -| `configmap` | Additional common Redis node 
configuration (this value is evaluated as a template) | See values.yaml | -| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | -| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | -| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | -| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | -| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | -| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | -| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | -| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | -| `rbac.create` | Specifies whether RBAC resources should be created | `false` | -| `rbac.role.rules` | Rules to create | `[]` | -| `metrics.enabled` | Start a side-car prometheus exporter | `false` | -| `metrics.image.registry` | Redis exporter image registry | `docker.io` | -| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | -| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | -| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | -| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | -| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | -| `metrics.podAnnotations` | Additional annotations 
for Metrics exporter pod | {} | -| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | -| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | -| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | -| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | -| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | -| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | -| `metrics.service.labels` | Additional labels for the metrics service | {} | -| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | -| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | -| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | -| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | -| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | -| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | -| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | -| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | -| `master.persistence.size` | Size of data volume | `8Gi` | -| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | -| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | -| `master.podLabels` | Additional labels for Redis master pod | {} | -| 
`master.podAnnotations` | Additional annotations for Redis master pod | {} | -| `redisPort` | Redis port (in both master and slaves) | `6379` | -| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. | `/run.sh` | -| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | -| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | -| `master.extraFlags` | Redis master additional command line flags | [] | -| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | -| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | -| `master.affinity` | Affinity settings for Redis master pod assignment | {} | -| `master.schedulerName` | Name of an alternate scheduler | `nil` | -| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | -| `master.service.port` | Kubernetes Service port (redis master) | `6379` | -| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | -| `master.service.annotations` | annotations for redis master service | {} | -| `master.service.labels` | Additional labels for redis master service | {} | -| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | -| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | -| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | -| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | -| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | -| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | -| 
`master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | -| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | -| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | -| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | -| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | -| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | -| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | -| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | -| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | -| `master.priorityClassName` | Redis Master pod priorityClassName | {} | -| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag | `stretch` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | -| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | -| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | -| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | -| `slave.service.annotations` | annotations for redis slave service | {} | -| `slave.service.labels` | Additional labels for redis slave service | {} | -| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | -| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | -| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | -| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. 
| `/run.sh` | -| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | -| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | -| `slave.extraFlags` | Redis slave additional command line flags | `[]` | -| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | -| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | -| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | -| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | -| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | -| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | -| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | -| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | -| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | -| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | -| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | -| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | -| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | -| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | -| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | -| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | -| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | -| `slave.persistence.size` | Size of data volume | `8Gi` | -| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | -| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | -| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | -| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | -| `slave.schedulerName` | Name of an alternate scheduler | `nil` | -| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | -| `slave.affinity` | Enable node/pod affinity for slaves | {} | -| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | -| `sentinel.enabled` | Enable sentinel containers | `false` | -| `sentinel.usePassword` | Use password for sentinel containers | `true` | -| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | -| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | -| `sentinel.quorum` | Quorum for electing a new master | `2` | -| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | -| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | -| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | -| `sentinel.port` | Redis Sentinel port | `26379` | -| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | -| 
`sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | -| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | -| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | -| `sentinel.service.annotations` | annotations for redis sentinel service | {} | -| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | -| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | -| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | `26379` | -| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | -| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | -| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | -| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | -| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | -| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | -| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | -| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | -| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | -| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | -| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | -| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | -| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | -| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | -| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | -| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | -| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | -| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | -| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | -| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | -| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | -| `sysctlImage.command` | sysctlImage command to execute | [] | -| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | -| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | -| `sysctlImage.tag` | sysctlImage Init container tag | `stretch` | -| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | -| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | -| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | -| `podSecurityPolicy.create` | Specifies whether 
a PodSecurityPolicy should be created | `false` | - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, - -```bash -$ helm install --name my-release \ - --set password=secretpassword \ - stable/redis -``` - -The above command sets the Redis server password to `secretpassword`. - -Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, - -```bash -$ helm install --name my-release -f values.yaml stable/redis -``` - -> **Tip**: You can use the default [values.yaml](values.yaml) - -> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. - -## Configuration and installation details - -### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) - -It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. - -Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. - -### Production configuration - -This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
- -- Number of slaves: -```diff -- cluster.slaveCount: 2 -+ cluster.slaveCount: 3 -``` - -- Enable NetworkPolicy: -```diff -- networkPolicy.enabled: false -+ networkPolicy.enabled: true -``` - -- Start a side-car prometheus exporter: -```diff -- metrics.enabled: false -+ metrics.enabled: true -``` - -### Cluster topologies - -#### Default: Master-Slave - -When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: - - - Redis Master service: Points to the master, where read-write operations can be performed - - Redis Slave service: Points to the slaves, where only read operations are allowed. - -In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. - -#### Master-Slave with Sentinel - -When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain en extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed: - - - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accesing Redis Sentinel. - -For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar: - -``` -SENTINEL get-master-addr-by-name -``` -This command will return the address of the current master, which can be accessed from inside the cluster. - -In case the current master crashes, the Sentinel containers will elect a new master node. 
- -### Using password file -To use a password file for Redis you need to create a secret containing the password. - -> *NOTE*: It is important that the file with the password must be called `redis-password` - -And then deploy the Helm Chart using the secret name as parameter: - -```console -usePassword=true -usePasswordFile=true -existingSecret=redis-password-file -sentinels.enabled=true -metrics.enabled=true -``` - -### Metrics - -The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. - -### Host Kernel Settings -Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. -To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: -``` -sysctlImage: - enabled: true - mountHostSys: true - command: - - /bin/sh - - -c - - |- - install_packages procps - sysctl -w net.core.somaxconn=10000 - echo never > /host-sys/kernel/mm/transparent_hugepage/enabled -``` - -Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: - -```yaml -securityContext: - sysctls: - - name: net.core.somaxconn - value: "10000" -``` - -Note that this will not disable transparent huge tables. - -## Persistence - -By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. 
If a Persistent Volume Claim already exists, specify it during installation. - -### Existing PersistentVolumeClaim - -1. Create the PersistentVolume -2. Create the PersistentVolumeClaim -3. Install the chart - -```bash -$ helm install --set persistence.existingClaim=PVC_NAME stable/redis -``` - -## NetworkPolicy - -To enable network policy for Redis, install -[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), -and set `networkPolicy.enabled` to `true`. - -For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting -the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: - - kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" - -With NetworkPolicy enabled, only pods with the generated client label will be -able to connect to Redis. This label will be displayed in the output -after a successful install. - -With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: - -``` -networkPolicy: - enabled: true - ingressNSMatchLabels: - redis: external - ingressNSPodMatchLabels: - redis-client: true -``` - -## Upgrading an existing Release to a new major version - -A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an -incompatible breaking change needing manual actions. - -### To 10.0.0 - -For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. 
This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: -* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. -* Where redis clients need to be updated to support sentinel authentication. - -If using a master/slave topology, or with `usePassword: false`, no action is required. - -### To 8.0.18 - -For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. - -### To 7.0.0 - -This version causes a change in the Redis Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: - - - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. - - ``` - helm install stable/redis --set persistence.existingClaim= - ``` - - - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be done to upgrade the release - - ``` - helm delete --purge - helm install stable/redis --name - ``` - -Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. 
This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. - -Some values have changed as well: - - - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) - - `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) - -By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`. - -### To 6.0.0 - -Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. - -### To 5.0.0 - -The default image in this release may be switched out for any image containing the `redis-server` -and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` -must be specified. - -#### Breaking changes -- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. -- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. -- `master.persistence.path` now defaults to `/data`. - -### 4.0.0 - -This version removes the `chart` label from the `spec.selector.matchLabels` -which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently -added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. 
- -It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. - -Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. - -In order to upgrade, delete the Redis StatefulSet before upgrading: -```bash -$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master -``` -And edit the Redis slave (and metrics if enabled) deployment: -```bash -kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' -kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' -``` - -## Notable changes - -### 9.0.0 -The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). - -### 7.0.0 -In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. - -This version also allows enabling Redis Sentinel containers inside of the Redis Pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). 
diff --git a/chart/charts/redis/ci/default-values.yaml b/chart/charts/redis/ci/default-values.yaml deleted file mode 100755 index fc2ba60..0000000 --- a/chart/charts/redis/ci/default-values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/chart/charts/redis/ci/dev-values.yaml b/chart/charts/redis/ci/dev-values.yaml deleted file mode 100755 index be01913..0000000 --- a/chart/charts/redis/ci/dev-values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -master: - persistence: - enabled: false - -cluster: - enabled: true - slaveCount: 1 - -usePassword: false diff --git a/chart/charts/redis/ci/extra-flags-values.yaml b/chart/charts/redis/ci/extra-flags-values.yaml deleted file mode 100755 index 71132f7..0000000 --- a/chart/charts/redis/ci/extra-flags-values.yaml +++ /dev/null @@ -1,11 +0,0 @@ -master: - extraFlags: - - --maxmemory-policy allkeys-lru - persistence: - enabled: false -slave: - extraFlags: - - --maxmemory-policy allkeys-lru - persistence: - enabled: false -usePassword: false diff --git a/chart/charts/redis/ci/insecure-sentinel-values.yaml b/chart/charts/redis/ci/insecure-sentinel-values.yaml deleted file mode 100755 index 4ca1a93..0000000 --- a/chart/charts/redis/ci/insecure-sentinel-values.yaml +++ /dev/null @@ -1,524 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName - -## Bitnami Redis image version -## ref: https://hub.docker.com/r/bitnami/redis/tags/ -## -image: - registry: docker.io - repository: bitnami/redis - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links - ## - tag: 
5.0.5-debian-9-r36 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - -## Redis pod Security Context -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - -## Cluster settings -cluster: - enabled: true - slaveCount: 3 - -## Use redis sentinel in the redis pod. This will disable the master and slave services and -## create one redis service with ports to the sentinel and the redis instances -sentinel: - enabled: true - ## Require password authentication on the sentinel itself - ## ref: https://redis.io/topics/sentinel - usePassword: false - ## Bitnami Redis Sentintel image version - ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ - ## - image: - registry: docker.io - repository: bitnami/redis-sentinel - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.5-debian-9-r37 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - masterSet: mymaster - initialCheckTimeout: 5 - quorum: 2 - downAfterMilliseconds: 60000 - failoverTimeout: 18000 - parallelSyncs: 1 - port: 26379 - ## Configure extra options for Redis Sentinel liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - ## Redis Sentinel resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Redis Sentinel Service properties - service: - ## Redis Sentinel Service type - type: ClusterIP - sentinelPort: 26379 - redisPort: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # sentinelNodePort: - # redisNodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - -networkPolicy: - ## Specifies whether a NetworkPolicy should be created - ## - enabled: true - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port Redis is listening - ## on. When true, Redis will accept connections from any source - ## (with the correct destination port). 
- ## - # allowExternal: true - -serviceAccount: - ## Specifies whether a ServiceAccount should be created - ## - create: false - ## The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the fullname template - name: - -rbac: - ## Specifies whether RBAC resources should be created - ## - create: false - - role: - ## Rules to create. It follows the role specification - # rules: - # - apiGroups: - # - extensions - # resources: - # - podsecuritypolicies - # verbs: - # - use - # resourceNames: - # - gce.unprivileged - rules: [] - - -## Use password authentication -usePassword: true -## Redis password (both master and slave) -## Defaults to a random 10-character alphanumeric string if not set and usePassword is true -## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run -## -password: -## Use existing secret (ignores previous password) -# existingSecret: -## Password key to be retrieved from Redis secret -## -# existingSecretPasswordKey: - -## Mount secrets as files instead of environment variables -usePasswordFile: false - -## Persist data to a persistent volume -persistence: {} - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - # existingClaim: - -# Redis port -redisPort: 6379 - -## -## Redis Master parameters -## -master: - ## Redis command arguments - ## - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Redis additional command line flags - ## - ## Can be used to specify command line flags, for example: - ## - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - extraFlags: [] - ## Comma-separated list of Redis commands to disable - ## - ## Can be used to disable Redis commands for security reasons. - ## Commands will be completely disabled by renaming each to an empty string. 
- ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Master additional pod labels and annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - podLabels: {} - podAnnotations: {} - - ## Redis Master resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Configure extra options for Redis Master liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - - ## Redis Master Node selectors and tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - ## Redis Master pod/node affinity/anti-affinity - ## - affinity: {} - - ## Redis Master Service properties - service: - ## Redis Master Service type - type: ClusterIP - port: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. - ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - - ## Redis Master pod priorityClassName - # priorityClassName: {} - - -## -## Redis Slave properties -## Note: service.type is a mandatory parameter -## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master -## -slave: - ## Slave Service properties - service: - ## Redis Slave Service type - type: ClusterIP - ## Redis port - port: 6379 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. 
This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - - ## Redis slave port - port: 6379 - - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Redis extra flags - extraFlags: [] - ## List of Redis commands to disable - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Slave pod/node affinity/anti-affinity - ## - affinity: {} - - ## Configure extra options for Redis Slave liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 10 - successThreshold: 1 - failureThreshold: 5 - - ## Redis slave Resource - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. 
- ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - - ## Redis slave selectors and tolerations for pod assignment - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Redis slave pod Annotation and Labels - podLabels: {} - podAnnotations: {} - - ## Redis slave pod priorityClassName - # priorityClassName: {} - -## Prometheus Exporter / Metrics -## -metrics: - enabled: true - - image: - registry: docker.io - repository: bitnami/redis-exporter - tag: 1.0.3-debian-9-r0 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - - ## Metrics exporter resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - # resources: {} - ## Metrics exporter pod priorityClassName - # priorityClassName: {} - service: - type: ClusterIP - ## Use serviceLoadBalancerIP to request a specific static IP, - ## otherwise leave blank - # loadBalancerIP: - annotations: {} - - ## Extra arguments for Metrics exporter, for example: - ## extraArgs: - ## check-keys: myKey,myOtherKey - # extraArgs: {} - - ## Metrics exporter pod Annotation and Labels - podAnnotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9121" - # podLabels: {} - - # Enable this if you're using https://github.com/coreos/prometheus-operator - serviceMonitor: - enabled: false - ## Specify a namespace if needed - # namespace: monitoring - # fallback to the prometheus default unless specified - # interval: 10s - ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) - ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) - ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) - selector: - prometheus: kube-prometheus -## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m - -## Redis config file -## ref: https://redis.io/topics/config -## -configmap: |- - # maxmemory-policy volatile-lru - -## Sysctl InitContainer -## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) -sysctlImage: - enabled: false - command: [] - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - mountHostSys: false - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m diff --git a/chart/charts/redis/ci/production-sentinel-values.yaml b/chart/charts/redis/ci/production-sentinel-values.yaml deleted file mode 100755 index 57df7dc..0000000 --- a/chart/charts/redis/ci/production-sentinel-values.yaml +++ /dev/null @@ -1,524 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName - -## Bitnami Redis image version -## ref: https://hub.docker.com/r/bitnami/redis/tags/ -## -image: - registry: docker.io - repository: bitnami/redis - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.5-debian-9-r36 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## 
ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - -## Redis pod Security Context -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - -## Cluster settings -cluster: - enabled: true - slaveCount: 3 - -## Use redis sentinel in the redis pod. This will disable the master and slave services and -## create one redis service with ports to the sentinel and the redis instances -sentinel: - enabled: true - ## Require password authentication on the sentinel itself - ## ref: https://redis.io/topics/sentinel - usePassword: true - ## Bitnami Redis Sentintel image version - ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ - ## - image: - registry: docker.io - repository: bitnami/redis-sentinel - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.5-debian-9-r37 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - masterSet: mymaster - initialCheckTimeout: 5 - quorum: 2 - downAfterMilliseconds: 60000 - failoverTimeout: 18000 - parallelSyncs: 1 - port: 26379 - ## Configure extra options for Redis Sentinel liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - ## Redis Sentinel resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Redis Sentinel Service properties - service: - ## Redis Sentinel Service type - type: ClusterIP - sentinelPort: 26379 - redisPort: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # sentinelNodePort: - # redisNodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - -networkPolicy: - ## Specifies whether a NetworkPolicy should be created - ## - enabled: true - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port Redis is listening - ## on. When true, Redis will accept connections from any source - ## (with the correct destination port). 
- ## - # allowExternal: true - -serviceAccount: - ## Specifies whether a ServiceAccount should be created - ## - create: false - ## The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the fullname template - name: - -rbac: - ## Specifies whether RBAC resources should be created - ## - create: false - - role: - ## Rules to create. It follows the role specification - # rules: - # - apiGroups: - # - extensions - # resources: - # - podsecuritypolicies - # verbs: - # - use - # resourceNames: - # - gce.unprivileged - rules: [] - - -## Use password authentication -usePassword: true -## Redis password (both master and slave) -## Defaults to a random 10-character alphanumeric string if not set and usePassword is true -## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run -## -password: -## Use existing secret (ignores previous password) -# existingSecret: -## Password key to be retrieved from Redis secret -## -# existingSecretPasswordKey: - -## Mount secrets as files instead of environment variables -usePasswordFile: false - -## Persist data to a persistent volume -persistence: {} - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - # existingClaim: - -# Redis port -redisPort: 6379 - -## -## Redis Master parameters -## -master: - ## Redis command arguments - ## - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Redis additional command line flags - ## - ## Can be used to specify command line flags, for example: - ## - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - extraFlags: [] - ## Comma-separated list of Redis commands to disable - ## - ## Can be used to disable Redis commands for security reasons. - ## Commands will be completely disabled by renaming each to an empty string. 
- ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Master additional pod labels and annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - podLabels: {} - podAnnotations: {} - - ## Redis Master resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Configure extra options for Redis Master liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - - ## Redis Master Node selectors and tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - ## Redis Master pod/node affinity/anti-affinity - ## - affinity: {} - - ## Redis Master Service properties - service: - ## Redis Master Service type - type: ClusterIP - port: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. - ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - - ## Redis Master pod priorityClassName - # priorityClassName: {} - - -## -## Redis Slave properties -## Note: service.type is a mandatory parameter -## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master -## -slave: - ## Slave Service properties - service: - ## Redis Slave Service type - type: ClusterIP - ## Redis port - port: 6379 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. 
This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - - ## Redis slave port - port: 6379 - - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Redis extra flags - extraFlags: [] - ## List of Redis commands to disable - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Slave pod/node affinity/anti-affinity - ## - affinity: {} - - ## Configure extra options for Redis Slave liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 10 - successThreshold: 1 - failureThreshold: 5 - - ## Redis slave Resource - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. 
- ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - - ## Redis slave selectors and tolerations for pod assignment - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Redis slave pod Annotation and Labels - podLabels: {} - podAnnotations: {} - - ## Redis slave pod priorityClassName - # priorityClassName: {} - -## Prometheus Exporter / Metrics -## -metrics: - enabled: true - - image: - registry: docker.io - repository: bitnami/redis-exporter - tag: 1.0.3-debian-9-r0 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - - ## Metrics exporter resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - # resources: {} - ## Metrics exporter pod priorityClassName - # priorityClassName: {} - service: - type: ClusterIP - ## Use serviceLoadBalancerIP to request a specific static IP, - ## otherwise leave blank - # loadBalancerIP: - annotations: {} - - ## Extra arguments for Metrics exporter, for example: - ## extraArgs: - ## check-keys: myKey,myOtherKey - # extraArgs: {} - - ## Metrics exporter pod Annotation and Labels - podAnnotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9121" - # podLabels: {} - - # Enable this if you're using https://github.com/coreos/prometheus-operator - serviceMonitor: - enabled: false - ## Specify a namespace if needed - # namespace: monitoring - # fallback to the prometheus default unless specified - # interval: 10s - ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) - ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) - ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) - selector: - prometheus: kube-prometheus -## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m - -## Redis config file -## ref: https://redis.io/topics/config -## -configmap: |- - # maxmemory-policy volatile-lru - -## Sysctl InitContainer -## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) -sysctlImage: - enabled: false - command: [] - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - mountHostSys: false - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m diff --git a/chart/charts/redis/ci/production-values.yaml b/chart/charts/redis/ci/production-values.yaml deleted file mode 100755 index 7b535c9..0000000 --- a/chart/charts/redis/ci/production-values.yaml +++ /dev/null @@ -1,525 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName - -## Bitnami Redis image version -## ref: https://hub.docker.com/r/bitnami/redis/tags/ -## -image: - registry: docker.io - repository: bitnami/redis - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.5-debian-9-r36 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: 
http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - -## Redis pod Security Context -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - -## Cluster settings -cluster: - enabled: true - slaveCount: 3 - -## Use redis sentinel in the redis pod. This will disable the master and slave services and -## create one redis service with ports to the sentinel and the redis instances -sentinel: - enabled: false - ## Require password authentication on the sentinel itself - ## ref: https://redis.io/topics/sentinel - usePassword: true - ## Bitnami Redis Sentintel image version - ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ - ## - image: - registry: docker.io - repository: bitnami/redis-sentinel - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.5-debian-9-r37 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - masterSet: mymaster - initialCheckTimeout: 5 - quorum: 2 - downAfterMilliseconds: 60000 - failoverTimeout: 18000 - parallelSyncs: 1 - port: 26379 - ## Configure extra options for Redis Sentinel liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - ## Redis Sentinel resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Redis Sentinel Service properties - service: - ## Redis Sentinel Service type - type: ClusterIP - sentinelPort: 26379 - redisPort: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # sentinelNodePort: - # redisNodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - -networkPolicy: - ## Specifies whether a NetworkPolicy should be created - ## - enabled: true - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port Redis is listening - ## on. When true, Redis will accept connections from any source - ## (with the correct destination port). 
- ## - # allowExternal: true - -serviceAccount: - ## Specifies whether a ServiceAccount should be created - ## - create: false - ## The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the fullname template - name: - -rbac: - ## Specifies whether RBAC resources should be created - ## - create: false - - role: - ## Rules to create. It follows the role specification - # rules: - # - apiGroups: - # - extensions - # resources: - # - podsecuritypolicies - # verbs: - # - use - # resourceNames: - # - gce.unprivileged - rules: [] - - -## Use password authentication -usePassword: true -## Redis password (both master and slave) -## Defaults to a random 10-character alphanumeric string if not set and usePassword is true -## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run -## -password: -## Use existing secret (ignores previous password) -# existingSecret: -## Password key to be retrieved from Redis secret -## -# existingSecretPasswordKey: - -## Mount secrets as files instead of environment variables -usePasswordFile: false - -## Persist data to a persistent volume -persistence: {} - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - # existingClaim: - -# Redis port -redisPort: 6379 - -## -## Redis Master parameters -## -master: - ## Redis command arguments - ## - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Redis additional command line flags - ## - ## Can be used to specify command line flags, for example: - ## - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - extraFlags: [] - ## Comma-separated list of Redis commands to disable - ## - ## Can be used to disable Redis commands for security reasons. - ## Commands will be completely disabled by renaming each to an empty string. 
- ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Master additional pod labels and annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - podLabels: {} - podAnnotations: {} - - ## Redis Master resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Configure extra options for Redis Master liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - - ## Redis Master Node selectors and tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - ## Redis Master pod/node affinity/anti-affinity - ## - affinity: {} - - ## Redis Master Service properties - service: - ## Redis Master Service type - type: ClusterIP - port: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. - ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - - ## Redis Master pod priorityClassName - # priorityClassName: {} - - -## -## Redis Slave properties -## Note: service.type is a mandatory parameter -## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master -## -slave: - ## Slave Service properties - service: - ## Redis Slave Service type - type: ClusterIP - ## Redis port - port: 6379 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. 
This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - loadBalancerIP: - - ## Redis slave port - port: 6379 - - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Redis extra flags - extraFlags: [] - ## List of Redis commands to disable - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Slave pod/node affinity/anti-affinity - ## - affinity: {} - - ## Configure extra options for Redis Slave liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 10 - successThreshold: 1 - failureThreshold: 5 - - ## Redis slave Resource - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. 
- ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - - ## Redis slave selectors and tolerations for pod assignment - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Redis slave pod Annotation and Labels - podLabels: {} - podAnnotations: {} - - ## Redis slave pod priorityClassName - # priorityClassName: {} - -## Prometheus Exporter / Metrics -## -metrics: - enabled: true - - image: - registry: docker.io - repository: bitnami/redis-exporter - tag: 1.0.3-debian-9-r0 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - - ## Metrics exporter resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - # resources: {} - - ## Extra arguments for Metrics exporter, for example: - ## extraArgs: - ## check-keys: myKey,myOtherKey - # extraArgs: {} - ## Metrics exporter pod priorityClassName - # priorityClassName: {} - service: - type: ClusterIP - ## Use serviceLoadBalancerIP to request a specific static IP, - ## otherwise leave blank - # loadBalancerIP: - annotations: {} - - ## Metrics exporter pod Annotation and Labels - podAnnotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9121" - # podLabels: {} - - # Enable this if you're using https://github.com/coreos/prometheus-operator - serviceMonitor: - enabled: false - ## Specify a namespace if needed - # namespace: monitoring - # fallback to the prometheus default unless specified - # interval: 10s - ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) - ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) - ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) - selector: - prometheus: kube-prometheus - -## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m - -## Redis config file -## ref: https://redis.io/topics/config -## -configmap: |- - # maxmemory-policy volatile-lru - -## Sysctl InitContainer -## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) -sysctlImage: - enabled: false - command: [] - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - mountHostSys: false - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m diff --git a/chart/charts/redis/ci/redis-lib-values.yaml b/chart/charts/redis/ci/redis-lib-values.yaml deleted file mode 100755 index e03382b..0000000 --- a/chart/charts/redis/ci/redis-lib-values.yaml +++ /dev/null @@ -1,13 +0,0 @@ -## Redis library image -## ref: https://hub.docker.com/r/library/redis/ -## -image: - registry: docker.io - repository: redis - tag: '5.0.5' - -master: - command: "redis-server" - -slave: - command: "redis-server" diff --git a/chart/charts/redis/ci/redisgraph-module-values.yaml b/chart/charts/redis/ci/redisgraph-module-values.yaml deleted file mode 100755 index 8096020..0000000 --- a/chart/charts/redis/ci/redisgraph-module-values.yaml +++ /dev/null @@ -1,10 +0,0 @@ -image: - registry: docker.io - repository: redislabs/redisgraph - tag: '1.0.0' - -master: - command: "redis-server" - -slave: - command: "redis-server" diff --git a/chart/charts/redis/templates/NOTES.txt b/chart/charts/redis/templates/NOTES.txt deleted file mode 100755 index 4298d70..0000000 --- 
a/chart/charts/redis/templates/NOTES.txt +++ /dev/null @@ -1,104 +0,0 @@ -** Please be patient while the chart is being deployed ** - -{{- if contains .Values.master.service.type "LoadBalancer" }} -{{- if not .Values.usePassword }} -{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} - -------------------------------------------------------------------------------- - WARNING - - By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have - most likely exposed the Redis service externally without any authentication - mechanism. - - For security reasons, we strongly suggest that you switch to "ClusterIP" or - "NodePort". As alternative, you can also switch to "usePassword=true" - providing a valid password on "password" parameter. - -------------------------------------------------------------------------------- -{{- end }} -{{- end }} -{{- end }} - -{{- if .Values.cluster.enabled }} -{{- if .Values.sentinel.enabled }} -Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: - -{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations - -For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. - -{{- else }} -Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: - -{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations -{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations -{{- end }} - -{{- else }} -Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: - -{{ template "redis.fullname" . 
}}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - -{{- end }} - -{{ if .Values.usePassword }} -To get your password run: - - export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) -{{- end }} - -To connect to your Redis server: - -1. Run a Redis pod that you can use as a client: - - kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ - {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} - {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} - --image {{ template "redis.image" . }} -- bash - -2. Connect using the Redis CLI: - -{{- if .Values.cluster.enabled }} - {{- if .Values.sentinel.enabled }} - redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read only operations - redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access - {{- else }} - redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} - redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} - {{- end }} -{{- else }} - redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} -{{- end }} - -{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} -Note: Since NetworkPolicy is enabled, only pods with label -{{ template "redis.fullname" . }}-client=true" -will be able to connect to redis. 
-{{- else -}} - -To connect to your database from outside the cluster execute the following commands: - -{{- if contains "NodePort" .Values.master.service.type }} - - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) - redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} - -{{- else if contains "LoadBalancer" .Values.master.service.type }} - - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - redis-cli -h $SERVICE_IP -p {{ .Values.master.service.nodePort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} - -{{- else if contains "ClusterIP" .Values.master.service.type }} - - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & - redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} - -{{- end }} -{{- end }} - -{{ include "redis.checkRollingTags" . }} diff --git a/chart/charts/redis/templates/_helpers.tpl b/chart/charts/redis/templates/_helpers.tpl deleted file mode 100755 index 8c1df0d..0000000 --- a/chart/charts/redis/templates/_helpers.tpl +++ /dev/null @@ -1,355 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. 
-*/}} -{{- define "redis.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Expand the chart plus release name (used by the chart label) -*/}} -{{- define "redis.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "redis.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for networkpolicy. -*/}} -{{- define "networkPolicy.apiVersion" -}} -{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "extensions/v1beta1" -}} -{{- else -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiGroup for PodSecurityPolicy. -*/}} -{{- define "podSecurityPolicy.apiGroup" -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "policy" -}} -{{- else -}} -{{- print "extensions" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for PodSecurityPolicy. 
-*/}} -{{- define "podSecurityPolicy.apiVersion" -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "policy/v1beta1" -}} -{{- else -}} -{{- print "extensions/v1beta1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Redis image name -*/}} -{{- define "redis.image" -}} -{{- $registryName := .Values.image.registry -}} -{{- $repositoryName := .Values.image.repository -}} -{{- $tag := .Values.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Redis Sentinel image name -*/}} -{{- define "sentinel.image" -}} -{{- $registryName := .Values.sentinel.image.registry -}} -{{- $repositoryName := .Values.sentinel.image.repository -}} -{{- $tag := .Values.sentinel.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
-Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper image name (for the metrics image) -*/}} -{{- define "redis.metrics.image" -}} -{{- $registryName := .Values.metrics.image.registry -}} -{{- $repositoryName := .Values.metrics.image.repository -}} -{{- $tag := .Values.metrics.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper image name (for the init container volume-permissions image) -*/}} -{{- define "redis.volumePermissions.image" -}} -{{- $registryName := .Values.volumePermissions.image.registry -}} -{{- $repositoryName := .Values.volumePermissions.image.repository -}} -{{- $tag := .Values.volumePermissions.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
-Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "redis.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Get the password secret. -*/}} -{{- define "redis.secretName" -}} -{{- if .Values.existingSecret -}} -{{- printf "%s" .Values.existingSecret -}} -{{- else -}} -{{- printf "%s" (include "redis.fullname" .) -}} -{{- end -}} -{{- end -}} - -{{/* -Get the password key to be retrieved from Redis secret. 
-*/}} -{{- define "redis.secretPasswordKey" -}} -{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} -{{- printf "%s" .Values.existingSecretPasswordKey -}} -{{- else -}} -{{- printf "redis-password" -}} -{{- end -}} -{{- end -}} - -{{/* -Return Redis password -*/}} -{{- define "redis.password" -}} -{{- if not (empty .Values.global.redis.password) }} - {{- .Values.global.redis.password -}} -{{- else if not (empty .Values.password) -}} - {{- .Values.password -}} -{{- else -}} - {{- randAlphaNum 10 -}} -{{- end -}} -{{- end -}} - -{{/* -Return sysctl image -*/}} -{{- define "redis.sysctl.image" -}} -{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} -{{- $repositoryName := .Values.sysctlImage.repository -}} -{{- $tag := default "stretch" .Values.sysctlImage.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "redis.imagePullSecrets" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. -Also, we can not use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.global.imagePullSecrets }} - - name: {{ . 
}} -{{- end }} -{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.metrics.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.sysctlImage.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.metrics.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.sysctlImage.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- end -}} - -{{/* Check if there are rolling tags in the images */}} -{{- define "redis.checkRollingTags" -}} -{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} -WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. -+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ -{{- end }} -{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} -WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
-+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ -{{- end }} -{{- end -}} - -{{/* -Return the proper Storage Class for master -*/}} -{{- define "redis.master.storageClass" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. -*/}} -{{- if .Values.global -}} - {{- if .Values.global.storageClass -}} - {{- if (eq "-" .Values.global.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.global.storageClass -}} - {{- end -}} - {{- else -}} - {{- if .Values.master.persistence.storageClass -}} - {{- if (eq "-" .Values.master.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- else -}} - {{- if .Values.master.persistence.storageClass -}} - {{- if (eq "-" .Values.master.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} - {{- end -}} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Storage Class for slave -*/}} -{{- define "redis.slave.storageClass" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
-*/}} -{{- if .Values.global -}} - {{- if .Values.global.storageClass -}} - {{- if (eq "-" .Values.global.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.global.storageClass -}} - {{- end -}} - {{- else -}} - {{- if .Values.slave.persistence.storageClass -}} - {{- if (eq "-" .Values.slave.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- else -}} - {{- if .Values.slave.persistence.storageClass -}} - {{- if (eq "-" .Values.slave.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} - {{- end -}} - {{- end -}} -{{- end -}} -{{- end -}} diff --git a/chart/charts/redis/templates/configmap.yaml b/chart/charts/redis/templates/configmap.yaml deleted file mode 100755 index d17ec26..0000000 --- a/chart/charts/redis/templates/configmap.yaml +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "redis.fullname" . }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -data: - redis.conf: |- -{{- if .Values.configmap }} - # User-supplied configuration: -{{ tpl .Values.configmap . | indent 4 }} -{{- end }} - master.conf: |- - dir {{ .Values.master.persistence.path }} -{{- if .Values.master.configmap }} - # User-supplied master configuration: -{{ tpl .Values.master.configmap . | indent 4 }} -{{- end }} -{{- if .Values.master.disableCommands }} -{{- range .Values.master.disableCommands }} - rename-command {{ . 
}} "" -{{- end }} -{{- end }} - replica.conf: |- - dir {{ .Values.slave.persistence.path }} - slave-read-only yes -{{- if .Values.slave.configmap }} - # User-supplied slave configuration: -{{ tpl .Values.slave.configmap . | indent 4 }} -{{- end }} -{{- if .Values.slave.disableCommands }} -{{- range .Values.slave.disableCommands }} - rename-command {{ . }} "" -{{- end }} -{{- end }} -{{- if .Values.sentinel.enabled }} - sentinel.conf: |- - dir "/tmp" - bind 0.0.0.0 - port {{ .Values.sentinel.port }} - sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} - sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} - sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} - sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} -{{- if .Values.sentinel.configmap }} - # User-supplied sentinel configuration: -{{ tpl .Values.sentinel.configmap . | indent 4 }} -{{- end }} -{{- end }} diff --git a/chart/charts/redis/templates/headless-svc.yaml b/chart/charts/redis/templates/headless-svc.yaml deleted file mode 100755 index 909cbce..0000000 --- a/chart/charts/redis/templates/headless-svc.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "redis.fullname" . }}-headless - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . 
}} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - clusterIP: None - ports: - - name: redis - port: {{ .Values.redisPort }} - targetPort: redis -{{- if .Values.sentinel.enabled }} - - name: redis-sentinel - port: {{ .Values.sentinel.port }} - targetPort: redis-sentinel -{{- end }} - selector: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} diff --git a/chart/charts/redis/templates/health-configmap.yaml b/chart/charts/redis/templates/health-configmap.yaml deleted file mode 100755 index 35c61b5..0000000 --- a/chart/charts/redis/templates/health-configmap.yaml +++ /dev/null @@ -1,134 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "redis.fullname" . }}-health - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -data: - ping_readiness_local.sh: |- -{{- if .Values.usePasswordFile }} - password_aux=`cat ${REDIS_PASSWORD_FILE}` - export REDIS_PASSWORD=$password_aux -{{- end }} - response=$( - timeout -s 9 $1 \ - redis-cli \ -{{- if .Values.usePassword }} - -a $REDIS_PASSWORD --no-auth-warning \ -{{- end }} - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_local.sh: |- -{{- if .Values.usePasswordFile }} - password_aux=`cat ${REDIS_PASSWORD_FILE}` - export REDIS_PASSWORD=$password_aux -{{- end }} - response=$( - timeout -s 9 $1 \ - redis-cli \ -{{- if .Values.usePassword }} - -a $REDIS_PASSWORD --no-auth-warning \ -{{- end }} - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi -{{- if .Values.sentinel.enabled }} - ping_sentinel.sh: |- -{{- if .Values.usePasswordFile }} - password_aux=`cat ${REDIS_PASSWORD_FILE}` - export REDIS_PASSWORD=$password_aux -{{- end }} - 
response=$( - timeout -s 9 $1 \ - redis-cli \ -{{- if .Values.usePassword }} - -a $REDIS_PASSWORD --no-auth-warning \ -{{- end }} - -h localhost \ - -p $REDIS_SENTINEL_PORT \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - parse_sentinels.awk: |- - /ip/ {FOUND_IP=1} - /port/ {FOUND_PORT=1} - /runid/ {FOUND_RUNID=1} - !/ip|port|runid/ { - if (FOUND_IP==1) { - IP=$1; FOUND_IP=0; - } - else if (FOUND_PORT==1) { - PORT=$1; - FOUND_PORT=0; - } else if (FOUND_RUNID==1) { - printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; - } - } -{{- end }} - ping_readiness_master.sh: |- -{{- if .Values.usePasswordFile }} - password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` - export REDIS_MASTER_PASSWORD=$password_aux -{{- end }} - response=$( - timeout -s 9 $1 \ - redis-cli \ -{{- if .Values.usePassword }} - -a $REDIS_MASTER_PASSWORD --no-auth-warning \ -{{- end }} - -h $REDIS_MASTER_HOST \ - -p $REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_master.sh: |- -{{- if .Values.usePasswordFile }} - password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` - export REDIS_MASTER_PASSWORD=$password_aux -{{- end }} - response=$( - timeout -s 9 $1 \ - redis-cli \ -{{- if .Values.usePassword }} - -a $REDIS_MASTER_PASSWORD --no-auth-warning \ -{{- end }} - -h $REDIS_MASTER_HOST \ - -p $REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? - "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? - exit $exit_status - ping_liveness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? 
- "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? - exit $exit_status diff --git a/chart/charts/redis/templates/metrics-prometheus.yaml b/chart/charts/redis/templates/metrics-prometheus.yaml deleted file mode 100755 index 3f33454..0000000 --- a/chart/charts/redis/templates/metrics-prometheus.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "redis.fullname" . }} - {{- if .Values.metrics.serviceMonitor.namespace }} - namespace: {{ .Values.metrics.serviceMonitor.namespace }} - {{- end }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} - {{ $key }}: {{ $value | quote }} - {{- end }} -spec: - endpoints: - - port: metrics - {{- if .Values.metrics.serviceMonitor.interval }} - interval: {{ .Values.metrics.serviceMonitor.interval }} - {{- end }} - selector: - matchLabels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - namespaceSelector: - matchNames: - - {{ .Release.Namespace }} -{{- end -}} diff --git a/chart/charts/redis/templates/metrics-svc.yaml b/chart/charts/redis/templates/metrics-svc.yaml deleted file mode 100755 index ef39725..0000000 --- a/chart/charts/redis/templates/metrics-svc.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if .Values.metrics.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "redis.fullname" . }}-metrics - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . 
}} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - {{- if .Values.metrics.service.labels -}} - {{ toYaml .Values.metrics.service.labels | nindent 4 }} - {{- end -}} - {{- if .Values.metrics.service.annotations }} - annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} - {{- end }} -spec: - type: {{ .Values.metrics.service.type }} - {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP -}} - loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} - {{ end -}} - {{- end -}} - ports: - - name: metrics - port: 9121 - targetPort: metrics - selector: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} -{{- end }} diff --git a/chart/charts/redis/templates/networkpolicy.yaml b/chart/charts/redis/templates/networkpolicy.yaml deleted file mode 100755 index 30f09f2..0000000 --- a/chart/charts/redis/templates/networkpolicy.yaml +++ /dev/null @@ -1,79 +0,0 @@ -{{- if .Values.networkPolicy.enabled }} -kind: NetworkPolicy -apiVersion: {{ template "networkPolicy.apiVersion" . }} -metadata: - name: {{ template "redis.fullname" . }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - podSelector: - matchLabels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - {{- if .Values.cluster.enabled }} - policyTypes: - - Ingress - - Egress - egress: - # Allow outbound connections to other cluster pods - - ports: - - port: {{ .Values.redisPort }} - {{- if .Values.sentinel.enabled }} - - port: {{ .Values.sentinel.port }} - {{- end }} - to: - - podSelector: - matchLabels: - app: {{ template "redis.name" . 
}} - release: {{ .Release.Name }} - {{- end }} - ingress: - # Allow inbound connections - - ports: - - port: {{ .Values.redisPort }} - {{- if .Values.sentinel.enabled }} - - port: {{ .Values.sentinel.port }} - {{- end }} - {{- if not .Values.networkPolicy.allowExternal }} - from: - - podSelector: - matchLabels: - {{ template "redis.fullname" . }}-client: "true" - {{- if .Values.metrics.enabled }} - - podSelector: - matchLabels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - role: metrics - {{- end }} - {{- if .Values.cluster.enabled }} - - podSelector: - matchLabels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - role: slave - {{- end }} - {{- if .Values.networkPolicy.ingressNSMatchLabels }} - - namespaceSelector: - matchLabels: - {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} - {{ $key | quote }}: {{ $value | quote }} - {{- end }} - {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} - podSelector: - matchLabels: - {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} - {{ $key | quote }}: {{ $value | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- if .Values.metrics.enabled }} - # Allow prometheus scrapes for metrics - - ports: - - port: 9121 - {{- end }} -{{- end }} diff --git a/chart/charts/redis/templates/psp.yaml b/chart/charts/redis/templates/psp.yaml deleted file mode 100755 index 28ae22a..0000000 --- a/chart/charts/redis/templates/psp.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{{- if .Values.podSecurityPolicy.create }} -apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "redis.fullname" . }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . 
}} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - allowPrivilegeEscalation: false - fsGroup: - rule: 'MustRunAs' - ranges: - - min: {{ .Values.securityContext.fsGroup }} - max: {{ .Values.securityContext.fsGroup }} - hostIPC: false - hostNetwork: false - hostPID: false - privileged: false - readOnlyRootFilesystem: false - requiredDropCapabilities: - - ALL - runAsUser: - rule: 'MustRunAs' - ranges: - - min: {{ .Values.securityContext.runAsUser }} - max: {{ .Values.securityContext.runAsUser }} - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: {{ .Values.securityContext.runAsUser }} - max: {{ .Values.securityContext.runAsUser }} - volumes: - - 'configMap' - - 'secret' - - 'emptyDir' - - 'persistentVolumeClaim' -{{- end }} diff --git a/chart/charts/redis/templates/redis-master-statefulset.yaml b/chart/charts/redis/templates/redis-master-statefulset.yaml deleted file mode 100755 index 3c2f183..0000000 --- a/chart/charts/redis/templates/redis-master-statefulset.yaml +++ /dev/null @@ -1,410 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "redis.fullname" . }}-master - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - selector: - matchLabels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - role: master - serviceName: {{ template "redis.fullname" . }}-headless - template: - metadata: - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - role: master -{{- if .Values.master.podLabels }} -{{ toYaml .Values.master.podLabels | indent 8 }} -{{- end }} -{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} -{{ toYaml .Values.metrics.podLabels | indent 8 }} -{{- end }} - annotations: - checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . 
| sha256sum }} - checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} - {{- if .Values.master.podAnnotations }} -{{ toYaml .Values.master.podAnnotations | indent 8 }} - {{- end }} - {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} -{{ toYaml .Values.metrics.podAnnotations | indent 8 }} - {{- end }} - spec: -{{- include "redis.imagePullSecrets" . | indent 6 }} - {{- if .Values.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.securityContext.fsGroup }} - {{- if .Values.securityContext.sysctls }} - sysctls: -{{ toYaml .Values.securityContext.sysctls | indent 8 }} - {{- end }} - {{- end }} - serviceAccountName: "{{ template "redis.serviceAccountName" . }}" - {{- if .Values.master.priorityClassName }} - priorityClassName: "{{ .Values.master.priorityClassName }}" - {{- end }} - {{- with .Values.master.affinity }} - affinity: -{{ tpl (toYaml .) $ | indent 8 }} - {{- end }} - {{- if .Values.master.nodeSelector }} - nodeSelector: -{{ toYaml .Values.master.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.master.tolerations }} - tolerations: -{{ toYaml .Values.master.tolerations | indent 8 }} - {{- end }} - {{- if .Values.master.schedulerName }} - schedulerName: "{{ .Values.master.schedulerName }}" - {{- end }} - containers: - - name: {{ template "redis.fullname" . }} - image: "{{ template "redis.image" . 
}}" - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - {{- if .Values.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - {{- end }} - command: - - /bin/bash - - -c - - | - {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} - useradd redis - chown -R redis {{ .Values.master.persistence.path }} - {{- end }} - if [[ -n $REDIS_PASSWORD_FILE ]]; then - password_aux=`cat ${REDIS_PASSWORD_FILE}` - export REDIS_PASSWORD=$password_aux - fi - if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then - cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf - fi - if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - ARGS=("--port" "${REDIS_PORT}") - {{- if .Values.usePassword }} - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_PASSWORD}") - {{- else }} - ARGS+=("--protected-mode" "no") - {{- end }} - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") - {{- if .Values.master.extraFlags }} - {{- range .Values.master.extraFlags }} - ARGS+=({{ . | quote }}) - {{- end }} - {{- end }} - {{- if .Values.master.command }} - {{ .Values.master.command }} ${ARGS[@]} - {{- else }} - redis-server "${ARGS[@]}" - {{- end }} - env: - - name: REDIS_REPLICATION_MODE - value: master - {{- if .Values.usePassword }} - {{- if .Values.usePasswordFile }} - - name: REDIS_PASSWORD_FILE - value: "/opt/bitnami/redis/secrets/redis-password" - {{- else }} - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "redis.secretName" . }} - key: {{ template "redis.secretPasswordKey" . 
}} - {{- end }} - {{- else }} - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - {{- end }} - - name: REDIS_PORT - value: {{ .Values.redisPort | quote }} - ports: - - name: redis - containerPort: {{ .Values.redisPort }} - {{- if .Values.master.livenessProbe.enabled }} - livenessProbe: - initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.master.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} - exec: - command: - - sh - - -c - - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} - {{- end }} - {{- if .Values.master.readinessProbe.enabled}} - readinessProbe: - initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.master.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} - exec: - command: - - sh - - -c - - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} - {{- end }} - resources: -{{ toYaml .Values.master.resources | indent 10 }} - volumeMounts: - - name: health - mountPath: /health - {{- if .Values.usePasswordFile }} - - name: redis-password - mountPath: /opt/bitnami/redis/secrets/ - {{- end }} - - name: redis-data - mountPath: {{ .Values.master.persistence.path }} - subPath: {{ .Values.master.persistence.subPath }} - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc/ - {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} - - name: sentinel - image: "{{ template "sentinel.image" . 
}}" - imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} - {{- if .Values.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - {{- end }} - command: - - /bin/bash - - -c - - | - if [[ -n $REDIS_PASSWORD_FILE ]]; then - password_aux=`cat ${REDIS_PASSWORD_FILE}` - export REDIS_PASSWORD=$password_aux - fi - if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then - cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf - {{- if .Values.usePassword }} - printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf - {{- if .Values.sentinel.usePassword }} - printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf - {{- end }} - {{- end }} - {{- if .Values.sentinel.staticID }} - printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf - {{- end }} - fi - echo "Getting information about current running sentinels" - # Get information from existing sentinels - existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . }} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) - echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf - - redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel - env: - {{- if .Values.usePassword }} - {{- if .Values.usePasswordFile }} - - name: REDIS_PASSWORD_FILE - value: "/opt/bitnami/redis/secrets/redis-password" - {{- else }} - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "redis.secretName" . }} - key: {{ template "redis.secretPasswordKey" . 
}} - {{- end }} - {{- else }} - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - {{- end }} - - name: REDIS_SENTINEL_PORT - value: {{ .Values.sentinel.port | quote }} - ports: - - name: redis-sentinel - containerPort: {{ .Values.sentinel.port }} - {{- if .Values.sentinel.livenessProbe.enabled }} - livenessProbe: - initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} - exec: - command: - - sh - - -c - - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} - {{- end }} - {{- if .Values.sentinel.readinessProbe.enabled}} - readinessProbe: - initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} - exec: - command: - - sh - - -c - - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} - {{- end }} - resources: -{{ toYaml .Values.sentinel.resources | indent 10 }} - volumeMounts: - - name: health - mountPath: /health - {{- if .Values.usePasswordFile }} - - name: redis-password - mountPath: /opt/bitnami/redis/secrets/ - {{- end }} - - name: redis-data - mountPath: {{ .Values.master.persistence.path }} - subPath: {{ .Values.master.persistence.subPath }} - - name: config - mountPath: /opt/bitnami/redis-sentinel/mounted-etc - - name: sentinel-tmp-conf - mountPath: /opt/bitnami/redis-sentinel/etc/ - {{- end }} -{{- if .Values.metrics.enabled }} - - name: metrics - image: {{ template "redis.metrics.image" . 
}} - imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} - command: - - /bin/bash - - -c - - | - if [[ -f '/secrets/redis-password' ]]; then - export REDIS_PASSWORD=$(cat /secrets/redis-password) - fi - redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} - env: - - name: REDIS_ALIAS - value: {{ template "redis.fullname" . }} - {{- if and .Values.usePassword (not .Values.usePasswordFile) }} - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "redis.secretName" . }} - key: {{ template "redis.secretPasswordKey" . }} - {{- end }} - volumeMounts: - {{- if .Values.usePasswordFile }} - - name: redis-password - mountPath: /secrets/ - {{- end }} - ports: - - name: metrics - containerPort: 9121 - resources: -{{ toYaml .Values.metrics.resources | indent 10 }} -{{- end }} - {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} - {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} - initContainers: - {{- if $needsVolumePermissions }} - - name: volume-permissions - image: "{{ template "redis.volumePermissions.image" . }}" - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] - securityContext: - runAsUser: 0 - resources: -{{ toYaml .Values.volumePermissions.resources | indent 10 }} - volumeMounts: - - name: redis-data - mountPath: {{ .Values.master.persistence.path }} - subPath: {{ .Values.master.persistence.subPath }} - {{- end }} - {{- if .Values.sysctlImage.enabled }} - - name: init-sysctl - image: {{ template "redis.sysctl.image" . 
}} - imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} - resources: -{{ toYaml .Values.sysctlImage.resources | indent 10 }} - {{- if .Values.sysctlImage.mountHostSys }} - volumeMounts: - - name: host-sys - mountPath: /host-sys - {{- end }} - command: -{{ toYaml .Values.sysctlImage.command | indent 10 }} - securityContext: - privileged: true - runAsUser: 0 - {{- end }} - {{- end }} - volumes: - - name: health - configMap: - name: {{ template "redis.fullname" . }}-health - defaultMode: 0755 - {{- if .Values.usePasswordFile }} - - name: redis-password - secret: - secretName: {{ template "redis.secretName" . }} - items: - - key: {{ template "redis.secretPasswordKey" . }} - path: redis-password - {{- end }} - - name: config - configMap: - name: {{ template "redis.fullname" . }} - {{- if not .Values.master.persistence.enabled }} - - name: "redis-data" - emptyDir: {} - {{- else }} - {{- if .Values.persistence.existingClaim }} - - name: "redis-data" - persistentVolumeClaim: - claimName: {{ .Values.persistence.existingClaim }} - {{- end }} - {{- end }} - {{- if .Values.sysctlImage.mountHostSys }} - - name: host-sys - hostPath: - path: /sys - {{- end }} - - name: redis-tmp-conf - emptyDir: {} - {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} - - name: sentinel-tmp-conf - emptyDir: {} - {{- end }} - {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} - volumeClaimTemplates: - - metadata: - name: redis-data - labels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - component: master - spec: - accessModes: - {{- range .Values.master.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.master.persistence.size | quote }} - {{ include "redis.master.storageClass" . 
}} - {{- end }} - updateStrategy: - type: {{ .Values.master.statefulset.updateStrategy }} - {{- if .Values.master.statefulset.rollingUpdatePartition }} - {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} - rollingUpdate: null - {{- else }} - rollingUpdate: - partition: {{ .Values.master.statefulset.rollingUpdatePartition }} - {{- end }} - {{- end }} diff --git a/chart/charts/redis/templates/redis-master-svc.yaml b/chart/charts/redis/templates/redis-master-svc.yaml deleted file mode 100755 index 3a98e66..0000000 --- a/chart/charts/redis/templates/redis-master-svc.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if not .Values.sentinel.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "redis.fullname" . }}-master - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - {{- if .Values.master.service.labels -}} - {{ toYaml .Values.master.service.labels | nindent 4 }} - {{- end -}} -{{- if .Values.master.service.annotations }} - annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} -{{- end }} -spec: - type: {{ .Values.master.service.type }} - {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} - {{- end }} - {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- with .Values.master.service.loadBalancerSourceRanges }} -{{ toYaml . | indent 4 }} -{{- end }} - {{- end }} - ports: - - name: redis - port: {{ .Values.master.service.port }} - targetPort: redis - {{- if .Values.master.service.nodePort }} - nodePort: {{ .Values.master.service.nodePort }} - {{- end }} - selector: - app: {{ template "redis.name" . 
}} - release: {{ .Release.Name }} - role: master -{{- end }} diff --git a/chart/charts/redis/templates/redis-role.yaml b/chart/charts/redis/templates/redis-role.yaml deleted file mode 100755 index 71f75ef..0000000 --- a/chart/charts/redis/templates/redis-role.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ template "redis.fullname" . }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -rules: -{{- if .Values.podSecurityPolicy.create }} - - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ template "redis.fullname" . }}] -{{- end -}} -{{- if .Values.rbac.role.rules }} -{{ toYaml .Values.rbac.role.rules | indent 2 }} -{{- end -}} -{{- end -}} diff --git a/chart/charts/redis/templates/redis-rolebinding.yaml b/chart/charts/redis/templates/redis-rolebinding.yaml deleted file mode 100755 index aceb258..0000000 --- a/chart/charts/redis/templates/redis-rolebinding.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "redis.fullname" . }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "redis.fullname" . }} -subjects: -- kind: ServiceAccount - name: {{ template "redis.serviceAccountName" . 
}} -{{- end -}} diff --git a/chart/charts/redis/templates/redis-serviceaccount.yaml b/chart/charts/redis/templates/redis-serviceaccount.yaml deleted file mode 100755 index f027176..0000000 --- a/chart/charts/redis/templates/redis-serviceaccount.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "redis.serviceAccountName" . }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- end -}} diff --git a/chart/charts/redis/templates/redis-slave-statefulset.yaml b/chart/charts/redis/templates/redis-slave-statefulset.yaml deleted file mode 100755 index f28c545..0000000 --- a/chart/charts/redis/templates/redis-slave-statefulset.yaml +++ /dev/null @@ -1,428 +0,0 @@ -{{- if .Values.cluster.enabled }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "redis.fullname" . }}-slave - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: -{{- if .Values.slave.updateStrategy }} - strategy: -{{ toYaml .Values.slave.updateStrategy | indent 4 }} -{{- end }} - replicas: {{ .Values.cluster.slaveCount }} - serviceName: {{ template "redis.fullname" . }}-headless - selector: - matchLabels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - role: slave - template: - metadata: - labels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - chart: {{ template "redis.chart" . }} - role: slave - {{- if .Values.slave.podLabels }} -{{ toYaml .Values.slave.podLabels | indent 8 }} - {{- end }} - {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} -{{ toYaml .Values.metrics.podLabels | indent 8 }} - {{- end }} - annotations: - checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . 
| sha256sum }} - checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} - {{- if .Values.slave.podAnnotations }} -{{ toYaml .Values.slave.podAnnotations | indent 8 }} - {{- end }} - {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} -{{ toYaml .Values.metrics.podAnnotations | indent 8 }} - {{- end }} - spec: -{{- include "redis.imagePullSecrets" . | indent 6 }} - {{- if .Values.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.securityContext.fsGroup }} - {{- if .Values.securityContext.sysctls }} - sysctls: -{{ toYaml .Values.securityContext.sysctls | indent 8 }} - {{- end }} - {{- end }} - serviceAccountName: "{{ template "redis.serviceAccountName" . }}" - {{- if .Values.slave.priorityClassName }} - priorityClassName: "{{ .Values.slave.priorityClassName }}" - {{- end }} - {{- if .Values.slave.nodeSelector }} - nodeSelector: -{{ toYaml .Values.slave.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.slave.tolerations }} - tolerations: -{{ toYaml .Values.slave.tolerations | indent 8 }} - {{- end }} - {{- if .Values.slave.schedulerName }} - schedulerName: "{{ .Values.slave.schedulerName }}" - {{- end }} - {{- with .Values.slave.affinity }} - affinity: -{{ tpl (toYaml .) $ | indent 8 }} - {{- end }} - containers: - - name: {{ template "redis.fullname" . }} - image: {{ template "redis.image" . 
}} - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - {{- if .Values.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - {{- end }} - command: - - /bin/bash - - -c - - | - {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} - useradd redis - chown -R redis {{ .Values.slave.persistence.path }} - {{- end }} - if [[ -n $REDIS_PASSWORD_FILE ]]; then - password_aux=`cat ${REDIS_PASSWORD_FILE}` - export REDIS_PASSWORD=$password_aux - fi - if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then - password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` - export REDIS_MASTER_PASSWORD=$password_aux - fi - if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then - cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf - fi - if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") - {{- if .Values.usePassword }} - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") - {{- else }} - ARGS+=("--protected-mode" "no") - {{- end }} - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") - {{- if .Values.slave.extraFlags }} - {{- range .Values.slave.extraFlags }} - ARGS+=({{ . | quote }}) - {{- end }} - {{- end }} - {{- if .Values.slave.command }} - {{ .Values.slave.command }} "${ARGS[@]}" - {{- else }} - redis-server "${ARGS[@]}" - {{- end }} - env: - - name: REDIS_REPLICATION_MODE - value: slave - - name: REDIS_MASTER_HOST - value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - - name: REDIS_PORT - value: {{ .Values.redisPort | quote }} - - name: REDIS_MASTER_PORT_NUMBER - value: {{ .Values.redisPort | quote }} - {{- if .Values.usePassword }} - {{- if .Values.usePasswordFile }} - - name: REDIS_PASSWORD_FILE - value: "/opt/bitnami/redis/secrets/redis-password" - - name: REDIS_MASTER_PASSWORD_FILE - value: "/opt/bitnami/redis/secrets/redis-password" - {{- else }} - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "redis.secretName" . }} - key: {{ template "redis.secretPasswordKey" . }} - - name: REDIS_MASTER_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "redis.secretName" . }} - key: {{ template "redis.secretPasswordKey" . }} - {{- end }} - {{- else }} - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - {{- end }} - ports: - - name: redis - containerPort: {{ .Values.redisPort }} - {{- if .Values.slave.livenessProbe.enabled }} - livenessProbe: - initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} - exec: - command: - - sh - - -c - {{- if .Values.sentinel.enabled }} - - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} - {{- else }} - - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} - {{- end }} - {{- end }} - - {{- if .Values.slave.readinessProbe.enabled }} - readinessProbe: - initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} - failureThreshold: {{ 
.Values.slave.readinessProbe.failureThreshold }} - exec: - command: - - sh - - -c - {{- if .Values.sentinel.enabled }} - - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} - {{- else }} - - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} - {{- end }} - {{- end }} - resources: -{{ toYaml .Values.slave.resources | indent 10 }} - volumeMounts: - - name: health - mountPath: /health - {{- if .Values.usePasswordFile }} - - name: redis-password - mountPath: /opt/bitnami/redis/secrets/ - {{- end }} - - name: redis-data - mountPath: /data - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc - {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} - - name: sentinel - image: "{{ template "sentinel.image" . }}" - imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} - {{- if .Values.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - {{- end }} - command: - - /bin/bash - - -c - - | - if [[ -n $REDIS_PASSWORD_FILE ]]; then - password_aux=`cat ${REDIS_PASSWORD_FILE}` - export REDIS_PASSWORD=$password_aux - fi - if [[ ! 
-f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then - cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf - {{- if .Values.usePassword }} - printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf - {{- if .Values.sentinel.usePassword }} - printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf - {{- end }} - {{- end }} - {{- if .Values.sentinel.staticID }} - printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf - {{- end }} - fi - - redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel - env: - {{- if .Values.usePassword }} - {{- if .Values.usePasswordFile }} - - name: REDIS_PASSWORD_FILE - value: "/opt/bitnami/redis/secrets/redis-password" - {{- else }} - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "redis.secretName" . }} - key: {{ template "redis.secretPasswordKey" . 
}} - {{- end }} - {{- else }} - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - {{- end }} - - name: REDIS_SENTINEL_PORT - value: {{ .Values.sentinel.port | quote }} - ports: - - name: redis-sentinel - containerPort: {{ .Values.sentinel.port }} - {{- if .Values.sentinel.livenessProbe.enabled }} - livenessProbe: - initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} - exec: - command: - - sh - - -c - - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} - {{- end }} - {{- if .Values.sentinel.readinessProbe.enabled}} - readinessProbe: - initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} - exec: - command: - - sh - - -c - - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} - {{- end }} - resources: -{{ toYaml .Values.sentinel.resources | indent 10 }} - volumeMounts: - - name: health - mountPath: /health - {{- if .Values.usePasswordFile }} - - name: redis-password - mountPath: /opt/bitnami/redis/secrets/ - {{- end }} - - name: redis-data - mountPath: {{ .Values.master.persistence.path }} - subPath: {{ .Values.master.persistence.subPath }} - - name: config - mountPath: /opt/bitnami/redis-sentinel/mounted-etc - - name: sentinel-tmp-conf - mountPath: /opt/bitnami/redis-sentinel/etc - {{- end }} -{{- if .Values.metrics.enabled }} - - name: metrics - image: {{ template "redis.metrics.image" . 
}} - imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} - command: - - /bin/bash - - -c - - | - if [[ -f '/secrets/redis-password' ]]; then - export REDIS_PASSWORD=$(cat /secrets/redis-password) - fi - redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} - env: - - name: REDIS_ALIAS - value: {{ template "redis.fullname" . }} - {{- if and .Values.usePassword (not .Values.usePasswordFile) }} - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "redis.secretName" . }} - key: {{ template "redis.secretPasswordKey" . }} - {{- end }} - volumeMounts: - {{- if .Values.usePasswordFile }} - - name: redis-password - mountPath: /secrets/ - {{- end }} - ports: - - name: metrics - containerPort: 9121 - resources: -{{ toYaml .Values.metrics.resources | indent 10 }} -{{- end }} - {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} - {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} - initContainers: - {{- if $needsVolumePermissions }} - - name: volume-permissions - image: "{{ template "redis.volumePermissions.image" . }}" - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] - securityContext: - runAsUser: 0 - resources: -{{ toYaml .Values.volumePermissions.resources | indent 10 }} - volumeMounts: - - name: redis-data - mountPath: {{ .Values.slave.persistence.path }} - subPath: {{ .Values.slave.persistence.subPath }} - {{- end }} - {{- if .Values.sysctlImage.enabled }} - - name: init-sysctl - image: {{ template "redis.sysctl.image" . 
}} - imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} - resources: -{{ toYaml .Values.sysctlImage.resources | indent 10 }} - {{- if .Values.sysctlImage.mountHostSys }} - volumeMounts: - - name: host-sys - mountPath: /host-sys - {{- end }} - command: -{{ toYaml .Values.sysctlImage.command | indent 10 }} - securityContext: - privileged: true - runAsUser: 0 - {{- end }} - {{- end }} - volumes: - - name: health - configMap: - name: {{ template "redis.fullname" . }}-health - defaultMode: 0755 - {{- if .Values.usePasswordFile }} - - name: redis-password - secret: - secretName: {{ template "redis.secretName" . }} - items: - - key: {{ template "redis.secretPasswordKey" . }} - path: redis-password - {{- end }} - - name: config - configMap: - name: {{ template "redis.fullname" . }} - {{- if .Values.sysctlImage.mountHostSys }} - - name: host-sys - hostPath: - path: /sys - {{- end }} - - name: sentinel-tmp-conf - emptyDir: {} - - name: redis-tmp-conf - emptyDir: {} - {{- if not .Values.slave.persistence.enabled }} - - name: redis-data - emptyDir: {} - {{- else }} - volumeClaimTemplates: - - metadata: - name: redis-data - labels: - app: {{ template "redis.name" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - component: slave - spec: - accessModes: - {{- range .Values.slave.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.slave.persistence.size | quote }} - {{ include "redis.slave.storageClass" . 
}} - {{- end }} - updateStrategy: - type: {{ .Values.slave.statefulset.updateStrategy }} - {{- if .Values.slave.statefulset.rollingUpdatePartition }} - {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} - rollingUpdate: null - {{- else }} - rollingUpdate: - partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} - {{- end }} - {{- end }} -{{- end }} diff --git a/chart/charts/redis/templates/redis-slave-svc.yaml b/chart/charts/redis/templates/redis-slave-svc.yaml deleted file mode 100755 index 052ecea..0000000 --- a/chart/charts/redis/templates/redis-slave-svc.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "redis.fullname" . }}-slave - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - {{- if .Values.slave.service.labels -}} - {{ toYaml .Values.slave.service.labels | nindent 4 }} - {{- end -}} -{{- if .Values.slave.service.annotations }} - annotations: -{{ toYaml .Values.slave.service.annotations | indent 4 }} -{{- end }} -spec: - type: {{ .Values.slave.service.type }} - {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} - {{- end }} - {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- with .Values.slave.service.loadBalancerSourceRanges }} -{{ toYaml . | indent 4 }} -{{- end }} - {{- end }} - ports: - - name: redis - port: {{ .Values.slave.service.port }} - targetPort: redis - {{- if .Values.slave.service.nodePort }} - nodePort: {{ .Values.slave.service.nodePort }} - {{- end }} - selector: - app: {{ template "redis.name" . 
}} - release: {{ .Release.Name }} - role: slave -{{- end }} diff --git a/chart/charts/redis/templates/redis-with-sentinel-svc.yaml b/chart/charts/redis/templates/redis-with-sentinel-svc.yaml deleted file mode 100755 index 984de21..0000000 --- a/chart/charts/redis/templates/redis-with-sentinel-svc.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.sentinel.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "redis.fullname" . }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - {{- if .Values.sentinel.service.labels }} - {{ toYaml .Values.sentinel.service.labels | nindent 4 }} - {{- end }} -{{- if .Values.sentinel.service.annotations }} - annotations: -{{ toYaml .Values.sentinel.service.annotations | indent 4 }} -{{- end }} -spec: - type: {{ .Values.sentinel.service.type }} - {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP -}} - loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} - {{ end -}} - {{- end -}} - ports: - - name: redis - port: {{ .Values.sentinel.service.redisPort }} - targetPort: redis - {{- if .Values.sentinel.service.redisNodePort }} - nodePort: {{ .Values.sentinel.service.redisNodePort }} - {{- end }} - - name: redis-sentinel - port: {{ .Values.sentinel.service.sentinelPort }} - targetPort: redis-sentinel - {{- if .Values.sentinel.service.sentinelNodePort }} - nodePort: {{ .Values.sentinel.service.sentinelNodePort }} - {{- end }} - selector: - app: {{ template "redis.name" . 
}} - release: {{ .Release.Name }} -{{- end }} diff --git a/chart/charts/redis/templates/secret.yaml b/chart/charts/redis/templates/secret.yaml deleted file mode 100755 index ead9c61..0000000 --- a/chart/charts/redis/templates/secret.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if and .Values.usePassword (not .Values.existingSecret) -}} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "redis.fullname" . }} - labels: - app: {{ template "redis.name" . }} - chart: {{ template "redis.chart" . }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -data: - redis-password: {{ include "redis.password" . | b64enc | quote }} -{{- end -}} diff --git a/chart/charts/redis/values-production.yaml b/chart/charts/redis/values-production.yaml deleted file mode 100755 index 7fa2d6d..0000000 --- a/chart/charts/redis/values-production.yaml +++ /dev/null @@ -1,583 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - redis: {} - -## Bitnami Redis image version -## ref: https://hub.docker.com/r/bitnami/redis/tags/ -## -image: - registry: docker.io - repository: bitnami/redis - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.7-debian-9-r50 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - -## String to partially override redis.fullname template (will maintain the release name) -## -# nameOverride: - -## String to fully override redis.fullname template -## -# fullnameOverride: - -## Cluster settings -cluster: - enabled: true - slaveCount: 3 - -## Use redis sentinel in the redis pod. This will disable the master and slave services and -## create one redis service with ports to the sentinel and the redis instances -sentinel: - enabled: false - ## Require password authentication on the sentinel itself - ## ref: https://redis.io/topics/sentinel - usePassword: true - ## Bitnami Redis Sentintel image version - ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ - ## - image: - registry: docker.io - repository: bitnami/redis-sentinel - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.7-debian-9-r44 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - masterSet: mymaster - initialCheckTimeout: 5 - quorum: 2 - downAfterMilliseconds: 60000 - failoverTimeout: 18000 - parallelSyncs: 1 - port: 26379 - ## Additional Redis configuration for the sentinel nodes - ## ref: https://redis.io/topics/config - ## - configmap: - ## Enable or disable static sentinel IDs for each replicas - ## If disabled each sentinel will generate a random id at startup - ## If enabled, each replicas will have a constant ID on each start-up - ## - staticID: false - ## Configure extra options for Redis Sentinel liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - ## Redis Sentinel resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Redis Sentinel Service properties - service: - ## Redis Sentinel Service type - type: ClusterIP - sentinelPort: 26379 - redisPort: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # sentinelNodePort: - # redisNodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - labels: {} - loadBalancerIP: - -## Specifies the Kubernetes Cluster's Domain Name. -## -clusterDomain: cluster.local - -networkPolicy: - ## Specifies whether a NetworkPolicy should be created - ## - enabled: true - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port Redis is listening - ## on. When true, Redis will accept connections from any source - ## (with the correct destination port). - ## - # allowExternal: true - - ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - -serviceAccount: - ## Specifies whether a ServiceAccount should be created - ## - create: false - ## The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the fullname template - name: - -rbac: - ## Specifies whether RBAC resources should be created - ## - create: false - - role: - ## Rules to create. 
It follows the role specification - # rules: - # - apiGroups: - # - extensions - # resources: - # - podsecuritypolicies - # verbs: - # - use - # resourceNames: - # - gce.unprivileged - rules: [] - -## Redis pod Security Context -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - ## sysctl settings for master and slave pods - ## - ## Uncomment the setting below to increase the net.core.somaxconn value - ## - # sysctls: - # - name: net.core.somaxconn - # value: "10000" - -## Use password authentication -usePassword: true -## Redis password (both master and slave) -## Defaults to a random 10-character alphanumeric string if not set and usePassword is true -## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run -## -password: -## Use existing secret (ignores previous password) -# existingSecret: -## Password key to be retrieved from Redis secret -## -# existingSecretPasswordKey: - -## Mount secrets as files instead of environment variables -usePasswordFile: false - -## Persist data to a persistent volume (Redis Master) -persistence: {} - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - # existingClaim: - -# Redis port -redisPort: 6379 - -## -## Redis Master parameters -## -master: - ## Redis command arguments - ## - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Additional Redis configuration for the master nodes - ## ref: https://redis.io/topics/config - ## - configmap: - ## Redis additional command line flags - ## - ## Can be used to specify command line flags, for example: - ## - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - extraFlags: [] - ## Comma-separated list of Redis commands to disable - ## - ## Can be used to disable Redis commands for security reasons. 
- ## Commands will be completely disabled by renaming each to an empty string. - ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Master additional pod labels and annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - podLabels: {} - podAnnotations: {} - - ## Redis Master resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Configure extra options for Redis Master liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - - ## Redis Master Node selectors and tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - ## Redis Master pod/node affinity/anti-affinity - ## - affinity: {} - - ## Redis Master Service properties - service: - ## Redis Master Service type - type: ClusterIP - port: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. 
This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - labels: {} - loadBalancerIP: - # loadBalancerSourceRanges: ["10.0.0.0/8"] - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. - ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - - ## Redis Master pod priorityClassName - # priorityClassName: {} - -## -## Redis Slave properties -## Note: service.type is a mandatory parameter -## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master -## -slave: - ## Slave Service properties - service: - ## Redis Slave Service type - type: ClusterIP - ## Redis port - port: 6379 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - labels: {} - loadBalancerIP: - # loadBalancerSourceRanges: ["10.0.0.0/8"] - - ## Redis slave port - port: 6379 - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Additional Redis configuration for the slave nodes - ## ref: https://redis.io/topics/config - ## - configmap: - ## Redis extra flags - extraFlags: [] - ## List of Redis commands to disable - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Slave pod/node affinity/anti-affinity - ## - affinity: {} - - ## Configure extra options for Redis Slave liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 10 - successThreshold: 1 - failureThreshold: 5 - - ## Redis slave Resource - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - - ## Redis slave selectors and tolerations for pod assignment - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Redis slave pod Annotation and Labels - podLabels: {} - podAnnotations: {} - - ## Redis slave pod priorityClassName - # priorityClassName: {} - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. - ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - -## Prometheus Exporter / Metrics -## -metrics: - enabled: true - - image: - registry: docker.io - repository: bitnami/redis-exporter - tag: 1.3.5-debian-9-r23 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - - ## Metrics exporter resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - # resources: {} - - ## Extra arguments for Metrics exporter, for example: - ## extraArgs: - ## check-keys: myKey,myOtherKey - # extraArgs: {} - - ## Metrics exporter pod Annotation and Labels - podAnnotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9121" - # podLabels: {} - - # Enable this if you're using https://github.com/coreos/prometheus-operator - serviceMonitor: - enabled: false - ## Specify a namespace if needed - # namespace: monitoring - # fallback to the prometheus default unless specified - # interval: 10s - ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) - ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) - ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) - selector: - prometheus: kube-prometheus - - ## Metrics exporter pod priorityClassName - # priorityClassName: {} - service: - type: ClusterIP - ## Use serviceLoadBalancerIP to request a specific static IP, - ## otherwise leave blank - # loadBalancerIP: - annotations: {} - labels: {} - -## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m - -## Redis config file -## ref: https://redis.io/topics/config -## -configmap: |- - # Enable AOF https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - -## Sysctl InitContainer -## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) -sysctlImage: - enabled: false - command: [] - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - mountHostSys: false - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m - -## PodSecurityPolicy configuration -## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ -## -podSecurityPolicy: - ## Specifies whether a PodSecurityPolicy should be created - ## - create: false diff --git a/chart/charts/redis/values.schema.json b/chart/charts/redis/values.schema.json deleted file mode 100755 index 0c91011..0000000 --- a/chart/charts/redis/values.schema.json +++ /dev/null @@ -1,152 +0,0 @@ -{ - "$schema": "http://json-schema.org/schema#", - "type": "object", - "properties": { - "usePassword": { - "type": "boolean", - "title": "Use password authentication", - "form": true - }, - "password": { - "type": "string", - "title": "Password", - "form": true, - "description": "Defaults to a random 10-character alphanumeric string if not set", - "hidden": { - "condition": false, - "value": "usePassword" - } - }, - "cluster": { - "type": "object", - "title": "Cluster Settings", 
- "form": true, - "properties": { - "enabled": { - "type": "boolean", - "form": true, - "title": "Enable master-slave", - "description": "Enable master-slave architecture" - }, - "slaveCount": { - "type": "integer", - "title": "Slave Replicas", - "form": true, - "hidden": { - "condition": false, - "value": "cluster.enabled" - } - } - } - }, - "master": { - "type": "object", - "title": "Master replicas settings", - "form": true, - "properties": { - "persistence": { - "type": "object", - "title": "Persistence for master replicas", - "form": true, - "properties": { - "enabled": { - "type": "boolean", - "form": true, - "title": "Enable persistence", - "description": "Enable persistence using Persistent Volume Claims" - }, - "size": { - "type": "string", - "title": "Persistent Volume Size", - "form": true, - "render": "slider", - "sliderMin": 1, - "sliderMax": 100, - "sliderUnit": "Gi", - "hidden": { - "condition": false, - "value": "master.persistence.enabled" - } - } - } - } - } - }, - "slave": { - "type": "object", - "title": "Slave replicas settings", - "form": true, - "hidden": { - "condition": false, - "value": "cluster.enabled" - }, - "properties": { - "persistence": { - "type": "object", - "title": "Persistence for slave replicas", - "form": true, - "properties": { - "enabled": { - "type": "boolean", - "form": true, - "title": "Enable persistence", - "description": "Enable persistence using Persistent Volume Claims" - }, - "size": { - "type": "string", - "title": "Persistent Volume Size", - "form": true, - "render": "slider", - "sliderMin": 1, - "sliderMax": 100, - "sliderUnit": "Gi", - "hidden": { - "condition": false, - "value": "slave.persistence.enabled" - } - } - } - } - } - }, - "volumePermissions": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "form": true, - "title": "Enable Init Containers", - "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final 
destination" - } - } - }, - "metrics": { - "type": "object", - "form": true, - "title": "Prometheus metrics details", - "properties": { - "enabled": { - "type": "boolean", - "title": "Create Prometheus metrics exporter", - "description": "Create a side-car container to expose Prometheus metrics", - "form": true - }, - "serviceMonitor": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "title": "Create Prometheus Operator ServiceMonitor", - "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", - "form": true, - "hidden": { - "condition": false, - "value": "metrics.enabled" - } - } - } - } - } - } - } -} diff --git a/chart/charts/redis/values.yaml b/chart/charts/redis/values.yaml deleted file mode 100755 index e9efcca..0000000 --- a/chart/charts/redis/values.yaml +++ /dev/null @@ -1,583 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - redis: {} - -## Bitnami Redis image version -## ref: https://hub.docker.com/r/bitnami/redis/tags/ -## -image: - registry: docker.io - repository: bitnami/redis - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.7-debian-9-r50 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - -## String to partially override redis.fullname template (will maintain the release name) -## -# nameOverride: - -## String to fully override redis.fullname template -## -# fullnameOverride: - -## Cluster settings -cluster: - enabled: true - slaveCount: 2 - -## Use redis sentinel in the redis pod. This will disable the master and slave services and -## create one redis service with ports to the sentinel and the redis instances -sentinel: - enabled: false - ## Require password authentication on the sentinel itself - ## ref: https://redis.io/topics/sentinel - usePassword: true - ## Bitnami Redis Sentintel image version - ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ - ## - image: - registry: docker.io - repository: bitnami/redis-sentinel - ## Bitnami Redis image tag - ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links - ## - tag: 5.0.7-debian-9-r44 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - masterSet: mymaster - initialCheckTimeout: 5 - quorum: 2 - downAfterMilliseconds: 60000 - failoverTimeout: 18000 - parallelSyncs: 1 - port: 26379 - ## Additional Redis configuration for the sentinel nodes - ## ref: https://redis.io/topics/config - ## - configmap: - ## Enable or disable static sentinel IDs for each replicas - ## If disabled each sentinel will generate a random id at startup - ## If enabled, each replicas will have a constant ID on each start-up - ## - staticID: false - ## Configure extra options for Redis Sentinel liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - ## Redis Sentinel resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Redis Sentinel Service properties - service: - ## Redis Sentinel Service type - type: ClusterIP - sentinelPort: 26379 - redisPort: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # sentinelNodePort: - # redisNodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - labels: {} - loadBalancerIP: - -## Specifies the Kubernetes Cluster's Domain Name. -## -clusterDomain: cluster.local - -networkPolicy: - ## Specifies whether a NetworkPolicy should be created - ## - enabled: false - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port Redis is listening - ## on. When true, Redis will accept connections from any source - ## (with the correct destination port). - ## - # allowExternal: true - - ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - -serviceAccount: - ## Specifies whether a ServiceAccount should be created - ## - create: false - ## The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the fullname template - name: - -rbac: - ## Specifies whether RBAC resources should be created - ## - create: false - - role: - ## Rules to create. 
It follows the role specification - # rules: - # - apiGroups: - # - extensions - # resources: - # - podsecuritypolicies - # verbs: - # - use - # resourceNames: - # - gce.unprivileged - rules: [] - -## Redis pod Security Context -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - ## sysctl settings for master and slave pods - ## - ## Uncomment the setting below to increase the net.core.somaxconn value - ## - # sysctls: - # - name: net.core.somaxconn - # value: "10000" - -## Use password authentication -usePassword: true -## Redis password (both master and slave) -## Defaults to a random 10-character alphanumeric string if not set and usePassword is true -## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run -## -password: "" -## Use existing secret (ignores previous password) -# existingSecret: -## Password key to be retrieved from Redis secret -## -# existingSecretPasswordKey: - -## Mount secrets as files instead of environment variables -usePasswordFile: false - -## Persist data to a persistent volume (Redis Master) -persistence: {} - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - # existingClaim: - -# Redis port -redisPort: 6379 - -## -## Redis Master parameters -## -master: - ## Redis command arguments - ## - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Additional Redis configuration for the master nodes - ## ref: https://redis.io/topics/config - ## - configmap: - ## Redis additional command line flags - ## - ## Can be used to specify command line flags, for example: - ## - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - extraFlags: [] - ## Comma-separated list of Redis commands to disable - ## - ## Can be used to disable Redis commands for security reasons. 
- ## Commands will be completely disabled by renaming each to an empty string. - ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Master additional pod labels and annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - podLabels: {} - podAnnotations: {} - - ## Redis Master resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Configure extra options for Redis Master liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - - ## Redis Master Node selectors and tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - ## Redis Master pod/node affinity/anti-affinity - ## - affinity: {} - - ## Redis Master Service properties - service: - ## Redis Master Service type - type: ClusterIP - port: 6379 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. 
This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - labels: {} - loadBalancerIP: - # loadBalancerSourceRanges: ["10.0.0.0/8"] - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. - ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - - ## Redis Master pod priorityClassName - # priorityClassName: {} - -## -## Redis Slave properties -## Note: service.type is a mandatory parameter -## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master -## -slave: - ## Slave Service properties - service: - ## Redis Slave Service type - type: ClusterIP - ## Redis port - port: 6379 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - labels: {} - loadBalancerIP: - # loadBalancerSourceRanges: ["10.0.0.0/8"] - - ## Redis slave port - port: 6379 - ## Can be used to specify command line arguments, for example: - ## - command: "/run.sh" - ## Additional Redis configuration for the slave nodes - ## ref: https://redis.io/topics/config - ## - configmap: - ## Redis extra flags - extraFlags: [] - ## List of Redis commands to disable - disableCommands: - - FLUSHDB - - FLUSHALL - - ## Redis Slave pod/node affinity/anti-affinity - ## - affinity: {} - - ## Configure extra options for Redis Slave liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 10 - successThreshold: 1 - failureThreshold: 5 - - ## Redis slave Resource - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - - ## Redis slave selectors and tolerations for pod assignment - # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} - # tolerations: [] - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## Redis slave pod Annotation and Labels - podLabels: {} - podAnnotations: {} - - ## Redis slave pod priorityClassName - # priorityClassName: {} - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## The path the volume will be mounted at, useful when using different - ## Redis images. - path: /data - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. - subPath: "" - ## redis data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - - ## Update strategy, can be set to RollingUpdate or onDelete by default. - ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - statefulset: - updateStrategy: RollingUpdate - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - # rollingUpdatePartition: - -## Prometheus Exporter / Metrics -## -metrics: - enabled: false - - image: - registry: docker.io - repository: bitnami/redis-exporter - tag: 1.3.5-debian-9-r23 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - - ## Metrics exporter resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - # resources: {} - - ## Extra arguments for Metrics exporter, for example: - ## extraArgs: - ## check-keys: myKey,myOtherKey - # extraArgs: {} - - ## Metrics exporter pod Annotation and Labels - podAnnotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9121" - # podLabels: {} - - # Enable this if you're using https://github.com/coreos/prometheus-operator - serviceMonitor: - enabled: false - ## Specify a namespace if needed - # namespace: monitoring - # fallback to the prometheus default unless specified - # interval: 10s - ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) - ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) - ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) - selector: - prometheus: kube-prometheus - - ## Metrics exporter pod priorityClassName - # priorityClassName: {} - service: - type: ClusterIP - ## Use serviceLoadBalancerIP to request a specific static IP, - ## otherwise leave blank - # loadBalancerIP: - annotations: {} - labels: {} - -## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m - -## Redis config file -## ref: https://redis.io/topics/config -## -configmap: |- - # Enable AOF https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - -## Sysctl InitContainer -## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) -sysctlImage: - enabled: false - command: [] - registry: docker.io - repository: bitnami/minideb - tag: stretch - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - mountHostSys: false - resources: {} - # resources: - # requests: - # memory: 128Mi - # cpu: 100m - -## PodSecurityPolicy configuration -## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ -## -podSecurityPolicy: - ## Specifies whether a PodSecurityPolicy should be created - ## - create: false -- GitLab From c9c6a361675d37f9fd78a064753fb6d9ae017318 Mon Sep 17 00:00:00 2001 From: Kevin Wilder Date: Wed, 23 Dec 2020 12:01:36 -0700 Subject: [PATCH 5/8] remove external dependencies --- chart/requirements.lock | 21 --------------------- chart/requirements.yaml | 12 ++++++------ 2 files changed, 6 insertions(+), 27 deletions(-) delete mode 100644 chart/requirements.lock diff --git a/chart/requirements.lock b/chart/requirements.lock deleted file mode 100644 index 3121d69..0000000 --- a/chart/requirements.lock +++ /dev/null @@ -1,21 +0,0 @@ -dependencies: -- name: cert-manager - repository: https://charts.jetstack.io/ - version: 0.10.1 -- name: prometheus - repository: 
https://kubernetes-charts.storage.googleapis.com/ - version: 10.0.0 -- name: postgresql - repository: https://charts.bitnami.com/bitnami - version: 8.9.4 -- name: gitlab-runner - repository: https://charts.gitlab.io/ - version: 0.18.1 -- name: grafana - repository: https://kubernetes-charts.storage.googleapis.com/ - version: 4.0.1 -- name: redis - repository: https://charts.bitnami.com/bitnami - version: 10.3.4 -digest: sha256:359c29c27577a352c6315a621e222ed8c9f2485ac0120518b4ce7e3233eadfca -generated: "2020-12-07T17:03:30.441720026-07:00" diff --git a/chart/requirements.yaml b/chart/requirements.yaml index 7277c0c..098b181 100644 --- a/chart/requirements.yaml +++ b/chart/requirements.yaml @@ -1,26 +1,26 @@ dependencies: - name: cert-manager version: 0.10.1 - repository: https://charts.jetstack.io/ + repository: file://charts/cert-manager-v0.10.1.tgz condition: certmanager.install alias: certmanager - name: prometheus version: 10.0.0 - repository: https://kubernetes-charts.storage.googleapis.com/ + repository: file://charts/prometheus-10.0.0.tgz condition: prometheus.install - name: postgresql version: 8.9.4 - repository: https://charts.bitnami.com/bitnami + repository: file://charts/postgresql-8.9.4.tgz condition: postgresql.install - name: gitlab-runner version: 0.18.1 - repository: https://charts.gitlab.io/ + repository: file://charts/gitlab-runner-0.18.1.tgz condition: gitlab-runner.install - name: grafana version: 4.0.1 - repository: https://kubernetes-charts.storage.googleapis.com/ + repository: file://charts/grafana-4.0.1.tgz condition: global.grafana.enabled - name: redis version: 10.3.4 - repository: https://charts.bitnami.com/bitnami + repository: file://charts/redis-10.3.4.tgz condition: redis.install -- GitLab From d0ee7ac8a8d9b3fb474e52da52bf871ed7a02bc9 Mon Sep 17 00:00:00 2001 From: Kevin Wilder Date: Wed, 23 Dec 2020 12:12:19 -0700 Subject: [PATCH 6/8] change dependency path --- chart/requirements.yaml | 12 ++++++------ 1 file changed, 6 
insertions(+), 6 deletions(-) diff --git a/chart/requirements.yaml b/chart/requirements.yaml index 098b181..8b902e9 100644 --- a/chart/requirements.yaml +++ b/chart/requirements.yaml @@ -1,26 +1,26 @@ dependencies: - name: cert-manager version: 0.10.1 - repository: file://charts/cert-manager-v0.10.1.tgz + repository: file://./charts/cert-manager-v0.10.1.tgz condition: certmanager.install alias: certmanager - name: prometheus version: 10.0.0 - repository: file://charts/prometheus-10.0.0.tgz + repository: file://./charts/prometheus-10.0.0.tgz condition: prometheus.install - name: postgresql version: 8.9.4 - repository: file://charts/postgresql-8.9.4.tgz + repository: file://./charts/postgresql-8.9.4.tgz condition: postgresql.install - name: gitlab-runner version: 0.18.1 - repository: file://charts/gitlab-runner-0.18.1.tgz + repository: file://./charts/gitlab-runner-0.18.1.tgz condition: gitlab-runner.install - name: grafana version: 4.0.1 - repository: file://charts/grafana-4.0.1.tgz + repository: file://./charts/grafana-4.0.1.tgz condition: global.grafana.enabled - name: redis version: 10.3.4 - repository: file://charts/redis-10.3.4.tgz + repository: file://./charts/redis-10.3.4.tgz condition: redis.install -- GitLab From c7ab738ce366bafa2fe253eff0167749ea7da1d5 Mon Sep 17 00:00:00 2001 From: Kevin Wilder Date: Tue, 5 Jan 2021 16:33:31 -0700 Subject: [PATCH 7/8] fix host --- chart/templates/bigbang/virtualservice.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chart/templates/bigbang/virtualservice.yaml b/chart/templates/bigbang/virtualservice.yaml index 76b49f5..152e331 100644 --- a/chart/templates/bigbang/virtualservice.yaml +++ b/chart/templates/bigbang/virtualservice.yaml @@ -12,7 +12,7 @@ spec: http: - route: - destination: - host: gitlab-webservice.gitlab.svc.cluster.local + host: {{ .Release.Namespace }}.gitlab-webservice.gitlab.svc.cluster.local port: number: 8181 match: @@ -23,7 +23,7 @@ spec: prefix: /admin/sidekiq route: - 
destination: - host: gitlab-webservice.gitlab.svc.cluster.local + host: {{ .Release.Namespace }}.gitlab-webservice.gitlab.svc.cluster.local port: number: 8080 {{- end }} -- GitLab From 6653bf2d75553ff6b0ba67898a5bd82f8714995b Mon Sep 17 00:00:00 2001 From: Kevin Wilder Date: Tue, 5 Jan 2021 16:41:32 -0700 Subject: [PATCH 8/8] fix host --- chart/templates/bigbang/virtualservice.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chart/templates/bigbang/virtualservice.yaml b/chart/templates/bigbang/virtualservice.yaml index 152e331..4a4aac6 100644 --- a/chart/templates/bigbang/virtualservice.yaml +++ b/chart/templates/bigbang/virtualservice.yaml @@ -12,7 +12,7 @@ spec: http: - route: - destination: - host: {{ .Release.Namespace }}.gitlab-webservice.gitlab.svc.cluster.local + host: {{ .Release.Namespace }}-gitlab-webservice.gitlab.svc.cluster.local port: number: 8181 match: @@ -23,7 +23,7 @@ spec: prefix: /admin/sidekiq route: - destination: - host: {{ .Release.Namespace }}.gitlab-webservice.gitlab.svc.cluster.local + host: {{ .Release.Namespace }}-gitlab-webservice.gitlab.svc.cluster.local port: number: 8080 {{- end }} -- GitLab