diff --git a/chart/templates/monitoring/values.yaml b/chart/templates/monitoring/values.yaml
index 16299685183248b754517a90460ad92af3e86425..90b35a5b8525283cf4598c808f7a375e548b3da8 100644
--- a/chart/templates/monitoring/values.yaml
+++ b/chart/templates/monitoring/values.yaml
@@ -22,6 +22,9 @@ networkPolicies:
   minioOperator:
     enabled: {{ .Values.addons.minioOperator.enabled }}
 
+gitlabRunner:
+  enabled: {{ .Values.addons.gitlabRunner.enabled }}
+
 istio:
   {{- $monitoringInjection := dig "istio" "injection" "enabled" .Values.monitoring }}
   enabled: {{ .Values.istio.enabled }}
@@ -58,12 +61,18 @@ alertmanager:
         imagePullPolicy: {{ .Values.imagePullPolicy }}
       - name: "config-reloader"
         imagePullPolicy: {{ .Values.imagePullPolicy }}
-    {{- if .Values.monitoring.sso.enabled }}
-    {{- $alertmanagerAuthserviceKey := (dig "selector" "key" "protect" .Values.addons.authservice.values) }}
-    {{- $alertmanagerAuthserviceValue := (dig "selector" "value" "keycloak" .Values.addons.authservice.values) }}
+    {{- if or .Values.monitoring.sso.enabled .Values.istio.enabled }}
     podMetadata:
+      {{- if .Values.monitoring.sso.enabled }}
+      {{- $alertmanagerAuthserviceKey := (dig "selector" "key" "protect" .Values.addons.authservice.values) }}
+      {{- $alertmanagerAuthserviceValue := (dig "selector" "value" "keycloak" .Values.addons.authservice.values) }}
       labels:
         {{ $alertmanagerAuthserviceKey }}: {{ $alertmanagerAuthserviceValue }}
+      {{- end }}
+      {{- if .Values.istio.enabled }}
+      annotations:
+        {{ include "istioAnnotation" . }}
+      {{- end }}
     {{- end }}
 prometheus:
   prometheusSpec:
@@ -74,12 +83,18 @@ prometheus:
         imagePullPolicy: {{ .Values.imagePullPolicy }}
       - name: "config-reloader"
         imagePullPolicy: {{ .Values.imagePullPolicy }}
-    {{- if .Values.monitoring.sso.enabled }}
-    {{- $prometheusAuthserviceKey := (dig "selector" "key" "protect" .Values.addons.authservice.values) }}
-    {{- $prometheusAuthserviceValue := (dig "selector" "value" "keycloak" .Values.addons.authservice.values) }}
+    {{- if or .Values.monitoring.sso.enabled .Values.istio.enabled }}
     podMetadata:
+      {{- if .Values.monitoring.sso.enabled }}
+      {{- $prometheusAuthserviceKey := (dig "selector" "key" "protect" .Values.addons.authservice.values) }}
+      {{- $prometheusAuthserviceValue := (dig "selector" "value" "keycloak" .Values.addons.authservice.values) }}
       labels:
         {{ $prometheusAuthserviceKey }}: {{ $prometheusAuthserviceValue }}
+      {{- end }}
+      {{- if .Values.istio.enabled }}
+      annotations:
+        {{ include "istioAnnotation" . }}
+      {{- end }}
     {{- end }}
 
 anchore:
@@ -106,6 +121,11 @@ grafana:
   sidecar:
     imagePullPolicy: {{ .Values.imagePullPolicy }}
 
+  {{- if .Values.istio.enabled }}
+  podAnnotations:
+    {{ include "istioAnnotation" . }}
+  {{- end }}
+
   {{- if .Values.loki.enabled }}
   additionalDataSources:
     - name: Loki
@@ -141,10 +161,15 @@ grafana:
       {{- list "tls_client_key" .tls_client_key | include "bigbang.addValueIfSet" | indent 6 }}
     {{- end }}
 
-  {{- if .Values.loki.enabled }}
+  {{- if or .Values.loki.enabled .Values.addons.gitlabRunner.enabled }}
   plugins:
+  {{- if .Values.loki.enabled }}
     - grafana-piechart-panel
   {{- end }}
+  {{- if .Values.addons.gitlabRunner.enabled }}
+    - grafana-polystat-panel
+  {{- end }}
+  {{- end }}
 
 prometheus-node-exporter:
   image:
@@ -153,6 +178,11 @@ prometheus-node-exporter:
   imagePullSecrets:
   - name: private-registry
 
+  {{- if .Values.istio.enabled }}
+  podAnnotations:
+    {{ include "istioAnnotation" . }}
+  {{- end }}
+
   {{- if .Values.openshift }}
   service:
     targetPort: 9102
@@ -165,6 +195,11 @@ kube-state-metrics:
   imagePullSecrets:
     - name: private-registry
 
+  {{- if .Values.istio.enabled }}
+  podAnnotations:
+    {{ include "istioAnnotation" . }}
+  {{- end }}
+
 prometheusOperator:
   image:
     pullPolicy: {{ .Values.imagePullPolicy }}
@@ -175,4 +210,9 @@ prometheusOperator:
     patch:
       image:
         pullPolicy: {{ .Values.imagePullPolicy }}
+
+  {{- if .Values.istio.enabled }}
+  podAnnotations:
+    {{ include "istioAnnotation" . }}
+  {{- end }}
 {{- end -}}
diff --git a/chart/values.yaml b/chart/values.yaml
index 9de6e685136d3354f1cb7c43af3f8d32efe59889..8c3b2e51a6bc5671b9334855e3bdc7f3cb0f4d82 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -310,7 +310,7 @@ gatekeeper:
   git:
     repo: https://repo1.dso.mil/platform-one/big-bang/apps/core/policy.git
     path: "./chart"
-    tag: "3.6.0-bb.2"
+    tag: "3.7.0-bb.0"
 
   # -- Flux reconciliation overrides specifically for the OPA Gatekeeper Package
   flux:
@@ -467,7 +467,7 @@ monitoring:
   git:
     repo: https://repo1.dso.mil/platform-one/big-bang/apps/core/monitoring.git
     path: "./chart"
-    tag: "23.1.6-bb.2"
+    tag: "23.1.6-bb.3"
 
   # -- Flux reconciliation overrides specifically for the Monitoring Package
   flux:
diff --git a/docs/developer/scripts/k3d-dev.sh b/docs/developer/scripts/k3d-dev.sh
index 0c12d3ae5af8ee3f2c4c1122f8d1f7c4da5de1a3..bf1c0c0345c2d4b7abeb6f9b71a08a5a9ed12c00 100755
--- a/docs/developer/scripts/k3d-dev.sh
+++ b/docs/developer/scripts/k3d-dev.sh
@@ -54,7 +54,7 @@ while [ -n "$1" ]; do # while loop starts
       BIG_INSTANCE=true
       ;;
 
-    -p) echo "-p option passed to cretate k3d cluster with private ip"
+    -p) echo "-p option passed to create k3d cluster with private ip"
       PRIVATE_IP=true
       ;;
 
@@ -70,7 +70,14 @@ while [ -n "$1" ]; do # while loop starts
 # If instance exists then terminate it
 if [[ ! -z "${AWSINSTANCEIDs}" ]]; then
   echo "aws instances being terminated: ${AWSINSTANCEIDs}"
-  # TODO: should we add a user confirmation prompt here for safety?
+
+  read -p "Are you sure you want to delete these instances (y/n)? " -r
+  if [[ ! $REPLY =~ ^[Yy]$ ]]
+  then
+    echo
+    exit 1
+  fi
+
   aws ec2 terminate-instances --instance-ids ${AWSINSTANCEIDs} &>/dev/null
   echo -n "waiting for instance termination..."
   aws ec2 wait instance-terminated --instance-ids ${AWSINSTANCEIDs} &> /dev/null
@@ -295,7 +302,10 @@ ssh-keygen -f "${HOME}/.ssh/known_hosts" -R "${PublicIP}"
 
 echo "ssh init"
 # this is a do-nothing remote ssh command just to initialize ssh and make sure that the connection is working
-ssh -i ~/.ssh/${KeyName}.pem -o ConnectionAttempts=10 -o StrictHostKeyChecking=no ubuntu@${publicIP} "hostname"
+until ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "hostname"; do
+  sleep 5
+  echo "Retry ssh command.."
+done
 echo
 
 ##### Configure Instance
@@ -349,7 +359,7 @@ echo
 echo
 # install k3d on instance
 echo "Installing k3d on instance"
-ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.4.7 bash"
+ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.2.2 bash"
 echo
 echo "k3d version"
 ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "k3d version"
@@ -364,7 +374,7 @@ then
   ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "docker network create k3d-network --driver=bridge --subnet=172.20.0.0/16"
 
   # create k3d cluster
-  ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "k3d cluster create --servers 1 --agents 3 --volume /etc/machine-id:/etc/machine-id --k3s-server-arg "--disable=traefik" --k3s-server-arg "--disable=metrics-server" --k3s-server-arg "--tls-san=${PrivateIP}" --k3s-server-arg "--disable=servicelb" --network k3d-network --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443"
+  ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "k3d cluster create --servers 1 --agents 3 --volume /etc/machine-id:/etc/machine-id@server:0 --volume /etc/machine-id:/etc/machine-id@agent:0,1,2 --k3s-arg "--disable=traefik@server:0" --k3s-arg "--disable=metrics-server@server:0" --k3s-arg "--tls-san=${PrivateIP}@server:0" --k3s-arg "--disable=servicelb@server:0" --network k3d-network --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443"
 
   ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "kubectl config use-context k3d-k3s-default"
   ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "kubectl cluster-info"
@@ -404,7 +414,7 @@ then
 
 elif [[ "$PRIVATE_IP" == true ]]
 then
-  ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "k3d cluster create --servers 1 --agents 3 --volume /etc/machine-id:/etc/machine-id --k3s-server-arg "--disable=traefik" --k3s-server-arg "--disable=metrics-server" --k3s-server-arg "--tls-san=${PrivateIP}" --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443"
+  ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "k3d cluster create --servers 1 --agents 3 --volume /etc/machine-id:/etc/machine-id@server:0 --volume /etc/machine-id:/etc/machine-id@agent:0,1,2 --k3s-arg "--disable=traefik@server:0" --k3s-arg "--disable=metrics-server@server:0" --k3s-arg "--tls-san=${PrivateIP}@server:0" --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443"
   ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "kubectl config use-context k3d-k3s-default"
   ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "kubectl cluster-info"
   echo
@@ -414,7 +424,7 @@ then
   $sed_gsed -i "s/0\.0\.0\.0/${PrivateIP}/g" ~/.kube/${AWSUSERNAME}-dev-config
 else
   # default is public ip
-  ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "k3d cluster create --servers 1 --agents 3 --volume /etc/machine-id:/etc/machine-id --k3s-server-arg "--disable=traefik" --k3s-server-arg "--disable=metrics-server" --k3s-server-arg "--tls-san=${PublicIP}" --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443"
+  ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "k3d cluster create --servers 1 --agents 3 --volume /etc/machine-id:/etc/machine-id@server:0 --volume /etc/machine-id:/etc/machine-id@agent:0,1,2 --k3s-arg "--disable=traefik@server:0" --k3s-arg "--disable=metrics-server@server:0" --k3s-arg "--tls-san=${PublicIP}@server:0" --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443"
   ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "kubectl config use-context k3d-k3s-default"
   ssh -i ~/.ssh/${KeyName}.pem -o StrictHostKeyChecking=no ubuntu@${PublicIP} "kubectl cluster-info"