diff --git a/CHANGELOG.md b/CHANGELOG.md index cf9a1ba99d88f060efab8c45852aecbb89ef6691..8839eb0b197aeb2b740dedf2eab5dc4c8a28c9d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). --- +## [2.31.0] + +- [!2.31.0](https://repo1.dso.mil/big-bang/bigbang/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&milestone_title=2.31.0); List of merge requests in this release. + ## [2.30.0] - [!2.30.0](https://repo1.dso.mil/big-bang/bigbang/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&milestone_title=2.30.0); List of merge requests in this release. diff --git a/base/gitrepository.yaml b/base/gitrepository.yaml index e5a135bc5b0d478eae9b8614240f13764782719f..36bbeb50a2bab1cfe117b78dc6838cbb12da83c7 100644 --- a/base/gitrepository.yaml +++ b/base/gitrepository.yaml @@ -11,4 +11,4 @@ spec: interval: 10m url: https://repo1.dso.mil/big-bang/bigbang.git ref: - tag: 2.30.0 + tag: 2.31.0 diff --git a/chart/Chart.yaml b/chart/Chart.yaml index 654e225f26026dd7b5dd2ada3821c5b469a46bfd..aa19a7094aa5bc9fb70971010df2866e9be6f04e 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -1,6 +1,7 @@ apiVersion: v2 name: bigbang -version: 2.30.0 +version: 2.31.0 +kubeVersion: ">=1.28.0-0" description: Big Bang is a declarative, continuous delivery tool for core DoD hardened and approved packages into a Kubernetes cluster. 
type: application diff --git a/chart/templates/external-secrets/git-credentials.yaml b/chart/templates/external-secrets/git-credentials.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d97643d654fe41f7b80cb753755ec29ec21593c --- /dev/null +++ b/chart/templates/external-secrets/git-credentials.yaml @@ -0,0 +1,7 @@ +{{- $gitCredsSecretDict := dict + "name" "externalSecrets" + "targetScope" .Values.addons.externalSecrets + "releaseName" .Release.Name + "releaseNamespace" .Release.Namespace +}} +{{- include "gitCredsSecret" $gitCredsSecretDict | nindent 0 -}} diff --git a/chart/templates/external-secrets/gitrepository.yaml b/chart/templates/external-secrets/gitrepository.yaml new file mode 100644 index 0000000000000000000000000000000000000000..82014b9d8504c661c34ea46ab3e8e076c0964b9c --- /dev/null +++ b/chart/templates/external-secrets/gitrepository.yaml @@ -0,0 +1,24 @@ +{{- if and (eq .Values.addons.externalSecrets.sourceType "git") .Values.addons.externalSecrets.enabled }} +{{- $gitCredsDict := dict + "name" "externalSecrets" + "packageGitScope" .Values.addons.externalSecrets.git + "rootScope" . + "releaseName" .Release.Name +}} +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: external-secrets + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: external-secrets + app.kubernetes.io/component: "core" + {{- include "commonLabels" . | nindent 4}} +spec: + interval: {{ .Values.flux.interval }} + url: {{ .Values.addons.externalSecrets.git.repo }} + ref: + {{- include "validRef" .Values.addons.externalSecrets.git | nindent 4 }} + {{ include "gitIgnore" . 
}} + {{- include "gitCredsExtended" $gitCredsDict | nindent 2 }} +{{- end }} diff --git a/chart/templates/external-secrets/helmrelease.yaml b/chart/templates/external-secrets/helmrelease.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1ff84d188e27bb2a4669df83766f25fc6a76c44c --- /dev/null +++ b/chart/templates/external-secrets/helmrelease.yaml @@ -0,0 +1,73 @@ +{{- $fluxSettings := merge .Values.addons.externalSecrets.flux .Values.flux -}} +{{- if .Values.addons.externalSecrets.enabled }} +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: external-secrets + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: external-secrets + app.kubernetes.io/component: "core" + {{- include "commonLabels" . | nindent 4}} + annotations: + checksum/bigbang-values: {{ include (print $.Template.BasePath "/external-secrets/values.yaml") . | sha256sum }} +spec: + targetNamespace: external-secrets + chart: + spec: + {{- if eq .Values.addons.externalSecrets.sourceType "git" }} + chart: {{ .Values.addons.externalSecrets.git.path }} + sourceRef: + kind: GitRepository + name: external-secrets + namespace: {{ .Release.Namespace }} + {{- else }} + chart: {{ .Values.addons.externalSecrets.helmRepo.chartName }} + version: {{ .Values.addons.externalSecrets.helmRepo.tag }} + sourceRef: + kind: HelmRepository + name: {{ .Values.addons.externalSecrets.helmRepo.repoName }} + namespace: {{ .Release.Namespace }} + {{- $repoType := include "getRepoType" (dict "repoName" .Values.addons.externalSecrets.helmRepo.repoName "allRepos" .Values.helmRepositories) -}} + {{- if (and .Values.addons.externalSecrets.helmRepo.cosignVerify (eq $repoType "oci")) }} # Needs to be an OCI repo + verify: + provider: cosign + secretRef: + name: {{ printf "%s-cosign-pub" .Values.addons.externalSecrets.helmRepo.repoName }} + {{- end }} + {{- end }} + interval: 5m + + {{- toYaml $fluxSettings | nindent 2 }} + + {{- if 
.Values.addons.externalSecrets.postRenderers }} + postRenderers: + {{- toYaml .Values.addons.externalSecrets.postRenderers | nindent 2 }} + {{- end }} + valuesFrom: + - name: {{ .Release.Name }}-external-secrets-values + kind: Secret + valuesKey: "common" + - name: {{ .Release.Name }}-external-secrets-values + kind: Secret + valuesKey: "defaults" + - name: {{ .Release.Name }}-external-secrets-values + kind: Secret + valuesKey: "overlays" + + {{- if or .Values.istio.enabled .Values.kyvernoPolicies.enabled .Values.monitoring.enabled }} + dependsOn: + {{- if .Values.istio.enabled }} + - name: istio + namespace: {{ .Release.Namespace }} + {{- end }} + {{- if .Values.kyvernoPolicies.enabled }} + - name: kyverno-policies + namespace: {{ .Release.Namespace }} + {{- end }} + {{- if .Values.monitoring.enabled }} + - name: monitoring + namespace: {{ .Release.Namespace }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/chart/templates/external-secrets/imagepullsecret.yaml b/chart/templates/external-secrets/imagepullsecret.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c01b2b78d1728d828fd42c71241dcf3f5437311 --- /dev/null +++ b/chart/templates/external-secrets/imagepullsecret.yaml @@ -0,0 +1,12 @@ +{{- if .Values.addons.externalSecrets.enabled }} +{{- if ( include "imagePullSecret" . ) }} +apiVersion: v1 +kind: Secret +metadata: + name: private-registry + namespace: external-secrets +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ template "imagePullSecret" . 
}} +{{- end }} +{{- end }} diff --git a/chart/templates/external-secrets/namespace.yaml b/chart/templates/external-secrets/namespace.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8a997f657bba1b66ac0cbb804ff7ee686d06209a --- /dev/null +++ b/chart/templates/external-secrets/namespace.yaml @@ -0,0 +1,13 @@ +{{- if .Values.addons.externalSecrets.enabled }} +apiVersion: v1 +kind: Namespace +metadata: + labels: + meta.helm.sh/release-namespace: bigbang + meta.helm.sh/release-name: bigbang + app.kubernetes.io/name: external-secrets + app.kubernetes.io/component: "core" + {{- include "commonLabels" . | nindent 4}} + istio-injection: enabled + name: external-secrets +{{- end }} diff --git a/chart/templates/external-secrets/values.yaml b/chart/templates/external-secrets/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3c979f079dbb817b27d1e509a50629336545cb97 --- /dev/null +++ b/chart/templates/external-secrets/values.yaml @@ -0,0 +1,26 @@ +{{- if .Values.addons.externalSecrets.enabled }} +{{- include "values-secret" (dict "root" $ "package" .Values.addons.externalSecrets "name" "external-secrets" "defaults" (include "bigbang.defaults.external-secrets" .)) }} +{{- end }} + +{{- define "bigbang.defaults.external-secrets" -}} + +image: + imagePullPolicy: {{ .Values.imagePullPolicy }} + +monitoring: + enabled: {{ .Values.monitoring.enabled }} + +networkPolicies: + enabled: {{ .Values.networkPolicies.enabled }} + controlPlaneCidr: {{ .Values.networkPolicies.controlPlaneCidr }} + +{{- if .Values.istio.enabled }} +annotations: + {{ include "istioAnnotation" . 
}} +{{- end }} + +istio: + enabled: {{ .Values.istio.enabled }} + +openshift: {{ .Values.openshift }} +{{- end -}} diff --git a/chart/templates/kyverno-policies/values.yaml b/chart/templates/kyverno-policies/values.yaml index ad1b805b9da99da208c5177bf6a33151270b65c7..85913b883f67051864aaba1d0e12d111c23dc77d 100644 --- a/chart/templates/kyverno-policies/values.yaml +++ b/chart/templates/kyverno-policies/values.yaml @@ -54,7 +54,7 @@ policies: # Istio services (istio ingress) can create type: NodePort services disallow-nodeport-services: - validationFailureAction: enforce + validationFailureAction: Enforce {{- if $nodePortIngressGateways }} exclude: any: @@ -71,7 +71,7 @@ policies: disallow-image-tags: enabled: true - validationFailureAction: enforce + validationFailureAction: Enforce disallow-istio-injection-bypass: enabled: {{ .Values.istio.enabled }} @@ -84,7 +84,7 @@ policies: disallow-namespaces: enabled: true - validationFailureAction: enforce + validationFailureAction: Enforce parameters: disallow: - bigbang @@ -144,6 +144,13 @@ policies: - source-controller* - kustomize-controller* {{- end }} + {{- if .Values.addons.externalSecrets.enabled }} + - resources: + namespaces: + - external-secrets + names: + - external-secrets* + {{- end }} {{- if or .Values.fluentbit.enabled .Values.monitoring.enabled .Values.twistlock.enabled }} disallow-tolerations: @@ -229,7 +236,7 @@ policies: # Kyverno Beta feature - https://kyverno.io/docs/writing-policies/verify-images/ require-image-signature: enabled: false - validationFailureAction: audit + validationFailureAction: Audit require-istio-on-namespaces: enabled: {{ .Values.istio.enabled }} @@ -433,7 +440,7 @@ policies: {{- end }} restrict-capabilities: - validationFailureAction: enforce + validationFailureAction: Enforce # NEEDS FURTHER JUSTIFICATION # Twistlock Defenders require the following capabilities # - NET_ADMIN - Process monitoring and Iptables @@ -479,7 +486,7 @@ policies: {{- end }} restrict-host-path-mount: - 
validationFailureAction: enforce + validationFailureAction: Enforce {{- if or .Values.fluentbit.enabled .Values.monitoring.enabled .Values.promtail.enabled .Values.twistlock.enabled .Values.neuvector.enabled $deployNodeAgent }} exclude: any: @@ -569,10 +576,10 @@ policies: # To override either disable this policy (not ideal) or add an allowed wildcard matching where local paths are provisioned. # See `docs/assets/configs/example/policy-overrides-k3d.yaml` for an example of how to do this for k3d. restrict-host-path-mount-pv: - validationFailureAction: enforce + validationFailureAction: Enforce restrict-host-path-write: - validationFailureAction: enforce + validationFailureAction: Enforce {{- if or .Values.neuvector.enabled .Values.twistlock.enabled }} exclude: any: @@ -621,7 +628,7 @@ policies: {{- end }} restrict-image-registries: - validationFailureAction: enforce + validationFailureAction: Enforce parameters: allow: - registry1.dso.mil diff --git a/chart/templates/neuvector/values.yaml b/chart/templates/neuvector/values.yaml index f4bbba5ebd4604f3608492d4e22d04be7ffe9355..49c2ab1c276a259b5c53212818da20971996aac4 100644 --- a/chart/templates/neuvector/values.yaml +++ b/chart/templates/neuvector/values.yaml @@ -104,8 +104,6 @@ cve: {{- if .Values.istio.enabled }} manager: - env: - ssl: false {{- if $istioInjection }} podAnnotations: {{ include "istioAnnotation" . 
}} diff --git a/chart/templates/package/gitrepository.yaml b/chart/templates/package/gitrepository.yaml index df9948efe1353ade97785e35d1c45dee57c54f5b..d4267b718eb701d7ab3fb7fd34c75032b1576c4e 100644 --- a/chart/templates/package/gitrepository.yaml +++ b/chart/templates/package/gitrepository.yaml @@ -17,7 +17,11 @@ apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: name: {{ $pkg }} - namespace: {{ dig "namespace" "name" $pkg $vals }} + namespace: {{ if dig "helmRelease" "namespace" nil (index $.Values.packages $pkg) }} + {{ dig "helmRelease" "namespace" "" (index $.Values.packages $pkg) }} + {{ else }} + {{ default (dig "namespace" "name" $pkg $vals) "" }} + {{ end }} labels: app.kubernetes.io/name: {{ $pkg }} {{- include "commonLabels" $ | nindent 4 }} diff --git a/chart/templates/package/helmrelease.yaml b/chart/templates/package/helmrelease.yaml index e1ded7507e707a8cd720ec680ebaf39e804398d8..603d77ab9d1221134a27dce4969a8e5d2acf2d25 100644 --- a/chart/templates/package/helmrelease.yaml +++ b/chart/templates/package/helmrelease.yaml @@ -13,7 +13,11 @@ apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ $pkg }} - namespace: {{ dig "namespace" "name" $pkg $vals }} + namespace: {{ if dig "helmRelease" "namespace" nil (index $.Values.packages $pkg) }} + {{ dig "helmRelease" "namespace" "" (index $.Values.packages $pkg) }} + {{ else }} + {{ default (dig "namespace" "name" $pkg $vals) "" }} + {{ end }} labels: app.kubernetes.io/name: {{ $pkg }} {{- include "commonLabels" $ | nindent 4 }} @@ -21,6 +25,7 @@ metadata: checksum/bigbang-values: {{ (toJson $vals.values) | sha256sum }} spec: releaseName: {{ $pkg }} + targetNamespace: {{ dig "namespace" "name" $pkg $vals }} chart: spec: {{- if $vals.git }} @@ -28,7 +33,11 @@ spec: sourceRef: kind: GitRepository name: {{ $pkg }} - namespace: {{ dig "namespace" "name" $pkg $vals }} + namespace: {{ if dig "helmRelease" "namespace" nil (index $.Values.packages $pkg) }} + {{ dig 
"helmRelease" "namespace" "" (index $.Values.packages $pkg) }} + {{ else }} + {{ default (dig "namespace" "name" $pkg $vals) "" }} + {{ end }} {{- else if $vals.helmRepo }} chart: {{ dig "helmRepo" "chartName" $pkg $vals }} version: {{ dig "helmRepo" "tag" nil $vals }} diff --git a/chart/templates/package/values.yaml b/chart/templates/package/values.yaml index 9937ad5e90fd6c83a72b2b03f4abdc11015099e4..01183c82618e71af71d55c087ca13c7ec73053ca 100644 --- a/chart/templates/package/values.yaml +++ b/chart/templates/package/values.yaml @@ -10,12 +10,16 @@ apiVersion: v1 kind: Secret metadata: name: {{ $pkg }}-values - namespace: {{ dig "namespace" "name" $pkg $vals }} + namespace: {{ if dig "helmRelease" "namespace" nil (index $.Values.packages $pkg) }} + {{ dig "helmRelease" "namespace" "" (index $.Values.packages $pkg) }} + {{ else }} + {{ default (dig "namespace" "name" $pkg $vals) "" }} + {{ end }} labels: {{- include "commonLabels" $ | nindent 4 }} type: Opaque stringData: - {{ if and (dig "enabled" true $vals) (not $vals.kustomize) -}} + {{ if (dig "enabled" true $vals) -}} values.yaml: | bigbang: {{- include "values-bigbang" $.Values | nindent 6 }} diff --git a/chart/templates/thanos/values.yaml b/chart/templates/thanos/values.yaml index 751b64987dbc18425c9e72bd24673397b958b68d..ea9a8bf067fad7c6776846268a4b19303caad663 100644 --- a/chart/templates/thanos/values.yaml +++ b/chart/templates/thanos/values.yaml @@ -57,12 +57,6 @@ monitoring: storegateway: enabled: true -compactor: - enabled: true - retentionResolutionRaw: 30d - retentionResolution5m: 30d - retentionResolution1h: 10y - query: dnsDiscovery: # to allow lookups to work with and without Istio enabled, we disable k8s dns service @@ -105,9 +99,6 @@ storegateway: compactor: enabled: true - retentionResolutionRaw: 30d - retentionResolution5m: 30d - retentionResolution1h: 30d query: extraFlags: diff --git a/chart/templates/wrapper/helmrelease.yaml b/chart/templates/wrapper/helmrelease.yaml index 
f30b0b4dd18fe7daaed9d56a8ab8480022787b95..9fe338bc09fa962983e996ebc65fb42abe9f1166 100644 --- a/chart/templates/wrapper/helmrelease.yaml +++ b/chart/templates/wrapper/helmrelease.yaml @@ -7,7 +7,11 @@ apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ $pkg }}-wrapper - namespace: {{ dig "namespace" "name" $pkg $vals }} + namespace: {{ if dig "helmRelease" "namespace" nil (index $.Values.packages $pkg) }} + {{ dig "helmRelease" "namespace" "" (index $.Values.packages $pkg) }} + {{ else }} + {{ default (dig "namespace" "name" $pkg $vals) "" }} + {{ end }} labels: app.kubernetes.io/name: {{ $pkg }}-wrapper {{- include "commonLabels" $ | nindent 4 }} diff --git a/chart/templates/wrapper/values.yaml b/chart/templates/wrapper/values.yaml index 7c05114408ef044798ca653be3d94af234afe408..4ebb26ae3fa6087748abf3d3ac4b46fe1c34a568 100644 --- a/chart/templates/wrapper/values.yaml +++ b/chart/templates/wrapper/values.yaml @@ -6,7 +6,11 @@ apiVersion: v1 kind: Secret metadata: name: {{ $pkg }}-wrapper-values - namespace: {{ dig "namespace" "name" $pkg $vals }} + namespace: {{ if dig "helmRelease" "namespace" nil (index $.Values.packages $pkg) }} + {{ dig "helmRelease" "namespace" "" (index $.Values.packages $pkg) }} + {{ else }} + {{ default (dig "namespace" "name" $pkg $vals) "" }} + {{ end }} type: Opaque stringData: values.yaml: | diff --git a/chart/values.yaml b/chart/values.yaml index f8d18245dc301cc3a564142cc9e5a803dda2cd29..733b799258978fe0052cd06a387bd9a57a525755 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -238,11 +238,11 @@ istio: git: repo: https://repo1.dso.mil/big-bang/product/packages/istio-controlplane.git path: "./chart" - tag: "1.22.2-bb.1" + tag: "1.22.3-bb.1" helmRepo: repoName: "registry1" chartName: "istio" - tag: "1.22.2-bb.1" + tag: "1.22.3-bb.1" # -- If the HelmRelease should verify the cosign signature of the HelmRepo (only relevant if Repo is OCI). Set to 'false' to disable verification. 
# cosignVerify: @@ -380,11 +380,11 @@ istioOperator: git: repo: https://repo1.dso.mil/big-bang/product/packages/istio-operator.git path: "./chart" - tag: "1.22.2-bb.0" + tag: "1.22.3-bb.0" helmRepo: repoName: "registry1" chartName: "istio-operator" - tag: "1.22.2-bb.0" + tag: "1.22.3-bb.0" # -- Flux reconciliation overrides specifically for the Istio Operator Package flux: {} @@ -554,11 +554,11 @@ kyverno: git: repo: https://repo1.dso.mil/big-bang/product/packages/kyverno.git path: "./chart" - tag: "3.1.4-bb.8" + tag: "3.2.5-bb.3" helmRepo: repoName: "registry1" chartName: "kyverno" - tag: "3.1.4-bb.8" + tag: "3.2.5-bb.3" # -- Flux reconciliation overrides specifically for the Kyverno Package flux: {} @@ -579,11 +579,11 @@ kyvernoPolicies: git: repo: https://repo1.dso.mil/big-bang/product/packages/kyverno-policies.git path: ./chart - tag: "3.0.4-bb.33" + tag: "3.2.5-bb.0" helmRepo: repoName: "registry1" chartName: "kyverno-policies" - tag: "3.0.4-bb.33" + tag: "3.2.5-bb.0" # -- Flux reconciliation overrides specifically for the Kyverno Package flux: {} @@ -712,11 +712,11 @@ fluentbit: git: repo: https://repo1.dso.mil/big-bang/product/packages/fluentbit.git path: "./chart" - tag: "0.47.1-bb.0" + tag: "0.47.3-bb.0" helmRepo: repoName: "registry1" chartName: "fluentbit" - tag: "0.47.1-bb.0" + tag: "0.47.3-bb.0" # -- Flux reconciliation overrides specifically for the Fluent-Bit Package flux: {} @@ -765,11 +765,11 @@ loki: git: repo: https://repo1.dso.mil/big-bang/product/packages/loki.git path: "./chart" - tag: "6.6.4-bb.2" + tag: "6.7.1-bb.0" helmRepo: repoName: "registry1" chartName: "loki" - tag: "6.6.4-bb.2" + tag: "6.7.1-bb.0" # -- Flux reconciliation overrides specifically for the Loki Package flux: {} @@ -822,11 +822,11 @@ neuvector: git: repo: https://repo1.dso.mil/big-bang/product/packages/neuvector.git path: "./chart" - tag: "2.7.7-bb.1" + tag: "2.7.7-bb.2" helmRepo: repoName: "registry1" chartName: "neuvector" - tag: "2.7.7-bb.1" + tag: "2.7.7-bb.2" # -- 
Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". ingress: @@ -942,11 +942,11 @@ monitoring: git: repo: https://repo1.dso.mil/big-bang/product/packages/monitoring.git path: "./chart" - tag: "60.4.0-bb.5" + tag: "61.2.0-bb.0" helmRepo: repoName: "registry1" chartName: "monitoring" - tag: "60.4.0-bb.5" + tag: "61.2.0-bb.0" # -- Flux reconciliation overrides specifically for the Monitoring Package flux: @@ -997,11 +997,11 @@ grafana: git: repo: https://repo1.dso.mil/big-bang/product/packages/grafana.git path: "./chart" - tag: "8.2.2-bb.1" + tag: "8.3.4-bb.0" helmRepo: repoName: "registry1" chartName: "grafana" - tag: "8.2.2-bb.1" + tag: "8.3.4-bb.0" # -- Flux reconciliation overrides specifically for the Monitoring Package flux: {} @@ -1048,11 +1048,11 @@ twistlock: git: repo: https://repo1.dso.mil/big-bang/product/packages/twistlock.git path: "./chart" - tag: "0.15.0-bb.14" + tag: "0.15.0-bb.16" helmRepo: repoName: "registry1" chartName: "twistlock" - tag: "0.15.0-bb.14" + tag: "0.15.0-bb.16" # -- Flux reconciliation overrides specifically for the Twistlock Package flux: {} @@ -1095,11 +1095,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/argocd.git path: "./chart" - tag: "7.3.4-bb.0" + tag: "7.3.9-bb.0" helmRepo: repoName: "registry1" chartName: "argocd" - tag: "7.3.4-bb.0" + tag: "7.3.9-bb.0" # -- Flux reconciliation overrides specifically for the ArgoCD Package flux: {} @@ -1152,11 +1152,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/authservice.git path: "./chart" - tag: "1.0.1-bb.2" + tag: "1.0.1-bb.3" helmRepo: repoName: "registry1" chartName: "authservice" - tag: "1.0.1-bb.2" + tag: "1.0.1-bb.3" # -- Flux reconciliation overrides specifically for the Authservice Package flux: {} @@ -1183,11 +1183,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/minio-operator.git path: "./chart" - tag: "5.0.15-bb.1" + tag: "5.0.16-bb.1" 
helmRepo: repoName: "registry1" chartName: "minio-operator" - tag: "5.0.15-bb.1" + tag: "5.0.16-bb.1" # -- Flux reconciliation overrides specifically for the Minio Operator Package flux: {} @@ -1212,11 +1212,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/minio.git path: "./chart" - tag: "5.0.15-bb.7" + tag: "5.0.16-bb.0" helmRepo: repoName: "registry1" chartName: "minio-instance" - tag: "5.0.15-bb.7" + tag: "5.0.16-bb.0" # -- Flux reconciliation overrides specifically for the Minio Package flux: {} @@ -1252,11 +1252,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/gitlab.git path: "./chart" - tag: "8.1.2-bb.0" + tag: "8.1.2-bb.3" helmRepo: repoName: "registry1" chartName: "gitlab" - tag: "8.1.2-bb.0" + tag: "8.1.2-bb.3" # -- Flux reconciliation overrides specifically for the Gitlab Package flux: {} @@ -1366,12 +1366,12 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/gitlab-runner.git path: "./chart" - tag: "0.65.0-bb.3" + tag: "0.66.0-bb.0" helmRepo: repoName: "registry1" chartName: "gitlab-runner" - tag: "0.65.0-bb.3" + tag: "0.66.0-bb.0" # -- Flux reconciliation overrides specifically for the Gitlab Runner Package @@ -1462,11 +1462,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/sonarqube.git path: "./chart" - tag: "8.0.6-bb.1" + tag: "8.0.6-bb.2" helmRepo: repoName: "registry1" chartName: "sonarqube" - tag: "8.0.6-bb.1" + tag: "8.0.6-bb.2" # -- Flux reconciliation overrides specifically for the Sonarqube Package flux: {} @@ -1566,11 +1566,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/haproxy.git path: "./chart" - tag: "1.19.3-bb.6" + tag: "1.19.3-bb.7" helmRepo: repoName: "registry1" chartName: "haproxy" - tag: "1.19.3-bb.6" + tag: "1.19.3-bb.7" # -- Flux reconciliation overrides specifically for the HAProxy Package flux: {} @@ -1595,11 +1595,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/anchore-enterprise.git 
path: "./chart" - tag: "2.4.2-bb.18" + tag: "2.7.0-bb.4" helmRepo: repoName: "registry1" chartName: "anchore" - tag: "2.4.2-bb.18" + tag: "2.7.0-bb.4" # -- Flux reconciliation overrides specifically for the Anchore Package flux: @@ -1687,11 +1687,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/mattermost-operator.git path: "./chart" - tag: "1.21.0-bb.2" + tag: "1.22.0-bb.0" helmRepo: repoName: "registry1" chartName: "mattermost-operator" - tag: "1.21.0-bb.2" + tag: "1.22.0-bb.0" # -- Flux reconciliation overrides specifically for the Mattermost Operator Package flux: {} @@ -1712,11 +1712,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/mattermost.git path: "./chart" - tag: "9.9.1-bb.1" + tag: "9.10.0-bb.0" helmRepo: repoName: "registry1" chartName: "mattermost" - tag: "9.9.1-bb.1" + tag: "9.10.0-bb.0" # -- Flux reconciliation overrides specifically for the Mattermost Package flux: {} @@ -1806,11 +1806,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/velero.git path: "./chart" - tag: "6.7.0-bb.2" + tag: "6.7.0-bb.4" helmRepo: repoName: "registry1" chartName: "velero" - tag: "6.7.0-bb.2" + tag: "6.7.0-bb.4" # -- Flux reconciliation overrides specifically for the Velero Package flux: {} @@ -1842,11 +1842,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/keycloak.git path: "./chart" - tag: "2.4.3-bb.0" + tag: "2.4.3-bb.2" helmRepo: repoName: "registry1" chartName: "keycloak" - tag: "2.4.3-bb.0" + tag: "2.4.3-bb.2" database: # -- Hostname of a pre-existing database to use for Keycloak. 
@@ -1899,11 +1899,11 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/vault.git path: "./chart" - tag: "0.25.0-bb.35" + tag: "0.25.0-bb.38" helmRepo: repoName: "registry1" chartName: "vault" - tag: "0.25.0-bb.35" + tag: "0.25.0-bb.38" # -- Flux reconciliation overrides specifically for the Vault Package flux: {} @@ -2122,12 +2122,12 @@ addons: git: repo: https://repo1.dso.mil/big-bang/product/packages/thanos.git - tag: "15.7.9-bb.4" + tag: "15.7.9-bb.5" path: "./chart" helmRepo: repoName: "registry1" chartName: "thanos" - tag: "15.7.9-bb.4" + tag: "15.7.9-bb.5" # -- Flux reconciliation overrides specifically for the Thanos Package flux: {} @@ -2140,6 +2140,33 @@ addons: postRenderers: [] + externalSecrets: + # -- Toggle deployment of external secrets + enabled: false + + # -- Choose source type of "git" or "helmRepo" + sourceType: "git" + + git: + repo: https://repo1.dso.mil/big-bang/product/packages/external-secrets.git + tag: "0.9.18-bb.7" + path: "./chart" + helmRepo: + repoName: "registry1" + chartName: "external-secrets" + tag: "0.9.18-bb.7" + + # -- Override flux settings for this package + flux: {} + + # -- Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". + ingress: + gateway: "" + + values: {} + + postRenderers: [] + # -- Wrapper chart for integrating Big Bang components alongside a package wrapper: # -- Choose source type of "git" or "helmRepo" diff --git a/docs/README.md b/docs/README.md index 9df4b97f92dadcb8c8143f8c457c266b8d6f0791..863dae808ae0df576966f522c3ccf44fe907721d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -47,7 +47,9 @@ * These upgrades are pre-tested. The Big Bang team "eats our own dogfood." Our CI jobs for developing the Big Bang product, run against a Big Bang Dogfood Cluster, and as part of our release process we upgrade our Big Bang Dogfood Cluster, before publishing each release. > **Note:** We ONLY support and recommend successive upgrades. 
We do not test upgrades that skip multiple minor versions. * Auto updates are also possible by setting kustomization.yaml to 1.x.x, because Big Bang follows semantic versioning per the [Big Bang README](../README.md#release-schedule), and flux is smart enough to read x as the most recent version number. -* DoD Software Developers get a Developer User Experience of "Single Sign On (SSO) for free," instead of developers coding SSO support 10 times for 10 apps. The complexity of SSO support is baked into the platform, and after an operations team correctly configures the Platform's SSO settings, SSO works for all apps hosted on the platform. The developer's user experience for enabling SSO for their app then becomes as simple as adding the label `istio-injection=enabled` (which transparently injects mTLS service mesh protection into their application's Kubernetes YAML manifest) and adding the label `protect=keycloak` to each pod (which leverages an EnvoyFilter CustomResource to auto inject an SSO Authentication Proxy in front of the data path to get to their application). +* SSO support is included in the Big Bang platform offering. Operations teams can leverage Big Bang's free Single Sign On capability by deploying the [Keycloak project](https://www.keycloak.org/). Using Keycloak, an ops team configures the platform SSO settings so that SSO can be leveraged by all apps hosted on the platform. For details, see the [SSO Readme](developer/package-integration/sso.md). Once Authservice is configured, to enable SSO for an individual app, developers need only ensure the presence of the two following labels: + - __Namespace__ `istio-injection=enabled`: transparently injects mTLS service mesh protection into their application's Kubernetes YAML manifest + - __Pod__ `protect=keycloak`: declares an EnvoyFilter CustomResource to auto inject an SSO Authentication Proxy in front of the data path to get to their application ## How do I deploy Big Bang? 
diff --git a/docs/guides/backup-and-restore/gitlab-backup-restore.md b/docs/guides/backup-and-restore/gitlab-backup-restore.md index fbf270b706693439907643c3e684e8e2d478bba5..1892d1cbce86142e4cb84e71d3131b939b838ccf 100644 --- a/docs/guides/backup-and-restore/gitlab-backup-restore.md +++ b/docs/guides/backup-and-restore/gitlab-backup-restore.md @@ -1,6 +1,7 @@ # Gitlab Backups and Restores ## Gitlab Helm Chart Configuration + 1. Follow the `Backup and rename gitlab-rails-secret` task within the [Production document](../../understanding-bigbang/configuration/sample-prod-config.md). 1. Fill in our externalStorage values, specifically `addons.gitlab.objectStorage.iamProfile` or both `.Values.addons.gitlab.objectStorage.accessKey` & `.Values.addons.gitlab.objectStorage.accessSecret` along with `.Values.addons.gitlab.objectStorage.bucketPrefix` or you can override in the name for your own bucket eg: ```yaml @@ -27,21 +28,23 @@ addons: ## Backing up Gitlab ### Manual Steps + To perform a manual complete backup of Gitlab, exec into your Gitlab Toolbox pod and run the following: - 1. find your Gitlab Toolbox pod + 1. Find your Gitlab Toolbox pod. ```shell kubectl get pods -l release=gitlab,app=toolbox -n gitlab kubectl exec -it gitlab-toolbox-XXXXXXXXX-XXXXX -n gitlab -- /bin/sh ``` - 1. Execute the backup-utility command which will pull down data from the database, gitaly, and other portions of the ecosystem, tar them up and push to your configured cloud storage. + 1. Execute the backup-utility command which will pull down data from the database, gitaly, and other portions of the ecosystem. Tar them up and push to your configured cloud storage. 
```shell backup-utility --skip registry,lfs,artifacts,packages,uploads,pseudonymizer,terraformState,backups ``` -You can read more on the upstream documentation: https://docs.gitlab.com/charts/backup-restore/backup.html#create-the-backup +You can read more on the upstream documentation: https://docs.gitlab.com/charts/backup-restore/backup.html#create-the-backup. ### Automatic Cron-based Backups -It is recommended to setup automatic backups via Gitlab toolbox's cron settings: + +It is recommended to set up automatic backups via Gitlab toolbox's cron settings: ```yaml addons: gitlab: @@ -66,6 +69,7 @@ addons: You can read more on the upstream documentation: https://docs.gitlab.com/charts/charts/gitlab/toolbox/#configuration ## Restore Gitlab + 1. Ensure your gitlab-rails secret is present in gitops or in-cluster and it correctly matches the database to which the chart is pointed. * If you need to replace or update your rails secret, once it is updated be sure to restart the following pods: ```shell @@ -74,7 +78,7 @@ You can read more on the upstream documentation: https://docs.gitlab.com/charts/ kubectl rollout -n gitlab restart deploy/gitlab-toolbox ``` 2. Exec into the toolbox pod and run the backup-utility command: - 1. find your Gitlab Toolbox pod + 1. find your Gitlab Toolbox pod. ```shell kubectl get pods -l release=gitlab,app=toolbox -n gitlab kubectl exec -it gitlab-toolbox-XXXXXXXXX-XXXXX -n gitlab -- /bin/sh @@ -96,4 +100,4 @@ You can read more on the upstream documentation: https://docs.gitlab.com/charts/ # Using the Timestamp backup-utility --restore -t TIMESTAMP_VALUE ``` -You can read more on the upstream documentation: https://docs.gitlab.com/charts/backup-restore/restore.html#restoring-the-backup-file +You can read more on the upstream documentation: https://docs.gitlab.com/charts/backup-restore/restore.html#restoring-the-backup-file. 
diff --git a/docs/guides/backup-and-restore/nexus-migration-with-velero.md b/docs/guides/backup-and-restore/nexus-migration-with-velero.md index 140c4c4abbff63ad2ac66ec2c43cc8a9c809185f..8167d55d803aa6f8e4d1c6a18768a00a0b07e37f 100644 --- a/docs/guides/backup-and-restore/nexus-migration-with-velero.md +++ b/docs/guides/backup-and-restore/nexus-migration-with-velero.md @@ -6,16 +6,16 @@ This guide demonstrates how to perform a migration of Nexus repositories and art ## Prerequisites/Assumptions -- K8s running in AWS -- Nexus PersistentVolume is using AWS EBS -- Migration is between clusters on the same AWS instance and availability zone (due to known Velero [limitations](https://velero.io/docs/v1.6/locations/#limitations--caveats)) -- Migration occurs between K8s clusters with the same version -- Velero CLI [tool](https://github.com/vmware-tanzu/velero/releases) -- Crane CLI [tool](https://github.com/google/go-containerregistry) +* K8s running in AWS +* Nexus PersistentVolume is using AWS EBS +* Migration is between clusters on the same AWS instance and availability zone (due to known Velero [limitations](https://velero.io/docs/v1.6/locations/#limitations--caveats)) +* Migration occurs between K8s clusters with the same version +* Velero CLI [tool](https://github.com/vmware-tanzu/velero/releases) +* Crane CLI [tool](https://github.com/google/go-containerregistry) ## Preparation -1. Ensure the Velero addon in the Big Bang values file is properly configured, sample configuration below: +1. Ensure the Velero addon in the Big Bang values file is properly configured. Sample configuration is provided in the following: ```yaml addons: @@ -44,9 +44,9 @@ This guide demonstrates how to perform a migration of Nexus repositories and art aws_secret_access_key = <CHANGE ME> ``` -1. 
Manually create an S3 bucket that the backup configuration will be stored in (in this case it is named `nexus-velero-backup`), this should match the `configuration.backupStorageLocation.bucket` key above -1. The `credentials.secretContents.cloud` credentials should have the necessary permissions to read/write to S3, volumes and volume snapshots -1. As a sanity check, take a look at the Velero logs to make sure the backup location (S3 bucket) is valid, you should see something like: +1. Manually create an S3 bucket that the backup configuration will be stored in (in this case it is named `nexus-velero-backup`), this should match the `configuration.backupStorageLocation.bucket` key above. +1. The `credentials.secretContents.cloud` credentials should have the necessary permissions to read/write to S3, volumes and volume snapshots. +1. As a sanity check, take a look at the Velero logs to make sure the backup location (S3 bucket) is valid, you should see something similar to the following: ```plaintext level=info msg="Backup storage location valid, marking as available" backup-storage-location=default controller=backup-storage-location logSource="pkg/controller/backup_storage_location_controller.go:121" @@ -93,9 +93,9 @@ Also ensure an EBS volume snapshot has been created and the Snapshot status is ` ## Restoring From Backup -1. In the new cluster, ensure that Nexus and Velero are running and healthy +1. In the new cluster, ensure that Nexus and Velero are running and healthy. - It is critical to ensure that Nexus has been included in the new cluster's Big Bang deployment, otherwise the restored Nexus configuration will not be managed by the Big Bang Helm chart. -1. If you are using the same `velero.values` from above, Velero should automatically be configured to use the same backup location as before. Verify this with `velero backup get` and you should see output that looks like: +1. 
If you are using the same `velero.values` from above, Velero should automatically be configured to use the same backup location as before. Verify this with `velero backup get` and you should see output that looks similar to the following: ```console NAME STATUS ERRORS WARNINGS CREATED EXPIRES STORAGE LOCATION SELECTOR @@ -104,14 +104,15 @@ Also ensure an EBS volume snapshot has been created and the Snapshot status is ` 1. To perform the migration, Nexus must be shut down. In the Nexus Deployment, bring the `spec.replicas` down to `0`. 1. Ensure that the Nexus PVC and PV are also removed (**you may have to delete these manually!**), and that the corresponding Nexus EBS volume has been deleted. - - If you have to remove the Nexus PV and PVC manually, delete the PVC first, which should cascade to the PV; then, manually delete the underlying EBS volume (if it still exists) + - If you have to remove the Nexus PV and PVC manually, delete the PVC first, which should cascade to the PV. Then, manually delete the underlying EBS volume (if it still exists). 1. Now that Nexus is down and the new cluster is configured to use the same backup location as the old one, perform the migration by running: `velero restore create --from-backup nexus-ns-backup` -1. The Nexus PV and PVC should be recreated (verify before continuing!), but the pod will fail to start due to the previous change in the Nexus deployment spec. Change the Nexus deployment `spec.replicas` back to `1`. This will bring up the Nexus pod which should connect to the PVC and PV created during the Velero restore. -1. Once the Nexus pod is running and healthy, log in to Nexus and verify that the repositories have been restored - - The credentials to log in will have been restored from the Nexus backup, so they should match the credentials of the Nexus that was migrated (not the new installation!) +1. 
The Nexus PV and PVC should be recreated (**NOTE:** verify this before continuing!), but the pod will fail to start due to the previous change in the Nexus deployment spec. Change the Nexus deployment `spec.replicas` back to `1`. This will bring up the Nexus pod which should connect to the PVC and PV created during the Velero restore. + +1. Once the Nexus pod is running and healthy, log in to Nexus and verify that the repositories have been restored. + - The credentials to log in will have been restored from the Nexus backup, so they should match the credentials of the Nexus that was migrated (not the new installation!). - It is recommended to log in to Nexus and download a sampling of images/artifacts to ensure they are working as expected. For example, login to Nexus using the migrated credentials: diff --git a/docs/guides/deployment-scenarios/extra-package-deployment.md b/docs/guides/deployment-scenarios/extra-package-deployment.md index 0c0c02aff610568a233b44c69279ab32d6b66645..3fd93e5819048ec895ac479477e279c16b395de6 100644 --- a/docs/guides/deployment-scenarios/extra-package-deployment.md +++ b/docs/guides/deployment-scenarios/extra-package-deployment.md @@ -1,29 +1,30 @@ # Extra Package Deployment -When using Big Bang you often find that you need or want to deploy an additional package alongside your chosen core/addon packages. This might be a mission app or just an extra helm chart from the Big Bang community or broader helm/kubernetes community. +When using Big Bang you often find that you need or want to deploy an additional package alongside your chosen core/add-on packages. This might be a mission app or just an extra helm chart from the Big Bang community or broader helm/kubernetes community. -In order to ease the burden on end users and increase integration with Big Bang components we have provided a way to deploy these additional packages with optional extra "wrapping" to provide integration with Big Bang capabilities. 
+In order to ease the burden on end users and increase integration with Big Bang components, we have provided a way to deploy these additional packages with optional extra "wrapping" to provide integration with Big Bang capabilities. Please open an issue in the [Big Bang repository](https://repo1.dso.mil/big-bang/bigbang/-/issues) or in the [Wrapper repository ](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/issues) for any bugs you discover or for any new features or functionality you would like the package/wrapper to support. ## What is provided -When utilizing the extra package values/logic there are two main pieces that are deployed: your package and optionally the "wrapper". Each of these pieces provides certain things necessary for deploying. The standalone package functionality is recommended for charts that already have Big Bang integration (i.e. networkpolicies, monitoring support, istio support). Utilizing the optional wrapper method is recommended for non-integrated charts or mission applications. +When utilizing the extra package values/logic, there are two main pieces that are deployed: your package and optionally, the "wrapper." Each of these pieces provides certain things necessary for deploying. The standalone package functionality is recommended for charts that already have Big Bang integration (i.e., networkpolicies, monitoring support, and Istio support). Utilizing the optional wrapper method is recommended for non-integrated charts or mission applications. 
## Package Deployment -By deploying your package with the Big Bang values you will get the below all through Big Bang values control: -- Flux `GitRepository` or `HelmRepository` depending on configuration -- Flux `HelmRelease` or `Kustomization` depending on configuration -- Control of flux settings for the above -- Control of `postRenderers` if using Flux `HelmRelease` -- Passthrough of values to configure your package chart +By deploying your package with the Big Bang values, you will accomplish multiple things all through Big Bang values control. These accomplishments are listed in the following: + +* Flux `GitRepository` or `HelmRepository`, depending on configuration. +* Flux `HelmRelease` or `Kustomization`, depending on configuration. +* Control of flux settings for the above. +* Control of `postRenderers` if using Flux `HelmRelease`. +* Passthrough of values to configure your package chart. The alternative is that customers would need to manage these things in a "sideloaded" fashion and not have these tied to the Big Bang deployment lifecycle/management. ### Basic Overrides/Passthroughs -There are some basic override values provided to modify your Helm chart installation. These do NOT require the `wrapper`. An example of these values is included below: +There are some basic override values provided to modify your Helm chart installation. These do NOT require the `wrapper`. An example of these values is included in the following: ```yaml packages: @@ -42,14 +43,15 @@ packages: replicaCount: 3 ``` -In this example we are doing three things: -- Overriding the Flux timeout on our `HelmRelease` to be 5 minutes -- Adding a dependency on the `monitoring` HelmRelease in the `bigbang` namespace, to ensure `podinfo` doesn't deploy until after `monitoring` -- Passing a value directly to the Podinfo chart to create 3 replicas +In this example, we are doing three things: +
+* Adding a dependency on the `monitoring` HelmRelease in the `bigbang` namespace to ensure `podinfo` doesn't deploy until after `monitoring.` +* Passing a value directly to the Podinfo chart to create three replicas. We could also specify a `postRenderers` value here, which is documented well in [this document](../../understanding-bigbang/configuration/postrenderers.md). -If you would like to have values for your extra package deployment adapt based on your Big Bang configuration you could do something like the below: +If you would like to have values for your extra package deployment adapt based on your Big Bang configuration, you could do something like what is listed in the following: ```yaml packages: @@ -59,21 +61,21 @@ packages: enabled: "{{ .Values.istio.enabled }}" ``` -In this example, Istio will only be configured for podinfo if Istio is enabled for BigBang. +In this example, Istio will only be configured for podinfo if Istio is enabled for Big Bang. ## Wrapper Deployment -The [Wrapper](https://repo1.dso.mil/big-bang/product/packages/wrapper) is a helm chart that provides additional integrations with key Big Bang components and standards, as well as extensibility features for common use cases. All of these can be tailored to a given package's needs with a simple interface. 
Currently included are: -- Istio: injection/sidecars, `VirtualService` for ingress, and `PeerAuthentication` for mTLS -- Monitoring: `ServiceMonitor` for metrics, alerts for alertmanager, dashboards for Grafana -- NetworkPolicies: Default set of "best practice" network policies with options to extend -- Secret creation (of arbitrary content) -- Configmap creation (of arbitrary content) -- SSO configuration with Authservice (not fully automated, requires additional configuration of chains and labeling of workload to route to authservice) +The [Wrapper](https://repo1.dso.mil/big-bang/product/packages/wrapper) is a helm chart that provides additional integrations with key Big Bang components and standards, as well as extensibility features for common use cases. All of these can be tailored to a given package's needs with a simple interface. Currently included are those listed in the following: +* **Istio:** injection/sidecars, `VirtualService` for ingress, and `PeerAuthentication` for mTLS. +* **Monitoring:** `ServiceMonitor` for metrics, alerts for alertmanager, dashboards for Grafana. +* **NetworkPolicies:** Default set of "best practice" network policies with options to extend. +* **Secret creation** (of arbitrary content). +* **Configmap creation** (of arbitrary content). +* **SSO configuration with Authservice** (not fully automated, requires additional configuration of chains and labeling of workload to route to authservice). -These pieces can typically be complicated to get setup correctly and connected to components that are provided in Big Bang core, so we provide a simplified interface to add them. +These pieces can typically be complicated to get set up correctly and connected to components that are provided in Big Bang core; therefore, we provide a simplified interface to add them. -### How to use it +### How to Use the Wrapper ```yaml packages: @@ -87,11 +89,11 @@ packages: path: charts/podinfo ``` -NOTE: The wrapper is an opt-in feature. 
Without enabling the wrapper, the `packages` will default to just deploying the pieces [mentioned above](#package-deployment). +**NOTE:** The wrapper is an opt-in feature. Without enabling the wrapper, the `packages` will default to just deploying the pieces [mentioned above](#package-deployment). -The package also has HelmRepository support for sourcing the artifacts from a HelmRepo (of normal or OCI type); usage of HelmRepos is encouraged if you have access to these types of artifacts. +The package also has HelmRepository support for sourcing the artifacts from a HelmRepo (of normal or OCI type). Usage of HelmRepos is encouraged if you have access to these types of artifacts. -With these values added you should have a very basic deployment of `podinfo` added onto your Big Bang install with some basic default integrations. The rest of this guide will walk you through each section of Big Bang touchpoints and some example configurations you could use. Each of the configurations are compatible with each other (i.e. you can combine the examples below). +With these values added, you should have a very basic deployment of `podinfo` added onto your Big Bang install with some basic default integrations. The rest of this guide will walk you through each section of Big Bang touchpoints and some example configurations you could use. Each of the configurations are compatible with each other (i.e., you can combine the examples that are provided in this document). ### Istio Configuration @@ -116,13 +118,13 @@ packages: port: 9898 ``` -In this example we are primarily adding a virtual service for ingress to our application (leveraging defaults to select the proper service). By using the wrapper we are also getting several default options including istio sidecar injection and STRICT mTLS. +In this example, we are primarily adding a virtual service for ingress to our application (i.e., leveraging defaults to select the proper service). 
By using the wrapper we are also getting several default options including istio sidecar injection and STRICT mTLS. -There are more ways to modify the virtual service creation and mTLS config; additional values can be referenced in the [wrapper chart istio section](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/blob/6536759fef016db8b5504ad6c237f2daffe22844/chart/values.yaml#L31-75). +There are more ways to modify the virtual service creation and mTLS config. Additional values can be referenced in the [wrapper chart istio section](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/blob/6536759fef016db8b5504ad6c237f2daffe22844/chart/values.yaml#L31-75). ### Monitoring Configuration -The wrapper chart also provides ways to integrate with the monitoring stack (Prometheus, Alertmanager, and Grafana). The example below is a basic way to configure monitoring for `podinfo`: +The wrapper chart also provides ways to integrate with the monitoring stack (e.g., Prometheus, Alertmanager, and Grafana). A basic way to configure monitoring for `podinfo` is provided in the following: ```yaml packages: @@ -140,13 +142,13 @@ packages: - port: http ``` -In this example we are adding a service monitor that will target the port named `http`. We are leveraging a number of defaults here to select the proper service and metrics paths. +In this example, we are adding a service monitor that will target the port named `http`. We are leveraging a number of defaults here to select the proper service and metrics paths. -There are other ways to further modify monitoring settings including more advanced service monitor config; additional values can be referenced in the [wrapper chart monitor section](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/blob/6536759fef016db8b5504ad6c237f2daffe22844/chart/values.yaml#L77-91). +There are other ways to further modify monitoring settings including more advanced service monitor config. 
Additional values can be referenced in the [wrapper chart monitor section](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/blob/6536759fef016db8b5504ad6c237f2daffe22844/chart/values.yaml#L77-91). ### Network Policy Configuration -The wrapper chart provides ways to configure network policies as needed for your application. The example below again provides a basic config for the `podinfo` application: +The wrapper chart provides ways to configure network policies as needed for your application. A basic config for the `podinfo` application is provided in the following: ```yaml packages: @@ -173,13 +175,13 @@ packages: # port: 9999 ``` -In this example we are allowing the package to have egress to the Kubernetes control plane (aka API). This particular setting can be beneficial for operators that may need to create Kubernetes resources. +In this example, we are allowing the package to have egress to the Kubernetes control plane (i.e., API). This particular setting can be beneficial for operators that may need to create Kubernetes resources. -There are a number of additional configurations including allowing egress to https or more custom needs; additional values can be referenced in the [wrapper chart network section](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/blob/6536759fef016db8b5504ad6c237f2daffe22844/chart/values.yaml#L93-113). +There are a number of additional configurations including allowing egress to https or more custom needs. Additional values can be referenced in the [wrapper chart network section](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/blob/6536759fef016db8b5504ad6c237f2daffe22844/chart/values.yaml#L93-113). -### Configmap / Secret Creation +### Configmap/Secret Creation -Oftentimes when deploying a Helm chart you may be expected to point to an existing secret for credentials, a license, or external service configuration (S3/RDS). 
The below values can be helpful in creation of these items: +Often when deploying a Helm chart, you may be expected to point to an existing secret for credentials, a license, or external service configuration (i.e., S3/RDS). The values that can be helpful in creation of these items are provided in the following: ```yaml packages: @@ -200,4 +202,4 @@ packages: foo: YmFyCg== ``` -These secrets/configmaps are created prior to installation of your package, so that they can be referenced in any values you use to configure your package. +These secrets/configmaps are created prior to installation of your package; therefore, they can be referenced in any values you use to configure your package. diff --git a/docs/guides/deployment-scenarios/multiple-ingress.md b/docs/guides/deployment-scenarios/multiple-ingress.md index 3e2fa5eb1fc16da2e83e3da7bf9d2ead54fa8a87..e95cb8017a451834ba1e589fc8461093b1690d61 100644 --- a/docs/guides/deployment-scenarios/multiple-ingress.md +++ b/docs/guides/deployment-scenarios/multiple-ingress.md @@ -1,18 +1,18 @@ # Using Big Bang with Multiple Ingress Gateways -By default, Big Bang only creates one ingress for all of the packages. Although this architecture reduces complexity, it also limits the ability to independently control network access and load balancing to groups of packages. By configuring Big Bang for multiple ingress gateways through [Istio](https://istio.io/latest/), package access and load can be better controlled. +By default, Big Bang only creates one ingress for all of the packages. Although this architecture reduces complexity, it also limits the ability to independently control network access and load balancing to groups of packages. By configuring Big Bang for multiple ingress gateways through [Istio](https://istio.io/latest/), package access and load can be better controlled. 
## Architecture The following diagram illustrates a typical multiple ingress architecture for Big Bang with the following characteristics: -- A Kubernetes cluster running on a private subnet -- Some apps with exposure to the internet through a public network load balancer -- Some apps without exposure to the internet through a private (aka internal) network load balancer -- Single sign on (SSO) connected to the internet through a dedicated public network load balancer -- A service mesh ([Istio](https://istio.io/latest/)) handling TLS for all apps except SSO +- A Kubernetes cluster running on a private subnet. +- Some apps with exposure to the internet through a public network load balancer. +- Some apps without exposure to the internet through a private (aka internal) network load balancer. +- Single Sign-On (SSO) connected to the internet through a dedicated public network load balancer. +- A service mesh ([Istio](https://istio.io/latest/)) handling TLS for all apps except SSO. -Big Bang is capable of setting up everything within the private subnet using configuration. The public load balancers would need to be configured outside of Big Bang's deployment. +Big Bang is capable of setting up everything within the private subnet using configuration. The public load balancers would need to be configured outside of Big Bang's deployment. ```mermaid graph LR @@ -52,13 +52,13 @@ graph LR ### Load Balancers -Load balancers are used to insure traffic is distributed to Istio's control plane running across the Kubernetes nodes. In the diagram above, we only show one Kubernetes node for simplicity. But, most clusters are run with multiple nodes. Load balancers should be connected to all of the nodes. It is recommended that you use Layer 3/4 network load balancers in Big Bang since Istio can handle layer 7 routing and balancing. +Load balancers are used to ensure traffic is distributed to Istio's control plane running across the Kubernetes nodes. 
In the diagram above, we only show one Kubernetes node for simplicity. However, most clusters are run with multiple nodes. Load balancers should be connected to all of the nodes. It is recommended that you use Layer 3/4 network load balancers in Big Bang since Istio can handle Layer 7 routing and balancing. #### Public Load Balancer -Public load balancers must be created independent of Big Bang. This is because the cluster is deployed in a private subnet and therefore does not have access to create resources in the public, internet-facing subnet. In order for the load balancer, in the public subnet, to communicate to the Istio's Ingress Gateway, in the private subnet, node ports must be used. Node ports will bind a port on each cluster node to a listener in the ingress gateway. The load balancer will distribute traffic on that port to the cluster nodes. +Public load balancers must be created independent of Big Bang. This is because the cluster is deployed in a private subnet and therefore, does not have access to create resources in the public, internet-facing subnet. In order for the load balancer, in the public subnet, to communicate to the Istio's Ingress Gateway, in the private subnet, node ports must be used. Node ports will bind a port on each cluster node to a listener in the ingress gateway. The load balancer will distribute traffic on that port to the cluster nodes. -> Not all deployments have a public subnet. For example, a private network that can only be accessed through a VPN would not have a public subnet and not require any public load balancers. +> Not all deployments have a public subnet. For example, a private network that can only be accessed through a VPN would not have a public subnet and not require any public load balancers. 
In Big Bang, this is how you would setup an ingress gateway for Node Ports: @@ -70,13 +70,13 @@ istio: nodePortBase: 30000 # Bind the following ports: Status <-> 30000; HTTP <-> 30001; HTTPS <-> 30002; SNI <-> 30003 ``` -The load balancer can then be setup to forward HTTP traffic to all nodes on port 30001 and HTTPS traffic on all nodes to 30002. Istio provides a ready status that can be reached via HTTP on the status port. So, the load balancer's health check can be setup for all nodes on port 30000 to the URL `/healthz/ready`. DNS entries should be created for each hostname to point to the load balancer's DNS. Package endpoints can then be accessed using the FQDN from the internet. +The load balancer can then be setup to forward HTTP traffic to all nodes on port 30001 and HTTPS traffic on all nodes to 30002. Istio provides a ready status that can be reached via HTTP on the status port. So, the load balancer's health check can be setup for all nodes on port 30000 to the URL `/healthz/ready`. DNS entries should be created for each hostname to point to the load balancer's DNS. Package endpoints can then be accessed using the FQDN from the internet. -#### Private / Internal Load Balancer +#### Private/Internal Load Balancer -Private or internal load balancers can usually be created automatically by Big Bang via Istio using service annotations. By using these annotations, a load balancer will be created for you and automatically mapped to the appropriate nodes/ports for distributing the load. +Private or internal load balancers can usually be created automatically by Big Bang via Istio using service annotations. By using these annotations, a load balancer will be created for you and automatically mapped to the appropriate nodes/ports for distributing the load. -Here is how you would setup Big Bang for a private load balancer on AWS. 
For other cloud providers, review [Kubernetes internal load balancer documentation](https://kubernetes.io/docs/concepts/services-networking/_print/#internal-load-balancer): +Here is how you would setup Big Bang for a private load balancer on AWS. For other cloud providers, review [Kubernetes internal load balancer documentation](https://kubernetes.io/docs/concepts/services-networking/_print/#internal-load-balancer): ```yaml istio: @@ -90,17 +90,20 @@ istio: service.beta.kubernetes.io/aws-load-balancer-internal: "true" ``` -After the load balancer is created, you will need to setup DNS entries (e.g. Route 53 on AWS) to point to the load balancer using the host names of the applications. You should then be able to access the package endpoints from the private network using the FQDN. +After the load balancer is created, you will need to setup DNS entries (e.g., Route 53 on AWS) to point to the load balancer using the host names of the applications. You should then be able to access the package endpoints from the private network using the FQDN. + > Private network access can be achieved through SSH on a jump box (aka bastion), VPN, or other secure gateway. ### Ingress Gateways -Istio's Ingress Gateways are services that sit on the edge of the Kubernetes cluster and listen for incoming traffic. In Big Bang, the Ingress Gateways are either setup as Node Port or Load Balancer services. As a Node Port type, ports on the node are bound to the service and incoming traffic is routed to the nodes on those ports. As a Load Balancer type, a load balancer is automatically created and configured to communicate to the service. +Istio's Ingress Gateways are services that sit on the edge of the Kubernetes cluster and listen for incoming traffic. In Big Bang, the Ingress Gateways are either setup as Node Port or Load Balancer services. As a Node Port type, ports on the node are bound to the service and incoming traffic is routed to the nodes on those ports. 
As a Load Balancer type, a load balancer is automatically created and configured to communicate to the service. + > In some cases, automatic load balancer creating and configuration is not supported and a Node Port service must be used. -Ingress Gateways will listen for incoming traffic on their assigned ports and forward that traffic to attached Gateways on the appropriate port. For example, traffic may be received on port 30002 and forwarded to all attached Gateways on port 8443. +Ingress Gateways will listen for incoming traffic on their assigned ports and forward that traffic to attached Gateways on the appropriate port. For example, traffic may be received on port 30002 and forwarded to all attached Gateways on port 8443. + +In Big Bang, ingress gateways can be created and configured using the `istio.ingressGateways` values. By adding additional keys under this value, additional ingress gateways will be created. An example of setting up three Ingress Gateways to match the architecture diagram above is provided in the following: -In Big Bang, ingress gateways can be created and configured using the `istio.ingressGateways` values. By adding additional keys under this value, additional ingress gateways will be created. The following is an example of setting up three Ingress Gateways to match the architecture diagram above. > It is recommended that you add `-ingressgateway` on the end of the name of the key to help identify the pods created in the Kubernetes cluster. ```yaml @@ -123,17 +126,17 @@ istio: nodePortBase: 30100 # Bind the following ports: Status (15021) <-> 30100; HTTP (8080) <-> 30101; HTTPS (8443) <-> 30102; SNI (15443) <-> 30103 ``` -The default values for Ingress Gateways will work for most situations. 
However, if you need finer control over the configuration, any of the settings in the [Kubernetes Resource Spec](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#KubernetesResourcesSpec) can be added to `kubernetesResourceSpec` as a map. Some examples of additional settings include environmental variables, service selectors, affinity mapping, or additional ports. +The default values for Ingress Gateways will work for most situations. However, if you need finer control over the configuration, any of the settings in the [Kubernetes Resource Spec](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#KubernetesResourcesSpec) can be added to `kubernetesResourceSpec` as a map. Some examples of additional settings include environmental variables, service selectors, affinity mapping, or additional ports. ### Gateways -While Ingress Gateways handle traffic using ports, Gateways manage traffic using protocol and hostname. Each Gateway must be assigned to one or more Ingress Gateways to receive traffic. Gateways are setup to listen on ports for specific protocols and hostnames. Traffic is then sent on to Virtual Services for further routing. +While Ingress Gateways handle traffic using ports, Gateways manage traffic using protocol and hostname. Each Gateway must be assigned to one or more Ingress Gateways to receive traffic. Gateways are set up to listen on ports for specific protocols and hostnames. Traffic is then sent on to Virtual Services for further routing. -Gateways can handle TLS encryption, including termination. If a Gateway is setup for TLS termination, it handles the full TLS handshake during HTTPS connections and decrypts messages at the Gateway before passing traffic on the backend in the clear. To perform this function, the Gateway must be provided a TLS private key and certificate. 
There are [other TLS modes](https://istio.io/latest/docs/reference/config/networking/gateway/#ServerTLSSettings-TLSmode) supported by Gateways that may also be used. +Gateways can handle TLS encryption, including termination. If a Gateway is set up for TLS termination, it handles the full TLS handshake during HTTPS connections and decrypts messages at the Gateway before passing traffic on the backend in the clear. To perform this function, the Gateway must be provided a TLS private key and certificate. There are [other TLS modes](https://istio.io/latest/docs/reference/config/networking/gateway/#ServerTLSSettings-TLSmode) supported by Gateways that may also be used. -In Big Bang, Gateways can be created and configured using the `istio.gateways` values. By adding additional keys under this value, additional Gateways will be created. By default, HTTP traffic is always redirected to HTTPS traffic in the Gateway. The following is an example of setting up three Gateways to match the architecture diagram above. +In Big Bang, Gateways can be created and configured using the `istio.gateways` values. By adding additional keys under this value, additional Gateways will be created. By default, HTTP traffic is always redirected to HTTPS traffic in the Gateway. An example of setting up three Gateways to match the architecture diagram above is provided in the following: -> By default Big Bang uses TLS termination on Gateways. For Keycloak, the package must manage the TLS encryption. In that case, we use TLS passthrough on the Gateway and setup the TLS keys in the package. +> By default Big Bang uses TLS termination on Gateways. For Keycloak, the package must manage the TLS encryption. In that case, we use TLS passthrough on the Gateway and set up the TLS keys in the package. 
```yaml gateways: @@ -159,11 +162,11 @@ In Big Bang, Gateways can be created and configured using the `istio.gateways` v mode: "PASSTHROUGH" # Pass TLS encrypted traffic to application ``` -Big Bang will automatically create a secret with the TLS key and cert provided for each Gateway. In some cases, it may be advantageous to create the secrets ahead of time and have Big Bang use them. In this case a TLS secret named `{name of gateway}-cert` can be prepopulated with the key and `tls.key` and `tls.cert` values can be left blank. For example, for the `private` Gateway, a `private-cert` TLS secret would be created. +Big Bang will automatically create a secret with the TLS key and cert provided for each Gateway. In some cases, it may be advantageous to create the secrets ahead of time and have Big Bang use them. In this case a TLS secret named `{name of gateway}-cert` can be prepopulated with the key and `tls.key` and `tls.cert` values can be left blank. For example, for the `private` Gateway, a `private-cert` TLS secret would be created. ### Virtual Services -Virtual services use full URL host and path information to route incoming traffic to a Service. Each package in Big Bang manages its own Virtual Services since the paths and ports vary for each package. However, in order to receive traffic at the Virtual Service, it must be connected to a Gateway. In Big Bang we configure this under each package. The following is an example of this configuration that matches the architecture diagram above. +Virtual services use full URL host and path information to route incoming traffic to a service. Each package in Big Bang manages its own Virtual Services since the paths and ports vary for each package. However, in order to receive traffic at the Virtual Service, it must be connected to a Gateway. In Big Bang we configure this under each package. An example of this configuration that matches the architecture diagram above is provided in the following. 
```yaml monitoring: @@ -189,4 +192,4 @@ addons: ### Services and Pods -Once traffic passes through the Virtual Service, it is passed to a Service. The service may have several redundant pods and a load balancing scheme to manage incoming traffic. It will route the traffic to the appropriate pod based on these settings. Each package implements the service and pods differently and typically the default configuration is adequate for most deployments. +Once traffic passes through the Virtual Service, it is passed to a Service. The service may have several redundant pods and a load balancing scheme to manage incoming traffic. It will route the traffic to the appropriate pod based on these settings. Each package implements the service and pods differently and typically the default configuration is adequate for most deployments. diff --git a/docs/guides/deployment-scenarios/quickstart.md b/docs/guides/deployment-scenarios/quickstart.md index 8ca79df0e56ec28b938798cca2ab989dc42fe4ff..db27250d66480c698cf231e2ee86d9769524cf4d 100644 --- a/docs/guides/deployment-scenarios/quickstart.md +++ b/docs/guides/deployment-scenarios/quickstart.md @@ -3,7 +3,8 @@ [[_TOC_]] ## Video Walkthrough -A 36min speed run video walkthrough of this quickstart can be found on the following 2 mirrored locations: + +A 36-minute speed run video walkthrough of this quickstart can be found on the following two mirrored locations: * [Google Drive - Video Mirror](https://drive.google.com/file/d/1m1pR0a-lrWr_Wed4EsI8-vimkYfb06GQ/view) * [Repo1 - Video Mirror](https://repo1.dso.mil/platform-one/bullhorn-delivery-static-assets/-/blob/master/big_bang/bigbang_quickstart.mp4) @@ -11,31 +12,32 @@ A 36min speed run video walkthrough of this quickstart can be found on the follo This quick start guide explains in beginner-friendly terminology how to complete the following tasks in under an hour: -1. Turn a virtual machine (VM) into a k3d single-node Kubernetes cluster. +1. 
Turn a Virtual Machine (VM) into a k3d single-node Kubernetes cluster. 1. Deploy Big Bang on the cluster using a demonstration and local development-friendly workflow. - > Note: This guide mainly focuses on the scenario of deploying Big Bang to a remote VM with enough resources to run Big Bang [(see step 1 for recommended resources)](#step-1-provision-a-virtual-machine). If your workstation has sufficient resources, or you are willing to disable packages to lower the resource requirements, then local development is possible. This quick start guide is valid for both remote and local deployment scenarios. + > **NOTE:** This guide mainly focuses on the scenario of deploying Big Bang to a remote VM with enough resources to run Big Bang [(refer to step 1 for recommended resources)](#step-1-provision-a-virtual-machine). If your workstation has sufficient resources, or you are willing to disable packages to lower the resource requirements, then local development is possible. This quick start guide is valid for both remote and local deployment scenarios. 1. Customize the demonstration deployment of Big Bang. ## Important Security Notice -All Developer and Quick Start Guides in this repo are intended to deploy environments for development, demo, and learning purposes. There are practices that are bad for security, but make perfect sense for these use cases: using of default values, minimal configuration, tinkering with new functionality that could introduce a security misconfiguration, and even purposefully using insecure passwords and disabling security measures like Kyverno for convenience. Many applications have default username and passwords combinations stored in the public git repo, these insecure default credentials and configurations are intended to be overridden during production deployments. +All Developer and Quick Start Guides in this repo are intended to deploy environments for development, demonstration, and learning purposes. 
There are practices that are bad for security, but make perfect sense for these use cases: using default values, minimal configuration, tinkering with new functionality that could introduce a security misconfiguration, and even purposefully using insecure passwords and disabling security measures like Kyverno for convenience. Many applications have default username and password combinations stored in the public git repo; these insecure default credentials and configurations are intended to be overridden during production deployments. -When deploying a dev / demo environment there is a high chance of deploying Big Bang in an insecure configuration. Such deployments should be treated as if they could become easily compromised if made publicly accessible. +When deploying a dev/demo environment there is a high chance of deploying Big Bang in an insecure configuration. Such deployments should be treated as if they could become easily compromised if made publicly accessible. -### Recommended Security Guidelines for Dev / Demo Deployments +### Recommended Security Guidelines for Dev/Demo Deployments -* IDEALLY these environments should be spun up on VMs with private IP addresses that are not publicly accessible. Local network access or an authenticated remote network access solution like a VPN or [sshuttle](https://github.com/sshuttle/sshuttle#readme) should be used to reach the private network. -* DO NOT deploy publicly routable dev / demo clusters into shared VPCs (like a shared dev environment VPCs) or on VMs with IAM Roles attached. If the demo cluster were compromised, an adversary might be able to use it as a stepping stone to move deeper into an environment. +* Ideally, these environments should be spun up on VMs with private IP addresses that are not publicly accessible. Local network access or an authenticated remote network access solution like a VPN or [sshuttle](https://github.com/sshuttle/sshuttle#readme) should be used to reach the private network. 
+* DO NOT deploy publicly routable dev/demo clusters into shared VPCs (e.g., a shared dev environment VPC) or on VMs with IAM Roles attached. If the demo cluster were compromised, an adversary might be able to use it as a stepping stone to move deeper into an environment. * If you want to safely demo on Cloud Provider VMs with public IPs you must follow these guidelines: * Prevent Compromise: - * Use firewalls that only allow the 2 VMs to talk to each other and your whitelisted IP. + * Use firewalls that only allow the two VMs to talk to each other and your whitelisted IP. * Limit Blast Radius of Potential Compromise: * Only deploy to an isolated VPC, not a shared VPC. * Only deploy to VMs with no IAM roles/rights attached. ## Network Requirements Notice + This install guide by default requires network connectivity from your server to external DNS providers, specifically the Google DNS server at `8.8.8.8`, you can test that your node has connectivity to this DNS server by running the command `nslookup google.com 8.8.8.8` (run this from the node). If this command returns `DNS request timed out`, then you will need to follow the steps in [troubleshooting](#Troubleshooting) to change the upstream DNS server in your kubernetes cluster to your networks DNS server. @@ -48,12 +50,11 @@ Additionally, if your network has a proxy that has custom/internal SSL certifica `Details of how each prerequisite/dependency is quickly satisfied:` -* Operating System Prerequisite: Any Linux distribution that supports Docker should work. -* Operating System Pre-configuration: This quick start includes easy paste-able commands to quickly satisfy this prerequisite. -* Kubernetes Cluster Prerequisite: is implemented using k3d (k3s in Docker) -* Default Storage Class Prerequisite: k3d ships with a local volume storage class. 
-* Support for automated provisioning of Kubernetes Service of type LB Prerequisite: is implemented by taking advantage of k3d's ability to easily map port 443 of the VM to port 443 of a Dockerized LB that forwards traffic to a single Istio Ingress Gateway. -Important limitations of this quick start guide's implementation of k3d to be aware of: +* **Operating System Prerequisite:** Any Linux distribution that supports Docker should work. +* **Operating System Pre-configuration:** This quick start includes easy paste-able commands to quickly satisfy this prerequisite. +* **Kubernetes Cluster Prerequisite:** is implemented using k3d (k3s in Docker) +* **Default Storage Class Prerequisite:** k3d ships with a local volume storage class. +* **Support for automated provisioning of Kubernetes Service of type LB Prerequisite:** is implemented by taking advantage of k3d's ability to easily map port 443 of the VM to port 443 of a Dockerized LB that forwards traffic to a single Istio Ingress Gateway. Important limitations of this quick start guide's implementation of k3d to be aware of: * Multiple Ingress Gateways aren't supported by this implementation as they would each require their own LB, and this trick of using the host's port 443 only works for automated provisioning of a single service of type LB that leverages port 443. * Multiple Ingress Gateways makes a demoable/tinkerable KeyCloak and locally hosted SSO deployment much easier. * Multiple Ingress Gateways can be demoed on k3d if configuration tweaks are made, MetalLB is used, and you are developing using a local Linux Desktop. (network connectivity limitations of the implementation would only allow a the web browser on the k3d host server to see the webpages.) 
@@ -70,7 +71,7 @@ Important limitations of this quick start guide's implementation of k3d to be aw The following requirements are recommended for Demonstration Purposes: * 1 Virtual Machine with 32GB RAM, 8-Core CPU (t3a.2xlarge for AWS users), and 100GB of disk space should be sufficient. -* Ubuntu Server 20.04 LTS (Ubuntu comes up slightly faster than CentOS, in reality any Linux distribution with Docker installed should work) +* Ubuntu Server 20.04 LTS (Ubuntu comes up slightly faster than CentOS, in reality any Linux distribution with Docker installed should work). * Most Cloud Service Provider provisioned VMs default to passwordless sudo being preconfigured, but if you're doing local development or a bare metal deployment then it's recommended that you configure passwordless sudo. * Steps for configuring passwordless sudo: [(source)](https://unix.stackexchange.com/questions/468416/setting-up-passwordless-sudo-on-linux-distributions) 1. `sudo visudo` @@ -90,14 +91,14 @@ The following requirements are recommended for Demonstration Purposes: * Network connectivity to Virtual Machine (provisioning with a public IP and a security group locked down to your IP should work. Otherwise a Bare Metal server or even a Vagrant Box Virtual Machine configured for remote ssh works fine.) -> Note: If your workstation has Docker, sufficient compute, and has ports 80, 443, and 6443 free, you can use your workstation in place of a remote virtual machine and do local development. +> **NOTE**: If your workstation has Docker, sufficient compute, and has ports 80, 443, and 6443 free, you can use your workstation in place of a remote virtual machine and do local development. ## Step 2: SSH to Remote VM -* ssh and passwordless sudo should be configured on the remote machine +* ssh and passwordless sudo should be configured on the remote machine. * You can skip this step if you are doing local development. -1. Setup SSH +1. Set up SSH. 
```shell # [admin@Unix_Laptop:~] @@ -115,7 +116,7 @@ The following requirements are recommended for Demonstration Purposes: echo "$temp" | tee -a ~/.ssh/config #tee -a, appends to preexisting config file ``` -1. SSH to instance +1. SSH to instance. ```shell # [admin@Laptop:~] @@ -126,9 +127,9 @@ The following requirements are recommended for Demonstration Purposes: ## Step 3: Install Prerequisite Software -Note: This guide follows the DevOps best practice of left-shifting feedback on mistakes and surfacing errors as early in the process as possible. This is done by leveraging tests and verification commands. +**NOTE:** This guide follows the DevOps best practice of left-shifting feedback on mistakes and surfacing errors as early in the process as possible. This is done by leveraging tests and verification commands. -1. Install Git +1. Install Git. ```shell sudo apt install git -y @@ -145,7 +146,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m # curl -fsSL https://get.docker.com | bash && sudo usermod --append --groups docker $USER ``` -1. Logout and login to allow the `usermod` change to take effect. +1. Log out and login to allow the `usermod` change to take effect. ```shell # [ubuntu@Ubuntu_VM:~] @@ -157,7 +158,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m ssh k3d ``` -1. Verify Docker Installation +1. Verify Docker Installation. ```shell # [ubuntu@Ubuntu_VM:~] @@ -168,7 +169,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m Hello from Docker! ``` -1. Install k3d +1. Install k3d. ```shell # [ubuntu@Ubuntu_VM:~] @@ -187,7 +188,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m # wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=v5.5.1 bash ``` -1. Verify k3d installation +1. Verify k3d installation. 
```shell # [ubuntu@Ubuntu_VM:~] @@ -199,7 +200,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m k3s version v1.26.4-k3s1 (default) ``` -1. Install kubectl +1. Install kubectl. ```shell # [ubuntu@Ubuntu_VM:~] @@ -217,7 +218,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m sudo ln -s /usr/local/bin/kubectl /usr/local/bin/k ``` -1. Verify kubectl installation +1. Verify kubectl installation. ```shell # [ubuntu@Ubuntu_VM:~] @@ -228,7 +229,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.5", GitCommit:"c285e781331a3785a7f436042c65c5641ce8a9e9", GitTreeState:"clean", BuildDate:"2022-03-16T15:58:47Z", GoVersion:"go1.17.8", Compiler:"gc", Platform:"linux/amd64"} ``` -1. Install Kustomize +1. Install Kustomize. ```shell # [ubuntu@Ubuntu_VM:~] @@ -249,7 +250,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m # sudo mv kustomize /usr/bin/kustomize ``` -1. Verify Kustomize installation +1. Verify Kustomize installation. ```shell # [ubuntu@Ubuntu_VM:~] @@ -260,7 +261,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m {Version:kustomize/v4.5.4 GitCommit:cf3a452ddd6f83945d39d582243b8592ec627ae3 BuildDate:2022-03-28T23:12:45Z GoOs:linux GoArch:amd64} ``` -1. Install Helm +1. Install Helm. ```shell # [ubuntu@Ubuntu_VM:~] @@ -279,7 +280,7 @@ Note: This guide follows the DevOps best practice of left-shifting feedback on m # curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash ``` -1. Verify Helm installation +1. Verify Helm installation. ```shell # [ubuntu@Ubuntu_VM:~] @@ -410,17 +411,17 @@ k3d-k3s-default-server-0 Ready control-plane,master 11m v1.22.7+k3s1 1. 
Here we continue to follow the DevOps best practice of enabling early left-shifted feedback whenever possible; Before adding credentials to a configuration file and not finding out there is an issue until after we see an ImagePullBackOff error during deployment, we will do a quick left-shifted verification of the credentials. -1. Look up your IronBank image pull credentials +1. Look up your IronBank image pull credentials. - 1. In a web browser go to [https://registry1.dso.mil](https://registry1.dso.mil) - 1. Login via OIDC provider - 1. In the top right of the page, click your name, and then User Profile - 1. Your image pull username is labeled "Username" - 1. Your image pull password is labeled "CLI secret" + 1. In a web browser go to [https://registry1.dso.mil](https://registry1.dso.mil). + 1. Login via OIDC provider. + 1. In the top right of the page, click your name, and then User Profile. + 1. Your image pull username is labeled "Username." + 1. Your image pull password is labeled "CLI secret." - > Note: The image pull credentials are tied to the life cycle of an OIDC token which expires after ~3 days, so if 3 days have passed since your last login to IronBank, the credentials will stop working until you re-login to the [https://registry1.dso.mil](https://registry1.dso.mil) GUI + > **NOTE:** The image pull credentials are tied to the life cycle of an OIDC token which expires after ~3 days, so if 3 days have passed since your last login to IronBank, the credentials will stop working until you re-login to the [https://registry1.dso.mil](https://registry1.dso.mil) GUI. -1. Verify your credentials work +1. Verify your credentials work. ```shell # [ubuntu@Ubuntu_VM:~] @@ -457,7 +458,7 @@ cd ~ HEAD detached at (latest version) ``` -> HEAD is git speak for current context within a tree of commits +> **NOTE:** HEAD is git speak for current context within a tree of commits. 
## Step 8: Install Flux @@ -617,14 +618,14 @@ This makes the command more idempotent by allowing the exact same command to wor bigbang is the name of the helm release that you'd see if you run `helm list -n=bigbang`. `$HOME/bigbang/chart` is a reference to the helm chart being installed. `--values https://repo1.dso.mil/big-bang/bigbang/-/raw/master/chart/ingress-certs.yaml`: -References demonstration HTTPS certificates embedded in the public repository. The *.bigbang.dev wildcard certificate is signed by Let's Encrypt, a free public internet Certificate Authority. Note the URL path to the copy of the cert on master branch is used instead of `$HOME/bigbang/chart/ingress-certs.yaml`, because the Let's Encrypt certs expire after 3 months, and if you deploy a tagged release of BigBang, like 1.15.0, the version of the cert stored in the tagged git commit / release of Big Bang could be expired. Referencing the master branches copy via URL ensures you receive the latest version of the cert, which won't be expired. +References demonstration HTTPS certificates embedded in the public repository. The *.bigbang.dev wildcard certificate is signed by Let's Encrypt, a free public internet Certificate Authority. Note the URL path to the copy of the cert on master branch is used instead of `$HOME/bigbang/chart/ingress-certs.yaml`, because the Let's Encrypt certs expire after 3 months, and if you deploy a tagged release of BigBang, like 1.15.0, the version of the cert stored in the tagged git commit/release of Big Bang could be expired. Referencing the master branches copy via URL ensures you receive the latest version of the cert, which won't be expired. `--namespace=bigbang --create-namespace`: Means it will install the bigbang helm chart in the bigbang namespace and create the namespace if it doesn't exist. 
## Step 11: Verify Big Bang Has Had Enough Time To Finish Installing -* If you try to run the command in Step 11 too soon, you'll see an ignorable temporary error message +* If you try to run the command in Step 11 too soon, you'll see an ignorable temporary error message. ```shell # [ubuntu@Ubuntu_VM:~] @@ -642,7 +643,7 @@ Means it will install the bigbang helm chart in the bigbang namespace and create kubectl get po -A ``` -* If after running `kubectl get po -A` (which is the shorthand of `kubectl get pods --all-namespaces`) you see something like the following, then you need to wait longer +* If after running `kubectl get po -A` (which is the shorthand of `kubectl get pods --all-namespaces`) you see something like the following, then you need to wait longer. ```console NAMESPACE NAME READY STATUS RESTARTS AGE @@ -667,7 +668,7 @@ Means it will install the bigbang helm chart in the bigbang namespace and create logging logging-ek-es-master-0 0/2 Init:0/2 0 37s ``` -* Wait up to 10 minutes then re-run `kubectl get po -A`, until all pods show STATUS Running +* Wait up to 10 minutes then re-run `kubectl get po -A`, until all pods show STATUS Running. * `helm list -n=bigbang` should also show STATUS deployed @@ -688,7 +689,7 @@ Means it will install the bigbang helm chart in the bigbang namespace and create ## Step 12: Edit Your Workstation’s Hosts File To Access the Web Pages Hosted on the Big Bang Cluster -Run the following command, which is the short hand equivalent of `kubectl get virtualservices --all-namespaces` to see a list of websites you'll need to add to your hosts file +Run the following command, which is the short hand equivalent of `kubectl get virtualservices --all-namespaces` to see a list of websites you'll need to add to your hosts file. ```shell kubectl get vs -A @@ -742,7 +743,7 @@ Note, default credentials for Big Bang packages can be found [here](../using-big Here's an example of post deployment customization of Big Bang. 
After looking at <https://repo1.dso.mil/big-bang/bigbang/-/blob/master/chart/values.yaml> -It should make sense that the following is a valid edit +It should make sense that the following is a valid edit. ```shell # [ubuntu@Ubuntu_VM:~] @@ -773,7 +774,7 @@ kubectl get po -n=argocd ``` ## Step 15: Implementing Mission Applications within your bigbang environment -BigBang by itself serves as a jumping off point, but many users will want to implement their own mission specific applications in to the cluster. BigBang has implemented a `packages:` and `wrapper:` section to enable and support this in a way that ensures connectivity between your mission specific requirements and existing BigBang utilities, such as istio, the monitoring stack, and network policy management. [Here](https://repo1.dso.mil/big-bang/bigbang/-/blob/master/docs/guides/deployment-scenarios/extra-package-deployment.md) is the documentation for the `packages` utility. +Big Bang by itself serves as a jumping off point, but many users will want to implement their own mission specific applications into the cluster. BigBang has implemented a `packages:` and `wrapper:` section to enable and support this in a way that ensures connectivity between your mission specific requirements and existing BigBang utilities, such as istio, the monitoring stack, and network policy management. [Here](https://repo1.dso.mil/big-bang/bigbang/-/blob/master/docs/guides/deployment-scenarios/extra-package-deployment.md) is the documentation for the `packages` utility. We will implement a simple additional utility as a proof of concept, starting with a basic podinfo client. This will use the `wrapper` key to provide integration between bigbang and the Mission Application, without requiring the full Istio configuration to be placed inside BigBang specific keys of the dependent chart. 
@@ -889,7 +890,7 @@ This section will provide guidance for troubleshooting problems that may occur d ### Changing CoreDNS upstream DNS server: After completing step 5, if you are unable to connect to external DNS providers using the command `nslookup google.com 8.8.8.8`, to test the connection. Then use the steps below to change the upstream DNS server to your networks DNS server. Please note that this change will not perist after a restart of the host server therefore, if you restart or shutdown your server you will need to re-apply these changes to CoreDNS. -1. Open config editor to change the CoreDNS pod configuration +1. Open config editor to change the CoreDNS pod configuration. ```shell kubectl -n kube-system edit configmaps CoreDNS -o yaml @@ -907,30 +908,34 @@ After completing step 5, if you are unable to connect to external DNS providers forward . <DNS Server IP> ``` -1. Save changes in editor (for vi use `:wq`) +1. Save changes in editor (for vi use `:wq`). + 1. Verify changes in terminal output that prints new config ### Useful Commands for Obtaining Detailed Logs from Kubernetes Cluster or Containers -* Print all pods including information related to the status of each pod + +* Print all pods including information related to the status of each pod. ```shell kubectl get pods --all-namespaces ``` -* Print logs for specified pod +* Print logs for specified pod. ```shell kubectl logs <pod name> -n=<namespace of pod> ``` -* Print a dump of relevent information for debugging and diagnosing your kubernetes cluster +* Print a dump of relevent information for debugging and diagnosing your kubernetes cluster. 
```shell kubectl cluster-info dump ``` -### Documentation References for command line tools used +### Documentation References for Command Line Tools Used + * Kubectl - https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands * k3d - https://k3d.io/v5.5.1/usage/k3s/ * Docker - https://docs.docker.com/desktop/linux/troubleshoot/#diagnosing-from-the-terminal * Helm - https://helm.sh/docs/helm/helm/ -### NeuVector "Failed to get container" +### NeuVector "Failed to Get Container" + If the NeuVector pods come online but give errors like: ```shell @@ -946,7 +951,8 @@ cat /sys/fs/cgroup/cgroup.controllers If you get a "No such file or directory", that means its running v1, and needs to be running v2. Follow the documentation here - https://rootlesscontaine.rs/getting-started/common/cgroup2/#checking-whether-cgroup-v2-is-already-enabled to enable v2 -### "Too many open files" +### "Too Many Open Files" + If the NeuVector pods fail to open, and you look at the K8s logs only to find that it's giving the "too many open files" error, you'll need to increase your inotify max's. Consider grabbing your current fs.inotify.max values and increasing them like the following ```shell @@ -955,10 +961,12 @@ sudo sysctl fs.inotify.max_user_instances=512 sudo sysctl fs.inotify.max_user_watches=501208 ``` ### Failed to provide IP to istio-system/public-ingressgateway -As one option to provide IP to the istio-system/public-ingressgateway, metallb can be run. The following steps will demonstrate a standard configuration, however, some changes may need to be made for each individual system (specific /ets/hosts addresses, etc.) -#### Step 1: K3d deploy -To facilitate metallb, servicelb needs to be disabled on the initial install. Replace the above k3d deploy command with the following. +As one option to provide IP to the istio-system/public-ingressgateway, metallb can be run. 
The following steps will demonstrate a standard configuration, however, some changes may need to be made for each individual system (e.g., specific /etc/hosts addresses).
You will need to fill in the values used for the subnet. + ```shell export PASSTHROUGH_GATEWAY_IP=172.x.x.x export PUBLIC_GATEWAY_IP=172.x.x.x @@ -1034,10 +1046,12 @@ Lastly configure /etc/hosts/ with the new IP Addresses (you can add your own as From this point continue with the helm upgrade command above. ### WSL2 -This section will provide guidance for troubleshooting problems that may occur during your Big Bang installation specifically involving WSL2 -#### NeuVector "Failed to get container" -In you receive a similar error to the above "Failed to get container" with NeuVector it could be because of the cgroup configurations in WSL2. WSL2 often tries to run both cgroup and cgroup v2 in a unified manner which can confuse docker and affect deployments. To remedy this you need to create a .wslconfig file in the C:\Users\<UserName>\ directory. In this file you need to add: +This section will provide guidance for troubleshooting problems that may occur during your Big Bang installation specifically involving WSL2. + +#### NeuVector "Failed to Get Container" + +In you receive a similar error to the above "Failed to get container" with NeuVector it could be because of the cgroup configurations in WSL2. WSL2 often tries to run both cgroup and cgroup v2 in a unified manner which can confuse docker and affect deployments. To remedy this you need to create a .wslconfig file in the C:\Users\<UserName>\ directory. In this file you need to add the following: ```shell [wsl2] @@ -1046,14 +1060,15 @@ kernelCommandLine = cgroup_no_v1=all Once created you need to restart wsl2. 
-If this doesn't remedy the issue and the cgroup.controllers file is still located in the /sys/fs/cgroup/unified directory you may have to modify /etc/fstab and add +If this doesn't remedy the issue and the cgroup.controllers file is still located in the /sys/fs/cgroup/unified directory you may have to modify /etc/fstab and add the following: ```shell cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate 0 0 ``` -#### Container fails to start "Not enough memory" -Wsl2 limits the amount of memory available to half of what your computer has. If you have 32g or less (16g or less available) this is often not enough to run all of the standard big bang services. If you have more available memory you can modify the initial limit by modifying (or creating) the C:\Users\<UserName>\.wslconfig file by adding: +#### Container Fails to Start: "Not Enough Memory" + +Wsl2 limits the amount of memory available to half of what your computer has. If you have 32g or less (16g or less available) this is often not enough to run all of the standard big bang services. 
If you have more available memory you can modify the initial limit by modifying (or creating) the C:\Users\<UserName>\.wslconfig file by adding: ```shell [wsl2] diff --git a/docs/guides/deployment-scenarios/sso-quickstart.md b/docs/guides/deployment-scenarios/sso-quickstart.md index 54335e401719ec3aa737c084f8bba0e28ee9e9ca..0972bd48a273d9d8399489dcd22aa2a8ad22ff57 100644 --- a/docs/guides/deployment-scenarios/sso-quickstart.md +++ b/docs/guides/deployment-scenarios/sso-quickstart.md @@ -3,15 +3,16 @@ [[_TOC_]] ## Video Walkthrough -A 54min speed run with explanations video walkthrough of this sso quickstart guide can be found on the following 2 mirrored locations: + +A 54-minute speed run with explanations video walkthrough of this Single Sign-On (SSO) quick-start guide can be found on the following two mirrored locations: * [Google Drive - Video Mirror](https://drive.google.com/file/d/1xzRKhFQy4WXW97YWUFpixclLGAKfgA6Z/preview) * [Repo1 - Video Mirror](https://repo1.dso.mil/platform-one/bullhorn-delivery-static-assets/-/blob/master/big_bang/bigbang_sso_quickstart.mp4) -> SSO values have changed since these videos were created. The old values used in the videos should still work, but you will receive warnings that they have been deprecated. +> SSO values have changed since these videos were created. The old values used in the videos should still work, but you will receive warnings that they have been deprecated. ## Blue Team Knowledge Drop -Imagine a scenario where <https://authdemo.bigbang.dev> represents a mock-up of a custom-built mission application lacking built-in SSO, authentication, or authorization. By integrating Auth Service, we can create multiple layers of defense-in-depth. This service allows only authenticated users to access the application, enforces multi-factor authentication (MFA) for these users, and requires them to be authorized based on their group membership within their Identity Provider. 
This setup enables safe self-registration of users without compromising security. +Imagine a scenario where <https://authdemo.bigbang.dev> represents a mock-up of a custom-built mission application lacking built-in SSO, authentication, or authorization. By integrating Auth Service, we can create multiple layers of defense-in-depth. This service allows only authenticated users to access the application, enforces Multi-Factor Authentication (MFA) for these users, and requires them to be authorized based on their group membership within their Identity Provider. This setup enables safe self-registration of users without compromising security. ### Enhancing Existing Applications with Authentication Proxies @@ -21,48 +22,52 @@ Auth Service's Authentication Proxy offers significant security enhancements. Ev Istio, AuthService, and Keycloak are not only Free Open Source Software (FOSS) but also operate efficiently in internet-disconnected environments. We will demonstrate this capability using only Kubernetes DNS and workstation hostfile edits, avoiding the need for conventional LAN/Internet DNS configurations. -## Overview +# Overview + +This SSO quick-start guide explains how to set up an SSO demo environment, from scratch within two hours, that will allow you to demo Auth Service's functionality. You'll gain hands-on configuration experience with Auth Service, Keycloak, and a Mock Mission Application. + +# Steps -This SSO Quick Start Guide explains how to set up an SSO demo environment, from scratch within two hours, that will allow you to demo Auth Service's functionality. You'll gain hands-on configuration experience with Auth Service, Keycloak, and a Mock Mission Application. +1. This document assumes you have already gone through and are familiar with the generic quick-start guide. +1. Given two Virtual Machines (VMs) (i.e, each with 8 CPU cores/32 GB ram) that are each set up for SSH, turn the two VMs into two single node k3d clusters. -**Steps:** +Why two VMs? 
Two reasons: -1. This document assumes you have already gone through and are familiar with the generic quick start guide. -1. Given 2 VMs (each with 8 CPU cores / 32 GB ram) that are each set up for ssh, turn the 2 VMs into 2 single node k3d clusters. -Why 2 VMs? 2 reasons: 1. It works around k3d only supporting 1 LB, but Keycloak needs its LB with TCP_PASSTHROUGH. -1. This mimics the way the Big Bang team recommends Keycloak be deployed in production, giving it its dedicated cluster (Note: from a technical standpoint nothing is stopping it from being hosted on the same cluster). -1. Use Big Bang demo workflow to turn 1 k3d cluster into a Keycloak Cluster. -1. Use Big Bang demo workflow to turn 1 k3d cluster into a Workload Cluster. +1. This mimics the way the Big Bang team recommends Keycloak be deployed in production, giving it its dedicated cluster (**NOTE:** from a technical standpoint nothing is stopping it from being hosted on the same cluster). +1. Use Big Bang demo workflow to turn one k3d cluster into a Keycloak Cluster. +1. Use Big Bang demo workflow to turn one k3d cluster into a Workload Cluster. 1. In the Keycloak Cluster: - * Deploy Keycloak + * Deploy Keycloak. * Create a Human User and Service Account for the authdemo service. 1. In the Workload Cluster: - * Deploy a mock mission application + * Deploy a mock mission application. * Protect the mock mission application, by deploying and configuring auth service to interface with Keycloak and require users to log in to Keycloak and be in the correct authorization group before being able to access the mock mission application. -### Differences between this and the generic quick start +## Differences between this Guide and the Generic Quick-Start Guide * Topics explained in previous quick start guides won't have notes or they will be less detailed. * The previous quick start supported deploying k3d to either localhost or remote VM, this quick start only supports deployment to remote VMs. 
-* The previous quick start supported multiple Linux distributions, this one requires Ubuntu 20.04, and it must be configured for passwordless sudo (this guide has more automation of prerequisites, so we needed a standard to automate against.) -* The automation also assumes Admin's Laptop has a Unix Shell. (Mac, Linux, or Windows Subsystem for Linux) -* This quick start assumes you have kubectl installed on your Administrator Workstation +* The previous quick start supported multiple Linux distributions, this one requires Ubuntu 20.04, and it must be configured for passwordless sudo (**NOTE:** this guide has more automation of prerequisites, so we needed a standard to automate against). +* The automation also assumes Admin's Laptop has a Unix Shell (Mac, Linux, or Windows Subsystem for Linux). +* This quick start assumes you have kubectl installed on your Administrator Workstation. -### Additional Auth Service and Keycloak documentation can be found in these locations +## Additional Auth Service and Keycloak Documentation + +Additional Auth service and Keycloack documentation can be found in the following locations: * [Authservice](https://repo1.dso.mil/big-bang/product/packages/authservice) * [Authservice Architecture](../../understanding-bigbang/package-architecture/authservice.md) * [Keycloak](https://repo1.dso.mil/big-bang/product/packages/keycloak) * [Keycloak Architecture](../../understanding-bigbang/package-architecture/keycloak.md) -## Step 1: Provision 2 Virtual Machines +## Step 1: Provision Two Virtual Machines -* 2 Virtual Machines each with 32GB RAM, 8-Core CPU (t3a.2xlarge for AWS users), and 100GB of disk space should be sufficient. +* Two Virtual Machines each with 32GB RAM, 8-Core CPU (i.e., t3a.2xlarge for AWS users), and 100GB of disk space should be sufficient. -## Step 2: Setup SSH to both VMs +## Step 2: Set up SSH to Both VMs -1. Setup SSH to both VMs +1. Set up SSH to both VMs. ```shell # [admin@Unix_Laptop:~] @@ -85,7 +90,7 @@ Why 2 VMs? 
2 reasons: echo "$temp" | tee -a ~/.ssh/config #tee -a, appends to preexisting config file ``` -1. Verify SSH works for both VMs +1. Verify SSH works for both VMs. ```shell # [admin@Laptop:~] @@ -103,13 +108,13 @@ Why 2 VMs? 2 reasons: # [admin@Laptop:~] ``` -## Step 3: Prep work - Install dependencies and configure both VMs +## Step 3: Prep work: Install Dependencies and Configure both VMs -1. Set some Variables and push them to each VM - * We'll pass some environment variables into the VMs that will help with automation - * We'll also update the PS1 var so we can tell the 2 machines apart when ssh'd into. - * All of the commands in the following section are run from the Admin Laptop - * Note: The REGISTRY1_USERNAME and REGISTRY1_PASSWORD in the code block below, can't be blindly copy pasted. +1. Set variables and push them to each VM. + * We'll pass some environment variables into the VMs that will help with automation, + * We'll also update the PS1 var so we can tell the two machines apart when ssh'd into. + * All of the commands in the following section are run from the Admin Laptop. + * **NOTE:** The REGISTRY1_USERNAME and REGISTRY1_PASSWORD in the code block below can't be blindly copy pasted. ```shell # [admin@Laptop:~] @@ -125,7 +130,7 @@ Why 2 VMs? 2 reasons: REGISTRY1_PASSWORD="REPLACE_ME" ``` - * Note: The following code block can be copy pasted into the terminal as is + * **NOTE:** The following code block can be copy pasted into the terminal as-is. ```shell # [admin@Laptop:~] @@ -139,7 +144,7 @@ Why 2 VMs? 2 reasons: echo "Please manually verify that the IPs of your keycloak and workload k3d VMs look correct before moving on." ``` - * Copy paste the following code block into your workstation's unix terminal. + * Copy and paste the following code block into your workstation's unix terminal. (This is using cat command to generate files. Specifically scripts templatized using environment variables.) ```shell @@ -186,7 +191,7 @@ Why 2 VMs? 
2 reasons: EOFworkload-k3d-prepwork-commandsEOF ``` - * Run the following against your Laptop / Workstation's Unix terminal. + * Run the following against your Laptop/Workstation's Unix terminal. ```shell # [admin@Laptop:~] @@ -212,7 +217,7 @@ Why 2 VMs? 2 reasons: wait command waits for background processes to finish ``` -1. Take a look at one of the VMs to understand what happened +1. Take a look at one of the VMs to understand what happened. ```shell # [admin@Laptop:~] @@ -235,8 +240,8 @@ Why 2 VMs? 2 reasons: # [admin@Laptop:~] ``` -1. Configure host OS prerequisites and install prerequisite software on both VMs - * Copy paste the following to generate an automation script +1. Configure host OS prerequisites and install prerequisite software on both VMs. + * Copy and paste the following to generate an automation script. ```shell # [admin@Laptop:~] @@ -287,7 +292,7 @@ Why 2 VMs? 2 reasons: EOFshared-k3d-prepwork-commandsEOF ``` - * Copy paste the following to run the above prerequisite automation script against both VMs + * Copy paste the following to run the above prerequisite automation script against both VMs. ```shell # [admin@Laptop:~] @@ -297,7 +302,7 @@ Why 2 VMs? 2 reasons: wait ``` - * Copy paste the following to run validation checks against both VMs + * Copy paste the following to run validation checks against both VMs. ```shell # [admin@Laptop:~] @@ -314,11 +319,10 @@ Why 2 VMs? 2 reasons: ssh workload-cluster < ~/qs/shared-k3d-prepwork-verification-commands.txt ``` -## Step 4: Create k3d cluster on both VMs and make sure you have access to both +## Step 4: Create k3d Cluster on both VMs (and make sure you have access to both) ```text -Note: There's no need to copy paste commands from this text box, - it's intended to explain some of the shell below. +**NOTE:** There's no need to copy paste commands from this text box; it's intended to explain some of the shell below. If you were to copy paste the following into your laptop/workstation's terminal. 
ssh keycloak-cluster 'env | grep K3D_IP' @@ -330,7 +334,7 @@ export K3D_IP=\$(cat ~/.bashrc | grep K3D_IP | cut -d \" -f 2) (It's a workaround that allows the env var values to be used in a non interactive shell) ``` -* Create a k3d cluster on both VMs +* Create a k3d cluster on both VMs. ```shell # [admin@Laptop:~] @@ -362,7 +366,7 @@ ssh workload-cluster < ~/qs/shared-k3d-install-commands.txt & wait ``` -* Copy pasting these verification commands, will make sure you have access to both clusters. +* Copying and pasting these verification commands will make sure you have access to both clusters. ```shell # [admin@Laptop:~] @@ -378,7 +382,7 @@ kubectl get node ## Step 5: Clone Big Bang and Install Flux on both Clusters -* Note after copy pasting the following block of automation, it might look stuck on "networkpolicy.networking.k8s.io/allow-webhooks created", the install_flux.sh script has logic near the end that waits for a healthy deployment, so just wait about 4 minutes. After which `kubectl get po -n=flux-system` should show a healthy deployment and you should be able to interactively use your terminal again. +* **NOTE:** after copying and pasting the following block of automation, it might look stuck on "networkpolicy.networking.k8s.io/allow-webhooks created", the install_flux.sh script has logic near the end that waits for a healthy deployment, so just wait about four minutes. After which `kubectl get po -n=flux-system` should show a healthy deployment and you should be able to interactively use your terminal again. ```shell # [admin@Laptop:~] @@ -399,7 +403,7 @@ ssh workload-cluster < ~/qs/shared-flux-install-commands.txt & wait ``` -* Note: It's possible for the above flux install commands to give a false error message, along the lines of "error: timed out waiting for the condition on deployments/helm-controller", if the deployment takes longer than 5 minutes, the wait for healthy logic will time out. 
If you follow these steps using cloud service provider infrastructure, you're unlikely to see the error. If you follow these steps on a home network lab with slower download speed you might see the error message, its ignorable, and you can use the following copy pasteable command block to verify health of the flux pods. +* **NOTE:** It's possible for the above flux install commands to give a false error message, along the lines of "error: timed out waiting for the condition on deployments/helm-controller." If the deployment takes longer than five minutes, the wait for healthy logic will time out. If you follow these steps using cloud service provider infrastructure, you're unlikely to see the error. If you follow these steps on a home network lab with slower download speed you might see the error message, its ignorable, and you can use the following copy pasteable command block to verify health of the flux pods. ```shell # [admin@Laptop:~] @@ -514,7 +518,7 @@ EOFdeploy-workloadsEOF ssh workload-cluster < ~/qs/deploy-workloads.txt ``` -* The following command can be used to check the status of the deployment. You can optionally re-run `kubectl get hr -A` multiple times until you see READY: True, but there's no need to wait for it to finish before moving on. +* The following command can be used to check the status of the deployment. You can optionally re-run `kubectl get hr -A` multiple times until you see READY, but there's no need to wait for it to finish before moving on. 
```shell sleep 5 @@ -584,7 +588,7 @@ EOFdeploy-keycloakEOF ssh keycloak-cluster < ~/qs/deploy-keycloak.txt ``` -## Step 8: Edit your workstation's Hosts file to access the web pages hosted on the Big Bang Clusters +## Step 8: Edit your Workstation's Hosts File to Access the Web Pages Hosted on the Big Bang Clusters ### Linux/Mac Users @@ -610,14 +614,11 @@ cat /etc/hosts * Edit similarly using method mentioned in the generic quickstart -## Step 9: Make sure the clusters have had enough time to finish their deployments +## Step 9: Make sure the Clusters have had Enough Time to Finish their Deployments -* Note: - After copy pasting the following, you may need to wait up to 10 minutes. If you're too - fast you may see a temporary error about pod keycloak-0 not found. It's recommended to - copy paste this block of verification commands a 2nd time after 10 minutes have passed. +* **NOTE:** After copy pasting the following, you may need to wait up to 10 minutes. If you're too fast, you may see a temporary error about pod keycloak-0 not found. It's recommended to copy and paste this block of verification commands a second time after 10 minutes have passed. -* Note when you run `kubectl get svc -n=istio-system`, against each cluster, verify that EXTERNAL-IP isn't stuck in pending. +* **NOTE:** when you run `kubectl get svc -n=istio-system`, against each cluster, verify that EXTERNAL-IP isn't stuck in pending. 
```shell # [admin@Laptop:~] @@ -636,13 +637,13 @@ kubectl get hr -A kubectl get svc -n=istio-system ``` -## Step 10: Verify that you can access websites hosted in both clusters +## Step 10: Verify that you can Access Websites Hosted in Both Clusters * In a Web Browser visit the following 2 webpages * <https://keycloak.bigbang.dev> * <https://grafana.bigbang.dev> -## Step 11: Deploy a mock mission application to the workload cluster +## Step 11: Deploy a Mock Mission Application to the Workload Cluster ```shell # [admin@Laptop:~] @@ -687,31 +688,31 @@ export KUBECONFIG=$HOME/.kube/workload-cluster kubectl wait --for=condition=available deployment/podinfo --timeout=3m -n=mock-mission-app ``` -## Step 12: Visit the newly added webpage +## Step 12: Visit the Newly Added Webpage -* In a browser navigate to <https://authdemo.bigbang.dev> -* Note: authdemo currently isn't protected by the authservice AuthN/AuthZ proxy, the next steps configure that protection. +* In a browser, navigate to <https://authdemo.bigbang.dev>. +* **NOTE:** Authdemo currently isn't protected by the authservice AuthN/AuthZ proxy. The next steps configure that protection. ## Step 13: Create a Human User Account in Keycloak -1. Visit <https://keycloak.bigbang.dev> -1. Follow the self-registration link or visit it directly <https://keycloak.bigbang.dev/register> -1. Create a demo account, the email you specify doesn't have to exist for demo purposes, make sure you write down the demo username and password. +1. Visit <https://keycloak.bigbang.dev>. +1. Follow the self-registration link or visit it directly <https://keycloak.bigbang.dev/register>. +1. Create a demo account. The email you specify doesn't have to exist for demo purposes. Make sure you write down the demo username and password. 1. Create an MFA device. -1. It'll say "You need to verify your email address to activate your account" (You can ignore that and close the page.) -1. Visit <https://keycloak.bigbang.dev/auth/admin> -1. 
Log in as a keycloak admin, using the default creds of admin:password - (Note: The admin's initial default credentials can be specified in code, by updating helm values.) +1. You'll recieve a message that reads: "You need to verify your email address to activate your account." You can ignore that and close the page. +1. Visit <https://keycloak.bigbang.dev/auth/admin>. +1. Log in as a keycloak admin, using the default creds of admin:password. + (**NOTE:** The admin's initial default credentials can be specified in code, by updating helm values.) 1. In the GUI: - 1. Navigate to: Manage/Users > [View all users] > [Edit] (your demo user) - 1. Under "Required User Actions": Delete [Verify Email] - 1. Under "Email Verified": Toggle Off to On - 1. Click Save + 1. Navigate to: Manage/Users > [View all users] > [Edit] i.e., your demo user). + 1. Under "Required User Actions": Delete [Verify Email]. + 1. Under "Email Verified": Toggle Off to On. + 1. Click Save. -## Step 14: Create an Application Identity / Service Account / Non-Person Entity in Keycloak for the authdemo webpage +## Step 14: Create an Application Identity/Service Account/Non-Person Entity in Keycloak for the Authdemo Webpage -1. Visit <https://keycloak.bigbang.dev/auth/admin> -1. log in as a keycloak admin, using the default creds of admin:password +1. Visit <https://keycloak.bigbang.dev/auth/admin>. +1. log in as a keycloak admin, using the default creds of admin:password. 1. In the GUI: 1. Navigate to: Manage/Groups > Impact Level 2 Authorized (double click) Notice the group UUID in the URL: 00eb8904-5b88-4c68-ad67-cec0d2e07aa6 @@ -727,11 +728,11 @@ kubectl wait --for=condition=available deployment/podinfo --timeout=3m -n=mock-m 1. Under "Access Type": Change Public to Confidential 1. Under "Valid Redirect URIs": Add "https://authdemo.bigbang.dev/login/generic_oauth" Note: /login/generic_oauth comes from auth service - 1. Save + 1. Save. 1. 
Scroll up to the top of the page and you'll see a newly added [Credentials] tab, click it. - 1. Copy the secret for the authdemo Client Application Identity, (it's labeled secret) you'll paste it into the next step + 1. Copy the secret for the authdemo Client Application Identity, (it's labeled secret) you'll paste it into the next step. -## Step 15: Deploy auth service to the workload cluster and use it to protect the mock mission app +## Step 15: Deploy Auth Service to the Workload Cluster and use it to Protect the Mock Mission App ```shell # [admin@Laptop:~] @@ -754,7 +755,7 @@ export KEYCLOAK_IDP_JWKS=$(curl https://keycloak.bigbang.dev/auth/realms/baby-yo export KEYCLOAK_CERTS_CA=$(curl https://letsencrypt.org/certs/isrgrootx1.pem) ``` -* You can copy paste the following command block as is +* **NOTE:** You can copy and paste the following command block as-is. ```shell # [admin@Laptop:~] @@ -812,35 +813,34 @@ ssh workload-cluster 'helm get values bigbang -n=bigbang' # You can eyeball this ## Step 16: Revisit authdemo.bigbang.dev -* Go to <https://authdemo.bigbang.dev> -* Before we were taken straight to the mock mission app webpage -* Now* (or 30-120 seconds after copy pasting the above block of commands into the terminal), when you create a new tab and try to visit this URL it immediately redirects to a KeyCloak Log in Prompt and if you log in with your demo user, you'll a message like this: +* Go to <https://authdemo.bigbang.dev>. +* Before we were taken straight to the mock mission app webpage. +* Now, or between 30 to 120 seconds after copy pasting the above block of commands into the terminal, when you create a new tab and try to visit this URL, it immediately redirects to a KeyCloak Log in Prompt. If you log in with your demo user, you'll recieve a message like this: > RBAC: access denied > Your account has not been granted access to this application group yet. 
-## Step 17: Update the group membership of the user +## Step 17: Update the Group Membership of the User -1. Go to <https://keycloak.bigbang.dev/auth/admin> -1. Login with admin:password +1. Go to <https://keycloak.bigbang.dev/auth/admin>. +1. Login with admin:password. 1. In the GUI: - 1. Navigate to: Manage/Users > [View all users] > [Edit] (your Demo user) - 1. Click the Groups tab at the top - 1. Click Impact Level 2 Authorized - 1. Click [Join] + 1. Navigate to: Manage/Users > [View all users] > [Edit] (your Demo user). + 1. Click the Groups tab at the top. + 1. Click Impact Level 2 Authorized. + 1. Click [Join]. -> Note: -> If you try to repeat step 16 at this stage, you'll see either an infinite loading screen or message like this: +> **NOTE:** If you try to repeat step 16 at this stage, you'll see either an infinite loading screen or message like this: > `Access to authdemo.bigbang.dev was denied` > `You don't have authorization to view this page.` > `HTTP ERROR 403` -> The reason for this is that we configured our workstation's hostfile /etc/hosts to avoid needing to configure DNS. But the 2 k3d clusters are unable to resolve the DNS Names. -> AuthService pods on the Workload Cluster need to be able to resolve the DNS name of keycloak.bigbang.dev -> Keycloak pod on the Keycloak Cluster needs to be able to resolve the DNS name of authdemo.bigbang.dev +> The reason for this is that we configured our workstation's hostfile /etc/hosts to avoid needing to configure DNS. But the two k3d clusters are unable to resolve the DNS Names. +> AuthService pods on the Workload Cluster need to be able to resolve the DNS name of keycloak.bigbang.dev. +> Keycloak pod on the Keycloak Cluster needs to be able to resolve the DNS name of authdemo.bigbang.dev. ## Step 18: Update Inner Cluster DNS on the Workload Cluster -* The following commands will show there's an issue with DNS +* The following commands will show there's an issue with DNS. 
```shell # [admin@Laptop:~] @@ -895,8 +895,8 @@ kubectl exec -it test -- ping authdemo.bigbang.dev -c 1 | head -n 1 # Now the k3d clusters can resolve the DNS to IP mappings, similar to our workstations /etc/hosts file ``` -## Step 19: Revisit authdemo.bigbang.dev +## Step 19: Revisit Authdemo.bigbang.dev -1. Visit <https://authdemo.bigbang.dev> -1. You'll get redirected to keycloak.bigbang.dev -1. Log in to keycloak, and afterwords you'll get redirected to authdemo.bigbang.dev +1. Visit <https://authdemo.bigbang.dev>. +1. You'll get redirected to keycloak.bigbang.dev. +1. Log in to keycloak, and afterwards you'll get redirected to authdemo.bigbang.dev. diff --git a/docs/guides/renovate/deployment.md b/docs/guides/renovate/deployment.md index f91bdeb09688384a5f12efeac54be1aff1c1a6cf..4fa32e1476de8faf30c3b6f5540a7c32c3ddbe8f 100644 --- a/docs/guides/renovate/deployment.md +++ b/docs/guides/renovate/deployment.md @@ -31,7 +31,7 @@ packages: ``` #### Config -The configuration sets up a self-hosted instance of Renovate that connects with a platform. In the example we connect to GitLab using the GitLab API v4 at a specified URL. +The configuration sets up a self-hosted instance of Renovate that connects with a platform. In the example, we connect to GitLab using the GitLab API v4 at a specified URL. ##### Auth It is recommended to use a repository-scoped auth token with developer access for least privilege. @@ -42,7 +42,7 @@ The repositories key in this self-hosted renovate configuration specifies which See [Self Hosted Configuration](https://docs.renovatebot.com/self-hosted-configuration/#self-hosted-configuration-options) for more details #### Cron Job -See [Scheduling Renovate Guide](./scheduling.md) +Refer to the [Scheduling Renovate Guide](./scheduling.md). #### Individual Package Configuration -The configuration file for Renovate is called `renovate.json` and is located in each project's root directory. 
See [Package Configuration](./package-configuration.md) \ No newline at end of file +The configuration file for Renovate is called `renovate.json` and is located in each project's root directory. See [Package Configuration](./package-configuration.md) diff --git a/docs/guides/renovate/package-configuration.md b/docs/guides/renovate/package-configuration.md index b5c482444f0fba53f4c8b373b4f01ba486bc8946..3d3f53c47457e23af5fa6b8c9972f53937eaf363 100644 --- a/docs/guides/renovate/package-configuration.md +++ b/docs/guides/renovate/package-configuration.md @@ -6,7 +6,7 @@ > The following example is for a user fork of the [customer template](https://repo1.dso.mil/big-bang/customers/template). -The first 10 lines set up the basics of what a Renovate ticket is expected to look like when created. Package Rules is set up to use git-tags. Regex Managers are detailed below. +The first ten lines set up the basics of what a Renovate ticket is expected to look like when created. Package Rules is set up to use git-tags. Regex Managers are detailed below. ```json { "baseBranches": ["main"], @@ -59,7 +59,7 @@ The first 10 lines set up the basics of what a Renovate ticket is expected to lo #### RegEx Managers This is where regex-based rules for updating dependencies are defined. This is where the majority of the work is done for Renovate. -In this example the version of Big Bang tracked by the base/kustomization.yaml is the target of renovate. +In this example, the version of Big Bang tracked by the base/kustomization.yaml is the target of renovate. The regex targets `- git::https://repo1.dso.mil/big-bang/bigbang.git//base?ref=1.41.0` setting `1.41.0` as a capture group. 
```json @@ -73,10 +73,10 @@ The regex targets `- git::https://repo1.dso.mil/big-bang/bigbang.git//base?ref=1 "versioningTemplate": "regex:^(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)$" } ``` -> The same concept can be applied to dev/kustomization.yaml or the kustomization for any folder for a specific environment +> The same concept can be applied to dev/kustomization.yaml or the kustomization for any folder for a specific environment. -Targeting packages requires a more complex regex statement. In this example we are asking renovate to update the version number of `git.tag` where `git.repository` matches the `depName` +Targeting packages requires a more complex regex statement. In this example, we are asking renovate to update the version number of `git.tag` where `git.repository` matches the `depName` ```json { @@ -99,10 +99,10 @@ kyverno: replicaCount: 1 ``` ## Package Configuration Options -The following options are commonly used to configure the options of Renovate: +The options that are commonly used to configure the options of Renovate include the following: regexManagers, Dashboard options, and packageRules. ### regexManagers -Several `regexManagers` are defined in the package configuration example, each with a specific `fileMatch` path and `matchStrings` regex. It can accept an array of objects. Below is some of the common properties of those objects. +Several `regexManagers` are defined in the package configuration example, each with a specific `fileMatch` path and `matchStrings` regex. It can accept an array of objects. Provided below are several of the common properties of those objects. #### File Match @@ -110,11 +110,11 @@ The `fileMatch` array is a list of files that you want to parse. It uses a regu #### Match Strings -`matchString` is used to identify the current version, data source type, dependency name or current digest in a file. 
You must use special capture groups in regex to identify these items, or create a template for Renovate to understand. The following are required to be captured: +`matchString` is used to identify the current version, data source type, dependency name or current digest in a file. You must use special capture groups in regex to identify these items, or create a template for Renovate to understand. The following are required to be captured: -- `<currentValue>`: This is the current version or tag of the dependency (e.g. v1.2.3) +- `<currentValue>`: This is the current version or tag of the dependency (e.g. v1.2.3). - `<datasource>`: This is the type of the dependency. For Big Bang packages you will want to use `git-tags`. -- `<depName>`: This is the name of the dependency and is uses as the repository for the dependency when looking it up in the registry +- `<depName>`: This is the name of the dependency and is used as the repository for the dependency when looking it up in the registry. You can optionally capture `<currentDigest>` as the SHA256 digest for an image if you want renovate to replace this value. @@ -122,21 +122,21 @@ To capture a group, you simply use [regex named groups](https://www.regular-expr See [Renovate Configuration](https://docs.renovatebot.com/configuration-options/#regexmanagers) for more details. -### Dashboard options +### Dashboard Options + #### dependencyDashboard -When the Dependency Dashboard is enabled, Renovate will create a new issue in the configured repository. This issue acts as a "dashboard" where you can get an overview of the status of all updates. It can accept a boolean value. +When the dependencyDashboard is enabled, Renovate will create a new issue in the configured repository. This issue acts as a "dashboard" where you can get an overview of the status of all updates. It can accept a boolean value. 
#### dependencyDashboardHeader -This key sets a header for the dependency dashboard which lists tasks to be completed by the user in the form of the issue description on Gitlab. The header will appear at the top of the dependency dashboard. In the given example, the header contains a checklist for reviewing the BB release notes/changelog. It can accept a string. +This key sets a header for the dependencyDashboard which lists tasks to be completed by the user in the form of the issue description on Gitlab. The header will appear at the top of the dependencyDashboard. In the given example, the header contains a checklist for reviewing the BB release notes/changelog. It can accept a string. #### dependencyDashboardTitle -This key is used to set the title for the dependency dashboard. In the example, it is set as "Renovate: Upgrade Big Bang". It can accept a string. - -> See [Renovate Configuration](https://docs.renovatebot.com/configuration-options/#dependencydashboard) for more info. +This key is used to set the title for the dependencyDashboard. In the example, it is set as "Renovate: Upgrade Big Bang". It can accept a string. +> Refer to [Renovate Configuration](https://docs.renovatebot.com/configuration-options/#dependencydashboard) for more information. ### packageRules -This key provides an array of rules that define how packages are matched and grouped. In the example, any matching package with the datasource `git-tags` will be grouped under the name `Big Bang`. It can accept an array of objects see [Renovate Package Rules Docs](https://docs.renovatebot.com/configuration-options/#packagerules) for more info. +This key provides an array of rules that define how packages are matched and grouped. In the example, any matching package with the datasource `git-tags` will be grouped under the name `Big Bang`. It can accept an array of objects; see [Renovate packageRules Docs](https://docs.renovatebot.com/configuration-options/#packagerules) for more info. 
## Additional Package Configuration Options @@ -165,7 +165,6 @@ This key sets a prefix that will be added to commit messages. It can accept a st This key specifies whether to separate major/minor updates into separate pull requests. It can accept a boolean value. ### ignoreDeps -The configuration field allows you to define a list of dependency names to be ignored by Renovate. Currently it supports only "exact match" dependency names and not any patterns. It can accept an array of strings. - +The configuration field allows you to define a list of dependency names to be ignored by Renovate. Currently, it supports only "exact match" dependency names and not any patterns. It can accept an array of strings. In conclusion, the `renovate.json` file allows us to configure the Renovate bot to keep our Helm chart repository up-to-date with the latest dependencies, by using various settings to suit our needs. diff --git a/docs/guides/renovate/scheduling.md b/docs/guides/renovate/scheduling.md index 74f5b1fe1d66b8d9f5d32780248b7ee543a07a65..06946ebee6bf623ed9eb05fe0bd25c20e75db734 100644 --- a/docs/guides/renovate/scheduling.md +++ b/docs/guides/renovate/scheduling.md @@ -1,13 +1,15 @@ ## Handling Scheduling in the Chart + To handle scheduling in the chart for a Renovate configuration, you can use a Kubernetes CronJob object, which allows you to schedule jobs to run at specific intervals. To configure the scheduling, you will need to modify the schedule field in the cronjob section of the values.yaml file. -The schedule option allows you to define times of week or month for Renovate updates. Running Renovate around the clock can be too "noisy" for some projects. To reduce the noise you can use the schedule config option to limit the time frame in which Renovate will perform actions on your repository. You can use the standard Cron syntax and Later syntax to define your schedule. +The schedule option allows you to define times of week or month for Renovate updates. 
Running Renovate around the clock can be too "noisy" for some projects. To reduce the noise, you can use the schedule config option to limit the time frame in which Renovate will perform actions on your repository. You can use the standard Cron syntax and Later syntax to define your schedule. The default value for schedule is `0 1 * * *` for at `01:00 everyday`. -The easiest way to define a schedule is to use a preset if one of them fits your requirements. See Schedule presets for details and feel free to request a new one in the source repository if you think others would benefit from it too. +The easiest way to define a schedule is to use a preset if one of them fits your requirements. Refer to the schedule presets for details and feel free to request a new one in the source repository if you think others would also benefit from it. + +### Additional Examples -##### Later examples Otherwise, here are some text schedules that are known to work: ``` every weekend @@ -30,21 +32,20 @@ every 3 months on the first day of the month +------------------------- minute (0 - 59) ``` -> For example, to run the Renovate job every day at 1:00 AM, you would set the schedule field to `0 1 * * *` +> For example, to run the Renovate job every day at 1:00 AM, you would set the schedule field to `0 1 * * *`. ### Other Options -You can also configure other options in the CronJob section, such as suspend to temporarily disable the job, concurrencyPolicy to control how multiple instances of the job are run, and startingDeadlineSeconds to specify the maximum amount of time to wait for a job to start before considering it failed. +You can also configure other options in the CronJob section, including: suspend to temporarily disable the job, concurrencyPolicy to control how multiple instances of the job are run, and startingDeadlineSeconds to specify the maximum amount of time to wait for a job to start before considering it failed. 
-* `suspend` - If set to `true`, the job will be suspended and will not be executed. -* `concurrencyPolicy` - This determines how the job handles concurrent executions. Valid values are `Allow`, `Forbid`, and `Replace`. -* `failedJobsHistoryLimit` - This defines the number of failed jobs that will be kept in history. -* `successfulJobsHistoryLimit` - This defines the number of successful jobs that will be kept in history. -* `jobRestartPolicy` - This determines how the job will be restarted when it fails. Valid values are `Never` and `OnFailure`. -* `jobBackoffLimit` - This defines the maximum number of retries that can be attempted before the job is considered failed. -* `startingDeadlineSeconds` - This defines the deadline for starting the job. If the job is not started before the deadline, it will be cancelled. +* `suspend`: If set to `true`, the job will be suspended and will not be executed. +* `concurrencyPolicy`: This determines how the job handles concurrent executions. Valid values are `Allow`, `Forbid`, and `Replace`. +* `failedJobsHistoryLimit`: This defines the number of failed jobs that will be kept in history. +* `successfulJobsHistoryLimit`: This defines the number of successful jobs that will be kept in history. +* `jobRestartPolicy`: This determines how the job will be restarted when it fails. Valid values are `Never` and `OnFailure`. +* `jobBackoffLimit`: This defines the maximum number of retries that can be attempted before the job is considered failed. +* `startingDeadlineSeconds`: This defines the deadline for starting the job. If the job is not started before the deadline, it will be cancelled. -# Once you have configured the schedule in the values.yaml file, you can deploy the Renovate chart using `helm install` or `helm upgrade` commands. The Renovate job will then run according to the specified schedule. 
### Example Yaml @@ -70,4 +71,4 @@ packages: jobBackoffLimit: '' startingDeadlineSeconds: '' - ``` \ No newline at end of file + ``` diff --git a/docs/guides/using-bigbang/style.md b/docs/guides/using-bigbang/style.md index 60085165339b5642de6863c6b02ac1cdc1ccc7bf..6dc6df21daa600e3de4df08c6aef2e647318037f 100644 --- a/docs/guides/using-bigbang/style.md +++ b/docs/guides/using-bigbang/style.md @@ -1,8 +1,8 @@ # General Conventions Style Guide -This style guide outlines the general conventions to follow for package names, structure standardization, version numbers, and YAML formatting focusing on the Big Bang Helm chart. Individual packages (core, addons, community) may not follow these exact standards. +This style guide outlines the general conventions to follow for package names, structure standardization, version numbers, and YAML formatting focusing on the Big Bang Helm chart. Individual packages (e.g., core, addons, community) may not follow these exact standards. ## Package Names -When creating package names, consider that different usages of the name will require different formats. For Helm values keys use camelCase to delineate multi-word package names. Avoid using . or - within values keys to simplify Helm templating. Kubernetes resources require translation to kebab-case as they do not support uppercase. Package naming for Kubernetes resources should be consistent across all resources (GitRepository, Namespace, HelmRelease, labels, etc). +When creating package names, consider that different usages of the name will require different formats. For Helm values keys use camelCase to delineate multi-word package names. Avoid using . or - within values keys to simplify Helm templating. Kubernetes resources require translation to kebab-case as they do not support uppercase. Package naming for Kubernetes resources should be consistent across all resources (e.g., GitRepository, Namespace, HelmRelease, and/or labels). 
##### Notable Exceptions > If a package name is two words and the additional words are less than four characters, consider it as part of the single name. Examples include "fluentbit" (technically "Fluent Bit") and "argocd" (technically "Argo CD"). @@ -12,17 +12,17 @@ When creating package names, consider that different usages of the name will req ## Formatting YAML When formatting YAML files, follow these guidelines: -- Indent using two spaces, not tabs. -- Use camelCase and alphanumeric keys, without any special characters. -- Ensure that all Kubernetes resource names, repository names, and namespaces are lowercase, alphanumeric, or hyphenated, using kebab-case. +* Indent using two spaces, not tabs. +* Use camelCase and alphanumeric keys, without any special characters. +* Ensure that all Kubernetes resource names, repository names, and namespaces are lowercase, alphanumeric, or hyphenated, using kebab-case. ## Structure Standardization For each package, ensure that the following items have the same name: -- Folder: chart/templates/<package\> -- Top-level key: chart/templates/values.yaml -- Namespace: chart/templates/<package\>/namespace.yaml, unless targeting another package's namespace. -- Repo name: https://repo1.dso.mil/bigbang/packages/<package\> +* Folder: chart/templates/<package\> +* Top-level key: chart/templates/values.yaml +* Namespace: chart/templates/<package\>/namespace.yaml, unless targeting another package's namespace. +* Repo name: https://repo1.dso.mil/bigbang/packages/<package\> ## diff --git a/docs/guides/using-bigbang/testing-deployments.md b/docs/guides/using-bigbang/testing-deployments.md index 05c26a568b155a9edd3842e2dac6a3be641958ce..f3c70da3f4b4dddb1de642f2b88c1fb2a8c7da16 100644 --- a/docs/guides/using-bigbang/testing-deployments.md +++ b/docs/guides/using-bigbang/testing-deployments.md @@ -6,13 +6,13 @@ 3. 
[Resources](#resources) ## Introduction -Big Bang leverages a sub-chart called Gluon to perform both scripted and UI tests of deployed applications. This guide is designed to describe the basics of the UI testing portion of Gluon and how it can be extended to suit your needs. +Big Bang leverages a sub-chart called Gluon to perform both scripted and User Interface (UI) tests of deployed applications. This guide is designed to describe the basics of the UI testing portion of Gluon and how it can be extended to suit your needs. The UI testing is performed via a container running Cypress and can be enabled on a per-package level by setting the value of bbtests.enabled to true. Be sure to review the values for each package as there may be additional settings related to the Cypress test under the bbtests.cypress section. With the correct values in place, you can run a test by specifying the following command: `helm test kiali-kiali -n bigbang` - > Note: You can run the following command to grab the proper names for each helm chart deployed in Big Bang: "helm list -n bigbang" + > **NOTE:** You can run the following command to grab the proper names for each helm chart deployed in Big Bang: "helm list -n bigbang." Upon running the command, a new pod will be created in the same namespace as the package. This pod will install Cypress, download a Cypress configuration file, download a file that contains custom Cypress commands, and run any available tests. In order for the Cypress pod to run successfully, it needs to have external access to the internet and any resources it is attempting to reach out to for testing. This communication should be allowed by default. You may also need to add exceptions if using Kyverno or Gatekeeper within Big Bang. Links to these exceptions required can be found below in the Resources section. 
@@ -26,6 +26,7 @@ Additionally, test isolation has been disabled by default within Big Bang's impl The above command can be executed either at the beginning to ensure a clean session or at the end of any given test to ensure everything is clean before the next test. ## Extending Cypress Tests + If the default provided Cypress tests are not enough for your installation of Big Bang, there are two options available to extend the behaviour to suit your needs: ### Option 1: Augment Default Tests with Custom Cypress Tests @@ -132,4 +133,4 @@ kubectl kustomize ../bigbang [In Depth BB Test Documentation](https://repo1.dso.mil/big-bang/product/packages/gluon/-/blob/master/docs/bb-tests.md?ref_type=heads) [Kyverno Exceptions for Cypress](https://repo1.dso.mil/big-bang/bigbang/-/blob/master/chart/templates/kyverno-policies/values.yaml) [Gatekeeper Exceptions for Cypress](https://repo1.dso.mil/big-bang/bigbang/-/blob/master/chart/templates/gatekeeper/values.yaml) -[Cypress Environment Variables](https://docs.cypress.io/guides/guides/environment-variables#Option-4---env) \ No newline at end of file +[Cypress Environment Variables](https://docs.cypress.io/guides/guides/environment-variables#Option-4---env) diff --git a/docs/guides/using-bigbang/upgrading-bigbang.md b/docs/guides/using-bigbang/upgrading-bigbang.md index 4a078d52b545e6bef792361c8577cd12d9152b97..175602a685d158d3922a3f1fa9fdb4c6d4cfb52a 100644 --- a/docs/guides/using-bigbang/upgrading-bigbang.md +++ b/docs/guides/using-bigbang/upgrading-bigbang.md @@ -1,18 +1,18 @@ # Upgrading Big Bang ## Before Upgrading -Before upgrading Big Bang please first check the Release Notes and the Changelog to look for any notes that apply to Big Bang Updates and Package Updates. +Before upgrading Big Bang, please first check the Release Notes and the Changelog to look for any notes that apply to Big Bang Updates and Package Updates. 
-Two important things to review when upgrading: -- "Upgrade Notices" in the Big Bang release notes: +There are two important things to review when upgrading: +1. "Upgrade Notices" in the Big Bang release notes: - These capture any critical notes that the Big Bang development team identified during the release process. - This may be an update to Flux which requires a "manual" application, or a change to a specific package that we deem important to include. -- Changelog entries for individual packages that you are deploying: +2. Changelog entries for individual packages that you are deploying: - Oftentimes individual packages could have breaking changes depending on your configuration. - It is important to review the changes included with those packages and determine if your configuration needs to be adjusted as a result. ## Supported Upgrades -Generally we expect upgrades to be done one minor release at a time. If necessary, it is possible to jump past several versions provided there is careful review of the release notes in between the versions and there are no problems. +Generally, we expect upgrades to be done one minor release at a time. If necessary, it is possible to jump past several versions provided there is careful review of the release notes in between the versions and there are no problems. NOTE: It is recommended that upgrades first be tested in a staging environment that mirrors the production environment so that errors are caught early. @@ -65,10 +65,10 @@ resources: ``` ## Verifying the Upgrade -After upgrading the cluster there are some places to look to verify that the upgrade was completed successfully. +After upgrading the cluster, there are some places to look to verify that the upgrade was completed successfully. 
### Verify Helm releases - - Verify all the helm releases have succeeded +Verify all the helm releases have succeeded If everything has updated successfully you should see `Release reconciliation succeeded` as the status for each HelmRelease. ```bash @@ -81,9 +81,9 @@ bigbang istio 5h1m True Release reconciliation succeeded ``` ### Verify Pods - - Verify that there are all pods are either `Running` or `Completed` - - Look for any pods that recently restarted (crashing recently) - - Below see an example of a pod that has restarted multiple times in a short time + - Verify that all pods are either `Running` or `Completed`. + - Look for any pods that recently restarted (crashing recently). + - Below see an example of a pod that has restarted multiple times in a short time. ```bash ⯠k get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE @@ -93,7 +93,7 @@ monitoring alertmanager-monitoring-monitoring-kube-alertmanager-0 ``` ### Verify Image Versions for Specific Packages - - Check for specific package versions (image version on pods) + - Check for specific package versions (image version on pods). - There may be cases where you are hoping to use new features in a new package version, as such it can be beneficial to validate that package did update to the new version as expected. - It can also be important to validate Istio sidecar versions, especially for packages outside of Big Bang core/addons. See an example of checking the image version of the running pod below: ```bash @@ -118,6 +118,6 @@ status: - It is important to validate those specific applications/features are functioning as expected post-upgrade. ## Upgrade Troubleshooting -Oftentimes a good place to start with troubleshooting is to identify which package had issues upgrading. After identifying the package that had problems it can be helpful to re-review the release notes and changelog for that specific package to see if any changes were missed that may have caused the upgrade issue you ran into. 
+Usually, a good place to start with troubleshooting is to identify which package had issues upgrading. After identifying the package that had problems it can be helpful to re-review the release notes and changelog for that specific package to see if any changes were missed that may have caused the upgrade issue you ran into. Specific troubleshooting steps for common issues will be added here in the future. diff --git a/docs/guides/using-bigbang/values-guide.md b/docs/guides/using-bigbang/values-guide.md index 657b6a18a7f8b93e422778c344151acffc66cb38..a024119cbf974b8969f1787560ef9b0e02575a26 100644 --- a/docs/guides/using-bigbang/values-guide.md +++ b/docs/guides/using-bigbang/values-guide.md @@ -12,7 +12,7 @@ Big Bang is a slight variation from the typical "umbrella" helm chart pattern. I The variables in Big Bang's `values.yaml` file are either passed to Flux or Helm depending on the deployment methodology. Values specific to individual packages will be passed to Flux and used to deploy the package itself. Technically speaking, when you deploy Big Bang you are deploying a number of Flux objects and Flux does the heavy lifting to deploy the actual applications. For more information on Flux, see its official [documentation](https://fluxcd.io/docs/components/). -A high level conceptual graph of how values flow through Big Bang is provided below: +A high level conceptual graph of how values flow through Big Bang is provided in the following: ```mermaid graph TD @@ -42,7 +42,7 @@ Variables defined in Big Bang's [values.yaml](../../../chart/values.yaml) are va ### Big Bang Configuration Values -There are a number of values in the Big Bang chart that are solely used for configuration of the Big Bang chart itself. Typically these values are used for the Flux templates (HelmRelease, GitRepository) or for secrets (registry credentials, git credentials). +There are a number of values in the Big Bang chart that are solely used for configuration of the Big Bang chart itself. 
Typically these values are used for the Flux templates (e.g., HelmRelease and/or GitRepository) or for secrets (e.g., registry credentials and/or git credentials). Some examples of these values include: - `registryCredentials`: Used to configure the image pull secrets for every namespace @@ -55,14 +55,14 @@ Some examples of these values include: Global values within Big Bang are used in cases where packages inherit a common configuration. These mainly include configuration for networking, common SSO provider config, and other common usability values. -Some examples of these values include: +Some examples of these values are included in the following: - `domain`: This value informs the VirtualService configurations for all packages - `openshift`: This toggle provides for configuration of any OpenShift specific values across all packages - `networkPolicies`: These values inform configuration of network policies (enabling, disabling, setting IP ranges) - `imagePullPolicy`: When set this value is used to configure all packages (and their pods) with a common pull policy - `sso` (top level value): The configuration set here is used to assist with packages' individual OIDC or SAML configuration, such as endpoints -**Important Note**: While we use the term "global" here these values are NOT the exact same as a Helm global value. Helm globals are directly passed to all subcharts. Big Bang globals are in some cases passed directly to the package charts, but in other cases they are manipulated/customized to inform package values. +**NOTE**: While we use the term "global" here these values are NOT the exact same as a Helm global value. Helm globals are directly passed to all subcharts. Big Bang globals are in some cases passed directly to the package charts, but in other cases they are manipulated/customized to inform package values. 
### Abstracted Package Values @@ -71,8 +71,8 @@ Each package generally has some configuration that is commonly used for a produc When these values are set they are passed through to the package itself. In some cases the Big Bang chart provides additional utility by creating any necessary secrets or toggling other values that are needed. Some examples of these values include: -- `<package>.database`: Simplified configuration for an external database connection (user, pass, host, port, etc) -- `<package>.objectStorage`: Similar configuration for external storage (i.e. S3) +- `<package>.database`: Simplified configuration for an external database connection (e.g., user, pass, host, and/or port) +- `<package>.objectStorage`: Similar configuration for external storage (i.e., S3) - `<package>.sso.client_id`: Specific SSO client ID for the package - `<package>.enterprise.license`: Enterprise license for the specific package diff --git a/docs/understanding-bigbang/configuration/base-config.md b/docs/understanding-bigbang/configuration/base-config.md index fe8939b40f7d4569a1b50fd02fa70cecdf3b1a84..16f856d1284d7e0e34b5a90d70d5d4819b851ce3 100644 --- a/docs/understanding-bigbang/configuration/base-config.md +++ b/docs/understanding-bigbang/configuration/base-config.md @@ -1,6 +1,6 @@ # bigbang -  +  Big Bang is a declarative, continuous delivery tool for core DoD hardened and approved packages into a Kubernetes cluster. 
@@ -68,10 +68,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | istio.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | istio.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/istio-controlplane.git"` | | | istio.git.path | string | `"./chart"` | | -| istio.git.tag | string | `"1.22.1-bb.0"` | | +| istio.git.tag | string | `"1.22.2-bb.0"` | | | istio.helmRepo.repoName | string | `"registry1"` | | | istio.helmRepo.chartName | string | `"istio"` | | -| istio.helmRepo.tag | string | `"1.22.1-bb.0"` | | +| istio.helmRepo.tag | string | `"1.22.2-bb.0"` | | | istio.enterprise | bool | `false` | Tetrate Istio Distribution - Tetrate provides FIPs verified Istio and Envoy software and support, validated through the FIPs Boring Crypto module. Find out more from Tetrate - https://www.tetrate.io/tetrate-istio-subscription | | istio.ingressGateways.public-ingressgateway.type | string | `"LoadBalancer"` | | | istio.ingressGateways.public-ingressgateway.kubernetesResourceSpec | object | `{}` | | @@ -88,10 +88,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | istioOperator.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | istioOperator.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/istio-operator.git"` | | | istioOperator.git.path | string | `"./chart"` | | -| istioOperator.git.tag | string | `"1.22.1-bb.0"` | | +| istioOperator.git.tag | string | `"1.22.2-bb.0"` | | | istioOperator.helmRepo.repoName | string | `"registry1"` | | | istioOperator.helmRepo.chartName | string | `"istio-operator"` | | -| istioOperator.helmRepo.tag | string | `"1.22.1-bb.0"` | | +| istioOperator.helmRepo.tag | string | `"1.22.2-bb.0"` | | | istioOperator.flux | object | `{}` | Flux reconciliation overrides specifically for the Istio Operator Package | | istioOperator.values | object | `{}` | Values to passthrough to the 
istio-operator chart: https://repo1.dso.mil/big-bang/product/packages/istio-operator.git | | istioOperator.postRenderers | list | `[]` | Post Renderers. See docs/postrenders.md | @@ -114,10 +114,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | kiali.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | kiali.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/kiali.git"` | | | kiali.git.path | string | `"./chart"` | | -| kiali.git.tag | string | `"1.86.0-bb.2"` | | +| kiali.git.tag | string | `"1.86.2-bb.0"` | | | kiali.helmRepo.repoName | string | `"registry1"` | | | kiali.helmRepo.chartName | string | `"kiali"` | | -| kiali.helmRepo.tag | string | `"1.86.0-bb.2"` | | +| kiali.helmRepo.tag | string | `"1.86.2-bb.0"` | | | kiali.flux | object | `{}` | Flux reconciliation overrides specifically for the Kiali Package | | kiali.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". 
| | kiali.sso.enabled | bool | `false` | Toggle SSO for Kiali on and off | @@ -129,10 +129,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | clusterAuditor.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | clusterAuditor.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/cluster-auditor.git"` | | | clusterAuditor.git.path | string | `"./chart"` | | -| clusterAuditor.git.tag | string | `"1.5.0-bb.17"` | | +| clusterAuditor.git.tag | string | `"1.5.0-bb.19"` | | | clusterAuditor.helmRepo.repoName | string | `"registry1"` | | | clusterAuditor.helmRepo.chartName | string | `"cluster-auditor"` | | -| clusterAuditor.helmRepo.tag | string | `"1.5.0-bb.17"` | | +| clusterAuditor.helmRepo.tag | string | `"1.5.0-bb.19"` | | | clusterAuditor.flux | object | `{}` | Flux reconciliation overrides specifically for the Cluster Auditor Package | | clusterAuditor.values | object | `{}` | Values to passthrough to the cluster auditor chart: https://repo1.dso.mil/big-bang/product/packages/cluster-auditor.git | | clusterAuditor.postRenderers | list | `[]` | Post Renderers. 
See docs/postrenders.md | @@ -162,10 +162,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | kyvernoPolicies.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | kyvernoPolicies.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/kyverno-policies.git"` | | | kyvernoPolicies.git.path | string | `"./chart"` | | -| kyvernoPolicies.git.tag | string | `"3.0.4-bb.32"` | | +| kyvernoPolicies.git.tag | string | `"3.0.4-bb.33"` | | | kyvernoPolicies.helmRepo.repoName | string | `"registry1"` | | | kyvernoPolicies.helmRepo.chartName | string | `"kyverno-policies"` | | -| kyvernoPolicies.helmRepo.tag | string | `"3.0.4-bb.32"` | | +| kyvernoPolicies.helmRepo.tag | string | `"3.0.4-bb.33"` | | | kyvernoPolicies.flux | object | `{}` | Flux reconciliation overrides specifically for the Kyverno Package | | kyvernoPolicies.values | object | `{}` | Values to passthrough to the kyverno policies chart: https://repo1.dso.mil/big-bang/product/packages/kyverno-policies.git | | kyvernoPolicies.postRenderers | list | `[]` | Post Renderers. 
See docs/postrenders.md | @@ -184,15 +184,16 @@ To start using Big Bang, you will need to create your own Big Bang environment t | elasticsearchKibana.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | elasticsearchKibana.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/elasticsearch-kibana.git"` | | | elasticsearchKibana.git.path | string | `"./chart"` | | -| elasticsearchKibana.git.tag | string | `"1.16.0-bb.0"` | | +| elasticsearchKibana.git.tag | string | `"1.17.0-bb.2"` | | | elasticsearchKibana.helmRepo.repoName | string | `"registry1"` | | | elasticsearchKibana.helmRepo.chartName | string | `"elasticsearch-kibana"` | | -| elasticsearchKibana.helmRepo.tag | string | `"1.16.0-bb.0"` | | +| elasticsearchKibana.helmRepo.tag | string | `"1.17.0-bb.2"` | | | elasticsearchKibana.flux | object | `{"timeout":"20m"}` | Flux reconciliation overrides specifically for the Logging (EFK) Package | | elasticsearchKibana.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". | | elasticsearchKibana.sso.enabled | bool | `false` | Toggle OIDC SSO for Kibana/Elasticsearch on and off. Enabling this option will auto-create any required secrets. | | elasticsearchKibana.sso.client_id | string | `""` | Elasticsearch/Kibana OIDC client ID | | elasticsearchKibana.sso.client_secret | string | `""` | Elasticsearch/Kibana OIDC client secret | +| elasticsearchKibana.serviceAccountAnnotations | object | `{"elasticsearch":{},"kibana":{}}` | Elasticsearch/Kibana Service Account Annotations | | elasticsearchKibana.license.trial | bool | `false` | Toggle trial license installation of elasticsearch. Note that enterprise (non trial) is required for SSO to work. 
| | elasticsearchKibana.license.keyJSON | string | `""` | Elasticsearch license in json format seen here: https://repo1.dso.mil/big-bang/product/packages/elasticsearch-kibana#enterprise-license | | elasticsearchKibana.values | object | `{}` | Values to passthrough to the elasticsearch-kibana chart: https://repo1.dso.mil/big-bang/product/packages/elasticsearch-kibana.git | @@ -201,10 +202,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | eckOperator.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | eckOperator.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/eck-operator.git"` | | | eckOperator.git.path | string | `"./chart"` | | -| eckOperator.git.tag | string | `"2.13.0-bb.1"` | | +| eckOperator.git.tag | string | `"2.13.0-bb.2"` | | | eckOperator.helmRepo.repoName | string | `"registry1"` | | | eckOperator.helmRepo.chartName | string | `"eck-operator"` | | -| eckOperator.helmRepo.tag | string | `"2.13.0-bb.1"` | | +| eckOperator.helmRepo.tag | string | `"2.13.0-bb.2"` | | | eckOperator.flux | object | `{}` | Flux reconciliation overrides specifically for the ECK Operator Package | | eckOperator.values | object | `{}` | Values to passthrough to the eck-operator chart: https://repo1.dso.mil/big-bang/product/packages/eck-operator.git | | eckOperator.postRenderers | list | `[]` | Post Renderers. 
See docs/postrenders.md | @@ -212,10 +213,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | fluentbit.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | fluentbit.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/fluentbit.git"` | | | fluentbit.git.path | string | `"./chart"` | | -| fluentbit.git.tag | string | `"0.46.10-bb.0"` | | +| fluentbit.git.tag | string | `"0.46.10-bb.2"` | | | fluentbit.helmRepo.repoName | string | `"registry1"` | | | fluentbit.helmRepo.chartName | string | `"fluentbit"` | | -| fluentbit.helmRepo.tag | string | `"0.46.10-bb.0"` | | +| fluentbit.helmRepo.tag | string | `"0.46.10-bb.2"` | | | fluentbit.flux | object | `{}` | Flux reconciliation overrides specifically for the Fluent-Bit Package | | fluentbit.values | object | `{}` | Values to passthrough to the fluentbit chart: https://repo1.dso.mil/big-bang/product/packages/fluentbit.git | | fluentbit.postRenderers | list | `[]` | Post Renderers. 
See docs/postrenders.md | @@ -223,10 +224,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | promtail.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | promtail.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/promtail.git"` | | | promtail.git.path | string | `"./chart"` | | -| promtail.git.tag | string | `"6.15.5-bb.5"` | | +| promtail.git.tag | string | `"6.16.2-bb.1"` | | | promtail.helmRepo.repoName | string | `"registry1"` | | | promtail.helmRepo.chartName | string | `"promtail"` | | -| promtail.helmRepo.tag | string | `"6.15.5-bb.5"` | | +| promtail.helmRepo.tag | string | `"6.16.2-bb.1"` | | | promtail.flux | object | `{}` | Flux reconciliation overrides specifically for the Promtail Package | | promtail.values | object | `{}` | Values to passthrough to the promtail chart: https://repo1.dso.mil/big-bang/product/packages/fluentbit.git | | promtail.postRenderers | list | `[]` | Post Renderers. See docs/postrenders.md | @@ -234,10 +235,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | loki.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | loki.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/loki.git"` | | | loki.git.path | string | `"./chart"` | | -| loki.git.tag | string | `"6.6.2-bb.4"` | | +| loki.git.tag | string | `"6.6.4-bb.1"` | | | loki.helmRepo.repoName | string | `"registry1"` | | | loki.helmRepo.chartName | string | `"loki"` | | -| loki.helmRepo.tag | string | `"6.6.2-bb.4"` | | +| loki.helmRepo.tag | string | `"6.6.4-bb.1"` | | | loki.flux | object | `{}` | Flux reconciliation overrides specifically for the Loki Package | | loki.strategy | string | `"monolith"` | Loki architecture. 
Options are monolith and scalable | | loki.clusterName | string | `""` | Loki clusterName identifier for Promtail and Dashboards | @@ -291,10 +292,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | monitoring.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | monitoring.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/monitoring.git"` | | | monitoring.git.path | string | `"./chart"` | | -| monitoring.git.tag | string | `"60.1.0-bb.0"` | | +| monitoring.git.tag | string | `"60.4.0-bb.2"` | | | monitoring.helmRepo.repoName | string | `"registry1"` | | | monitoring.helmRepo.chartName | string | `"monitoring"` | | -| monitoring.helmRepo.tag | string | `"60.1.0-bb.0"` | | +| monitoring.helmRepo.tag | string | `"60.4.0-bb.2"` | | | monitoring.flux | object | `{"install":{"crds":"CreateReplace"},"upgrade":{"crds":"CreateReplace"}}` | Flux reconciliation overrides specifically for the Monitoring Package | | monitoring.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". 
| | monitoring.sso.enabled | bool | `false` | Toggle SSO for monitoring components on and off | @@ -308,10 +309,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | grafana.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | grafana.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/grafana.git"` | | | grafana.git.path | string | `"./chart"` | | -| grafana.git.tag | string | `"8.0.0-bb.0"` | | +| grafana.git.tag | string | `"8.2.2-bb.1"` | | | grafana.helmRepo.repoName | string | `"registry1"` | | | grafana.helmRepo.chartName | string | `"grafana"` | | -| grafana.helmRepo.tag | string | `"8.0.0-bb.0"` | | +| grafana.helmRepo.tag | string | `"8.2.2-bb.1"` | | | grafana.flux | object | `{}` | Flux reconciliation overrides specifically for the Monitoring Package | | grafana.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". 
| | grafana.sso.enabled | bool | `false` | Toggle SSO for grafana components on and off | @@ -326,10 +327,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | twistlock.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | twistlock.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/twistlock.git"` | | | twistlock.git.path | string | `"./chart"` | | -| twistlock.git.tag | string | `"0.15.0-bb.11"` | | +| twistlock.git.tag | string | `"0.15.0-bb.14"` | | | twistlock.helmRepo.repoName | string | `"registry1"` | | | twistlock.helmRepo.chartName | string | `"twistlock"` | | -| twistlock.helmRepo.tag | string | `"0.15.0-bb.11"` | | +| twistlock.helmRepo.tag | string | `"0.15.0-bb.14"` | | | twistlock.flux | object | `{}` | Flux reconciliation overrides specifically for the Twistlock Package | | twistlock.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". 
| | twistlock.sso.enabled | bool | `false` | Toggle SAML SSO, requires a license and enabling the init job - see https://repo1.dso.mil/big-bang/product/packages/initialization.md | @@ -342,10 +343,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.argocd.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.argocd.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/argocd.git"` | | | addons.argocd.git.path | string | `"./chart"` | | -| addons.argocd.git.tag | string | `"6.11.1-bb.1"` | | +| addons.argocd.git.tag | string | `"7.3.2-bb.1"` | | | addons.argocd.helmRepo.repoName | string | `"registry1"` | | | addons.argocd.helmRepo.chartName | string | `"argocd"` | | -| addons.argocd.helmRepo.tag | string | `"6.11.1-bb.1"` | | +| addons.argocd.helmRepo.tag | string | `"7.3.2-bb.1"` | | | addons.argocd.flux | object | `{}` | Flux reconciliation overrides specifically for the ArgoCD Package | | addons.argocd.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". | | addons.argocd.redis.host | string | `""` | Hostname of a pre-existing Redis to use for ArgoCD. Entering connection info will enable external Redis and will auto-create any required secrets. 
| @@ -372,10 +373,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.minioOperator.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.minioOperator.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/minio-operator.git"` | | | addons.minioOperator.git.path | string | `"./chart"` | | -| addons.minioOperator.git.tag | string | `"5.0.15-bb.0"` | | +| addons.minioOperator.git.tag | string | `"5.0.15-bb.1"` | | | addons.minioOperator.helmRepo.repoName | string | `"registry1"` | | | addons.minioOperator.helmRepo.chartName | string | `"minio-operator"` | | -| addons.minioOperator.helmRepo.tag | string | `"5.0.15-bb.0"` | | +| addons.minioOperator.helmRepo.tag | string | `"5.0.15-bb.1"` | | | addons.minioOperator.flux | object | `{}` | Flux reconciliation overrides specifically for the Minio Operator Package | | addons.minioOperator.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". 
| | addons.minioOperator.values | object | `{}` | Values to passthrough to the minio operator chart: https://repo1.dso.mil/big-bang/product/packages/minio-operator.git | @@ -384,10 +385,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.minio.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.minio.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/minio.git"` | | | addons.minio.git.path | string | `"./chart"` | | -| addons.minio.git.tag | string | `"5.0.15-bb.3"` | | +| addons.minio.git.tag | string | `"5.0.15-bb.5"` | | | addons.minio.helmRepo.repoName | string | `"registry1"` | | | addons.minio.helmRepo.chartName | string | `"minio-instance"` | | -| addons.minio.helmRepo.tag | string | `"5.0.15-bb.3"` | | +| addons.minio.helmRepo.tag | string | `"5.0.15-bb.5"` | | | addons.minio.flux | object | `{}` | Flux reconciliation overrides specifically for the Minio Package | | addons.minio.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". | | addons.minio.accesskey | string | `""` | Default access key to use for minio. 
| @@ -400,10 +401,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.gitlab.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.gitlab.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/gitlab.git"` | | | addons.gitlab.git.path | string | `"./chart"` | | -| addons.gitlab.git.tag | string | `"8.1.1-bb.0"` | | +| addons.gitlab.git.tag | string | `"8.1.2-bb.0"` | | | addons.gitlab.helmRepo.repoName | string | `"registry1"` | | | addons.gitlab.helmRepo.chartName | string | `"gitlab"` | | -| addons.gitlab.helmRepo.tag | string | `"8.1.1-bb.0"` | | +| addons.gitlab.helmRepo.tag | string | `"8.1.2-bb.0"` | | | addons.gitlab.flux | object | `{}` | Flux reconciliation overrides specifically for the Gitlab Package | | addons.gitlab.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". | | addons.gitlab.sso.enabled | bool | `false` | Toggle OIDC SSO for Gitlab on and off. Enabling this option will auto-create any required secrets. 
| @@ -432,10 +433,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.gitlabRunner.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.gitlabRunner.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/gitlab-runner.git"` | | | addons.gitlabRunner.git.path | string | `"./chart"` | | -| addons.gitlabRunner.git.tag | string | `"0.65.0-bb.0"` | | +| addons.gitlabRunner.git.tag | string | `"0.65.0-bb.3"` | | | addons.gitlabRunner.helmRepo.repoName | string | `"registry1"` | | | addons.gitlabRunner.helmRepo.chartName | string | `"gitlab-runner"` | | -| addons.gitlabRunner.helmRepo.tag | string | `"0.65.0-bb.0"` | | +| addons.gitlabRunner.helmRepo.tag | string | `"0.65.0-bb.3"` | | | addons.gitlabRunner.flux | object | `{}` | Flux reconciliation overrides specifically for the Gitlab Runner Package | | addons.gitlabRunner.values | object | `{}` | Values to passthrough to the gitlab runner chart: https://repo1.dso.mil/big-bang/product/packages/gitlab-runner.git | | addons.gitlabRunner.postRenderers | list | `[]` | Post Renderers. 
See docs/postrenders.md | @@ -443,10 +444,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.nexusRepositoryManager.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.nexusRepositoryManager.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/nexus.git"` | | | addons.nexusRepositoryManager.git.path | string | `"./chart"` | | -| addons.nexusRepositoryManager.git.tag | string | `"69.0.0-bb.0"` | | +| addons.nexusRepositoryManager.git.tag | string | `"69.0.0-bb.1"` | | | addons.nexusRepositoryManager.helmRepo.repoName | string | `"registry1"` | | | addons.nexusRepositoryManager.helmRepo.chartName | string | `"nexus-repository-manager"` | | -| addons.nexusRepositoryManager.helmRepo.tag | string | `"69.0.0-bb.0"` | | +| addons.nexusRepositoryManager.helmRepo.tag | string | `"69.0.0-bb.1"` | | | addons.nexusRepositoryManager.license_key | string | `""` | Base64 encoded license file. | | addons.nexusRepositoryManager.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". | | addons.nexusRepositoryManager.sso.enabled | bool | `false` | Toggle SAML SSO for NXRM. -- handles SAML SSO, a Client must be configured in Keycloak or IdP -- to complete setup. 
-- https://support.sonatype.com/hc/en-us/articles/1500000976522-SAML-integration-for-Nexus-Repository-Manager-Pro-3-and-Nexus-IQ-Server-with-Keycloak#h_01EV7CWCYH3YKAPMAHG8XMQ599 | @@ -464,10 +465,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.sonarqube.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.sonarqube.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/sonarqube.git"` | | | addons.sonarqube.git.path | string | `"./chart"` | | -| addons.sonarqube.git.tag | string | `"8.0.4-bb.6"` | | +| addons.sonarqube.git.tag | string | `"8.0.6-bb.0"` | | | addons.sonarqube.helmRepo.repoName | string | `"registry1"` | | | addons.sonarqube.helmRepo.chartName | string | `"sonarqube"` | | -| addons.sonarqube.helmRepo.tag | string | `"8.0.4-bb.6"` | | +| addons.sonarqube.helmRepo.tag | string | `"8.0.6-bb.0"` | | | addons.sonarqube.flux | object | `{}` | Flux reconciliation overrides specifically for the Sonarqube Package | | addons.sonarqube.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". | | addons.sonarqube.sso.enabled | bool | `false` | Toggle SAML SSO for SonarQube. Enabling this option will auto-create any required secrets. 
| @@ -487,10 +488,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.fortify.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.fortify.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/fortify.git"` | | | addons.fortify.git.path | string | `"./chart"` | | -| addons.fortify.git.tag | string | `"1.1.2320154-bb.14"` | | +| addons.fortify.git.tag | string | `"1.1.2320154-bb.15"` | | | addons.fortify.helmRepo.repoName | string | `"registry1"` | | | addons.fortify.helmRepo.chartName | string | `"fortify-ssc"` | | -| addons.fortify.helmRepo.tag | string | `"1.1.2320154-bb.14"` | | +| addons.fortify.helmRepo.tag | string | `"1.1.2320154-bb.15"` | | | addons.fortify.flux | object | `{}` | Flux reconciliation overrides specifically for the Fortify Package | | addons.fortify.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". 
| | addons.fortify.sso.enabled | bool | `false` | Toggle SSO for Fortify on and off | @@ -513,10 +514,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.anchore.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.anchore.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/anchore-enterprise.git"` | | | addons.anchore.git.path | string | `"./chart"` | | -| addons.anchore.git.tag | string | `"2.4.2-bb.16"` | | +| addons.anchore.git.tag | string | `"2.4.2-bb.18"` | | | addons.anchore.helmRepo.repoName | string | `"registry1"` | | | addons.anchore.helmRepo.chartName | string | `"anchore"` | | -| addons.anchore.helmRepo.tag | string | `"2.4.2-bb.16"` | | +| addons.anchore.helmRepo.tag | string | `"2.4.2-bb.18"` | | | addons.anchore.flux | object | `{"upgrade":{"disableWait":true}}` | Flux reconciliation overrides specifically for the Anchore Package | | addons.anchore.adminPassword | string | `""` | Initial admin password used to authenticate to Anchore. | | addons.anchore.enterprise | object | `{"licenseYaml":"FULL LICENSE\n"}` | Anchore Enterprise functionality. 
| @@ -552,10 +553,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.mattermost.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.mattermost.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/mattermost.git"` | | | addons.mattermost.git.path | string | `"./chart"` | | -| addons.mattermost.git.tag | string | `"9.9.0-bb.1"` | | +| addons.mattermost.git.tag | string | `"9.9.0-bb.4"` | | | addons.mattermost.helmRepo.repoName | string | `"registry1"` | | | addons.mattermost.helmRepo.chartName | string | `"mattermost"` | | -| addons.mattermost.helmRepo.tag | string | `"9.9.0-bb.1"` | | +| addons.mattermost.helmRepo.tag | string | `"9.9.0-bb.4"` | | | addons.mattermost.flux | object | `{}` | Flux reconciliation overrides specifically for the Mattermost Package | | addons.mattermost.enterprise | object | `{"enabled":false,"license":""}` | Mattermost Enterprise functionality. | | addons.mattermost.enterprise.enabled | bool | `false` | Toggle the Mattermost Enterprise. This must be accompanied by a valid license unless you plan to start a trial post-install. 
| @@ -582,10 +583,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.velero.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.velero.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/velero.git"` | | | addons.velero.git.path | string | `"./chart"` | | -| addons.velero.git.tag | string | `"6.7.0-bb.0"` | | +| addons.velero.git.tag | string | `"6.7.0-bb.2"` | | | addons.velero.helmRepo.repoName | string | `"registry1"` | | | addons.velero.helmRepo.chartName | string | `"velero"` | | -| addons.velero.helmRepo.tag | string | `"6.7.0-bb.0"` | | +| addons.velero.helmRepo.tag | string | `"6.7.0-bb.2"` | | | addons.velero.flux | object | `{}` | Flux reconciliation overrides specifically for the Velero Package | | addons.velero.plugins | list | `[]` | Plugin provider for Velero - requires at least one plugin installed. Current supported values: aws, azure, csi | | addons.velero.values | object | `{}` | Values to passthrough to the Velero chart: https://repo1.dso.mil/big-bang/product/packages/values.yaml | @@ -594,10 +595,10 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.keycloak.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.keycloak.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/keycloak.git"` | | | addons.keycloak.git.path | string | `"./chart"` | | -| addons.keycloak.git.tag | string | `"24.0.5-bb.0"` | | +| addons.keycloak.git.tag | string | `"24.0.5-bb.1"` | | | addons.keycloak.helmRepo.repoName | string | `"registry1"` | | | addons.keycloak.helmRepo.chartName | string | `"keycloak"` | | -| addons.keycloak.helmRepo.tag | string | `"24.0.5-bb.0"` | | +| addons.keycloak.helmRepo.tag | string | `"24.0.5-bb.1"` | | | addons.keycloak.database.host | string | `""` | Hostname of a pre-existing database to use for Keycloak. 
Entering connection info will disable the deployment of an internal database and will auto-create any required secrets. | | addons.keycloak.database.type | string | `"postgres"` | Pre-existing database type (e.g. postgres) to use for Keycloak. | | addons.keycloak.database.port | int | `5432` | Port of a pre-existing database to use for Keycloak. | @@ -651,11 +652,11 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.holocron.enabled | bool | `false` | Toggle deployment of Holocron. | | addons.holocron.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.holocron.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/holocron.git"` | | -| addons.holocron.git.tag | string | `"1.0.9"` | | +| addons.holocron.git.tag | string | `"1.0.10"` | | | addons.holocron.git.path | string | `"./chart"` | | | addons.holocron.helmRepo.repoName | string | `"registry1"` | | | addons.holocron.helmRepo.chartName | string | `"holocron"` | | -| addons.holocron.helmRepo.tag | string | `"1.0.9"` | | +| addons.holocron.helmRepo.tag | string | `"1.0.10"` | | | addons.holocron.collectorAuth.existingSecret | string | `""` | Name of existing secret with auth tokens for collector services: https://repo1.dso.mil/groups/big-bang/apps/sandbox/holocron/-/wikis/Administrator-Guide -- Default keys for secret are: -- gitlab-scm-0, gitlab-workflow-0, gitlab-build-0, jira-workflow-0, sonarqube-project-analysis-0 -- If not provided, one will be created | | addons.holocron.collectorAuth.gitlabToken | string | `"mygitlabtoken"` | Tokens for the secret to be created | | addons.holocron.collectorAuth.jiraToken | string | `"myjiratoken"` | | @@ -666,6 +667,8 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.holocron.flux | object | `{}` | Flux reconciliation overrides specifically for the Holocron Package | | addons.holocron.ingress | object | `{"gateway":""}` | Redirect the 
package ingress to a specific Istio Gateway (listed in `istio.gateways`). The default is "public". | | addons.holocron.sso.enabled | bool | `false` | Toggle SSO for Holocron on and off | +| addons.holocron.sso.client_id | string | `""` | OIDC Client ID to use for Holocron | +| addons.holocron.sso.client_secret | string | `""` | OIDC Client Secret to use for Holocron | | addons.holocron.sso.groups | object | `{"admin":"","leadership":""}` | Holocron SSO group roles: https://repo1.dso.mil/groups/big-bang/apps/sandbox/holocron/-/wikis/Administrator-Guide | | addons.holocron.database.host | string | `""` | Hostname of a pre-existing PostgreSQL database to use for Gitlab. -- Entering connection info will disable the deployment of an internal database and will auto-create any required secrets. | | addons.holocron.database.port | int | `5432` | Port of a pre-existing PostgreSQL database to use for Gitlab. | @@ -686,11 +689,11 @@ To start using Big Bang, you will need to create your own Big Bang environment t | addons.thanos.objectStorage.insecure | bool | `false` | Whether or not objectStorage connection should require HTTPS, if connecting to in-cluster object | | addons.thanos.sourceType | string | `"git"` | Choose source type of "git" or "helmRepo" | | addons.thanos.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/thanos.git"` | | -| addons.thanos.git.tag | string | `"15.7.9-bb.1"` | | +| addons.thanos.git.tag | string | `"15.7.9-bb.2"` | | | addons.thanos.git.path | string | `"./chart"` | | | addons.thanos.helmRepo.repoName | string | `"registry1"` | | | addons.thanos.helmRepo.chartName | string | `"thanos"` | | -| addons.thanos.helmRepo.tag | string | `"15.7.9-bb.1"` | | +| addons.thanos.helmRepo.tag | string | `"15.7.9-bb.2"` | | | addons.thanos.flux | object | `{}` | Flux reconciliation overrides specifically for the Thanos Package | | addons.thanos.ingress | object | `{"gateway":""}` | Redirect the package ingress to a specific Istio Gateway 
(listed in `istio.gateways`). The default is "public". | | addons.thanos.values | object | `{}` | | @@ -703,11 +706,12 @@ To start using Big Bang, you will need to create your own Big Bang environment t | wrapper.git.repo | string | `"https://repo1.dso.mil/big-bang/product/packages/wrapper.git"` | Git repo holding the wrapper helm chart, example: https://repo1.dso.mil/big-bang/product/packages/wrapper | | wrapper.git.path | string | `"chart"` | Path inside of the git repo to find the helm chart, example: chart | | wrapper.git.tag | string | `"0.4.9"` | Git tag to check out. Takes precedence over branch. [More info](https://fluxcd.io/flux/components/source/gitrepositories/#reference), example: 0.0.2 | -| packages | object | `{"sample":{"configMaps":{},"dependsOn":[],"enabled":false,"flux":{},"git":{"branch":null,"commit":null,"credentials":{"caFile":"","knownHosts":"","password":"","privateKey":"","publicKey":"","username":""},"existingSecret":"","path":null,"repo":null,"semver":null,"tag":null},"helmRepo":{"chartName":null,"repoName":null,"tag":null},"istio":{},"kustomize":false,"monitor":{},"network":{},"postRenderers":[],"secrets":{},"sourceType":"git","values":{},"wrapper":{"enabled":false}}}` | Packages to deploy with Big Bang @default - '{}' | +| packages | object | `{"sample":{"configMaps":{},"dependsOn":[],"enabled":false,"flux":{},"git":{"branch":null,"commit":null,"credentials":{"caFile":"","knownHosts":"","password":"","privateKey":"","publicKey":"","username":""},"existingSecret":"","path":null,"repo":null,"semver":null,"tag":null},"helmRepo":{"chartName":null,"repoName":null,"tag":null},"istio":{},"kustomize":false,"monitor":{},"network":{},"postRenderers":[],"secrets":{},"sourceType":"git","values":{},"wrapper":{"enabled":false,"postRenderers":[]}}}` | Packages to deploy with Big Bang @default - '{}' | | packages.sample | object | Uses `defaults/<package name>.yaml` for defaults. See `package` Helm chart for additional values that can be set. 
| Package name. Each package will be independently wrapped for Big Bang integration. | | packages.sample.enabled | bool | true | Toggle deployment of this package | | packages.sample.sourceType | string | `"git"` | Choose source type of "git" ("helmRepo" not supported yet) | | packages.sample.wrapper | object | false | Toggle wrapper functionality. See https://docs-bigbang.dso.mil/latest/docs/guides/deployment-scenarios/extra-package-deployment/#Wrapper-Deployment for more details. | +| packages.sample.wrapper.postRenderers | list | `[]` | After deployment, patch wrapper resources. [More info](https://fluxcd.io/flux/components/helm/helmreleases/#post-renderers) | | packages.sample.kustomize | bool | `false` | Use a kustomize deployment rather than Helm | | packages.sample.helmRepo | object | `{"chartName":null,"repoName":null,"tag":null}` | HelmRepo source is supported as an option for Helm deployments. If both `git` and `helmRepo` are provided `git` will take precedence. | | packages.sample.helmRepo.repoName | string | `nil` | Name of the HelmRepo specified in `helmRepositories` | @@ -726,7 +730,7 @@ To start using Big Bang, you will need to create your own Big Bang environment t | packages.sample.git.credentials.caFile | string | `""` | HTTPS certificate authority file. Required for any repo with a self signed certificate | | packages.sample.git.credentials.privateKey | string | `""` | SSH git credentials, privateKey, publicKey, and knownHosts must be provided | | packages.sample.flux | object | `{}` | Override flux settings for this package | -| packages.sample.postRenderers | list | `[]` | After deployment, patch resources. [More info](https://fluxcd.io/flux/components/helm/helmreleases/#post-renderers) | +| packages.sample.postRenderers | list | `[]` | After deployment, patch package resources. [More info](https://fluxcd.io/flux/components/helm/helmreleases/#post-renderers) | | packages.sample.dependsOn | list | `[]` | Specify dependencies for the package. 
Only used for HelmRelease, does not effect Kustomization. See [here](https://fluxcd.io/flux/components/helm/helmreleases/#helmrelease-dependencies) for a reference. | | packages.sample.istio | object | `{}` | Package details for Istio. See [wrapper values](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/blob/main/chart/values.yaml) for settings. | | packages.sample.monitor | object | `{}` | Package details for monitoring. See [wrapper values](https://repo1.dso.mil/big-bang/product/packages/wrapper/-/blob/main/chart/values.yaml) for settings. | diff --git a/docs/understanding-bigbang/package-architecture/harbor.md b/docs/understanding-bigbang/package-architecture/harbor.md new file mode 100644 index 0000000000000000000000000000000000000000..89c236740e00c05d847543766371bb831375bc95 --- /dev/null +++ b/docs/understanding-bigbang/package-architecture/harbor.md @@ -0,0 +1,125 @@ +# Harbor + +## Overview + +[Harbor](https://goharbor.io/) is an open source registry that secures artifacts with policies and role-based access control, ensures images are scanned and free from vulnerabilities, and signs images as trusted. 
+ +### Harbor + +```mermaid +graph LR + subgraph "Harbor" + harborpods("Harbor Pods") + end + + subgraph "Ingress" + ig(Ingress Gateway) --> harborpods("Harbor Pods") + end + + subgraph "External Databases" + harborpods("Harbor Pods") --> database1[(PostgreSQL DB)] + harborpods("Harbor Pods") --> database2[(Redis DB)] + end + + subgraph "Object Storage (S3/Swift)" + harborpods("Harbor Pods") --> bucket[(Harbor Bucket)] + end + + subgraph "Image Scanner" + harborpods("Harbor Pods") --> Trivy("Trivy") + end + + subgraph "Logging" + harborpods("Harbor Pods") --> fluent(Fluentbit) --> logging-ek-es-http + logging-ek-es-http{{Elastic Service<br />logging-ek-es-http}} --> plg[(PLG Storage)] + end + + subgraph "Monitoring" + svcmonitor("Service Monitor") --> harborpods("Harbor Pods") + Prometheus --> svcmonitor("Service Monitor") + end +``` + + + +For more information on the Harbor architecture, see [Harbor Overview and Architecture](https://github.com/goharbor/harbor/wiki/Architecture-Overview-of-Harbor). + +## Harbor Touch Points + +### Storage + +By default Harbor uses local storage for the registry, but you can optionally configure the storage_service setting so that Harbor uses external storage. + +See below for an example of the values to provide an external storage backend for Harbor: + +```yaml +persistence: + imageChartStorage: + # Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift", + # "oss" and fill the information needed in the corresponding section. 
The type + # must be "filesystem" if you want to use persistent volumes for registry + type: s3 + s3: + # Set an existing secret for S3 accesskey and secretkey + # keys in the secret should be REGISTRY_STORAGE_S3_ACCESSKEY and REGISTRY_STORAGE_S3_SECRETKEY for registry + #existingSecret: "" + region: us-west-1 + bucket: bucketname + #accesskey: awsaccesskey + #secretkey: awssecretkey + #regionendpoint: http://myobjects.local + #encrypt: false + #keyid: mykeyid + #secure: true +``` + +### High Availability + +Reference the [Harbor High Availability Guide](https://repo1.dso.mil/big-bang/apps/sandbox/harbor/-/blob/main/chart/docs/High%20Availability.md) for an overview of a harbor high availability deployment. + +See below for an example of the values to provide high availability within harbor: + +```yaml +portal: + replicas: 2 +core: + replicas: 2 +jobservice: + replicas: 2 +registry: + replicas: 2 +``` + +### UI + +Harbor is accessible via extensible API and web UI. Within the values you are able to configure the URL that harbor is able to be accessed. + +See below for an example of how to set the values to set the URL for UI within Harbor: + +```yaml +externalURL: https://core.harbor.domain +core: + secretName: "name_of_secret" +``` + +For additional information reference [Deploying Harbor in Production](https://repo1.dso.mil/big-bang/apps/sandbox/harbor/-/blob/harbor-architecture/docs/production.md) + +### Logging + +Harbor keeps a log of all of the operations that users perform in a project. You can apply filters to help you to search the logs. By default, Harbor tracks all image pull, push, and delete operations performed and keeps a record of these actions in a database. Harbor offers the ability to manage audit logs by configuring an audit log retention window and setting a syslog endpoint to forward audit logs. 
+ +### Monitoring + +Harbor exposes prometheus metrics in the API of each service if the config.yaml used by that service has the metrics.enabled keys set to enabled. Each service exports its own metrics and can be scraped by the monitoring package within a BigBang installation. + +See below for an example of how to set the values to enable metrics for Harbor: + +```yaml +metrics: + enabled: true +``` + +### Dependent Packages + +- PostgreSQL (in-cluster by default; can be configured to use an external postgres) +- Redis (in-cluster by default; can be configured to use an external redis) diff --git a/tests/package-mapping.yaml b/tests/package-mapping.yaml index 61f39b2923ff2031efde39f144c0b9b41dbae205..b98854f2604d4ce50dccc66761905f22cd60d6d9 100644 --- a/tests/package-mapping.yaml +++ b/tests/package-mapping.yaml @@ -87,3 +87,7 @@ metricsServer: repoName: "metrics-server" hrName: "metrics-server" filePath: "metrics-server" +externalSecrets: + repoName: "external-secrets" + hrName: "external-secrets" + filePath: "external-secrets" \ No newline at end of file diff --git a/tests/test-values.yaml b/tests/test-values.yaml index 824856bbb7deb27c5f47f1002a9404797662d704..b2974b4009dfd2b4687797c55c310d556d62b4fc 100644 --- a/tests/test-values.yaml +++ b/tests/test-values.yaml @@ -79,10 +79,6 @@ istio: dashboard: auth: strategy: "anonymous" - values: - pilot: - env: - "ENABLE_NATIVE_SIDECARS": "true" jaeger: enabled: false @@ -917,6 +913,7 @@ loki: - 'cdn.cypress.io' - 'repo1.dso.mil' - 'grafana.dev.bigbang.mil' + - 'optimizationguide-pa.googleapis.com' location: MESH_EXTERNAL ports: - number: 443 @@ -1201,16 +1198,8 @@ neuvector: bbtests: enabled: true cypress: - artifacts: true envs: cypress_url: https://neuvector.dev.bigbang.mil - resources: - requests: - cpu: "2" - memory: "1500M" - limits: - cpu: "2" - memory: "1500M" twistlock: enabled: false @@ -1506,9 +1495,9 @@ addons: - "registry.dev.bigbang.mil" - 'keycloak.dev.bigbang.mil' - 'repo1.dso.mil' - - 
"index.docker.io" - - "auth.docker.io" - - "production.cloudflare.docker.com" + - 'registry1.dso.mil' + - 'ib-prod-harbor-storage.s3.us-gov-west-1.amazonaws.com' + - 'ib-prod-harbor-storage.s3.us-gov-east-1.amazonaws.com' location: MESH_EXTERNAL ports: - number: 443 @@ -2362,7 +2351,7 @@ addons: quarkus.properties: '{{ .Files.Get "resources/dev/quarkus.properties" }}' extraInitContainers: |- - name: plugin - image: registry1.dso.mil/ironbank/big-bang/p1-keycloak-plugin:3.5.0 + image: registry1.dso.mil/ironbank/big-bang/p1-keycloak-plugin:3.5.1 imagePullPolicy: Always command: - sh @@ -2793,6 +2782,9 @@ addons: enabled: true compactor: enabled: true + retentionResolutionRaw: 30d + retentionResolution5m: 30d + retentionResolution1h: 30d bbtests: enabled: true cypress: @@ -2864,3 +2856,12 @@ addons: name: https resolution: DNS + externalSecrets: + values: + istio: + hardened: + enabled: true + bbtests: + enabled: true + cypress: + artifacts: true