diff --git a/.gitignore b/.gitignore index 0edec293f061be23a17926b7b3c0f7d55d0ca80d..9ea33f73e5a98a85ba4ffb4609e982d2f9ab6a4d 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ chart/tests/cypress/fixtures/ chart/tests/cypress/support/ chart/tests/cypress/screenshots/ chart/tests/cypress/videos/ -node_modules \ No newline at end of file +node_modules +debug.yaml \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index f8467de024b63abddc198ff4a36dec151fa9afbd..6b454a766481ef06c76ea1665b54107fb5e4c93b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,26 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), --- +## [8.9.1-bb.0] (2025-02-28) + +### Changed + +- ironbank/gitlab/gitlab/gitlab-webservice (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/certificates (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitaly (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-base (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-container-registry (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-exporter (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-mailroom (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-pages (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-shell (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-sidekiq (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-toolbox (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-webservice (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-workhorse (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/gitlab/gitlab/kubectl (source) 17.8.2 -> 17.9.1 +- registry1.dso.mil/ironbank/opensource/postgres/postgresql (source) 14.16 -> 14.17 + ## [8.8.2-bb.0] 
(2025-02-24) ### Changed diff --git a/README.md b/README.md index c219cf8bd2b8ad172ad90e881960c52644928eec..f37cda05318ceaea40e93595008e04ce23fe9e76 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ <!-- Warning: Do not manually edit this file. See notes on gluon + helm-docs at the end of this file for more information. --> # gitlab -   +   GitLab is the most comprehensive AI-powered DevSecOps Platform. @@ -27,7 +27,7 @@ The [upstream chart's release notes](https://gitlab.com/gitlab-org/charts/gitlab Install Helm -https://helm.sh/docs/intro/install/ +<https://helm.sh/docs/intro/install/> ## Deployment @@ -48,7 +48,7 @@ helm install gitlab chart/ | global.image | object | `{}` | | | global.pod.labels | object | `{}` | | | global.edition | string | `"ee"` | | -| global.gitlabVersion | string | `"17.8.2"` | | +| global.gitlabVersion | string | `"17.9.1"` | | | global.application.create | bool | `false` | | | global.application.links | list | `[]` | | | global.application.allowClusterRoles | bool | `true` | | @@ -362,7 +362,7 @@ helm install gitlab chart/ | global.workhorse.tls.enabled | bool | `false` | | | global.webservice.workerTimeout | int | `60` | | | global.certificates.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/certificates"` | | -| global.certificates.image.tag | string | `"17.8.2"` | | +| global.certificates.image.tag | string | `"17.9.1"` | | | global.certificates.image.pullSecrets[0].name | string | `"private-registry"` | | | global.certificates.init.securityContext.capabilities.drop[0] | string | `"ALL"` | | | global.certificates.init.securityContext.runAsUser | int | `65534` | | @@ -399,13 +399,13 @@ helm install gitlab chart/ | global.certificates.customCAs[29].secret | string | `"ca-certs-dod-trust-anchors-self-signed"` | | | global.certificates.customCAs[30].secret | string | `"ca-certs-eca"` | | | global.kubectl.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/kubectl"` | | -| 
global.kubectl.image.tag | string | `"17.8.2"` | | +| global.kubectl.image.tag | string | `"17.9.1"` | | | global.kubectl.image.pullSecrets[0].name | string | `"private-registry"` | | | global.kubectl.securityContext.runAsUser | int | `65534` | | | global.kubectl.securityContext.fsGroup | int | `65534` | | | global.kubectl.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | | global.gitlabBase.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-base"` | | -| global.gitlabBase.image.tag | string | `"17.8.2"` | | +| global.gitlabBase.image.tag | string | `"17.9.1"` | | | global.gitlabBase.image.pullSecrets[0].name | string | `"private-registry"` | | | global.serviceAccount.enabled | bool | `true` | | | global.serviceAccount.create | bool | `true` | | @@ -747,7 +747,7 @@ helm install gitlab chart/ | postgresql.resources.requests.memory | string | `"500Mi"` | | | postgresql.image.registry | string | `"registry1.dso.mil"` | | | postgresql.image.repository | string | `"ironbank/opensource/postgres/postgresql"` | | -| postgresql.image.tag | string | `"14.15"` | | +| postgresql.image.tag | string | `"14.17"` | | | postgresql.image.pullSecrets[0] | string | `"private-registry"` | | | postgresql.auth.username | string | `"gitlab"` | | | postgresql.auth.password | string | `"bogus-satisfy-upgrade"` | | @@ -790,7 +790,7 @@ helm install gitlab chart/ | registry.resources.requests.cpu | string | `"200m"` | | | registry.resources.requests.memory | string | `"1024Mi"` | | | registry.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-container-registry"` | | -| registry.image.tag | string | `"17.8.2"` | | +| registry.image.tag | string | `"17.9.1"` | | | registry.image.pullSecrets[0].name | string | `"private-registry"` | | | registry.ingress.enabled | bool | `false` | | | registry.metrics.enabled | bool | `true` | | @@ -850,7 +850,7 @@ helm install gitlab chart/ | gitlab.toolbox.replicas | int | `1` | | | 
gitlab.toolbox.antiAffinityLabels.matchLabels.app | string | `"gitaly"` | | | gitlab.toolbox.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-toolbox"` | | -| gitlab.toolbox.image.tag | string | `"17.8.2"` | | +| gitlab.toolbox.image.tag | string | `"17.9.1"` | | | gitlab.toolbox.image.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.toolbox.init.resources.requests.cpu | string | `"200m"` | | | gitlab.toolbox.init.resources.requests.memory | string | `"200Mi"` | | @@ -887,7 +887,7 @@ helm install gitlab chart/ | gitlab.gitlab-exporter.resources.requests.memory | string | `"200Mi"` | | | gitlab.gitlab-exporter.capabilities.drop[0] | string | `"ALL"` | | | gitlab.gitlab-exporter.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-exporter"` | | -| gitlab.gitlab-exporter.image.tag | string | `"17.8.2"` | | +| gitlab.gitlab-exporter.image.tag | string | `"17.9.1"` | | | gitlab.gitlab-exporter.image.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.gitlab-exporter.metrics.enabled | bool | `true` | | | gitlab.gitlab-exporter.metrics.port | int | `9168` | | @@ -909,7 +909,7 @@ helm install gitlab chart/ | gitlab.migrations.resources.requests.cpu | string | `"500m"` | | | gitlab.migrations.resources.requests.memory | string | `"1.5G"` | | | gitlab.migrations.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-toolbox"` | | -| gitlab.migrations.image.tag | string | `"17.8.2"` | | +| gitlab.migrations.image.tag | string | `"17.9.1"` | | | gitlab.migrations.image.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.migrations.securityContext.runAsUser | int | `1000` | | | gitlab.migrations.securityContext.runAsGroup | int | `1000` | | @@ -933,14 +933,14 @@ helm install gitlab chart/ | gitlab.webservice.resources.requests.cpu | string | `"300m"` | | | gitlab.webservice.resources.requests.memory | string | `"2.5G"` | | | gitlab.webservice.image.repository 
| string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-webservice"` | | -| gitlab.webservice.image.tag | string | `"17.8.2"` | | +| gitlab.webservice.image.tag | string | `"17.9.1"` | | | gitlab.webservice.image.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.webservice.workhorse.resources.limits.cpu | string | `"600m"` | | | gitlab.webservice.workhorse.resources.limits.memory | string | `"2.5G"` | | | gitlab.webservice.workhorse.resources.requests.cpu | string | `"600m"` | | | gitlab.webservice.workhorse.resources.requests.memory | string | `"2.5G"` | | | gitlab.webservice.workhorse.image | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-workhorse"` | | -| gitlab.webservice.workhorse.tag | string | `"17.8.2"` | | +| gitlab.webservice.workhorse.tag | string | `"17.9.1"` | | | gitlab.webservice.workhorse.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.webservice.workhorse.metrics.enabled | bool | `true` | | | gitlab.webservice.workhorse.metrics.serviceMonitor.enabled | bool | `true` | | @@ -951,7 +951,7 @@ helm install gitlab chart/ | gitlab.webservice.metrics.serviceMonitor.enabled | bool | `true` | | | gitlab.webservice.helmTests.enabled | bool | `false` | | | gitlab.sidekiq.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-sidekiq"` | | -| gitlab.sidekiq.image.tag | string | `"17.8.2"` | | +| gitlab.sidekiq.image.tag | string | `"17.9.1"` | | | gitlab.sidekiq.image.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.sidekiq.init.resources.limits.cpu | string | `"200m"` | | | gitlab.sidekiq.init.resources.limits.memory | string | `"200Mi"` | | @@ -969,7 +969,7 @@ helm install gitlab chart/ | gitlab.sidekiq.containerSecurityContext.runAsGroup | int | `1000` | | | gitlab.sidekiq.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | | gitlab.gitaly.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitaly"` | | -| gitlab.gitaly.image.tag | 
string | `"17.8.2"` | | +| gitlab.gitaly.image.tag | string | `"17.9.1"` | | | gitlab.gitaly.image.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.gitaly.init.resources.limits.cpu | string | `"200m"` | | | gitlab.gitaly.init.resources.limits.memory | string | `"200Mi"` | | @@ -989,7 +989,7 @@ helm install gitlab chart/ | gitlab.gitaly.containerSecurityContext.runAsGroup | int | `1000` | | | gitlab.gitaly.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | | gitlab.gitlab-shell.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-shell"` | | -| gitlab.gitlab-shell.image.tag | string | `"17.8.2"` | | +| gitlab.gitlab-shell.image.tag | string | `"17.9.1"` | | | gitlab.gitlab-shell.image.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.gitlab-shell.init.resources.limits.cpu | string | `"200m"` | | | gitlab.gitlab-shell.init.resources.limits.memory | string | `"200Mi"` | | @@ -1007,15 +1007,15 @@ helm install gitlab chart/ | gitlab.gitlab-shell.containerSecurityContext.runAsGroup | int | `1000` | | | gitlab.gitlab-shell.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | | gitlab.mailroom.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-mailroom"` | | -| gitlab.mailroom.image.tag | string | `"17.8.2"` | | +| gitlab.mailroom.image.tag | string | `"17.9.1"` | | | gitlab.mailroom.image.pullSecrets[0].name | string | `"private-registry"` | | | gitlab.mailroom.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | | gitlab.gitlab-pages.service.customDomains.type | string | `"ClusterIP"` | | | gitlab.gitlab-pages.image.repository | string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-pages"` | | -| gitlab.gitlab-pages.image.tag | string | `"17.8.2"` | | +| gitlab.gitlab-pages.image.tag | string | `"17.9.1"` | | | gitlab.gitlab-pages.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | | gitlab.praefect.image.repository | 
string | `"registry1.dso.mil/ironbank/gitlab/gitlab/gitaly"` | | -| gitlab.praefect.image.tag | string | `"17.8.2"` | | +| gitlab.praefect.image.tag | string | `"17.9.1"` | | | gitlab.praefect.init.resources.limits.cpu | string | `"200m"` | | | gitlab.praefect.init.resources.limits.memory | string | `"200Mi"` | | | gitlab.praefect.init.resources.requests.cpu | string | `"200m"` | | @@ -1146,4 +1146,3 @@ Please see the [contributing guide](./CONTRIBUTING.md) if you are interested in --- _This file is programatically generated using `helm-docs` and some BigBang-specific templates. The `gluon` repository has [instructions for regenerating package READMEs](https://repo1.dso.mil/big-bang/product/packages/gluon/-/blob/master/docs/bb-package-readme.md)._ - diff --git a/chart/.gitlab-ci.yml b/chart/.gitlab-ci.yml index b4853e2e1170f0e188295d302971f0e4a9f4d894..ce77bfe6fd82fa263910c92a43a3677d6f5f3cbb 100644 --- a/chart/.gitlab-ci.yml +++ b/chart/.gitlab-ci.yml @@ -24,13 +24,14 @@ # Note: Auto CI does not work with multiple buildpacks yet default: - image: registry.gitlab.com/gitlab-org/gitlab-build-images/debian-${DEBIAN_VERSION}-slim-ruby-${RUBY_VERSION}:kubectl-1.30-helm-3.10-helm_kubeconform-0.1.17-vcluster-0.19-awscli-1.32.93 + image: registry.gitlab.com/gitlab-org/gitlab-build-images/debian-${DEBIAN_VERSION}-slim-ruby-${RUBY_VERSION}:kubectl-1.31-helm-3.16-helm_kubeconform-0.1.17-vcluster-0.22-awscli-1.37.9 variables: AUTO_DEPLOY_TAG_REGEX: '^[0-9]+\.[0-9]+\.[0-9]+\+[a-z0-9]{7,}$' DOCKER_VERSION: "27.1.1" HELM_VERSION: "3.10.3" - KUBECTL_VERSION: "1.27.9" + KUBECTL_VERSION: "1.28.3" + VCLUSTER_VERSION: "default" STABLE_REPO_URL: "https://charts.helm.sh/stable" GOOGLE_APPLICATION_CREDENTIALS: ${CI_PROJECT_DIR}/.google_keyfile.json # AUTO_DEVOPS_DOMAIN is the application deployment domain and should be set as a variable at the group or project level. 
@@ -48,9 +49,9 @@ variables: QA_SANITY_SUITE_OPTIONS: '--tag smoke --tag ~skip_live_env --tag ~orchestrated' GITLAB_QA_ADMIN_ACCESS_TOKEN: $GITLAB_ADMIN_TOKEN DEBIAN_VERSION: bookworm - RUBY_VERSION: "3.1.5" + RUBY_VERSION: "3.3.7" CI_TOOLS_VERSION: "4.22.0" - GITLAB_QA_VERSION: "15.0.0" + GITLAB_QA_VERSION: "15.1.0" # STRICT_VERSIONS is used in RSpecs to ensure exact version match for tools like "helm" and "kubectl" STRICT_VERSIONS: "true" KUBE_CRD_SCHEMA_URL: "https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json" @@ -135,8 +136,8 @@ tag_auto_deploy: - if: '$PIPELINE_TYPE == "AUTO_DEPLOY_TRIGGER_PIPELINE"' .source_autodevops: - before_script: - - source scripts/ci/autodevops.sh + before_script: + - source scripts/ci/autodevops.sh update-trigger-branch: stage: prepare @@ -237,7 +238,7 @@ trigger_review_current: optional: true - job: pin_image_versions rules: - - !reference [.rule:skip_if_fork] + - !reference [.rule:skip_if_not_canonical_or_security] - if: '$PIPELINE_TYPE == "DOCS_PIPELINE"' when: never - if: '$CI_PIPELINE_SOURCE != "pipeline" && $CI_PIPELINE_SOURCE != "parent_pipeline" && $PIPELINE_TYPE =~ /DEFAULT_BRANCH_PIPELINE$/ ' @@ -249,7 +250,7 @@ trigger_review_current: .trigger_review_current: rules: - - !reference [.rule:skip_if_fork] + - !reference [.rule:skip_if_not_canonical_or_security] - if: '$PIPELINE_TYPE == "DOCS_PIPELINE"' when: never - if: '$CI_PIPELINE_SOURCE != "pipeline" && $CI_PIPELINE_SOURCE != "parent_pipeline"' @@ -269,7 +270,7 @@ trigger_review_secondary: rules: - if: '$PIPELINE_TYPE == "DOCS_PIPELINE"' when: never - - !reference [.rule:skip_if_fork] + - !reference [.rule:skip_if_not_canonical_or_security] - if: '$CI_PIPELINE_SOURCE != "pipeline" && $CI_PIPELINE_SOURCE != "parent_pipeline"' when: manual - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' @@ -279,7 +280,7 @@ trigger_review_secondary: rules: - if: '$PIPELINE_TYPE == "DOCS_PIPELINE"' when: never - - 
!reference [.rule:skip_if_fork] + - !reference [.rule:skip_if_not_canonical_or_security] - if: '$CI_PIPELINE_SOURCE != "pipeline" && $CI_PIPELINE_SOURCE != "parent_pipeline"' - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' @@ -293,6 +294,9 @@ trigger_review_secondary: pipeline_variables: true inherit: variables: + - CI_PROJECT_PATH + - CANONICAL_PROJECT_PATH + - SECURITY_PROJECT_PATH - PIPELINE_TYPE - REVIEW_REF_PREFIX - LIMIT_TO @@ -391,7 +395,7 @@ trigger-eks130: # --------------------------------------------------------------------------- -.specs: &specs +.specs: image: registry.gitlab.com/gitlab-org/gitlab-build-images/debian-${DEBIAN_VERSION}-ruby-${RUBY_VERSION}-golang-${GO_VERSION}-rust-${RUST_VERSION}-node-20.12-postgresql-${PG_VERSION}:rubygems-${RUBYGEMS_VERSION}-git-2.45-lfs-2.9-chrome-${CHROME_VERSION}-yarn-1.22-graphicsmagick-1.3.36 stage: specs services: @@ -407,9 +411,9 @@ trigger-eks130: CHROME_VERSION: "123" extends: .source_autodevops script: - - ./scripts/ci/install_spec_dependencies + - ./scripts/ci/install_spec_dependencies.sh - set_context - - ./scripts/ci/run_specs + - ./scripts/ci/run_specs.sh artifacts: when: on_failure expire_in: 7d diff --git a/chart/.gitlab/ci/checks.yml b/chart/.gitlab/ci/checks.yml index 9106ece354207019ccef2532e67baec00c7558b0..98e4e90a64e2626484cdce65dab53d8789909d32 100644 --- a/chart/.gitlab/ci/checks.yml +++ b/chart/.gitlab/ci/checks.yml @@ -21,7 +21,7 @@ rubocop: # Perform content linting on documentation Markdown files check_docs_content: - image: registry.gitlab.com/gitlab-org/gitlab-docs/lint-markdown:alpine-3.20-vale-3.7.1-markdownlint2-0.14.0-lychee-0.15.1 + image: registry.gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/lint-markdown:alpine-3.20-vale-3.9.3-markdownlint2-0.17.1-lychee-0.15.1 stage: prepare cache: {} dependencies: [] @@ -36,7 +36,7 @@ check_docs_content: # Perform linting on documentation Markdown files check_docs_markdown: - image: 
registry.gitlab.com/gitlab-org/gitlab-docs/lint-markdown:alpine-3.20-vale-3.7.1-markdownlint2-0.14.0-lychee-0.15.1 + image: registry.gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/lint-markdown:alpine-3.20-vale-3.9.3-markdownlint2-0.17.1-lychee-0.15.1 stage: prepare cache: {} dependencies: [] @@ -51,7 +51,7 @@ check_docs_markdown: # Perform link checking on documentation Markdown files check_docs_links: - image: registry.gitlab.com/gitlab-org/gitlab-docs/lint-markdown:alpine-3.20-vale-3.7.1-markdownlint2-0.14.0-lychee-0.15.1 + image: registry.gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/lint-markdown:alpine-3.20-vale-3.9.3-markdownlint2-0.17.1-lychee-0.15.1 stage: prepare cache: {} dependencies: [] @@ -64,6 +64,29 @@ check_docs_links: - if: '$PIPELINE_TYPE =~ /MR_PIPELINE$/' - if: '$PIPELINE_TYPE =~ /BRANCH_PIPELINE$/' +docs-test hugo: + image: ${CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX}/hugomods/hugo:exts-0.142.0 + stage: prepare + before_script: + - apk add --no-cache git make nodejs npm bash + - git clone --depth 1 https://gitlab.com/gitlab-org/technical-writing/docs-gitlab-com.git + - cd docs-gitlab-com + - make add-latest-icons + # Copy the current project's docs to the appropriate location in the docs website + - mkdir content/charts + - cp -r ../doc/* content/charts/ + script: + # Test that Hugo will build + - hugo --gc --printPathWarnings --panicOnWarning + # Test for invalid index pages + # See https://gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/-/blob/main/scripts/check-index-filenames.sh + - make check-index-pages SEARCH_DIR="../doc" + rules: + - !reference [.rule:skip_if_fork] + - if: '$PIPELINE_TYPE == "DOCS_PIPELINE"' + - if: '$PIPELINE_TYPE =~ /MR_PIPELINE$/' + - if: '$PIPELINE_TYPE =~ /BRANCH_PIPELINE$/' + # https://github.com/zegl/kube-score # Initially motivated to detect duplicated environment variable definitions kube-score: @@ -99,7 +122,6 @@ lint_package: paths: - build rules: - - if: '$PIPELINE_TYPE == 
"DOCS_PIPELINE"' - if: '$PIPELINE_TYPE =~ /_MR_PIPELINE$/' - if: '$PIPELINE_TYPE =~ /_BRANCH_PIPELINE$/' - if: '$PIPELINE_TYPE == "NIGHTLY_PIPELINE"' diff --git a/chart/.gitlab/ci/operator.gitlab-ci.yml b/chart/.gitlab/ci/operator.gitlab-ci.yml index b1571eb07747437acd07c01b2ed91099c7ad9730..bbedc7d6568a3a632d332466bec09f9d7e8d19c7 100644 --- a/chart/.gitlab/ci/operator.gitlab-ci.yml +++ b/chart/.gitlab/ci/operator.gitlab-ci.yml @@ -6,10 +6,10 @@ trigger_operator_test: variables: CHARTS_REF: "${CI_COMMIT_SHA}" TRIGGER_PROJECT: "${CI_PROJECT_PATH}" - PIPELINE_TYPE: "${PIPELINE_TYPE}" inherit: variables: false + allow_failure: true rules: - - if: '$PIPELINE_TYPE == "DOCS_PIPELINE"' - when: never + - !reference [.rule:skip_docs_pipeline] + - !reference [.rule:skip_if_not_canonical_or_security] - when: manual diff --git a/chart/.gitlab/ci/review-apps.gitlab-ci.yml b/chart/.gitlab/ci/review-apps.gitlab-ci.yml index cad1676c19a5e644e89dfeed1ea22aa52dc7e1eb..5a181d7189ab8dd6e37cf161a9792c8e785b5a98 100644 --- a/chart/.gitlab/ci/review-apps.gitlab-ci.yml +++ b/chart/.gitlab/ci/review-apps.gitlab-ci.yml @@ -1,12 +1,13 @@ .review_app_common: stage: review variables: - AGENT_NAME: "gke129-ci-cluster" # connect to 1.29 cluster until we have a dedicated cluster + AGENT_NAME: "gkevc-ci-cluster" environment: - name: gke129_vcluster/${VCLUSTER_NAME} + name: gkevc_review/${VCLUSTER_NAME} auto_stop_in: 1 hour before_script: - source scripts/ci/vcluster.sh + - vcluster_install allow_failure: true rules: - !reference [.rule:skip_if_no_cluster] @@ -46,7 +47,7 @@ review_vcluster_128: extends: .review_app_template variables: - VCLUSTER_K8S_VERSION: "1.28" + VCLUSTER_K8S_VERSION: "v1.28.0" VCLUSTER_NAME: vcluster-1-28-${REVIEW_REF_PREFIX}${CI_COMMIT_SHORT_SHA} environment: on_stop: stop_review_vcluster_128 @@ -60,7 +61,7 @@ stop_review_vcluster_128: review_vcluster_131: extends: .review_app_template variables: - VCLUSTER_K8S_VERSION: "1.31" + VCLUSTER_K8S_VERSION: "v1.31.0" VCLUSTER_NAME: 
vcluster-1-31-${REVIEW_REF_PREFIX}${CI_COMMIT_SHORT_SHA} environment: on_stop: stop_review_vcluster_131 diff --git a/chart/.gitlab/ci/review-docs.yml b/chart/.gitlab/ci/review-docs.yml index 8f3077adb342e561b31c657b780edbab19929923..141d14293c3783e0f65065ab0756577f7366b627 100644 --- a/chart/.gitlab/ci/review-docs.yml +++ b/chart/.gitlab/ci/review-docs.yml @@ -8,7 +8,7 @@ before_script: - gem install gitlab --no-doc # We need to download the script rather than clone the repo since the - # review-docs-cleanup and review-docs-hugo-cleanup job will not be able to run when the branch gets + # review-docs-cleanup job will not be able to run when the branch gets # deleted (when merging the MR). - apk add --update openssl - wget https://gitlab.com/gitlab-org/gitlab/-/raw/master/scripts/trigger-build.rb @@ -25,21 +25,21 @@ - if: '$PIPELINE_TYPE =~ /FEATURE_BRANCH_PIPELINE$/' - if: '$PIPELINE_TYPE == "DOCS_PIPELINE"' -# Trigger a docs build in gitlab-docs +# Trigger a docs build in docs-gitlab-com project # Useful to preview the docs changes live -# https://docs.gitlab.com/ee/development/documentation/review_apps.html +# https://docs.gitlab.com/development/documentation/review_apps/ review-docs-deploy: extends: - .review-docs environment: name: review-docs/mr-${CI_MERGE_REQUEST_IID} - url: https://${DOCS_BRANCH}-${DOCS_GITLAB_REPO_SUFFIX}-${CI_MERGE_REQUEST_IID}.${DOCS_REVIEW_APPS_DOMAIN}/${DOCS_GITLAB_REPO_SUFFIX} + url: https://docs.gitlab.com/upstream-review-mr-${DOCS_GITLAB_REPO_SUFFIX}-${CI_MERGE_REQUEST_IID}/${DOCS_GITLAB_REPO_SUFFIX} auto_stop_in: 2 weeks on_stop: review-docs-cleanup script: - - ./trigger-build.rb docs deploy + - ./trigger-build.rb docs-hugo deploy -# Cleanup remote environment of gitlab-docs +# Cleanup remote environment of docs-gitlab-com review-docs-cleanup: extends: - .review-docs @@ -47,28 +47,4 @@ review-docs-cleanup: name: review-docs/mr-${CI_MERGE_REQUEST_IID} action: stop script: - - ./trigger-build.rb docs cleanup - -# Trigger a docs build 
in gitlab-docs-hugo -# Useful to preview the docs changes live -# https://docs.gitlab.com/ee/development/documentation/review_apps.html -review-docs-hugo-deploy: - extends: - - .review-docs - environment: - name: review-docs/mr-${CI_MERGE_REQUEST_IID}-hugo - url: https://new.docs.gitlab.com/upstream-review-mr-${DOCS_GITLAB_REPO_SUFFIX}-${CI_MERGE_REQUEST_IID}/${DOCS_GITLAB_REPO_SUFFIX} - auto_stop_in: 2 weeks - on_stop: review-docs-hugo-cleanup - script: - - ./trigger-build.rb docs-hugo deploy - -# Cleanup remote environment of gitlab-docs-hugo -review-docs-hugo-cleanup: - extends: - - .review-docs - environment: - name: review-docs/mr-${CI_MERGE_REQUEST_IID}-hugo - action: stop - script: - ./trigger-build.rb docs-hugo cleanup diff --git a/chart/.gitlab/ci/rules.gitlab-ci.yml b/chart/.gitlab/ci/rules.gitlab-ci.yml index 78e6879289509e11f1229774ff2ee651219c1a2a..ad4f9afe34311ad54560b526e4dc7cc0df04cb3a 100644 --- a/chart/.gitlab/ci/rules.gitlab-ci.yml +++ b/chart/.gitlab/ci/rules.gitlab-ci.yml @@ -82,7 +82,17 @@ workflow: if: '$KUBECONFIG == null' when: never -# Sister rule to ".rule:skip_if_no_cluster" as only jobs with `environment` set get KUBECONFIG set +# Skip for all repos that are not canonical. .rule:skip_if_fork: - - if: '$CI_SERVER_HOST != "gitlab.com" && $CI_PROJECT_PATH != $CANONICAL_PROJECT_PATH' + - if: '$CI_SERVER_HOST != "gitlab.com" || $CI_PROJECT_PATH != $CANONICAL_PROJECT_PATH' + when: never + +# Skip for all repos that are not canonical or the security fork. 
+# Sister rule to ".rule:skip_if_no_cluster" as only jobs with `environment` set get KUBECONFIG set +.rule:skip_if_not_canonical_or_security: + - if: '$CI_SERVER_HOST != "gitlab.com" || ($CI_PROJECT_PATH != $CANONICAL_PROJECT_PATH && $CI_PROJECT_PATH != $SECURITY_PROJECT_PATH)' + when: never + +.rule:skip_docs_pipeline: + - if: '$CI_COMMIT_BRANCH =~ /(^docs-|-docs$|^docs\/)/ || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /(^docs-|-docs$|^docs\/)/' when: never diff --git a/chart/.gitlab/ci/validations.yml b/chart/.gitlab/ci/validations.yml index 1843f5b8db183c964e451fbf455a2515ff9e7b35..78fb38ef393badf69166f7ecb4627e2243ff8124 100644 --- a/chart/.gitlab/ci/validations.yml +++ b/chart/.gitlab/ci/validations.yml @@ -20,16 +20,15 @@ --output json . rules: - - if: '$PIPELINE_TYPE == "DOCS_PIPELINE"' - if: '$PIPELINE_TYPE =~ /_MR_PIPELINE$/' - if: '$PIPELINE_TYPE =~ /_BRANCH_PIPELINE$/' - if: '$PIPELINE_TYPE == "NIGHTLY_PIPELINE"' needs: ['lint_package'] -"Validate 1.27.5": +"Validate 1.28.3": extends: .kubeconform variables: - KUBE_VERSION: "1.27.5" + KUBE_VERSION: "1.28.3" HELM_SETTINGS: | global: ingress: @@ -42,27 +41,20 @@ cronJob: apiVersion: batch/v1 -"Validate 1.28.3": - extends: .kubeconform - variables: - KUBE_VERSION: "1.28.3" - HELM_SETTINGS: !reference ["Validate 1.27.5", variables, HELM_SETTINGS] - "Validate 1.29.4": extends: .kubeconform variables: KUBE_VERSION: "1.29.4" - HELM_SETTINGS: !reference ["Validate 1.27.5", variables, HELM_SETTINGS] + HELM_SETTINGS: !reference ["Validate 1.28.3", variables, HELM_SETTINGS] "Validate 1.30.1": extends: .kubeconform variables: KUBE_VERSION: "1.30.1" - HELM_SETTINGS: !reference ["Validate 1.27.5", variables, HELM_SETTINGS] + HELM_SETTINGS: !reference ["Validate 1.28.3", variables, HELM_SETTINGS] "Validate 1.31.1": extends: .kubeconform variables: KUBE_VERSION: "1.31.1" - HELM_SETTINGS: !reference ["Validate 1.27.5", variables, HELM_SETTINGS] - + HELM_SETTINGS: !reference ["Validate 1.28.3", variables, HELM_SETTINGS] diff 
--git a/chart/.gitlab/route-map.yml b/chart/.gitlab/route-map.yml new file mode 100644 index 0000000000000000000000000000000000000000..e7f0ad4905752cd25ecde3c2c53f85de962b7d74 --- /dev/null +++ b/chart/.gitlab/route-map.yml @@ -0,0 +1,5 @@ +# Documentation +- source: /doc/(.+?/)_index\.md/ # doc/build/_index.md + public: '\1' # build/ +- source: /doc/(.+?)\.md/ # doc/build/page.md + public: '\1/' # build/page/ diff --git a/chart/.tool-versions b/chart/.tool-versions index 1811c9a90b5c6477aef05cc030bf6a564443b081..e16719459cfe684bc9b098470dd8d48267c4d68b 100644 --- a/chart/.tool-versions +++ b/chart/.tool-versions @@ -6,5 +6,5 @@ yq 4.34.2 gomplate v3.11.5 # For linting documentation -markdownlint-cli2 0.14.0 -vale 3.7.1 +markdownlint-cli2 0.17.1 +vale 3.9.3 diff --git a/chart/CHANGELOG.md b/chart/CHANGELOG.md index 6f878037675655957ca8e8e362373e6e992dba68..42b3631f374db6709c9e052e402b05c4fcbff071 100644 --- a/chart/CHANGELOG.md +++ b/chart/CHANGELOG.md @@ -2,6 +2,41 @@ documentation](doc/development/changelog.md) for instructions on adding your own entry. +## 8.9.1 (2025-02-26) + +No changes. 
+ +## 8.9.0 (2025-02-19) + +### Added (5 changes) + +- [Add app.kubernetes.io name and version label to workloads](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/453577194e2f5a2add72a46e338efda8be9abb0a) by @afaras72 ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4033)) +- [Make Azure workload identity work for object storage](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/6d82072f78fe0cd8db9b5a65933b60a73a535b4c) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4116)) +- [Workhorse: Support custom Redis sentinel scheme](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/85525884c51b2e65d0fb9fa2db46e0ff625f9a83) by @joawin ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/3800)) +- [Add topologySpreadConstraints](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/b2aa028d2fd9dd94a9ec5f19039a84e745023ddc) by @12bodickyn ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/3599)) +- [Enable pg_sequences data collection for GitLab Exporter](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/c5d0f3d1cb9b4bc4917517426bb8298fbe480861) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4065)) + +### Fixed (2 changes) + +- [Mount certificates to geo-logcursor init containers](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/d211f4d39d05184c7637bf6591bf31d1d9d79533) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4122)) +- [Fix Jobs not using addional labels](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/c8fb3a8ea7ddf39e9ea8b18567d9ebaf0ebee39b) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4056)) + +### Changed (5 changes) + +- [Update dependency gitlab-exporter to v15.2.0](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/a7ce0b021e7222eb01e51bd0763c0fb0e66256b1) ([merge 
request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4100)) +- [Update dependency container-registry to v4.15.2-gitlab](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/8e3c2181b382e289beb876399bd6497907cc5d3c) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4064)) +- [gitaly: Add negotiation timeouts](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/8d466b11adcc3e23fe9acff03fa83aae57bdd002) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4060)) +- [Update dependency gitlab-qa to v15.1.0](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/8bd4b2b5be0debec834f1cc8b68c40c6a8cd2bf4) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4074)) +- [Update Helm release cert-manager to v1.12.15](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/79377f1ebbade16e336e1569a640c27c3d2bb53d) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4058)) + +### Deprecated (1 change) + +- [Document Kubernetes 1.28 as deprecated](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/2d8580c1ed16fa762f1fcdaaf8418989f7a104cf) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4115)) + +## 8.8.2 (2025-02-11) + +No changes. + ## 8.8.1 (2025-01-22) No changes. @@ -26,6 +61,14 @@ No changes. - [Update Helm release gitlab-runner to v0.72.0](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/13515980ce6275fff3d8241b73725a48f443d2fb) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4038)) - [Update dependency gitlab-qa to v15](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/a1b4854886a67007690053b6f7606636d9703f53) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4032)) +## 8.7.6 (2025-02-11) + +No changes. + +## 8.7.5 (2025-01-22) + +No changes. + ## 8.7.4 (2025-01-15) No changes. @@ -58,6 +101,14 @@ No changes. 
- [Update dependency container-registry to v4.14.0-gitlab](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/dcc8ce8e48d88f5ff1aee9f0aa67bf4b505de585) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4017)) - [Update Helm release gitlab-runner to v0.71.0](https://gitlab.com/gitlab-org/charts/gitlab/-/commit/45c82f324306ca23d68384a65103ec889c1b6cee) ([merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/4011)) +## 8.6.5 (2025-02-11) + +No changes. + +## 8.6.4 (2025-01-22) + +No changes. + ## 8.6.3 (2025-01-08) No changes. diff --git a/chart/Chart.yaml b/chart/Chart.yaml index 31afc27950466c1597c26b25ed9004f2f3028fcf..aff2e9715e581a843b600d125559fbeefd4af523 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -1,8 +1,8 @@ --- apiVersion: v1 name: gitlab -version: 8.8.2-bb.0 -appVersion: 17.8.2 +version: 8.9.1-bb.0 +appVersion: 17.9.1 description: GitLab is the most comprehensive AI-powered DevSecOps Platform. keywords: - gitlab @@ -16,7 +16,7 @@ maintainers: annotations: bigbang.dev/maintenanceTrack: bb_integrated bigbang.dev/applicationVersions: | - - Gitlab: 17.8.2 + - Gitlab: 17.9.1 bigbang.dev/upstreamReleaseNotesMarkdown: | The [upstream chart's release notes](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/CHANGELOG.md) may help when reviewing this package.
helm.sh/images: | @@ -27,44 +27,44 @@ annotations: condition: redis.install image: registry1.dso.mil/ironbank/bitnami/redis:7.4.2 - name: alpine-certificates - image: registry1.dso.mil/ironbank/gitlab/gitlab/certificates:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/certificates:17.9.1 - name: cfssl-self-sign condition: shared-secrets.enabled image: registry1.dso.mil/ironbank/gitlab/gitlab/cfssl-self-sign:1.6.1 - name: gitaly - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitaly:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitaly:17.9.1 - name: gitlab-container-registry condition: registry.enabled - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-container-registry:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-container-registry:17.9.1 - name: gitlab-shell - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-shell:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-shell:17.9.1 - name: gitlab-sidekiq - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-sidekiq:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-sidekiq:17.9.1 - name: gitlab-toolbox - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-toolbox:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-toolbox:17.9.1 - name: gitlab-webservice - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-webservice:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-webservice:17.9.1 - name: gitlab-workhorse - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-workhorse:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-workhorse:17.9.1 - name: gitlab-pages - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-pages:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-pages:17.9.1 - name: kubectl - image: registry1.dso.mil/ironbank/gitlab/gitlab/kubectl:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/kubectl:17.9.1 - name: mc image: 
registry1.dso.mil/ironbank/opensource/minio/mc:RELEASE.2024-10-02T08-27-28Z - name: minio image: registry1.dso.mil/ironbank/opensource/minio/minio:RELEASE.2024-06-04T19-20-08Z - name: postgresql condition: postgresql.install - image: registry1.dso.mil/ironbank/opensource/postgres/postgresql:14.16 + image: registry1.dso.mil/ironbank/opensource/postgres/postgresql:14.17 - name: ubi9 condition: upgradeCheck.enabled image: registry1.dso.mil/ironbank/redhat/ubi/ubi9:9.5 - name: gitlab-base - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-base:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-base:17.9.1 - name: gitlab-exporter condition: gitlab.gitlab-exporter.enabled - image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-exporter:17.8.2 + image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-exporter:17.9.1 - name: bbtests condition: bbtests.enabled image: registry1.dso.mil/bigbang-ci/gitlab-tester:0.0.4 diff --git a/chart/Kptfile b/chart/Kptfile index 4d4e6bddc0e9ec7ec5c86ed5ed2cabcf6628bf92..2c24b8e515b5c3b050e1fd8ad1feed066b331eec 100644 --- a/chart/Kptfile +++ b/chart/Kptfile @@ -5,7 +5,7 @@ metadata: upstream: type: git git: - commit: 7596b590ab65ab01e6d843ee5a99eb881255ee93 + commit: d3d68f54eb8350a99a9c61d4513cebc8ad74be44 repo: https://gitlab.com/gitlab-org/charts/gitlab directory: / - ref: v8.8.2 + ref: v8.9.1 diff --git a/chart/charts/cert-manager-v1.12.14.tgz b/chart/charts/cert-manager-v1.12.14.tgz deleted file mode 100644 index 56f883c038307ac872b76eace97ff6415a8492b3..0000000000000000000000000000000000000000 Binary files a/chart/charts/cert-manager-v1.12.14.tgz and /dev/null differ diff --git a/chart/charts/cert-manager-v1.12.15.tgz b/chart/charts/cert-manager-v1.12.15.tgz new file mode 100644 index 0000000000000000000000000000000000000000..66c61566428a83bc27f50e8e3fe057f8e2a4cd44 Binary files /dev/null and b/chart/charts/cert-manager-v1.12.15.tgz differ diff --git a/chart/charts/certmanager-issuer/Chart.yaml 
b/chart/charts/certmanager-issuer/Chart.yaml index c4d1ac1f489a61d74cbd127a402898e7f7a1c258..05d743be70dddc318bf086455c1dd0730efec73a 100644 --- a/chart/charts/certmanager-issuer/Chart.yaml +++ b/chart/charts/certmanager-issuer/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: certmanager-issuer -version: 0.2.0 +version: 0.2.1 appVersion: 0.2.2 description: Configuration Job to add LetsEncrypt Issuer to cert-manager keywords: diff --git a/chart/charts/certmanager-issuer/templates/issuer-job.yaml b/chart/charts/certmanager-issuer/templates/issuer-job.yaml index 6c5dd8fbdec098313f6851b4ebcead806a494827..b20d71872dabbb8475d09d7464c3c77d59603b90 100644 --- a/chart/charts/certmanager-issuer/templates/issuer-job.yaml +++ b/chart/charts/certmanager-issuer/templates/issuer-job.yaml @@ -14,8 +14,9 @@ spec: template: metadata: labels: - app: {{ template "name" . }} - release: {{ .Release.Name }} + {{- include "gitlab.standardLabels" . | nindent 8 }} + {{- include "gitlab.commonLabels" . | nindent 8 }} + {{- include "gitlab.podLabels" . | nindent 8 }} spec: {{- include "gitlab.nodeSelector" . 
| nindent 6 }} {{- include "gitlab.podSecurityContext" .Values.global.kubectl.securityContext | nindent 6 }} diff --git a/chart/charts/gitlab-zoekt-1.4.3.tgz b/chart/charts/gitlab-zoekt-1.4.3.tgz deleted file mode 100644 index 365802616ee8dd27b9efbfd6198b0b76a6e199c2..0000000000000000000000000000000000000000 Binary files a/chart/charts/gitlab-zoekt-1.4.3.tgz and /dev/null differ diff --git a/chart/charts/gitlab-zoekt-1.5.0.tgz b/chart/charts/gitlab-zoekt-1.5.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..8128d87e064fac2f23bd8947b93826751a977307 Binary files /dev/null and b/chart/charts/gitlab-zoekt-1.5.0.tgz differ diff --git a/chart/charts/gitlab/charts/geo-logcursor/Chart.yaml b/chart/charts/gitlab/charts/geo-logcursor/Chart.yaml index 4db87a700a2f661dd3a7842e5ffef001eec39ed6..fc458dc42d1a68716c9c45f3c9201ee30653bf50 100644 --- a/chart/charts/gitlab/charts/geo-logcursor/Chart.yaml +++ b/chart/charts/gitlab/charts/geo-logcursor/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: geo-logcursor -version: 8.8.2 -appVersion: v17.8.2 +version: 8.9.1 +appVersion: v17.9.1 description: GitLab Geo logcursor keywords: - gitlab diff --git a/chart/charts/gitlab/charts/geo-logcursor/templates/deployment.yaml b/chart/charts/gitlab/charts/geo-logcursor/templates/deployment.yaml index ba428b533bb9fea1464594d865739f8b9c99e468..798fbcc70ab5b5d98f50484c2d9e8ac6ddeb00f7 100644 --- a/chart/charts/gitlab/charts/geo-logcursor/templates/deployment.yaml +++ b/chart/charts/gitlab/charts/geo-logcursor/templates/deployment.yaml @@ -35,6 +35,9 @@ spec: {{ $key }}: {{ $value | quote }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- if .Values.tolerations }} tolerations: {{- toYaml .Values.tolerations | nindent 8 }} @@ -60,6 +63,7 @@ spec: {{- include "gitlab.extraEnvFrom" (dict "root" $ "local" .) | nindent 10 }} {{- include "gitlab.timeZone.env" . 
| nindent 10 }} volumeMounts: + {{- include "gitlab.certificates.volumeMount" . | nindent 10 }} {{- include "gitlab.extraVolumeMounts" . | nindent 10 }} {{- include "gitlab.psql.ssl.volumeMount" . | nindent 10 }} {{- include "gitlab.geo.psql.ssl.volumeMount" . | nindent 10 }} @@ -93,6 +97,7 @@ spec: {{- include "gitlab.extraEnvFrom" (dict "root" $ "local" .) | nindent 12 }} {{- include "gitlab.timeZone.env" . | nindent 12 }} volumeMounts: + {{- include "gitlab.certificates.volumeMount" . | nindent 12 }} {{- include "gitlab.extraVolumeMounts" . | nindent 12 }} - name: logcursor-config mountPath: '/var/opt/gitlab/templates' diff --git a/chart/charts/gitlab/charts/geo-logcursor/values.yaml b/chart/charts/gitlab/charts/geo-logcursor/values.yaml index bec04f8ec3217e32f363edad78d62413b625ecc5..c4fdadd463416ca4cb6ca59720301bf13a84dec1 100644 --- a/chart/charts/gitlab/charts/geo-logcursor/values.yaml +++ b/chart/charts/gitlab/charts/geo-logcursor/values.yaml @@ -120,3 +120,13 @@ serviceAccount: # automountServiceAccountToken: false ## Name to be used for serviceAccount, otherwise defaults to chart fullname # name: + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/gitaly/Chart.yaml b/chart/charts/gitlab/charts/gitaly/Chart.yaml index 0d1fe500308170fe59fbae86f49d0040ae768fe5..21ed5bd7c0463f3665f65e1db90fed05c898acaf 100644 --- a/chart/charts/gitlab/charts/gitaly/Chart.yaml +++ b/chart/charts/gitlab/charts/gitaly/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: gitaly -version: 8.8.2 -appVersion: 17.8.2 +version: 8.9.1 +appVersion: 17.9.1 description: Git RPC service for handling all the git calls made by GitLab keywords: - gitlab diff --git a/chart/charts/gitlab/charts/gitaly/templates/_configmap_spec.yaml b/chart/charts/gitlab/charts/gitaly/templates/_configmap_spec.yaml index 92c75a7299d70fdb4278f36a3067b103c927be96..f6979ce1da19d020b0f871b0d65316538d4d517b 100644 --- a/chart/charts/gitlab/charts/gitaly/templates/_configmap_spec.yaml +++ b/chart/charts/gitlab/charts/gitaly/templates/_configmap_spec.yaml @@ -214,4 +214,14 @@ data: max_cgroups_per_repo = {{ .maxCgroupsPerRepo | int64 }} {{- end }} {{- end }} - {{- end }} \ No newline at end of file + {{- end }} + + {{- with .Values.timeout }} + [timeout] + {{- with .uploadPackNegotiation }} + upload_pack_negotiation = {{ . | quote }} + {{- end }} + {{- with .uploadArchiveNegotiation }} + upload_archive_negotiation = {{ . 
| quote }} + {{- end }} + {{- end }} diff --git a/chart/charts/gitlab/charts/gitaly/templates/_statefulset_spec.yaml b/chart/charts/gitlab/charts/gitaly/templates/_statefulset_spec.yaml index d44cd521aaddf061942d0d25b5a4a8609da843d8..04691ec775531cf4e3f21e8fc41b3179ce918b27 100644 --- a/chart/charts/gitlab/charts/gitaly/templates/_statefulset_spec.yaml +++ b/chart/charts/gitlab/charts/gitaly/templates/_statefulset_spec.yaml @@ -186,9 +186,8 @@ spec: {{- end }} {{- if .Values.statefulset.startupProbe.enabled }} startupProbe: - exec: - command: - - /scripts/healthcheck + grpc: + port: {{ coalesce .Values.service.internalPort .Values.global.gitaly.service.internalPort }} initialDelaySeconds: {{ .Values.statefulset.startupProbe.initialDelaySeconds }} periodSeconds: {{ .Values.statefulset.startupProbe.periodSeconds }} timeoutSeconds: {{ .Values.statefulset.startupProbe.timeoutSeconds }} @@ -196,18 +195,16 @@ spec: failureThreshold: {{ .Values.statefulset.startupProbe.failureThreshold }} {{- end }} livenessProbe: - exec: - command: - - /scripts/healthcheck + grpc: + port: {{ coalesce .Values.service.internalPort .Values.global.gitaly.service.internalPort }} initialDelaySeconds: {{ ternary 0 .Values.statefulset.livenessProbe.initialDelaySeconds .Values.statefulset.startupProbe.enabled }} periodSeconds: {{ .Values.statefulset.livenessProbe.periodSeconds }} timeoutSeconds: {{ .Values.statefulset.livenessProbe.timeoutSeconds }} successThreshold: {{ .Values.statefulset.livenessProbe.successThreshold }} failureThreshold: {{ .Values.statefulset.livenessProbe.failureThreshold }} readinessProbe: - exec: - command: - - /scripts/healthcheck + grpc: + port: {{ coalesce .Values.service.internalPort .Values.global.gitaly.service.internalPort }} initialDelaySeconds: {{ ternary 0 .Values.statefulset.readinessProbe.initialDelaySeconds .Values.statefulset.startupProbe.enabled }} periodSeconds: {{ .Values.statefulset.readinessProbe.periodSeconds }} timeoutSeconds: {{ 
.Values.statefulset.readinessProbe.timeoutSeconds }} diff --git a/chart/charts/gitlab/charts/gitaly/templates/statefulset.yml b/chart/charts/gitlab/charts/gitaly/templates/statefulset.yml index 02b49b51d3e4804f65e293217fb6dd5edb1d0d1a..1d6da9b78ffaffcb7486ef80d780ea573c29a547 100644 --- a/chart/charts/gitlab/charts/gitaly/templates/statefulset.yml +++ b/chart/charts/gitlab/charts/gitaly/templates/statefulset.yml @@ -9,4 +9,4 @@ metadata: {{- include "gitlab.commonLabels" . | nindent 4 }} {{- include "gitlab.app.kubernetes.io.labels" $ | nindent 4 }} {{ include (print $.Template.BasePath "/_statefulset_spec.yaml") . }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/chart/charts/gitlab/charts/gitaly/values.yaml b/chart/charts/gitlab/charts/gitaly/values.yaml index 4051387a1a930f618e9267b0a3dbf8c6ad8a6e42..a35dc987df40d1c916358c06552c811a27e3e9fe 100644 --- a/chart/charts/gitlab/charts/gitaly/values.yaml +++ b/chart/charts/gitlab/charts/gitaly/values.yaml @@ -245,3 +245,7 @@ backup: {} # default enable gomemlimit to avoid gc related OOM errors gomemlimit: enabled: true + +timeout: {} + # uploadPackNegotiation: "10m" + # uploadArchiveNegotiation: "1m" diff --git a/chart/charts/gitlab/charts/gitlab-exporter/Chart.yaml b/chart/charts/gitlab/charts/gitlab-exporter/Chart.yaml index 52f3f89d98a664c2a445d4300176bc7a4afa833e..4f21fabf73e55c06878b5a7c1a8f7a2477ced8be 100644 --- a/chart/charts/gitlab/charts/gitlab-exporter/Chart.yaml +++ b/chart/charts/gitlab/charts/gitlab-exporter/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: gitlab-exporter -version: 8.8.2 -appVersion: 15.1.0 +version: 8.9.1 +appVersion: 15.2.0 description: Exporter for GitLab Prometheus metrics (e.g. 
CI, pull mirrors) keywords: - gitlab diff --git a/chart/charts/gitlab/charts/gitlab-exporter/templates/configmap.yaml b/chart/charts/gitlab/charts/gitlab-exporter/templates/configmap.yaml index d5498aaa1698ff8a2ee364e4ff72a8cafce53e48..fd2a38ef5a0bcb83b5e1dd30020b6d4c37687811 100644 --- a/chart/charts/gitlab/charts/gitlab-exporter/templates/configmap.yaml +++ b/chart/charts/gitlab/charts/gitlab-exporter/templates/configmap.yaml @@ -84,6 +84,9 @@ data: rows_count: class_name: Database::RowCountProber <<: *db_common + pg_sequences: + class_name: Database::PgSequencesProber + <<: *db_common configure: | {{- include "gitlab.scripts.configure.secrets" (dict "required" "none" "optional" "redis redis-sentinel postgres gitlab-exporter") | nindent 4 }} diff --git a/chart/charts/gitlab/charts/gitlab-pages/Chart.yaml b/chart/charts/gitlab/charts/gitlab-pages/Chart.yaml index 79b29eda30f43ae017af5c1318b58a9817cac791..8e3a0461d645c60e35d11409e5986a942bf29a36 100644 --- a/chart/charts/gitlab/charts/gitlab-pages/Chart.yaml +++ b/chart/charts/gitlab/charts/gitlab-pages/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: gitlab-pages -version: 8.8.2 -appVersion: 17.8.2 +version: 8.9.1 +appVersion: 17.9.1 description: Daemon for serving static websites from GitLab projects keywords: - gitlab diff --git a/chart/charts/gitlab/charts/gitlab-pages/templates/deployment.yaml b/chart/charts/gitlab/charts/gitlab-pages/templates/deployment.yaml index 9f83bf4bdcfbd2857262f2f9d4a9ce2160c7f46c..2e6e1b8c73768fca57ee1c462462a5eb40adb95b 100644 --- a/chart/charts/gitlab/charts/gitlab-pages/templates/deployment.yaml +++ b/chart/charts/gitlab/charts/gitlab-pages/templates/deployment.yaml @@ -49,6 +49,9 @@ spec: {{- end }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- include "gitlab.nodeSelector" . 
| nindent 6 }} {{- if .Values.tolerations }} tolerations: diff --git a/chart/charts/gitlab/charts/gitlab-pages/values.yaml b/chart/charts/gitlab/charts/gitlab-pages/values.yaml index 5c960dcb74671d8662a59bf89b8a8556b58c852c..b2ec00d97b9bdb007db348de20fc94c74e2efe14 100644 --- a/chart/charts/gitlab/charts/gitlab-pages/values.yaml +++ b/chart/charts/gitlab/charts/gitlab-pages/values.yaml @@ -260,3 +260,13 @@ affinity: # rateLimitTLSDomain: # rateLimitTLSDomainBurst: # rateLimitSubnetsAllowList: + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/gitlab-shell/Chart.yaml b/chart/charts/gitlab/charts/gitlab-shell/Chart.yaml index 168a588cf9b2e0c6b4b4b931e06a9dcbd9113ddd..99f548026368eb2f6e80963fc8760c931f2331a9 100644 --- a/chart/charts/gitlab/charts/gitlab-shell/Chart.yaml +++ b/chart/charts/gitlab/charts/gitlab-shell/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: gitlab-shell -version: 8.8.2 -appVersion: 14.39.0 +version: 8.9.1 +appVersion: 14.40.0 description: sshd for Gitlab keywords: - gitlab diff --git a/chart/charts/gitlab/charts/gitlab-shell/templates/deployment.yaml b/chart/charts/gitlab/charts/gitlab-shell/templates/deployment.yaml index 136a629f4f8ee2f5a756ca2e896fd86f3ee432af..25cd720017f0bc319ead61ead41f296c743285ae 100644 --- a/chart/charts/gitlab/charts/gitlab-shell/templates/deployment.yaml +++ b/chart/charts/gitlab/charts/gitlab-shell/templates/deployment.yaml @@ -46,6 +46,9 @@ spec: {{- end }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- if .Values.tolerations }} 
tolerations: {{- toYaml .Values.tolerations | nindent 8 }} diff --git a/chart/charts/gitlab/charts/gitlab-shell/values.yaml b/chart/charts/gitlab/charts/gitlab-shell/values.yaml index de740f12deabe862f7119604fda2596b8b703363..c997c1c284216a459b3e5d32ce7f50bbfd76e7ac 100644 --- a/chart/charts/gitlab/charts/gitlab-shell/values.yaml +++ b/chart/charts/gitlab/charts/gitlab-shell/values.yaml @@ -211,3 +211,13 @@ affinity: nodeAffinity: key: values: + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/kas/Chart.yaml b/chart/charts/gitlab/charts/kas/Chart.yaml index b431ab0399a25b26e78fc942c9f791a117f6d173..f2a80c9655f8fc03c0b7430cd7264f90a77768ba 100644 --- a/chart/charts/gitlab/charts/kas/Chart.yaml +++ b/chart/charts/gitlab/charts/kas/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: kas -version: 8.8.2 -appVersion: 17.8.2 +version: 8.9.1 +appVersion: 17.9.1 description: GitLab Agent Server keywords: - agent diff --git a/chart/charts/gitlab/charts/kas/templates/_default-config.yaml b/chart/charts/gitlab/charts/kas/templates/_default-config.yaml index 595e76668a9d8a28cea93ef8b30e39254e7d4fba..cdaeb6cde5d88314281448d3f08a02fac2f06e71 100644 --- a/chart/charts/gitlab/charts/kas/templates/_default-config.yaml +++ b/chart/charts/gitlab/charts/kas/templates/_default-config.yaml @@ -46,3 +46,27 @@ private_api: ca_certificate_file: "/etc/ssl/certs/ca-certificates.crt" {{- end }} +{{- if (.Values.autoflow).enabled }} +autoflow: + temporal: + host_port: "{{ ((.Values.autoflow).temporal).namespace }}.tmprl.cloud:7233" + namespace: "{{ ((.Values.autoflow).temporal).namespace }}" + + enable_tls: true + certificate_file: 
"/etc/kas/temporal-worker-client-mtls.crt" + key_file: "/etc/kas/temporal-worker-client-mtls.key" + + workflow_data_encryption: + secret_key_file: "/etc/kas/.gitlab_kas_autoflow_temporal_workflow_data_encryption_secret" + codec_server: + listen: + network: "tcp" + address: ":{{ .Values.service.autoflowCodecServerApiPort }}" + {{- if $.Values.global.kas.tls.enabled }} + certificate_file: /etc/kas/tls.crt + key_file: /etc/kas/tls.key + {{- end }} + temporal_web_ui_url: "https://cloud.temporal.io" + temporal_oidc_url: "https://login.tmprl.cloud/.well-known/openid-configuration" + authorized_user_emails: {{ default list ((((.Values.autoflow).temporal).workflowDataEncryption).codecServer).authorizedUserEmails | toJson }} +{{- end }} diff --git a/chart/charts/gitlab/charts/kas/templates/_helpers.tpl b/chart/charts/gitlab/charts/kas/templates/_helpers.tpl index 72f66b6598e6290973ab8969a4865ace04357791..47e81dbe012ee9176841344ed8f34311814c939e 100644 --- a/chart/charts/gitlab/charts/kas/templates/_helpers.tpl +++ b/chart/charts/gitlab/charts/kas/templates/_helpers.tpl @@ -30,7 +30,7 @@ username: {{ .redisMergedConfig.user }} {{- if .redisMergedConfig.password.enabled }} password_file: /etc/kas/redis/{{ printf "%s-password" (default "redis" .redisConfigName) }} {{- end }} -database_index: {{ .redisMergedConfig.database }} +database_index: {{ .redisMergedConfig.database }} {{- if not .redisMergedConfig.sentinels }} server: address: {{ template "gitlab.redis.host" . }}:{{ template "gitlab.redis.port" . 
}} diff --git a/chart/charts/gitlab/charts/kas/templates/deployment.yaml b/chart/charts/gitlab/charts/kas/templates/deployment.yaml index 772a389b74bba9cc221a79cb5f87f7783167ffbf..f1b27650e87b12d8908ea3cc3aa9a3b4fe779dc7 100644 --- a/chart/charts/gitlab/charts/kas/templates/deployment.yaml +++ b/chart/charts/gitlab/charts/kas/templates/deployment.yaml @@ -43,6 +43,9 @@ spec: {{ $key }}: {{ $value | quote }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- include "gitlab.nodeSelector" . | nindent 6 }} {{- if .Values.tolerations }} tolerations: @@ -87,6 +90,13 @@ spec: name: {{ template "name" . }}-private-api - containerPort: {{ .Values.observability.port }} name: http-metrics + {{- if (.Values.autoflow).enabled }} + - containerPort: {{ .Values.service.autoflowCodecServerApiPort }} + # Below abbreviation because of 15 chars name limit. + # af=autoflow + # cs=codec server + name: {{ template "name" . }}-af-cs-api + {{- end }} readinessProbe: httpGet: path: {{ .Values.observability.readinessProbe.path }} @@ -132,6 +142,20 @@ spec: items: - key: {{ template "gitlab.kas.websocketToken.key" . }} path: .gitlab_kas_websocket_token_secret + {{- if (.Values.autoflow).enabled }} + - secret: + name: {{ template "gitlab.kas.autoflow.temporal.workflowDataEncryption.secret" . }} + items: + - key: {{ template "gitlab.kas.autoflow.temporal.workflowDataEncryption.key" . 
}} + path: .gitlab_kas_autoflow_temporal_workflow_data_encryption_secret + - secret: + name: {{ ((.Values.autoflow.temporal).workerMtls).secretName | quote }} + items: + - key: tls.crt + path: temporal-worker-client-mtls.crt + - key: tls.key + path: temporal-worker-client-mtls.key + {{- end }} {{- if $.Values.global.kas.tls.enabled }} - secret: name: {{ .Values.global.kas.tls.secretName | quote }} diff --git a/chart/charts/gitlab/charts/kas/templates/ingress.yaml b/chart/charts/gitlab/charts/kas/templates/ingress.yaml index ee5fc6edee6bd30e244566b141f6abbeea1adadb..dbc2662e3ba8deb7650916119939fd81392ab360 100644 --- a/chart/charts/gitlab/charts/kas/templates/ingress.yaml +++ b/chart/charts/gitlab/charts/kas/templates/ingress.yaml @@ -64,6 +64,21 @@ spec: serviceName: {{ template "gitlab.kas.serviceName" . }} servicePort: {{ .Values.service.externalPort }} {{- end }} + {{ if (.Values.autoflow).enabled }} + - path: "{{ trimSuffix "/" $.Values.ingress.autoflowCodecServerApiPath }}{{ $.Values.global.ingress.path }}" + {{ if or ($.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress") (eq $.Values.global.ingress.apiVersion "networking.k8s.io/v1") -}} + pathType: {{ default "Prefix" $.Values.global.ingress.pathType }} + backend: + service: + name: {{ template "gitlab.kas.serviceName" . }} + port: + number: {{ .Values.service.autoflowCodecServerApiPort }} + {{- else -}} + backend: + serviceName: {{ template "gitlab.kas.serviceName" . 
}} + servicePort: {{ .Values.service.autoflowCodecServerApiPort }} + {{- end }} + {{- end }} {{- if (and $tlsSecret (eq (include "gitlab.ingress.tls.enabled" $) "true" )) }} tls: - hosts: diff --git a/chart/charts/gitlab/charts/kas/templates/service.yaml b/chart/charts/gitlab/charts/kas/templates/service.yaml index ab37efda2dc05dd0b2290b6a40e463229212892c..a52972c8dd3418deab39bdb385945cfbe39981b3 100644 --- a/chart/charts/gitlab/charts/kas/templates/service.yaml +++ b/chart/charts/gitlab/charts/kas/templates/service.yaml @@ -42,6 +42,12 @@ spec: protocol: TCP name: http-metrics {{- end }} + {{- if (.Values.autoflow).enabled }} + - port: {{ .Values.service.autoflowCodecServerApiPort }} + targetPort: {{ .Values.service.autoflowCodecServerApiPort }} + protocol: TCP + name: tcp-{{ template "name" . }}-autoflow-codec-server-api + {{- end }} selector: {{- include "kas.podSelectorLabels" . | nindent 4 }} {{- end -}} diff --git a/chart/charts/gitlab/charts/kas/values.yaml b/chart/charts/gitlab/charts/kas/values.yaml index ed423e24c7a39fbbffa2dba2f2082c61b294bfc4..c2f82f85c1dca86e4f705834db1de811194828aa 100644 --- a/chart/charts/gitlab/charts/kas/values.yaml +++ b/chart/charts/gitlab/charts/kas/values.yaml @@ -75,6 +75,7 @@ ingress: tls: {} agentPath: / k8sApiPath: /k8s-proxy + autoflowCodecServerApiPath: /autoflow/codec-server maxReplicas: 10 maxUnavailable: 1 minReplicas: 2 @@ -95,6 +96,7 @@ service: apiInternalPort: 8153 kubernetesApiPort: 8154 privateApiPort: 8155 + autoflowCodecServerApiPort: 8142 type: ClusterIP # loadBalancerIP: # loadBalancerSourceRanges: @@ -138,6 +140,19 @@ websocketToken: {} # secret: # key: +## Configure AutoFlow. AutoFlow is a GitLab internal-use experiment. 
+autoflow: {} + # enabled: true + # temporal: + # namespace: 'xxx' + # workerMtls: + # secretName: + # workflowDataEncryption: + # secret: + # key: + # codecServer: + # authorizedUserEmails: ["maintainer@gitlab.example.com"] + privateApi: {} # secret: # key: @@ -185,3 +200,13 @@ affinity: # Priority class assigned to pods priorityClassName: "" + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/mailroom/Chart.yaml b/chart/charts/gitlab/charts/mailroom/Chart.yaml index ff5116a56e06abc44f36a030da9fa2980ac1444a..edf152b0560e9c1799658ed53cdc8b54c3ca1d25 100644 --- a/chart/charts/gitlab/charts/mailroom/Chart.yaml +++ b/chart/charts/gitlab/charts/mailroom/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: mailroom -version: 8.8.2 -appVersion: v17.8.2 +version: 8.9.1 +appVersion: v17.9.1 description: Handling incoming emails keywords: - gitlab diff --git a/chart/charts/gitlab/charts/mailroom/templates/deployment.yaml b/chart/charts/gitlab/charts/mailroom/templates/deployment.yaml index 25b7d6e0bd76acbca6aa8dfd90f2aade485bf118..6d20f325bf173dc12d862128908efd6a370546e2 100644 --- a/chart/charts/gitlab/charts/mailroom/templates/deployment.yaml +++ b/chart/charts/gitlab/charts/mailroom/templates/deployment.yaml @@ -34,6 +34,9 @@ spec: {{ $key }}: {{ $value | quote }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- if .Values.tolerations }} tolerations: {{- toYaml .Values.tolerations | nindent 8 }} diff --git a/chart/charts/gitlab/charts/mailroom/values.yaml 
b/chart/charts/gitlab/charts/mailroom/values.yaml index 4bdad07c357e38dae6b51474f559c0a53ef46318..f5adf77ce3b1dccf337fbebbe56a05f90e59c5e1 100644 --- a/chart/charts/gitlab/charts/mailroom/values.yaml +++ b/chart/charts/gitlab/charts/mailroom/values.yaml @@ -206,3 +206,13 @@ affinity: # Priority class assigned to pods priorityClassName: "" + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/migrations/Chart.yaml b/chart/charts/gitlab/charts/migrations/Chart.yaml index 6f1f3e9f596834a08d2f3e9a4e9f2343d0d2bf3c..0694724cd9d11a997e5560a79622953b8d6f5ade 100644 --- a/chart/charts/gitlab/charts/migrations/Chart.yaml +++ b/chart/charts/gitlab/charts/migrations/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: migrations -version: 8.8.2 -appVersion: v17.8.2 +version: 8.9.1 +appVersion: v17.9.1 description: Database migrations and other versioning tasks for upgrading Gitlab keywords: - gitlab diff --git a/chart/charts/gitlab/charts/praefect/Chart.yaml b/chart/charts/gitlab/charts/praefect/Chart.yaml index d5e45ca0d277cbeb996c61d4073de18fc194ace8..ea8dd1a5eadf2fadf64b980138f05b42ba2a9eed 100644 --- a/chart/charts/gitlab/charts/praefect/Chart.yaml +++ b/chart/charts/gitlab/charts/praefect/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: praefect -version: 8.8.2 -appVersion: 17.8.2 +version: 8.9.1 +appVersion: 17.9.1 description: Praefect is a router and transaction manager for Gitaly, and a required component for running a Gitaly Cluster. 
keywords: diff --git a/chart/charts/gitlab/charts/praefect/templates/statefulset.yaml b/chart/charts/gitlab/charts/praefect/templates/statefulset.yaml index 6843c5f558b182e84312586c65409757398760b4..f678a94dbb3f95426b9403e0c1db56f97fd01985 100644 --- a/chart/charts/gitlab/charts/praefect/templates/statefulset.yaml +++ b/chart/charts/gitlab/charts/praefect/templates/statefulset.yaml @@ -40,6 +40,9 @@ spec: prometheus.io/path: {{ .Values.metrics.path }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- include "gitlab.affinity" . | nindent 6 }} {{- include "gitlab.nodeSelector" . | nindent 6 }} {{- if .Values.tolerations }} diff --git a/chart/charts/gitlab/charts/praefect/values.yaml b/chart/charts/gitlab/charts/praefect/values.yaml index 26c8aa38f1a78f0442b9d606907bd6f421c61a53..27e81a1bff27ac94dfbe82eb312a0ed412ef9356 100644 --- a/chart/charts/gitlab/charts/praefect/values.yaml +++ b/chart/charts/gitlab/charts/praefect/values.yaml @@ -89,7 +89,16 @@ statefulset: affinity: podAntiAffinity: topologyKey: - nodeAffinity: - key: - values: - + nodeAffinity: + key: + values: + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/sidekiq/Chart.yaml b/chart/charts/gitlab/charts/sidekiq/Chart.yaml index ba33ce6cb803f190bf26e488182b430ae522046f..a9750f0eee72dc1aeaeabfc0d67c8274183f1a6d 100644 --- a/chart/charts/gitlab/charts/sidekiq/Chart.yaml +++ b/chart/charts/gitlab/charts/sidekiq/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: sidekiq -version: 8.8.2 -appVersion: v17.8.2 +version: 8.9.1 +appVersion: v17.9.1 description: Gitlab Sidekiq for asynchronous task processing in rails keywords: - gitlab diff --git a/chart/charts/gitlab/charts/sidekiq/templates/deployment.yaml b/chart/charts/gitlab/charts/sidekiq/templates/deployment.yaml index 34af353a87d52583252c5f64be44d8fc385d1683..61dd0c57958933d3d1b3cafd3a90f29683080414 100644 --- a/chart/charts/gitlab/charts/sidekiq/templates/deployment.yaml +++ b/chart/charts/gitlab/charts/sidekiq/templates/deployment.yaml @@ -34,8 +34,8 @@ metadata: namespace: {{ $.Release.Namespace }} labels: {{- include "gitlab.standardLabels" $ | nindent 4 }} - {{- include "gitlab.app.kubernetes.io.labels" $ | nindent 4 }} {{- include "sidekiq.commonLabels" (dict "pod" .common.labels "global" $.Values.common.labels) | nindent 4 }} + {{- include "gitlab.app.kubernetes.io.labels" $ | nindent 4 }} queue-pod-name: {{ .name }} annotations: {{- include "gitlab.deploymentAnnotations" $ | nindent 4 }} @@ -51,8 +51,8 @@ spec: metadata: labels: {{- include "gitlab.standardLabels" $ | nindent 8 }} + {{- include "sidekiq.commonLabels" (dict "pod" .common.labels "global" $.Values.common.labels) | nindent 8 }} {{- include "gitlab.app.kubernetes.io.labels" $ | nindent 8 }} - {{- include "sidekiq.commonLabels" (dict "pod" .common.labels "global" $.Values.common.labels) | nindent 
8 }} {{- include "sidekiq.podLabels" (dict "pod" .podLabels ) | nindent 8 }} queue-pod-name: {{ .name }} annotations: @@ -78,6 +78,9 @@ spec: {{- end }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- with $tolerations }} tolerations: {{- toYaml . | nindent 8 }} diff --git a/chart/charts/gitlab/charts/sidekiq/values.yaml b/chart/charts/gitlab/charts/sidekiq/values.yaml index f8ba45ac0eb120733ccab01b91ae629cc970192f..9ad733785132be7c49ad3d0791f1229dc72e98ef 100644 --- a/chart/charts/gitlab/charts/sidekiq/values.yaml +++ b/chart/charts/gitlab/charts/sidekiq/values.yaml @@ -381,3 +381,13 @@ priorityClassName: "" affinity: podAntiAffinity: topologyKey: + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/spamcheck/Chart.yaml b/chart/charts/gitlab/charts/spamcheck/Chart.yaml index 904cf9fb0fd72523086b30a430cb9cd2c4779d74..031cb2d64b30bffc3638111a6c96772554525a00 100644 --- a/chart/charts/gitlab/charts/spamcheck/Chart.yaml +++ b/chart/charts/gitlab/charts/spamcheck/Chart.yaml @@ -1,6 +1,7 @@ +--- apiVersion: v1 name: spamcheck -version: 8.8.1 +version: 8.9.1 appVersion: 1.2.3 description: GitLab Anti-Spam Engine keywords: diff --git a/chart/charts/gitlab/charts/spamcheck/templates/deployment.yaml b/chart/charts/gitlab/charts/spamcheck/templates/deployment.yaml index c587b1bd5240ae2ea6db47488332216f24d9c07c..ad3b3cd85cf399646e197578c3d2560d873f6372 100644 --- a/chart/charts/gitlab/charts/spamcheck/templates/deployment.yaml +++ 
b/chart/charts/gitlab/charts/spamcheck/templates/deployment.yaml @@ -32,6 +32,9 @@ spec: {{ $key }}: {{ $value | quote }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- include "gitlab.nodeSelector" . | nindent 6 }} {{- if .Values.tolerations }} tolerations: diff --git a/chart/charts/gitlab/charts/spamcheck/values.yaml b/chart/charts/gitlab/charts/spamcheck/values.yaml index af83f868aa057f9f3ff57ec4bbb0d69258d822f8..3ecb3381aba875da88128f8c633e0c0875dee470 100644 --- a/chart/charts/gitlab/charts/spamcheck/values.yaml +++ b/chart/charts/gitlab/charts/spamcheck/values.yaml @@ -111,3 +111,13 @@ affinity: nodeAffinity: key: values: + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/toolbox/Chart.yaml b/chart/charts/gitlab/charts/toolbox/Chart.yaml index 6b63059209f864a4b4772c49876397c71912a382..9eebf8a65dd95e50032a02d265c4164ed06257be 100644 --- a/chart/charts/gitlab/charts/toolbox/Chart.yaml +++ b/chart/charts/gitlab/charts/toolbox/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: toolbox -version: 8.8.2 -appVersion: v17.8.2 +version: 8.9.1 +appVersion: v17.9.1 description: For manually running rake tasks through kubectl keywords: - gitlab diff --git a/chart/charts/gitlab/charts/toolbox/templates/deployment.yaml b/chart/charts/gitlab/charts/toolbox/templates/deployment.yaml index 2a4ab47c9d538d01c5009489efd04063dcceb5a3..7011781608ef6942f7743590b375620e7abc23c4 100644 --- a/chart/charts/gitlab/charts/toolbox/templates/deployment.yaml +++ 
b/chart/charts/gitlab/charts/toolbox/templates/deployment.yaml @@ -35,6 +35,9 @@ spec: {{ $key }}: {{ $value | quote }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- if .Values.tolerations }} tolerations: {{- toYaml .Values.tolerations | nindent 8 }} diff --git a/chart/charts/gitlab/charts/toolbox/values.yaml b/chart/charts/gitlab/charts/toolbox/values.yaml index 7466a6129eac123a8a07d5007cb2449be76a0589..6079c5b79ea7295519fa27f24741fa3bf8886d66 100644 --- a/chart/charts/gitlab/charts/toolbox/values.yaml +++ b/chart/charts/gitlab/charts/toolbox/values.yaml @@ -291,3 +291,13 @@ deployment: strategy: type: Recreate rollingUpdate: null + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/charts/webservice/Chart.yaml b/chart/charts/gitlab/charts/webservice/Chart.yaml index dbfc1974ed63416356850ba19920d320ed5da4fc..44521fe2e8802d0c711fbb3b2ef5efa1a22276f2 100644 --- a/chart/charts/gitlab/charts/webservice/Chart.yaml +++ b/chart/charts/gitlab/charts/webservice/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: webservice -version: 8.8.2 -appVersion: v17.8.2 +version: 8.9.1 +appVersion: v17.9.1 description: HTTP server for Gitlab keywords: - gitlab diff --git a/chart/charts/gitlab/charts/webservice/templates/_datamodel.tpl b/chart/charts/gitlab/charts/webservice/templates/_datamodel.tpl index a4f1e7c4e0e1f254b00e8fe9a7f94494e1911b04..017d2e962cbb2fc1bd289dedcde1971fbbf3f949 100644 --- a/chart/charts/gitlab/charts/webservice/templates/_datamodel.tpl +++ 
b/chart/charts/gitlab/charts/webservice/templates/_datamodel.tpl @@ -58,6 +58,7 @@ This is output as YAML, it can be read back in as a dict via `toYaml`. proxyConnectTimeout: {{ $v.proxyConnectTimeout }} proxyReadTimeout: {{ $v.proxyReadTimeout }} proxyBodySize: {{ $v.proxyBodySize | quote }} + serviceUpstream: {{ $v.serviceUpstream | quote }} useGeoClass: {{ $v.useGeoClass }} {{- end }} common: diff --git a/chart/charts/gitlab/charts/webservice/templates/_helpers.tpl b/chart/charts/gitlab/charts/webservice/templates/_helpers.tpl index 21fd7be309faf80449941fb18192d6d21a1452bb..762e0b9eb92fafb1352c4f65793205f1ef5b6e07 100644 --- a/chart/charts/gitlab/charts/webservice/templates/_helpers.tpl +++ b/chart/charts/gitlab/charts/webservice/templates/_helpers.tpl @@ -103,7 +103,7 @@ provider = "{% $connection.provider %}" aws_access_key_id = {% $connection.aws_access_key_id | strings.TrimSpace | data.ToJSON %} aws_secret_access_key = {% $connection.aws_secret_access_key | strings.TrimSpace | data.ToJSON %} {%- else if eq $connection.provider "AzureRM" %} -{%- $connection = coll.Merge $connection (coll.Dict "azure_storage_account_name" "" "azure_storage_account_name" "" ) %} +{%- $connection = coll.Merge $connection (coll.Dict "azure_storage_account_name" "" "azure_storage_access_key" "") %} # Azure Blob storage configuration. 
[object_storage.azurerm] azure_storage_account_name = {% $connection.azure_storage_account_name | strings.TrimSpace | data.ToJSON %} diff --git a/chart/charts/gitlab/charts/webservice/templates/_ingress.tpl b/chart/charts/gitlab/charts/webservice/templates/_ingress.tpl index 283b90a95c4beaf44b094ae79cd6d9758dd531e2..e95212fa933648a2fc4e4db6223a4d282b204c01 100644 --- a/chart/charts/gitlab/charts/webservice/templates/_ingress.tpl +++ b/chart/charts/gitlab/charts/webservice/templates/_ingress.tpl @@ -28,23 +28,8 @@ metadata: {{- include "ingress.class.annotation" .ingressCfg | nindent 4 }} {{- end }} kubernetes.io/ingress.provider: "{{ template "gitlab.ingress.provider" .ingressCfg }}" - {{- if eq "nginx" (default $global.ingress.provider .ingressCfg.local.provider) }} - {{- if $global.workhorse.tls.enabled }} - nginx.ingress.kubernetes.io/backend-protocol: https - {{- if pluck "verify" .deployment.workhorse.tls (dict "verify" true) | first }} - nginx.ingress.kubernetes.io/proxy-ssl-verify: 'on' - nginx.ingress.kubernetes.io/proxy-ssl-name: {{ include "webservice.fullname.withSuffix" .deployment }}.{{ .root.Release.Namespace }}.svc - {{- if .deployment.workhorse.tls.caSecretName }} - nginx.ingress.kubernetes.io/proxy-ssl-secret: {{ .root.Release.Namespace }}/{{ .deployment.workhorse.tls.caSecretName }} - {{- end }} - {{- end }} - {{- end }} - nginx.ingress.kubernetes.io/proxy-body-size: {{ .ingressCfg.local.proxyBodySize | quote }} - nginx.ingress.kubernetes.io/proxy-read-timeout: {{ .ingressCfg.local.proxyReadTimeout | quote }} - nginx.ingress.kubernetes.io/proxy-connect-timeout: {{ .ingressCfg.local.proxyConnectTimeout | quote }} - {{- end }} {{- include "gitlab.certmanager_annotations" .root | nindent 4 }} - {{- range $key, $value := merge .ingressCfg.local.annotations $global.ingress.annotations }} + {{- range $key, $value := merge .ingressCfg.local.annotations $global.ingress.annotations (include "webservice.ingress.nginx.annotations" . 
| fromYaml)}} {{ $key }}: {{ $value | quote }} {{- end }} spec: @@ -80,4 +65,3 @@ spec: {{- end }} {{- end }} {{- end -}} - diff --git a/chart/charts/gitlab/charts/webservice/templates/_nginx.tpl b/chart/charts/gitlab/charts/webservice/templates/_nginx.tpl new file mode 100644 index 0000000000000000000000000000000000000000..d7d974044427dcf539973ed65fd9efc6381fe8ef --- /dev/null +++ b/chart/charts/gitlab/charts/webservice/templates/_nginx.tpl @@ -0,0 +1,43 @@ +{{/* +Detect if `tls.verify` is set +Returns `.tls.verify` if it is a boolean, +Return false in any other case. +*/}} +{{- define "webservice.ingress.nginx.tls.verify" -}} +{{- $deploymentSet := and (hasKey . "tls") (and (hasKey .tls "verify") (kindIs "bool" .tls.verify)) }} +{{- if $deploymentSet }} +{{- .tls.verify }} +{{- else }} +{{- false }} +{{- end -}} +{{- end -}} + +{{/* +Generate the nginx annotations for the webservice ingress to be used in the merge of annotations in the ingress template. +Returns a YAML string with the annotations. 
+*/}} +{{- define "webservice.ingress.nginx.annotations" -}} +{{- $ingressCfg := .ingressCfg -}} +{{- $global := .root.Values.global }} +{{- $ingress := merge (index .root "ingress" | default dict) (default dict $ingressCfg) -}} +{{- if eq "nginx" (default $global.ingress.provider $ingressCfg.local.provider) }} +{{- if eq (default "nginx" $ingress.provider) "nginx" -}} +{{ $annotations := dict -}} +{{- if $global.workhorse.tls.enabled }} +{{- $_ := set $annotations "nginx.ingress.kubernetes.io/backend-protocol" "https" }} +{{- if eq (include "webservice.ingress.nginx.tls.verify" .deployment.workhorse) "true" -}} +{{- $_ := set $annotations "nginx.ingress.kubernetes.io/proxy-ssl-verify" "on" }} +{{- $_ := set $annotations "nginx.ingress.kubernetes.io/proxy-ssl-name" (printf "%s.%s.svc" (include "webservice.fullname.withSuffix" .deployment) .root.Release.Namespace) }} +{{- if .deployment.workhorse.tls.caSecretName }} +{{- $_ := set $annotations "nginx.ingress.kubernetes.io/proxy-ssl-secret" (printf "%s/%s" .root.Release.Namespace .deployment.workhorse.tls.caSecretName) }} +{{- end }} +{{- end }} +{{- end }} +{{- $_ := set $annotations "nginx.ingress.kubernetes.io/proxy-body-size" $ingress.local.proxyBodySize -}} +{{- $_ := set $annotations "nginx.ingress.kubernetes.io/proxy-read-timeout" $ingress.local.proxyReadTimeout -}} +{{- $_ := set $annotations "nginx.ingress.kubernetes.io/proxy-connect-timeout" $ingress.local.proxyConnectTimeout -}} +{{- $_ := set $annotations "nginx.ingress.kubernetes.io/service-upstream" $ingress.local.serviceUpstream -}} +{{- $annotations | toYaml -}} +{{- end }} +{{- end }} +{{- end }} diff --git a/chart/charts/gitlab/charts/webservice/templates/deployment.yaml b/chart/charts/gitlab/charts/webservice/templates/deployment.yaml index f5e487d4e2fdfc9f1d490e66b3b54e3ba180be3c..392cd389d778e7b7e4558eb62bc8a0a94673032f 100644 --- a/chart/charts/gitlab/charts/webservice/templates/deployment.yaml +++ 
b/chart/charts/gitlab/charts/webservice/templates/deployment.yaml @@ -19,8 +19,8 @@ metadata: labels: {{- include "gitlab.standardLabels" $ | nindent 4 }} {{- include "webservice.labels" . | nindent 4 }} - {{- include "webservice.commonLabels" . | nindent 4 }} {{- include "gitlab.app.kubernetes.io.labels" $ | nindent 4 }} + {{- include "webservice.commonLabels" . | nindent 4 }} {{- if .deployment.labels -}} {{- toYaml .deployment.labels | nindent 4 }} {{- end }} @@ -44,8 +44,8 @@ spec: metadata: labels: {{- include "gitlab.standardLabels" $ | nindent 8 }} - {{- include "webservice.labels" . | nindent 8 }} {{- include "gitlab.app.kubernetes.io.labels" $ | nindent 8 }} + {{- include "webservice.labels" . | nindent 8 }} {{- include "gitlab.podLabels" $ | nindent 8 }} {{- include "webservice.commonLabels" . | nindent 8 }} {{- include "webservice.podLabels" . | nindent 8 }} @@ -72,6 +72,9 @@ spec: {{- end }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- if .tolerations }} tolerations: {{- toYaml .tolerations | nindent 8 }} diff --git a/chart/charts/gitlab/charts/webservice/values.yaml b/chart/charts/gitlab/charts/webservice/values.yaml index 8fc389143ca5f033683374ea12b048ac0912f0af..ce09441bb08ffec3d94aef0b775b658b06e47049 100644 --- a/chart/charts/gitlab/charts/webservice/values.yaml +++ b/chart/charts/gitlab/charts/webservice/values.yaml @@ -115,8 +115,8 @@ ingress: # secretName: # smartcardSecretName: # enabled: true - annotations: - nginx.ingress.kubernetes.io/service-upstream: "true" + serviceUpstream: true + annotations: {} configureCertmanager: # Use this in combination of .deployments below requireBasePath: true @@ -133,8 +133,8 @@ extraIngress: # secretName: # smartcardSecretName: # enabled: true - annotations: - nginx.ingress.kubernetes.io/service-upstream: "true" + serviceUpstream: true + annotations: {} configureCertmanager: # Use this 
in combination of .deployments below requireBasePath: true @@ -508,3 +508,13 @@ deployments: {} affinity: podAntiAffinity: topologyKey: + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/charts/gitlab/templates/_redis.tpl b/chart/charts/gitlab/templates/_redis.tpl index a026d49847876039adfdda52359e7b3a979cb6fc..7e534faad1e64cc4ab25ddc9701c74d02975d7c2 100644 --- a/chart/charts/gitlab/templates/_redis.tpl +++ b/chart/charts/gitlab/templates/_redis.tpl @@ -162,8 +162,10 @@ Return Sentinel list in format for Workhorse {{- define "gitlab.redis.workhorse.sentinel-list" }} {{- include "gitlab.redis.selectedMergedConfig" . -}} {{- $sentinelList := list }} +{{- $scheme := default "redis" .redisMergedConfig.scheme }} {{- range $i, $entry := .redisMergedConfig.sentinels }} - {{- $sentinelList = append $sentinelList (quote (print "tcp://" (trim $entry.host) ":" ( default 26379 $entry.port | int ) ) ) }} + {{- $sentinel := printf "%s://%s:%d" $scheme (trim $entry.host) ($entry.port | default 26379 | int) }} + {{- $sentinelList = append $sentinelList ($sentinel | quote) }} {{- end }} {{- $sentinelList | join "," }} {{- end -}} diff --git a/chart/charts/gitlab/values.yaml b/chart/charts/gitlab/values.yaml index 50836c1255ea1dde1dc2816b75ccf0358b7a1a35..1a00eeb4dd8a675e25431f082bef6d2fb819e984 100644 --- a/chart/charts/gitlab/values.yaml +++ b/chart/charts/gitlab/values.yaml @@ -40,6 +40,7 @@ global: - main - ci - embedding + - sec clickhouse: enabled: false # main: diff --git a/chart/charts/minio/templates/minio_deployment.yaml b/chart/charts/minio/templates/minio_deployment.yaml index 
a4610f4bb9c9933e535c7d55996de64338f3c8d7..f1dae2b632cd33a07aaffa65146d83b9c569ecf0 100755 --- a/chart/charts/minio/templates/minio_deployment.yaml +++ b/chart/charts/minio/templates/minio_deployment.yaml @@ -37,6 +37,9 @@ spec: {{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- include "gitlab.automountServiceAccountToken" . | nindent 6 }} {{- include "gitlab.nodeSelector" . | nindent 6 }} {{- if .Values.tolerations }} diff --git a/chart/charts/minio/values.yaml b/chart/charts/minio/values.yaml index 7dd3eff13e1bb7f582646000d29653fe3a84ca2b..355d7259f201d821b46af0f5faffde7c7fcbda08 100755 --- a/chart/charts/minio/values.yaml +++ b/chart/charts/minio/values.yaml @@ -279,3 +279,13 @@ deployment: serviceAccount: {} ## Used for local override of global ServiceAccount token mounting # automountServiceAccountToken: false + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule \ No newline at end of file diff --git a/chart/charts/registry/templates/deployment.yaml b/chart/charts/registry/templates/deployment.yaml index 3d6454a581a2932fb3e18713e9b3cb20e86d3808..486c6a562298e1930cef5f987e1feb5b30a10ae8 100644 --- a/chart/charts/registry/templates/deployment.yaml +++ b/chart/charts/registry/templates/deployment.yaml @@ -48,6 +48,9 @@ spec: {{ $key }}: {{ $value | quote }} {{- end }} spec: + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- toYaml $.Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- include "gitlab.nodeSelector" . 
| nindent 6 }} {{- if .Values.tolerations }} tolerations: diff --git a/chart/charts/registry/values.yaml b/chart/charts/registry/values.yaml index 1838859af77b890b5b5ac23bceaf1fd6f84a8ca6..f2899d4b9aff5caa52908d72a2e34101643af505 100644 --- a/chart/charts/registry/values.yaml +++ b/chart/charts/registry/values.yaml @@ -455,3 +455,13 @@ tls: # - TLS_ECDHE_RSA_AES_128_GCM_SHA256 # - TLS_ECDHE_RSA_AES_256_GCM_SHA384 # - TLS_ECDHE_RSA_CHACHA20_POLY1305_SHA256 + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app: name + # maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule diff --git a/chart/danger/chart-version/Dangerfile b/chart/danger/chart-version/Dangerfile index 70bd1f9efa43293dea934656011ca051f5234822..994d50dbc6d73c31a743885f8d05ad0fd5a0ad23 100644 --- a/chart/danger/chart-version/Dangerfile +++ b/chart/danger/chart-version/Dangerfile @@ -1,6 +1,9 @@ # frozen_string_literal: true -if helper.all_changed_files.detect(-> { false }) { |filename| filename == 'charts/certmanager-issuer/templates/issuer-job.yaml' } +issuer_job_changed = helper.all_changed_files.any?('charts/certmanager-issuer/templates/issuer-job.yaml') +chart_yaml_changed = helper.all_changed_files.any?('charts/certmanager-issuer/Chart.yaml') + +if issuer_job_changed && !chart_yaml_changed failure <<~MSG Jobs templates are immutable. Changing it breaks upgrades if there's an existing job with the same name. 
Please bump the certmanager-issuer chart version, so that it gets a different generated name diff --git a/chart/doc/.vale/gitlab_docs/Badges-Offerings.yml b/chart/doc/.vale/gitlab_docs/Badges-Offerings.yml index e32fbc445b048cfb6225c9a1505e3cf6233eacd1..8187fe15ff681db4ca5a34fbe24c84bd3b9b47e5 100644 --- a/chart/doc/.vale/gitlab_docs/Badges-Offerings.yml +++ b/chart/doc/.vale/gitlab_docs/Badges-Offerings.yml @@ -4,10 +4,12 @@ # # For a list of all options, see https://docs.gitlab.com/ee/development/documentation/styleguide/#available-product-tier-badges extends: existence -message: "Offerings should be comma-separated, without `and`, and must be capitalized. Example: `GitLab.com, Self-managed, GitLab Dedicated`." +message: "Offerings should be comma-separated, without `and`, and must be capitalized. Example: `GitLab.com, GitLab Self-Managed, GitLab Dedicated`." link: https://docs.gitlab.com/ee/development/documentation/styleguide/#available-product-tier-badges +vocab: false level: error nonword: true scope: raw tokens: - - ^\*\*Offering:\*\* (Dedicated|[^\n]*(SaaS|self-managed|Self-Managed|GitLab dedicated|and|GitLab Dedicated,|, GitLab\.com|, Dedicated)) + - ^\*\*Offering:\*\*[^\n]*(SaaS|[Ss]elf-managed|dedicated|and|Dedicated,|, GitLab\.com) + - ^\*\*Offering:\*\*[^\n]*(?<!GitLab )(Self-Managed|Dedicated) diff --git a/chart/doc/.vale/gitlab_docs/FrontMatter.yml b/chart/doc/.vale/gitlab_docs/FrontMatter.yml new file mode 100644 index 0000000000000000000000000000000000000000..6e055f42f95efabe0254544b3e648bbd20238ae3 --- /dev/null +++ b/chart/doc/.vale/gitlab_docs/FrontMatter.yml @@ -0,0 +1,52 @@ +extends: script +message: "Front matter must have valid 'title' and be closed." 
+link: https://docs.gitlab.com/ee/development/documentation/metadata/ +level: error +scope: raw +script: | + text := import("text") + matches := [] + + // Initialize variables + frontmatterDelimiterCount := 0 + frontmatter := "" + hasError := false + + // Check if frontmatter exists + if !text.re_match("^---\n", scope) { + hasError = true + } + + if !hasError { + for line in text.split(scope, "\n") { + if frontmatterDelimiterCount == 1 { + frontmatter += line + "\n" + } + if frontmatterDelimiterCount == 2 { + break + } + if text.re_match("^---", line) { + frontmatterDelimiterCount++ + start := text.index(scope, line) + matches = append(matches, {begin: start, end: start + len(line)}) + } + } + + // Check for unclosed frontmatter + if frontmatterDelimiterCount != 2 { + hasError = true + } + + // First check if we have a title key at all + hasTitleKey := text.re_match("(?m)^[tT]itle:", frontmatter) + // Then check if it has content (anything but whitespace) after the colon + hasValidTitle := text.re_match("(?m)^[tT]itle:[^\\n]*[^\\s][^\\n]*$", frontmatter) + + if !hasError && (!hasTitleKey || !hasValidTitle) { + hasError = true + } + } + + if !hasError { + matches = [] + } diff --git a/chart/doc/_index.md b/chart/doc/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..614c03d97e12a1cc469b27421d40981c90f23a57 --- /dev/null +++ b/chart/doc/_index.md @@ -0,0 +1,52 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: GitLab Helm chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +To install a cloud-native version of GitLab, use the GitLab Helm chart. +This chart contains all the required components to get started and can scale to large deployments. 
+ +For OpenShift-based installations, use [GitLab Operator](https://docs.gitlab.com/operator/), +otherwise you must update the [security context constraints](https://docs.gitlab.com/operator/security_context_constraints.html) +yourself. + +{{< alert type="warning" >}} + +The default Helm chart configuration is **not intended for production**. +The default values create an implementation where _all_ GitLab services are +deployed in the cluster, which is **not suitable for production workloads**. +For production deployments, you **must** follow the [Cloud Native Hybrid reference architectures](installation/_index.md#use-the-reference-architectures). + +{{< /alert >}} + +For a production deployment, you should have strong working knowledge of Kubernetes. +This method of deployment has different management, observability, and concepts than traditional deployments. + +The GitLab Helm chart is made up of multiple [subcharts](charts/gitlab/_index.md), +each of which can be installed separately. 
+ +## Learn more + +- [Test the GitLab chart on GKE or EKS](quickstart/_index.md) +- [Migrate from using the Linux package to the GitLab chart](installation/migration/_index.md) +- [Prepare to deploy](installation/_index.md) +- [Deploy](installation/deployment.md) +- [View deployment options](installation/command-line-options.md) +- [Configure globals](charts/globals.md) +- [View the subcharts](charts/gitlab/_index.md) +- [View advanced configuration options](advanced/_index.md) +- [View architectural decisions](architecture/_index.md) +- Contribute to development by viewing the [developer documentation](development/_index.md) and + [contribution guidelines](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/CONTRIBUTING.md) +- Create an [issue](https://gitlab.com/gitlab-org/charts/gitlab/-/issues) +- Create a [merge request](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests) +- View [troubleshooting](troubleshooting/_index.md) information diff --git a/chart/doc/advanced/_index.md b/chart/doc/advanced/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..871762ab1cd2e98f9dd5469b6aaab28eaa9f1fc2 --- /dev/null +++ b/chart/doc/advanced/_index.md @@ -0,0 +1,20 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Advanced configuration +--- + +- Bringing your own custom [Docker images](custom-images/_index.md) +- Using an [external database](external-db/_index.md) +- Using an [external Gitaly](external-gitaly/_index.md) +- Using an [external GitLab Pages instance](external-gitlab-pages/_index.md) +- Using an [external Mattermost](external-mattermost/_index.md) +- Using your own [NGINX Ingress Controller](external-nginx/_index.md) +- Using an [external object storage](external-object-storage/_index.md) +- Using an [external 
Redis](external-redis/_index.md) +- Using [FIPS-compliant images](fips/_index.md) +- Making use of [GitLab Geo functionality](geo/_index.md) +- Enabling [internal TLS between services](internal-tls/_index.md) +- After install, [managing Persistent Volumes](persistent-volumes/_index.md) +- Using [Red Hat UBI-based images](ubi/_index.md) diff --git a/chart/doc/advanced/custom-images/_index.md b/chart/doc/advanced/custom-images/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..b93189a16d346b2d7b44087cfb3df8a7368143c0 --- /dev/null +++ b/chart/doc/advanced/custom-images/_index.md @@ -0,0 +1,47 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Use custom Docker images for the GitLab chart +--- + +In certain scenarios (i.e. offline environments), you may want to bring your own images rather than pulling them down from the Internet. This requires specifying your own Docker image registry/repository for each of the charts that make up the GitLab release. + +## Default image format + +Our default format for the image in most cases includes the full path to the image, excluding the tag: + +```yaml +image: + repository: repo.example.com/image + tag: custom-tag +``` + +The end result will be `repo.example.com/image:custom-tag`. + +## Current images and tags + +When planning an upgrade, your current `values.yaml` and the target version of the +GitLab chart can be used to generate a [Helm template](https://helm.sh/docs/helm/helm_template/). +This template will contain the images and their respective tags that will be +needed by the specified version of the chart. 
+ +```shell +# Gather the latest values +helm get values gitlab > gitlab.yaml + +# Use the gitlab.yaml to find the images and tags +helm template versionfinder gitlab/gitlab -f gitlab.yaml --version 7.3.0 | grep 'image:' | tr -d '[[:blank:]]' | sort --unique +``` + +This command can also be used to verify any custom configurations. + +## Example values file + +There is an [example values file](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/custom-images/values.yaml) that demonstrates how to configure a custom Docker registry/repository and tag. You can copy relevant sections of this file for your own releases. + +{{< alert type="note" >}} + +Some of the charts (especially third party charts) sometimes have slightly different conventions for specifying the image registry/repository and tag. You can find documentation for third party charts on the [Artifact Hub](https://artifacthub.io/). + +{{< /alert >}} diff --git a/chart/doc/advanced/external-db/_index.md b/chart/doc/advanced/external-db/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..59dda3f079f3c82bef857caba41e2f0f6d4f2069 --- /dev/null +++ b/chart/doc/advanced/external-db/_index.md @@ -0,0 +1,66 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with an external database +--- + +For a production-ready GitLab chart deployment, use an external database. + +Prerequisites: + +- A deployment of PostgreSQL 14 or later. If you do not have one, consider + a cloud provided solution like [AWS RDS PostgreSQL](https://aws.amazon.com/rds/postgresql/) + or [GCP Cloud SQL](https://cloud.google.com/sql/). For an alternative solution, + consider [the Linux package](external-omnibus-psql.md). +- An empty database named `gitlabhq_production` by default. 
+- A user with full database access. See the + [external database documentation](https://docs.gitlab.com/administration/postgresql/external/) for details. +- A [Kubernetes Secret](https://kubernetes.io/docs/concepts/configuration/secret/) with the password for the database user. +- The [`pg_trgm` and `btree_gist` extensions](https://docs.gitlab.com/install/postgresql_extensions/). If you don't provide an account with + the Superuser flag to GitLab, ensure these extensions are loaded prior to + proceeding with the database installation. + +Networking prerequisites: + +- Ensure that the database is reachable from the cluster. Be sure that your firewall policies allow traffic. +- If you plan to use PostgreSQL as a load balancing cluster and Kubernetes + DNS for service discovery, when you install the `bitnami/postgresql` chart, + use `--set slave.service.clusterIP=None`. + This setting configures the PostgreSQL secondary service as a headless service to + allow DNS `A` records to be created for each secondary instance. + + For an example of how to use Kubernetes DNS for service discovery, + see [`examples/database/values-loadbalancing-discover.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/database/values-loadbalancing-discover.yaml). + +To configure the GitLab chart to use an external database: + +1. Set the following parameters: + + - `postgresql.install`: Set to `false` to disable the embedded database. + - `global.psql.host`: Set to the hostname of the external database, can be a domain or an IP address. + - `global.psql.password.secret`: The name of the [secret that contains the database password for the `gitlab` user](../../installation/secrets.md#postgresql-password). + - `global.psql.password.key`: Within the secret, the key that contains the password. + +1. Optional. The following items can be further customized if you are not using the defaults: + + - `global.psql.port`: The port the database is available on. Defaults to `5432`. 
+   - `global.psql.database`: The name of the database.
+   - `global.psql.username`: The user with access to the database.
+
+1. Optional. If you use a mutual TLS connection to the database, set the following:
+
+   - `global.psql.ssl.secret`: A secret that contains the client certificate, key, and certificate authority.
+   - `global.psql.ssl.serverCA`: In the secret, the key that refers to the certificate authority (CA).
+   - `global.psql.ssl.clientCertificate`: In the secret, the key that refers to the client certificate.
+   - `global.psql.ssl.clientKey`: In the secret, the key that refers to the client key.
+
+1. When you deploy the GitLab chart, add the values by using the `--set` flag. For example:
+
+   ```shell
+   helm install gitlab gitlab/gitlab \
+     --set postgresql.install=false \
+     --set global.psql.host=psql.example \
+     --set global.psql.password.secret=gitlab-postgresql-password \
+     --set global.psql.password.key=postgres-password
+   ```
diff --git a/chart/doc/advanced/external-db/external-omnibus-psql.md b/chart/doc/advanced/external-db/external-omnibus-psql.md
index 27a74e6d7f93c2f3c6e1b4bb541c917055eeeccc..1e7a911e8f0072e4adf683414c1f63ac4a80a8bd 100644
--- a/chart/doc/advanced/external-db/external-omnibus-psql.md
+++ b/chart/doc/advanced/external-db/external-omnibus-psql.md
@@ -2,10 +2,9 @@
 stage: Systems
 group: Distribution
 info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
+title: Set up standalone PostgreSQL database
 ---
-# Set up standalone PostgreSQL database
-
 We'll make use of the [Linux package](https://about.gitlab.com/install/#ubuntu)
 for Ubuntu. This package provides versions of the services that are guaranteed
 to be compatible with the charts' services.
## Create VM with the Linux package @@ -23,7 +22,7 @@ Follow the installation instructions for the [Linux package](https://about.gitla Create a minimal `gitlab.rb` file to be placed at `/etc/gitlab/gitlab.rb`. Be very explicit about what is enabled on this node, use the contents below. -_Note_: This example is not intended to provide [PostgreSQL for scaling](https://docs.gitlab.com/ee/administration/postgresql/index.html). +_Note_: This example is not intended to provide [PostgreSQL for scaling](https://docs.gitlab.com/administration/postgresql/). _**NOTE**: The values below should be replaced_ diff --git a/chart/doc/advanced/external-gitaly/_index.md b/chart/doc/advanced/external-gitaly/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..964bfdf4862d09febd1ba0339f7ff2af3f3e2e65 --- /dev/null +++ b/chart/doc/advanced/external-gitaly/_index.md @@ -0,0 +1,787 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with an external Gitaly +--- + +This document intends to provide documentation on how to configure this Helm chart with an external Gitaly service. + +If you don't have Gitaly configured, for on-premise or deployment to VM, +consider using our [Linux package](external-omnibus-gitaly.md). + +{{< alert type="note" >}} + +External Gitaly _services_ can be provided by Gitaly nodes, or +[Praefect](https://docs.gitlab.com/administration/gitaly/praefect/) clusters. + +{{< /alert >}} + +## Configure the chart + +Disable the `gitaly` chart and the Gitaly service it provides, and point the other services to the external service. + +You need to set the following properties: + +- `global.gitaly.enabled`: Set to `false` to disable the included Gitaly chart. 
+- `global.gitaly.external`: This is an array of [external Gitaly service(s)](../../charts/globals.md#external).
+- `global.gitaly.authToken.secret`: The name of the [secret which contains the token for authentication](../../installation/secrets.md#gitaly-secret).
+- `global.gitaly.authToken.key`: The key within the secret, which contains the token content.
+
+The external Gitaly services will make use of their own instances of GitLab Shell.
+Depending on your implementation, you can configure those with the secrets from this
+chart, or you can configure this chart's secrets with the content from a predefined
+source.
+
+You **may** need to set the following properties:
+
+- `global.shell.authToken.secret`: The name of the [secret which contains the secret for GitLab Shell](../../installation/secrets.md#gitlab-shell-secret).
+- `global.shell.authToken.key`: The key within the secret, which contains the secret content.
+
+A complete example configuration, with two external services (`external-gitaly.yml`):
+
+```yaml
+global:
+  gitaly:
+    enabled: false
+    external:
+      - name: default # required
+        hostname: node1.git.example.com # required
+        port: 8075 # optional, default shown
+      - name: praefect # required
+        hostname: ha.git.example.com # required
+        port: 2305 # Praefect uses port 2305
+        tlsEnabled: false # optional, overrides gitaly.tls.enabled
+    authToken:
+      secret: external-gitaly-token # required
+      key: token # optional, default shown
+    tls:
+      enabled: false # optional, default shown
+```
+
+Example installation using the above configuration file in conjunction with other
+configuration via `gitlab.yml`:
+
+```shell
+helm upgrade --install gitlab gitlab/gitlab \
+  -f gitlab.yml \
+  -f external-gitaly.yml
+```
+
+## Multiple external Gitaly
+
+If your implementation uses multiple Gitaly nodes external to these charts,
+you can define multiple hosts as well. The syntax is slightly different, as
+to allow the complexity required.
+ +An [example values file](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/examples/gitaly/values-multiple-external.yaml) is provided, which shows the +appropriate set of configuration. The content of this values file is not +interpreted correctly via `--set` arguments, so should be passed to Helm +with the `-f / --values` flag. + +### Connecting to external Gitaly over TLS + +If your external [Gitaly server listens over TLS port](https://docs.gitlab.com/administration/gitaly/#enable-tls-support), +you can make your GitLab instance communicate with it over TLS. To do this, you +have to + +1. Create a Kubernetes secret containing the certificate of the Gitaly + server + + ```shell + kubectl create secret generic gitlab-gitaly-tls-certificate --from-file=gitaly-tls.crt=<path to certificate> + ``` + +1. Add the certificate of external Gitaly server to the list of + [custom Certificate Authorities](../../charts/globals.md#custom-certificate-authorities) + In the values file, specify the following + + ```yaml + global: + certificates: + customCAs: + - secret: gitlab-gitaly-tls-certificate + ``` + + or pass it to the `helm upgrade` command using `--set` + + ```shell + --set global.certificates.customCAs[0].secret=gitlab-gitaly-tls-certificate + ``` + +1. To enable TLS for all Gitaly instances, set `global.gitaly.tls.enabled: true`. + + ```yaml + global: + gitaly: + tls: + enabled: true + ``` + + To enable for instances individually, set `tlsEnabled: true` for that entry. + + ```yaml + global: + gitaly: + external: + - name: default + hostname: node1.git.example.com + tlsEnabled: true + ``` + +{{< alert type="note" >}} + +You can choose any valid secret name and key for this, but make +sure the key is unique across all the secrets specified in `customCAs` to avoid +collision since all keys within the secrets will be mounted. You **do not** +need to provide the key for the certificate, as this is the _client side_. 
+ +{{< /alert >}} + +## Test that GitLab can connect to Gitaly + +To check that GitLab can connect to the external Gitaly server: + +```shell +kubectl exec -it <toolbox-pod> -- gitlab-rake gitlab:gitaly:check +``` + +If you are using Gitaly with TLS, you can also check if GitLab Chart trusts the Gitaly certificate: + +```shell +kubectl exec -it <toolbox-pod> -- echo | /usr/bin/openssl s_client -connect <gitaly-host>:<gitaly-port> +``` + +## Migrate from Gitaly chart to external Gitaly + +If you are using the Gitaly Chart to provide the Gitaly service and you need to migrate all of your +repositories to an external Gitaly service, this can be done with one of the following methods: + +- [Migrate with the repository storage moves API (recommended)](#migrate-with-the-repository-storage-moves-api). +- [Migrate with the backup/restore method](#migrate-with-the-backuprestore-method). + +### Migrate with the repository storage moves API + +This method: + +- Uses the [repository storage moves API](https://docs.gitlab.com/api/project_repository_storage_moves/) + to migrate repositories from the Gitaly chart to the external Gitaly service. +- Can be performed with zero downtime. +- Requires that the external Gitaly service resides within the same VPC/zone as the Gitaly pods. +- Has not been tested with the [Praefect chart](../../charts/gitlab/praefect/_index.md) and is not supported. + +#### Step 1: Set up external Gitaly Service or Gitaly Cluster + +Set up an [external Gitaly](https://docs.gitlab.com/administration/gitaly/configure_gitaly/) +or [external Gitaly Cluster](https://docs.gitlab.com/administration/gitaly/praefect/). 
You must +provide the Gitaly token and GitLab Shell secret from your Chart installation as part of those steps: + +```shell +# Get the GitLab Shell secret +kubectl get secret <release>-gitlab-shell-secret -ojsonpath='{.data.secret}' | base64 -d + +# Get the Gitaly token +kubectl get secret <release>-gitaly-secret -ojsonpath='{.data.token}' | base64 -d +``` + +{{< tabs >}} + +{{< tab title="Gitaly" >}} + +- The Gitaly token extracted here should be used for the `AUTH_TOKEN` value. +- The GitLab Shell secret extracted here should be used for the `shellsecret` value. + +{{< /tab >}} + +{{< tab title="Gitaly Cluster" >}} + +- The Gitaly token extracted here should be used for the `PRAEFECT_EXTERNAL_TOKEN`. +- The GitLab Shell secret extracted here should be used for the `GITLAB_SHELL_SECRET_TOKEN`. + +{{< /tab >}} + +{{< /tabs >}} + +Lastly, ensure that the firewall for the external Gitaly service allows traffic on the configured +Gitaly port for your Kubernetes pod IP range. + +#### Step 2: Configure Instance to use new Gitaly Service + +1. Configure GitLab to use the external Gitaly. + If there are any Gitaly references in your main `gitlab.yml` configuration file, remove those + and create a new `mixed-gitaly.yml` file with the following content. + + If you have previously defined additional Gitaly storages, you need to ensure a matching Gitaly + storage with the same name is specified in the new configuration, otherwise the restore operation + fails. 
+ + Refer to the + [connecting to external Gitaly over TLS](#connecting-to-external-gitaly-over-tls) section if you + are configuring TLS: + + {{< tabs >}} + + {{< tab title="Gitaly" >}} + + ```yaml + global: + gitaly: + internal: + names: + - default + external: + - name: ext-gitaly # required + hostname: node1.git.example.com # required + port: 8075 # optional, default shown + tlsEnabled: false # optional, overrides gitaly.tls.enabled + ``` + + {{< /tab >}} + + {{< tab title="Gitaly Cluster" >}} + + ```yaml + global: + gitaly: + internal: + names: + - default + external: + - name: ext-gitaly-cluster # required + hostname: ha.git.example.com # required + port: 2305 # Praefect uses port 2305 + tlsEnabled: false # optional, overrides gitaly.tls.enabled + ``` + + {{< /tab >}} + + {{< /tabs >}} + +1. Apply the new configuration using the `gitlab.yml` and `mixed-gitaly.yml` files: + + ```shell + helm upgrade --install gitlab gitlab/gitlab \ + -f gitlab.yml \ + -f mixed-gitaly.yml + ``` + +1. On the Toolbox pod, confirm that GitLab can connect to the external Gitaly successfully: + + ```shell + kubectl exec <toolbox pod name> -it -- gitlab-rake gitlab:gitaly:check + ``` + +1. 
Ensure that the external Gitaly can connect back to your Chart install: + + {{< tabs >}} + + {{< tab title="Gitaly" >}} + + Ensure that the Gitaly service can perform callbacks to the GitLab API successfully: + + ```shell + sudo /opt/gitlab/embedded/bin/gitaly check /var/opt/gitlab/gitaly/config.toml + ``` + + {{< /tab >}} + + {{< tab title="Gitaly Cluster" >}} + + On all Praefect nodes, ensure that the Praefect service can connect to the Gitaly nodes: + + ```shell + # Run on Praefect nodes + sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml dial-nodes + ``` + + On all Gitaly nodes, ensure that the Gitaly service can perform callbacks to the GitLab API + successfully: + + ```shell + # Run on Gitaly nodes + sudo /opt/gitlab/embedded/bin/gitaly check /var/opt/gitlab/gitaly/config.toml + ``` + + {{< /tab >}} + + {{< /tabs >}} + +#### Step 3: Get the Gitaly pod IP and hostnames + +For the repository storage moves API to succeed, the external Gitaly service needs to be able to connect back to +the Gitaly pods using the pod service hostname. In order for the pod service hostnames to be resolvable, we +need to add the hostnames to the hosts file on each external Gitaly service running the Gitaly process. + +1. Fetch a list of Gitaly pods and their respective internal IP addresses/hostnames: + + ```shell + kubectl get pods -l app=gitaly -o jsonpath='{range .items[*]}{.status.podIP}{"\t"}{.spec.hostname}{"."}{.spec.subdomain}{"."}{.metadata.namespace}{".svc\n"}{end}' + ``` + +1. Add the output from the last step to the `/etc/hosts` file on each external Gitaly service running the Gitaly process. +1. Confirm that the Gitaly pod hostnames can be pinged from each external Gitaly service running the Gitaly process: + + ```shell + ping <gitaly pod hostname> + ``` + +After connectivity is confirmed, we can proceed to scheduling the repository storage move. 
+ +#### Step 4: Schedule the repository storage move + +Schedule the move by following the steps indicated in [moving repositories](https://docs.gitlab.com/administration/operations/moving_repositories/#moving-repositories). + +#### Step 5: Final configuration and validation + +1. If you have multiple Gitaly storages, [configure where new repositories are stored](https://docs.gitlab.com/administration/repository_storage_paths/#configure-where-new-repositories-are-stored). + +1. Consider generating a consolidated `gitlab.yml` for the future that includes the external Gitaly configuration: + + ```shell + helm get values <RELEASE_NAME> -o yaml > gitlab.yml + ``` + +1. Disable the internal Gitaly subchart in the `gitlab.yml` file, and point the new `default` repository storage to the external Gitaly service. [GitLab requires a default repository storage](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#gitlab-requires-a-default-repository-storage): + + {{< tabs >}} + + {{< tab title="Gitaly" >}} + + ```yaml + global: + gitaly: + enabled: false # Disable the internal Gitaly subchart + external: + - name: ext-gitaly # required + hostname: node1.git.example.com # required + port: 8075 # optional, default shown + tlsEnabled: false # optional, overrides gitaly.tls.enabled + - name: default # Add the default repository storage, use the same settings as ext-gitaly + hostname: node1.git.example.com + port: 8075 + tlsEnabled: false + ``` + + {{< /tab >}} + + {{< tab title="Gitaly Cluster" >}} + + ```yaml + global: + gitaly: + enabled: false # Disable the internal Gitaly subchart + external: + - name: ext-gitaly-cluster # required + hostname: ha.git.example.com # required + port: 2305 # Praefect uses port 2305 + tlsEnabled: false # optional, overrides gitaly.tls.enabled + - name: default # Add the default repository storage, use the same settings as ext-gitaly-cluster + hostname: ha.git.example.com + port: 2305 + tlsEnabled: false + ``` + + {{< /tab >}} + + {{< 
/tabs >}} + +1. Apply the new configuration: + + ```shell + helm upgrade --install gitlab gitlab/gitlab \ + -f gitlab.yml + ``` + +1. Optional. Remove the changes made to each external Gitaly `/etc/hosts` file after following the [get the Gitaly pod IP and hostnames](#step-3-get-the-gitaly-pod-ip-and-hostnames) step. + +1. After you have confirmed everything is working as expected, you can delete the Gitaly PVC: + + WARNING: Do not delete the Gitaly PVC until you have double checked that everything is working as expected. + + ```shell + kubectl delete pvc repo-data-<release>-gitaly-0 + ``` + +### Migrate with the backup/restore method + +This method: + +- Backs up your repositories from the Gitaly chart PersistentVolumeClaim (PVC) and then restore them to the +external Gitaly service. +- Does incur downtime to all users. +- Has not been tested with the [Praefect chart](../../charts/gitlab/praefect/_index.md) and is not supported. + +#### Step 1: Get the current release revision of the GitLab Chart + +In the unlikely event that something goes wrong during the migration, get the current release +revision of the GitLab Chart. Copy the output and put it aside just in case we need to perform a +[rollback](#rollback): + +```shell +helm history <release> --max=1 +``` + +#### Step 2: Setup external Gitaly Service or Gitaly Cluster + +Set up an [external Gitaly](https://docs.gitlab.com/administration/gitaly/configure_gitaly/) +or [external Gitaly Cluster](https://docs.gitlab.com/administration/gitaly/praefect/). 
You must +provide the Gitaly token and GitLab Shell secret from your Chart installation as part of those steps: + +```shell +# Get the GitLab Shell secret +kubectl get secret <release>-gitlab-shell-secret -ojsonpath='{.data.secret}' | base64 -d + +# Get the Gitaly token +kubectl get secret <release>-gitaly-secret -ojsonpath='{.data.token}' | base64 -d +``` + +{{< tabs >}} + +{{< tab title="Gitaly" >}} + +- The Gitaly token extracted here should be used for the `AUTH_TOKEN` value. +- The GitLab Shell secret extracted here should be used for the `shellsecret` value. + +{{< /tab >}} + +{{< tab title="Gitaly Cluster" >}} + +- The Gitaly token extracted here should be used for the `PRAEFECT_EXTERNAL_TOKEN`. +- The GitLab Shell secret extracted here should be used for the `GITLAB_SHELL_SECRET_TOKEN`. + +{{< /tab >}} + +{{< /tabs >}} + +#### Step 3: Verify no Git changes can be made during migration + +To ensure the data integrity of the migration, prevent any changes from being made to your Git +repositories in the following steps: + +**1. Enable Maintenance Mode** + +If you are using GitLab Enterprise Edition, enable [maintenance mode](https://docs.gitlab.com/administration/maintenance_mode/#enable-maintenance-mode) either through the UI, API or the Rails console: + +```shell +kubectl exec <toolbox pod name> -it -- gitlab-rails runner 'Gitlab::CurrentSettings.update!(maintenance_mode: true)' +``` + +**2. Scale down Runner pods** + +If you are using GitLab Community Edition, you must scale down any GitLab Runner pods that are running in the cluster. This prevents +the Runners from connecting to GitLab to process CI/CD jobs. + +If you are using GitLab Enterprise Edition, this step is optional because [maintenance mode](https://docs.gitlab.com/administration/maintenance_mode/#enable-maintenance-mode) +prevents Runners in the cluster from connecting to GitLab. 
+ +```shell +# Make note of the current number of replicas for Runners so we can scale up to this number later +kubectl get deploy -lapp=gitlab-gitlab-runner,release=<release> -o jsonpath='{.items[].spec.replicas}{"\n"}' + +# Scale down the Runners pods to zero +kubectl scale deploy -lapp=gitlab-gitlab-runner,release=<release> --replicas=0 +``` + +**3. Confirm no CI jobs are running** + +In the Admin Area, go to **CI/CD > Jobs**. This page shows you all jobs, but confirm that there are no jobs with the **Running** status. You need to wait for the jobs to complete before proceeding to the next step. + +**4. Disable Sidekiq cron jobs** + +To prevent Sidekiq jobs from being scheduled and executed during the migration, disable all Sidekiq cron jobs: + +```shell +kubectl exec <toolbox pod name> -it -- gitlab-rails runner 'Sidekiq::Cron::Job.all.map(&:disable!)' +``` + +**5. Confirm no background jobs are running** + +We need to wait for any enqueued or in progress jobs to complete before proceeding to the next step. + +1. In the Admin Area, go to [**Monitoring**](https://docs.gitlab.com/administration/admin_area/#background-jobs) and select **Background Jobs**. +1. Under the Sidekiq dashboard, select **Queues** and then **Live Poll**. +1. Wait for **Busy** and **Enqueued** to drop to 0. + +  + +**6. Scale down Sidekiq and Webservice pods** + +Scale down the Sidekiq and Webservice pods to ensure that a consistent backup is taken. 
Both services are scaled +up at a later stage: + +- The Sidekiq pods are scaled back up during the restore step +- The Webservice pods are scaled back up after switching to the external Gitaly service to test connectivity + +```shell +# Make note of the current number of replicas for Sidekiq and Webservice so we can scale up to this number later +kubectl get deploy -lapp=sidekiq,release=<release> -o jsonpath='{.items[].spec.replicas}{"\n"}' +kubectl get deploy -lapp=webservice,release=<release> -o jsonpath='{.items[].spec.replicas}{"\n"}' + +# Scale down the Sidekiq and Webservice pods to zero +kubectl scale deploy -lapp=sidekiq,release=<release> --replicas=0 +kubectl scale deploy -lapp=webservice,release=<release> --replicas=0 +``` + +**7. Restrict external connections to the cluster** + +To prevent users and external GitLab Runners from making any changes to GitLab, we need to restrict all +unnecessary connections to GitLab. + +Once these steps are completed, GitLab is completely unavailable in the browser until the restore is completed. + +In order to keep the cluster accessible to the new external Gitaly service during the migration, we must add the +IP address for the external Gitaly service to the `nginx-ingress` configuration as the only external exception. + +1. Create a `ingress-only-allow-ext-gitaly.yml` file with the following content: + + ```yaml + nginx-ingress: + controller: + service: + loadBalancerSourceRanges: + - "x.x.x.x/32" + ``` + + `x.x.x.x` should be the IP address of the external Gitaly service. + +1. Apply the new configuration using both `gitlab.yml` and `ingress-only-allow-ext-gitaly.yml` files: + + ```shell + helm upgrade <release> gitlab/gitlab \ + -f gitlab.yml \ + -f ingress-only-allow-ext-gitaly.yml + ``` + +**8. 
Create list of repository checksums** + +Prior to running the backup, [check all GitLab repositories](https://docs.gitlab.com/administration/raketasks/check/#check-all-gitlab-repositories) +and create a list of repository checksums. Pipe the output to a file so we can `diff` the checksums after the migration: + +```shell +kubectl exec <toolbox pod name> -it -- gitlab-rake gitlab:git:checksum_projects > ~/checksums-before.txt +``` + +#### Step 4: Backup all repositories + +[Create a backup](../../backup-restore/backup.md#create-the-backup) of your repositories only: + +```shell +kubectl exec <toolbox pod name> -it -- backup-utility --skip artifacts,ci_secure_files,db,external_diffs,lfs,packages,pages,registry,terraform_state,uploads +``` + +#### Step 5: Configure Instance to use new Gitaly Service + +1. Disable the Gitaly subchart and configure GitLab to use the external Gitaly. + If there are any Gitaly references in your main `gitlab.yml` configuration file, remove those + and create a new `external-gitaly.yml` file with the following content. + + If you have previously defined additional Gitaly storages, you need to ensure a matching Gitaly + storage with the same name is specified in the new configuration, otherwise the restore operation + fails. 
+ + Refer to the + [connecting to external Gitaly over TLS](#connecting-to-external-gitaly-over-tls) section if you + are configuring TLS: + + {{< tabs >}} + + {{< tab title="Gitaly" >}} + + ```yaml + global: + gitaly: + enabled: false + external: + - name: default # required + hostname: node1.git.example.com # required + port: 8075 # optional, default shown + tlsEnabled: false # optional, overrides gitaly.tls.enabled + ``` + + {{< /tab >}} + + {{< tab title="Gitaly Cluster" >}} + + ```yaml + global: + gitaly: + enabled: false + external: + - name: default # required + hostname: ha.git.example.com # required + port: 2305 # Praefect uses port 2305 + tlsEnabled: false # optional, overrides gitaly.tls.enabled + ``` + + {{< /tab >}} + + {{< /tabs >}} + +1. Apply the new configuration using the `gitlab.yml`, `ingress-only-allow-ext-gitaly.yml`, and `external-gitaly.yml` files: + + ```shell + helm upgrade --install gitlab gitlab/gitlab \ + -f gitlab.yml \ + -f ingress-only-allow-ext-gitaly.yml \ + -f external-gitaly.yml + ``` + +1. Scale up your Webservice pods to the original replica count if they aren't running. This is required so we can test the + GitLab to external Gitaly connection in the following steps. + + ```shell + kubectl scale deploy -lapp=webservice,release=<release> --replicas=<value> + ``` + +1. On the Toolbox pod, confirm that GitLab can connect to the external Gitaly successfully: + + ```shell + kubectl exec <toolbox pod name> -it -- gitlab-rake gitlab:gitaly:check + ``` + +1. 
Ensure that the external Gitaly can connect back to your Chart install: + + {{< tabs >}} + + {{< tab title="Gitaly" >}} + + Ensure that the Gitaly service can perform callbacks to the GitLab API successfully: + + ```shell + sudo /opt/gitlab/embedded/bin/gitaly check /var/opt/gitlab/gitaly/config.toml + ``` + + {{< /tab >}} + + {{< tab title="Gitaly Cluster" >}} + + On all Praefect nodes, ensure that the Praefect service can connect to the Gitaly nodes: + + ```shell + # Run on Praefect nodes + sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml dial-nodes + ``` + + On all Gitaly nodes, ensure that the Gitaly service can perform callbacks to the GitLab API + successfully: + + ```shell + # Run on Gitaly nodes + sudo /opt/gitlab/embedded/bin/gitaly check /var/opt/gitlab/gitaly/config.toml + ``` + + {{< /tab >}} + + {{< /tabs >}} + +#### Step 6: Restore and validate repository backup + +1. [Restore the backup file](../../backup-restore/restore.md#restoring-the-backup-file) created previously. + As a result, the repositories are copied to the configured external Gitaly or Gitaly Cluster. + +1. [Check all GitLab repositories](https://docs.gitlab.com/administration/raketasks/check/#check-all-gitlab-repositories) + and create a list of repository checksums. Pipe the output to a file so we can `diff` the checksums in the next step: + + ```shell + kubectl exec <toolbox pod name> -it -- gitlab-rake gitlab:git:checksum_projects > ~/checksums-after.txt + ``` + +1. Compare the repository checksums before and after the repository migration. If the checksums are identical, this command + returns no output: + + ```shell + diff ~/checksums-before.txt ~/checksums-after.txt + ``` + + If you observe a blank checksum changing to `0000000000000000000000000000000000000000` in the `diff` output for a specific line, + this is expected and can be safely ignored. + +#### Step 7: Final configuration and validation + +1. 
To allow external users and GitLab Runners to connect to GitLab again, apply the `gitlab.yml` and `external-gitaly.yml` files. As
+   we aren't specifying `ingress-only-allow-ext-gitaly.yml`, it removes the IP restrictions:
+
+   ```shell
+   helm upgrade <release> gitlab/gitlab \
+     -f gitlab.yml \
+     -f external-gitaly.yml
+   ```
+
+   Consider generating a consolidated `gitlab.yml` for the future that includes the external Gitaly configuration:
+
+   ```shell
+   helm get values <release> -o yaml > gitlab.yml
+   ```
+
+1. If you are using GitLab Enterprise Edition, disable [maintenance mode](https://docs.gitlab.com/administration/maintenance_mode/#enable-maintenance-mode) either through the UI, API or the Rails console:
+
+   ```shell
+   kubectl exec <toolbox pod name> -it -- gitlab-rails runner 'Gitlab::CurrentSettings.update!(maintenance_mode: false)'
+   ```
+
+1. If you have multiple Gitaly storages, [configure where new repositories are stored](https://docs.gitlab.com/administration/repository_storage_paths/#configure-where-new-repositories-are-stored).
+
+1. Enable Sidekiq cron jobs:
+
+   ```shell
+   kubectl exec <toolbox pod name> -it -- gitlab-rails runner 'Sidekiq::Cron::Job.all.map(&:enable!)'
+   ```
+
+1. Scale up your Runner pods to the original replica count if they aren't running:
+
+   ```shell
+   kubectl scale deploy -lapp=gitlab-gitlab-runner,release=<release> --replicas=<value>
+   ```
+
+1. After you have confirmed everything is working as expected, you can delete the Gitaly PVC:
+
+   WARNING: Do not delete the Gitaly PVC until you have confirmed the checksums match as per [step 6](#step-6-restore-and-validate-repository-backup) and
+   double checked that everything is working as expected.
+
+   ```shell
+   kubectl delete pvc repo-data-<release>-gitaly-0
+   ```
+
+#### Rollback
+
+If you run into any problems, you can roll back the changes made so the Gitaly subchart is used again.
+
+The original Gitaly PVC must exist to roll back successfully.
+
+1. 
Rollback the GitLab Chart to the previous release using the revision number obtained +in [Step 1: Get the current release revision of the GitLab Chart](#step-1-get-the-current-release-revision-of-the-gitlab-chart): + + ```shell + helm rollback <release> <revision> + ``` + +1. Scale up your Webservice pods to the original replica count if they aren't running: + + ```shell + kubectl scale deploy -lapp=webservice,release=<release> --replicas=<value> + ``` + +1. Scale up your Sidekiq pods to the original replica count if they aren't running: + + ```shell + kubectl scale deploy -lapp=sidekiq,release=<release> --replicas=<value> + ``` + +1. Enable Sidekiq cron jobs if you previously disabled them: + + ```shell + kubectl exec <toolbox pod name> -it -- gitlab-rails runner 'Sidekiq::Cron::Job.all.map(&:enable!)' + ``` + +1. Scale up your Runner pods to the original replica count if they aren't running: + + ```shell + kubectl scale deploy -lapp=gitlab-gitlab-runner,release=<release> --replicas=<value> + ``` + +1. If you are using GitLab Enterprise Edition, disable [maintenance mode](https://docs.gitlab.com/administration/maintenance_mode/#disable-maintenance-mode) + if it is enabled. 
+ +### Related documentation + +- [Migrate to Gitaly Cluster](https://docs.gitlab.com/administration/gitaly/#migrate-to-gitaly-cluster) diff --git a/chart/doc/advanced/external-gitaly/external-omnibus-gitaly.md b/chart/doc/advanced/external-gitaly/external-omnibus-gitaly.md index 8e9562f6c3e3b106e3100646da8f432c23d59ea9..377817811af857e1bd0a2d6e76cb1efb509e0d2c 100644 --- a/chart/doc/advanced/external-gitaly/external-omnibus-gitaly.md +++ b/chart/doc/advanced/external-gitaly/external-omnibus-gitaly.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Setup standalone Gitaly --- -# Setup standalone Gitaly - The instructions here make use of the [Linux package](https://about.gitlab.com/install/#ubuntu) for Ubuntu. This package provides versions of the services that are guaranteed to be compatible with the charts' services. @@ -26,7 +25,7 @@ the Linux package installation, **_do not_** provide the `EXTERNAL_URL=` value. Create a minimal `gitlab.rb` file to be placed at `/etc/gitlab/gitlab.rb`. Be _very_ explicit about what's enabled on this node, using the following contents based on the documentation for -[running Gitaly on its own server](https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#run-gitaly-on-its-own-server). +[running Gitaly on its own server](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#run-gitaly-on-its-own-server). 
_**NOTE**: The values below should be replaced_ diff --git a/chart/doc/advanced/external-gitlab-pages/_index.md b/chart/doc/advanced/external-gitlab-pages/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..577da8ea3b26690c68d1980804079126dd3e4974 --- /dev/null +++ b/chart/doc/advanced/external-gitlab-pages/_index.md @@ -0,0 +1,89 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with external GitLab Pages +--- + +This document intends to provide documentation on how to configure this Helm +chart with a GitLab Pages instance, configured outside of the cluster using a Linux package. +[Issue 418259](https://gitlab.com/gitlab-org/gitlab/-/issues/418259) proposes adding documentation for a Linux package instance with an external +GitLab Pages using the Helm chart. + +## Requirements + +1. [External Object Storage](../external-object-storage/_index.md), as + recommended for production instances, should be used. +1. Base64 encoded form of a 32-bytes-long API secret key for Pages to interact + with GitLab Pages. + +## Known limitations + +1. [GitLab Pages Access Control](https://docs.gitlab.com/user/project/pages/pages_access_control/) + is not supported out of the box. + +## Configure external GitLab Pages instance + +1. [Install GitLab](https://about.gitlab.com/install/) using the Linux + package. + +1. Edit `/etc/gitlab/gitlab.rb` file and replace its contents with the + following snippet. Update the values below to match your configuration: + + ```ruby + roles ['pages_role'] + + # Root domain where Pages will be served. 
+ pages_external_url '<Pages root domain>' # Example: 'http://pages.example.io' + + # Information regarding GitLab instance + gitlab_pages['gitlab_server'] = '<GitLab URL>' # Example: 'https://gitlab.example.com' + gitlab_pages['api_secret_key'] = '<Base64 encoded form of API secret key>' + ``` + +1. Apply the changes by running `sudo gitlab-ctl reconfigure`. + +## Configure the chart + +1. Create a bucket named `gitlab-pages` in the object storage for storing Pages + deployments. + +1. Create a secret `gitlab-pages-api-key` with the Base64 encoded form of API + secret key as value. + + ```shell + kubectl create secret generic gitlab-pages-api-key --from-literal="shared_secret=<Base 64 encoded API Secret Key>" + ``` + +1. Refer the following configuration snippet and add necessary entries to your + values file. + + ```yaml + global: + pages: + path: '/srv/gitlab/shared/pages' + host: <Pages root domain> + port: '80' # Set to 443 if Pages is served over HTTPS + https: false # Set to true if Pages is served over HTTPS + artifactsServer: true + objectStore: + enabled: true + bucket: 'gitlab-pages' + apiSecret: + secret: gitlab-pages-api-key + key: shared_secret + extraEnv: + PAGES_UPDATE_LEGACY_STORAGE: true # Bypass automatic disabling of disk storage + ``` + + {{< alert type="note" >}} + +By setting `PAGES_UPDATE_LEGACY_STORAGE` environment variable to true, + the feature flag `pages_update_legacy_storage` is enabled which deploys Pages + to local disk. When you migrate to object storage, do remember to remove this + variable. + + {{< /alert >}} + +1. [Deploy the chart](../../installation/deployment.md#deploy-using-helm) + using this configuration. 
diff --git a/chart/doc/advanced/external-mattermost/_index.md b/chart/doc/advanced/external-mattermost/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..4e22253565e2725c58d1b5ea3aae5279b82d21ee --- /dev/null +++ b/chart/doc/advanced/external-mattermost/_index.md @@ -0,0 +1,79 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with Mattermost Team Edition +--- + +This document describes how to install Mattermost Team Edition Helm chart in proximity with an existing GitLab Helm chart deployment. + +As the Mattermost Helm chart is installed in a separate namespace, it is recommended that +`cert-manager` and `nginx-ingress` be configured to manage cluster-wide Ingress and certificate resources. For additional configuration information, +refer to the [Mattermost Helm configuration guide](https://github.com/mattermost/mattermost-helm/tree/master/charts/mattermost-team-edition#configuration). + +## Prerequisites + +- A running Kubernetes cluster. +- [Helm v3](https://helm.sh/docs/intro/install/) + +{{< alert type="note" >}} + +For the Team Edition you can have just one replica running. + +{{< /alert >}} + +## Deploy the Mattermost Team Edition Helm chart + +Once you have installed the Mattermost Team Edition Helm chart, you can deploy it using the following command: + +```shell +helm repo add mattermost https://helm.mattermost.com +helm repo update +helm upgrade --install mattermost -f values.yaml mattermost/mattermost-team-edition +``` + +Wait for the pods to run. Then, using the Ingress host you specified in the configuration, access your Mattermost server. 
+
+For additional configuration information, refer to the [Mattermost Helm configuration guide](https://github.com/mattermost/mattermost-helm/tree/master/charts/mattermost-team-edition#configuration).
+If you experience any issues with this, please view the [Mattermost Helm chart issue repository](https://github.com/mattermost/mattermost-helm/issues) or
+the [Mattermost Forum](https://forum.mattermost.com/search?q=helm).
+
+## Deploy GitLab Helm chart
+
+To deploy the GitLab Helm chart, follow the instructions described [here](../../_index.md).
+
+Here's a light way to install it:
+
+```shell
+helm repo add gitlab https://charts.gitlab.io/
+helm repo update
+helm upgrade --install gitlab gitlab/gitlab \
+  --timeout 600s \
+  --set global.hosts.domain=<your-domain> \
+  --set global.hosts.externalIP=<external-ip> \
+  --set certmanager-issuer.email=<email>
+```
+
+- `<your-domain>`: your desired domain, such as `gitlab.example.com`.
+- `<external-ip>`: the external IP pointing to your Kubernetes cluster.
+- `<email>`: email to register in Let's Encrypt to retrieve TLS certificates.
+
+Once you've deployed the GitLab instance, follow the instructions for the [initial login](../../installation/deployment.md#initial-login).
+
+## Create an OAuth application with GitLab
+
+The next part of the process is setting up the GitLab SSO integration.
+To do so, you need to [create the OAuth application](https://docs.mattermost.com/deployment/sso-gitlab.html) to allow Mattermost to use GitLab as the authentication provider.
+
+{{< alert type="note" >}}
+
+Only the default GitLab SSO is officially supported. "Double SSO", where GitLab SSO is chained to other SSO solutions, is not supported. It may be possible to connect
+GitLab SSO with AD, LDAP, SAML, or MFA add-ons in some cases, but because of the special logic required they're not officially
+supported and are known not to work on some experiences.
+ +{{< /alert >}} + +## Troubleshooting + +If you are following a process other than the one provided and experience authentication and/or deployment issues, +let us know in the [Mattermost troubleshooting forum](https://docs.mattermost.com/install/troubleshooting.html?&redirect_source=mm-org). diff --git a/chart/doc/advanced/external-nginx/_index.md b/chart/doc/advanced/external-nginx/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..bc3cb4664cc8ce7352585821cf60db75b6a902d8 --- /dev/null +++ b/chart/doc/advanced/external-nginx/_index.md @@ -0,0 +1,96 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with an external NGINX Ingress Controller +--- + +This chart configures `Ingress` resources for use with the official +[NGINX Ingress](https://github.com/kubernetes/ingress-nginx) implementation. The +NGINX Ingress Controller is deployed as a part of this chart. If you want to +reuse an existing NGINX Ingress Controller already available in your cluster, +this guide will help. + +## TCP services in the external Ingress Controller + +The GitLab Shell component requires TCP traffic to pass through on +port 22 (by default; this can be changed). Ingress does not directly support TCP services, so some additional configuration is necessary. Your NGINX Ingress Controller may have been [deployed directly](https://github.com/kubernetes/ingress-nginx/blob/master/docs/deploy/index.md) (i.e. with a Kubernetes spec file) or through the [official Helm chart](https://github.com/kubernetes/ingress-nginx). The configuration of the TCP pass through will differ depending on the deployment approach. 
+
+### Direct deployment
+
+In a direct deployment, the NGINX Ingress Controller handles configuring TCP services with a
+`ConfigMap` (see docs [here](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/exposing-tcp-udp-services.md)).
+Assuming your GitLab chart is deployed to the namespace `gitlab` and your Helm
+release is named `mygitlab`, your `ConfigMap` should be something like this:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: tcp-configmap-example
+data:
+  22: "gitlab/mygitlab-gitlab-shell:22"
+```
+
+After you have that `ConfigMap`, you can enable it as described in the NGINX
+Ingress Controller [docs](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/exposing-tcp-udp-services.md)
+using the `--tcp-services-configmap` option.
+
+```yaml
+args:
+  - /nginx-ingress-controller
+  - --tcp-services-configmap=gitlab/tcp-configmap-example
+```
+
+Finally make sure that the `Service` for your NGINX Ingress Controller is exposing
+port 22 in addition to 80 and 443.
+
+### Helm deployment
+
+If you have installed or plan to install the NGINX Ingress Controller using its [Helm chart](https://github.com/kubernetes/ingress-nginx),
+then you have to add a value to the chart using the command line:
+
+```shell
+--set tcp.22="gitlab/mygitlab-gitlab-shell:22"
+```
+
+or a `values.yaml` file:
+
+```yaml
+tcp:
+  22: "gitlab/mygitlab-gitlab-shell:22"
+```
+
+The format for the value is the same as described above in the "Direct Deployment" section.
+
+## Customize the GitLab Ingress options
+
+The NGINX Ingress Controller uses an annotation to mark which Ingress Controller
+will service a particular `Ingress` (see [docs](https://github.com/kubernetes/ingress-nginx#annotation-ingressclass)).
+You can configure the Ingress class to use with this chart using the
+`global.ingress.class` setting. Make sure to set this in your Helm options.
+ +```shell +--set global.ingress.class=myingressclass +``` + +While not necessarily required, if you're using an external Ingress Controller, you will likely want to +disable the Ingress Controller that is deployed by default with this chart: + +```shell +--set nginx-ingress.enabled=false +``` + +## Custom certificate management + +The full scope of your TLS options are documented [elsewhere](../../installation/tls.md). + +If you are using an external Ingress Controller, you may also be using an external cert-manager instance +or managing your certificates in some other custom manner. The full documentation around your TLS options is [here](../../installation/tls.md), +however for the purposes of this discussion, here are the two values that would need to be set to disable the cert-manager chart and tell +the GitLab component charts to NOT look for the built in certificate resources: + +```shell +--set certmanager.install=false +--set global.ingress.configureCertmanager=false +``` diff --git a/chart/doc/advanced/external-object-storage/_index.md b/chart/doc/advanced/external-object-storage/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..3ed73973800c40289448e381f40b8452a983bbf3 --- /dev/null +++ b/chart/doc/advanced/external-object-storage/_index.md @@ -0,0 +1,366 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with an external object storage +--- + +GitLab relies on object storage for highly-available persistent data in Kubernetes. +By default, an S3-compatible storage solution named `minio` is deployed with the +chart. For production quality deployments, we recommend using a hosted +object storage solution like Google Cloud Storage or AWS S3. 
+ +To disable MinIO, set this option and then follow the related documentation below: + +```shell +--set global.minio.enabled=false +``` + +An [example of the full configuration](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/examples/values-external-objectstorage.yaml) +has been provided in the [examples](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples). + +This documentation specifies usage of access and secret keys for AWS. It is also possible to use [IAM roles](aws-iam-roles.md). + +## S3 encryption + +GitLab supports [Amazon KMS](https://aws.amazon.com/kms/) +to [encrypt data stored in S3 buckets](https://docs.gitlab.com/administration/object_storage/#encrypted-s3-buckets). +You can enable this in two ways: + +- In AWS, [configure the S3 bucket to use default encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +- In GitLab, enable [server side encryption headers](../../charts/globals.md#storage_options). + +These two options are not mutually exclusive. You can set a default encryption +policy, but also enable server-side encryption headers to override those defaults. + +See the [GitLab documentation on encrypted S3 buckets](https://docs.gitlab.com/administration/object_storage/#encrypted-s3-buckets) +for more details. + +## Azure Blob Storage + +Direct support for Azure Blob storage is available for +[uploaded attachments, CI job artifacts, LFS, and other object types supported via the consolidated settings](https://docs.gitlab.com/administration/object_storage/#storage-specific-configuration). In previous GitLab versions, an [Azure MinIO gateway](azure-minio-gateway.md) was needed. + +{{< alert type="note" >}} + +GitLab [does not support](https://github.com/minio/minio/issues/9978) the Azure MinIO gateway as the storage for the Docker Registry. 
+Please refer to the [corresponding Azure example](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/registry.azure.yaml) when [setting up the Docker Registry](#docker-registry-images). + +{{< /alert >}} + +Although Azure uses the word container to denote a collection of blobs, +GitLab standardizes on the term bucket. + +Azure Blob storage requires the use of the +[consolidated object storage settings](../../charts/globals.md#consolidated-object-storage). A +single Azure storage account name and key must be used across multiple +Azure blob containers. Customizing individual `connection` settings by +object type (for example, `artifacts`, `uploads`, and so on) is not permitted. + +To enable Azure Blob storage, see +[`rails.azurerm.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/rails.azurerm.yaml) +as an example to define the Azure `connection`. You can load this as a +secret via: + +```shell +kubectl create secret generic gitlab-rails-storage --from-file=connection=rails.azurerm.yml +``` + +Then, disable MinIO and set these global settings: + +```shell +--set global.minio.enabled=false +--set global.appConfig.object_store.enabled=true +--set global.appConfig.object_store.connection.secret=gitlab-rails-storage +``` + +Be sure to create Azure containers for the [default names or set the container names in the bucket configuration](../../charts/globals.md#specify-buckets). + +{{< alert type="note" >}} + +If you experience requests failing with `Requests to the local network are not allowed`, +see the [Troubleshooting section](#troubleshooting). + +{{< /alert >}} + +## Docker Registry images + +Configuration of object storage for the `registry` chart is done via the `registry.storage` key, and the `global.registry.bucket` key. 
+ +```shell +--set registry.storage.secret=registry-storage +--set registry.storage.key=config +--set global.registry.bucket=bucket-name +``` + +{{< alert type="note" >}} + +The bucket name needs to be set both in the secret, and in `global.registry.bucket`. The secret is used in the registry server, and +the global is used by GitLab backups. + +{{< /alert >}} + +Create the secret per [registry chart documentation on storage](../../charts/registry/_index.md#storage), then configure the chart to make use of this secret. + +Examples for [S3](https://distribution.github.io/distribution/storage-drivers/s3/)(S3 compatible storages, but Azure MinIO gateway not supported, see [Azure Blob Storage](#azure-blob-storage)), [Azure](https://distribution.github.io/distribution/storage-drivers/azure/) and [GCS](https://distribution.github.io/distribution/storage-drivers/gcs/) drivers can be found in +[`examples/objectstorage`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage). + +- [`registry.s3.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/registry.s3.yaml) +- [`registry.gcs.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/registry.gcs.yaml) +- [`registry.azure.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/registry.azure.yaml) + +### Registry configuration + +1. Decide on which storage service to use. +1. Copy appropriate file to `registry-storage.yaml`. +1. Edit with the correct values for the environment. +1. Follow [registry chart documentation on storage](../../charts/registry/_index.md#storage) for creating the secret. +1. Configure the chart as documented. 
+ +## LFS, Artifacts, Uploads, Packages, External Diffs, Terraform State, Dependency Proxy, Secure Files + +Configuration of object storage for LFS, artifacts, uploads, packages, external +diffs, Terraform state, Secure Files, and pseudonymizer is done via the following keys: + +- `global.appConfig.lfs` +- `global.appConfig.artifacts` +- `global.appConfig.uploads` +- `global.appConfig.packages` +- `global.appConfig.externalDiffs` +- `global.appConfig.dependencyProxy` +- `global.appConfig.terraformState` +- `global.appConfig.ciSecureFiles` + +Note also that: + +- You must create buckets for the [default names or custom names in the bucket configuration](../../charts/globals.md#specify-buckets). +- A different bucket is needed for each, otherwise performing a restore from + backup doesn't function properly. +- Storing MR diffs on external storage is not enabled by default, so, + for the object storage settings for `externalDiffs` to take effect, + `global.appConfig.externalDiffs.enabled` key should have a `true` value. +- The dependency proxy feature is not enabled by default, so, + for the object storage settings for `dependencyProxy` to take effect, + `global.appConfig.dependencyProxy.enabled` key should have a `true` value. 
+ +Below is an example of the configuration options: + +```shell +--set global.appConfig.lfs.bucket=gitlab-lfs-storage +--set global.appConfig.lfs.connection.secret=object-storage +--set global.appConfig.lfs.connection.key=connection + +--set global.appConfig.artifacts.bucket=gitlab-artifacts-storage +--set global.appConfig.artifacts.connection.secret=object-storage +--set global.appConfig.artifacts.connection.key=connection + +--set global.appConfig.uploads.bucket=gitlab-uploads-storage +--set global.appConfig.uploads.connection.secret=object-storage +--set global.appConfig.uploads.connection.key=connection + +--set global.appConfig.packages.bucket=gitlab-packages-storage +--set global.appConfig.packages.connection.secret=object-storage +--set global.appConfig.packages.connection.key=connection + +--set global.appConfig.externalDiffs.bucket=gitlab-externaldiffs-storage +--set global.appConfig.externalDiffs.connection.secret=object-storage +--set global.appConfig.externalDiffs.connection.key=connection + +--set global.appConfig.terraformState.bucket=gitlab-terraform-state +--set global.appConfig.terraformState.connection.secret=object-storage +--set global.appConfig.terraformState.connection.key=connection + +--set global.appConfig.dependencyProxy.bucket=gitlab-dependencyproxy-storage +--set global.appConfig.dependencyProxy.connection.secret=object-storage +--set global.appConfig.dependencyProxy.connection.key=connection + +--set global.appConfig.ciSecureFiles.bucket=gitlab-ci-secure-files +--set global.appConfig.ciSecureFiles.connection.secret=object-storage +--set global.appConfig.ciSecureFiles.connection.key=connection +``` + +See the [charts/globals documentation on appConfig](../../charts/globals.md#configure-appconfig-settings) for full details. + +Create the secret(s) per the [connection details documentation](../../charts/globals.md#connection), and then configure the chart to use the provided secrets. Note, the same secret can be used for all of them. 
+ +Examples for [AWS](https://fog.github.io/storage/#using-amazon-s3-and-fog) (any S3 compatible like [Azure using MinIO](azure-minio-gateway.md)) and [Google](https://fog.github.io/storage/#google-cloud-storage) providers can be found in +[`examples/objectstorage`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage). + +- [`rails.s3.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/rails.s3.yaml) +- [`rails.gcs.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/rails.gcs.yaml) +- [`rails.azure.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/rails.azure.yaml) +- [`rails.azurerm.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/rails.azurerm.yaml) + +### appConfig configuration + +1. Decide on which storage service to use. +1. Copy appropriate file to `rails.yaml`. +1. Edit with the correct values for the environment. +1. Follow [connection details documentation](../../charts/globals.md#connection) for creating the secret. +1. Configure the chart as documented. + +## Backups + +Backups are also stored in object storage, and must be configured to point +externally rather than the included MinIO service. The backup/restore procedure uses two separate buckets: + +- A bucket for storing backups (`global.appConfig.backups.bucket`) +- A temporary bucket for preserving existing data during the restore process (`global.appConfig.backups.tmpBucket`) + +AWS S3-compatible object storage systems, Google Cloud Storage, and Azure Blob Storage +are supported backends. You can configure the backend type by setting `global.appConfig.backups.objectStorage.backend` +to `s3` for AWS S3, `gcs` for Google Cloud Storage, or `azure` for Azure Blob Storage. +You must also provide a connection configuration through the `gitlab.toolbox.backups.objectStorage.config` key. 
+ +When using Google Cloud Storage with a secret, the GCP project must be set with the `global.appConfig.backups.objectStorage.config.gcpProject` value. + +For S3-compatible storage: + +```shell +--set global.appConfig.backups.bucket=gitlab-backup-storage +--set global.appConfig.backups.tmpBucket=gitlab-tmp-storage +--set gitlab.toolbox.backups.objectStorage.config.secret=storage-config +--set gitlab.toolbox.backups.objectStorage.config.key=config +``` + +For Google Cloud Storage (GCS) with a secret: + +```shell +--set global.appConfig.backups.bucket=gitlab-backup-storage +--set global.appConfig.backups.tmpBucket=gitlab-tmp-storage +--set gitlab.toolbox.backups.objectStorage.backend=gcs +--set gitlab.toolbox.backups.objectStorage.config.gcpProject=my-gcp-project-id +--set gitlab.toolbox.backups.objectStorage.config.secret=storage-config +--set gitlab.toolbox.backups.objectStorage.config.key=config +``` + +For Google Cloud Storage (GCS) with [Workload Identity Federation for GKE](gke-workload-identity.md), only the backend and buckets need to be set. 
+Make sure `gitlab.toolbox.backups.objectStorage.config.secret` and `gitlab.toolbox.backups.objectStorage.config.key` are not set, +so that the cluster uses [Google's Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials): + +```shell +--set global.appConfig.backups.bucket=gitlab-backup-storage +--set global.appConfig.backups.tmpBucket=gitlab-tmp-storage +--set gitlab.toolbox.backups.objectStorage.backend=gcs +``` + +For Azure Blob Storage: + +```shell +--set global.appConfig.backups.bucket=gitlab-backup-storage +--set global.appConfig.backups.tmpBucket=gitlab-tmp-storage +--set gitlab.toolbox.backups.objectStorage.backend=azure +--set gitlab.toolbox.backups.objectStorage.config.secret=storage-config +--set gitlab.toolbox.backups.objectStorage.config.key=config +``` + +See the [backup/restore object storage documentation](../../backup-restore/_index.md#object-storage) for full details. + +{{< alert type="note" >}} + +To backup or restore files from the other object storage locations, the configuration file needs to be +configured to authenticate as a user with sufficient access to read/write to all GitLab buckets. + +{{< /alert >}} + +### Backups storage example + +1. Create the `storage.config` file: + + - On Amazon S3, the contents should be in the [s3cmd configuration file format](https://s3tools.org/kb/item14.htm) + + ```ini + [default] + access_key = AWS_ACCESS_KEY + secret_key = AWS_SECRET_KEY + bucket_location = us-east-1 + multipart_chunk_size_mb = 128 # default is 15 (MB) + ``` + + - On Google Cloud Storage, you can create the file by creating a service account + with the `storage.admin` role and then + [creating a service account key](https://cloud.google.com/iam/docs/keys-create-delete#creating_service_account_keys). + Below is an example of using the `gcloud` CLI to create the file. 
+ + ```shell + export PROJECT_ID=$(gcloud config get-value project) + gcloud iam service-accounts create gitlab-gcs --display-name "Gitlab Cloud Storage" + gcloud projects add-iam-policy-binding --role roles/storage.admin ${PROJECT_ID} --member=serviceAccount:gitlab-gcs@${PROJECT_ID}.iam.gserviceaccount.com + gcloud iam service-accounts keys create --iam-account gitlab-gcs@${PROJECT_ID}.iam.gserviceaccount.com storage.config + ``` + + - On Azure Storage + + ```ini + [default] + # Setup endpoint: hostname of the Web App + host_base = https://your_minio_setup.azurewebsites.net + host_bucket = https://your_minio_setup.azurewebsites.net + # Leave as default + bucket_location = us-west-1 + use_https = True + multipart_chunk_size_mb = 128 # default is 15 (MB) + + # Setup access keys + # Access Key = Azure Storage Account name + access_key = AZURE_ACCOUNT_NAME + # Secret Key = Azure Storage Account Key + secret_key = AZURE_ACCOUNT_KEY + + # Use S3 v4 signature APIs + signature_v2 = False + ``` + +1. Create the secret + + ```shell + kubectl create secret generic storage-config --from-file=config=storage.config + ``` + +## Google Cloud CDN + +{{< history >}} + +- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/98010) in GitLab 15.5. + +{{< /history >}} + +You can use [Google Cloud CDN](https://cloud.google.com/cdn) to cache +and fetch data from the artifacts bucket. This can help improve +performance and reduce network egress costs. + +Configuration of Cloud CDN is done via the following keys: + +- `global.appConfig.artifacts.cdn.secret` +- `global.appConfig.artifacts.cdn.key` (default is `cdn`) + +To use Cloud CDN: + +1. Set up [Cloud CDN to use the artifacts bucket as the backend](https://cloud.google.com/cdn/docs/setting-up-cdn-with-bucket). +1. Create a [key for signed URLs](https://cloud.google.com/cdn/docs/using-signed-urls). +1. 
Give the [Cloud CDN service account permission to read from the bucket](https://cloud.google.com/cdn/docs/using-signed-urls#configuring_permissions). +1. Prepare a YAML file with the parameters using the example in [`rails.googlecdn.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/cdn/rails.googlecdn.yaml). + You will need to fill in the following information: + - `url`: Base URL of the CDN host from step 1 + - `key_name`: Key name from step 2 + - `key`: The actual secret from step 2 +1. Load this YAML file into a Kubernetes secret under the `cdn` key. For example, to create a secret `gitlab-rails-cdn`: + + ```shell + kubectl create secret generic gitlab-rails-cdn --from-file=cdn=rails.googlecdn.yml + ``` + +1. Set `global.appConfig.artifacts.cdn.secret` to `gitlab-rails-cdn`. If you're setting this via a `helm` + parameter, use: + + ```shell + --set global.appConfig.artifacts.cdn.secret=gitlab-rails-cdn + ``` + +## Troubleshooting + +### Azure Blob: URL \[FILTERED] is blocked: Requests to the local network are not allowed + +This happens when the Azure Blob hostname is resolved to a [RFC1918 (local / private) IP address](https://learn.microsoft.com/en-us/azure/storage/common/storage-private-endpoints#dns-changes-for-private-endpoints). As a workaround, +allow [Outbound requests](https://docs.gitlab.com/security/webhooks/#allowlist-for-local-requests) +for your Azure Blob hostname (`yourinstance.blob.core.windows.net`). 
diff --git a/chart/doc/advanced/external-object-storage/aws-iam-roles.md b/chart/doc/advanced/external-object-storage/aws-iam-roles.md index 22ba0aa20b3b126b58ec2521078694cb81ddf4cd..0cc068f505fc432e2edd309aef1779a86d1d0d9a 100644 --- a/chart/doc/advanced/external-object-storage/aws-iam-roles.md +++ b/chart/doc/advanced/external-object-storage/aws-iam-roles.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: IAM roles for AWS when using the GitLab chart --- -# IAM roles for AWS when using the GitLab chart - The default configuration for external object storage in the charts uses access and secret keys. It is also possible to use IAM roles in combination with [`kube2iam`](https://github.com/jtblin/kube2iam), [`kiam`](https://github.com/uswitch/kiam), or [IRSA](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/). @@ -26,7 +25,7 @@ An IAM role can be specified via the annotations key: --set registry.annotations."iam\.amazonaws\.com/role"=<role name> ``` -When creating the [`registry-storage.yaml`](../../charts/registry/index.md#storage) secret, omit the access and secret key: +When creating the [`registry-storage.yaml`](../../charts/registry/_index.md#storage) secret, omit the access and secret key: ```yaml s3: @@ -48,7 +47,7 @@ For LFS, artifacts, uploads, and packages an IAM role can be specified via the a For the [`object-storage.yaml`](../../charts/globals.md#connection) secret, omit the access and secret key. 
Because the GitLab Rails codebase uses Fog for S3 -storage, the [`use_iam_profile`](https://docs.gitlab.com/ee/administration/job_artifacts.html#s3-compatible-connection-settings) +storage, the [`use_iam_profile`](https://docs.gitlab.com/administration/cicd/secure_files/#s3-compatible-connection-settings) key should be added for Fog to use the role: ```yaml @@ -57,12 +56,15 @@ use_iam_profile: true region: us-east-1 ``` -NOTE: +{{< alert type="note" >}} + Do NOT include `endpoint` in this configuration. IRSA makes use of [STS tokens, which use specialized endpoints](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html). When `endpoint` is provided, the AWS client will attempt [to send an `AssumeRoleWithWebIdentity` message to this endpoint and will fail](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3148#note_889357676). +{{< /alert >}} + ### Backups The Toolbox configuration allows for annotations to be set to upload backups to S3: @@ -71,7 +73,7 @@ The Toolbox configuration allows for annotations to be set to upload backups to --set gitlab.toolbox.annotations."iam\.amazonaws\.com/role"=<role name> ``` -The [`s3cmd.config`](index.md#backups-storage-example) secret is to be created without the access and secret keys: +The [`s3cmd.config`](_index.md#backups-storage-example) secret is to be created without the access and secret keys: ```ini [default] diff --git a/chart/doc/advanced/external-object-storage/azure-minio-gateway.md b/chart/doc/advanced/external-object-storage/azure-minio-gateway.md index d19a8c896bd71ef91507eb58171d69d70b9de49c..35059331ce2b6d93a10515d12c4925094b5a4e18 100644 --- a/chart/doc/advanced/external-object-storage/azure-minio-gateway.md +++ b/chart/doc/advanced/external-object-storage/azure-minio-gateway.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see 
https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Azure MinIO gateway when using the GitLab chart --- -# Azure MinIO gateway when using the GitLab chart - [MinIO](https://min.io/) is an object storage server that exposes S3-compatible APIs and it has a gateway feature that allows proxying requests to Azure Blob Storage. To set up our gateway, we will make use of Azure's Web App on Linux. To get started, make sure you have installed Azure CLI and you are logged in (`az login`). Proceed to create a [Resource group](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups), if you don't have one already: diff --git a/chart/doc/advanced/external-object-storage/gke-workload-identity.md b/chart/doc/advanced/external-object-storage/gke-workload-identity.md index 6fa3b33d17bf5e73b55ac55667c714f96b9a41a9..c45e9686b7a5ecfd990aff6fd365334ea7980769 100644 --- a/chart/doc/advanced/external-object-storage/gke-workload-identity.md +++ b/chart/doc/advanced/external-object-storage/gke-workload-identity.md @@ -2,11 +2,14 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Workload Identity Federation for GKE using the GitLab chart --- -# Workload Identity Federation for GKE using the GitLab chart +{{< history >}} -> - [Introduced](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3434) in GitLab 17.0. +- [Introduced](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3434) in GitLab 17.0. + +{{< /history >}} The default configuration for external object storage in the charts uses secret keys. 
[Workload Identity Federation for GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity) diff --git a/chart/doc/advanced/external-object-storage/minio.md b/chart/doc/advanced/external-object-storage/minio.md index 6638a9f1a60cc478dc315bf3f92b7f0bf11d9f36..596f46240971795558b621de8da8520e431854c1 100644 --- a/chart/doc/advanced/external-object-storage/minio.md +++ b/chart/doc/advanced/external-object-storage/minio.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure MinIO with the GitLab chart --- -# Configure MinIO with the GitLab chart - [MinIO](https://min.io/) is an object storage server that exposes S3-compatible APIs. MinIO can be deployed to several different platforms. To launch a new MinIO instance, diff --git a/chart/doc/advanced/external-redis/_index.md b/chart/doc/advanced/external-redis/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..090503caf5ee78a28d5555ad999944a60e8817ee --- /dev/null +++ b/chart/doc/advanced/external-redis/_index.md @@ -0,0 +1,147 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with an external Redis +--- + +This document intends to provide documentation on how to configure this Helm chart with an external Redis service. + +If you don't have Redis configured, for on-premise or deployment to VM, +consider using our [Linux package](external-omnibus-redis.md). + +For details about currently supported Redis versions, see [Installation system requirements](https://docs.gitlab.com/install/requirements/#redis). 
+ +## Configure the chart + +Disable the `redis` chart and the Redis service it provides, and point the other services to the external service. + +You must set the following parameters: + +- `redis.install`: Set to `false` to disable including the Redis chart. +- `global.redis.host`: Set to the hostname of the external Redis, can be a domain or an IP address. +- `global.redis.auth.enabled`: Set to `false` if the external Redis does not require a password. +- `global.redis.auth.secret`: The name of the [secret which contains the token for authentication](../../installation/secrets.md#redis-password). +- `global.redis.auth.key`: The key in the secret, which contains the token content. + +Items below can be further customized if you are not using the defaults: + +- `global.redis.port`: The port the database is available on, defaults to `6379`. +- `global.redis.database`: The database to connect to on the Redis server, defaults to `0`. + +For example, pass these values via Helm's `--set` flag while deploying: + +```shell +helm install gitlab gitlab/gitlab \ + --set redis.install=false \ + --set global.redis.host=redis.example \ + --set global.redis.auth.secret=gitlab-redis \ + --set global.redis.auth.key=redis-password \ +``` + +If you are connecting to a Redis HA cluster that has Sentinel servers +running, the `global.redis.host` attribute needs to be set to the name of +the Redis instance group (such as `mymaster` or `resque`), as +specified in the `sentinel.conf`, but not to the hostname of the Redis master. +Sentinel servers can be referenced +using the `global.redis.sentinels[0].host` and `global.redis.sentinels[0].port` +values for the `--set` flag. The index is zero based. + +## Use multiple Redis instances + +GitLab supports splitting several of the resource intensive +Redis operations across multiple Redis instances. This chart supports distributing +those persistence classes to other Redis instances. 
+ +More detailed information on configuring the chart for using multiple Redis +instances can be found in the [globals](../../charts/globals.md#multiple-redis-support) +documentation. + +## Specify secure Redis scheme (SSL) + +To connect to Redis using SSL, use the `rediss` (note the double `s`) scheme parameter: + +```shell +--set global.redis.scheme=rediss +``` + +## `redis.yml` override + +If you want to override the contents of the [`redis.yml` config file introduced in GitLab 15.8](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/106854) +you can do so by defining values under +`global.redis.redisYmlOverride`. All values and sub-values under that +key will be rendered into `redis.yml` as-is. + +The `global.redis.redisYmlOverride` setting is intended for use with +external Redis services. You must set `redis.install` to `false`. See +[configure Redis settings](../../charts/globals.md#configure-redis-settings) +for further details. + +Example: + +```yaml +redis: + install: false +global: + redis: + redisYmlOverride: + raredis: + host: rare-redis.example.com:6379 + password: + enabled: true + secret: secretname + key: password + exotic_redis: + host: redis.example.com:6379 + password: <%= File.read('/path/to/secret').strip.to_json %> + mystery_setting: + deeply: + nested: value +``` + +Assuming `/path/to/secret` contains `THE SECRET` and `/path/to/secret/raredis-override-password` contains `RARE SECRET`, this will cause the +following to be rendered in `redis.yml`: + +```yaml +production: + raredis: + host: rare-redis.example.com:6379 + password: "RARE SECRET" + exotic_redis: + host: redis.example.com:6379 + password: "THE SECRET" + mystery_setting: + deeply: + nested: value +``` + +### Things to look out for + +The flip side of the flexibility of `redisYmlOverride` is that it is less user friendly. For example: + +1. 
To insert passwords into `redis.yml` you may either: + - Use the existing [password definition](../../charts/globals.md#multiple-redis-support) + and let Helm replace it with an ERB statement. + - Write correct ERB `<%= File.read('/path/to/secret').strip.to_json %>` statements yourself, + using whatever path the secret is mounted in the container at. +1. In `redisYmlOverride` you must follow the naming conventions of + GitLab Rails. For example, the "SharedState" instance is not called + `sharedState` but `shared_state`. +1. There is no inheritance of configuration values. For example, if + you have three Redis instances that share a single set of Sentinels, + you have to repeat the Sentinel configuration three times. +1. The CNG images [expect a valid `resque.yml` and `cable.yml`](https://gitlab.com/gitlab-org/build/CNG/-/blob/4d314e505edb25ccefd4297d212bfbbb5bc562f9/gitlab-rails/scripts/lib/checks/redis.rb#L54) + so you still need to configure at least `global.redis.host` to get a `resque.yml` file. + +## Troubleshooting + +<!-- markdownlint-disable line-length --> + +### `ERR Error running script (call to f_5962bd591b624c0e0afce6631ff54e7e4402ebd8): @user_script:7: ERR syntax error` + +You might see this error in the logs of `webservice` and `sidekiq` pods if you use external Redis 5 with Helm chart 7.2 or later. Redis 5 +[is not supported](https://docs.gitlab.com/install/requirements/#redis). + +To fix it, upgrade your external Redis instance to 6.x or later. 
+ +<!-- markdownlint-enable line-length --> diff --git a/chart/doc/advanced/external-redis/external-omnibus-redis.md b/chart/doc/advanced/external-redis/external-omnibus-redis.md index 8cffae7b9a0d7692711bdf0a547d12d09fd18ec5..64979bc3a82d22f8172b8f077658c2132246e13d 100644 --- a/chart/doc/advanced/external-redis/external-omnibus-redis.md +++ b/chart/doc/advanced/external-redis/external-omnibus-redis.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Setup standalone Redis --- -# Setup standalone Redis - The instructions here make use of the [Linux package](https://about.gitlab.com/install/#ubuntu) for Ubuntu. This package provides versions of the services that are guaranteed to be compatible with the charts' services. @@ -24,8 +23,11 @@ Follow the installation instructions for the [Linux package](https://about.gitla Create a minimal `gitlab.rb` file to be placed at `/etc/gitlab/gitlab.rb`. Be _very_ explicit about what is enabled on this node, use the contents below. -NOTE: -This example is not intended to provide [Redis for scaling](https://docs.gitlab.com/ee/administration/redis/index.html). +{{< alert type="note" >}} + +This example is not intended to provide [Redis for scaling](https://docs.gitlab.com/administration/redis/). + +{{< /alert >}} - `REDIS_PASSWORD` should be replaced with the value in the [`gitlab-redis` secret](../../installation/secrets.md#redis-password). 
diff --git a/chart/doc/advanced/fips/_index.md b/chart/doc/advanced/fips/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..a81644ff4196702d93810fad4c780b2540f1d9e3 --- /dev/null +++ b/chart/doc/advanced/fips/_index.md @@ -0,0 +1,22 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with FIPS-compliant images +--- + +GitLab offers [FIPS-compliant](https://docs.gitlab.com/development/fips_compliance/) +versions of its images, allowing you to run GitLab on FIPS-enabled clusters. + +These images are based upon [Red Hat Universal Base Images](https://access.redhat.com/articles/4238681). +To function in fully-compliant FIPS mode, it is expected that all hosts are configured for FIPS mode. + +## Sample values + +We provide an example for GitLab chart values in +[`examples/fips/values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/fips/values.yaml) +which can help you to build a FIPS-compatible GitLab deployment. + +Note the comment under the `nginx-ingress.controller` key that provides the +relevant configuration to use a FIPS-compatible NGINX Ingress Controller image. This image is +maintained in our [NGINX Ingress Controller fork](https://gitlab.com/gitlab-org/cloud-native/charts/gitlab-ingress-nginx). 
diff --git a/chart/doc/advanced/geo/_index.md b/chart/doc/advanced/geo/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..389eff874a569d7581f476f255402484f6b2cc6f --- /dev/null +++ b/chart/doc/advanced/geo/_index.md @@ -0,0 +1,787 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with GitLab Geo +--- + +GitLab Geo provides the ability to have geographically distributed application +deployments. + +While external database services can be used, these documents focus on +the use of the [Linux package](https://docs.gitlab.com/omnibus/) for PostgreSQL to provide the +most platform agnostic guide, and make use of the automation included in `gitlab-ctl`. + +In this guide, both clusters have the same external URL. This feature is supported by the chart +since version 7.3. See [Set up a Unified URL for Geo sites](https://docs.gitlab.com/administration/geo/secondary_proxy/#set-up-a-unified-url-for-geo-sites). You can optionally [configure a separate URL for the secondary site](#configure-a-separate-url-for-the-secondary-site-optional). + +For known issues, see the [Geo documentation](https://docs.gitlab.com/administration/geo/#known-issues). + +{{< alert type="note" >}} + +See the [defined terms](https://docs.gitlab.com/administration/geo/glossary/) +to describe all aspects of Geo (mainly the distinction between `site` and `node`). + +{{< /alert >}} + +## Requirements + +To use GitLab Geo with the GitLab Helm chart, the following requirements must be met: + +- The use of [external PostgreSQL](../external-db/_index.md) services, as the + PostgreSQL included with the chart is not exposed to outside networks, and doesn't + have WAL support required for replication. +- The supplied database must: + - Support replication. 
+ - The primary database must be reachable by the primary site, + and all secondary database nodes (for replication). + - Secondary databases only need to be reachable by the secondary sites. + - Support SSL between primary and secondary database nodes. +- The primary site must be reachable via HTTP(S) by all secondary sites. + Secondary sites must be accessible to the primary site via HTTP(S). +- See [requirements for running Geo](https://docs.gitlab.com/administration/geo/#requirements-for-running-geo) for the full list of requirements. + +## Overview + +This guide uses 2 database nodes created by using the Linux package, +configuring only the PostgreSQL services needed, and 2 deployments of the +GitLab Helm chart. It is intended to be the _minimal_ required configuration. +This documentation does not include SSL from application to database, support +for other database providers, or +[promoting a secondary site to primary](https://docs.gitlab.com/administration/geo/disaster_recovery/). + +The outline below should be followed in order: + +1. [Set up Linux package database nodes](#set-up-linux-package-database-nodes) +1. [Set up Kubernetes clusters](#set-up-kubernetes-clusters) +1. [Collect information](#collect-information) +1. [Configure Primary database](#configure-primary-database) +1. [Deploy chart as Geo Primary site](#deploy-chart-as-geo-primary-site) +1. [Set the Geo Primary site](#set-the-geo-primary-site) +1. [Configure Secondary database](#configure-secondary-database) +1. [Copy secrets from the primary site to the secondary site](#copy-secrets-from-the-primary-site-to-the-secondary-site) +1. [Deploy chart as Geo Secondary site](#deploy-chart-as-geo-secondary-site) +1. [Add Secondary Geo site via Primary](#add-secondary-geo-site-via-primary) +1. [Confirm Operational Status](#confirm-operational-status) +1. [Configure a separate URL for the secondary site (Optional)](#configure-a-separate-url-for-the-secondary-site-optional) +1. [Registry](#registry) +1. 
[Cert-manager and unified URL](#cert-manager-and-unified-url) + +## Set up Linux package database nodes + +For this process, two nodes are required. One is the Primary database node, the +other the Secondary database node. You may use any provider of machine +infrastructure, on-premise or from a cloud provider. + +Bear in mind that communication is required: + +- Between the two database nodes for replication. +- Between each database node and their respective Kubernetes deployments: + - The primary needs to expose TCP port `5432`. + - The secondary needs to expose TCP ports `5432` & `5431`. + +Install an [operating system supported by the Linux package](https://docs.gitlab.com/install/requirements/#operating-systems), and then +[install the Linux package](https://about.gitlab.com/install/) onto it. Do not provide the +`EXTERNAL_URL` environment variable when installing, as we'll provide a minimal +configuration file before reconfiguring the package. + +After you have installed the operating system, and the GitLab package, configuration +can be created for the services that will be used. Before we do that, information +must be collected. + +## Set up Kubernetes clusters + +For this process, two Kubernetes clusters should be used. These can be from any +provider, on-premise or from a cloud provider. + +Bear in mind that communication is required: + +- To the respective database nodes: + - Primary outbound to TCP `5432`. + - Secondary outbound to TCP `5432` and `5431`. +- Between both Kubernetes Ingress via HTTPS. + +Each cluster that is provisioned should have: + +- Enough resources to support a base-line installation of these charts. +- Access to persistent storage: + - MinIO not required if using [external object storage](../external-object-storage/_index.md). + - Gitaly not required if using [external Gitaly](../external-gitaly/_index.md). + - Redis not required if using [external Redis](../external-redis/_index.md). 
+ +## Collect information + +To continue with the configuration, the following information needs to be +collected from the various sources. Collect these, and make notes for use through +the rest of this documentation. + +- Primary database: + - IP address + - hostname (optional) +- Secondary database: + - IP address + - hostname (optional) +- Primary cluster: + - External URL + - Internal URL + - IP addresses of nodes +- Secondary cluster: + - Internal URL + - IP addresses of nodes +- Database Passwords (_must pre-decide the passwords_): + - `gitlab` (used in `postgresql['sql_user_password']`, `global.psql.password`) + - `gitlab_geo` (used in `geo_postgresql['sql_user_password']`, `global.geo.psql.password`) + - `gitlab_replicator` (needed for replication) +- Your GitLab license file + +The Internal URL of each cluster must be unique to the cluster, so that all +clusters can make requests to all other clusters. For example: + +- External URL of all clusters: `https://gitlab.example.com` +- Primary cluster's Internal URL: `https://london.gitlab.example.com` +- Secondary cluster's Internal URL: `https://shanghai.gitlab.example.com` + +This guide does not cover setting up DNS. + +The `gitlab` and `gitlab_geo` database user passwords must exist in two +forms: bare password, and PostgreSQL hashed password. To obtain the hashed form, +perform the following commands on one of the Linux package installation instances, which asks +you to enter and confirm the password before outputting an appropriate hash +value for you to make note of. + +1. `gitlab-ctl pg-password-md5 gitlab` +1. 
`gitlab-ctl pg-password-md5 gitlab_geo` + +## Configure Primary database + +_This section is performed on the Primary Linux package installation database node._ + +To configure the Primary database node's Linux package installation, work from +this example configuration: + +```ruby +### Geo Primary +external_url 'http://gitlab.example.com' +roles ['geo_primary_role'] +# The unique identifier for the Geo node. +gitlab_rails['geo_node_name'] = 'London Office' +gitlab_rails['auto_migrate'] = false +## turn off everything but the DB +sidekiq['enable']=false +puma['enable']=false +gitlab_workhorse['enable']=false +nginx['enable']=false +geo_logcursor['enable']=false +gitaly['enable']=false +redis['enable']=false +gitlab_kas['enable']=false +prometheus_monitoring['enable'] = false +## Configure the DB for network +postgresql['enable'] = true +postgresql['listen_address'] = '0.0.0.0' +postgresql['sql_user_password'] = 'gitlab_user_password_hash' +# !! CAUTION !! +# This list of CIDR addresses should be customized +# - primary application deployment +# - secondary database node(s) +postgresql['md5_auth_cidr_addresses'] = ['0.0.0.0/0'] +``` + +We must replace several items: + +- `external_url` must be updated to reflect the host name of our Primary site. +- `gitlab_rails['geo_node_name']` must be replaced with a unique name for your + site. See the Name field in + [Common settings](https://docs.gitlab.com/administration/geo_sites/#common-settings). +- `gitlab_user_password_hash` must be replaced with the hashed form of the + `gitlab` password. +- `postgresql['md5_auth_cidr_addresses']` can be updated to be a list of + explicit IP addresses, or address blocks in CIDR notation. + +The `md5_auth_cidr_addresses` should be in the form of +`[ '127.0.0.1/24', '10.41.0.0/16']`. It is important to include `127.0.0.1` in +this list, as the automation in the Linux package connects using this. 
The +addresses in this list should include the IP address (not hostname) of your +Secondary database, and all nodes of your primary Kubernetes cluster. This _can_ +be left as `['0.0.0.0/0']`, however _it is not best practice_. + +After the configuration above is prepared: + +1. Place the content into `/etc/gitlab/gitlab.rb` +1. Run `gitlab-ctl reconfigure`. If you experience any issues in regards to the + service not listening on TCP, try directly restarting it with + `gitlab-ctl restart postgresql`. +1. Run `gitlab-ctl set-replication-password` to set the password for + the `gitlab_replicator` user. +1. Retrieve the Primary database node's public certificate, this is needed + for the Secondary database to be able to replicate (save this output): + + ```shell + cat ~gitlab-psql/data/server.crt + ``` + +## Deploy chart as Geo Primary site + +_This section is performed on the Primary site's Kubernetes cluster._ + +To deploy this chart as a Geo Primary, start [from this example configuration](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/geo/primary.yaml): + +1. Create a secret containing the database password for the + chart to consume. Replace `PASSWORD` below with the password for the `gitlab` + database user: + + ```shell + kubectl --namespace gitlab create secret generic geo --from-literal=postgresql-password=PASSWORD + ``` + +1. 
Create a `primary.yaml` file based on the [example configuration](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/geo/primary.yaml) + and update the configuration to reflect the correct values: + + ```yaml + ### Geo Primary + global: + # See docs.gitlab.com/charts/charts/globals + # Configure host & domain + hosts: + domain: example.com + # optionally configure a static IP for the default LoadBalancer + # externalIP: + # optionally configure a static IP for the Geo LoadBalancer + # externalGeoIP: + # configure DB connection + psql: + host: geo-1.db.example.com + port: 5432 + password: + secret: geo + key: postgresql-password + # configure geo (primary) + geo: + nodeName: London Office + enabled: true + role: primary + # configure Geo Nginx Controller for internal Geo site traffic + nginx-ingress-geo: + enabled: true + gitlab: + webservice: + # Use the Geo NGINX controller. + ingress: + useGeoClass: true + # Configure an Ingress for internal Geo traffic + extraIngress: + enabled: true + hostname: gitlab.london.example.com + useGeoClass: true + # External DB, disable + postgresql: + install: false + ``` + + <!-- markdownlint-disable MD044 --> + - [`global.hosts.domain`](../../charts/globals.md#configure-host-settings) + - [`global.psql.host`](../../charts/globals.md#configure-postgresql-settings) + - `global.geo.nodeName` must match + [the Name field of a Geo site in the Admin Area](https://docs.gitlab.com/administration/geo_sites/#common-settings) + - Set [`nginx-ingress-geo.enabled`](../../charts/nginx/_index.md#gitlab-geo) to enable an Ingress controller + for Geo traffic forwarded from secondaries. + - Configure the primary Geo site's [`gitlab.webservice`](../../charts/gitlab/webservice/_index.md#ingress-settings) Ingresses for Geo traffic. 
+ - Also configure any additional settings, such as: + - [Configuring SSL/TLS](../../installation/tools.md#tls-certificates) + - [Using external Redis](../external-redis/_index.md) + - [using external Object Storage](../external-object-storage/_index.md) + <!-- markdownlint-enable MD044 --> + +1. Deploy the chart using this configuration: + + ```shell + helm upgrade --install gitlab-geo gitlab/gitlab --namespace gitlab -f primary.yaml + ``` + + {{< alert type="note" >}} + +This assumes you are using the `gitlab` namespace. If you want to use a different namespace, + you should also replace it in `--namespace gitlab` throughout the rest of this document. + + {{< /alert >}} + +1. Wait for the deployment to complete, and the application to come online. When + the application is reachable, log in. + +1. Sign in to GitLab, and [activate your GitLab subscription](https://docs.gitlab.com/administration/license/). + + {{< alert type="note" >}} + +**This step is required for Geo to function.** + + {{< /alert >}} + +## Set the Geo Primary site + +Now that the chart has been deployed, and a license uploaded, we can configure +this as the Primary site. We will do this via the Toolbox Pod. + +1. Find the Toolbox Pod + + ```shell + kubectl --namespace gitlab get pods -lapp=toolbox + ``` + +1. Run `gitlab-rake geo:set_primary_node` with `kubectl exec`: + + ```shell + kubectl --namespace gitlab exec -ti gitlab-geo-toolbox-XXX -- gitlab-rake geo:set_primary_node + ``` + +1. Set the primary site's Internal URL with a Rails runner command. Replace `https://primary.gitlab.example.com` with the actual Internal URL: + + ```shell + kubectl --namespace gitlab exec -ti gitlab-geo-toolbox-XXX -- gitlab-rails runner "GeoNode.primary_node.update!(internal_url: 'https://primary.gitlab.example.com')" + ``` + +1. 
Check the status of Geo configuration: + + ```shell + kubectl --namespace gitlab exec -ti gitlab-geo-toolbox-XXX -- gitlab-rake gitlab:geo:check + ``` + + You should see output similar to below: + + ```plaintext + WARNING: This version of GitLab depends on gitlab-shell 10.2.0, but you're running Unknown. Please update gitlab-shell. + Checking Geo ... + + GitLab Geo is available ... yes + GitLab Geo is enabled ... yes + GitLab Geo secondary database is correctly configured ... not a secondary node + Database replication enabled? ... not a secondary node + Database replication working? ... not a secondary node + GitLab Geo HTTP(S) connectivity ... not a secondary node + HTTP/HTTPS repository cloning is enabled ... yes + Machine clock is synchronized ... Exception: getaddrinfo: Servname not supported for ai_socktype + Git user has default SSH configuration? ... yes + OpenSSH configured to use AuthorizedKeysCommand ... no + Reason: + Cannot find OpenSSH configuration file at: /assets/sshd_config + Try fixing it: + If you are not using our official docker containers, + make sure you have OpenSSH server installed and configured correctly on this system + For more information see: + doc/administration/operations/fast_ssh_key_lookup.md + GitLab configured to disable writing to authorized_keys file ... yes + GitLab configured to store new projects in hashed storage? ... yes + All projects are in hashed storage? ... yes + + Checking Geo ... Finished + ``` + + - Don't worry about `Exception: getaddrinfo: Servname not supported for ai_socktype`, as Kubernetes containers don't have access to the host clock. _This is OK_. + - `OpenSSH configured to use AuthorizedKeysCommand ... no` _is expected_. This + Rake task is checking for a local SSH server, which is actually present in the + `gitlab-shell` chart, deployed elsewhere, and already configured appropriately. 
+ +## Configure Secondary database + +_This section is performed on the Secondary Linux package installation database node._ + +To configure the Secondary database node's Linux package installation, work from +this example configuration: + +```ruby +### Geo Secondary +# external_url must match the Primary cluster's external_url +external_url 'http://gitlab.example.com' +roles ['geo_secondary_role'] +gitlab_rails['enable'] = true +# The unique identifier for the Geo node. +gitlab_rails['geo_node_name'] = 'Shanghai Office' +gitlab_rails['auto_migrate'] = false +geo_secondary['auto_migrate'] = false +## turn off everything but the DB +sidekiq['enable']=false +puma['enable']=false +gitlab_workhorse['enable']=false +nginx['enable']=false +geo_logcursor['enable']=false +gitaly['enable']=false +redis['enable']=false +prometheus_monitoring['enable'] = false +gitlab_kas['enable']=false +## Configure the DBs for network +postgresql['enable'] = true +postgresql['listen_address'] = '0.0.0.0' +postgresql['sql_user_password'] = 'gitlab_user_password_hash' +# !! CAUTION !! +# This list of CIDR addresses should be customized +# - secondary application deployment +# - secondary database node(s) +postgresql['md5_auth_cidr_addresses'] = ['0.0.0.0/0'] +geo_postgresql['listen_address'] = '0.0.0.0' +geo_postgresql['sql_user_password'] = 'gitlab_geo_user_password_hash' +# !! CAUTION !! +# This list of CIDR addresses should be customized +# - secondary application deployment +# - secondary database node(s) +geo_postgresql['md5_auth_cidr_addresses'] = ['0.0.0.0/0'] +gitlab_rails['db_password']='gitlab_user_password' +``` + +We must replace several items: + +- `gitlab_rails['geo_node_name']` must be replaced with a unique name for your site. See the Name field in + [Common settings](https://docs.gitlab.com/administration/geo_sites/#common-settings). +- `gitlab_user_password_hash` must be replaced with the hashed form of the + `gitlab` password. 
+- `postgresql['md5_auth_cidr_addresses']` should be updated to be a list of + explicit IP addresses, or address blocks in CIDR notation. +- `gitlab_geo_user_password_hash` must be replaced with the hashed form of the + `gitlab_geo` password. +- `geo_postgresql['md5_auth_cidr_addresses']` should be updated to be a list of + explicit IP addresses, or address blocks in CIDR notation. +- `gitlab_user_password` must be updated, and is used here to allow the Linux package + to automate the PostgreSQL configuration. + +The `md5_auth_cidr_addresses` should be in the form of +`[ '127.0.0.1/24', '10.41.0.0/16']`. It is important to include `127.0.0.1` in +this list, as the automation in the Linux package connects using this. The +addresses in this list should include the IP addresses of all nodes of your +Secondary Kubernetes cluster. This _can_ be left as `['0.0.0.0/0']`, however +_it is not best practice_. + +After configuration above is prepared: + +1. Check TCP connectivity to the **primary** site's PostgreSQL node: + + ```shell + openssl s_client -connect <primary_node_ip>:5432 </dev/null + ``` + + The output should show the following: + + ```plaintext + CONNECTED(00000003) + write:errno=0 + ``` + + {{< alert type="note" >}} + +If this step fails, you may be using the wrong IP address, or a firewall may + be preventing access to the server. Check the IP address, paying close + attention to the difference between public and private addresses and ensure + that, if a firewall is present, the **secondary** PostgreSQL node is + permitted to connect to the **primary** PostgreSQL node on TCP port 5432. + + {{< /alert >}} + +1. Place the content into `/etc/gitlab/gitlab.rb` +1. Run `gitlab-ctl reconfigure`. If you experience any issues in regards to the + service not listening on TCP, try directly restarting it with + `gitlab-ctl restart postgresql`. +1. Place the Primary PostgreSQL node's certificate content from above into `primary.crt` +1. 
Set up PostgreSQL TLS verification on the **secondary** PostgreSQL node: + + Install the `primary.crt` file: + + ```shell + install \ + -D \ + -o gitlab-psql \ + -g gitlab-psql \ + -m 0400 \ + -T primary.crt ~gitlab-psql/.postgresql/root.crt + ``` + + PostgreSQL will now only recognize that exact certificate when verifying TLS + connections. The certificate can only be replicated by someone with access + to the private key, which is **only** present on the **primary** PostgreSQL + node. + +1. Test that the `gitlab-psql` user can connect to the **primary** site's PostgreSQL + (the default Linux package database name is `gitlabhq_production`): + + ```shell + sudo \ + -u gitlab-psql /opt/gitlab/embedded/bin/psql \ + --list \ + -U gitlab_replicator \ + -d "dbname=gitlabhq_production sslmode=verify-ca" \ + -W \ + -h <primary_database_node_ip> + ``` + + When prompted enter the password collected earlier for the + `gitlab_replicator` user. If all worked correctly, you should see + the list of **primary** PostgreSQL node's databases. + + A failure to connect here indicates that the TLS configuration is incorrect. + Ensure that the contents of `~gitlab-psql/data/server.crt` on the + **primary** PostgreSQL node + match the contents of `~gitlab-psql/.postgresql/root.crt` on the + **secondary** PostgreSQL node. + +1. Replicate the databases. Replace `PRIMARY_DATABASE_HOST` with the IP or hostname +of your Primary PostgreSQL node: + + ```shell + gitlab-ctl replicate-geo-database --slot-name=geo_2 --host=PRIMARY_DATABASE_HOST --sslmode=verify-ca + ``` + +1. 
After replication has finished, we must reconfigure the Linux package one last time + to ensure `pg_hba.conf` is correct for the secondary PostgreSQL node: + + ```shell + gitlab-ctl reconfigure + ``` + +## Copy secrets from the primary site to the secondary site + +Now copy a few secrets from the Primary site's Kubernetes deployment to the +Secondary site's Kubernetes deployment: + +- `gitlab-geo-gitlab-shell-host-keys` +- `gitlab-geo-rails-secret` +- `gitlab-geo-registry-secret`, if Registry replication is enabled. + +1. Change your `kubectl` context to that of your Primary. +1. Collect these secrets from the Primary deployment: + + ```shell + kubectl get --namespace gitlab -o yaml secret gitlab-geo-gitlab-shell-host-keys > ssh-host-keys.yaml + kubectl get --namespace gitlab -o yaml secret gitlab-geo-rails-secret > rails-secrets.yaml + kubectl get --namespace gitlab -o yaml secret gitlab-geo-registry-secret > registry-secrets.yaml + ``` + +1. Change your `kubectl` context to that of your Secondary. +1. Apply these secrets: + + ```shell + kubectl --namespace gitlab apply -f ssh-host-keys.yaml + kubectl --namespace gitlab apply -f rails-secrets.yaml + kubectl --namespace gitlab apply -f registry-secrets.yaml + ``` + +Next create a secret containing the database passwords. Replace the +passwords below with the appropriate values: + +```shell +kubectl --namespace gitlab create secret generic geo \ + --from-literal=postgresql-password=gitlab_user_password \ + --from-literal=geo-postgresql-password=gitlab_geo_user_password +``` + +## Deploy chart as Geo Secondary site + +_This section is performed on the Secondary site's Kubernetes cluster._ + +To deploy this chart as a Geo Secondary site, start [from this example configuration](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/geo/secondary.yaml). + +1. 
Create a `secondary.yaml` file based on the [example configuration](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/geo/secondary.yaml) + and update the configuration to reflect the correct values: + + ```yaml + ## Geo Secondary + global: + # See docs.gitlab.com/charts/charts/globals + # Configure host & domain + hosts: + domain: shanghai.example.com + # use a unified URL (same external URL as the primary site) + gitlab: + name: gitlab.example.com + # configure DB connection + psql: + host: geo-2.db.example.com + port: 5432 + password: + secret: geo + key: postgresql-password + # configure geo (secondary) + geo: + enabled: true + role: secondary + nodeName: Shanghai Office + psql: + host: geo-2.db.example.com + port: 5431 + password: + secret: geo + key: geo-postgresql-password + # Optional for secondary sites: Configure Geo Nginx Controller for internal Geo site traffic. + # nginx-ingress-geo: + # enabled: true + gitlab: + webservice: + # Configure a Ingress for internal Geo traffic + extraIngress: + enabled: true + hostname: shanghai.gitlab.example.com + # External DB, disable + postgresql: + install: false + ``` + + <!-- markdownlint-disable MD044 --> + - [`global.hosts.domain`](../../charts/globals.md#configure-host-settings) + - [`global.psql.host`](../../charts/globals.md#configure-postgresql-settings) + - [`global.geo.psql.host`](../../charts/globals.md#configure-postgresql-settings) + - `global.geo.nodeName` must match + [the Name field of a Geo site in the Admin Area](https://docs.gitlab.com/administration/geo_sites/#common-settings) + - Optionally set `nginx-ingress-geo.enabled` to enable an ingress controller pre-configured for internal Geo traffic. + [This makes it easier to promote the site to a primary.](../../charts/nginx/_index.md#gitlab-geo). + - Configure an extra Ingress for [gitlab.webservice](../../charts/gitlab/webservice/_index.md#ingress-settings) to handle + traffic sent to the secondary site's internal URL. 
+ - Also configure any additional settings, such as: + - [Configuring SSL/TLS](../../installation/tools.md#tls-certificates) + - [Using external Redis](../external-redis/_index.md) + - [using external Object Storage](../external-object-storage/_index.md) + - For external databases, `global.psql.host` is the secondary, read-only replica database, while `global.geo.psql.host` is the Geo tracking database + <!-- markdownlint-enable MD044 --> + +1. Deploy the chart using this configuration: + + ```shell + helm upgrade --install gitlab-geo gitlab/gitlab --namespace gitlab -f secondary.yaml + ``` + +1. Wait for the deployment to complete, and the application to come online. + +## Add Secondary Geo site via Primary + +Now that both databases are configured and applications are deployed, we must tell +the Primary site that the Secondary site exists: + +1. Visit the **primary** site. +1. On the left sidebar, at the bottom, select **Admin Area**. +1. Select **Geo > Add site**. +1. Add the **secondary** site. Use the full GitLab URL for the URL. +1. Enter a Name with the `global.geo.nodeName` of the Secondary site. These values must always match exactly, character for character. +1. Enter Internal URL, for example `https://shanghai.gitlab.example.com`. +1. Optionally, choose which groups or storage shards should be replicated by the + **secondary** site. Leave blank to replicate all. +1. Select **Add node**. + +After the **secondary** site is added to the administration panel, it automatically starts +replicating missing data from the **primary** site. This process is known as "backfill". +Meanwhile, the **primary** site starts to notify each **secondary** site of any changes, so +that the **secondary** site can replicate those changes promptly. + +## Confirm Operational Status + +The final step is to double check the Geo configuration on the secondary site once fully +configured, via the Toolbox Pod. + +1. 
Find the Toolbox Pod: + + ```shell + kubectl --namespace gitlab get pods -lapp=toolbox + ``` + +1. Attach to the Pod with `kubectl exec`: + + ```shell + kubectl --namespace gitlab exec -ti gitlab-geo-toolbox-XXX -- bash -l + ``` + +1. Check the status of Geo configuration: + + ```shell + gitlab-rake gitlab:geo:check + ``` + + You should see output similar to below: + + ```plaintext + WARNING: This version of GitLab depends on gitlab-shell 10.2.0, but you're running Unknown. Please update gitlab-shell. + Checking Geo ... + + GitLab Geo is available ... yes + GitLab Geo is enabled ... yes + GitLab Geo secondary database is correctly configured ... yes + Database replication enabled? ... yes + Database replication working? ... yes + GitLab Geo HTTP(S) connectivity ... + * Can connect to the primary node ... yes + HTTP/HTTPS repository cloning is enabled ... yes + Machine clock is synchronized ... Exception: getaddrinfo: Servname not supported for ai_socktype + Git user has default SSH configuration? ... yes + OpenSSH configured to use AuthorizedKeysCommand ... no + Reason: + Cannot find OpenSSH configuration file at: /assets/sshd_config + Try fixing it: + If you are not using our official docker containers, + make sure you have OpenSSH server installed and configured correctly on this system + For more information see: + doc/administration/operations/fast_ssh_key_lookup.md + GitLab configured to disable writing to authorized_keys file ... yes + GitLab configured to store new projects in hashed storage? ... yes + All projects are in hashed storage? ... yes + + Checking Geo ... Finished + ``` + + - Don't worry about `Exception: getaddrinfo: Servname not supported for ai_socktype`, + as Kubernetes containers do not have access to the host clock. _This is OK_. + - `OpenSSH configured to use AuthorizedKeysCommand ... no` _is expected_. 
This + Rake task is checking for a local SSH server, which is actually present in the + `gitlab-shell` chart, deployed elsewhere, and already configured appropriately. + +## Configure a separate URL for the secondary site (Optional) + +A single, unified URL for the primary and secondary site is usually more convenient for users. For example, you can: + +- Place both sites behind a load balancer. +- Route users to the closest site using your cloud provider's DNS features. + +In some cases, you may want to give users control over which site they visit. For this purpose, you can configure the secondary Geo site to use a unique external URL. For example: + +- Primary cluster's External URL: `https://gitlab.example.com` +- Secondary cluster's External URL: `https://shanghai.gitlab.example.com` + +1. Edit `secondary.yaml` and update the secondary cluster's external URL so that the `webservice` chart can process those requests: + + ```yaml + global: + # See docs.gitlab.com/charts/charts/globals + # Configure host & domain + hosts: + domain: example.com + # use a unique external URL for the secondary site + gitlab: + name: shanghai.gitlab.example.com + ``` + +1. Update the secondary site's External URL in GitLab so that it can use the URL wherever it's needed: + - Using the Admin UI: + 1. Visit the **primary** site. + 1. On the left sidebar, at the bottom, select **Admin Area**. + 1. Select **Geo > Sites**. + 1. Select the pencil icon to **Edit the secondary site**. + 1. Edit the External URL, for example `https://shanghai.gitlab.example.com`. + 1. Select **Save changes**. + +1. Redeploy the secondary site's chart: + + ```shell + helm upgrade --install gitlab-geo gitlab/gitlab --namespace gitlab -f secondary.yaml + ``` + +1. Wait for the deployment to complete, and the application to come online. 
+ +## Registry + +To sync the secondary registry with the primary registry you can configure +[registry replication](https://docs.gitlab.com/administration/geo/replication/container_registry/#configure-container-registry-replication) +using a [notification secret](../../charts/registry/_index.md#notification-secret). + +## Cert-manager and unified URL + +Geo's unified URL is often used with geolocation-aware routing (for example, using Amazon Route 53 or Google Cloud DNS), which can +cause problems if the [HTTP01 challenge](https://letsencrypt.org/docs/challenge-types/#http-01-challenge) is used to validate that the +domain name is under your control. + +When you request a certificate for one Geo site, Let's Encrypt must resolve the DNS name to the requesting Geo site. If the DNS resolves +to a different Geo site, the certificate for the unified URL will not be issued or refreshed. + +To reliably create and refresh certificates with cert-manager, either [set the challenge nameserver](https://cert-manager.io/docs/configuration/acme/http01/#setting-nameservers-for-http-01-solver-propagation-checks) +to a server that is known to resolve the unified hostname to the Geo sites IP address or configure +a [DNS01](https://letsencrypt.org/docs/challenge-types/#dns-01-challenge) [Issuer](https://cert-manager.io/docs/configuration/acme/dns01/). 
diff --git a/chart/doc/advanced/internal-tls/_index.md b/chart/doc/advanced/internal-tls/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..2abc0bb0f5c49a42c50cf12bc9d6e4ac859d7e0c --- /dev/null +++ b/chart/doc/advanced/internal-tls/_index.md @@ -0,0 +1,270 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Use TLS between components of the GitLab chart +--- + +The GitLab charts can use transport-layer security (TLS) between the various +components. This requires you to provide certificates for the services +you want to enable, and configure those services to make use of those +certificates and the certificate authority (CA) that signed them. + +## Preparation + +Each chart has documentation regarding enabling TLS for that service, and the various +settings required to ensure the appropriate configuration. + +### Generating certificates for internal use + +{{< alert type="note" >}} + +GitLab does not purport to provide high-grade PKI infrastructure, or certificate +authorities. + +{{< /alert >}} + +For the purposes of this documentation, we provide a **Proof of Concept** script +below, which makes use of [Cloudflare's CFSSL](https://github.com/cloudflare/cfssl/) +to produce a self-signed Certificate Authority, and a wildcard certificate that can be +used for all services. + +This script will: + +- Generate a CA key pair. +- Sign a certificate meant to service all GitLab component service endpoints. +- Create two Kubernetes Secret objects: + - A secret of type `kubernetes.io/tls` which has the server certificate and key pair. + - A secret of type `Opaque` which **only** contains the public certificate of the CA as `ca.crt` + as needed by NGINX Ingress. + +Prerequisites: + +- Bash, or compatible shell. +- `cfssl` is available to your shell, and within `PATH`. 
+- `kubectl` is available, and configured to point to your Kubernetes cluster + where GitLab will later be installed. + - Be sure to have created the namespace you wish to have these certificates + installed into before operating the script. + +You may copy the content of this script to your computer, and make the resulting +file executable. We suggest `poc-gitlab-internal-tls.sh`. + +```shell +#!/bin/bash +set -e +############# +## make and change into a working directory +pushd $(mktemp -d) + +############# +## setup environment +NAMESPACE=${NAMESPACE:-default} +RELEASE=${RELEASE:-gitlab} +## stop if variable is unset beyond this point +set -u +## known expected patterns for SAN +CERT_SANS="*.${NAMESPACE}.svc,${RELEASE}-metrics.${NAMESPACE}.svc,*.${RELEASE}-gitaly.${NAMESPACE}.svc" + +############# +## generate default CA config +cfssl print-defaults config > ca-config.json +## generate a CA +echo '{"CN":"'${RELEASE}.${NAMESPACE}.internal.ca'","key":{"algo":"ecdsa","size":256}}' | \ + cfssl gencert -initca - | \ + cfssljson -bare ca - +## generate certificate +echo '{"CN":"'${RELEASE}.${NAMESPACE}.internal'","key":{"algo":"ecdsa","size":256}}' | \ + cfssl gencert -config=ca-config.json -ca=ca.pem -ca-key=ca-key.pem -profile www -hostname="${CERT_SANS}" - |\ + cfssljson -bare ${RELEASE}-services + +############# +## load certificates into K8s +kubectl -n ${NAMESPACE} create secret tls ${RELEASE}-internal-tls \ + --cert=${RELEASE}-services.pem \ + --key=${RELEASE}-services-key.pem +kubectl -n ${NAMESPACE} create secret generic ${RELEASE}-internal-tls-ca \ + --from-file=ca.crt=ca.pem +``` + +{{< alert type="note" >}} + +This script _does not_ preserve the CA's private key. It is a Proof-of-Concept +helper, and _is not intended for production use_. + +{{< /alert >}} + +The script expects two environment variables to be set: + +1. `NAMESPACE`: The Kubernetes Namespace you will later install GitLab to. This defaults to `default`, as with `kubectl`. +1. 
`RELEASE`: The Helm Release name you will later use to install GitLab. This defaults to `gitlab`. + +To operate this script, you may `export` the two variables, or prepend the +script name with their values. + +```shell +export NAMESPACE=testing +export RELEASE=gitlab + +./poc-gitlab-internal-tls.sh +``` + +After the script has run, you will find the two secrets created, and the +temporary working directory contains all certificates and their keys. + +```plaintext +$ pwd +/tmp/tmp.swyMgf9mDs +$ kubectl -n ${NAMESPACE} get secret | grep internal-tls +testing-internal-tls kubernetes.io/tls 2 11s +testing-internal-tls-ca Opaque 1 10s +$ ls -1 +ca-config.json +ca.csr +ca-key.pem +ca.pem +testing-services.csr +testing-services-key.pem +testing-services.pem +``` + +#### Required certificate CN and SANs + +The various GitLab components speak to each other over their Service's DNS names. +The Ingress objects generated by the GitLab chart must provide NGINX the +name to verify, when `tls.verify: true` (which is the default). As a result +of this, each GitLab component should receive a certificate with a SAN including +either their Service's name, or a wildcard acceptable to the Kubernetes Service +DNS entry. + +- `service-name.namespace.svc` +- `*.namespace.svc` + +Failure to ensure these SANs within certificates _will_ result in a non-functional +instance, and logs that can be quite cryptic, referring to "connection failure" +or "SSL verification failed". + +You can make use of `helm template` to retrieve a full list of all +Service object names, if needed. If your GitLab has been deployed without TLS, +you can query Kubernetes for those names: + +`kubectl -n ${NAMESPACE} get service -lrelease=${RELEASE}` + +## Configuration + +Example configurations can be found in [examples/internal-tls](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/internal-tls/). 
+ +For the purposes of this documentation, we have provided `shared-cert-values.yaml` +which configures the GitLab components to consume the certificates generated with +the script above, in [generating certificates for internal use](#generating-certificates-for-internal-use). + +Key items to configure: + +1. Global [Custom Certificate Authorities](../../charts/globals.md#custom-certificate-authorities). +1. Per-component TLS for the service listeners. + (See each chart's documentation, under [charts/](../../charts/_index.md)) + +This process is greatly simplified by making use of YAML's native anchor +functionality. A truncated snippet of `shared-cert-values.yaml` shows this: + +```yaml +.internal-ca: &internal-ca gitlab-internal-tls-ca +.internal-tls: &internal-tls gitlab-internal-tls + +global: + certificates: + customCAs: + - secret: *internal-ca + workhorse: + tls: + enabled: true +gitlab: + webservice: + tls: + secretName: *internal-tls + workhorse: + tls: + verify: true # default + secretName: *internal-tls + caSecretName: *internal-ca +``` + +## Result + +When all components have been configured to provide TLS on their service +listeners, all communication between GitLab components will traverse the +network with TLS security, including connections from NGINX Ingress to +each GitLab component. + +NGINX Ingress will terminate any _inbound_ TLS, determine the appropriate +services to pass the traffic to, and then form a new TLS connection to +the GitLab component. When configured as shown here, it will also _verify_ +the certificates served by the GitLab components against the CA. + +This can be verified by connecting to the Toolbox pod, and querying the +various component Services. 
One such example, connecting to the Webservice +Pod's primary service port that NGINX Ingress uses: + +```plaintext +$ kubectl -n ${NAMESPACE} get pod -lapp=toolbox,release=${RELEASE} +NAME READY STATUS RESTARTS AGE +gitlab-toolbox-5c447bfdb4-pfmpc 1/1 Running 0 65m +$ kubectl exec -ti gitlab-toolbox-5c447bfdb4-pfmpc -c toolbox -- \ + curl -Iv "https://gitlab-webservice-default.testing.svc:8181" +``` + +The output should be similar to following example: + +```plaintext +* Trying 10.60.0.237:8181... +* Connected to gitlab-webservice-default.testing.svc (10.60.0.237) port 8181 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_128_GCM_SHA256 +* ALPN, server did not agree to a protocol +* Server certificate: +* subject: CN=gitlab.testing.internal +* start date: Jul 18 19:15:00 2022 GMT +* expire date: Jul 18 19:15:00 2023 GMT +* subjectAltName: host "gitlab-webservice-default.testing.svc" matched cert's "*.testing.svc" +* issuer: CN=gitlab.testing.internal.ca +* SSL certificate verify ok. +> HEAD / HTTP/1.1 +> Host: gitlab-webservice-default.testing.svc:8181 +``` + +## Troubleshooting + +If your GitLab instance appears unreachable from the browser, by rendering an +HTTP 503 error, NGINX Ingress is likely having a problem verifying the +certificates of the GitLab components. + +You may work around this by temporarily setting +`gitlab.webservice.workhorse.tls.verify` to `false`. 
+ +The NGINX Ingress controller can be connected to, and will evidence a message +in `nginx.conf`, regarding problems verifying the certificate(s). + +Example content, where the Secret is not reachable: + +```plaintext +# Location denied. Reason: "error obtaining certificate: local SSL certificate + testing/gitlab-internal-tls-ca was not found" +return 503; +``` + +Common problems that cause this: + +- CA certificate is not in a key named `ca.crt` within the Secret. +- The Secret was not properly supplied, or may not exist within the Namespace. diff --git a/chart/doc/advanced/multiple-databases/_index.md b/chart/doc/advanced/multiple-databases/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..926ccf8f851a6b33d6e02bc89a4b3f692cf9b8f1 --- /dev/null +++ b/chart/doc/advanced/multiple-databases/_index.md @@ -0,0 +1,254 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with multiple external databases +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed +- Status: Beta + +{{< /details >}} + +By default, GitLab uses a single application database, referred to as the `main` database. + +To scale GitLab, you can configure GitLab to use multiple application databases. + +## Configure a new installation GitLab chart with multiple external databases + +{{< alert type="warning" >}} + +Because of [known issues](https://docs.gitlab.com/administration/postgresql/multiple_databases/#known-issues), this feature may not be ready for production use. + +{{< /alert >}} + +By default, GitLab uses a single application database, referred to as the `main` database. + +To scale GitLab, you can configure GitLab to use multiple external application databases, +namely `main` and `ci`. 
The following diagram shows how the pods talk to the multiple databases: + +```mermaid +graph LR + subgraph External Databases + B[(Main Database)] + C[(CI Database)] + end + subgraph Kubernetes Cluster + A[GitLab Pod 1] --> B + A --> C + D[GitLab Pod 2] --> B + D --> C + end +``` + +Prerequisites: + +- All prerequisites listed in the [external database document](../external-db/_index.md). +- An additional empty `gitlabhq_production_ci` database, which can be running on the same PostgreSQL + server as the `gitlabhq_production` database. +- Access to the Kubernetes cluster using + the [`kubectl`](https://kubernetes.io/docs/reference/kubectl/) and [`helm`](https://helm.sh/docs/intro/install/) + CLI tools. Refer to the [GitLab chart prerequisites](../../installation/tools.md) for more information. + +To set up multiple external databases: + +1. Create the Kubernetes secret that holds the PostgreSQL secrets for the database user `gitlab`. + This password can be different, to support having the multiple databases on two different physical servers + with different passwords. + + Let's choose the name `gitlab-postgresql-password` for this Kubernetes secret: + + ```shell + kubectl create secret generic gitlab-postgresql-password \ + --from-literal=main-gitlab-password=<main-database-password> \ + --from-literal=ci-gitlab-password=<ci-database-password> + ``` + +1. Add the following to your existing YAML file that you use to + [deploy the GitLab chart](../../installation/deployment.md) + (for example `gitlab-values.yaml`), and replace the `host` values with yours: + + ```yaml + global: + psql: + main: + host: main.database.host # set this to the host of your external main database + database: gitlabhq_production + password: + secret: gitlab-postgresql-password + key: main-gitlab-password + ci: + host: ci.database.host # set this to the host of your external ci database. 
Can be the same as the one for main database + database: gitlabhq_production_ci # difference in database containing CI schema, results in `database_tasks: true` as well + password: + secret: gitlab-postgresql-password + key: ci-gitlab-password + postgresql: + install: false + ``` + + Where: + + - `postgresql.install`: Set to `false` to disable the embedded database, and use the external database instead. + - `global.psql.main.host`: Set to the hostname of the external `main` database, can be a domain or an IP address. + - `global.psql.main.password.secret`: The name of the Kubernetes secret, that was used to hold the PostgreSQL user. + In our example it's `gitlab-postgresql-password`. + - `global.psql.main.password.key`: Within the secret, the key that contains the password. + In our example it's `main-gitlab-password`. + - `global.psql.ci.host`: Set to the hostname of the external `ci` database, can be a domain or an IP address. It can be the + same value as `global.psql.main.host` if both databases `main` and `ci` are on the same database server. + - `global.psql.ci.password.secret`: The name of the Kubernetes secret, that was used to hold the PostgreSQL user. + In our example it's `gitlab-postgresql-password`. + - `global.psql.ci.password.key`: Within the secret, the key that contains the password. + In our example it's `ci-gitlab-password`. + +1. Finally, deploy the GitLab chart using `gitlab-values.yaml`: + + ```shell + helm repo add gitlab https://charts.gitlab.io/ + helm repo update + helm upgrade --install gitlab gitlab/gitlab --timeout=900s -f gitlab-values.yaml + ``` + +## Migrate an existing installation to multiple databases + +You can migrate an existing installation to multiple databases. + +### Preparation + +Before migrating an existing installation to multiple databases: + +1. Take a [backup](../../backup-restore/backup.md) of the database. +1. Locate the toolbox pod: + + ```shell + kubectl get pods -lrelease=RELEASE_NAME,app=toolbox + ``` + +1. 
Access the database console and provide the database password: + + ```shell + kubectl exec <toolbox pod name> -it -c toolbox -- gitlab-rails dbconsole + ``` + + You can replace this command with a suitable `psql` command from any host that has a direct access to the database server. + +1. Make sure you have enough disk space, plan for the downtime, and create the new `gitlabhq_production_ci` database using SQL commands outlined in the + [multiple databases](https://docs.gitlab.com/administration/postgresql/multiple_databases/#preparation) documentation. Because you've already accessed the + database console, you can omit the `sudo gitlab-psql` command. +1. Exit the database console: + + ```shell + \q + ``` + +1. Exit the toolbox pod: + + ```shell + exit + ``` + +### Shut down GitLab + +To prevent GitLab from writing to the database while switching to multiple databases, you need to shut down +all the services that write to the current GitLab database. + +1. Save the existing replicas for each database accessing deployment: + + ```shell + rm -f /tmp/deployments.txt + deployments=$(kubectl get deployment --selector 'app in (webservice, sidekiq, kas, gitlab-exporter)' --no-headers -o custom-columns=":metadata.name") + for deployment in ${deployments} + do + replicas=$(kubectl get deployment $deployment -o=jsonpath='{.status.replicas}') + echo "$deployment/$replicas" >> /tmp/deployments.txt + done + ``` + +1. Scale down the database accessing deployments: + + ```shell + kubectl scale deployment --replicas 0 --selector 'app in (webservice, sidekiq, kas, gitlab-exporter)' + ``` + +### Migrate the database + +To migrate the database: + +1. Access the Bash shell on the toolbox pod: + + ```shell + kubectl exec <toolbox_pod_name> -it -c toolbox -- bash + ``` + +1. From the toolbox pod, run the migrate command: + + ```shell + gitlab-rake gitlab:db:decomposition:migrate + ``` + + The command should print the following message: `Database migration finished!`. + +1. 
Exit the toolbox pod: + + ```shell + exit + ``` + +### Switch GitLab to use two databases + +Switch GitLab to use two databases, `main` and `ci`: + +```shell +helm upgrade gitlab gitlab/gitlab \ + --set global.psql.database=null \ + --set global.psql.main.database=gitlabhq_production \ + --set global.psql.ci.database=gitlabhq_production_ci \ + --set gitlab.migrations.enabled=false \ + --set global.extraEnv.GITLAB_ALLOW_SEPARATE_CI_DATABASE=1 \ + --reuse-values +``` + +### Verify that the migration completed + +1. Locate the new toolbox pod: + + ```shell + kubectl get pods -lrelease=RELEASE_NAME,app=toolbox + ``` + +1. Access the bash shell on the toolbox pod: + + ```shell + kubectl exec <toolbox_pod_name> -it -c toolbox -- bash + ``` + +1. Run `cat /srv/gitlab/config/database.yml` to make sure that GitLab configuration contains both `main` and `ci` and that both of them are pointing to two different databases: + `gitlabhq_production` and `gitlabhq_production_ci`. + +### Post-migration and clean up + +Before you start GitLab again, you need to make sure you lock the legacy tables for writes, and clean them up. 
+ +From inside the toolbox pod, run: + +```shell +gitlab-rake gitlab:db:lock_writes +gitlab-rake gitlab:db:truncate_legacy_tables:main +gitlab-rake gitlab:db:truncate_legacy_tables:ci +``` + +### Start GitLab + +To start GitLab, run the following: + +```shell +while read line; do + deployment=$(echo $line|cut -d'/' -f 1) + replicas=$(echo $line|cut -d'/' -f 2) + kubectl patch deployment "${deployment}" -p "{\"spec\": {\"replicas\": ${replicas}}}" +done < /tmp/deployments.txt +``` diff --git a/chart/doc/advanced/persistent-volumes/_index.md b/chart/doc/advanced/persistent-volumes/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..b7624b89b706654710273149358c18756137b3cd --- /dev/null +++ b/chart/doc/advanced/persistent-volumes/_index.md @@ -0,0 +1,469 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with persistent volumes +--- + +Some of the included services require persistent storage, configured through +[Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes) that specify which disks your cluster has access to. +Documentation on the storage configuration necessary to install this chart can be found in our +[Storage Guide](../../installation/storage.md). + +Storage changes after installation need to be manually handled by your cluster +administrators. Automated management of these volumes after installation is not +handled by the GitLab chart. 
+
+Examples of changes not automatically managed after initial installation
+include:
+
+- Mounting different volumes to the Pods
+- Changing the effective accessModes or [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+- Expanding the storage size of your volume<sup>1</sup>
+
+<sup>1</sup> In Kubernetes 1.11, [expanding the storage size of your volume is supported](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/)
+if you have `allowVolumeExpansion` configured to true in your [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
+
+Automating these changes is complicated due to:
+
+1. Kubernetes does not allow changes to most fields in an existing [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+1. Unless [manually configured](../../installation/storage.md), the [PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is the only reference to dynamically provisioned [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes)
+1. `Delete` is the default [reclaimPolicy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) for dynamically provisioned [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes)
+
+This means in order to make changes, we need to delete the [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+and create a new one with our changes. 
But due to the default [reclaimPolicy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy), +deleting the [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) may delete the [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes) +and underlying disk. And unless configured with appropriate volumeNames and/or +labelSelectors, the chart doesn't know the volume to attach to. + +We will continue to look into making this process easier, but for now a manual +process needs to be followed to make changes to your storage. + +## Locate the GitLab Volumes + +Find the volumes/claims that are being used: + +```shell +kubectl --namespace <namespace> get PersistentVolumeClaims -l release=<chart release name> -ojsonpath='{range .items[*]}{.spec.volumeName}{"\t"}{.metadata.labels.app}{"\n"}{end}' +``` + +- `<namespace>` should be replaced with the namespace where you installed the GitLab chart. +- `<chart release name>` should be replaced with the name you used to install the GitLab chart. + +The command prints a list of the volume names, followed by the name of the +service they are for. + +For example: + +```shell +$ kubectl --namespace helm-charts-win get PersistentVolumeClaims -l release=review-update-app-h8qogp -ojsonpath='{range .items[*]}{.spec.volumeName}{"\t"}{.metadata.labels.app}{"\n"}{end}' +pvc-6247502b-8c2d-11e8-8267-42010a9a0113 gitaly +pvc-61bbc05e-8c2d-11e8-8267-42010a9a0113 minio +pvc-61bc6069-8c2d-11e8-8267-42010a9a0113 postgresql +pvc-61bcd6d2-8c2d-11e8-8267-42010a9a0113 prometheus +pvc-61bdf136-8c2d-11e8-8267-42010a9a0113 redis +``` + +## Before making storage changes + +The person making the changes needs to have administrator access to the cluster, and appropriate access to the storage +solutions being used. 
Often the changes will first need to be applied in the storage solution, then the results need to +be updated in Kubernetes. + +Before making changes, you should ensure your [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes) are using +the `Retain` [reclaimPolicy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) so they don't get removed while you are +making changes. + +First, [find the volumes/claims that are being used](#locate-the-gitlab-volumes). + +Next, edit each volume and change the value of `persistentVolumeReclaimPolicy` +under the `spec` field, to be `Retain` rather than `Delete` + +For example: + +```shell +kubectl --namespace helm-charts-win edit PersistentVolume pvc-6247502b-8c2d-11e8-8267-42010a9a0113 +``` + +Editing Output: + +```yaml +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. 
+# +apiVersion: v1 +kind: PersistentVolume +metadata: + annotations: + kubernetes.io/createdby: gce-pd-dynamic-provisioner + pv.kubernetes.io/bound-by-controller: "yes" + pv.kubernetes.io/provisioned-by: kubernetes.io/gce-pd + creationTimestamp: 2018-07-20T14:58:43Z + labels: + failure-domain.beta.kubernetes.io/region: europe-west2 + failure-domain.beta.kubernetes.io/zone: europe-west2-b + name: pvc-6247502b-8c2d-11e8-8267-42010a9a0113 + resourceVersion: "48362431" + selfLink: /api/v1/persistentvolumes/pvc-6247502b-8c2d-11e8-8267-42010a9a0113 + uid: 650bd649-8c2d-11e8-8267-42010a9a0113 +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 50Gi + claimRef: + apiVersion: v1 + kind: PersistentVolumeClaim + name: repo-data-review-update-app-h8qogp-gitaly-0 + namespace: helm-charts-win + resourceVersion: "48362307" + uid: 6247502b-8c2d-11e8-8267-42010a9a0113 + gcePersistentDisk: + fsType: ext4 + pdName: gke-cloud-native-81a17-pvc-6247502b-8c2d-11e8-8267-42010a9a0113 +# Changed the following line + persistentVolumeReclaimPolicy: Retain + storageClassName: standard +status: + phase: Bound +``` + +## Making storage changes + +First, make the desired changes to the disk outside of the cluster. (Resize the +disk in GKE, or create a new disk from a snapshot or clone, etc). + +How you do this, and whether or not it can be done live, without downtime, is +dependent on the storage solutions you are using, and can't be covered by this +document. + +Next, evaluate whether you need these changes to be reflected in the Kubernetes +objects. For example: with expanding the disk storage size, the storage size +settings in the [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) will only be used when a new volume +resource is requested. 
So you would only need to increase the values in the +[PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) if you intend to scale up more disks (for use in +additional Gitaly pods). + +If you do need to have the changes reflected in Kubernetes, be sure that you've +updated your reclaim policy on the volumes as described in the [Before making storage changes](#before-making-storage-changes) +section. + +The paths we have documented for storage changes are: + +- [Changes to an existing Volume](#changes-to-an-existing-volume) +- [Switching to a different Volume](#switching-to-a-different-volume) + +### Changes to an existing Volume + +First [locate the volume name](#locate-the-gitlab-volumes) you are changing. + +Use `kubectl edit` to make the desired configuration changes to the volume. (These changes +should only be updates to reflect the real state of the attached disk) + +For example: + +```shell +kubectl --namespace helm-charts-win edit PersistentVolume pvc-6247502b-8c2d-11e8-8267-42010a9a0113 +``` + +Editing Output: + +```yaml +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. 
+# +apiVersion: v1 +kind: PersistentVolume +metadata: + annotations: + kubernetes.io/createdby: gce-pd-dynamic-provisioner + pv.kubernetes.io/bound-by-controller: "yes" + pv.kubernetes.io/provisioned-by: kubernetes.io/gce-pd + creationTimestamp: 2018-07-20T14:58:43Z + labels: + failure-domain.beta.kubernetes.io/region: europe-west2 + failure-domain.beta.kubernetes.io/zone: europe-west2-b + name: pvc-6247502b-8c2d-11e8-8267-42010a9a0113 + resourceVersion: "48362431" + selfLink: /api/v1/persistentvolumes/pvc-6247502b-8c2d-11e8-8267-42010a9a0113 + uid: 650bd649-8c2d-11e8-8267-42010a9a0113 +spec: + accessModes: + - ReadWriteOnce + capacity: + # Updated the storage size + storage: 100Gi + claimRef: + apiVersion: v1 + kind: PersistentVolumeClaim + name: repo-data-review-update-app-h8qogp-gitaly-0 + namespace: helm-charts-win + resourceVersion: "48362307" + uid: 6247502b-8c2d-11e8-8267-42010a9a0113 + gcePersistentDisk: + fsType: ext4 + pdName: gke-cloud-native-81a17-pvc-6247502b-8c2d-11e8-8267-42010a9a0113 + persistentVolumeReclaimPolicy: Retain + storageClassName: standard +status: + phase: Bound +``` + +Now that the changes have been reflected in the [volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes), we need to update +the [claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). + +Follow the instructions in the [Make changes to the PersistentVolumeClaim](#make-changes-to-the-persistentvolumeclaim) section. + +#### Update the volume to bind to the claim + +In a separate terminal, start watching to see when the [claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) has its status change to bound, +and then move onto the next step to make the volume available for use in the new claim. + +```shell +kubectl --namespace <namespace> get --watch PersistentVolumeClaim <claim name> +``` + +Edit the volume to make it available to the new claim. 
Remove the `.spec.claimRef` section. + +```shell +kubectl --namespace <namespace> edit PersistentVolume <volume name> +``` + +Editing Output: + +```yaml +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. +# +apiVersion: v1 +kind: PersistentVolume +metadata: + annotations: + kubernetes.io/createdby: gce-pd-dynamic-provisioner + pv.kubernetes.io/bound-by-controller: "yes" + pv.kubernetes.io/provisioned-by: kubernetes.io/gce-pd + creationTimestamp: 2018-07-20T14:58:43Z + labels: + failure-domain.beta.kubernetes.io/region: europe-west2 + failure-domain.beta.kubernetes.io/zone: europe-west2-b + name: pvc-6247502b-8c2d-11e8-8267-42010a9a0113 + resourceVersion: "48362431" + selfLink: /api/v1/persistentvolumes/pvc-6247502b-8c2d-11e8-8267-42010a9a0113 + uid: 650bd649-8c2d-11e8-8267-42010a9a0113 +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 100Gi + gcePersistentDisk: + fsType: ext4 + pdName: gke-cloud-native-81a17-pvc-6247502b-8c2d-11e8-8267-42010a9a0113 + persistentVolumeReclaimPolicy: Retain + storageClassName: standard +status: + phase: Released +``` + +Shortly after making the change to the [Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes), the terminal watching the claim status should show `Bound`. + +Finally, [apply the changes to the GitLab chart](#apply-the-changes-to-the-gitlab-chart) + +### Switching to a different Volume + +If you want to switch to using a new volume, using a disk that has a copy of the +appropriate data from the old volume, then first you need to create the new +[Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes) in Kubernetes. 
+ +In order to create a Persistent Volume for your disk, you will need to +locate the [driver specific documentation](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes) +for your storage type. You may want to use an existing Persistent Volume of the same [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) as a starting point: + +```shell +kubectl --namespace <namespace> get PersistentVolume <volume name> -o yaml > <volume name>.bak.yaml +``` + +There are a couple of things to keep in mind when following the driver documentation: + +- You need to use the driver to create a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes), not a Pod object with a volume as shown in a lot of the documentation. +- You do **not** want to create a [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) for the volume, we will be editing the existing claim instead. + +The driver documentation often includes examples for using the driver in a Pod, for example: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-pd +spec: + containers: + - image: registry.k8s.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + # This GCE PD must already exist. 
+ gcePersistentDisk: + pdName: my-data-disk + fsType: ext4 +``` + +What you actually want, is to create a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes), like so: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: test-volume +spec: + capacity: + storage: 400Gi + accessModes: + - ReadWriteOnce + gcePersistentDisk: + pdName: my-data-disk + fsType: ext4 +``` + +You normally create a local `yaml` file with the [PersistentVolume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes) information, +then issue a create command to Kubernetes to create the object using the file. + +```shell +kubectl --namespace <your namespace> create -f <local-pv-file>.yaml +``` + +Once your volume is created, you can move on to [Making changes to the PersistentVolumeClaim](#make-changes-to-the-persistentvolumeclaim) + +## Make changes to the PersistentVolumeClaim + +Find the [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) you want to change. + +```shell +kubectl --namespace <namespace> get PersistentVolumeClaims -l release=<chart release name> -ojsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.app}{"\n"}{end}' +``` + +- `<namespace>` should be replaced with the namespace where you installed the GitLab chart. +- `<chart release name>` should be replaced with the name you used to install the GitLab chart. + +The command will print a list of the PersistentVolumeClaim names, followed by the name of the +service they are for. 
+ +Then save a copy of the [claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) to your local filesystem: + +```shell +kubectl --namespace <namespace> get PersistentVolumeClaim <claim name> -o yaml > <claim name>.bak.yaml +``` + +Example Output: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + annotations: + pv.kubernetes.io/bind-completed: "yes" + pv.kubernetes.io/bound-by-controller: "yes" + volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/gce-pd + creationTimestamp: 2018-07-20T14:58:38Z + labels: + app: gitaly + release: review-update-app-h8qogp + name: repo-data-review-update-app-h8qogp-gitaly-0 + namespace: helm-charts-win + resourceVersion: "48362433" + selfLink: /api/v1/namespaces/helm-charts-win/persistentvolumeclaims/repo-data-review-update-app-h8qogp-gitaly-0 + uid: 6247502b-8c2d-11e8-8267-42010a9a0113 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard + volumeName: pvc-6247502b-8c2d-11e8-8267-42010a9a0113 +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 50Gi + phase: Bound +``` + +Create a new YAML file for a new PVC object. 
Have it use the same `metadata.name`, `metadata.labels`, `metadata.namespace`, and `spec` fields (with your updates applied) and drop the other settings: + +Example: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app: gitaly + release: review-update-app-h8qogp + name: repo-data-review-update-app-h8qogp-gitaly-0 + namespace: helm-charts-win +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + # This is our updated field + storage: 100Gi + storageClassName: standard + volumeName: pvc-6247502b-8c2d-11e8-8267-42010a9a0113 +``` + +Now delete the old [claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims): + +```shell +kubectl --namespace <namespace> delete PersistentVolumeClaim <claim name> +``` + +You may need to clear `finalizers` to allow deletion to finish: + +```shell +kubectl --namespace <namespace> patch PersistentVolumeClaim <claim name> -p '{"metadata":{"finalizers":null}}' +``` + +Create the new claim: + +```shell +kubectl --namespace <namespace> create -f <new claim yaml file> +``` + +If you are binding to the same [PersistentVolume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes) that was previous bound to +the claim, then proceed to [update the volume to bind to the claim](#update-the-volume-to-bind-to-the-claim) + +Otherwise, if you have bound the claim to a new volume, move onto [apply the changes to the GitLab chart](#apply-the-changes-to-the-gitlab-chart) + +## Apply the changes to the GitLab chart + +After making changes to the [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes) and [PersistentVolumeClaims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims), +you will also want to issue a Helm update with the changes applied to the chart +settings as well. 
+ +See the [installation storage guide](../../installation/storage.md#using-the-custom-storage-class) +for the options. + +> **Note**: If you made changes to the Gitaly [volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims), you will need to delete the +> Gitaly [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) before you will be able to issue a Helm update. This is +> because the StatefulSet's Volume Template is immutable, and cannot be changed. +> +> You can delete the StatefulSet without deleting the Gitaly Pods: +> `kubectl --namespace <namespace> delete --cascade=false StatefulSet <release-name>-gitaly` +> The Helm update command will recreate the StatefulSet, which will adopt and +> update the Gitaly pods. + +Update the chart, and include the updated configuration: + +Example: + +```shell +helm upgrade --install review-update-app-h8qogp gitlab/gitlab \ + --set gitlab.gitaly.persistence.size=100Gi \ + <your other config settings> +``` diff --git a/chart/doc/advanced/ubi/_index.md b/chart/doc/advanced/ubi/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..d2595639ede9e1d154bb162134515ef9025021a9 --- /dev/null +++ b/chart/doc/advanced/ubi/_index.md @@ -0,0 +1,38 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab chart with UBI-based images +--- + +GitLab offers [Red Hat UBI](https://www.redhat.com/en/blog/introducing-red-hat-universal-base-image) +versions of its images, allowing you to replace standard images with UBI-based +images. These images use the same tag as standard images with `-ubi` extension. + +{{< alert type="note" >}} + +The UBI-based images prior to GitLab 17.3 use the `-ubi8` extension. 
+
+{{< /alert >}}
+
+The GitLab chart uses third-party images that are not based on UBI. These images
+mostly offer external services to GitLab, such as Redis, PostgreSQL, and so on.
+If you wish to deploy a GitLab instance that is purely based on UBI you must
+disable the internal services, and use external deployments or services.
+
+The services that must be disabled and provided externally are:
+
+- PostgreSQL
+- MinIO (Object Store)
+- Redis
+
+The services that must be disabled are:
+
+- CertManager (Let's Encrypt integration)
+- Prometheus
+- GitLab Runner
+
+## Sample values
+
+We provide an example for GitLab chart values in [`examples/ubi/values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/ubi/values.yaml)
+which can help you to build a pure UBI GitLab deployment.
diff --git a/chart/doc/architecture/_index.md b/chart/doc/architecture/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..2800f7616d7292d92c2fe8f1a10c7f22076b2564
--- /dev/null
+++ b/chart/doc/architecture/_index.md
@@ -0,0 +1,13 @@
+---
+stage: Systems
+group: Distribution
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
+title: Architecture of Cloud native GitLab Helm charts
+---
+
+Documentation Organization:
+
+- [Goals](goals.md)
+- [Architecture](architecture.md)
+- [Design Decisions](decisions.md)
+- [Resource Usage](resource-usage.md)
diff --git a/chart/doc/architecture/architecture.md b/chart/doc/architecture/architecture.md
index 67aa2a06d6bb0bf360cf2d2c27047a432ffc4e1f..c9a5b583b3e34b89917d4fce4ab7e2da187d5b13 100644
--- a/chart/doc/architecture/architecture.md
+++ b/chart/doc/architecture/architecture.md
@@ -2,10 +2,9 @@
 stage: Systems
 group: Distribution
 info: To determine the technical writer assigned to the Stage/Group associated with this page, see 
https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Architecture --- -# Architecture - We plan to support three tiers of components: 1. Docker Containers @@ -83,22 +82,22 @@ documented individually, and laid in a structure that matches the [charts](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts) directory structure. Non-GitLab components are packaged and documented on the top level. GitLab -component services are documented under the [GitLab](../charts/gitlab/index.md) chart: - -- [NGINX](../charts/nginx/index.md) -- [MinIO](../charts/minio/index.md) -- [Registry](../charts/registry/index.md) -- GitLab/[Gitaly](../charts/gitlab/gitaly/index.md) -- GitLab/[GitLab Exporter](../charts/gitlab/gitlab-exporter/index.md) -- GitLab/[GitLab Shell](../charts/gitlab/gitlab-shell/index.md) -- GitLab/[Migrations](../charts/gitlab/migrations/index.md) -- GitLab/[Sidekiq](../charts/gitlab/sidekiq/index.md) -- GitLab/[Webservice](../charts/gitlab/webservice/index.md) +component services are documented under the [GitLab](../charts/gitlab/_index.md) chart: + +- [NGINX](../charts/nginx/_index.md) +- [MinIO](../charts/minio/_index.md) +- [Registry](../charts/registry/_index.md) +- GitLab/[Gitaly](../charts/gitlab/gitaly/_index.md) +- GitLab/[GitLab Exporter](../charts/gitlab/gitlab-exporter/_index.md) +- GitLab/[GitLab Shell](../charts/gitlab/gitlab-shell/_index.md) +- GitLab/[Migrations](../charts/gitlab/migrations/_index.md) +- GitLab/[Sidekiq](../charts/gitlab/sidekiq/_index.md) +- GitLab/[Webservice](../charts/gitlab/webservice/_index.md) ### Components list A list of which components are deployed when using the chart, and configuration instructions if needed, -is available on the [architecture components list](https://docs.gitlab.com/ee/development/architecture.html#component-list) page. +is available on the [architecture components list](https://docs.gitlab.com/development/architecture/#component-list) page. 
## Design Decisions diff --git a/chart/doc/architecture/backup-restore.md b/chart/doc/architecture/backup-restore.md index 9d626d55d23e81644e78037b4f584dbf76091cc1..c15feefbe88401aa89bf56ccd0df6f7b8e438cfe 100644 --- a/chart/doc/architecture/backup-restore.md +++ b/chart/doc/architecture/backup-restore.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Backup and restore --- -# Backup and restore - This document explains the technical implementation of the backup and restore into/from CNG. ## Toolbox pod @@ -45,7 +44,7 @@ Backups are made using the following steps, in order: - `--skip <component>` - You can skip parts of the backup process by using `--skip <component>` for every component that you want to skip in the backup process. Skippable components are found in [Excluding specific data from the backup](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html#excluding-specific-data-from-the-backup). + You can skip parts of the backup process by using `--skip <component>` for every component that you want to skip in the backup process. Skippable components are found in [Excluding specific data from the backup](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/#excluding-specific-data-from-the-backup). - `-t <timestamp-override-value>` @@ -59,12 +58,15 @@ Backups are made using the following steps, in order: It is also possible to specify the storage class in which the backup is stored using `--storage-class <storage-class-name>`, allowing you to save on backup storage costs. If unspecified, this will use the default of the storage backend. - NOTE: - This storage class name is passed through as-is to the storage class argument of your specified backend. 
+ {{< alert type="note" >}} + +This storage class name is passed through as-is to the storage class argument of your specified backend. + + {{< /alert >}} #### GitLab backup bucket -The default name of the bucket that will be used to store backups is `gitlab-backups`. This is configurable +The default name of the bucket used to store backups is `gitlab-backups`. This is configurable using the `BACKUP_BUCKET_NAME` environment variable. #### Backing up to Google Cloud Storage @@ -92,5 +94,8 @@ After fetching the backup tar the sequence of execution is: - clean up the corresponding bucket - restore the backup content into the corresponding bucket -NOTE: +{{< alert type="note" >}} + If the restore fails, the user will need to revert to previous backup using data in `tmp` directory of the backup bucket. This is currently a manual process. + +{{< /alert >}} diff --git a/chart/doc/architecture/decision-making.md b/chart/doc/architecture/decision-making.md index 5e0e768a6cc1b3ec198ba1715fbc5f0bada89baf..b2bdefdb109fb4638f242a7035ca18b7514c77d5 100644 --- a/chart/doc/architecture/decision-making.md +++ b/chart/doc/architecture/decision-making.md @@ -2,10 +2,9 @@ stage: Enablement group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#designated-technical-writers +title: Decision Making --- -# Decision Making - Changes to this repository are first reviewed using the [merge request workflow](https://handbook.gitlab.com/handbook/engineering/infrastructure/core-platform/systems/distribution/merge_requests/) then merged by project maintainers. Architectural decisions (such as those that would appear on the [architecture](architecture.md) or [decisions](decisions.md) pages) require the review of the project's senior technical leadership. 
Senior technical leadership are individuals identified by the Engineering Manager of the team responsible for the project, as well as that team's Staff+ leadership as mentioned in the [architecture handbook](https://handbook.gitlab.com/handbook/engineering/architecture/#architecture-as-a-practice-is-everyones-responsibility) and any current working group formed around a goal specific to the project. diff --git a/chart/doc/architecture/decisions.md b/chart/doc/architecture/decisions.md index 33cc86ca98efe520dafffc780a1fa9ccba496b97..f04b57b62d7b3a94295ffbdb5e34039d5fba10b9 100644 --- a/chart/doc/architecture/decisions.md +++ b/chart/doc/architecture/decisions.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Design Decisions --- -# Design Decisions - This documentation collects reasoning and decisions made regarding the design of the Helm charts in this repository. Proposals welcome, see [Decision Making](decision-making.md) for how we apply decisions. @@ -30,7 +29,7 @@ from properties to secrets (in observance of our preference). As a means of preventing a user from accidentally deploying an updated version of these charts which includes a breaking change against a configuration that would not function, we -have chosen to implement [deprecation](../development/index.md#handling-configuration-deprecations) notifications. These are designed to detect +have chosen to implement [deprecation](../development/_index.md#handling-configuration-deprecations) notifications. These are designed to detect properties have been relocated, altered, replaced, or removed entirely, then inform the user of what changes need to be made to the configuration. This may include informing the user to see documentation on how to replace a property with a secret. 
These notifications @@ -103,7 +102,7 @@ Related issue: ## Forked charts The following charts have been forked or re-created in this repository following -our [guidelines for forking](../development/index.md#guidelines-for-forking) +our [guidelines for forking](../development/_index.md#guidelines-for-forking) ### Redis @@ -118,7 +117,7 @@ which has added optional HA support. ### MinIO -Our [MinIO chart](../charts/minio/index.md) was altered from the upstream [MinIO](https://github.com/helm/charts/tree/master/stable/minio). +Our [MinIO chart](../charts/minio/_index.md) was altered from the upstream [MinIO](https://github.com/helm/charts/tree/master/stable/minio). - Make use of pre-existing Kubernetes secrets instead of creating new ones from properties. - Remove providing the sensitive keys via Environment. @@ -127,14 +126,14 @@ Our [MinIO chart](../charts/minio/index.md) was altered from the upstream [MinIO ### registry -Our [registry chart](../charts/registry/index.md) was altered from the upstream [`docker-registry`](https://github.com/helm/charts/tree/master/stable/docker-registry). +Our [registry chart](../charts/registry/_index.md) was altered from the upstream [`docker-registry`](https://github.com/helm/charts/tree/master/stable/docker-registry). - Enable the use of in-chart MinIO services automatically. - Automatically hook authentication to the GitLab services. ### NGINX Ingress -Our [NGINX Ingress chart](../charts/nginx/index.md) was altered from the upstream [NGINX Ingress](https://github.com/kubernetes/ingress-nginx). +Our [NGINX Ingress chart](../charts/nginx/_index.md) was altered from the upstream [NGINX Ingress](https://github.com/kubernetes/ingress-nginx). 
- Add feature to allow for the TCP ConfigMap to be external to the chart - Add feature to allow Ingress class to be templated based on release name diff --git a/chart/doc/architecture/goals.md b/chart/doc/architecture/goals.md index 168aefb6548ced3afb02ac0baf15fc63b8ab816c..697faa572a9fc8a37f93038edc7798a09b11e77d 100644 --- a/chart/doc/architecture/goals.md +++ b/chart/doc/architecture/goals.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Goals --- -# Goals - We have a few core goals with this initiative: 1. Easy to scale horizontally diff --git a/chart/doc/architecture/resource-usage.md b/chart/doc/architecture/resource-usage.md index 41ff37721fc60a8192a137e236d4cb238968306b..d92e6f2b64b661152f81be04a45d82f3ef60b457 100644 --- a/chart/doc/architecture/resource-usage.md +++ b/chart/doc/architecture/resource-usage.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Resource usage --- -# Resource usage - ## Resource Requests All of our containers include predefined resource request values. By default we @@ -88,20 +87,20 @@ In future tests we will try to include sustained concurrent load, to better matc - cpu: > `300m` (greater than stress task) - memory: > `20M` (greater than stress task) -Check the [troubleshooting documentation](../troubleshooting/index.md#git-over-ssh-the-remote-end-hung-up-unexpectedly) +Check the [troubleshooting documentation](../troubleshooting/_index.md#git-over-ssh-the-remote-end-hung-up-unexpectedly) for details on what might happen if `gitlab.gitlab-shell.resources.limits.memory` is set too low. 
### Webservice Webservice resources were analyzed during testing with the -[10k reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html). -Notes can be found in the [Webservice resources documentation](../charts/gitlab/webservice/index.md#resources). +[10k reference architecture](https://docs.gitlab.com/administration/reference_architectures/10k_users/). +Notes can be found in the [Webservice resources documentation](../charts/gitlab/webservice/_index.md#resources). ### Sidekiq Sidekiq resources were analyzed during testing with the -[10k reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html). -Notes can be found in the [Sidekiq resources documentation](../charts/gitlab/sidekiq/index.md#resources). +[10k reference architecture](https://docs.gitlab.com/administration/reference_architectures/10k_users/). +Notes can be found in the [Sidekiq resources documentation](../charts/gitlab/sidekiq/_index.md#resources). ### KAS diff --git a/chart/doc/backup-restore/_index.md b/chart/doc/backup-restore/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..8c2e64644deb523e2ea5d2601552617d06d53cff --- /dev/null +++ b/chart/doc/backup-restore/_index.md @@ -0,0 +1,258 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Backup and restore a GitLab instance +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +GitLab Helm chart provides a utility pod from the Toolbox sub-chart that acts as an interface for the purpose of backing up and restoring GitLab instances. It is equipped with a `backup-utility` executable which interacts with other necessary pods for this task. 
+Technical details for how the utility works can be found in the [architecture documentation](../architecture/backup-restore.md).
+
+## Prerequisites
+
+- Backup and Restore procedures described here have only been tested with S3 compatible APIs. Support for other object storage services, like Google Cloud Storage, will be tested in future revisions.
+
+- During restoration, the backup tarball needs to be extracted to disk. This means the Toolbox pod should have disk of [necessary size available](../charts/gitlab/toolbox/_index.md#restore-considerations).
+
+- This chart relies on the use of [object storage](#object-storage) for `artifacts`, `uploads`, `packages`, `registry` and `lfs` objects, and does not currently migrate these for you during restore. If you are restoring a backup taken from another instance, you must migrate your existing instance to using object storage before taking the backup. See [issue 646](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/646).
+
+## Backup and Restoring procedures
+
+- [Backing up a GitLab installation](backup.md)
+- [Restoring a GitLab installation](restore.md)
+
+## Object storage
+
+We provide a MinIO instance out of the box when using this chart unless an [external object storage](../advanced/external-object-storage/_index.md) is specified. The Toolbox connects to the included MinIO by default, unless specific settings are given. The Toolbox can also be configured to back up to Amazon S3 or Google Cloud Storage (GCS).
+
+### Backups to S3
+
+The Toolbox uses `s3cmd` by default to connect to object storage unless you [specify another s3 tool to use](../backup-restore/backup.md#specify-s3-tool-to-use). In order to configure connectivity to external object storage `gitlab.toolbox.backups.objectStorage.config.secret` should be specified which points to a Kubernetes secret containing a `.s3cfg` file. `gitlab.toolbox.backups.objectStorage.config.key` should be specified if different from the default of `config`.
This points to the key containing the contents of a `.s3cfg` file. + +It should look like this: + +```shell +helm install gitlab gitlab/gitlab \ + --set gitlab.toolbox.backups.objectStorage.config.secret=my-s3cfg \ + --set gitlab.toolbox.backups.objectStorage.config.key=config . +``` + +s3cmd `.s3cfg` file documentation can be found [here](https://s3tools.org/kb/item14.htm) + +In addition, two bucket locations need to be configured, one for storing the backups, and one temporary bucket that is used +when restoring a backup. + +```shell +--set global.appConfig.backups.bucket=gitlab-backup-storage +--set global.appConfig.backups.tmpBucket=gitlab-tmp-storage +``` + +### Backups to Google Cloud Storage (GCS) + +To backup to GCS, you must first set `gitlab.toolbox.backups.objectStorage.backend` to `gcs`. This ensures +that the Toolbox uses the `gsutil` CLI when storing and retrieving +objects. + +In addition, two bucket locations need to be configured, one for storing +the backups, and one temporary bucket that is used when restoring a +backup. + +```shell +--set global.appConfig.backups.bucket=gitlab-backup-storage +--set global.appConfig.backups.tmpBucket=gitlab-tmp-storage +``` + +The backup utility needs access to these buckets. There are two ways to grant access: + +- Specifying credentials in a Kubernetes secret. +- Configuring [Workload Identity Federation for GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity). + +#### GCS credentials + +First, set `gitlab.toolbox.backups.objectStorage.config.gcpProject` to the project ID of the GCP project that contains your storage buckets. + +You must create a Kubernetes secret with the contents of an active service account JSON key where the service account has the `storage.admin` role for the buckets +you will use for backup. Below is an example of using the `gcloud` and `kubectl` to create the secret. 
+ +```shell +export PROJECT_ID=$(gcloud config get-value project) +gcloud iam service-accounts create gitlab-gcs --display-name "Gitlab Cloud Storage" +gcloud projects add-iam-policy-binding --role roles/storage.admin ${PROJECT_ID} --member=serviceAccount:gitlab-gcs@${PROJECT_ID}.iam.gserviceaccount.com +gcloud iam service-accounts keys create --iam-account gitlab-gcs@${PROJECT_ID}.iam.gserviceaccount.com storage.config +kubectl create secret generic storage-config --from-file=config=storage.config +``` + +Configure your Helm chart as follows to use the service account key to authenticate to GCS for backups: + +```shell +helm install gitlab gitlab/gitlab \ + --set gitlab.toolbox.backups.objectStorage.config.secret=storage-config \ + --set gitlab.toolbox.backups.objectStorage.config.key=config \ + --set gitlab.toolbox.backups.objectStorage.config.gcpProject=my-gcp-project-id \ + --set gitlab.toolbox.backups.objectStorage.backend=gcs +``` + +#### Configuring Workload Identity Federation for GKE + +See the [documentation on Workload Identity Federation for GKE using the GitLab chart](../advanced/external-object-storage/gke-workload-identity.md). + +When creating an IAM allow policy that references the Kubernetes ServiceAccount, grant the `roles/storage.objectAdmin` role. + +For backups, ensure that Google's Application Default Credentials are used by making sure that +`gitlab.toolbox.backups.objectStorage.config.secret` and `gitlab.toolbox.backups.objectStorage.config.key` are NOT set. + +### Backups to Azure blob storage + +Azure blob storage can be used to store backups by setting +`gitlab.toolbox.backups.objectStorage.backend` to `azure`. This enables +Toolbox to use the included copy of `azcopy` to transmit and retrieve the +backup files to the Azure blob storage. + +To use Azure blob storage, one will need to create a storage account +in an existing resource group. Create a config secret with your storage +account's name, access key and blob host. 
+
+Create a config file containing the parameters:
+
+```yaml
+# azure-backup-conf.yaml
+azure_storage_account_name: <storage account>
+azure_storage_access_key: <access key value>
+azure_storage_domain: blob.core.windows.net # optional
+```
+
+The following `kubectl` command can be used to create the Kubernetes Secret:
+
+```shell
+kubectl create secret generic backup-azure-creds \
+  --from-file=config=azure-backup-conf.yaml
+```
+
+Once the Secret has been created, the GitLab Helm chart can be
+configured by adding the backup settings to your deployed values or by supplying
+the settings on the Helm command line. For example:
+
+```shell
+helm install gitlab gitlab/gitlab \
+  --set gitlab.toolbox.backups.objectStorage.config.secret=backup-azure-creds \
+  --set gitlab.toolbox.backups.objectStorage.config.key=config \
+  --set gitlab.toolbox.backups.objectStorage.backend=azure
+```
+
+The access key from the Secret is used to generate and refresh shorter-lived shared
+access signature (SAS) tokens to access the storage account.
+
+In addition, two buckets/containers need to be created beforehand, one for storing the
+backups, and one temporary bucket that is used when restoring a backup. Add the
+bucket names to your values or settings. For example:
+
+```shell
+--set global.appConfig.backups.bucket=gitlab-backup-storage
+--set global.appConfig.backups.tmpBucket=gitlab-tmp-storage
+```
+
+## Troubleshooting
+
+### Pod eviction issues
+
+As the backups are assembled locally outside of the object storage target, temporary disk space is needed. The required space might exceed the size of the actual backup archive.
+The default configuration will use the Toolbox pod's file system to store the temporary data. If you find the pod being evicted due to low resources, you should attach a persistent volume to the pod to hold the temporary data.
+On GKE, add the following settings to your Helm command: + +```shell +--set gitlab.toolbox.persistence.enabled=true +``` + +If your backups are being run as part of the included backup cron job, then you will want to enable persistence for the cron job as well: + +```shell +--set gitlab.toolbox.backups.cron.persistence.enabled=true +``` + +For other providers, you may need to create a persistent volume. See our [Storage documentation](../installation/storage.md) for possible examples on how to do this. + +### "Bucket not found" errors + +If you see `Bucket not found` errors during backups, check the +credentials are configured for your bucket. + +The command depends on the cloud service provider: + +- For AWS S3, the credentials are stored on the toolbox pod in `~/.s3cfg`. Run: + + ```shell + s3cmd ls + ``` + +- For GCP GCS, run: + + ```shell + gsutil ls + ``` + +You should see a list of available buckets. + +### "AccessDeniedException: 403" errors in GCP + +An error like `[Error] AccessDeniedException: 403 <GCP Account> does not have storage.objects.list access to the Google Cloud Storage bucket.` +usually happens during a backup or restore of a GitLab instance, because of missing permissions. + +The backup and restore operations use all buckets in the environment, so +confirm that all buckets in your environment have been created, and that the GCP account can access (list, read, and write) all buckets: + +1. Find your toolbox pod: + + ```shell + kubectl get pods -lrelease=RELEASE_NAME,app=toolbox + ``` + +1. Get all buckets in the pod's environment. Replace `<toolbox-pod-name>` with your actual toolbox pod name, but leave `"BUCKET_NAME"` as it is: + + ```shell + kubectl describe pod <toolbox-pod-name> | grep "BUCKET_NAME" + ``` + +1. 
Confirm that you have access to every bucket in the environment: + + ```shell + # List + gsutil ls gs://<bucket-to-validate>/ + + # Read + gsutil cp gs://<bucket-to-validate>/<object-to-get> <save-to-location> + + # Write + gsutil cp -n <local-file> gs://<bucket-to-validate>/ + ``` + +### "ERROR: `/home/git/.s3cfg`: None" error when running `backup-utility` with `--backend s3` + +This error happens when a Kubernetes secret containing a `.s3cfg` file was not specified through the `gitlab.toolbox.backups.objectStorage.config.secret` value. + +To fix this, follow the instructions in [backups to S3](_index.md#backups-to-s3). + +### "PermissionError: File not writable" errors using S3 + +An error like `[Error] WARNING: <file> not writable: Operation not permitted` happens if the toolbox user does not have +permissions to write files that match the stored permissions of the bucket items. + +To prevent this, configure `s3cmd` not to preserve file owner, mode and timestamps by adding the +following flag to your `.s3cfg` file referenced via `gitlab.toolbox.backups.objectStorage.config.secret`. + +```toml +preserve_attrs = False +``` + +### Repositories skipped on restore + +Starting with GitLab 16.6/Chart 7.6 repositories may be skipped on restore if the backup archive has been renamed. +To avoid this, do not rename backup archives and rename backups to their original names (`{backup_id}_gitlab_backup.tar`). 
+ +The original backup ID can be extracted from the repository backup directory structure: `repositories/@hashed/*/*/*/{backup_id}/LATEST` diff --git a/chart/doc/backup-restore/backup.md b/chart/doc/backup-restore/backup.md index 26d3f4dd6734f0f6389934a329e4a5136f88c87d..00fab8b1f4a71780af3206e85a4123efe4b0f0aa 100644 --- a/chart/doc/backup-restore/backup.md +++ b/chart/doc/backup-restore/backup.md @@ -2,19 +2,21 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Backing up a GitLab installation --- -# Backing up a GitLab installation +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} GitLab backups are taken by running the `backup-utility` command in the Toolbox pod provided in the chart. Backups can also be automated by enabling the [Cron based backup](#cron-based-backup) functionality of this chart. Before running the backup for the first time, you should ensure the -[Toolbox is properly configured](../charts/gitlab/toolbox/index.md#configuration) -for access to [object storage](index.md#object-storage). +[Toolbox is properly configured](../charts/gitlab/toolbox/_index.md#configuration) +for access to [object storage](_index.md#object-storage). Follow these steps for backing up a GitLab Helm chart based installation. @@ -32,13 +34,14 @@ Follow these steps for backing up a GitLab Helm chart based installation. kubectl exec <Toolbox pod name> -it -- backup-utility ``` -1. Visit the `gitlab-backups` bucket in the object storage service and ensure a tarball has been added. It will be named in `<timestamp>_gitlab_backup.tar` format. Read what the [backup timestamp](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html#backup-timestamp) is about. +1. 
Visit the `gitlab-backups` bucket in the object storage service and ensure a tarball has been added. It will be named in `<timestamp>_gitlab_backup.tar` format. Read what the [backup timestamp](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/#backup-timestamp) is about. 1. This tarball is required for restoration. ## Cron based backup -NOTE: +{{< alert type="note" >}} + The Kubernetes CronJob created by the Helm chart sets the `cluster-autoscaler.kubernetes.io/safe-to-evict: "false"` annotation on the jobTemplate. Some Kubernetes environments, such as @@ -46,6 +49,8 @@ GKE Autopilot, don't allow this annotation to be set and will not create Job Pods for the backup. This annotation can be changed by setting the `gitlab.toolbox.backups.cron.safeToEvict` parameter to `true`, which will allow the Jobs to be created but at the risk of being evicted and corrupting the backup. +{{< /alert >}} + Cron based backups can be enabled in this chart to happen at regular intervals as defined by the [Kubernetes schedule](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule). You need to set the following parameters: @@ -60,7 +65,7 @@ The backup utility can take some extra arguments. ### Skipping components -Skip components by using the `--skip` argument. Valid components names can be found at [Excluding specific data from the backup](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html#excluding-specific-data-from-the-backup). +Skip components by using the `--skip` argument. Valid components names can be found at [Excluding specific data from the backup](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/#excluding-specific-data-from-the-backup). Each component must have its own `--skip` argument. 
For example: @@ -119,21 +124,28 @@ gitlab: extraArgs: "--s3tool awscli --aws-s3-endpoint-url <MINIO-INGRESS-URL>" ``` -NOTE: +{{< alert type="note" >}} + The S3 CLI tool `s5cmd` support is under investigation. See [issue 523](https://gitlab.com/gitlab-org/build/CNG/-/issues/523) to track the progress. +{{< /alert >}} + ### Server-side repository backups -> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/438393) in GitLab 17.0. +{{< history >}} + +- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/438393) in GitLab 17.0. + +{{< /history >}} Instead of storing large repository backups in the backup archive, repository backups can be configured so that the Gitaly node that hosts each repository is responsible for creating the backup and streaming it to object storage. This helps reduce the network resources required to create and restore a backup. -See [Create server-side repository backups](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html#create-server-side-repository-backups). +See [Create server-side repository backups](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/#create-server-side-repository-backups). 
### Other arguments @@ -163,5 +175,5 @@ You also need to save a copy of the rails secrets as these are not included in t ## Additional Information -- [GitLab chart Backup/Restore Introduction](index.md) +- [GitLab chart Backup/Restore Introduction](_index.md) - [Restoring a GitLab installation](restore.md) diff --git a/chart/doc/backup-restore/restore.md b/chart/doc/backup-restore/restore.md index 7a26f547b635f78ceb3d3d033bb3fcdf9c1b4028..2917ce9d133596d6ad78b5739782e882d7d49bf8 100644 --- a/chart/doc/backup-restore/restore.md +++ b/chart/doc/backup-restore/restore.md @@ -2,17 +2,19 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Restoring a GitLab installation --- -# Restoring a GitLab installation +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} To obtain a backup tarball of an existing GitLab instance that used other installation methods like the Linux package or GitLab Helm chart, follow the instructions -[given in documentation](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html). +[given in documentation](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/). If you are restoring a backup taken from another instance, you must migrate your existing instance to using object storage before taking the backup. See [issue 646](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/646). @@ -21,8 +23,8 @@ It is recommended that you restore a backup to the same version of GitLab on whi GitLab backup restores are taken by running the `backup-utility` command on the Toolbox pod provided in the chart. 
-Before running the restore for the first time, you should ensure the [Toolbox is properly configured](index.md) for -access to [object storage](index.md#object-storage) +Before running the restore for the first time, you should ensure the [Toolbox is properly configured](_index.md) for +access to [object storage](_index.md#object-storage) The backup utility provided by GitLab Helm chart supports restoring a tarball from any of the following locations @@ -103,7 +105,7 @@ The steps for restoring a GitLab installation are kubectl get pods -lrelease=RELEASE_NAME,app=toolbox ``` -1. Get the tarball ready in any of the above locations. Make sure it is named in the `<timestamp>_gitlab_backup.tar` format. Read what the [backup timestamp](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html#backup-timestamp) is about. +1. Get the tarball ready in any of the above locations. Make sure it is named in the `<timestamp>_gitlab_backup.tar` format. Read what the [backup timestamp](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/#backup-timestamp) is about. 1. Note the current number of replicas for database clients for subsequent restart: @@ -146,20 +148,23 @@ The steps for restoring a GitLab installation are kubectl scale deploy -lapp=prometheus,release=<helm release name> -n <namespace> --replicas=<value> ``` -NOTE: +{{< alert type="note" >}} + During restoration, the backup tarball needs to be extracted to disk. This means the Toolbox pod should have disk of necessary size available. -For more details and configuration please see the [Toolbox documentation](../charts/gitlab/toolbox/index.md#persistence-configuration). +For more details and configuration please see the [Toolbox documentation](../charts/gitlab/toolbox/_index.md#persistence-configuration). 
+ +{{< /alert >}} ### Restore the runner registration token After restoring, the included runner will not be able to register to the instance because it no longer has the correct registration token. -Follow these [troubleshooting steps](../troubleshooting/index.md#included-gitlab-runner-failing-to-register) to get it updated. +Follow these [troubleshooting steps](../troubleshooting/_index.md#included-gitlab-runner-failing-to-register) to get it updated. ## Enable Kubernetes related settings If the restored backup was not from an existing installation of the chart, you will also need to enable some Kubernetes specific features after the restore. Such as -[incremental CI job logging](https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture). +[incremental CI job logging](https://docs.gitlab.com/administration/job_logs/#new-incremental-logging-architecture). 1. Find your Toolbox pod by executing the following command @@ -200,5 +205,5 @@ The restoration process does not update the `gitlab-initial-root-password` secre ## Additional Information -- [GitLab chart Backup/Restore Introduction](index.md) +- [GitLab chart Backup/Restore Introduction](_index.md) - [Backing up a GitLab installation](backup.md) diff --git a/chart/doc/charts/_index.md b/chart/doc/charts/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..ba7010d7070368c0f49a1422fc3be078342689f9 --- /dev/null +++ b/chart/doc/charts/_index.md @@ -0,0 +1,19 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure the GitLab Helm charts +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The GitLab Helm chart is made up of multiple [subcharts](gitlab/_index.md). 
+ +To configure any of the charts, use [globals](globals.md). + +You can also do [advanced configuration](../advanced/_index.md). diff --git a/chart/doc/charts/certmanager-issuer/_index.md b/chart/doc/charts/certmanager-issuer/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..546949ef7f4059fd519f8266b3f3c8e7b1eb53f8 --- /dev/null +++ b/chart/doc/charts/certmanager-issuer/_index.md @@ -0,0 +1,65 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using certmanager-issuer for CertManager Issuer creation +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +This chart is a helper for [Jetstack's CertManager Helm chart](https://cert-manager.io/docs/installation/helm/). +It automatically provisions an Issuer object, used by CertManager when requesting TLS certificates for +GitLab Ingresses. + +## Configuration + +We describe all the major sections of the configuration below. When configuring +from the parent chart, these values are: + +```yaml +certmanager-issuer: + # Configure an ACME Issuer in cert-manager. Only used if global.ingress.configureCertmanager is true. 
+ server: https://acme-v02.api.letsencrypt.org/directory + + # Provide an email to associate with your TLS certificates + # email: + + rbac: + create: true + + resources: + requests: + cpu: 50m + + # Priority class assigned to pods + priorityClassName: "" + + common: + labels: {} +``` + +## Installation parameters + +This table contains all the possible charts configurations that can be supplied +to the `helm install` command using the `--set` flags: + +| Parameter | Default | Description | +|-----------------------------------------------------|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `server` | `https://acme-v02.api.letsencrypt.org/directory` | Let's Encrypt server for use with the [ACME CertManager Issuer](https://cert-manager.io/docs/configuration/acme/). | +| `email` | | You must provide an email to associate with your TLS certificates. Let's Encrypt uses this address to contact you about expiring certificates, and issues related to your account. | +| `rbac.create` | `true` | When `true`, creates RBAC-related resources to allow for manipulation of CertManager Issuer objects. | +| `resources.requests.cpu` | `50m` | Requested CPU resources for the Issuer creation Job. | +| `common.labels` | | Common labels to apply to the ServiceAccount, Job, ConfigMap, and Issuer. | +| `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. 
| +| `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which Certmanager is started | +| `containerSecurityContext.runAsUser` | `65534` | User ID under which the container should be started | +| `containerSecurityContext.runAsGroup` | `65534` | Group ID under which the container should be started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `ttlSecondsAfterFinished` | `1800` | Controls when a finished job becomes eligible for cascading removal. | diff --git a/chart/doc/charts/certmanager-issuer/index.md b/chart/doc/charts/certmanager-issuer/index.md index bf11cf550eae40236081b0c86b8d262b64a7e140..b8889d2935d12f31a63d43a7b6cd9e01f097a1a1 100644 --- a/chart/doc/charts/certmanager-issuer/index.md +++ b/chart/doc/charts/certmanager-issuer/index.md @@ -60,4 +60,4 @@ to the `helm install` command using the `--set` flags: | `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process can gain more privileges than its parent process | | `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | | `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | -| `ttlSecondsAfterFinished` | `1800` | Controls when a finished job becomes eligible for cascading removal. | +| `ttlSecondsAfterFinished` | `1800` | Controls when a finished job becomes eligible for cascading removal. 
| diff --git a/chart/doc/charts/gitlab/_index.md b/chart/doc/charts/gitlab/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..c5f50b8cc2beaf350b5e90ed39557ebcf615e333 --- /dev/null +++ b/chart/doc/charts/gitlab/_index.md @@ -0,0 +1,97 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: GitLab Helm subcharts +--- + +The GitLab Helm chart is made up of multiple subcharts, +which provide the core GitLab components: + +- [Gitaly](gitaly/_index.md) +- [GitLab Exporter](gitlab-exporter/_index.md) +- [GitLab Pages](gitlab-pages/_index.md) +- [GitLab Runner](gitlab-runner/_index.md) +- [GitLab Shell](gitlab-shell/_index.md) +- [GitLab agent server (KAS)](kas/_index.md) +- [Mailroom](mailroom/_index.md) +- [Migrations](migrations/_index.md) +- [Praefect](praefect/_index.md) +- [Sidekiq](sidekiq/_index.md) +- [Spamcheck](spamcheck/_index.md) +- [Toolbox](toolbox/_index.md) +- [Webservice](webservice/_index.md) + +The parameters for each subchart must be under the `gitlab` key. For example, +GitLab Shell parameters would be similar to: + +```yaml +gitlab: + gitlab-shell: + ... 
+``` + +Use these charts for optional dependencies: + +- [MinIO](../minio/_index.md) +- [NGINX](../nginx/_index.md) +- [HAProxy](../haproxy/_index.md) +- [PostgreSQL](https://artifacthub.io/packages/helm/bitnami/postgresql) +- [Redis](https://artifacthub.io/packages/helm/bitnami/redis) +- [Registry](../registry/_index.md) +- [Traefik](../traefik/_index.md) + +Use these charts as optional additions: + +- [Prometheus](https://artifacthub.io/packages/helm/prometheus-community/prometheus) +- [_Unprivileged_](https://docs.gitlab.com/runner/install/kubernetes.html#running-docker-in-docker-containers-with-gitlab-runner) [GitLab Runner](https://docs.gitlab.com/runner/) that uses the Kubernetes executor +- Automatically provisioned SSL from [Let's Encrypt](https://letsencrypt.org/), which uses [Jetstack](https://venafi.com/jetstack-consult/)'s [cert-manager](https://cert-manager.io/docs/) with [certmanager-issuer](../certmanager-issuer/_index.md) + +## GitLab Helm subchart optional parameters + +### affinity + +{{< history >}} + +- [Introduced](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/3770) in GitLab 17.3 (Charts 8.3) for all GitLab Helm subcharts except `webservice` and `sidekiq`. + +{{< /history >}} + +`affinity` is an optional parameter in all GitLab Helm subcharts. When you set it, it takes precedence over the [global `affinity`](../globals.md#affinity) value. +For more information about `affinity`, see [the relevant Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity). + +{{< alert type="note" >}} + +The `webservice` and `sidekiq` Helm charts can only use the [global `affinity`](../globals.md#affinity) value. Follow [issue 25403](https://gitlab.com/gitlab-com/gl-infra/production-engineering/-/issues/25403) to learn when the local `affinity` is implemented for `webservice` and `sidekiq`. 
+ +{{< /alert >}} + +With `affinity`, you can set either or both: + +- `podAntiAffinity` rules to: + - Not schedule pods in the same domain as the pods that match the expression corresponding to the `topology key`. + - Set two modes of `podAntiAffinity` rules: required (`requiredDuringSchedulingIgnoredDuringExecution`) and preferred + (`preferredDuringSchedulingIgnoredDuringExecution`). Using the variable `antiAffinity` in `values.yaml`, set the setting to `soft` so that the preferred mode is + applied or set it to `hard` so that the required mode is applied. +- `nodeAffinity` rules to: + - Schedule pods to nodes that belong to a specific zone or zones. + - Set two modes of `nodeAffinity` rules: required (`requiredDuringSchedulingIgnoredDuringExecution`) and preferred + (`preferredDuringSchedulingIgnoredDuringExecution`). When set to `soft`, the preferred mode is applied. When set to `hard`, the required mode is applied. This + rule is implemented only for the `registry` chart and the `gitlab` chart along with all its subcharts except `webservice` and `sidekiq`. + +`nodeAffinity` only implements the [`In` operator](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#operators). 
+ +The following example sets `affinity`, with both `nodeAffinity` and `antiAffinity` set to `hard`: + +```yaml +nodeAffinity: "hard" +antiAffinity: "hard" +affinity: + nodeAffinity: + key: "test.com/zone" + values: + - us-east1-a + - us-east1-b + podAntiAffinity: + topologyKey: "test.com/hostname" +``` diff --git a/chart/doc/charts/gitlab/gitaly/_index.md b/chart/doc/charts/gitlab/gitaly/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..5a54bc59f0db1f114604ec031135084e55c4657c --- /dev/null +++ b/chart/doc/charts/gitlab/gitaly/_index.md @@ -0,0 +1,519 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab-Gitaly chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `gitaly` sub-chart provides a configurable deployment of Gitaly Servers. + +## Requirements + +This chart depends on access to the Workhorse service, either as part of the +complete GitLab chart or provided as an external service reachable from the Kubernetes +cluster this chart is deployed onto. + +## Design Choices + +The Gitaly container used in this chart also contains the GitLab Shell codebase in +order to perform the actions on the Git repositories that have not yet been ported into Gitaly. +The Gitaly container includes a copy of the GitLab Shell container within it, and +as a result we also need to configure GitLab Shell within this chart. + +## Configuration + +The `gitaly` chart is configured in two parts: [external services](#external-services), +and [chart settings](#chart-settings). + +Gitaly is by default deployed as a component when deploying the GitLab +chart. 
If deploying Gitaly separately, `global.gitaly.enabled` needs to +be set to `false` and additional configuration will need to be performed +as described in the [external Gitaly documentation](../../../advanced/external-gitaly/_index.md). + +### Installation command line options + +The table below contains all the possible charts configurations that can be supplied to +the `helm install` command using the `--set` flags. + +| Parameter | Default | Description | +|----------------------------------------------------------|---------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `annotations` | | Pod annotations | +| `backup.goCloudUrl` | | Object storage URL for [server side Gitaly backups](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#configure-server-side-backups). | +| `common.labels` | `{}` | Supplemental labels that are applied to all objects created by this chart. | +| `podLabels` | | Supplemental Pod labels. Will not be used for selectors. 
| +| `external[].hostname` | `- ""` | hostname of external node | +| `external[].name` | `- ""` | name of external node storage | +| `external[].port` | `- ""` | port of external node | +| `extraContainers` | | Multiline literal style string containing a list of containers to include | +| `extraInitContainers` | | List of extra init containers to include | +| `extraVolumeMounts` | | List of extra volumes mounts to do | +| `extraVolumes` | | List of extra volumes to create | +| `extraEnv` | | List of extra environment variables to expose | +| `extraEnvFrom` | | List of extra environment variables from other data sources to expose | +| `gitaly.serviceName` | | The name of the generated Gitaly service. Overrides `global.gitaly.serviceName`, and defaults to `<RELEASE-NAME>-gitaly` | +| `gpgSigning.enabled` | `false` | If [Gitaly GPG signing](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#configure-commit-signing-for-gitlab-ui-commits) should be used. | +| `gpgSigning.secret` | | The name of the secret used for Gitaly GPG signing. | +| `gpgSigning.key` | | The key in the GPG secret containing Gitaly's GPG signing key. 
| +| `image.pullPolicy` | `Always` | Gitaly image pull policy | +| `image.pullSecrets` | | Secrets for the image repository | +| `image.repository` | `registry.gitlab.com/gitlab-org/build/cng/gitaly` | Gitaly image repository | +| `image.tag` | `master` | Gitaly image tag | +| `init.image.repository` | | initContainer image | +| `init.image.tag` | | initContainer image tag | +| `init.containerSecurityContext` | | initContainer specific [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `internal.names[]` | `- default` | Ordered names of StatefulSet storages | +| `serviceLabels` | `{}` | Supplemental service labels | +| `service.externalPort` | `8075` | Gitaly service exposed port | +| `service.internalPort` | `8075` | Gitaly internal port | +| `service.name` | `gitaly` | The name of the Service port that Gitaly is behind in the Service object. | +| `service.type` | `ClusterIP` | Gitaly service type | +| `service.clusterIP` | `None` | You can specify your own cluster IP address as part of a Service creation request. This follows the same conventions as the Kubernetes' Service object's clusterIP. This must not be set if `service.type` is LoadBalancer. | +| `service.loadBalancerIP` | | An ephemeral IP address will be created if not set. This follows the same conventions as the Kubernetes' Service object's loadbalancerIP configuration. 
| `serviceAccount.annotations` | `{}` | ServiceAccount annotations | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created | +| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount | +| `serviceAccount.name` | | Name of the ServiceAccount. If not set, the full chart name is used | +| `securityContext.fsGroup` | `1000` | Group ID under which the pod should be started | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.runAsUser` | `1000` | User ID under which the pod should be started | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `shareProcessNamespace` | `false` | Allows making container processes visible to all other containers in the same pod | +| `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the Gitaly container is started | +| `containerSecurityContext.runAsUser` | `1000` | Allow overwriting of the specific security context user ID under which the Gitaly container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the Gitaly container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the Gitaly container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `tolerations` | `[]` | Toleration labels for pod assignment | +| `affinity` | `{}` | [Affinity rules](../_index.md#affinity) 
for pod assignment | +| `persistence.accessMode` | `ReadWriteOnce` | Gitaly persistence access mode | +| `persistence.annotations` | | Gitaly persistence annotations | +| `persistence.enabled` | `true` | Gitaly enable persistence flag | +| `persistance.labels` | | Gitaly persistence labels | +| `persistence.matchExpressions` | | Label-expression matches to bind | +| `persistence.matchLabels` | | Label-value matches to bind | +| `persistence.size` | `50Gi` | Gitaly persistence volume size | +| `persistence.storageClass` | | storageClassName for provisioning | +| `persistence.subPath` | | Gitaly persistence volume mount path | +| `priorityClassName` | | Gitaly StatefulSet priorityClassName | +| `logging.level` | | Log level | +| `logging.format` | `json` | Log format | +| `logging.sentryDsn` | | Sentry DSN URL - Exceptions from Go server | +| `logging.sentryEnvironment` | | Sentry environment to be used for logging | +| `shell.concurrency[]` | | Concurrency of each RPC endpoint. Specified using keys `rpc` and `maxPerRepo` | +| `packObjectsCache.enabled` | `false` | Enable the Gitaly pack-objects cache | +| `packObjectsCache.dir` | `/home/git/repositories/+gitaly/PackObjectsCache` | Directory where cache files get stored | +| `packObjectsCache.max_age` | `5m` | Cache entries lifespan | +| `packObjectsCache.min_occurrences` | `1` | Minimum count required to create a cache entry | +| `git.catFileCacheSize` | | Cache size used by Git cat-file process | +| `git.config[]` | `[]` | Git configuration that Gitaly should set when spawning Git commands | +| `prometheus.grpcLatencyBuckets` | | Buckets corresponding to histogram latencies on GRPC method calls to be recorded by Gitaly. A string form of the array (for example, `"[1.0, 1.5, 2.0]"`) is required as input | +| `statefulset.strategy` | `{}` | Allows one to configure the update strategy utilized by the StatefulSet | +| `statefulset.livenessProbe.initialDelaySeconds` | 0 | Delay before liveness probe is initiated. 
If startupProbe is enabled, this will be set to 0. | +| `statefulset.livenessProbe.periodSeconds` | 10 | How often to perform the liveness probe | +| `statefulset.livenessProbe.timeoutSeconds` | 3 | When the liveness probe times out | +| `statefulset.livenessProbe.successThreshold` | 1 | Minimum consecutive successes for the liveness probe to be considered successful after having failed | +| `statefulset.livenessProbe.failureThreshold` | 3 | Minimum consecutive failures for the liveness probe to be considered failed after having succeeded | +| `statefulset.readinessProbe.initialDelaySeconds` | 0 | Delay before readiness probe is initiated. If startupProbe is enabled, this will be set to 0. | +| `statefulset.readinessProbe.periodSeconds` | 5 | How often to perform the readiness probe | +| `statefulset.readinessProbe.timeoutSeconds` | 3 | When the readiness probe times out | +| `statefulset.readinessProbe.successThreshold` | 1 | Minimum consecutive successes for the readiness probe to be considered successful after having failed | +| `statefulset.readinessProbe.failureThreshold` | 3 | Minimum consecutive failures for the readiness probe to be considered failed after having succeeded | +| `statefulset.startupProbe.enabled` | `true` | Whether a startup probe is enabled. 
| +| `statefulset.startupProbe.initialDelaySeconds` | 1 | Delay before startup probe is initiated | +| `statefulset.startupProbe.periodSeconds` | 1 | How often to perform the startup probe | +| `statefulset.startupProbe.timeoutSeconds` | 1 | When the startup probe times out | +| `statefulset.startupProbe.successThreshold` | 1 | Minimum consecutive successes for the startup probe to be considered successful after having failed | +| `statefulset.startupProbe.failureThreshold` | 60 | Minimum consecutive failures for the startup probe to be considered failed after having succeeded | +| `metrics.enabled` | `false` | If a metrics endpoint should be made available for scraping | +| `metrics.port` | `9236` | Metrics endpoint port | +| `metrics.path` | `/metrics` | Metrics endpoint path | +| `metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable Prometheus Operator to manage the metrics scraping, note that enabling this removes the `prometheus.io` scrape annotations | +| `metrics.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the ServiceMonitor | +| `metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the ServiceMonitor | +| `metrics.metricsPort` | | **DEPRECATED** Use `metrics.port` | +| `gomemlimit.enabled` | `true` | This will automatically set the `GOMEMLIMIT` environment variable for the Gitaly container to `resources.limits.memory`, if that limit is also set. Users can override this value by setting this value false and setting `GOMEMLIMIT` in `extraEnv`. This must meet [documented format criteria](https://pkg.go.dev/runtime#hdr-Environment_Variables). | +| `cgroups.enabled` |`false` | Gitaly has built-in cgroups control. When configured, Gitaly assigns Git processes to a cgroup based on the repository the Git command is operating in. This parameter will enable repository cgroups. Note only cgroups v2 will be supported if enabled. 
| +| `cgroups.initContainer.image.repository` | `registry.com/gitlab-org/build/cng/gitaly-init-cgroups` | Gitaly image repository | +| `cgroups.initContainer.image.tag` | `master` | Gitaly image tag | +| `cgroups.initContainer.image.pullPolicy` | `IfNotPresent` | Gitaly image pull policy | +| `cgroups.mountpoint` | `/etc/gitlab-secrets/gitaly-pod-cgroup` | Where the parent cgroup directory is mounted. | +| `cgroups.hierarchyRoot` | `gitaly` | Parent cgroup under which Gitaly creates groups, and is expected to be owned by the user and group Gitaly runs as. | +| `cgroups.memoryBytes` | | The total memory limit that is imposed collectively on all Git processes that Gitaly spawns. 0 implies no limit. | +| `cgroups.cpuShares` | | The CPU limit that is imposed collectively on all Git processes that Gitaly spawns. 0 implies no limit. The maximum is 1024 shares, which represents 100% of CPU. | +| `cgroups.cpuQuotaUs` | | Used to throttle the cgroups’ processes if they exceed this quota value. We set cpuQuotaUs to 100ms so 1 core is 100000. 0 implies no limit. | +| `cgroups.repositories.count` | | The number of cgroups in the cgroups pool. Each time a new Git command is spawned, Gitaly assigns it to one of these cgroups based on the repository the command is for. A circular hashing algorithm assigns Git commands to these cgroups, so a Git command for a repository is always assigned to the same cgroup. | +| `cgroups.repositories.memoryBytes` | | The total memory limit imposed on all Git processes contained in a repository cgroup. 0 implies no limit. This value cannot exceed that of the top level memoryBytes. | +| `cgroups.repositories.cpuShares` | | The CPU limit that is imposed on all Git processes contained in a repository cgroup. 0 implies no limit. The maximum is 1024 shares, which represents 100% of CPU. This value cannot exceed that of the top level cpuShares. 
+| `cgroups.repositories.cpuQuotaUs` | | The cpuQuotaUs that is imposed on all Git processes contained in a repository cgroup. A Git process can’t use more than the given quota. We set cpuQuotaUs to 100ms so 1 core is 100000. 0 implies no limit. | +| `cgroups.repositories.maxCgroupsPerRepo` | 1 | The number of repository cgroups that Git processes targeting a specific repository can be distributed across. This enables more conservative CPU and memory limits to be configured for repository cgroups while still allowing for bursty workloads. For instance, with a `maxCgroupsPerRepo` of `2` and a `memoryBytes` limit of 10GB, independent Git operations against a specific repository can consume up to 20GB of memory. | +| `gracefulRestartTimeout` | `25` | Gitaly shutdown grace period, how long to wait for in-flight requests to complete (seconds). Pod `terminationGracePeriodSeconds` is set to this value + 5 seconds. | +| `timeout.uploadPackNegotiation` | | See [Configure the negotiation timeouts](https://docs.gitlab.com/administration/settings/gitaly_timeouts/#configure-the-negotiation-timeouts). | +| `timeout.uploadArchiveNegotiation` | | See [Configure the negotiation timeouts](https://docs.gitlab.com/administration/settings/gitaly_timeouts/#configure-the-negotiation-timeouts). | + +## Chart configuration examples + +### extraEnv + +`extraEnv` allows you to expose additional environment variables in all containers in the pods. + +Below is an example use of `extraEnv`: + +```yaml +extraEnv: + SOME_KEY: some_value + SOME_OTHER_KEY: some_other_value +``` + +When the container is started, you can confirm that the environment variables are exposed: + +```shell +env | grep SOME +SOME_KEY=some_value +SOME_OTHER_KEY=some_other_value +``` + +### extraEnvFrom + +`extraEnvFrom` allows you to expose additional environment variables from other data sources in all containers in the pods. 
+ +Below is an example use of `extraEnvFrom`: + +```yaml +extraEnvFrom: + MY_NODE_NAME: + fieldRef: + fieldPath: spec.nodeName + MY_CPU_REQUEST: + resourceFieldRef: + containerName: test-container + resource: requests.cpu + SECRET_THING: + secretKeyRef: + name: special-secret + key: special_token + # optional: boolean + CONFIG_STRING: + configMapKeyRef: + name: useful-config + key: some-string + # optional: boolean +``` + +### image.pullSecrets + +`pullSecrets` allows you to authenticate to a private registry to pull images for a pod. + +Additional details about private registries and their authentication methods can be +found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). + +Below is an example use of `pullSecrets` + +```yaml +image: + repository: my.gitaly.repository + tag: latest + pullPolicy: Always + pullSecrets: + - name: my-secret-name + - name: my-secondary-secret-name +``` + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. + +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. | +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not to create a ServiceAccount. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. 
| + +### tolerations + +`tolerations` allow you schedule pods on tainted worker nodes + +Below is an example use of `tolerations`: + +```yaml +tolerations: +- key: "node_label" + operator: "Equal" + value: "true" + effect: "NoSchedule" +- key: "node_label" + operator: "Equal" + value: "true" + effect: "NoExecute" +``` + +### affinity + +For more information, see [`affinity`](../_index.md#affinity). + +### annotations + +`annotations` allows you to add annotations to the Gitaly pods. + +Below is an example use of `annotations`: + +```yaml +annotations: + kubernetes.io/example-annotation: annotation-value +``` + +### priorityClassName + +`priorityClassName` allows you to assign a [PriorityClass](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) +to the Gitaly pods. + +Below is an example use of `priorityClassName`: + +```yaml +priorityClassName: persistence-enabled +``` + +### `git.config` + +`git.config` allows you to add configuration to all Git commands spawned by +Gitaly. Accepts configuration as documented in `git-config(1)` in `key` / +`value` pairs, as shown below. + +```yaml +git: + config: + - key: "pack.threads" + value: 4 + - key: "fsck.missingSpaceBeforeDate" + value: ignore +``` + +### cgroups + +To prevent exhaustion, Gitaly uses **cgroups** to assign Git processes to a +cgroup based on the repository being operated on. Each cgroup has memory +and CPU limits, ensuring system stability and preventing resource saturation. + +Please note that the `initContainer` that runs before Gitaly starts requires to be +**executed as root**. This container will configure the permissions so that Gitaly can manage cgroups. +Hence, it will mount a volume on the filesystem to have write access to `/sys/fs/cgroup`. 
+ +[Example of Oversubscription](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#configuring-oversubscription) + +```yaml +cgroups: + enabled: true + # Total limit across all repository cgroups + memoryBytes: 64424509440 # 60GiB + cpuShares: 1024 + cpuQuotaUs: 1200000 # 12 cores + # Per repository limits, 1000 repository cgroups + repositories: + count: 1000 + memoryBytes: 32212254720 # 30GiB + cpuShares: 512 + cpuQuotaUs: 400000 # 4 cores +``` + +## External Services + +This chart should be attached the Workhorse service. + +### Workhorse + +```yaml +workhorse: + host: workhorse.example.com + serviceName: webservice + port: 8181 +``` + +| Name | Type | Default | Description | +| :------------ | :-----: | :----------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `host` | String | | The hostname of the Workhorse server. This can be omitted in lieu of `serviceName`. | +| `port` | Integer | `8181` | The port on which to connect to the Workhorse server. | +| `serviceName` | String | `webservice` | The name of the `service` which is operating the Workhorse server. If this is present, and `host` is not, the chart will template the hostname of the service (and current `.Release.Name`) in place of the `host` value. This is convenient when using Workhorse as a part of the overall GitLab chart. | + +## Chart settings + +The following values are used to configure the Gitaly Pods. + +{{< alert type="note" >}} + +Gitaly uses an Auth Token to authenticate with the Workhorse and Sidekiq +services. The Auth Token secret and key are sourced from the `global.gitaly.authToken` +value. Additionally, the Gitaly container has a copy of GitLab Shell, which has some configuration +that can be set. 
The Shell authToken is sourced from the `global.shell.authToken` +values. + +{{< /alert >}} + +### Git Repository Persistence + +This chart provisions a PersistentVolumeClaim and mounts a corresponding persistent +volume for the Git repository data. You'll need physical storage available in the +Kubernetes cluster for this to work. If you'd rather use emptyDir, disable PersistentVolumeClaim +with: `persistence.enabled: false`. + +{{< alert type="note" >}} + +The persistence settings for Gitaly are used in a volumeClaimTemplate +that should be valid for all your Gitaly pods. You should *not* include settings +that are meant to reference a single specific volume (such as `volumeName`). If you want +to reference a specific volume, you need to manually create the PersistentVolumeClaim. + +{{< /alert >}} + +{{< alert type="note" >}} + +You can't change these through our settings once you've deployed. In [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) +the `VolumeClaimTemplate` is immutable. +{{< /alert >}} + +```yaml +persistence: + enabled: true + storageClass: standard + accessMode: ReadWriteOnce + size: 50Gi + matchLabels: {} + matchExpressions: [] + subPath: "data" + annotations: {} +``` + +| Name | Type | Default | Description | +| :----------------- | :-----: | :-------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `accessMode` | String | `ReadWriteOnce` | Sets the accessMode requested in the PersistentVolumeClaim. See [Kubernetes Access Modes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) for details. | +| `enabled` | Boolean | `true` | Sets whether or not to use a PersistentVolumeClaims for the repository data. If `false`, an emptyDir volume is used. 
| +| `matchExpressions` | Array | | Accepts an array of label condition objects to match against when choosing a volume to bind. This is used in the `PersistentVolumeClaim` `selector` section. See the [volumes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector). | +| `matchLabels` | Map | | Accepts a Map of label names and label values to match against when choosing a volume to bind. This is used in the `PersistentVolumeClaim` `selector` section. See the [volumes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector). | +| `size` | String | `50Gi` | The minimum volume size to request for the data persistence. | +| `storageClass` | String | | Sets the storageClassName on the Volume Claim for dynamic provisioning. When unset or null, the default provisioner will be used. If set to a hyphen, dynamic provisioning is disabled. | +| `subPath` | String | | Sets the path within the volume to mount, rather than the volume root. The root is used if the subPath is empty. | +| `annotations` | Map | | Sets the annotations on the Volume Claim for dynamic provisioning. See [Kubernetes Annotations Documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) for details. | + +### Running Gitaly over TLS + +{{< alert type="note" >}} + +This section refers to Gitaly being run inside the cluster using +the Helm charts. If you are using an external Gitaly instance and want to use +TLS for communicating with it, refer [the external Gitaly documentation](../../../advanced/external-gitaly/_index.md#connecting-to-external-gitaly-over-tls) + +{{< /alert >}} + +Gitaly supports communicating with other components over TLS. This is controlled +by the settings `global.gitaly.tls.enabled` and `global.gitaly.tls.secretName`. +Follow the steps to run Gitaly over TLS: + +1. The Helm chart expects a certificate to be provided for communicating over + TLS with Gitaly. 
This certificate should apply to all the Gitaly nodes that + are present. Hence all hostnames of each of these Gitaly nodes should be + added as a Subject Alternate Name (SAN) to the certificate. + + To know the hostnames to use, check the file `/srv/gitlab/config/gitlab.yml` + file in the Toolbox pod and check the various + `gitaly_address` fields specified under `repositories.storages` key within it. + + ```shell + kubectl exec -it <Toolbox pod> -- grep gitaly_address /srv/gitlab/config/gitlab.yml + ``` + +{{< alert type="note" >}} + +A basic script for generating custom signed certificates for +internal Gitaly pods [can be found in this repository](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/scripts/generate_certificates.sh). +Users can use or refer that script to generate certificates with proper +SAN attributes. + +{{< /alert >}} + +1. Create a k8s TLS secret using the certificate created. + + ```shell + kubectl create secret tls gitaly-server-tls --cert=gitaly.crt --key=gitaly.key + ``` + +1. Redeploy the Helm chart by passing `--set global.gitaly.tls.enabled=true`. + +### Global server hooks + +The Gitaly StatefulSet has support for [Global server hooks](https://docs.gitlab.com/administration/server_hooks/#create-a-global-server-hook-for-all-repositories). The hook scripts run on the Gitaly pod, and are therefore limited to the tools available in the [Gitaly container](https://gitlab.com/gitlab-org/build/CNG/-/blob/master/gitaly/Dockerfile). + +The hooks are populated using [ConfigMaps](https://kubernetes.io/docs/concepts/configuration/configmap/), and can be used by setting the following values as appropriate: + +1. `global.gitaly.hooks.preReceive.configmap` +1. `global.gitaly.hooks.postReceive.configmap` +1. 
`global.gitaly.hooks.update.configmap` + +To populate the ConfigMap, you can point `kubectl` to a directory of scripts: + +```shell +kubectl create configmap MAP_NAME --from-file /PATH/TO/SCRIPT/DIR +``` + +### GPG signing commits created by GitLab + +Gitaly has the ability to [GPG sign all commits](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#configure-commit-signing-for-gitlab-ui-commits) created via the GitLab UI, e.g. the WebIDE, +as well as commits created by GitLab, such as merge commits and squashes. + +1. Create a k8s secret using your GPG private key. + + ```shell + kubectl create secret generic gitaly-gpg-signing-key --from-file=signing_key=/path/to/gpg_signing_key.gpg + ``` + +1. Enable GPG signing in your `values.yaml`. + + ```yaml + gitlab: + gitaly: + gpgSigning: + enabled: true + secret: gitaly-gpg-signing-key + key: signing_key + ``` + +### Server-side backups + +The chart supports [Gitaly server-side backups](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#configure-server-side-backups). +To use them: + +1. Create a bucket to store the backups. +1. Configure the object store credentials and the storage URL. + + ```yaml + gitlab: + gitaly: + extraEnvFrom: + # Mount the existing object store secret to the expected environment variables. + AWS_ACCESS_KEY_ID: + secretKeyRef: + name: <Rails object store secret> + key: aws_access_key_id + AWS_SECRET_ACCESS_KEY: + secretKeyRef: + name: <Rails object store secret> + key: aws_secret_access_key + backup: + # This is the connection string for Gitaly server side backups. + goCloudUrl: <object store connection URL> + ``` + + For the expected environment variables and storage URL format for your object storage backend, see + the [Gitaly documentation](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#configure-server-side-backups). + +1. [Enable server-side backups with `backup-utility`](../../../backup-restore/backup.md#server-side-repository-backups). 
diff --git a/chart/doc/charts/gitlab/gitaly/index.md b/chart/doc/charts/gitlab/gitaly/index.md index a1c06077a788ba822b2d43cc72bc55d567bf5f41..954b0319a92593b6d171970b16bd913ec3bee191 100644 --- a/chart/doc/charts/gitlab/gitaly/index.md +++ b/chart/doc/charts/gitlab/gitaly/index.md @@ -86,7 +86,7 @@ the `helm install` command using the `--set` flags. | `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | | `securityContext.runAsUser` | `1000` | User ID under which the pod should be started | | `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | -| `shareProcessNamespace` | `false` | Allows making container processes visible to all other contains in the same pod | +| `shareProcessNamespace` | `false` | Allows making container processes visible to all other contains in the same pod | | `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the Gitaly container is started | | `containerSecurityContext.runAsUser` | `1000` | Allow overwriting of the specific security context user ID under which the Gitaly container is started | | `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the Gitaly container can gain more privileges than its parent process | diff --git a/chart/doc/charts/gitlab/gitlab-exporter/_index.md b/chart/doc/charts/gitlab/gitlab-exporter/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..bad619f27b515344dbd7ea5cb82bede5dbd1a891 --- /dev/null +++ b/chart/doc/charts/gitlab/gitlab-exporter/_index.md @@ -0,0 +1,201 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the 
GitLab-Exporter chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `gitlab-exporter` sub-chart provides Prometheus metrics for GitLab +application-specific data. It talks to PostgreSQL directly to perform +queries to retrieve data for CI builds, pull mirrors, etc. In addition, +it uses the Sidekiq API, which talks to Redis to gather different +metrics around the state of the Sidekiq queues (e.g. number of jobs). + +## Requirements + +This chart depends on Redis and PostgreSQL services, either as part of +the complete GitLab chart or provided as external services reachable +from the Kubernetes cluster on which this chart is deployed. + +## Configuration + +The `gitlab-exporter` chart is configured as follows: +[Global settings](#global-settings) and [Chart settings](#chart-settings). + +## Installation command line options + +The table below contains all the possible chart configurations that can be supplied +to the `helm install` command using the `--set` flags. + +| Parameter | Default | Description | +|----------------------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `affinity` | `{}` | [Affinity rules](../_index.md#affinity) for pod assignment | +| `annotations` | | Pod annotations | +| `common.labels` | `{}` | Supplemental labels that are applied to all objects created by this chart. | +| `podLabels` | | Supplemental Pod labels. Will not be used for selectors. | +| `common.labels` | | Supplemental labels that are applied to all objects created by this chart. 
| +| `deployment.strategy` | `{}` | Allows one to configure the update strategy utilized by the deployment | +| `enabled` | `true` | GitLab Exporter enabled flag | +| `extraContainers` | | Multiline literal style string containing a list of containers to include | +| `extraInitContainers` | | List of extra init containers to include | +| `extraVolumeMounts` | | List of extra volumes mounts to do | +| `extraVolumes` | | List of extra volumes to create | +| `extraEnv` | | List of extra environment variables to expose | +| `extraEnvFrom` | | List of extra environment variables from other data sources to expose | +| `image.pullPolicy` | `IfNotPresent` | GitLab image pull policy | +| `image.pullSecrets` | | Secrets for the image repository | +| `image.repository` | `registry.gitlab.com/gitlab-org/build/cng/gitlab-exporter` | GitLab Exporter image repository | +| `image.tag` | | image tag | +| `init.image.repository` | | initContainer image | +| `init.image.tag` | | initContainer image tag | +| `init.containerSecurityContext` | | initContainer specific [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `metrics.enabled` | `true` | If a metrics endpoint should be made available for scraping | +| `metrics.port` | `9168` | Metrics endpoint port | +| `metrics.path` | `/metrics` | Metrics endpoint path | +| `metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable 
Prometheus Operator to manage the metrics scraping, note that enabling this removes the `prometheus.io` scrape annotations | +| `metrics.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the ServiceMonitor | +| `metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the ServiceMonitor | +| `metrics.annotations` | | **DEPRECATED** Set explicit metrics annotations. Replaced by template content. | +| `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | +| `resources.requests.cpu` | `75m` | GitLab Exporter minimum CPU | +| `resources.requests.memory` | `100M` | GitLab Exporter minimum memory | +| `serviceLabels` | `{}` | Supplemental service labels | +| `service.externalPort` | `9168` | GitLab Exporter exposed port | +| `service.internalPort` | `9168` | GitLab Exporter internal port | +| `service.name` | `gitlab-exporter` | GitLab Exporter service name | +| `service.type` | `ClusterIP` | GitLab Exporter service type | +| `serviceAccount.annotations` | `{}` | ServiceAccount annotations | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created | +| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount | +| `serviceAccount.name` | | Name of the ServiceAccount. 
If not set, the full chart name is used | +| `securityContext.fsGroup` | `1000` | Group ID under which the pod should be started | +| `securityContext.runAsUser` | `1000` | User ID under which the pod should be started | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | +| `containerSecurityContext.runAsUser` | `1000` | Allows overwriting of the specific security context user ID under which the container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `false` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `tolerations` | `[]` | Toleration labels for pod assignment | +| `psql.port` | | Set PostgreSQL server port. Takes precedence over `global.psql.port` | +| `tls.enabled` | `false` | GitLab Exporter TLS enabled | +| `tls.secretName` | `{Release.Name}-gitlab-exporter-tls` | GitLab Exporter TLS secret. Must point to a [Kubernetes TLS secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets). | + +## Chart configuration examples + +### extraEnv + +`extraEnv` allows you to expose additional environment variables in all containers in the pods. 
+ +Below is an example use of `extraEnv`: + +```yaml +extraEnv: + SOME_KEY: some_value + SOME_OTHER_KEY: some_other_value +``` + +When the container is started, you can confirm that the environment variables are exposed: + +```shell +env | grep SOME +SOME_KEY=some_value +SOME_OTHER_KEY=some_other_value +``` + +### extraEnvFrom + +`extraEnvFrom` allows you to expose additional environment variables from other data sources in all containers in the pods. + +Below is an example use of `extraEnvFrom`: + +```yaml +extraEnvFrom: + MY_NODE_NAME: + fieldRef: + fieldPath: spec.nodeName + MY_CPU_REQUEST: + resourceFieldRef: + containerName: test-container + resource: requests.cpu + SECRET_THING: + secretKeyRef: + name: special-secret + key: special_token + # optional: boolean + CONFIG_STRING: + configMapKeyRef: + name: useful-config + key: some-string + # optional: boolean +``` + +### image.pullSecrets + +`pullSecrets` allows you to authenticate to a private registry to pull images for a pod. + +Additional details about private registries and their authentication methods can be +found in [the Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). + +Below is an example use of `pullSecrets`: + +```YAML +image: + repository: my.image.repository + pullPolicy: Always + pullSecrets: + - name: my-secret-name + - name: my-secondary-secret-name +``` + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. + +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. 
| +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. | + +### affinity + +For more information, see [`affinity`](../_index.md#affinity). + +### annotations + +`annotations` allows you to add annotations to the GitLab Exporter pods. For example: + +```YAML +annotations: + kubernetes.io/example-annotation: annotation-value +``` + +## Global settings + +We share some common global settings among our charts. See the [Globals Documentation](../../globals.md) +for common configuration options, such as GitLab and Registry hostnames. + +## Chart settings + +The following values are used to configure the GitLab Exporter pod. + +### metrics.enabled + +By default, the pod exposes a metrics endpoint at `/metrics`. When +metrics are enabled, annotations are added to each pod allowing a +Prometheus server to discover and scrape the exposed metrics. 
diff --git a/chart/doc/charts/gitlab/gitlab-pages/_index.md b/chart/doc/charts/gitlab/gitlab-pages/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..5e32b544abbe75c8c830d21f4c017c479392be21 --- /dev/null +++ b/chart/doc/charts/gitlab/gitlab-pages/_index.md @@ -0,0 +1,462 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab Pages chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `gitlab-pages` subchart provides a daemon for serving static websites from +GitLab projects. + +## Requirements + +This chart depends on access to the Workhorse services, either as part of the +complete GitLab chart or provided as an external service reachable from the Kubernetes +cluster this chart is deployed onto. + +## Configuration + +The `gitlab-pages` chart is configured as follows: +[Global settings](#global-settings) and [Chart settings](#chart-settings). + +## Global Settings + +We share some common global settings among our charts. See the +[Globals Documentation](../../globals.md#configure-gitlab-pages) for details. + +## Chart settings + +The tables in following two sections contains all the possible chart +configurations that can be supplied to the `helm install` command using the +`--set` flags. 
+ +### General settings + +| Parameter | Default | Description | +|----------------------------------------------------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `affinity` | `{}` | [Affinity rules](../_index.md#affinity) for pod assignment | +| `annotations` | | Pod annotations | +| `common.labels` | `{}` | Supplemental labels that are applied to all objects created by this chart. | +| `deployment.strategy` | `{}` | Allows one to configure the update strategy used by the deployment. When not provided, the cluster default is used. | +| `extraEnv` | | List of extra environment variables to expose | +| `extraEnvFrom` | | List of extra environment variables from other data source to expose | +| `hpa.behavior` | `{scaleDown: {stabilizationWindowSeconds: 300 }}` | Behavior contains the specifications for up- and downscaling behavior (requires `autoscaling/v2beta2` or higher) | +| `hpa.customMetrics` | `[]` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`) | +| `hpa.cpu.targetType` | `AverageValue` | Set the autoscaling CPU target type, must be either `Utilization` or `AverageValue` | +| `hpa.cpu.targetAverageValue` | `100m` | Set the autoscaling CPU target value | +| `hpa.cpu.targetAverageUtilization` | | Set the autoscaling CPU target utilization | +| `hpa.memory.targetType` | | Set the autoscaling memory target type, must be either `Utilization` or `AverageValue` | +| `hpa.memory.targetAverageValue` | | Set the autoscaling memory target value | +| `hpa.memory.targetAverageUtilization` | | Set the autoscaling memory target utilization | +| `hpa.minReplicas` | `1` | Minimum number of replicas | +| 
`hpa.maxReplicas` | `10` | Maximum number of replicas | +| `hpa.targetAverageValue` | | **DEPRECATED** Set the autoscaling CPU target value | +| `image.pullPolicy` | `IfNotPresent` | GitLab image pull policy | +| `image.pullSecrets` | | Secrets for the image repository | +| `image.repository` | `registry.gitlab.com/gitlab-org/build/cng/gitlab-pages` | GitLab Pages image repository | +| `image.tag` | | image tag | +| `init.image.repository` | | initContainer image | +| `init.image.tag` | | initContainer image tag | +| `init.containerSecurityContext` | | initContainer specific [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `keda.enabled` | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `keda.pollingInterval` | `30` | The interval to check each trigger on | +| `keda.cooldownPeriod` | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `keda.minReplicaCount` | | Minimum number of replicas KEDA will scale the resource down to, defaults to `hpa.minReplicas` | +| `keda.maxReplicaCount` | | Maximum number of replicas KEDA will scale the resource up to, defaults to `hpa.maxReplicas` | +| `keda.fallback` | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `keda.hpaName` | | The name of the HPA resource KEDA will 
create, defaults to `keda-hpa-{scaled-object-name}` | +| `keda.restoreToOriginalReplicaCount` | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `keda.behavior` | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `keda.triggers` | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | +| `metrics.enabled` | `true` | If a metrics endpoint should be made available for scraping | +| `metrics.port` | `9235` | Metrics endpoint port | +| `metrics.path` | `/metrics` | Metrics endpoint path | +| `metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable Prometheus Operator to manage the metrics scraping, note that enabling this removes the `prometheus.io` scrape annotations | +| `metrics.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the ServiceMonitor | +| `metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the ServiceMonitor | +| `metrics.annotations` | | **DEPRECATED** Set explicit metrics annotations. Replaced by template content. | +| `metrics.tls.enabled` | `false` | TLS enabled for the metrics endpoint | +| `metrics.tls.secretName` | `{Release.Name}-pages-metrics-tls` | Secret for the metrics endpoint TLS cert and key | +| `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | +| `podLabels` | | Supplemental Pod labels. Will not be used for selectors. 
| +| `resources.requests.cpu` | `900m` | GitLab Pages minimum CPU | +| `resources.requests.memory` | `2G` | GitLab Pages minimum memory | +| `securityContext.fsGroup` | `1000` | Group ID under which the pod should be started | +| `securityContext.runAsUser` | `1000` | User ID under which the pod should be started | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | +| `containerSecurityContext.runAsUser` | `1000` | Allow to overwrite the specific security context user ID under which the container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `service.externalPort` | `8090` | GitLab Pages exposed port | +| `service.internalPort` | `8090` | GitLab Pages internal port | +| `service.name` | `gitlab-pages` | GitLab Pages service name | +| `service.annotations` | | Annotations for all pages services. | +| `service.primary.annotations` | | Annotations for the primary service only. | +| `service.metrics.annotations` | | Annotations for the metrics service only. | +| `service.customDomains.annotations` | | Annotations for the custom domains service only. 
| +| `service.customDomains.type` | `LoadBalancer` | Type of service created for handling custom domains | +| `service.customDomains.internalHttpsPort` | `8091` | Port where Pages daemon listens for HTTPS requests | +| `service.customDomains.internalHttpsPort` | `8091` | Port where Pages daemon listens for HTTPS requests | +| `service.customDomains.nodePort.http` | | Node Port to be opened for HTTP connections. Valid only if `service.customDomains.type` is `NodePort` | +| `service.customDomains.nodePort.https` | | Node Port to be opened for HTTPS connections. Valid only if `service.customDomains.type` is `NodePort` | +| `service.sessionAffinity` | `None` | Type of the session affinity. Must be either `ClientIP` or `None` (this only makes sense for traffic originating from within the cluster) | +| `service.sessionAffinityConfig` | | Session affinity config. If `service.sessionAffinity` == `ClientIP` the default session sticky time is 3 hours (10800) | +| `serviceAccount.annotations` | `{}` | ServiceAccount annotations | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created | +| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount | +| `serviceAccount.name` | | Name of the ServiceAccount. 
If not set, the full chart name is used | +| `serviceLabels` | `{}` | Supplemental service labels | +| `tolerations` | `[]` | Toleration labels for pod assignment | + +### Pages specific settings + +| Parameter | Default | Description | +| --------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `artifactsServerTimeout` | `10` | Timeout (in seconds) for a proxied request to the artifacts server | +| `artifactsServerUrl` | | API URL to proxy artifact requests to | +| `extraVolumeMounts` | | List of extra volumes mounts to add | +| `extraVolumes` | | List of extra volumes to create | +| `gitlabCache.cleanup` | int | See: [Pages Global Settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `gitlabCache.expiry` | int | See: [Pages Global Settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `gitlabCache.refresh` | int | See: [Pages Global Settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `gitlabClientHttpTimeout` | | GitLab API HTTP client connection timeout in seconds | +| `gitlabClientJwtExpiry` | | JWT Token expiry time in seconds | +| `gitlabRetrieval.interval` | int | See: [Pages Global Settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `gitlabRetrieval.retries` | int | See: [Pages Global Settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `gitlabRetrieval.timeout` | int | See: [Pages Global Settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `gitlabServer` | | GitLab server FQDN | +| `headers` | `[]` | Specify any additional http headers that should be sent to the client with each response. 
Multiple headers can be given as an array, header and value as one string, for example `['my-header: myvalue', 'my-other-header: my-other-value']` | +| `insecureCiphers` | `false` | Use default list of cipher suites, may contain insecure ones like 3DES and RC4 | +| `internalGitlabServer` | | Internal GitLab server used for API requests | +| `logFormat` | `json` | Log output format | +| `logVerbose` | `false` | Verbose logging | +| `maxConnections` | | Limit on the number of concurrent connections to the HTTP, HTTPS or proxy listeners | +| `maxURILength` | | Limit the length of URI, 0 for unlimited. | +| `propagateCorrelationId` | | Reuse existing Correlation-ID from the incoming request header `X-Request-ID` if present | +| `redirectHttp` | `false` | Redirect pages from HTTP to HTTPS | +| `sentry.enabled` | `false` | Enable Sentry reporting | +| `sentry.dsn` | | The address for sending Sentry crash reporting to | +| `sentry.environment` | | The environment for Sentry crash reporting | +| `serverShutdowntimeout` | `30s` | GitLab Pages server shutdown timeout in seconds | +| `statusUri` | | The URL path for a status page | +| `tls.minVersion` | | Specifies the minimum SSL/TLS version | +| `tls.maxVersion` | | Specifies the maximum SSL/TLS version | +| `useHTTPProxy` | `false` | Use this option when GitLab Pages is behind a Reverse Proxy. | +| `useProxyV2` | `false` | Force HTTPS request to utilize the PROXYv2 protocol. 
| +| `zipCache.cleanup` | int | See: [Zip Serving and Cache Configuration](https://docs.gitlab.com/administration/pages/#zip-serving-and-cache-configuration) | +| `zipCache.expiration` | int | See: [Zip Serving and Cache Configuration](https://docs.gitlab.com/administration/pages/#zip-serving-and-cache-configuration) | +| `zipCache.refresh` | int | See: [Zip Serving and Cache Configuration](https://docs.gitlab.com/administration/pages/#zip-serving-and-cache-configuration) | +| `zipOpenTimeout` | int | See: [Zip Serving and Cache Configuration](https://docs.gitlab.com/administration/pages/#zip-serving-and-cache-configuration) | +| `zipHTTPClientTimeout` | int | See: [Zip Serving and Cache Configuration](https://docs.gitlab.com/administration/pages/#zip-serving-and-cache-configuration) | +| `rateLimitSourceIP` | | See: [GitLab Pages rate-limits](https://docs.gitlab.com/administration/pages/#rate-limits). | +| `rateLimitSourceIPBurst` | | See: [GitLab Pages rate-limits](https://docs.gitlab.com/administration/pages/#rate-limits) | +| `rateLimitDomain` | | See: [GitLab Pages rate-limits](https://docs.gitlab.com/administration/pages/#rate-limits). | +| `rateLimitDomainBurst` | | See: [GitLab Pages rate-limits](https://docs.gitlab.com/administration/pages/#rate-limits) | +| `rateLimitTLSSourceIP` | | See: [GitLab Pages rate-limits](https://docs.gitlab.com/administration/pages/#rate-limits). | +| `rateLimitTLSSourceIPBurst` | | See: [GitLab Pages rate-limits](https://docs.gitlab.com/administration/pages/#rate-limits) | +| `rateLimitTLSDomain` | | See: [GitLab Pages rate-limits](https://docs.gitlab.com/administration/pages/#rate-limits). 
| +| `rateLimitTLSDomainBurst` | | See: [GitLab Pages rate-limits](https://docs.gitlab.com/administration/pages/#rate-limits) | +| `rateLimitSubnetsAllowList` | | See: [GitLab Pages rate-limits](#rate-limits) | +| `serverReadTimeout` | `5s` | See: [GitLab Pages global settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `serverReadHeaderTimeout` | `1s` | See: [GitLab Pages global settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `serverWriteTimeout` | `5m` | See: [GitLab Pages global settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `serverKeepAlive` | `15s` | See: [GitLab Pages global settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `authTimeout` | `5s` | See: [GitLab Pages global settings](https://docs.gitlab.com/administration/pages/#global-settings) | +| `authCookieSessionTimeout` | `10m` | See: [GitLab Pages global settings](https://docs.gitlab.com/administration/pages/#global-settings) | + +### Configuring the `ingress` + +This section controls the GitLab Pages Ingress. + +| Name | Type | Default | Description | +| :--------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `apiVersion` | String | | Value to use in the `apiVersion` field. 
| +| `annotations` | String | | This field is an exact match to the standard `annotations` for [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/). | +| `configureCertmanager` | Boolean | `false` | Toggles Ingress annotation `cert-manager.io/issuer` and `acme.cert-manager.io/http01-edit-in-place`. The acquisition of a TLS certificate for GitLab Pages via cert-manager is disabled because a wildcard certificate acquisition requires a cert-manager Issuer with a [DNS01 solver](https://cert-manager.io/docs/configuration/acme/dns01/), and the Issuer deployed by this chart only provides a [HTTP01 solver](https://cert-manager.io/docs/configuration/acme/http01/). For more information see the [TLS requirement for GitLab Pages](../../../installation/tls.md). | +| `enabled` | Boolean | | Setting that controls whether to create Ingress objects for services that support them. When not set, the `global.ingress.enabled` setting is used. | +| `tls.enabled` | Boolean | | When set to `false`, you disable TLS for the Pages subchart. This is mainly useful for cases in which you cannot use TLS termination at `ingress-level`, like when you have a TLS-terminating proxy before the Ingress Controller. | +| `tls.secretName` | String | | The name of the Kubernetes TLS Secret that contains a valid certificate and key for the pages URL. When not set, the `global.ingress.tls.secretName` is used instead. Defaults to not being set. | + +## Chart configuration examples + +### extraVolumes + +`extraVolumes` allows you to configure extra volumes chart-wide. + +Below is an example use of `extraVolumes`: + +```yaml +extraVolumes: | + - name: example-volume + persistentVolumeClaim: + claimName: example-pvc +``` + +### extraVolumeMounts + +`extraVolumeMounts` allows you to configure extra volumeMounts on all containers chart-wide. 
+ +Below is an example use of `extraVolumeMounts`: + +```yaml +extraVolumeMounts: | + - name: example-volume + mountPath: /etc/example +``` + +### Configuring the `networkpolicy` + +This section controls the +[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/). +This configuration is optional and is used to limit Egress and Ingress of the +Pods to specific endpoints. + +| Name | Type | Default | Description | +| :---------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | This setting enables the `NetworkPolicy` | +| `ingress.enabled` | Boolean | `false` | When set to `true`, the `Ingress` network policy will be activated. This will block all Ingress connections unless rules are specified. | +| `ingress.rules` | Array | `[]` | Rules for the Ingress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below | +| `egress.enabled` | Boolean | `false` | When set to `true`, the `Egress` network policy will be activated. This will block all egress connections unless rules are specified. | +| `egress.rules` | Array | `[]` | Rules for the egress policy, these for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below | + +### Example Network Policy + +The `gitlab-pages` service requires Ingress connections for port 80 and 443 and +Egress connections to various to default workhorse port 8181. 
This example adds +the following network policy: + +- Allows Ingress requests: + - From the `nginx-ingress` pod to port `8090` + - From the `prometheus` pod to port `9235` +- Allows Egress requests: + - To `kube-dns` on port `53` + - To the `webservice` pod to port `8181` + - To endpoints like AWS VPC endpoint for S3 `172.16.1.0/24` on port `443` + +_Note the example provided is only an example and may not be complete_ + +The example is based on the assumption that `kube-dns` was deployed +to the namespace `kube-system`, `prometheus` was deployed to the namespace +`monitoring` and `nginx-ingress` was deployed to the namespace `nginx-ingress`. + +```yaml +networkpolicy: + enabled: true + ingress: + enabled: true + rules: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: prometheus + component: server + release: gitlab + ports: + - port: 9235 + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: nginx-ingress + podSelector: + matchLabels: + app: nginx-ingress + component: controller + ports: + - port: 8090 + egress: + enabled: true + rules: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: UDP + - to: + - ipBlock: + cidr: 172.16.1.0/24 + ports: + - port: 443 + - to: + - podSelector: + matchLabels: + app: webservice + ports: + - port: 8181 +``` + +### TLS access to GitLab Pages + +To have TLS access to the GitLab Pages feature you must: + +1. Create a dedicated wildcard certificate for your GitLab Pages domain in this format: + `*.pages.<yourdomain>`. + +1. Create the secret in Kubernetes: + + ```shell + kubectl create secret tls tls-star-pages-<mysecret> --cert=<path/to/fullchain.pem> --key=<path/to/privkey.pem> + ``` + +1. 
Configure GitLab Pages to use this secret:
+
+   ```yaml
+   gitlab:
+     gitlab-pages:
+       ingress:
+         tls:
+           secretName: tls-star-pages-<mysecret>
+   ```
+
+1. Create a DNS entry in your DNS provider with the name `*.pages.<yourdomain>`
+   pointing to your LoadBalancer.
+
+### Pages domain without wildcard DNS
+
+{{< history >}}
+
+- [Introduced](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/5570) as a [beta](https://docs.gitlab.com/policy/development_stages_support/#beta) in GitLab 17.2.
+- [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/483365) in GitLab 17.4.
+
+{{< /history >}}
+
+{{< alert type="warning" >}}
+
+GitLab Pages supports only one URL scheme at a time: Either with wildcard DNS, or without wildcard DNS. If you enable `namespaceInPath`, existing GitLab Pages websites are accessible only on domains without wildcard DNS.
+
+{{< /alert >}}
+
+1. Enable `namespaceInPath` in the global Pages settings.
+
+   ```yaml
+   global:
+     pages:
+       namespaceInPath: true
+   ```
+
+1. Create a DNS entry in your DNS provider with the name `pages.<yourdomain>` pointing to your LoadBalancer.
+
+#### TLS access to GitLab Pages domain without wildcard DNS
+
+1. Create a certificate for your GitLab Pages domain in this format: `pages.<yourdomain>`.
+1. Create the secret in Kubernetes:
+
+   ```shell
+   kubectl create secret tls tls-star-pages-<mysecret> --cert=<path/to/fullchain.pem> --key=<path/to/privkey.pem>
+   ```
+
+1. Configure GitLab Pages to use this secret:
+
+   ```yaml
+   gitlab:
+     gitlab-pages:
+       ingress:
+         tls:
+           secretName: tls-star-pages-<mysecret>
+   ```
+
+#### Configure access control
+
+1. Enable `accessControl` in the global pages settings.
+
+   ```yaml
+   global:
+     pages:
+       accessControl: true
+   ```
+
+1. Optional.
If [TLS access](#tls-access-to-gitlab-pages-domain-without-wildcard-dns) is configured, update the redirect URI in the GitLab Pages
+   [System OAuth application](https://docs.gitlab.com/integration/oauth_provider/#create-an-instance-wide-application)
+   to use the HTTPS protocol.
+
+{{< alert type="warning" >}}
+
+GitLab Pages does not update the OAuth application, and the default `authRedirectUri` is updated to `https://pages.<yourdomain>/projects/auth`. While accessing a private Pages site, if you encounter an error 'The redirect URI included is not valid', update the redirect URI in the GitLab Pages [System OAuth application](https://docs.gitlab.com/integration/oauth_provider/#create-an-instance-wide-application) to `https://pages.<yourdomain>/projects/auth`.
+
+{{< /alert >}}
+
+### Rate limits
+
+You can enforce rate limits to help minimize the risk of a Denial of Service (DoS) attack. Detailed [rate limits documentation](https://docs.gitlab.com/administration/pages/#rate-limits) is available.
+
+To allow certain IP ranges (subnets) to bypass all rate limits:
+
+- `rateLimitSubnetsAllowList`: Sets the allow list with the IP ranges (subnets) that should bypass all rate limits.
+
+#### Configure rate limits subnets allow list
+
+Set the allow list with the IP ranges (subnets) in `charts/gitlab/charts/gitlab-pages/values.yaml`:
+
+```yaml
+gitlab:
+  gitlab-pages:
+    rateLimitSubnetsAllowList:
+    - "1.2.3.4/24"
+    - "2001:db8::1/32"
+```
+
+### Configuring KEDA
+
+This `keda` section enables the installation of [KEDA](https://keda.sh/) `ScaledObjects` instead of regular `HorizontalPodAutoscalers`.
+This configuration is optional and can be used when there is a need for autoscaling based on custom or external metrics.
+
+Most settings default to the values set in the `hpa` section where applicable.
+ +If the following are true, CPU and memory triggers are added automatically based on the CPU and memory thresholds set in the `hpa` section: + +- `triggers` is not set. +- The corresponding `request.cpu.request` or `request.memory.request` setting is also set to a non-zero value. + +If no triggers are set, the `ScaledObject` is not created. + +Refer to the [KEDA documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/) for more details about those settings. + +| Name | Type | Default | Description | +| :---------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `pollingInterval` | Integer | `30` | The interval to check each trigger on | +| `cooldownPeriod` | Integer | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `minReplicaCount` | Integer | | Minimum number of replicas KEDA will scale the resource down to, defaults to `hpa.minReplicas` | +| `maxReplicaCount` | Integer | | Maximum number of replicas KEDA will scale the resource up to, defaults to `hpa.maxReplicas` | +| `fallback` | Map | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `hpaName` | String | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `restoreToOriginalReplicaCount` | Boolean | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `behavior` | Map | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `triggers` | Array | | List of triggers to activate scaling of the target resource, defaults to 
triggers computed from `hpa.cpu` and `hpa.memory` | + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. + +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. | +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the chart full name is used. | + +### affinity + +For more information, see [`affinity`](../_index.md#affinity). diff --git a/chart/doc/charts/gitlab/gitlab-runner/_index.md b/chart/doc/charts/gitlab/gitlab-runner/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..4ad285b6196c53efc6a5c363e76c1524687beaaf --- /dev/null +++ b/chart/doc/charts/gitlab/gitlab-runner/_index.md @@ -0,0 +1,97 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab Runner chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The GitLab Runner subchart provides a GitLab Runner for running CI jobs. 
It is enabled by default and should work out of the box with support for caching using s3 compatible object storage. + +{{< alert type="warning" >}} + +The default configuration of the included GitLab Runner chart is **not intended for production**. +It is provided as a proof of concept (PoC) implementation where all GitLab services are deployed +in the cluster. For production deployments, install GitLab Runner on a separate machine for +[security and performance reasons](https://docs.gitlab.com/install/requirements/#gitlab-runner). +For more information, see the +[reference architecture documentation](../../../installation/_index.md#use-the-reference-architectures). + +{{< /alert >}} + +## Requirements + +In GitLab 16.0, we introduced a new runner creation workflow that uses runner authentication tokens to +register runners. The legacy workflow that uses registration tokens is deprecated and disabled by default +in GitLab 17.0. It will be removed in GitLab 18.0. + +To use the recommended workflow: + +- [Generate an authentication token.](https://docs.gitlab.com/ci/runners/new_creation_workflow/#prevent-your-runner-registration-workflow-from-breaking) +- Update the runner secret (`<release>-gitlab-runner-secret`) manually, as the configuration + is not handled by the [`shared-secrets`](../../shared-secrets.md) job. +- Set `gitlab-runner.runners.locked` to `null`: + + ```yaml + gitlab-runner: + runners: + locked: null + ``` + +If you want to use the legacy workflow (not recommended): + +- You must [re-enable the legacy workflow](https://docs.gitlab.com/administration/settings/continuous_integration/#enable-runner-registrations-tokens). +- The registration token is populated by the [`shared-secrets`](../../shared-secrets.md) Job. +- You must migrate to the new workflow before GitLab 18.0, which will remove support for the legacy workflow. 
+ +## Configuration + +For more information, see the documentation on [usage and configuration](https://docs.gitlab.com/runner/install/kubernetes.html). + +## Deploying a stand-alone runner + +By default we do infer `gitlabUrl`, automatically generate a registration token, and generate it through the `migrations` chart. This behavior will not work if you intend to deploy it with a running GitLab instance. + +In this case you will need to set `gitlabUrl` value to be the URL of the running GitLab instance. You will also need to manually create `gitlab-runner` secret and fill it with the `registrationToken` provided by the running GitLab. + +## Using Docker-in-Docker + +In order to run Docker-in-Docker, the runner container needs to be privileged to have access to the needed capabilities. To enable it set the `privileged` value to `true`. See the [upstream documentation](https://docs.gitlab.com/runner/install/kubernetes.html#running-docker-in-docker-containers-with-gitlab-runners) in regards to why this is does not default to `true`. + +### Security concerns + +Privileged containers have extended capabilities, for example they can mount arbitrary files from the host they run on. Make sure to run the container in an isolated environment such that nothing important runs beside it. + +## Default runner configuration + +The default runner configuration used in the GitLab chart has been customized to use the included MinIO for cache by default. If you are setting the runner `config` value, you will need to also configure your own cache configuration. + +```yaml +gitlab-runner: + runners: + config: | + [[runners]] + [runners.kubernetes] + image = "ubuntu:22.04" + {{- if .Values.global.minio.enabled }} + [runners.cache] + Type = "s3" + Path = "gitlab-runner" + Shared = true + [runners.cache.s3] + ServerAddress = {{ include "gitlab-runner.cache-tpl.s3ServerAddress" . 
}} + BucketName = "runner-cache" + BucketLocation = "us-east-1" + Insecure = false + {{ end }} +``` + +All customized GitLab Runner chart configuration is available in the +[top-level `values.yaml` file](https://gitlab.com/gitlab-org/charts/gitlab/raw/master/values.yaml) +under the `gitlab-runner` key. diff --git a/chart/doc/charts/gitlab/gitlab-shell/_index.md b/chart/doc/charts/gitlab/gitlab-shell/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..1682ee7fbc97cb475890e760b9d439eea5d4393a --- /dev/null +++ b/chart/doc/charts/gitlab/gitlab-shell/_index.md @@ -0,0 +1,529 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab Shell chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `gitlab-shell` sub-chart provides an SSH server configured for Git SSH access to GitLab. + +## Requirements + +This chart depends on access to the Workhorse services, either as part of the +complete GitLab chart or provided as an external service reachable from the Kubernetes +cluster this chart is deployed onto. + +## Design Choices + +In order to easily support SSH replicas, and avoid using shared storage for the SSH +authorized keys, we are using the SSH [AuthorizedKeysCommand](https://man.openbsd.org/sshd_config#AuthorizedKeysCommand) +to authenticate against the GitLab authorized keys endpoint. As a result, we don't persist +or update the AuthorizedKeys file within these pods. + +## Configuration + +The `gitlab-shell` chart is configured in two parts: [external services](#external-services), +and [chart settings](#chart-settings). The port exposed through Ingress is configured +with `global.shell.port`, and defaults to `22`. 
The Service's external port is also +controlled by `global.shell.port`. + +## Installation command line options + +| Parameter | Default | Description | +|----------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `affinity` | `{}` | [Affinity rules](../_index.md#affinity) for pod assignment | +| `annotations` | | Pod annotations | +| `podLabels` | | Supplemental Pod labels. Will not be used for selectors. | +| `common.labels` | | Supplemental labels that are applied to all objects created by this chart. | +| `config.clientAliveInterval` | `0` | Interval between keepalive pings on otherwise idle connections; the default value of 0 disables this ping | +| `config.loginGraceTime` | `60` | Specifies amount of time that the server will disconnect after if the user has not successfully logged in | +| `config.maxStartups.full` | `100` | SSHd refuse probability will increase linearly and all unauthenticated connection attempts would be refused when unauthenticated connections number will reach specified number | +| `config.maxStartups.rate` | `30` | SSHd will refuse connections with specified probability when there would be too many unauthenticated connections (optional) | +| `config.maxStartups.start` | `10` | SSHd will refuse connection attempts with some probability if there are currently more than the specified number of unauthenticated connections (optional) | +| `config.proxyProtocol` | `false` | Enable PROXY protocol support for the `gitlab-sshd` daemon | +| `config.proxyPolicy` | `"use"` | Specify policy for handling PROXY protocol. 
Value must be one of `use, require, ignore, reject` | +| `config.proxyHeaderTimeout` | `"500ms"` | The maximum duration `gitlab-sshd` will wait before giving up on reading the PROXY protocol header. Must include units: `ms`, `s`, or `m`. | +| `config.ciphers` | `[aes128-gcm@openssh.com, chacha20-poly1305@openssh.com, aes256-gcm@openssh.com, aes128-ctr, aes192-ctr, aes256-ctr]` | Specify the ciphers allowed. | +| `config.kexAlgorithms` | `[curve25519-sha256, curve25519-sha256@libssh.org, ecdh-sha2-nistp256, ecdh-sha2-nistp384, ecdh-sha2-nistp521, diffie-hellman-group14-sha256, diffie-hellman-group14-sha1]` | Specifies the available KEX (Key Exchange) algorithms. | +| `config.macs` | `[hmac-sha2-256-etm@openssh.com, hmac-sha2-512-etm@openssh.com, hmac-sha2-256, hmac-sha2-512, hmac-sha1]` | Specifies the available MAC (message authentication code algorithms. | +| `config.publicKeyAlgorithms` | `[]` | Custom list of public key algorithms. If empty, the default algorithms are used. | +| `config.gssapi.enabled` | `false` | Enable GSS-API support for the `gitlab-sshd` daemon | +| `config.gssapi.keytab.secret` | | The name of a Kubernetes secret holding the keytab for the gssapi-with-mic authentication method | +| `config.gssapi.keytab.key` | `keytab` | Key holding the keytab in the Kubernetes secret | +| `config.gssapi.krb5Config` | | Content of the `/etc/krb5.conf` file in the GitLab Shell container | +| `config.gssapi.servicePrincipalName` | | The Kerberos service name to be used by the `gitlab-sshd` daemon | +| `config.lfs.pureSSHProtocol` | `false` | Enable LFS Pure SSH protocol support | +| `config.pat.enabled` | `true` | Enable PAT using SSH | +| `config.pat.allowedScopes` | `[]` | An array of scopes allowed for PATs generated with SSH | +| `opensshd.supplemental_config` | | Supplemental configuration, appended to `sshd_config`. 
Strict alignment to [man page](https://manpages.debian.org/bookworm/openssh-server/sshd_config.5.en.html) | +| `deployment.livenessProbe.initialDelaySeconds` | 10 | Delay before liveness probe is initiated | +| `deployment.livenessProbe.periodSeconds` | 10 | How often to perform the liveness probe | +| `deployment.livenessProbe.timeoutSeconds` | 3 | When the liveness probe times out | +| `deployment.livenessProbe.successThreshold` | 1 | Minimum consecutive successes for the liveness probe to be considered successful after having failed | +| `deployment.livenessProbe.failureThreshold` | 3 | Minimum consecutive failures for the liveness probe to be considered failed after having succeeded | +| `deployment.readinessProbe.initialDelaySeconds` | 10 | Delay before readiness probe is initiated | +| `deployment.readinessProbe.periodSeconds` | 5 | How often to perform the readiness probe | +| `deployment.readinessProbe.timeoutSeconds` | 3 | When the readiness probe times out | +| `deployment.readinessProbe.successThreshold` | 1 | Minimum consecutive successes for the readiness probe to be considered successful after having failed | +| `deployment.readinessProbe.failureThreshold` | 2 | Minimum consecutive failures for the readiness probe to be considered failed after having succeeded | +| `deployment.strategy` | `{}` | Allows one to configure the update strategy utilized by the deployment | +| `deployment.terminationGracePeriodSeconds` | 30 | Seconds that Kubernetes will wait for a pod to forcibly exit | +| `enabled` | `true` | Shell enable flag | +| `extraContainers` | | Multiline literal style string containing a list of containers to include | +| `extraInitContainers` | | List of extra init containers to include | +| `extraVolumeMounts` | | List of extra volumes mounts to do | +| `extraVolumes` | | List of extra volumes to create | +| `extraEnv` | | List of extra environment variables to expose | +| `extraEnvFrom` | | List of extra environment variables from other data 
sources to expose | +| `hpa.behavior` | `{scaleDown: {stabilizationWindowSeconds: 300 }}` | Behavior contains the specifications for up- and downscaling behavior (requires `autoscaling/v2beta2` or higher) | +| `hpa.customMetrics` | `[]` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`) | +| `hpa.cpu.targetType` | `AverageValue` | Set the autoscaling CPU target type, must be either `Utilization` or `AverageValue` | +| `hpa.cpu.targetAverageValue` | `100m` | Set the autoscaling CPU target value | +| `hpa.cpu.targetAverageUtilization` | | Set the autoscaling CPU target utilization | +| `hpa.memory.targetType` | | Set the autoscaling memory target type, must be either `Utilization` or `AverageValue` | +| `hpa.memory.targetAverageValue` | | Set the autoscaling memory target value | +| `hpa.memory.targetAverageUtilization` | | Set the autoscaling memory target utilization | +| `hpa.targetAverageValue` | | **DEPRECATED** Set the autoscaling CPU target value | +| `image.pullPolicy` | `IfNotPresent` | Shell image pull policy | +| `image.pullSecrets` | | Secrets for the image repository | +| `image.repository` | `registry.gitlab.com/gitlab-org/build/cng/gitlab-shell` | Shell image repository | +| `image.tag` | `master` | Shell image tag | +| `init.image.repository` | | initContainer image | +| `init.image.tag` | | initContainer image tag | +| `init.containerSecurityContext` | | initContainer specific [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| 
`init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `keda.enabled` | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `keda.pollingInterval` | `30` | The interval to check each trigger on | +| `keda.cooldownPeriod` | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `keda.minReplicaCount` | | Minimum number of replicas KEDA will scale the resource down to, defaults to `minReplicas` | +| `keda.maxReplicaCount` | | Maximum number of replicas KEDA will scale the resource up to, defaults to `maxReplicas` | +| `keda.fallback` | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `keda.hpaName` | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `keda.restoreToOriginalReplicaCount` | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `keda.behavior` | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `keda.triggers` | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | +| `logging.format` | `json` | Set to `text` for unstructured logs | +| `logging.sshdLogLevel` | `ERROR` | Log level for underlying SSH daemon | +| `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | +| `replicaCount` | `1` | Shell replicas | +| `serviceLabels` | `{}` | Supplemental service labels | +| `service.allocateLoadBalancerNodePorts` | Not set, to use Kubernetes default value. 
| Allows to disable NodePort allocation on LoadBalancer service, see the [documentation](https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation) | +| `service.externalTrafficPolicy` | `Cluster` | Shell service external traffic policy (Cluster or Local) | +| `service.internalPort` | `2222` | Shell internal port | +| `service.nodePort` | | Sets shell nodePort if set | +| `service.name` | `gitlab-shell` | Shell service name | +| `service.type` | `ClusterIP` | Shell service type | +| `service.loadBalancerIP` | | IP address to assign to LoadBalancer (if supported) | +| `service.loadBalancerSourceRanges` | | List of IP CIDRs allowed access to LoadBalancer (if supported) | +| `serviceAccount.annotations` | `{}` | ServiceAccount annotations | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created | +| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount | +| `serviceAccount.name` | | Name of the ServiceAccount. 
If not set, the full chart name is used | +| `securityContext.fsGroup` | `1000` | Group ID under which the pod should be started | +| `securityContext.runAsUser` | `1000` | User ID under which the pod should be started | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | +| `containerSecurityContext.runAsUser` | `1000` | Allow to overwrite the specific security context under which the container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `sshDaemon` | `openssh` | Selects which SSH daemon would be run, possible values (`openssh`, `gitlab-sshd`) | +| `tolerations` | `[]` | Toleration labels for pod assignment | +| `traefik.entrypoint` | `gitlab-shell` | When using traefik, which traefik entrypoint to use for GitLab Shell. Defaults to `gitlab-shell` | +| `traefik.tcpMiddlewares` | `[]` | When using traefik, which TCP Middlewares to add to IngressRouteTCP resource. No middlewares by default | +| `workhorse.serviceName` | `webservice` | Workhorse service name (by default, Workhorse is a part of the webservice Pods / Service) | +| `metrics.enabled` | `false` | If a metrics endpoint should be made available for scraping (requires `sshDaemon=gitlab-sshd`). 
| +| `metrics.port` | `9122` | Metrics endpoint port | +| `metrics.path` | `/metrics` | Metrics endpoint path | +| `metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable Prometheus Operator to manage the metrics scraping, note that enabling this removes the `prometheus.io` scrape annotations | +| `metrics.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the ServiceMonitor | +| `metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the ServiceMonitor | +| `metrics.annotations` | | **DEPRECATED** Set explicit metrics annotations. Replaced by template content. | + +## Chart configuration examples + +### extraEnv + +`extraEnv` allows you to expose additional environment variables in all containers in the pods. + +Below is an example use of `extraEnv`: + +```yaml +extraEnv: + SOME_KEY: some_value + SOME_OTHER_KEY: some_other_value +``` + +When the container is started, you can confirm that the environment variables are exposed: + +```shell +env | grep SOME +SOME_KEY=some_value +SOME_OTHER_KEY=some_other_value +``` + +### extraEnvFrom + +`extraEnvFrom` allows you to expose additional environment variables from other data sources in all containers in the pods. + +Below is an example use of `extraEnvFrom`: + +```yaml +extraEnvFrom: + MY_NODE_NAME: + fieldRef: + fieldPath: spec.nodeName + MY_CPU_REQUEST: + resourceFieldRef: + containerName: test-container + resource: requests.cpu + SECRET_THING: + secretKeyRef: + name: special-secret + key: special_token + # optional: boolean + CONFIG_STRING: + configMapKeyRef: + name: useful-config + key: some-string + # optional: boolean +``` + +### image.pullSecrets + +`pullSecrets` allows you to authenticate to a private registry to pull images for a pod. 
+ +Additional details about private registries and their authentication methods can be +found in [the Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). + +Below is an example use of `pullSecrets`: + +```yaml +image: + repository: my.shell.repository + tag: latest + pullPolicy: Always + pullSecrets: + - name: my-secret-name + - name: my-secondary-secret-name +``` + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. + +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. | +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. | + +### livenessProbe/readinessProbe + +`deployment.livenessProbe` and `deployment.readinessProbe` provide a mechanism +to help control the termination of Pods under some scenarios. + +Larger repositories benefit from tuning liveness and readiness probe +times to match their typical long-running connections. Set readiness +probe duration shorter than liveness probe duration to minimize +potential interruptions during `clone` and `push` operations. 
Increase
+`terminationGracePeriodSeconds` and give these operations more time before
+the scheduler terminates the pod. Consider the example below as a starting
+point to tune GitLab Shell pods for increased stability and efficiency
+with larger repository workloads.
+
+```yaml
+deployment:
+  livenessProbe:
+    initialDelaySeconds: 10
+    periodSeconds: 20
+    timeoutSeconds: 3
+    successThreshold: 1
+    failureThreshold: 10
+  readinessProbe:
+    initialDelaySeconds: 10
+    periodSeconds: 5
+    timeoutSeconds: 2
+    successThreshold: 1
+    failureThreshold: 3
+  terminationGracePeriodSeconds: 300
+```
+
+Reference the official [Kubernetes Documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/)
+for additional details regarding this configuration.
+
+### tolerations
+
+`tolerations` allow you to schedule pods on tainted worker nodes
+
+Below is an example use of `tolerations`:
+
+```yaml
+tolerations:
+- key: "node_label"
+  operator: "Equal"
+  value: "true"
+  effect: "NoSchedule"
+- key: "node_label"
+  operator: "Equal"
+  value: "true"
+  effect: "NoExecute"
+```
+
+### affinity
+
+For more information, see [`affinity`](../_index.md#affinity).
+
+### annotations
+
+`annotations` allows you to add annotations to the GitLab Shell pods.
+
+Below is an example use of `annotations`
+
+```yaml
+annotations:
+  kubernetes.io/example-annotation: annotation-value
+```
+
+## External Services
+
+This chart should be attached to the Workhorse service.
+ +### Workhorse + +```yaml +workhorse: + host: workhorse.example.com + serviceName: webservice + port: 8181 +``` + +| Name | Type | Default | Description | +| :------------ | :-----: | :----------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `host` | String | | The hostname of the Workhorse server. This can be omitted in lieu of `serviceName`. | +| `port` | Integer | `8181` | The port on which to connect to the Workhorse server. | +| `serviceName` | String | `webservice` | The name of the `service` which is operating the Workhorse server. By default, Workhorse is a part of the webservice Pods / Service. If this is present, and `host` is not, the chart will template the hostname of the service (and current `.Release.Name`) in place of the `host` value. This is convenient when using Workhorse as a part of the overall GitLab chart. | + +## Chart settings + +The following values are used to configure the GitLab Shell Pods. + +### hostKeys.secret + +The name of the Kubernetes `secret` to grab the SSH host keys from. The keys in the +secret must start with the key names `ssh_host_` in order to be used by GitLab Shell. + +### authToken + +GitLab Shell uses an Auth Token in its communication with Workhorse. Share the token +with GitLab Shell and Workhorse using a shared Secret. + +```yaml +authToken: + secret: gitlab-shell-secret + key: secret +``` + +| Name | Type | Default | Description | +| :----------------- | :----: | :------ | :-------------------------------------------------------------------- | +| `authToken.key` | String | | The name of the key in the above secret that contains the auth token. 
|
+| `authToken.secret` | String | | The name of the Kubernetes `Secret` to pull from. |
+
+### LoadBalancer Service
+
+If the `service.type` is set to `LoadBalancer`, you can optionally specify `service.loadBalancerIP` to create
+the `LoadBalancer` with a user-specified IP (if your cloud provider supports it).
+
+You can also optionally specify a list of `service.loadBalancerSourceRanges` to restrict
+the CIDR ranges that can access the `LoadBalancer` (if your cloud provider supports it).
+
+Additional information about the `LoadBalancer` service type can be found in
+[the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/#loadbalancer)
+
+```yaml
+service:
+  type: LoadBalancer
+  loadBalancerIP: 1.2.3.4
+  loadBalancerSourceRanges:
+  - 5.6.7.8/32
+  - 10.0.0.0/8
+```
+
+### OpenSSH supplemental configuration
+
+When making use of OpenSSH's `sshd` (via `.sshDaemon: openssh`), it is possible to provide supplemental configuration
+in two ways: `.opensshd.supplemental_config`, and via mounting configuration snippets to `/etc/ssh/sshd_config.d/*.conf`.
+
+Any configuration supplied _must_ meet the functional requirements of `sshd_config`. Ensure you read the [manual page](https://man.openbsd.org/sshd_config).
+
+#### opensshd.supplemental_config
+
+The content of `.opensshd.supplemental_config` will be directly placed at the end of the `sshd_config` file within the container.
+This value should be a multi-line string.
+
+Example, enabling older clients using the `ssh-rsa` key exchange algorithms. Note that enabling deprecated algorithms, such as `ssh-rsa`, creates [significant security vulnerabilities](https://www.openssh.com/txt/release-8.8). The likelihood of exploitation is **significantly amplified** on publicly exposed GitLab instances with these changes.
+ +```yaml +opensshd: + supplemental_config: |- + HostKeyAlgorithms +ssh-rsa,ssh-rsa-cert-v01@openssh.com + PubkeyAcceptedAlgorithms +ssh-rsa,ssh-rsa-cert-v01@openssh.com + CASignatureAlgorithms +ssh-rsa +``` + +#### sshd_config.d + +You may provide full configuration snippets to `sshd` via mounting content into `/etc/ssh/sshd_config.d`, with the files +matching `*.conf`. Note, that these are included _after_ the default configuration which is required for the application +to function in the container, and within the chart. These values _will not_ override the contents of `sshd_config`, but +extend them. + +Example, mounting a single item of a ConfigMap into the container via `extraVolumes` and `extraVolumeMounts`: + +```yaml +extraVolumes: | + - name: gitlab-sshdconfig-extra + configMap: + name: gitlab-sshdconfig-extra + +extraVolumeMounts: | + - name: gitlab-sshdconfig-extra + mountPath: /etc/ssh/sshd_config.d/extra.conf + subPath: extra.conf +``` + +### Configuring the `networkpolicy` + +This section controls the +[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/). +This configuration is optional and is used to limit Egress and Ingress of the +Pods to specific endpoints. + +| Name | Type | Default | Description | +| :---------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | This setting enables the `NetworkPolicy` | +| `ingress.enabled` | Boolean | `false` | When set to `true`, the `Ingress` network policy will be activated. This will block all Ingress connections unless rules are specified. 
| `ingress.rules` | Array | `[]` | Rules for the Ingress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below |
+| `egress.enabled` | Boolean | `false` | When set to `true`, the `Egress` network policy will be activated. This will block all egress connections unless rules are specified. |
+| `egress.rules` | Array | `[]` | Rules for the egress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below |
+
+### Example Network Policy
+
+The `gitlab-shell` service requires Ingress connections for port 22 and Egress
+connections to various services, including the default workhorse port 8181. This example adds the
+following network policy:
+
+- Allows Ingress requests:
+  - From the `nginx-ingress` pod to port `2222`
+  - From the `prometheus` pod to port `9122`
+
+  NOTE:
+  Access from `prometheus` to port `9122` is only necessary when the SSH daemon is set to `gitlab-sshd`
+
+- Allows Egress requests:
+  - To the `webservice` pod to port `8181`
+  - To the `gitaly` pod to port `8075`
+
+_Note the example provided is only an example and may not be complete_
+
+The example is based on the assumption that `kube-dns` was deployed
+to the namespace `kube-system`, `prometheus` was deployed to the namespace
+`monitoring` and `nginx-ingress` was deployed to the namespace `nginx-ingress`.
+ +```yaml +networkpolicy: + enabled: true + ingress: + enabled: true + rules: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: nginx-ingress + podSelector: + matchLabels: + app: nginx-ingress + component: controller + ports: + - port: 2222 + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: prometheus + component: server + release: gitlab + ports: + - port: 9122 + egress: + enabled: true + rules: + - to: + - podSelector: + matchLabels: + app: gitaly + ports: + - port: 8075 + - to: + - podSelector: + matchLabels: + app: webservice + ports: + - port: 8181 + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: UDP +``` + +## Configuring KEDA + +This `keda` section enables the installation of [KEDA](https://keda.sh/) `ScaledObjects` instead of regular `HorizontalPodAutoscalers`. +This configuration is optional and can be used when there is a need for autoscaling based on custom or external metrics. + +Most settings default to the values set in the `hpa` section where applicable. + +If the following are true, CPU and memory triggers are added automatically based on the CPU and memory thresholds set in the `hpa` section: + +- `triggers` is not set. +- The corresponding `request.cpu.request` or `request.memory.request` setting is also set to a non-zero value. + +If no triggers are set, the `ScaledObject` is not created. + +Refer to the [KEDA documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/) for more details about those settings. 
+ +| Name | Type | Default | Description | +| :---------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `pollingInterval` | Integer | `30` | The interval to check each trigger on | +| `cooldownPeriod` | Integer | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `minReplicaCount` | Integer | | Minimum number of replicas KEDA will scale the resource down to, defaults to `minReplicas` | +| `maxReplicaCount` | Integer | | Maximum number of replicas KEDA will scale the resource up to, defaults to `maxReplicas` | +| `fallback` | Map | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `hpaName` | String | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `restoreToOriginalReplicaCount` | Boolean | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `behavior` | Map | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `triggers` | Array | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | + +See [`examples/keda/gitlab-shell.yml`](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/keda/gitlab-shell.yml) for an usage example of `keda`. 
diff --git a/chart/doc/charts/gitlab/gitlab-zoekt/_index.md b/chart/doc/charts/gitlab/gitlab-zoekt/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..dca71c0a7c91a4f46f227468682df717e03b3475 --- /dev/null +++ b/chart/doc/charts/gitlab/gitlab-zoekt/_index.md @@ -0,0 +1,125 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Zoekt chart +--- + +{{< details >}} + +- Tier: Premium, Ultimate +- Offering: GitLab.com, GitLab Self-Managed +- Status: Beta + +{{< /details >}} + +{{< history >}} + +- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/105049) as a [beta](https://docs.gitlab.com/policy/development_stages_support/#beta) in GitLab 15.9 [with flags](https://docs.gitlab.com/administration/feature_flags/) named `index_code_with_zoekt` and `search_code_with_zoekt`. Disabled by default. +- [Enabled on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/issues/388519) in GitLab 16.6. +- Feature flags `index_code_with_zoekt` and `search_code_with_zoekt` [removed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/148378) in GitLab 17.1. + +{{< /history >}} + +{{< alert type="warning" >}} + +This feature is in [beta](https://docs.gitlab.com/policy/development_stages_support/#beta) and subject to change without notice. +For more information, see [epic 9404](https://gitlab.com/groups/gitlab-org/-/epics/9404). + +{{< /alert >}} + +The Zoekt chart provides support for +[exact code search](https://docs.gitlab.com/user/search/exact_code_search/). +You can install the chart by setting `gitlab-zoekt.install` to `true`. +For more information, see [`gitlab-zoekt`](https://gitlab.com/gitlab-org/cloud-native/charts/gitlab-zoekt). 
+ +## Enable the Zoekt chart + +To enable the Zoekt chart, set the following values: + +```shell +--set gitlab-zoekt.install=true \ +--set gitlab-zoekt.replicas=2 \ # Number of Zoekt pods. If you want to use only one pod, you can skip this setting. +--set gitlab-zoekt.indexStorage=128Gi # Disk size for the Zoekt node. Zoekt requires up to three times the repository's default branch's storage size, depending on the number of large and binary files. +``` + +## Set CPU and memory usage + +You can define requests and limits for the Zoekt chart by modifying the following GitLab.com default settings: + +```yaml + webserver: + resources: + requests: + cpu: 4 + memory: 32Gi + limits: + cpu: 16 + memory: 128Gi + indexer: + resources: + requests: + cpu: 4 + memory: 6Gi + limits: + cpu: 16 + memory: 12Gi + gateway: + resources: + requests: + cpu: 2 + memory: 512Mi + limits: + cpu: 4 + memory: 1Gi +``` + +## Configure Zoekt in GitLab + +{{< history >}} + +- Shards [renamed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/134717) to nodes in GitLab 16.6. + +{{< /history >}} + +To configure Zoekt for a top-level group in GitLab: + +1. Connect to the Rails console of the toolbox pod: + + ```shell + kubectl exec <toolbox pod name> -it -c toolbox -- gitlab-rails console -e production + ``` + +1. [Enable exact code search](https://docs.gitlab.com/integration/exact_code_search/zoekt/#enable-exact-code-search). +1. 
Set up indexing: + + {{< tabs >}} + + {{< tab title="GitLab 17.7 and later" >}} + + ```shell + node = ::Search::Zoekt::Node.online.last + namespace = Namespace.find_by_full_path('<top-level-group-to-index>') + enabled_namespace = Search::Zoekt::EnabledNamespace.find_or_create_by(namespace: namespace) + replica = enabled_namespace.replicas.find_or_create_by(namespace_id: enabled_namespace.root_namespace_id) + node.indices.create!(zoekt_enabled_namespace_id: enabled_namespace.id, namespace_id: namespace.id, zoekt_replica_id: replica.id) + ``` + + {{< /tab >}} + + {{< tab title="GitLab 17.6 and earlier" >}} + + ```shell + node = ::Search::Zoekt::Node.online.last + namespace = Namespace.find_by_full_path('<top-level-group-to-index>') + enabled_namespace = Search::Zoekt::EnabledNamespace.find_or_create_by(namespace: namespace) + replica = enabled_namespace.replicas.find_or_create_by(namespace_id: enabled_namespace.root_namespace_id) + replica.ready! + node.indices.create!(zoekt_enabled_namespace_id: enabled_namespace.id, namespace_id: namespace.id, zoekt_replica_id: replica.id, state: :ready) + ``` + + {{< /tab >}} + + {{< /tabs >}} + +Zoekt can now index projects in that group after any project is updated or created. For the initial indexing, wait at least a few minutes for Zoekt to start indexing the namespace. 
diff --git a/chart/doc/charts/gitlab/kas/_index.md b/chart/doc/charts/gitlab/kas/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..20c4defacbda3ca2b728b9424e8f73012cb2fa4b --- /dev/null +++ b/chart/doc/charts/gitlab/kas/_index.md @@ -0,0 +1,258 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab `kas` chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `kas` sub-chart provides a configurable deployment of the +[GitLab agent server (KAS)](https://docs.gitlab.com/administration/clusters/kas/). +The agent server is a component you install together with GitLab. It is required to +manage the [GitLab agent for Kubernetes](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent). + +This chart depends on access to the GitLab API and the Gitaly Servers. +When you enable this chart, an Ingress is deployed. + +To consume minimal resources, the `kas` container uses a distroless image. +The deployed services are exposed by an Ingress, which uses +[WebSocket proxying](https://nginx.org/en/docs/http/websocket.html) for communication. +This proxy allows long-lived connections with the external component, +[`agentk`](https://docs.gitlab.com/user/clusters/agent/install/). +`agentk` is the Kubernetes cluster-side agent counterpart. + +The route to access the service depends on your [Ingress configuration](#specify-an-ingress). + +For more information, see the +[GitLab agent for Kubernetes architecture](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/doc/architecture.md). + +## Disable the agent server + +The GitLab agent server (`kas`) is enabled by default. +To disable it on your GitLab instance, set the Helm property `global.kas.enabled` to `false`. 
+ +For example: + +```shell +helm upgrade --install kas --set global.kas.enabled=false +``` + +### Specify an Ingress + +When you use the chart's Ingress with the default configuration, +the service for the agent server is reachable on a subdomain. +For example, for `global.hosts.domain: example.com`, the agent server +is reachable at `kas.example.com`. + +The [KAS Ingress](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/charts/gitlab/charts/kas/templates/ingress.yaml) +can use a different domain than the `global.hosts.domain`. + +Set `global.hosts.kas.name`, for example: + +```shell +global.hosts.kas.name: kas.my-other-domain.com +``` + +This example uses `kas.my-other-domain.com` as the host for the KAS Ingress alone. +The rest of the services (including GitLab, Registry, MinIO, etc.) use the domain +specified in `global.hosts.domain`. + +### Installation command line options + +You can pass these parameters to the `helm install` command by using the `--set` flags. + +| Parameter | Default | Description | +|----------------------------------------------------------|-------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `affinity` | `{}` | [Affinity rules](../_index.md#affinity) for pod assignment | +| `annotations` | `{}` | Pod annotations. | +| `common.labels` | `{}` | Supplemental labels that are applied to all objects created by this chart. 
| +| `securityContext.runAsUser` | `65532` | User ID under which the pod should be started | +| `securityContext.runAsGroup` | `65534` | Group ID under which the pod should be started | +| `securityContext.fsGroup` | `65532` | Group ID under which the pod should be started | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext.runAsUser` | `65532` | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) user ID under which the container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `extraContainers` | | Multiline literal style string containing a list of containers to include. 
| +| `extraEnv` | | List of extra environment variables to expose | +| `extraEnvFrom` | | List of extra environment variables from other data sources to expose | +| `init.containerSecurityContext` | | init container securityContext overrides | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `image.repository` | `registry.gitlab.com/gitlab-org/build/cng/gitlab-kas` | Image repository. | +| `image.tag` | `v13.7.0` | Image tag. | +| `hpa.behavior` | `{scaleDown: {stabilizationWindowSeconds: 300 }}` | Behavior contains the specifications for up- and downscaling behavior (requires `autoscaling/v2beta2` or higher). | +| `hpa.customMetrics` | `[]` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`). | +| `hpa.cpu.targetType` | `AverageValue` | Set the autoscaling CPU target type, must be either `Utilization` or `AverageValue`. | +| `hpa.cpu.targetAverageValue` | `100m` | Set the autoscaling CPU target value. | +| `hpa.cpu.targetAverageUtilization` | | Set the autoscaling CPU target utilization. | +| `hpa.memory.targetType` | | Set the autoscaling memory target type, must be either `Utilization` or `AverageValue`. | +| `hpa.memory.targetAverageValue` | | Set the autoscaling memory target value. | +| `hpa.memory.targetAverageUtilization` | | Set the autoscaling memory target utilization. 
| +| `hpa.targetAverageValue` | | **DEPRECATED** Set the autoscaling CPU target value | +| `ingress.enabled` | `true` if `global.kas.enabled=true` | You can use `kas.ingress.enabled` to explicitly turn it on or off. If not set, you can optionally use `global.ingress.enabled` for the same purpose. | +| `ingress.apiVersion` | | Value to use in the `apiVersion` field. | +| `ingress.annotations` | `{}` | Ingress annotations. | +| `ingress.tls` | `{}` | Ingress TLS configuration. | +| `ingress.agentPath` | `/` | Ingress path for the agent API endpoint. | +| `ingress.k8sApiPath` | `/k8s-proxy` | Ingress path for Kubernetes API endpoint. | +| `keda.enabled` | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `keda.pollingInterval` | `30` | The interval to check each trigger on | +| `keda.cooldownPeriod` | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `keda.minReplicaCount` | | Minimum number of replicas KEDA will scale the resource down to, defaults to `minReplicas` | +| `keda.maxReplicaCount` | | Maximum number of replicas KEDA will scale the resource up to, defaults to `maxReplicas` | +| `keda.fallback` | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `keda.hpaName` | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `keda.restoreToOriginalReplicaCount` | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `keda.behavior` | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `keda.triggers` | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | +| `metrics.enabled` | `true` | If a metrics endpoint should be made available for scraping. 
| `metrics.path` | `/metrics` | Metrics endpoint path. |
+| `metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable Prometheus Operator to manage the metrics scraping. Enabling removes the `prometheus.io` scrape annotations. It cannot be enabled together with `metrics.podMonitor.enabled`. |
+| `metrics.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the ServiceMonitor. |
+| `metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the ServiceMonitor. |
+| `metrics.podMonitor.enabled` | `false` | If a PodMonitor should be created to enable Prometheus Operator to manage the metrics scraping. Enabling removes the `prometheus.io` scrape annotations. It cannot be enabled together with `metrics.serviceMonitor.enabled`. |
+| `metrics.podMonitor.additionalLabels` | `{}` | Additional labels to add to the PodMonitor. |
+| `metrics.podMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the PodMonitor. |
+| `maxReplicas` | `10` | HPA `maxReplicas`. |
+| `maxUnavailable` | `1` | HPA `maxUnavailable`. |
+| `minReplicas` | `2` | HPA `minReplicas`. |
+| `nodeSelector` | | Define a [nodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) for the `Pod`s of this `Deployment`, if present. |
+| `observability.port` | `8151` | Observability endpoint port. Used for metrics and probe endpoints. |
+| `observability.livenessProbe.path` | `/liveness` | URI for the liveness probe endpoint. This value has to match the `observability.liveness_probe.url_path` value from the KAS service configuration. |
+| `observability.readinessProbe.path` | `/readiness` | URI for the readiness probe endpoint. This value has to match the `observability.readiness_probe.url_path` value from the KAS service configuration. |
+| `serviceAccount.annotations` | `{}` | Service account annotations. |
+| `podLabels` | `{}` | Supplemental Pod labels. 
Not used for selectors. | +| `serviceLabels` | `{}` | Supplemental service labels. | +| `common.labels` | | Supplemental labels that are applied to all objects created by this chart. | +| `redis.enabled` | `true` | Allows opting-out of using Redis for KAS features. Warnings: Redis will become a hard dependency soon, so this key is already deprecated. | +| `resources.requests.cpu` | `75m` | GitLab Exporter minimum CPU. | +| `resources.requests.memory` | `100M` | GitLab Exporter minimum memory. | +| `service.externalPort` | `8150` | External port (for `agentk` connections). | +| `service.internalPort` | `8150` | Internal port (for `agentk` connections). | +| `service.apiInternalPort` | `8153` | Internal port for the internal API (for GitLab backend). | +| `service.loadBalancerIP` | `nil` | A custom load balancer IP when `service.type` is `LoadBalancer`. | +| `service.loadBalancerSourceRanges` | `nil` | A list of custom load balancer source ranges when `service.type` is `LoadBalancer`. | +| `service.kubernetesApiPort` | `8154` | External port to expose proxied Kubernetes API on. | +| `service.privateApiPort` | `8155` | Internal port to expose `kas`' private API on (for `kas` -> `kas` communication). | +| `serviceAccount.annotations` | `{}` | ServiceAccount annotations. | +| `serviceAccount.automountServiceAccountToken`| `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods. | +| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created. | +| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount. | +| `serviceAccount.name` | | Name of the ServiceAccount. If not set, the full chart name is used. | +| `websocketToken.secret` | Autogenerated | The name of the secret to use for WebSocket Token signing and verification. | +| `websocketToken.key` | Autogenerated | The name of the key in `websocketToken.secret` to use. 
| +| `privateApi.secret` | Autogenerated | The name of the secret to use for authenticating with the database. | +| `privateApi.key` | Autogenerated | The name of the key in `privateApi.secret` to use. | +| `global.kas.service.apiExternalPort` | `8153` | External port for the internal API (for GitLab backend). | +| `service.type` | `ClusterIP` | Service type. | +| `tolerations` | `[]` | Toleration labels for pod assignment. | +| `customConfig` | `{}` | When given, merges the default `kas` configuration with these values giving precedence to those defined here. | +| `deployment.minReadySeconds` | `0` | Minimum number of seconds that must pass before a `kas` pod is considered ready. | +| `deployment.strategy` | `{}` | Allows one to configure the update strategy utilized by the deployment. | +| `deployment.terminationGracePeriodSeconds` | `300` | How much time in seconds a Pod is allowed to spend shutting down after receiving SIGTERM. | +| `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | + +## Enable TLS communication + +Enable TLS communication between your `kas` pods and other GitLab chart components, +through the [global KAS attribute](../../globals.md#tls-settings-1). + +## Test the `kas` chart + +To install the chart: + +1. Create your own Kubernetes cluster. +1. Check out the merge request's working branch. +1. Install (or upgrade) GitLab with `kas` enabled by default from your local chart branch: + + ```shell + helm upgrade --force --install gitlab . \ + --timeout 600s \ + --set global.hosts.domain=your.domain.com \ + --set global.hosts.externalIP=XYZ.XYZ.XYZ.XYZ \ + --set certmanager-issuer.email=your@email.com + ``` + +1. Use the GDK to run the process to configure and use the + [GitLab agent for Kubernetes](https://docs.gitlab.com/user/clusters/agent/): + (You can also follow the steps to configure and use the agent manually.) + + 1. 
From your GDK GitLab repository, move into the QA folder: `cd qa`. + 1. Run the following command to run the QA test: + + ```shell + GITLAB_USERNAME=$ROOT_USER + GITLAB_PASSWORD=$ROOT_PASSWORD + GITLAB_ADMIN_USERNAME=$ROOT_USER + GITLAB_ADMIN_PASSWORD=$ROOT_PASSWORD + bundle exec bin/qa Test::Instance::All https://your.gitlab.domain/ -- --tag orchestrated --tag quarantine qa/specs/features/ee/api/7_configure/kubernetes/kubernetes_agent_spec.rb + ``` + + You can also customize the `agentk` version to install with an environment variable: `GITLAB_AGENTK_VERSION=v13.7.1` + +## Configuring KEDA + +This `keda` section enables the installation of [KEDA](https://keda.sh/) `ScaledObjects` instead of regular `HorizontalPodAutoscalers`. +This configuration is optional and can be used when there is a need for autoscaling based on custom or external metrics. + +Most settings default to the values set in the `hpa` section where applicable. + +If the following are true, CPU and memory triggers are added automatically based on the CPU and memory thresholds set in the `hpa` section: + +- `triggers` is not set. +- The corresponding `request.cpu.request` or `request.memory.request` setting is also set to a non-zero value. + +If no triggers are set, the `ScaledObject` is not created. + +Refer to the [KEDA documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/) for more details about those settings. 
+ +| Name | Type | Default | Description | +| :---------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `pollingInterval` | Integer | `30` | The interval to check each trigger on | +| `cooldownPeriod` | Integer | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `minReplicaCount` | Integer | | Minimum number of replicas KEDA will scale the resource down to, defaults to `minReplicas` | +| `maxReplicaCount` | Integer | | Maximum number of replicas KEDA will scale the resource up to, defaults to `maxReplicas` | +| `fallback` | Map | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `hpaName` | String | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `restoreToOriginalReplicaCount` | Boolean | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `behavior` | Map | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `triggers` | Array | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. 
+ +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. | +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. | + +### affinity + +For more information, see [`affinity`](../_index.md#affinity). + +## Enable debug logging + +To enable debug logging for the KAS sub-chart, add the following to the `kas` section of your `values.yaml` file: + +```yaml +customConfig: + observability: + logging: + level: debug + grpc_level: debug +``` diff --git a/chart/doc/charts/gitlab/mailroom/_index.md b/chart/doc/charts/gitlab/mailroom/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..68d4ce60ab8af9efca70798fcd980bb8c3ed1df6 --- /dev/null +++ b/chart/doc/charts/gitlab/mailroom/_index.md @@ -0,0 +1,265 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the Mailroom chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The Mailroom Chart handles [incoming email](https://docs.gitlab.com/administration/incoming_email/). 
+ +## Configuration + +```yaml +image: + repository: registry.gitlab.com/gitlab-org/build/cng/gitlab-mailroom + # tag: v0.9.1 + pullSecrets: [] + # pullPolicy: IfNotPresent + +enabled: true + +init: + image: {} + # repository: + # tag: + resources: + requests: + cpu: 50m + +annotations: {} + +# Tolerations for pod scheduling +tolerations: [] +affinity: {} +podLabels: {} + +hpa: + minReplicas: 1 + maxReplicas: 2 + cpu: + targetAverageUtilization: 75 + + # Note that the HPA is limited to autoscaling/v2beta1, autoscaling/v2beta2 and autoscaling/v2 + customMetrics: [] + behavior: {} + +networkpolicy: + enabled: false + egress: + enabled: false + rules: [] + ingress: + enabled: false + rules: [] + annotations: {} + +resources: + # limits: + # cpu: 1 + # memory: 2G + requests: + cpu: 50m + memory: 150M + +## Allow to overwrite under which User and Group we're running. +securityContext: + runAsUser: 1000 + fsGroup: 1000 + +## Enable deployment to use a serviceAccount +serviceAccount: + enabled: false + create: false + annotations: {} + ## Name to be used for serviceAccount, otherwise defaults to chart fullname + # name: +``` + +| Parameter | Description | Default | +| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------- | +| `affinity` | `{}` | [Affinity rules](../_index.md#affinity) for pod assignment | +| `annotations` | Pod annotations. 
| `{}` |
+| `deployment.strategy` | Allows one to configure the update strategy utilized by the deployment | `{}` |
+| `enabled` | Mailroom enablement flag | `true` |
+| `hpa.behavior` | Behavior contains the specifications for up- and downscaling behavior (requires `autoscaling/v2beta2` or higher) | `{scaleDown: {stabilizationWindowSeconds: 300 }}` |
+| `hpa.customMetrics` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`) | `[]` |
+| `hpa.cpu.targetType` | Set the autoscaling CPU target type, must be either `Utilization` or `AverageValue` | `Utilization` |
+| `hpa.cpu.targetAverageValue` | Set the autoscaling CPU target value | |
+| `hpa.cpu.targetAverageUtilization` | Set the autoscaling CPU target utilization | `75` |
+| `hpa.memory.targetType` | Set the autoscaling memory target type, must be either `Utilization` or `AverageValue` | |
+| `hpa.memory.targetAverageValue` | Set the autoscaling memory target value | |
+| `hpa.memory.targetAverageUtilization` | Set the autoscaling memory target utilization | |
+| `hpa.maxReplicas` | Maximum number of replicas | `2` |
+| `hpa.minReplicas` | Minimum number of replicas | `1` |
+| `image.pullPolicy` | Mailroom image pull policy | `IfNotPresent` |
+| `extraEnvFrom` | List of extra environment variables from other data sources to expose | |
+| `image.pullSecrets` | Mailroom image pull secrets | |
+| `image.registry` | Mailroom image registry | |
+| `image.repository` | Mailroom image repository | `registry.gitlab.com/gitlab-org/build/cng/gitlab-mailroom` |
+| `image.tag` | Mailroom image tag | |
+| `init.image.repository` | Mailroom init image repository | |
+| `init.image.tag` | Mailroom init image tag | |
+| `init.resources` | Mailroom init container resource requirements | `{ requests: { cpu: 50m }}` |
+| `init.containerSecurityContext` | initContainer container specific [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) | |
+| `keda.enabled` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | `false` |
+| `keda.pollingInterval` | The interval to check each trigger on | `30` |
+| `keda.cooldownPeriod` | The period to wait after the last trigger reported active before scaling the resource back to 0 | `300` |
+| `keda.minReplicaCount` | Minimum number of replicas KEDA will scale the resource down to, defaults to `hpa.minReplicas` | |
+| `keda.maxReplicaCount` | Maximum number of replicas KEDA will scale the resource up to, defaults to `hpa.maxReplicas` | |
+| `keda.fallback` | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | |
+| `keda.hpaName` | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | |
+| `keda.restoreToOriginalReplicaCount` | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | |
+| `keda.behavior` | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | |
+| `keda.triggers` | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | |
+| `podLabels` | Labels for running Mailroom Pods | `{}` |
+| `common.labels` | Supplemental labels that are applied to all objects created by this chart. | `{}` |
+| `resources` | Mailroom resource requirements | `{ requests: { cpu: 50m, memory: 150M }}` |
+| `networkpolicy.annotations` | Annotations to add to the NetworkPolicy | `{}` |
+| `networkpolicy.egress.enabled` | Flag to enable egress rules of NetworkPolicy | `false` |
+| `networkpolicy.egress.rules` | Define a list of egress rules for NetworkPolicy | `[]` |
+| `networkpolicy.enabled` | Flag for using NetworkPolicy | `false` |
+| `networkpolicy.ingress.enabled` | Flag to enable `ingress` rules of NetworkPolicy | `false` |
+| `networkpolicy.ingress.rules` | Define a list of `ingress` rules for NetworkPolicy | `[]` |
+| `securityContext.fsGroup` | Group ID under which the pod should be started | `1000` |
+| `securityContext.runAsUser` | User ID under which the pod should be started | `1000` |
+| `securityContext.fsGroupChangePolicy` | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | |
+| `containerSecurityContext` | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | |
+| `containerSecurityContext.runAsUser` | Allow to overwrite the specific security context under which the container is started | `1000` |
+| `serviceAccount.annotations` | Annotations for ServiceAccount | `{}` |
+| `serviceAccount.automountServiceAccountToken`| Indicates whether or not the default ServiceAccount access token should be mounted in pods | `false` |
+| `serviceAccount.enabled` | Indicates whether or not to use a ServiceAccount | `false` |
+| `serviceAccount.create` | Indicates whether or not a ServiceAccount should be created | `false` |
+| `serviceAccount.name` | Name of the ServiceAccount.
If not set, the full chart name is used | | +| `tolerations` | Tolerations to add to the Mailroom | | +| `priorityClassName` | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | | + +## Configuring KEDA + +This `keda` section enables the installation of [KEDA](https://keda.sh/) `ScaledObjects` instead of regular `HorizontalPodAutoscalers`. +This configuration is optional and can be used when there is a need for autoscaling based on custom or external metrics. + +Most settings default to the values set in the `hpa` section where applicable. + +If the following are true, CPU and memory triggers are added automatically based on the CPU and memory thresholds set in the `hpa` section: + +- `triggers` is not set. +- The corresponding `request.cpu.request` or `request.memory.request` setting is also set to a non-zero value. + +If no triggers are set, the `ScaledObject` is not created. + +Refer to the [KEDA documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/) for more details about those settings. 
+ +| Name | Type | Default | Description | +| :---------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `pollingInterval` | Integer | `30` | The interval to check each trigger on | +| `cooldownPeriod` | Integer | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `minReplicaCount` | Integer | | Minimum number of replicas KEDA will scale the resource down to, defaults to `hpa.minReplicas` | +| `maxReplicaCount` | Integer | | Maximum number of replicas KEDA will scale the resource up to, defaults to `hpa.maxReplicas` | +| `fallback` | Map | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `hpaName` | String | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `restoreToOriginalReplicaCount` | Boolean | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `behavior` | Map | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `triggers` | Array | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | + +## Incoming email + +By default, incoming email is disabled. There are two methods for +reading incoming email: + +- [IMAP](#imap) +- [Microsoft Graph](#microsoft-graph) + +First, enable it by setting the [common settings](../../../installation/command-line-options.md#common-settings). 
+Then configure the [IMAP settings](../../../installation/command-line-options.md#imap-settings) or +[Microsoft Graph settings](../../../installation/command-line-options.md#microsoft-graph-settings). + +These methods can be configured in `values.yaml`. See the following examples: + +- [Incoming email with IMAP](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/email/values-incoming-email.yaml) +- [Incoming email with Microsoft Graph](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/email/values-msgraph.yaml) + +### IMAP + +To enable incoming e-mail for IMAP, provide details of your IMAP server +and access credentials using the `global.appConfig.incomingEmail` +settings. + +In addition, the [requirements for the IMAP email account](https://docs.gitlab.com/administration/incoming_email/) +should be reviewed to ensure that the targeted IMAP account can be used +by GitLab for receiving email. Several common email services are also +documented on the same page to aid in setting up incoming email. + +The IMAP password will still need to be created as a Kubernetes Secret as +described in the [secrets guide](../../../installation/secrets.md#imap-password-for-incoming-emails). + +### Microsoft Graph + +See the [GitLab documentation on creating an Azure Active Directory application](https://docs.gitlab.com/administration/incoming_email/#microsoft-graph). + +Provide the tenant ID, client ID, and client secret. You can find details for these settings in the [command line options](../../../installation/command-line-options.md#incoming-email-configuration). + +Create a Kubernetes secret containing the client secret as described in the [secrets guide](../../../installation/secrets.md#microsoft-graph-client-secret-for-incoming-emails). 
+ +### Reply-by-email + +To use the reply-by-email feature, where users can reply to notification emails to +comment on issues and MRs, you need to configure both [outgoing email](../../../installation/command-line-options.md#outgoing-email-configuration) +and incoming email settings. + +### Service Desk email + +By default, the Service Desk email is disabled. + +As with incoming e-mail, enable it by setting the [common settings](../../../installation/command-line-options.md#common-settings-1). +Then configure the [IMAP settings](../../../installation/command-line-options.md#imap-settings-1) or +[Microsoft Graph settings](../../../installation/command-line-options.md#microsoft-graph-settings-1). + +These options can also be configured in `values.yaml`. See the following examples: + +- [Service Desk with IMAP](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/email/values-service-desk-email.yaml) +- [Service Desk with Microsoft Graph](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/email/values-msgraph.yaml) + +Service Desk email _requires_ that [Incoming email](#incoming-email) be configured. + +#### IMAP + +Provide details of your IMAP server and access credentials using the +`global.appConfig.serviceDeskEmail` settings. You can find details for +these settings in the [command line options](../../../installation/command-line-options.md#service-desk-email-configuration). + +Create a Kubernetes secret containing IMAP password as described in the [secrets guide](../../../installation/secrets.md#imap-password-for-service-desk-emails). + +#### Microsoft Graph + +See the [GitLab documentation on creating an Azure Active Directory application](https://docs.gitlab.com/administration/incoming_email/#microsoft-graph). + +Provide the tenant ID, client ID, and client secret using the +`global.appConfig.serviceDeskEmail` settings. 
You can find details for
+these settings in the [command line options](../../../installation/command-line-options.md#service-desk-email-configuration).
+
+You will also have to create a Kubernetes secret containing the client secret
+as described in the [secrets guide](../../../installation/secrets.md#microsoft-graph-client-secret-for-service-desk-emails).
+
+### serviceAccount
+
+This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods.
+
+| Name | Type | Default | Description |
+| :----------------------------- | :-----: | :------ | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `annotations` | Map | `{}` | ServiceAccount annotations. |
+| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). |
+| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. |
+| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. |
+| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. |
+
+### affinity
+
+For more information, see [`affinity`](../_index.md#affinity).
diff --git a/chart/doc/charts/gitlab/migrations/_index.md b/chart/doc/charts/gitlab/migrations/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..9ffb5e73c5a9c17eef99e93b66f4705d64cf009c --- /dev/null +++ b/chart/doc/charts/gitlab/migrations/_index.md @@ -0,0 +1,269 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab-Migrations chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `migrations` sub-chart provides a single migration [Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) that handles seeding/migrating the GitLab database. The chart runs using the GitLab Rails codebase. + +After migrating, this Job also edits the application settings in the database to turn off [writes to authorized keys file](https://docs.gitlab.com/administration/operations/fast_ssh_key_lookup/#setting-up-fast-lookup-via-gitlab-shell). In the charts we are only supporting use of the GitLab Authorized Keys API with the SSH `AuthorizedKeysCommand` instead of support for writing to an authorized keys file. + +## Requirements + +This chart depends on Redis, and PostgreSQL, either as part of the complete GitLab chart or provided as external services reachable from the Kubernetes cluster this chart is deployed onto. + +## Design Choices + +The `migrations` creates a new migrations [Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) each time the chart is deployed. In order to prevent job name collisions, we append the chart revision, and a random alpha-numeric value to the Job name each time is created. The purpose of the random text is described further in this section. + +For now we also have the jobs remain as objects in the cluster after they complete. 
This is so we can observe the migration logs. Currently this means these Jobs persist even after a `helm uninstall`. This is one of the reasons why we append random text to the Job name, so that future deployments using the same release name don't cause conflicts. Once we have some form of log-shipping in place, we can revisit the persistence of these objects. + +The container used in this chart has some additional optimizations that we are not currently using in this Chart. Mainly the ability to quickly skip running migrations if they are already up to date, without needing to boot up the rails application to check. This optimization requires us to persist the migration status. Which we are not doing with this chart at the moment. In the future we will introduce storage support for the migrations status to this chart. + +## Configuration + +The `migrations` chart is configured in two parts: external services, and chart settings. + +## Installation command line options + +Table below contains all the possible charts configurations that can be supplied to `helm install` command using the `--set` flags + +| Parameter | Description | Default | +| --------------------------- | ---------------------------------------- | ---------------- | +| `common.labels` | Supplemental labels that are applied to all objects created by this chart. 
| `{}` |
+| `image.repository` | Migrations image repository | `registry.gitlab.com/gitlab-org/build/cng/gitlab-toolbox-ee` |
+| `image.tag` | Migrations image tag | |
+| `image.pullPolicy` | Migrations pull policy | `Always` |
+| `image.pullSecrets` | Secrets for the image repository | |
+| `init.image.repository` | initContainer image repository | `registry.gitlab.com/gitlab-org/build/cng/gitlab-base` |
+| `init.image.tag` | initContainer image tag | `master` |
+| `init.image.containerSecurityContext` | init container securityContext overrides | `{}` |
+| `init.containerSecurityContext.allowPrivilegeEscalation` | initContainer specific: Controls whether a process can gain more privileges than its parent process | `false` |
+| `init.containerSecurityContext.runAsNonRoot` | initContainer specific: Controls whether the container runs with a non-root user | `true` |
+| `init.containerSecurityContext.capabilities.drop` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | `[ "ALL" ]` |
+| `enabled` | Migrations enable flag | `true` |
+| `tolerations` | Toleration labels for pod assignment | `[]` |
+| `affinity` | [Affinity rules](../_index.md#affinity) for pod assignment | `{}` |
+| `annotations` | Annotations for the job spec | `{}` |
+| `podAnnotations` | Annotations for the pod spec | `{}` |
+| `podLabels` | Supplemental Pod labels. Will not be used for selectors. | |
+| `redis.serviceName` | Redis service name | `redis` |
+| `psql.serviceName` | Name of Service providing PostgreSQL | `release-postgresql` |
+| `psql.password.secret` | psql secret | `gitlab-postgres` |
+| `psql.password.key` | key to psql password in psql secret | `psql-password` |
+| `psql.port` | Set PostgreSQL server port.
Takes precedence over `global.psql.port` | | +| `resources.requests.cpu` | GitLab Migrations minimum CPU | `250m` | +| `resources.requests.memory` | GitLab Migrations minimum memory | `200Mi` | +| `securityContext.fsGroup` | Group ID under which the pod should be started | `1000` | +| `securityContext.runAsUser` | User ID under which the pod should be started | `1000` | +| `securityContext.fsGroupChangePolicy` | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | | +| `securityContext.seccompProfile.type` | Seccomp profile to use | `RuntimeDefault` | +| `containerSecurityContext.runAsUser` | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | `1000` | +| `containerSecurityContext.allowPrivilegeEscalation` | Controls whether a process of the container can gain more privileges than its parent process | `false` | +| `containerSecurityContext.runAsNonRoot` | Controls whether the container runs with a non-root user | `true` | +| `containerSecurityContext.capabilities.drop` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | `[ "ALL" ]` | +| `serviceAccount.annotations` | ServiceAccount annotations | `{}` | +| `serviceAccount.automountServiceAccountToken`| Indicates whether or not the default ServiceAccount access token should be mounted in pods | `false` | +| `serviceAccount.create` | Indicates whether or not a ServiceAccount should be created | `false` | +| `serviceAccount.enabled` | Indicates whether or not to use a ServiceAccount | `false` | +| `serviceAccount.name` | Name of the ServiceAccount. 
If not set, the full chart name is used | | +| `extraInitContainers` | List of extra init containers to include | | +| `extraContainers` | Multiline literal style string containing a list of containers to include | | +| `extraVolumes` | List of extra volumes to create | | +| `extraVolumeMounts` | List of extra volumes mounts to do | | +| `extraEnv` | List of extra environment variables to expose | | +| `extraEnvFrom` | List of extra environment variables from other data sources to expose| | +| `priorityClassName` | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | | + +## Chart configuration examples + +### extraEnv + +`extraEnv` allows you to expose additional environment variables in all containers in the pods. + +Below is an example use of `extraEnv`: + +```yaml +extraEnv: + SOME_KEY: some_value + SOME_OTHER_KEY: some_other_value +``` + +When the container is started, you can confirm that the environment variables are exposed: + +```shell +env | grep SOME +SOME_KEY=some_value +SOME_OTHER_KEY=some_other_value +``` + +### extraEnvFrom + +`extraEnvFrom` allows you to expose additional environment variables from other data sources in all containers in the pods. + +Below is an example use of `extraEnvFrom`: + +```yaml +extraEnvFrom: + MY_NODE_NAME: + fieldRef: + fieldPath: spec.nodeName + MY_CPU_REQUEST: + resourceFieldRef: + containerName: test-container + resource: requests.cpu + SECRET_THING: + secretKeyRef: + name: special-secret + key: special_token + # optional: boolean + CONFIG_STRING: + configMapKeyRef: + name: useful-config + key: some-string + # optional: boolean +``` + +### image.pullSecrets + +`pullSecrets` allow you to authenticate to a private registry to pull images for a pod. 
+ +Additional details about private registries and their authentication methods +can be found in [the Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). + +Below is an example use of `pullSecrets`: + +```YAML +image: + repository: my.migrations.repository + pullPolicy: Always + pullSecrets: + - name: my-secret-name + - name: my-secondary-secret-name +``` + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. + +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. | +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. | + +### affinity + +For more information, see [`affinity`](../_index.md#affinity). + +## Using the Community Edition of this chart + +By default, the Helm charts use the Enterprise Edition of GitLab. If desired, you can instead use the Community Edition. Learn more about the [difference between the two](https://about.gitlab.com/install/ce-or-ee/). 
+ +In order to use the Community Edition, set `image.repository` to `registry.gitlab.com/gitlab-org/build/cng/gitlab-toolbox-ce` + +## External Services + +### Redis + +```YAML +redis: + host: redis.example.com + serviceName: redis + port: 6379 + sentinels: + - host: sentinel1.example.com + port: 26379 + password: + secret: gitlab-redis + key: redis-password +``` + +#### host + +The hostname of the Redis server with the database to use. This can be omitted in lieu of `serviceName`. If using Redis Sentinels, the `host` attribute needs to be set to the cluster name as specified in the `sentinel.conf`. + +#### serviceName + +The name of the `service` which is operating the Redis database. If this is present, and `host` is not, the chart will template the hostname of the service (and current `.Release.Name`) in place of the `host` value. This is convenient when using Redis as a part of the overall GitLab chart. This will default to `redis` + +#### port + +The port on which to connect to the Redis server. Defaults to `6379`. + +#### password + +The `password` attribute for Redis has two sub keys: + +- `secret` defines the name of the Kubernetes `Secret` to pull from +- `key` defines the name of the key in the above secret that contains the password. + +#### sentinels + +The `sentinels` attribute allows for a connection to a Redis HA cluster. +The sub keys describe each Sentinel connection. + +- `host` defines the hostname for the Sentinel service +- `port` defines the port number to reach the Sentinel service, defaults to `26379` + +_Note:_ The current Redis Sentinel support only supports Sentinels that have +been deployed separately from the GitLab chart. As a result, the Redis +deployment through the GitLab chart should be disabled with `redis.install=false`. +The Secret containing the Redis password will need to be manually created +before deploying the GitLab chart. 
+
+### PostgreSQL
+
+```yaml
+psql:
+  host: psql.example.com
+  serviceName: pgbouncer
+  port: 5432
+  database: gitlabhq_production
+  username: gitlab
+  preparedStatements: false
+  password:
+    secret: gitlab-postgres
+    key: psql-password
+```
+
+#### host
+
+The hostname of the PostgreSQL server with the database to use. This can be omitted if `postgresql.install=true` (default non-production).
+
+#### serviceName
+
+The name of the service which is operating the PostgreSQL database. If this is present, and `host` is not, the chart will template the hostname of the service in place of the `host` value.
+
+#### port
+
+The port on which to connect to the PostgreSQL server. Defaults to `5432`.
+
+#### database
+
+The name of the database to use on the PostgreSQL server. This defaults to `gitlabhq_production`.
+
+#### preparedStatements
+
+If prepared statements should be used when communicating with the PostgreSQL server. Defaults to `false`.
+
+#### username
+
+The username with which to authenticate to the database. This defaults to `gitlab`.
+
+#### password
+
+The `password` attribute for PostgreSQL has two sub keys:
+
+- `secret` defines the name of the Kubernetes `Secret` to pull from
+- `key` defines the name of the key in the above secret that contains the password.
diff --git a/chart/doc/charts/gitlab/praefect/_index.md b/chart/doc/charts/gitlab/praefect/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..af75a3f0d62ed4193116aac8252e481ecac1fe32 --- /dev/null +++ b/chart/doc/charts/gitlab/praefect/_index.md @@ -0,0 +1,353 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the Praefect chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed +- Status: Experiment + +{{< /details >}} + +{{< alert type="warning" >}} + +The Praefect chart is still under development. This experimental version is not yet suitable for production use. Upgrades may require significant manual intervention. +See our [Praefect GA release Epic](https://gitlab.com/groups/gitlab-org/charts/-/epics/33) for more information. + +{{< /alert >}} + +The Praefect chart is used to manage a [Gitaly cluster](https://docs.gitlab.com/administration/gitaly/praefect/) inside a GitLab installment deployed with the Helm charts. + +## Known limitations and issues + +1. The database has to be [manually created](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2310). +1. The cluster size is fixed: [Gitaly Cluster does not currently support autoscaling](https://gitlab.com/gitlab-org/gitaly/-/issues/2997). +1. Using a Praefect instance in the cluster to manage Gitaly instances outside the cluster is [not supported](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2662). + +## Requirements + +This chart consumes the Gitaly chart. Settings from `global.gitaly` are used to configure the instances created by this chart. Documentation of these settings can be found in [Gitaly chart documentation](../gitaly/_index.md). + +_Important_: `global.gitaly.tls` is independent of `global.praefect.tls`. They are configured separately. 
+ +By default, this chart will create 3 Gitaly Replicas. + +## Configuration + +The chart is disabled by default. To enable it as part of a chart deploy set `global.praefect.enabled=true`. + +### Replicas + +The default number of replicas to deploy is 3. This can be changed by setting `global.praefect.virtualStorages[].gitalyReplicas` with the desired number of replicas. For example: + +```yaml +global: + praefect: + enabled: true + virtualStorages: + - name: default + gitalyReplicas: 4 + maxUnavailable: 1 +``` + +### Multiple virtual storages + +Multiple virtual storages can be configured (see [Gitaly Cluster](https://docs.gitlab.com/administration/gitaly/praefect/) documentation). For example: + +```yaml +global: + praefect: + enabled: true + virtualStorages: + - name: default + gitalyReplicas: 4 + maxUnavailable: 1 + - name: vs2 + gitalyReplicas: 5 + maxUnavailable: 2 +``` + +This will create two sets of resources for Gitaly. This includes two Gitaly StatefulSets (one per virtual storage). + +Administrators can then [configure where new repositories are stored](https://docs.gitlab.com/administration/repository_storage_paths/#configure-where-new-repositories-are-stored). + +### Persistence + +It is possible to provide persistence configuration per virtual storage. + +```yaml +global: + praefect: + enabled: true + virtualStorages: + - name: default + gitalyReplicas: 4 + maxUnavailable: 1 + persistence: + enabled: true + size: 50Gi + accessMode: ReadWriteOnce + storageClass: storageclass1 + - name: vs2 + gitalyReplicas: 5 + maxUnavailable: 2 + persistence: + enabled: true + size: 100Gi + accessMode: ReadWriteOnce + storageClass: storageclass2 +``` + +## defaultReplicationFactor + +`defaultReplicationFactor` can be configured on each virtual storages. (see [configure replication-factor](https://docs.gitlab.com/administration/gitaly/praefect/#configure-replication-factor) documentation). 
+ +```yaml +global: + praefect: + enabled: true + virtualStorages: + - name: default + gitalyReplicas: 5 + maxUnavailable: 2 + defaultReplicationFactor: 3 + - name: secondary + gitalyReplicas: 4 + maxUnavailable: 1 + defaultReplicationFactor: 2 +``` + +### Migrating to Praefect + +{{< alert type="note" >}} + +Group wikis [cannot be moved by using the API](https://docs.gitlab.com/api/project_repository_storage_moves/). + +{{< /alert >}} + +When migrating from standalone Gitaly instances to a Praefect setup, `global.praefect.replaceInternalGitaly` can be set to `false`. +This ensures that the existing Gitaly instances are preserved while the new Praefect-managed Gitaly instances are created. + +```yaml +global: + praefect: + enabled: true + replaceInternalGitaly: false + virtualStorages: + - name: virtualStorage2 + gitalyReplicas: 5 + maxUnavailable: 2 +``` + +{{< alert type="note" >}} + +When migrating to Praefect, none of Praefect's virtual storages can be named `default`. +This is because there must be at least one storage named `default` at all times, +therefore the name is already taken by the non-Praefect configuration. + +{{< /alert >}} + +The instructions to [migrate to Gitaly Cluster](https://docs.gitlab.com/administration/gitaly/#migrating-to-gitaly-cluster) +can then be followed to move data from the `default` storage to `virtualStorage2`. If additional storages +were defined under `global.gitaly.internal.names`, be sure to migrate repositories from those storages as well. + +After the repositories have been migrated to `virtualStorage2`, `replaceInternalGitaly` can be set back to `true` if a storage named +`default` is added in the Praefect configuration. 
+ +```yaml +global: + praefect: + enabled: true + replaceInternalGitaly: true + virtualStorages: + - name: default + gitalyReplicas: 4 + maxUnavailable: 1 + - name: virtualStorage2 + gitalyReplicas: 5 + maxUnavailable: 2 +``` + +The instructions to [migrate to Gitaly Cluster](https://docs.gitlab.com/administration/gitaly/#migrating-to-gitaly-cluster) +can be followed again to move data from `virtualStorage2` to the newly-added `default` storage if desired. + +Finally, see the [repository storage paths documentation](https://docs.gitlab.com/administration/repository_storage_paths/#choose-where-new-repositories-are-stored) +to configure where new repositories are stored. + +### Creating the database + +Praefect uses its own database to track its state. This has to be manually created in order for Praefect to be functional. + +{{< alert type="note" >}} + +These instructions assume you are using the bundled PostgreSQL server. If you are using your own server, +there will be some variation in how you connect. + +{{< /alert >}} + +1. Log into your database instance: + + ```shell + kubectl exec -it $(kubectl get pods -l app.kubernetes.io/name=postgresql -o custom-columns=NAME:.metadata.name --no-headers) -- bash + ``` + + ```shell + PGPASSWORD=$(echo $POSTGRES_POSTGRES_PASSWORD) psql -U postgres -d template1 + ``` + +1. Create the database user: + + ```sql + CREATE ROLE praefect WITH LOGIN; + ``` + +1. Set the database user password. + + By default, the `shared-secrets` Job will generate a secret for you. + + 1. Fetch the password: + + ```shell + kubectl get secret RELEASE_NAME-praefect-dbsecret -o jsonpath="{.data.secret}" | base64 --decode + ``` + + 1. Set the password in the `psql` prompt: + + ```sql + \password praefect + ``` + +1. Create the database: + + ```sql + CREATE DATABASE praefect WITH OWNER praefect; + ``` + +### Running Praefect over TLS + +Praefect supports communicating with client and Gitaly nodes over TLS. 
This is +controlled by the settings `global.praefect.tls.enabled` and `global.praefect.tls.secretName`. +To run Praefect over TLS follow these steps: + +1. The Helm chart expects a certificate to be provided for communicating over + TLS with Praefect. This certificate should apply to all the Praefect nodes that + are present. Hence all hostnames of each of these nodes should be added as a + Subject Alternate Name (SAN) to the certificate or alternatively, you can use wildcards. + + To know the hostnames to use, check the file `/srv/gitlab/config/gitlab.yml` + file in the Toolbox Pod and check the various `gitaly_address` fields specified + under `repositories.storages` key within it. + + ```shell + kubectl exec -it <Toolbox Pod> -- grep gitaly_address /srv/gitlab/config/gitlab.yml + ``` + +{{< alert type="note" >}} + +A basic script for generating custom signed certificates for internal Praefect Pods +[can be found in this repository](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/scripts/generate_certificates.sh). +Users can use or refer that script to generate certificates with proper SAN attributes. + +{{< /alert >}} + +1. Create a TLS Secret using the certificate created. + + ```shell + kubectl create secret tls <secret name> --cert=praefect.crt --key=praefect.key + ``` + +1. Redeploy the Helm chart by passing `--set global.praefect.tls.enabled=true`. + +When running Gitaly over TLS, a secret name must be provided for each virtual storage. + +```yaml +global: + gitaly: + tls: + enabled: true + praefect: + enabled: true + tls: + enabled: true + secretName: praefect-tls + virtualStorages: + - name: default + gitalyReplicas: 4 + maxUnavailable: 1 + tlsSecretName: default-tls + - name: vs2 + gitalyReplicas: 5 + maxUnavailable: 2 + tlsSecretName: vs2-tls +``` + +### Installation command line options + +The table below contains all the possible charts configurations that can be supplied to +the `helm install` command using the `--set` flags. 
+ +| Parameter | Default | Description | +| ----------------------------------------- | ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| common.labels | `{}` | Supplemental labels that are applied to all objects created by this chart. | +| failover.enabled | true | Whether Praefect should perform failover on node failure | +| failover.readonlyAfter | false | Whether the nodes should be in read-only mode after failover | +| autoMigrate | true | Automatically run migrations on startup | +| image.repository | `registry.gitlab.com/gitlab-org/build/cng/gitaly` | The default image repository to use. Praefect is bundled as part of the Gitaly image | +| podLabels | `{}` | Supplemental Pod labels. Will not be used for selectors. | +| ntpHost | `pool.ntp.org` | Configure the NTP server Praefect should ask for the current time. 
| +| service.name | `praefect` | The name of the service to create | +| service.type | ClusterIP | The type of service to create | +| service.internalPort | 8075 | The internal port number that the Praefect pod will be listening on | +| service.externalPort | 8075 | The port number the Praefect service should expose in the cluster | +| init.resources | | | +| init.image | | | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| extraEnvFrom | |List of extra environment variables from other data sources to expose | +| logging.level | | Log level | +| logging.format | `json` | Log format | +| logging.sentryDsn | | Sentry DSN URL - Exceptions from Go server | +| logging.sentryEnvironment | | Sentry environment to be used for logging | +| `metrics.enabled` | `true` | If a metrics endpoint should be made available for scraping | +| `metrics.port` | `9236` | Metrics endpoint port | +| `metrics.separate_database_metrics` | `true` | If true then metrics scrapes will not perform database queries, setting to false [may cause performance problems](https://gitlab.com/gitlab-org/gitaly/-/issues/3796) | +| `metrics.path` | `/metrics` | Metrics endpoint path | +| `metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable Prometheus Operator to manage the metrics scraping, note that enabling this removes the `prometheus.io` scrape annotations | +| `affinity` | `{}` | [Affinity rules](../_index.md#affinity) for pod assignment | +| `metrics.serviceMonitor.additionalLabels` | `{}` | 
Additional labels to add to the ServiceMonitor | +| `metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the ServiceMonitor | +| securityContext.runAsUser | 1000 | | +| securityContext.fsGroup | 1000 | | +| securityContext.fsGroupChangePolicy | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `serviceAccount.annotations` | `{}` | ServiceAccount annotations | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created | +| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount | +| `serviceAccount.name` | | Name of the ServiceAccount. If not set, the full chart name is used | +| serviceLabels | `{}` | Supplemental service labels | +| statefulset.strategy | `{}` | Allows one to configure the update strategy utilized by the statefulset | + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. 
+ +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. | +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. | + +### affinity + +For more information, see [`affinity`](../_index.md#affinity). diff --git a/chart/doc/charts/gitlab/sidekiq/_index.md b/chart/doc/charts/gitlab/sidekiq/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..0ce2bac3744d7b4383115b658d9f80e981bc4f73 --- /dev/null +++ b/chart/doc/charts/gitlab/sidekiq/_index.md @@ -0,0 +1,731 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab-Sidekiq chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `sidekiq` sub-chart provides configurable deployment of Sidekiq workers, explicitly +designed to provide separation of queues across multiple `Deployment`s with individual +scalability and configuration. + +While this chart provides a default `pods:` declaration, if you provide an empty definition, +you will have *no* workers. 
+ +## Requirements + +This chart depends on access to Redis, PostgreSQL, and Gitaly services, either as +part of the complete GitLab chart or provided as external services reachable from +the Kubernetes cluster this chart is deployed onto. + +## Design Choices + +This chart creates multiple `Deployment`s and associated `ConfigMap`s. It was decided +that it would be clearer to make use of `ConfigMap` behaviours instead of using `environment` +attributes or additional arguments to the `command` for the containers, in order to +avoid any concerns about command length. This choice results in a large number of +`ConfigMap`s, but provides very clear definitions of what each pod should be doing. + +## Configuration + +The `sidekiq` chart is configured in three parts: chart-wide [external services](#external-services), +[chart-wide defaults](#chart-wide-defaults), and [per-pod definitions](#per-pod-settings). + +## Installation command line options + +The table below contains all the possible charts configurations that can be supplied +to the `helm install` command using the `--set` flags: + +| Parameter | Default | Description | +| ------------------------------------------ | ------------------------------------------------------------ |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `annotations` | | Pod annotations | +| `podLabels` | | Supplemental Pod labels. Will not be used for selectors. | +| `common.labels` | | Supplemental labels that are applied to all objects created by this chart. | +| `concurrency` | `20` | Sidekiq default concurrency | +| `deployment.strategy` | `{}` | Allows one to configure the update strategy utilized by the deployment | +| `deployment.terminationGracePeriodSeconds` | `30` | Optional duration in seconds the pod needs to terminate gracefully. 
| +| `enabled` | `true` | Sidekiq enabled flag | +| `extraContainers` | | Multiline literal style string containing a list of containers to include | +| `extraInitContainers` | | List of extra init containers to include | +| `extraVolumeMounts` | | String template of extra volume mounts to configure | +| `extraVolumes` | | String template of extra volumes to configure | +| `extraEnv` | | List of extra environment variables to expose | +| `extraEnvFrom` | | List of extra environment variables from other data sources to expose | +| `gitaly.serviceName` | `gitaly` | Gitaly service name | +| `health_checks.port` | `3808` | Health check server port | +| `hpa.behaviour` | `{scaleDown: {stabilizationWindowSeconds: 300 }}` | Behavior contains the specifications for up- and downscaling behavior (requires `autoscaling/v2beta2` or higher) | +| `hpa.customMetrics` | `[]` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`) | +| `hpa.cpu.targetType` | `AverageValue` | Set the autoscaling CPU target type, must be either `Utilization` or `AverageValue` | +| `hpa.cpu.targetAverageValue` | `350m` | Set the autoscaling CPU target value | +| `hpa.cpu.targetAverageUtilization` | | Set the autoscaling CPU target utilization | +| `hpa.memory.targetType` | | Set the autoscaling memory target type, must be either `Utilization` or `AverageValue` | +| `hpa.memory.targetAverageValue` | | Set the autoscaling memory target value | +| `hpa.memory.targetAverageUtilization` | | Set the autoscaling memory target utilization | +| `hpa.targetAverageValue` | | **DEPRECATED** Set the autoscaling CPU target value | +| `keda.enabled` | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `keda.pollingInterval` | `30` | The interval to check each trigger on | +| `keda.cooldownPeriod` | `300` | The period to wait after 
the last trigger reported active before scaling the resource back to 0 | +| `keda.minReplicaCount` | | Minimum number of replicas KEDA will scale the resource down to, defaults to `minReplicas` | +| `keda.maxReplicaCount` | | Maximum number of replicas KEDA will scale the resource up to, defaults to `maxReplicas` | +| `keda.fallback` | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `keda.hpaName` | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `keda.restoreToOriginalReplicaCount` | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `keda.behavior` | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `keda.triggers` | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | +| `minReplicas` | `2` | Minimum number of replicas | +| `maxReplicas` | `10` | Maximum number of replicas | +| `maxUnavailable` | `1` | Limit of maximum number of Pods to be unavailable | +| `image.pullPolicy` | `Always` | Sidekiq image pull policy | +| `image.pullSecrets` | | Secrets for the image repository | +| `image.repository` | `registry.gitlab.com/gitlab-org/build/cng/gitlab-sidekiq-ee` | Sidekiq image repository | +| `image.tag` | | Sidekiq image tag | +| `init.image.repository` | | initContainer image | +| `init.image.tag` | | initContainer image tag | +| `init.containerSecurityContext` | | initContainer specific [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) | +| `init.containerSecurityContext.runAsUser` | `1000` | initContainer specific: User ID under which the container should be started | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a 
process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `logging.format` | `json` | Set to `text` for non-JSON logs | +| `metrics.enabled` | `true` | If a metrics endpoint should be made available for scraping | +| `metrics.port` | `3807` | Metrics endpoint port | +| `metrics.path` | `/metrics` | Metrics endpoint path | +| `metrics.log_enabled` | `false` | Enables or disables metrics server logs written to `sidekiq_exporter.log` | +| `metrics.podMonitor.enabled` | `false` | If a PodMonitor should be created to enable Prometheus Operator to manage the metrics scraping | +| `metrics.podMonitor.additionalLabels` | `{}` | Additional labels to add to the PodMonitor | +| `metrics.podMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the PodMonitor | +| `metrics.annotations` | | **DEPRECATED** Set explicit metrics annotations. Replaced by template content. | +| `metrics.tls.enabled` | `false` | TLS enabled for the `metrics/sidekiq_exporter` endpoint | +| `metrics.tls.secretName` | `{Release.Name}-sidekiq-metrics-tls` | Secret for the `metrics/sidekiq_exporter` endpoint TLS cert and key | +| `psql.password.key` | `psql-password` | key to psql password in psql secret | +| `psql.password.secret` | `gitlab-postgres` | psql password secret | +| `psql.port` | | Set PostgreSQL server port. 
Takes precedence over `global.psql.port` | +| `redis.serviceName` | `redis` | Redis service name | +| `resources.requests.cpu` | `900m` | Sidekiq minimum needed CPU | +| `resources.requests.memory` | `2G` | Sidekiq minimum needed memory | +| `resources.limits.memory` | | Sidekiq maximum allowed memory | +| `timeout` | `25` | Sidekiq job timeout | +| `tolerations` | `[]` | Toleration labels for pod assignment | +| `memoryKiller.daemonMode` | `true` | If `false`, uses the legacy memory killer mode | +| `memoryKiller.maxRss` | `2000000` | Maximum RSS before delayed shutdown triggered expressed in kilobytes | +| `memoryKiller.graceTime` | `900` | Time to wait before a triggered shutdown expressed in seconds | +| `memoryKiller.shutdownWait` | `30` | Amount of time after triggered shutdown for existing jobs to finish expressed in seconds | +| `memoryKiller.hardLimitRss` | | Maximum RSS before immediate shutdown triggered expressed in kilobyte in daemon mode | +| `memoryKiller.checkInterval` | `3` | Amount of time between memory checks | +| `livenessProbe.initialDelaySeconds` | `20` | Delay before liveness probe is initiated | +| `livenessProbe.periodSeconds` | `60` | How often to perform the liveness probe | +| `livenessProbe.timeoutSeconds` | `30` | When the liveness probe times out | +| `livenessProbe.successThreshold` | `1` | Minimum consecutive successes for the liveness probe to be considered successful after having failed | +| `livenessProbe.failureThreshold` | `3` | Minimum consecutive failures for the liveness probe to be considered failed after having succeeded | +| `readinessProbe.initialDelaySeconds` | `0` | Delay before readiness probe is initiated | +| `readinessProbe.periodSeconds` | `10` | How often to perform the readiness probe | +| `readinessProbe.timeoutSeconds` | `2` | When the readiness probe times out | +| `readinessProbe.successThreshold` | `1` | Minimum consecutive successes for the readiness probe to be considered successful after having failed | 
+| `readinessProbe.failureThreshold` | `3` | Minimum consecutive failures for the readiness probe to be considered failed after having succeeded | +| `securityContext.fsGroup` | `1000` | Group ID under which the pod should be started | +| `securityContext.runAsUser` | `1000` | User ID under which the pod should be started | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | +| `containerSecurityContext.runAsUser` | `1000` | Allow to overwrite the specific security context under which the container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `serviceAccount.annotations` | `{}` | ServiceAccount annotations | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created | +| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount | +| `serviceAccount.name` | | Name of the ServiceAccount. 
If not set, the full chart name is used | +| `priorityClassName` | `""` | Allow configuring pods `priorityClassName`, this is used to control pod priority in case of eviction | + +## Chart configuration examples + +### resources + +`resources` allows you to configure the minimum and maximum amount of resources (memory and CPU) a Sidekiq +pod can consume. + +Sidekiq pod workloads vary greatly between deployments. Generally speaking, it is understood that each Sidekiq +process consumes approximately 1 vCPU and 2 GB of memory. Vertical scaling should generally align to this `1:2` +ratio of `vCPU:Memory`. + +Below is an example use of `resources`: + +```yaml +resources: + limits: + memory: 5G + requests: + memory: 2G + cpu: 900m +``` + +### extraEnv + +`extraEnv` allows you to expose additional environment variables in the dependencies container. + +Below is an example use of `extraEnv`: + +```yaml +extraEnv: + SOME_KEY: some_value + SOME_OTHER_KEY: some_other_value +``` + +When the container is started, you can confirm that the environment variables are exposed: + +```shell +env | grep SOME +SOME_KEY=some_value +SOME_OTHER_KEY=some_other_value +``` + +You can also set `extraEnv` for a specific pod: + +```yaml +extraEnv: + SOME_KEY: some_value + SOME_OTHER_KEY: some_other_value +pods: + - name: mailers + queues: mailers + extraEnv: + SOME_POD_KEY: some_pod_value + - name: catchall +``` + +This will set `SOME_POD_KEY` only for application containers in the `mailers` +pod. Pod-level `extraEnv` settings are not added to [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/). + +### extraEnvFrom + +`extraEnvFrom` allows you to expose additional environment variables from other data sources in all containers in the pods. +Subsequent variables can be overridden per Sidekiq pod. 
+ +Below is an example use of `extraEnvFrom`: + +```yaml +extraEnvFrom: + MY_NODE_NAME: + fieldRef: + fieldPath: spec.nodeName + MY_CPU_REQUEST: + resourceFieldRef: + containerName: test-container + resource: requests.cpu + SECRET_THING: + secretKeyRef: + name: special-secret + key: special_token + # optional: boolean +pods: + - name: immediate + extraEnvFrom: + CONFIG_STRING: + configMapKeyRef: + name: useful-config + key: some-string + # optional: boolean +``` + +### extraVolumes + +`extraVolumes` allows you to configure extra volumes chart-wide. + +Below is an example use of `extraVolumes`: + +```yaml +extraVolumes: | + - name: example-volume + persistentVolumeClaim: + claimName: example-pvc +``` + +### extraVolumeMounts + +`extraVolumeMounts` allows you to configure extra volumeMounts on all containers chart-wide. + +Below is an example use of `extraVolumeMounts`: + +```yaml +extraVolumeMounts: | + - name: example-volume-mount + mountPath: /etc/example +``` + +### image.pullSecrets + +`pullSecrets` allows you to authenticate to a private registry to pull images for a pod. + +Additional details about private registries and their authentication methods can be +found in [the Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). + +Below is an example use of `pullSecrets`: + +```yaml +image: + repository: my.sidekiq.repository + pullPolicy: Always + pullSecrets: + - name: my-secret-name + - name: my-secondary-secret-name +``` + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. + +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. 
| +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. | + +### tolerations + +`tolerations` allow you to schedule pods on tainted worker nodes + +Below is an example use of `tolerations`: + +```yaml +tolerations: +- key: "node_label" + operator: "Equal" + value: "true" + effect: "NoSchedule" +- key: "node_label" + operator: "Equal" + value: "true" + effect: "NoExecute" +``` + +### annotations + +`annotations` allows you to add annotations to the Sidekiq pods. + +Below is an example use of `annotations`: + +```yaml +annotations: + kubernetes.io/example-annotation: annotation-value +``` + +## Using the Community Edition of this chart + +By default, the Helm charts use the Enterprise Edition of GitLab. If desired, you +can use the Community Edition instead. Learn more about the +[differences between the two](https://about.gitlab.com/install/ce-or-ee/). + +In order to use the Community Edition, set `image.repository` to +`registry.gitlab.com/gitlab-org/build/cng/gitlab-sidekiq-ce`. + +## External Services + +This chart should be attached to the same Redis, PostgreSQL, and Gitaly instances +as the Webservice chart. The values of external services will be populated into a `ConfigMap` +that is shared across all Sidekiq pods. 
+ +### Redis + +```yaml +redis: + host: rank-racoon-redis + port: 6379 + sentinels: + - host: sentinel1.example.com + port: 26379 + password: + secret: gitlab-redis + key: redis-password +``` + +| Name | Type | Default | Description | +| :------------------ | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `host` | String | | The hostname of the Redis server with the database to use. This can be omitted in lieu of `serviceName`. If using Redis Sentinels, the `host` attribute needs to be set to the cluster name as specified in the `sentinel.conf`. | +| `password.key` | String | | The `password.key` attribute for Redis defines the name of the key in the secret (below) that contains the password. | +| `password.secret` | String | | The `password.secret` attribute for Redis defines the name of the Kubernetes `Secret` to pull from. | +| `port` | Integer | `6379` | The port on which to connect to the Redis server. | +| `serviceName` | String | `redis` | The name of the `service` which is operating the Redis database. If this is present, and `host` is not, the chart will template the hostname of the service (and current `.Release.Name`) in place of the `host` value. This is convenient when using Redis as a part of the overall GitLab chart. | +| `sentinels.[].host` | String | | The hostname of Redis Sentinel server for a Redis HA setup. | +| `sentinels.[].port` | Integer | `26379` | The port on which to connect to the Redis Sentinel server. | + +{{< alert type="note" >}} + +The current Redis Sentinel support only supports Sentinels that have +been deployed separately from the GitLab chart. As a result, the Redis +deployment through the GitLab chart should be disabled with `redis.install=false`. 
+The Secret containing the Redis password needs to be manually created +before deploying the GitLab chart. + +{{< /alert >}} + +### PostgreSQL + +```yaml +psql: + host: rank-racoon-psql + serviceName: pgbouncer + port: 5432 + database: gitlabhq_production + username: gitlab + preparedStatements: false + password: + secret: gitlab-postgres + key: psql-password +``` + +| Name | Type | Default | Description | +| :------------------- | :-----: | :-------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `host` | String | | The hostname of the PostgreSQL server with the database to use. This can be omitted if `postgresql.install=true` (default non-production). | +| `serviceName` | String | | The name of the `service` which is operating the PostgreSQL database. If this is present, and `host` is not, the chart will template the hostname of the service in place of the `host` value. | +| `database` | String | `gitlabhq_production` | The name of the database to use on the PostgreSQL server. | +| `password.key` | String | | The `password.key` attribute for PostgreSQL defines the name of the key in the secret (below) that contains the password. | +| `password.secret` | String | | The `password.secret` attribute for PostgreSQL defines the name of the Kubernetes `Secret` to pull from. | +| `port` | Integer | `5432` | The port on which to connect to the PostgreSQL server. | +| `username` | String | `gitlab` | The username with which to authenticate to the database. | +| `preparedStatements` | Boolean | `false` | If prepared statements should be used when communicating with the PostgreSQL server. 
| + +### Gitaly + +```YAML +gitaly: + internal: + names: + - default + - default2 + external: + - name: node1 + hostname: node1.example.com + port: 8079 + authToken: + secret: gitaly-secret + key: token +``` + +| Name | Type | Default | Description | +| :----------------- | :-----: | :------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `host` | String | | The hostname of the Gitaly server to use. This can be omitted in lieu of `serviceName`. | +| `serviceName` | String | `gitaly` | The name of the `service` which is operating the Gitaly server. If this is present, and `host` is not, the chart will template the hostname of the service (and current `.Release.Name`) in place of the `host` value. This is convenient when using Gitaly as a part of the overall GitLab chart. | +| `port` | Integer | `8075` | The port on which to connect to the Gitaly server. | +| `authToken.key` | String | | The name of the key in the secret below that contains the authToken. | +| `authToken.secret` | String | | The name of the Kubernetes `Secret` to pull from. | + +## Metrics + +By default, a Prometheus metrics exporter is enabled per pod. Metrics are only available +when [GitLab Prometheus metrics](https://docs.gitlab.com/administration/monitoring/prometheus/gitlab_metrics/) +are enabled in the Admin area. The exporter exposes a `/metrics` endpoint on port +`3807`. When metrics are enabled, annotations are added to each pod allowing a Prometheus +server to discover and scrape the exposed metrics. + +## Chart-wide defaults + +The following values will be used chart-wide, in the event that a value is not presented +on a per-pod basis. 
+ +| Name | Type | Default | Description | +| :--------------------------- | :-----: | :-------- | :------------------------------------------------------------------------------------------------------------------------------------ | +| `concurrency` | Integer | `25` | The number of tasks to process simultaneously. | +| `timeout` | Integer | `4` | The Sidekiq shutdown timeout. The number of seconds after Sidekiq gets the TERM signal before it forcefully shuts down its processes. | +| `memoryKiller.checkInterval` | Integer | `3` | Amount of time in seconds between memory checks | +| `memoryKiller.maxRss` | Integer | `2000000` | Maximum RSS before delayed shutdown triggered expressed in kilobytes | +| `memoryKiller.graceTime` | Integer | `900` | Time to wait before a triggered shutdown expressed in seconds | +| `memoryKiller.shutdownWait` | Integer | `30` | Amount of time after triggered shutdown for existing jobs to finish expressed in seconds | +| `minReplicas` | Integer | `2` | Minimum number of replicas | +| `maxReplicas` | Integer | `10` | Maximum number of replicas | +| `maxUnavailable` | Integer | `1` | Limit of maximum number of Pods to be unavailable | + +{{< alert type="note" >}} + +[Detailed documentation of the Sidekiq memory killer is available](https://docs.gitlab.com/administration/sidekiq/sidekiq_memory_killer/) +in the Linux package documentation. + +{{< /alert >}} + +## Per-pod Settings + +The `pods` declaration provides for the declaration of all attributes for a worker +pod. These will be templated to `Deployment`s, with individual `ConfigMap`s for their +Sidekiq instances. + +{{< alert type="note" >}} + +The settings default to including a single pod that is set up to monitor +all queues. Making changes to the pods section will *overwrite the default pod* with +a different pod configuration. It will not add a new pod in addition to the default. 
+
+{{< /alert >}}
+
+| Name | Type | Default | Description |
+| :----------------------------------- | :-----: | :------------------------------------------------------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `concurrency` | Integer | | The number of tasks to process simultaneously. If not provided, it will be pulled from the chart-wide default. |
+| `name` | String | | Used to name the `Deployment` and `ConfigMap` for this pod. It should be kept short, and should not be duplicated between any two entries. |
+| `queues` | String | | [See below](#queues). |
+| `timeout` | Integer | | The Sidekiq shutdown timeout. The number of seconds after Sidekiq gets the TERM signal before it forcefully shuts down its processes. If not provided, it will be pulled from the chart-wide default. This value **must** be less than `terminationGracePeriodSeconds`. |
+| `resources` | | | Each pod can present its own `resources` requirements, which will be added to the `Deployment` created for it, if present. These match the Kubernetes documentation. |
+| `nodeSelector` | | | Each pod can be configured with a `nodeSelector` attribute, which will be added to the `Deployment` created for it, if present. These definitions match the Kubernetes documentation. |
+| `memoryKiller.checkInterval` | Integer | `3` | Amount of time between memory checks |
+| `memoryKiller.maxRss` | Integer | `2000000` | Overrides the maximum RSS for a given pod. 
| +| `memoryKiller.graceTime` | Integer | `900` | Overrides the time to wait before a triggered shutdown for a given Pod | +| `memoryKiller.shutdownWait` | Integer | `30` | Overrides the amount of time after triggered shutdown for existing jobs to finish for a given Pod | +| `minReplicas` | Integer | `2` | Minimum number of replicas | +| `maxReplicas` | Integer | `10` | Maximum number of replicas | +| `maxUnavailable` | Integer | `1` | Limit of maximum number of Pods to be unavailable | +| `podLabels` | Map | `{}` | Supplemental Pod labels. Will not be used for selectors. | +| `strategy` | | `{}` | Allows one to configure the update strategy utilized by the deployment | +| `extraVolumes` | String | | Configures extra volumes for the given pod. | +| `extraVolumeMounts` | String | | Configures extra volume mounts for the given pod. | +| `priorityClassName` | String | `""` | Allow configuring pods `priorityClassName`, this is used to control pod priority in case of eviction | +| `hpa.customMetrics` | Array | `[]` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`) | +| `hpa.cpu.targetType` | String | `AverageValue` | Overrides the autoscaling CPU target type, must be either `Utilization` or `AverageValue` | +| `hpa.cpu.targetAverageValue` | String | `350m` | Overrides the autoscaling CPU target value | +| `hpa.cpu.targetAverageUtilization` | Integer | | Overrides the autoscaling CPU target utilization | +| `hpa.memory.targetType` | String | | Overrides the autoscaling memory target type, must be either `Utilization` or `AverageValue` | +| `hpa.memory.targetAverageValue` | String | | Overrides the autoscaling memory target value | +| `hpa.memory.targetAverageUtilization` | Integer | | Overrides the autoscaling memory target utilization | +| `hpa.targetAverageValue` | String | | **DEPRECATED** Overrides the autoscaling CPU 
target value | +| `keda.enabled` | Boolean | `false` | Overrides enabling KEDA | +| `keda.pollingInterval` | Integer | `30` | Overrides the KEDA polling interval | +| `keda.cooldownPeriod` | Integer | `300` | Overrides the KEDA cooldown period | +| `keda.minReplicaCount` | Integer | | Overrides the KEDA minimum replica count | +| `keda.maxReplicaCount` | Integer | | Overrides the KEDA maximum replica count | +| `keda.fallback` | Map | | Overrides the KEDA fallback configuration | +| `keda.hpaName` | String | | Overrides the KEDA HPA name | +| `keda.restoreToOriginalReplicaCount` | Boolean | | Overrides enabling the restoration of the original replica count | +| `keda.behavior` | Map | | Overrides the KEDA HPA behavior | +| `keda.triggers` | Array | | Overrides the KEDA triggers | +| `extraEnv` | Map | | List of extra environment variables to expose. The chart-wide value is merged into this, with values from the pod taking precedence | +| `extraEnvFrom` | Map | | List of extra environment variables from other data source to expose | +| `terminationGracePeriodSeconds` | Integer | `30` | Optional duration in seconds the pod needs to terminate gracefully. | + +### queues + +The `queues` value is a string containing a comma-separated list of queues to be +processed. By default, it is not set, meaning that all queues will be processed. + +The string should not contain spaces: `merge,post_receive,process_commit` will +work, but `merge, post_receive, process_commit` will not. + +Any queue to which jobs are added but are not represented as a part of at least +one pod item *will not be processed*. For a complete list of all queues, see +these files in the GitLab source: + +1. [`app/workers/all_queues.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml) +1. 
[`ee/app/workers/all_queues.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml) + +In addition to configuring `gitlab.sidekiq.pods[].queues`, you must also configure `global.appConfig.sidekiq.routingRules`. For more information, see +[Sidekiq routing rules settings](../../globals.md#sidekiq-routing-rules-settings). + +### Example `pod` entry + +```YAML +pods: + - name: immediate + concurrency: 10 + minReplicas: 2 # defaults to inherited value + maxReplicas: 10 # defaults to inherited value + maxUnavailable: 5 # defaults to inherited value + queues: merge,post_receive,process_commit + extraVolumeMounts: | + - name: example-volume-mount + mountPath: /etc/example + extraVolumes: | + - name: example-volume + persistentVolumeClaim: + claimName: example-pvc + resources: + limits: + cpu: 800m + memory: 2Gi + hpa: + cpu: + targetType: Value + targetAverageValue: 350m +``` + +### Full example of Sidekiq configuration + +The following is a full example of Sidekiq configuration using a separate Sidekiq pod for import-related jobs, a Sidekiq pod for export-related jobs using a separate Redis instance and another pod for everything else. + +```yaml +... +global: + appConfig: + sidekiq: + routingRules: + - ["feature_category=importers", "import"] + - ["feature_category=exporters", "export", "queues_shard_extra_shard"] + - ["*", "default"] + redis: + redisYmlOverride: + queues_shard_extra_shard: ... +... +gitlab: + sidekiq: + pods: + - name: import + queues: import + - name: export + queues: export + extraEnv: + SIDEKIQ_SHARD_NAME: queues_shard_extra_shard # to match key in global.redis.redisYmlOverride + - name: default +... +``` + +## Configuring the `networkpolicy` + +This section controls the +[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/). +This configuration is optional and is used to limit Egress and Ingress of the +Pods to specific endpoints. 
+
+| Name | Type | Default | Description |
+| :---------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `enabled` | Boolean | `false` | This setting enables the network policy |
+| `ingress.enabled` | Boolean | `false` | When set to `true`, the `Ingress` network policy will be activated. This will block all Ingress connections unless rules are specified. |
+| `ingress.rules` | Array | `[]` | Rules for the Ingress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below |
+| `egress.enabled` | Boolean | `false` | When set to `true`, the `Egress` network policy will be activated. This will block all egress connections unless rules are specified. |
+| `egress.rules` | Array | `[]` | Rules for the egress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below |
+
+### Example Network Policy
+
+The Sidekiq service requires Ingress connections for only the Prometheus
+exporter if enabled, and normally requires Egress connections to various
+places. This example adds the following network policy:
+
+- Allows Ingress requests:
+  - From the `Prometheus` pod to port `3807`
+- Allows Egress requests:
+  - To `kube-dns` to port `53`
+  - To the `gitaly` pod to port `8075`
+  - To the `registry` pod to port `5000`
+  - To the `kas` pod to port `8153`
+  - To external database `172.16.0.10/32` to port `5432`
+  - To external Redis `172.16.0.11/32` to port `6379`
+  - To external Elasticsearch `172.16.0.12/32` to port `443`
+  - To mail gateway `172.16.0.13/32` to port `587`
+  - To endpoints like AWS VPC endpoint for S3 or STS `172.16.1.0/24` to port `443`
+  - To internal subnets `172.16.2.0/24` to port `443` to send webhooks
+
+*Note the example provided is only an example and may not be complete*
+
+{{< alert type="note" >}}
+
+The Sidekiq service requires outbound connectivity to the public
+internet for images on [external object storage](../../../advanced/external-object-storage) if no local endpoint is available.
+
+{{< /alert >}}
+
+The example is based on the assumption that `kube-dns` was deployed
+to the namespace `kube-system`, `prometheus` was deployed to the namespace
+`monitoring` and `nginx-ingress` was deployed to the namespace `nginx-ingress`.
+ +```yaml +networkpolicy: + enabled: true + ingress: + enabled: true + rules: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: prometheus + component: server + release: gitlab + ports: + - port: 3807 + egress: + enabled: true + rules: + - to: + - podSelector: + matchLabels: + app: gitaly + ports: + - port: 8075 + - to: + - podSelector: + matchLabels: + app: kas + ports: + - port: 8153 + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: UDP + - to: + - ipBlock: + cidr: 172.16.0.10/32 + ports: + - port: 5432 + - to: + - ipBlock: + cidr: 172.16.0.11/32 + ports: + - port: 6379 + - to: + - ipBlock: + cidr: 172.16.0.12/32 + ports: + - port: 25 + - to: + - ipBlock: + cidr: 172.16.0.13/32 + ports: + - port: 443 + - to: + - ipBlock: + cidr: 172.16.1.0/24 + ports: + - port: 443 + - to: + - ipBlock: + cidr: 172.16.2.0/24 + ports: + - port: 443 +``` + +## Configuring KEDA + +This `keda` section enables the installation of [KEDA](https://keda.sh/) `ScaledObjects` instead of regular `HorizontalPodAutoscalers`. +This configuration is optional and can be used when there is a need for autoscaling based on custom or external metrics. + +Most settings default to the values set in the `hpa` section where applicable. + +If the following are true, CPU and memory triggers are added automatically based on the CPU and memory thresholds set in the `hpa` section: + +- `triggers` is not set. +- The corresponding `request.cpu.request` or `request.memory.request` setting is also set to a non-zero value. + +If no triggers are set, the `ScaledObject` is not created. + +Refer to the [KEDA documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/) for more details about those settings. 
+ +| Name | Type | Default | Description | +| :---------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `pollingInterval` | Integer | `30` | The interval to check each trigger on | +| `cooldownPeriod` | Integer | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `minReplicaCount` | Integer | | Minimum number of replicas KEDA will scale the resource down to, defaults to `minReplicas` | +| `maxReplicaCount` | Integer | | Maximum number of replicas KEDA will scale the resource up to, defaults to `maxReplicas` | +| `fallback` | Map | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `hpaName` | String | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `restoreToOriginalReplicaCount` | Boolean | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `behavior` | Map | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `triggers` | Array | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | diff --git a/chart/doc/charts/gitlab/sidekiq/index.md b/chart/doc/charts/gitlab/sidekiq/index.md index fb67aa5e24ee8f926284d13ceaf12094d7d110eb..db2b55bdd16e7bef639fd35076da75436a3dd09f 100644 --- a/chart/doc/charts/gitlab/sidekiq/index.md +++ b/chart/doc/charts/gitlab/sidekiq/index.md @@ -601,8 +601,8 @@ places. 
This examples adds the following network policy: *Note the example provided is only an example and may not be complete* -_Note that the Sidekiq service requires outbound connectivity to the public -internet for images on [external object storage](../../../advanced/external-object-storage)_ +*Note that the Sidekiq service requires outbound connectivity to the public +internet for images on [external object storage](../../../advanced/external-object-storage)* ```yaml networkpolicy: diff --git a/chart/doc/charts/gitlab/spamcheck/_index.md b/chart/doc/charts/gitlab/spamcheck/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..7d77aafdc788a7e82b4e058bd47be171b3b94aa7 --- /dev/null +++ b/chart/doc/charts/gitlab/spamcheck/_index.md @@ -0,0 +1,219 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab-Spamcheck chart +--- + +{{< details >}} + +- Tier: Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `spamcheck` sub-chart provides a deployment of [Spamcheck](https://gitlab.com/gitlab-org/spamcheck) which is an anti-spam engine developed by GitLab originally to combat the rising amount of spam in GitLab.com, and later made public to be used in GitLab Self-Managed. + +## Requirements + +This chart depends on access to the GitLab API. + +## Configuration + +### Enable Spamcheck + +`spamcheck` is disabled by default. To enable it on your GitLab instance, set the Helm property `global.spamcheck.enabled` to `true`, for example: + +```shell +helm upgrade --force --install gitlab . \ +--set global.hosts.domain='your.domain.com' \ +--set global.hosts.externalIP=XYZ.XYZ.XYZ.XYZ \ +--set certmanager-issuer.email='me@example.com' \ +--set global.spamcheck.enabled=true +``` + +### Configure GitLab to use Spamcheck + +1. 
On the left sidebar, at the bottom, select **Admin Area**. +1. Select **Settings > Reporting**. +1. Expand **Spam and Anti-bot Protection**. +1. Update the Spam Check settings: + 1. Check the "Enable Spam Check via external API endpoint" checkbox + 1. For URL of the external Spam Check endpoint use `grpc://gitlab-spamcheck.default.svc:8001`, where `default` is replaced with the Kubernetes namespace where GitLab is deployed. + 1. Leave "Spam Check API key" blank. +1. Select **Save changes**. + +## Installation command line options + +The table below contains all the possible charts configurations that can be supplied to the `helm install` command using the `--set` flags. + +| Parameter | Default | Description | +| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | +| `affinity` | `{}` | [Affinity rules](../_index.md#affinity) for pod assignment | +| `annotations` | `{}` | Pod annotations | +| `common.labels` | `{}` | Supplemental labels that are applied to all objects created by this chart. 
| +| `deployment.livenessProbe.initialDelaySeconds` | 20 | Delay before liveness probe is initiated | +| `deployment.livenessProbe.periodSeconds` | 60 | How often to perform the liveness probe | +| `deployment.livenessProbe.timeoutSeconds` | 30 | When the liveness probe times out | +| `deployment.livenessProbe.successThreshold` | 1 | Minimum consecutive successes for the liveness probe to be considered successful after having failed | +| `deployment.livenessProbe.failureThreshold` | 3 | Minimum consecutive failures for the liveness probe to be considered failed after having succeeded | +| `deployment.readinessProbe.initialDelaySeconds` | 0 | Delay before readiness probe is initiated | +| `deployment.readinessProbe.periodSeconds` | 10 | How often to perform the readiness probe | +| `deployment.readinessProbe.timeoutSeconds` | 2 | When the readiness probe times out | +| `deployment.readinessProbe.successThreshold` | 1 | Minimum consecutive successes for the readiness probe to be considered successful after having failed | +| `deployment.readinessProbe.failureThreshold` | 3 | Minimum consecutive failures for the readiness probe to be considered failed after having succeeded | +| `deployment.strategy` | `{}` | Allows one to configure the update strategy used by the deployment. When not provided, the cluster default is used. 
| +| `hpa.behavior` | `{scaleDown: {stabilizationWindowSeconds: 300 }}` | Behavior contains the specifications for up- and downscaling behavior (requires `autoscaling/v2beta2` or higher) | +| `hpa.customMetrics` | `[]` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`) | +| `hpa.cpu.targetType` | `AverageValue` | Set the autoscaling CPU target type, must be either `Utilization` or `AverageValue` | +| `hpa.cpu.targetAverageValue` | `100m` | Set the autoscaling CPU target value | +| `hpa.cpu.targetAverageUtilization` | | Set the autoscaling CPU target utilization | +| `hpa.memory.targetType` | | Set the autoscaling memory target type, must be either `Utilization` or `AverageValue` | +| `hpa.memory.targetAverageValue` | | Set the autoscaling memory target value | +| `hpa.memory.targetAverageUtilization` | | Set the autoscaling memory target utilization | +| `hpa.targetAverageValue` | | **DEPRECATED** Set the autoscaling CPU target value | +| `image.registry` | | Spamcheck image registry | +| `image.repository` | `registry.gitlab.com/gitlab-com/gl-security/engineering-and-research/automation-team/spam/spamcheck` | Spamcheck image repository | +| `image.tag` | | Spamcheck image tag | +| `image.digest` | | Spamcheck image digest | +| `keda.enabled` | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `keda.pollingInterval` | `30` | The interval to check each trigger on | +| `keda.cooldownPeriod` | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `keda.minReplicaCount` | | Minimum number of replicas KEDA will scale the resource down to, defaults to `hpa.minReplicas` | +| `keda.maxReplicaCount` | | Maximum number of replicas KEDA will scale the resource up to, defaults to `hpa.maxReplicas` | +| `keda.fallback` | | KEDA 
fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) |
+| `keda.hpaName` | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` |
+| `keda.restoreToOriginalReplicaCount` | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted |
+| `keda.behavior` | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` |
+| `keda.triggers` | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` |
+| `logging.level` | `info` | Log level |
+| `maxReplicas` | `10` | HPA `maxReplicas` |
+| `maxUnavailable` | `1` | HPA `maxUnavailable` |
+| `minReplicas` | `2` | HPA `minReplicas` |
+| `podLabels` | `{}` | Supplemental Pod labels. Not used for selectors. |
+| `resources.requests.cpu` | `100m` | Spamcheck minimum CPU |
+| `resources.requests.memory` | `100M` | Spamcheck minimum memory |
+| `securityContext.fsGroup` | `1000` | Group ID under which the pod should be started |
+| `securityContext.runAsUser` | `1000` | User ID under which the pod should be started |
+| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) |
+| `serviceLabels` | `{}` | Supplemental service labels |
+| `service.externalPort` | `8001` | Spamcheck external port |
+| `service.internalPort` | `8001` | Spamcheck internal port |
+| `service.type` | `ClusterIP` | Spamcheck service type |
+| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods |
+| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created |
+| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount |
+| `tolerations` | `[]` | Toleration labels for pod 
assignment | +| `extraEnvFrom` | `{}` | List of extra environment variables from other data sources to expose | +| `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | + +## Configuring KEDA + +This `keda` section enables the installation of [KEDA](https://keda.sh/) `ScaledObjects` instead of regular `HorizontalPodAutoscalers`. +This configuration is optional and can be used when there is a need for autoscaling based on custom or external metrics. + +Most settings default to the values set in the `hpa` section where applicable. + +If the following are true, CPU and memory triggers are added automatically based on the CPU and memory thresholds set in the `hpa` section: + +- `triggers` is not set. +- The corresponding `request.cpu.request` or `request.memory.request` setting is also set to a non-zero value. + +If no triggers are set, the `ScaledObject` is not created. + +Refer to the [KEDA documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/) for more details about those settings. 
+ +| Name | Type | Default | Description | +| :---------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `pollingInterval` | Integer | `30` | The interval to check each trigger on | +| `cooldownPeriod` | Integer | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `minReplicaCount` | Integer | | Minimum number of replicas KEDA will scale the resource down to, defaults to `hpa.minReplicas` | +| `maxReplicaCount` | Integer | | Maximum number of replicas KEDA will scale the resource up to, defaults to `hpa.maxReplicas` | +| `fallback` | Map | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `hpaName` | String | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `restoreToOriginalReplicaCount` | Boolean | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `behavior` | Map | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `triggers` | Array | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | + +## Chart configuration examples + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. 
+
+| Name | Type | Default | Description |
+| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). |
+| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. |
+| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. |
+
+### tolerations
+
+`tolerations` allow you to schedule pods on tainted worker nodes
+
+Below is an example use of `tolerations`:
+
+```yaml
+tolerations:
+- key: "node_label"
+  operator: "Equal"
+  value: "true"
+  effect: "NoSchedule"
+- key: "node_label"
+  operator: "Equal"
+  value: "true"
+  effect: "NoExecute"
+```
+
+### affinity
+
+For more information, see [`affinity`](../_index.md#affinity).
+
+### annotations
+
+`annotations` allows you to add annotations to the Spamcheck pods. For example:
+
+```yaml
+annotations:
+  kubernetes.io/example-annotation: annotation-value
+```
+
+### resources
+
+`resources` allows you to configure the minimum and maximum amount of resources (memory and CPU) a Spamcheck pod can consume.
+
+For example:
+
+```yaml
+resources:
+  requests:
+    memory: 100M
+    cpu: 100m
+```
+
+### livenessProbe/readinessProbe
+
+`deployment.livenessProbe` and `deployment.readinessProbe` provide a mechanism to help control the termination of Spamcheck Pods in certain scenarios,
+such as when a container is in a broken state.
+ +For example: + +```yaml +deployment: + livenessProbe: + initialDelaySeconds: 10 + periodSeconds: 20 + timeoutSeconds: 3 + successThreshold: 1 + failureThreshold: 10 + readinessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 3 +``` + +Refer to the official [Kubernetes Documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) +for additional details regarding this configuration. diff --git a/chart/doc/charts/gitlab/toolbox/_index.md b/chart/doc/charts/gitlab/toolbox/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..a596c03c9d02604a951206fa86cbcc3e1ff627af --- /dev/null +++ b/chart/doc/charts/gitlab/toolbox/_index.md @@ -0,0 +1,238 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Toolbox +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The Toolbox Pod is used to execute periodic housekeeping tasks within +the GitLab application. These tasks include backups, Sidekiq maintenance, +and Rake tasks. 
+ +## Configuration + +The following configuration settings are the default settings provided by the +Toolbox chart: + +```yaml +gitlab: + ## doc/charts/gitlab/toolbox + toolbox: + enabled: true + replicas: 1 + backups: + cron: + enabled: false + concurrencyPolicy: Replace + failedJobsHistoryLimit: 1 + schedule: "0 1 * * *" + successfulJobsHistoryLimit: 3 + suspend: false + backoffLimit: 6 + safeToEvict: false + restartPolicy: "OnFailure" + resources: + requests: + cpu: 50m + memory: 350M + persistence: + enabled: false + accessMode: ReadWriteOnce + useGenericEphemeralVolume: false + size: 10Gi + objectStorage: + backend: s3 + config: {} + persistence: + enabled: false + accessMode: 'ReadWriteOnce' + size: '10Gi' + resources: + requests: + cpu: '50m' + memory: '350M' + securityContext: + fsGroup: '1000' + runAsUser: '1000' + runAsGroup: '1000' + containerSecurityContext: + runAsUser: '1000' + affinity: {} +``` + +| Parameter | Description | Default | +|----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `affinity` | [Affinity rules](../_index.md#affinity) for pod assignment | `{}` | +| `annotations` | Annotations to add to the Toolbox Pods and Jobs | `{}` | +| `common.labels` | Supplemental labels that are applied to all objects created by this chart. 
| `{}` |
+| `antiAffinityLabels.matchLabels` | Labels for setting anti-affinity options | |
+| `backups.cron.activeDeadlineSeconds` | Backup CronJob active deadline seconds (if null, no active deadline is applied) | `null` |
+| `backups.cron.ttlSecondsAfterFinished` | Backup CronJob job time to live after finished (if null, no time to live is applied) | `null` |
+| `backups.cron.safeToEvict` | Autoscaling safe-to-evict annotation | false |
+| `backups.cron.backoffLimit` | Backup CronJob backoff limit | `6` |
+| `backups.cron.concurrencyPolicy` | Kubernetes Job concurrency policy | `Replace` |
+| `backups.cron.enabled` | Backup CronJob enabled flag | false |
+| `backups.cron.extraArgs` | String of arguments to pass to the backup utility | |
+| `backups.cron.failedJobsHistoryLimit` | Number of failed backup jobs list in history | `1` |
+| `backups.cron.persistence.accessMode` | Backup cron persistence access mode | `ReadWriteOnce` |
+| `backups.cron.persistence.enabled` | Backup cron enable persistence flag | false |
+| `backups.cron.persistence.matchExpressions` | Label-expression matches to bind | |
+| `backups.cron.persistence.matchLabels` | Label-value matches to bind | |
+| `backups.cron.persistence.useGenericEphemeralVolume` | Use a [generic ephemeral volume](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) | false |
+| `backups.cron.persistence.size` | Backup cron persistence volume size | `10Gi` |
+| `backups.cron.persistence.storageClass` | StorageClass name for provisioning | |
+| `backups.cron.persistence.subPath` | Backup cron persistence volume mount path | |
+| `backups.cron.persistence.volumeName` | Existing persistent volume name | |
+| `backups.cron.resources.requests.cpu` | Backup cron minimum needed CPU | `50m` |
+| `backups.cron.resources.requests.memory` | Backup cron minimum needed memory | `350M` |
+| `backups.cron.restartPolicy` | Backup cron restart policy (`Never` or `OnFailure`) | `OnFailure` |
+| 
`backups.cron.schedule` | Cron style schedule string | `0 1 * * *` | +| `backups.cron.startingDeadlineSeconds` | Backup cron job starting deadline, in seconds (if null, no starting deadline is applied) | `null` | +| `backups.cron.successfulJobsHistoryLimit` | Number of successful backup jobs list in history | `3` | +| `backups.cron.suspend` | Backup cron job is suspended | `false` | +| `backups.cron.timeZone` | Time zone for the backup schedule. For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones). Uses the cluster time zone if not specified. | "" | +| `backups.cron.tolerations` | Tolerations to add to the backup cron job | "" | +| `backups.cron.nodeSelector` | Backup cron job node selection | "" | +| `backups.objectStorage.backend` | Object storage provider to use (`s3`, `gcs` or `azure`) | `s3` | +| `backups.objectStorage.config.gcpProject` | GCP Project to use when backend is `gcs` | "" | +| `backups.objectStorage.config.key` | Key containing credentials in secret | "" | +| `backups.objectStorage.config.secret` | Object storage credentials secret | "" | +| `common.labels` | Supplemental labels that are applied to all objects created by this chart. 
| `{}` | +| `deployment.strategy` | Allows one to configure the update strategy utilized by the deployment | { `type`: `Recreate` } | +| `enabled` | Toolbox enablement flag | true | +| `extra` | YAML block for [extra `gitlab.yml` configuration](https://gitlab.com/gitlab-org/gitlab/-/blob/8d2b59dbf232f17159d63f0359fa4793921896d5/config/gitlab.yml.example#L1193-1199) | {} | +| `image.pullPolicy` | Toolbox image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Toolbox image pull secrets | | +| `image.repository` | Toolbox image repository | `registry.gitlab.com/gitlab-org/build/cng/gitlab-toolbox-ee` | +| `image.tag` | Toolbox image tag | `master` | +| `init.image.repository` | Toolbox init image repository | | +| `init.image.tag` | Toolbox init image tag | | +| `init.resources` | Toolbox init container resource requirements | { `requests`: { `cpu`: `50m` }} | +| `init.containerSecurityContext` | initContainer specific [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) | | +| `init.containerSecurityContext.allowPrivilegeEscalation` | initContainer specific: Controls whether a process can gain more privileges than its parent process | `false` | +| `init.containerSecurityContext.runAsUser` | initContainer specific: User ID under which the container should be started | `1000` | +| `init.containerSecurityContext.allowPrivilegeEscalation` | initContainer specific: Controls whether a process can gain more privileges than its parent process | `false` | +| `init.containerSecurityContext.runAsNonRoot` | initContainer specific: Controls whether the container runs with a non-root user | `true` | +| `init.containerSecurityContext.capabilities.drop` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | `[ "ALL" ]` | +| `nodeSelector` | Toolbox and backup job node selection | | +| `persistence.accessMode` | Toolbox persistence access 
mode | `ReadWriteOnce` | +| `persistence.enabled` | Toolbox enable persistence flag | false | +| `persistence.matchExpressions` | Label-expression matches to bind | | +| `persistence.matchLabels` | Label-value matches to bind | | +| `persistence.size` | Toolbox persistence volume size | `10Gi` | +| `persistence.storageClass` | StorageClass name for provisioning | | +| `persistence.subPath` | Toolbox persistence volume mount path | | +| `persistence.volumeName` | Existing PersistentVolume name | | +| `podLabels` | Labels for running Toolbox Pods | {} | +| `priorityClassName` | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | | +| `replicas` | Number of Toolbox Pods to run | `1` | +| `resources.requests` | Toolbox minimum requested resources | { `cpu`: `50m`, `memory`: `350M` | +| `securityContext.fsGroup` | File System Group ID under which the pod should be started | `1000` | +| `securityContext.runAsUser` | User ID under which the pod should be started | `1000` | +| `securityContext.runAsGroup` | Group ID under which the pod should be started | `1000` | +| `securityContext.fsGroupChangePolicy` | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | | +| `securityContext.seccompProfile.type` | Seccomp profile to use | `RuntimeDefault` | +| `containerSecurityContext` | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | | +| `containerSecurityContext.runAsUser` | Allow to overwrite the specific security context under which the container is started | `1000` | +| `containerSecurityContext.allowPrivilegeEscalation` | Controls whether a process of the container can gain more privileges than its parent process | `false` | +| `containerSecurityContext.runAsNonRoot` | Controls whether the container runs with a non-root user | `true` | +| 
`containerSecurityContext.capabilities.drop` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | `[ "ALL" ]` |
+| `serviceAccount.annotations` | Annotations for ServiceAccount | {} |
+| `serviceAccount.automountServiceAccountToken`| Indicates whether or not the default ServiceAccount access token should be mounted in pods | `false` |
+| `serviceAccount.enabled` | Indicates whether or not to use a ServiceAccount | false |
+| `serviceAccount.create` | Indicates whether or not a ServiceAccount should be created | false |
+| `serviceAccount.name` | Name of the ServiceAccount. If not set, the full chart name is used | |
+| `tolerations` | Tolerations to add to the Toolbox | |
+| `extraEnvFrom` | List of extra environment variables from other data sources to expose | |
+
+## Configuring backups
+
+Information concerning configuring backups can be found in the
+[backup and restore documentation](../../../backup-restore/_index.md). Additional
+information about the technical implementation of how the backups are
+performed can be found in the
+[backup and restore architecture documentation](../../../architecture/backup-restore.md).
+
+## Persistence configuration
+
+The persistent stores for backups and restorations are configured separately.
+Please review the following considerations when configuring GitLab for
+backup and restore operations.
+
+Backups use the `backups.cron.persistence.*` properties and restorations
+use the `persistence.*` properties. Further descriptions concerning the
+configuration of a persistence store will use just the final property key
+(e.g. `.enabled` or `.size`) and the appropriate prefix will need to be
+added.
+
+The persistence stores are disabled by default, thus `.enabled` needs to
+be set to `true` for a backup or restoration of any appreciable size. 
+
+In addition, either `.storageClass` needs to be specified for a PersistentVolume
+to be created by Kubernetes or a PersistentVolume needs to be manually created.
+If `.storageClass` is specified as '-', then the PersistentVolume will be
+created using the [default StorageClass](https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/)
+as specified in the Kubernetes cluster.
+
+If the PersistentVolume is created manually, then the volume can be specified
+using the `.volumeName` property or by using the selector `.matchLabels` /
+`.matchExpressions` properties.
+
+In most cases the default value of `.accessMode` will provide adequate
+controls for only Toolbox accessing the PersistentVolumes. Please consult
+the documentation for the CSI driver installed in the Kubernetes cluster to
+ensure that the setting is correct.
+
+### Backup considerations
+
+A backup operation needs an amount of disk space to hold the individual
+components that are being backed up before they are written to the backup
+object store. The amount of disk space depends on the following factors:
+
+- Number of projects and the amount of data stored under each project
+- Size of the PostgreSQL database (issues, MRs, etc.)
+- Size of each object store backend
+
+Once the rough size has been determined, the `backups.cron.persistence.size`
+property can be set so that backups can commence.
+
+### Restore considerations
+
+During the restoration of a backup, the backup needs to be extracted to disk
+before the files are replaced on the running instance. The size of this
+restoration disk space is controlled by the `persistence.size` property. Be
+mindful that as the size of the GitLab installation grows the size of the
+restoration disk space also needs to grow accordingly. In most cases the
+size of the restoration disk space should be the same size as the backup
+disk space. 
+ +## Toolbox included tools + +The Toolbox container contains useful GitLab tools such as Rails console, +Rake tasks, etc. These commands allow one to check the status of the database +migrations, execute Rake tasks for administrative tasks, interact with +the Rails console: + +```shell +# locate the Toolbox pod +kubectl get pods -lapp=toolbox + +# Launch a shell inside the pod +kubectl exec -it <Toolbox pod name> -- bash + +# open Rails console +gitlab-rails console -e production + +# execute a Rake task +gitlab-rake gitlab:env:info +``` + +### affinity + +For more information, see [`affinity`](../_index.md#affinity). diff --git a/chart/doc/charts/gitlab/webservice/_index.md b/chart/doc/charts/gitlab/webservice/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..518158ed5485986abb9b21a797254a6d40c9fae7 --- /dev/null +++ b/chart/doc/charts/gitlab/webservice/_index.md @@ -0,0 +1,929 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the GitLab Webservice chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `webservice` sub-chart provides the GitLab Rails webserver with two Webservice workers +per pod, which is the minimum necessary for a single pod to be able to serve any web request in GitLab. + +The pods of this chart make use of two containers: `gitlab-workhorse` and `webservice`. +[GitLab Workhorse](https://gitlab.com/gitlab-org/gitlab/-/tree/master/workhorse) listens on +port `8181`, and should _always_ be the destination for inbound traffic to the pod. +The `webservice` houses the GitLab [Rails codebase](https://gitlab.com/gitlab-org/gitlab), +listens on `8080`, and is accessible for metrics collection purposes. +`webservice` should never receive normal traffic directly. 
+ +## Requirements + +This chart depends on Redis, PostgreSQL, Gitaly, and Registry services, either as +part of the complete GitLab chart or provided as external services reachable from +the Kubernetes cluster this chart is deployed onto. + +## Configuration + +The `webservice` chart is configured as follows: [Global settings](#global-settings), +[Deployments settings](#deployments-settings), [Ingress settings](#ingress-settings), [External services](#external-services), and +[Chart settings](#chart-settings). + +## Installation command line options + +The table below contains all the possible chart configurations that can be supplied +to the `helm install` command using the `--set` flags. + +| Parameter | Default | Description | +|---------------------------------------------------------------|-----------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `annotations` | | Pod annotations | +| `podLabels` | | Supplemental Pod labels. Will not be used for selectors. | +| `common.labels` | | Supplemental labels that are applied to all objects created by this chart. 
| +| `deployment.terminationGracePeriodSeconds` | 30 | Seconds that Kubernetes will wait for a pod to exit, note this must be longer than `shutdown.blackoutSeconds` | +| `deployment.livenessProbe.initialDelaySeconds` | 20 | Delay before liveness probe is initiated | +| `deployment.livenessProbe.periodSeconds` | 60 | How often to perform the liveness probe | +| `deployment.livenessProbe.timeoutSeconds` | 30 | When the liveness probe times out | +| `deployment.livenessProbe.successThreshold` | 1 | Minimum consecutive successes for the liveness probe to be considered successful after having failed | +| `deployment.livenessProbe.failureThreshold` | 3 | Minimum consecutive failures for the liveness probe to be considered failed after having succeeded | +| `deployment.readinessProbe.initialDelaySeconds` | 0 | Delay before readiness probe is initiated | +| `deployment.readinessProbe.periodSeconds` | 10 | How often to perform the readiness probe | +| `deployment.readinessProbe.timeoutSeconds` | 2 | When the readiness probe times out | +| `deployment.readinessProbe.successThreshold` | 1 | Minimum consecutive successes for the readiness probe to be considered successful after having failed | +| `deployment.readinessProbe.failureThreshold` | 3 | Minimum consecutive failures for the readiness probe to be considered failed after having succeeded | +| `deployment.strategy` | `{}` | Allows one to configure the update strategy used by the deployment. When not provided, the cluster default is used. 
| +| `enabled` | `true` | Webservice enabled flag | +| `extraContainers` | | Multiline literal style string containing a list of containers to include | +| `extraInitContainers` | | List of extra init containers to include | +| `extras.google_analytics_id` | `nil` | Google Analytics ID for frontend | +| `extraVolumeMounts` | | List of extra volumes mounts to do | +| `extraVolumes` | | List of extra volumes to create | +| `extraEnv` | | List of extra environment variables to expose | +| `extraEnvFrom` | | List of extra environment variables from other data sources to expose | +| `gitlab.webservice.workhorse.image` | `registry.gitlab.com/gitlab-org/build/cng/gitlab-workhorse-ee` | Workhorse image repository | +| `gitlab.webservice.workhorse.tag` | | Workhorse image tag | +| `hpa.behavior` | `{scaleDown: {stabilizationWindowSeconds: 300 }}` | Behavior contains the specifications for up- and downscaling behavior (requires `autoscaling/v2beta2` or higher) | +| `hpa.customMetrics` | `[]` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`) | +| `hpa.cpu.targetType` | `AverageValue` | Set the autoscaling CPU target type, must be either `Utilization` or `AverageValue` | +| `hpa.cpu.targetAverageValue` | `1` | Set the autoscaling CPU target value | +| `hpa.cpu.targetAverageUtilization` | | Set the autoscaling CPU target utilization | +| `hpa.memory.targetType` | | Set the autoscaling memory target type, must be either `Utilization` or `AverageValue` | +| `hpa.memory.targetAverageValue` | | Set the autoscaling memory target value | +| `hpa.memory.targetAverageUtilization` | | Set the autoscaling memory target utilization | +| `hpa.targetAverageValue` | | **DEPRECATED** Set the autoscaling CPU target value | +| `sshHostKeys.mount` | `false` | Whether to mount the GitLab Shell secret containing the public SSH keys. 
| +| `sshHostKeys.mountName` | `ssh-host-keys` | Name of the mounted volume. | +| `sshHostKeys.types` | `[dsa,rsa,ecdsa,ed25519]` | List of SSH key types to mount. | +| `image.pullPolicy` | `Always` | Webservice image pull policy | +| `image.pullSecrets` | | Secrets for the image repository | +| `image.repository` | `registry.gitlab.com/gitlab-org/build/cng/gitlab-webservice-ee` | Webservice image repository | +| `image.tag` | | Webservice image tag | +| `init.image.repository` | | initContainer image | +| `init.image.tag` | | initContainer image tag | +| `init.containerSecurityContext.runAsUser` | `1000` | initContainer specific: User ID under which the container should be started | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `keda.enabled` | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `keda.pollingInterval` | `30` | The interval to check each trigger on | +| `keda.cooldownPeriod` | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `keda.minReplicaCount` | | Minimum number of replicas KEDA will scale the resource down to, defaults to `minReplicas` | +| `keda.maxReplicaCount` | | Maximum number of replicas KEDA will scale the resource up to, defaults to `maxReplicas` | +| `keda.fallback` | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `keda.hpaName` | | The name of the HPA resource KEDA will create, defaults 
to `keda-hpa-{scaled-object-name}` | +| `keda.restoreToOriginalReplicaCount` | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `keda.behavior` | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `keda.triggers` | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | +| `metrics.enabled` | `true` | If a metrics endpoint should be made available for scraping | +| `metrics.port` | `8083` | Metrics endpoint port | +| `metrics.path` | `/metrics` | Metrics endpoint path | +| `metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable Prometheus Operator to manage the metrics scraping, note that enabling this removes the `prometheus.io` scrape annotations | +| `metrics.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the ServiceMonitor | +| `metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the ServiceMonitor | +| `metrics.annotations` | | **DEPRECATED** Set explicit metrics annotations. Replaced by template content. | +| `metrics.tls.enabled` | | TLS enabled for the metrics/web_exporter endpoint. Defaults to `tls.enabled`. | +| `metrics.tls.secretName` | | Secret for the metrics/web_exporter endpoint TLS cert and key. Defaults to `tls.secretName`. 
| +| `minio.bucket` | `git-lfs` | Name of storage bucket, when using MinIO | +| `minio.port` | `9000` | Port for MinIO service | +| `minio.serviceName` | `minio-svc` | Name of MinIO service | +| `monitoring.ipWhitelist` | `[0.0.0.0/0]` | List of IPs to whitelist for the monitoring endpoints | +| `monitoring.exporter.enabled` | `false` | Enable webserver to expose Prometheus metrics, this is overridden by `metrics.enabled` if the metrics port is set to the monitoring exporter port | +| `monitoring.exporter.port` | `8083` | Port number to use for the metrics exporter | +| `psql.password.key` | `psql-password` | Key to psql password in psql secret | +| `psql.password.secret` | `gitlab-postgres` | psql secret name | +| `psql.port` | | Set PostgreSQL server port. Takes precedence over `global.psql.port` | +| `puma.disableWorkerKiller` | `true` | Disables Puma worker memory killer | +| `puma.workerMaxMemory` | | The maximum memory (in megabytes) for the Puma worker killer | +| `puma.threads.min` | `4` | The minimum amount of Puma threads | +| `puma.threads.max` | `4` | The maximum amount of Puma threads | +| `rack_attack.git_basic_auth` | `{}` | See [GitLab documentation](https://docs.gitlab.com/administration/settings/protected_paths/) for details | +| `redis.serviceName` | `redis` | Redis service name | +| `global.registry.api.port` | `5000` | Registry port | +| `global.registry.api.protocol` | `http` | Registry protocol | +| `global.registry.api.serviceName` | `registry` | Registry service name | +| `global.registry.enabled` | `true` | Add/Remove registry link in all projects menu | +| `global.registry.tokenIssuer` | `gitlab-issuer` | Registry token issuer | +| `replicaCount` | `1` | Webservice number of replicas | +| `resources.requests.cpu` | `300m` | Webservice minimum CPU | +| `resources.requests.memory` | `1.5G` | Webservice minimum memory | +| `service.externalPort` | `8080` | Webservice exposed port | +| `securityContext.fsGroup` | `1000` | Group ID under which 
the pod should be started | +| `securityContext.runAsUser` | `1000` | User ID under which the pod should be started | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | +| `containerSecurityContext.runAsUser` | `1000` | Allow to overwrite the specific security context user ID under which the container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the Gitaly container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the Gitaly container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `serviceAccount.create` | `false` | Indicates whether or not a ServiceAccount should be created | +| `serviceAccount.enabled` | `false` | Indicates whether or not to use a ServiceAccount | +| `serviceAccount.name` | | Name of the ServiceAccount. 
If not set, the full chart name is used |
+| `serviceLabels` | `{}` | Supplemental service labels |
+| `service.internalPort` | `8080` | Webservice internal port |
+| `service.type` | `ClusterIP` | Webservice service type |
+| `service.workhorseExternalPort` | `8181` | Workhorse exposed port |
+| `service.workhorseInternalPort` | `8181` | Workhorse internal port |
+| `service.loadBalancerIP` | | IP address to assign to LoadBalancer (if supported by cloud provider) |
+| `service.loadBalancerSourceRanges` | | List of IP CIDRs allowed access to LoadBalancer (if supported) Required for service.type = LoadBalancer |
+| `shell.authToken.key` | `secret` | Key to shell token in shell secret |
+| `shell.authToken.secret` | `{Release.Name}-gitlab-shell-secret` | Shell token secret |
+| `shell.port` | `nil` | Port number to use in SSH URLs generated by UI |
+| `shutdown.blackoutSeconds` | `10` | Number of seconds to keep Webservice running after receiving shutdown, note this must be shorter than `deployment.terminationGracePeriodSeconds` |
+| `tls.enabled` | `false` | Webservice TLS enabled |
+| `tls.secretName` | `{Release.Name}-webservice-tls` | Webservice TLS secrets. `secretName` must point to a [Kubernetes TLS secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets). |
+| `tolerations` | `[]` | Toleration labels for pod assignment |
+| `trusted_proxies` | `[]` | See [GitLab documentation](https://docs.gitlab.com/install/installation/#adding-your-trusted-proxies) for details |
+| `workhorse.logFormat` | `json` | Logging format. Valid formats: `json`, `structured`, `text` |
+| `workerProcesses` | `2` | Webservice number of workers |
+| `workhorse.keywatcher` | `true` | Subscribe workhorse to Redis. 
This is **required** by any deployment servicing request to `/api/*`, but can be safely disabled for other deployments | +| `workhorse.shutdownTimeout` | `global.webservice.workerTimeout + 1` (seconds) | Time to wait for all Web requests to clear from Workhorse. Examples: `1min`, `65s`. | +| `workhorse.trustedCIDRsForPropagation` | | A list of CIDR blocks that can be trusted for propagating a correlation ID. The `-propagateCorrelationID` option must also be used in `workhorse.extraArgs` for this to work. See the [Workhorse documentation](https://docs.gitlab.com/development/workhorse/configuration/#propagate-correlation-ids) for more details. | +| `workhorse.trustedCIDRsForXForwardedFor` | | A list of CIDR blocks that can be used to resolve the actual client IP via the `X-Forwarded-For` HTTP header. This is used with `workhorse.trustedCIDRsForPropagation`. See the [Workhorse documentation](https://docs.gitlab.com/development/workhorse/configuration/#trusted-proxies) for more details. | +| `workhorse.containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | +| `workhorse.containerSecurityContext.runAsUser` | `1000` | User ID under which the container should be started | +| `workhorse.containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the container can gain more privileges than its parent process | +| `workhorse.containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `workhorse.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `workhorse.livenessProbe.initialDelaySeconds` | 20 | Delay before liveness probe is initiated | +| `workhorse.livenessProbe.periodSeconds` | 60 | How often to perform the liveness 
probe | +| `workhorse.livenessProbe.timeoutSeconds` | 30 | When the liveness probe times out | +| `workhorse.livenessProbe.successThreshold` | 1 | Minimum consecutive successes for the liveness probe to be considered successful after having failed | +| `workhorse.livenessProbe.failureThreshold` | 3 | Minimum consecutive failures for the liveness probe to be considered failed after having succeeded | +| `workhorse.monitoring.exporter.enabled` | `false` | Enable workhorse to expose Prometheus metrics, this is overridden by `workhorse.metrics.enabled` | +| `workhorse.monitoring.exporter.port` | `9229` | Port number to use for workhorse Prometheus metrics | +| `workhorse.monitoring.exporter.tls.enabled` | `false` | When set to `true`, enables TLS on metrics endpoint. It requires [TLS to be enabled for Workhorse](#gitlab-workhorse). | +| `workhorse.metrics.enabled` | `true` | If a workhorse metrics endpoint should be made available for scraping | +| `workhorse.metrics.port` | `8083` | Workhorse metrics endpoint port | +| `workhorse.metrics.path` | `/metrics` | Workhorse metrics endpoint path | +| `workhorse.metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable Prometheus Operator to manage the Workhorse metrics scraping | +| `workhorse.metrics.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the Workhorse ServiceMonitor | +| `workhorse.metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the Workhorse ServiceMonitor | +| `workhorse.readinessProbe.initialDelaySeconds` | 0 | Delay before readiness probe is initiated | +| `workhorse.readinessProbe.periodSeconds` | 10 | How often to perform the readiness probe | +| `workhorse.readinessProbe.timeoutSeconds` | 2 | When the readiness probe times out | +| `workhorse.readinessProbe.successThreshold` | 1 | Minimum consecutive successes for the readiness probe to be considered successful after having failed | +| 
`workhorse.readinessProbe.failureThreshold` | 3 | Minimum consecutive failures for the readiness probe to be considered failed after having succeeded | +| `workhorse.imageScaler.maxProcs` | 2 | The maximum number of image scaling processes that may run concurrently | +| `workhorse.imageScaler.maxFileSizeBytes` | 250000 | The maximum file size in bytes for images to be processed by the scaler | +| `workhorse.tls.verify` | `true` | When set to `true` forces NGINX Ingress to verify the TLS certificate of Workhorse. For custom CA you need to set `workhorse.tls.caSecretName` as well. Must be set to `false` for self-signed certificates. | +| `workhorse.tls.secretName` | `{Release.Name}-workhorse-tls` | The name of the [TLS Secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) that contains the TLS key and certificate pair. This is required when Workhorse TLS is enabled. | +| `workhorse.tls.caSecretName` | | The name of the Secret that contains the CA certificate. This **is not** a [TLS Secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets), and must have only `ca.crt` key. This is used for TLS verification by NGINX. | +| `webServer` | `puma` | Selects web server (Webservice/Puma) that would be used for request handling | +| `priorityClassName` | `""` | Allow configuring pods `priorityClassName`, this is used to control pod priority in case of eviction | + +## Chart configuration examples + +### extraEnv + +`extraEnv` allows you to expose additional environment variables in all containers in the pods. 
+ +Below is an example use of `extraEnv`: + +```yaml +extraEnv: + SOME_KEY: some_value + SOME_OTHER_KEY: some_other_value +``` + +When the container is started, you can confirm that the environment variables are exposed: + +```shell +env | grep SOME +SOME_KEY=some_value +SOME_OTHER_KEY=some_other_value +``` + +### extraEnvFrom + +`extraEnvFrom` allows you to expose additional environment variables from other data sources in all containers in the pods. +Subsequent variables can be overridden per [deployment](#deployments-settings). + +Below is an example use of `extraEnvFrom`: + +```yaml +extraEnvFrom: + MY_NODE_NAME: + fieldRef: + fieldPath: spec.nodeName + MY_CPU_REQUEST: + resourceFieldRef: + containerName: test-container + resource: requests.cpu + SECRET_THING: + secretKeyRef: + name: special-secret + key: special_token + # optional: boolean +deployments: + default: + extraEnvFrom: + CONFIG_STRING: + configMapKeyRef: + name: useful-config + key: some-string + # optional: boolean +``` + +### image.pullSecrets + +`pullSecrets` allows you to authenticate to a private registry to pull images for a pod. + +Additional details about private registries and their authentication methods can be +found in [the Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). + +Below is an example use of `pullSecrets`: + +```yaml +image: + repository: my.webservice.repository + pullPolicy: Always + pullSecrets: + - name: my-secret-name + - name: my-secondary-secret-name +``` + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. 
+ +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | Map | `{}` | ServiceAccount annotations. | +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `create` | Boolean | `false` | Indicates whether or not a ServiceAccount should be created. | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | +| `name` | String | | Name of the ServiceAccount. If not set, the full chart name is used. | + +### tolerations + +`tolerations` allow you schedule pods on tainted worker nodes + +Below is an example use of `tolerations`: + +```yaml +tolerations: +- key: "node_label" + operator: "Equal" + value: "true" + effect: "NoSchedule" +- key: "node_label" + operator: "Equal" + value: "true" + effect: "NoExecute" +``` + +### annotations + +`annotations` allows you to add annotations to the Webservice pods. For example: + +```yaml +annotations: + kubernetes.io/example-annotation: annotation-value +``` + +### strategy + +`deployment.strategy` allows you to change the deployment update strategy. It defines how the pods will be recreated when deployment is updated. When not provided, the cluster default is used. +For example, if you don't want to create extra pods when the rolling update starts and change max unavailable pods to 50%: + +```yaml +deployment: + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 50% +``` + +You can also change the type of update strategy to `Recreate`, but be careful as it will kill all pods before scheduling new ones, and the web UI will be unavailable until the new pods are started. 
In this case, you don't need to define `rollingUpdate`, only `type`: + +```yaml +deployment: + strategy: + type: Recreate +``` + +For more details, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). + +### TLS + +A Webservice pod runs two containers: + +- `gitlab-workhorse` +- `webservice` + +#### `gitlab-workhorse` + +Workhorse supports TLS for both web and metrics endpoints. This will secure the +communication between Workhorse and other components, in particular `nginx-ingress`, +`gitlab-shell`, and `gitaly`. The TLS certificate should include the Workhorse +Service host name (e.g. `RELEASE-webservice-default.default.svc`) in the Common +Name (CN) or Subject Alternate Name (SAN). + +Note that [multiple deployments of Webservice](#deployments-settings) can exist, +so you need to prepare the TLS certificate for different service names. This +can be achieved by either multiple SAN or wildcard certificate. + +Once the TLS certificate is generated, create a [Kubernetes TLS Secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) for it. You also need to create +another Secret that only contains the CA certificate of the TLS certificate +with `ca.crt` key. + +The TLS can be enabled for `gitlab-workhorse` container by setting `global.workhorse.tls.enabled` +to `true`. You can pass custom Secret names to `gitlab.webservice.workhorse.tls.secretName` and +`global.certificates.customCAs` accordingly. + +When `gitlab.webservice.workhorse.tls.verify` is `true` (it is by default), you +also need to pass the CA certificate Secret name to `gitlab.webservice.workhorse.tls.caSecretName`. +This is necessary for self-signed certificates and custom CA. This Secret is used +by NGINX to verify the TLS certificate of Workhorse. 
+ +```yaml +global: + workhorse: + tls: + enabled: true + certificates: + customCAs: + - secret: gitlab-workhorse-ca +gitlab: + webservice: + workhorse: + tls: + verify: true + # secretName: gitlab-workhorse-tls + caSecretName: gitlab-workhorse-ca + monitoring: + exporter: + enabled: true + tls: + enabled: true +``` + +TLS on the metrics endpoints of the `gitlab-workhorse` container is inherited from +`global.workhorse.tls.enabled`. Note that TLS on metrics endpoint is only available +when TLS is enabled for Workhorse. The metrics listener uses the same TLS certificate +that is specified by `gitlab.webservice.workhorse.tls.secretName`. + +TLS certificates used for metrics endpoints may require additional considerations for +the included subject alternative names (SANs), particularly if using the included Prometheus +Helm chart. For more information, see [Configure Prometheus to scrape TLS-enabled endpoints](../../../installation/tools.md#configure-prometheus-to-scrape-tls-enabled-endpoints). + +#### `webservice` + +The primary use case for enabling TLS is to provide encryption via HTTPS +for [scraping Prometheus metrics](https://docs.gitlab.com/administration/monitoring/prometheus/gitlab_metrics/). + +For Prometheus to scrape the `/metrics/` endpoint using HTTPS, additional +configuration is required for the certificate's `CommonName` attribute or +a `SubjectAlternativeName` entry. See +[Configuring Prometheus to scrape TLS-enabled endpoints](../../../installation/tools.md#configure-prometheus-to-scrape-tls-enabled-endpoints) +for those requirements. + +TLS can be enabled on the `webservice` container by the settings `gitlab.webservice.tls.enabled`: + +```yaml +gitlab: + webservice: + tls: + enabled: true + # secretName: gitlab-webservice-tls +``` + +`secretName` must point to a [Kubernetes TLS secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets). 
+For example, to create a TLS secret with a local certificate and key: + +```shell +kubectl create secret tls <secret name> --cert=path/to/puma.crt --key=path/to/puma.key +``` + +## Using the Community Edition of this chart + +By default, the Helm charts use the Enterprise Edition of GitLab. If desired, you +can use the Community Edition instead. Learn more about the +[differences between the two](https://about.gitlab.com/install/ce-or-ee/). + +In order to use the Community Edition, set `image.repository` to +`registry.gitlab.com/gitlab-org/build/cng/gitlab-webservice-ce` and `workhorse.image` +to `registry.gitlab.com/gitlab-org/build/cng/gitlab-workhorse-ce`. + +## Global settings + +We share some common global settings among our charts. See the [Globals Documentation](../../globals.md) +for common configuration options, such as GitLab and Registry hostnames. + +## Deployments settings + +This chart has the ability to create multiple Deployment objects and their related +resources. This feature allows requests to the GitLab application to be distributed between multiple sets of Pods using path based routing. + +The keys of this Map (`default` in this example) are the "name" for each. `default` +will have a Deployment, Service, HorizontalPodAutoscaler, PodDisruptionBudget, and +optional Ingress created with `RELEASE-webservice-default`. + +Any property not provided will inherit from the `gitlab-webservice` chart defaults. + +```yaml +deployments: + default: + ingress: + path: # Does not inherit or default. Leave blank to disable Ingress. 
+ pathType: Prefix + provider: nginx + annotations: + # inherits `ingress.annotations` + proxyConnectTimeout: # inherits `ingress.proxyConnectTimeout` + proxyReadTimeout: # inherits `ingress.proxyReadTimeout` + proxyBodySize: # inherits `ingress.proxyBodySize` + deployment: + annotations: # map + labels: # map + # inherits `deployment` + pod: + labels: # additional labels to .podLabels + annotations: # map + # inherit from .Values.annotations + service: + labels: # additional labels to .serviceLabels + annotations: # additional annotations to .service.annotations + # inherits `service.annotations` + hpa: + minReplicas: # defaults to .minReplicas + maxReplicas: # defaults to .maxReplicas + metrics: # optional replacement of HPA metrics definition + # inherits `hpa` + pdb: + maxUnavailable: # inherits `maxUnavailable` + resources: # `resources` for `webservice` container + # inherits `resources` + workhorse: # map + # inherits `workhorse` + extraEnv: # + # inherits `extraEnv` + extraEnvFrom: # + # inherits `extraEnvFrom` + puma: # map + # inherits `puma` + workerProcesses: # inherits `workerProcesses` + shutdown: + # inherits `shutdown` + nodeSelector: # map + # inherits `nodeSelector` + tolerations: # array + # inherits `tolerations` +``` + +### Deployments Ingress + +Each `deployments` entry will inherit from chart-wide [Ingress settings](#ingress-settings). Any value presented here will override those provided there. Outside of `path`, all settings are identical to those. + +```yaml +webservice: + deployments: + default: + ingress: + path: / + api: + ingress: + path: /api +``` + +The `path` property is directly populated into the Ingress's `path` property, and allows one to control URI paths which are directed to each service. In the example above, +`default` acts as the catch-all path, and `api` receives all traffic under `/api` + +You can disable a given Deployment from having an associated Ingress resource created by setting `path` to empty. 
See below, where `internal-api` will never receive external traffic. + +```yaml +webservice: + deployments: + default: + ingress: + path: / + api: + ingress: + path: /api + internal-api: + ingress: + path: +``` + +## Ingress settings + +| Name | Type | Default | Description | +| :-------------------------------- | :-----: | :------------------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `ingress.apiVersion` | String | | Value to use in the `apiVersion` field. | +| `ingress.annotations` | Map | See [below](#annotations) | These annotations will be used for every Ingress. For example: `ingress.annotations."nginx\.ingress\.kubernetes\.io/enable-access-log"=true`. | +| `ingress.configureCertmanager` | Boolean | | Toggles Ingress annotation `cert-manager.io/issuer` and `acme.cert-manager.io/http01-edit-in-place`. For more information see the [TLS requirement for GitLab Pages](../../../installation/tls.md). | +| `ingress.enabled` | Boolean | `false` | Setting that controls whether to create Ingress objects for services that support them. When `false`, the `global.ingress.enabled` setting value is used. | +| `ingress.proxyBodySize` | String | `512m` | [See Below](#proxybodysize). | +| `ingress.serviceUpstream` | Boolean | `true` | [See Below](#serviceupstream). | +| `ingress.tls.enabled` | Boolean | `true` | When set to `false`, you disable TLS for GitLab Webservice. This is mainly useful for cases in which you cannot use TLS termination at Ingress-level, like when you have a TLS-terminating proxy before the Ingress Controller. | +| `ingress.tls.secretName` | String | (empty) | The name of the Kubernetes TLS Secret that contains a valid certificate and key for the GitLab URL. When not set, the `global.ingress.tls.secretName` value is used instead. 
| +| `ingress.tls.smartcardSecretName` | String | (empty) | The name of the Kubernetes TLS Secret that contains a valid certificate and key for the GitLab smartcard URL if enabled. When not set, the `global.ingress.tls.secretName` value is used instead. | +| `ingress.tls.useGeoClass` | Boolean | false | Override the IngressClass with the Geo Ingress class (`global.geo.ingressClass`). Required for primary Geo sites. | + +### annotations + +`annotations` is used to set annotations on the Webservice Ingress. + +### serviceUpstream + +This helps balance traffic to the Webservice pods more evenly by telling NGINX to directly +contact the Service itself as the upstream. For more information, see the +[NGINX docs](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#service-upstream). + +To override this, set: + +```yaml +gitlab: + webservice: + ingress: + serviceUpstream: "false" +``` + +### proxyBodySize + +`proxyBodySize` is used to set the NGINX proxy maximum body size. This is commonly +required to allow a larger Docker image than the default. +It is equivalent to the `nginx['client_max_body_size']` configuration in a +[Linux package installation](https://docs.gitlab.com/omnibus/settings/nginx.html#request-entity-too-large). +As an alternative option, +you can set the body size with either of the following two parameters too: + +- `gitlab.webservice.ingress.annotations."nginx\.ingress\.kubernetes\.io/proxy-body-size"` +- `global.ingress.annotations."nginx\.ingress\.kubernetes\.io/proxy-body-size"` + +### Extra Ingress + +An extra Ingress can be deployed by setting `extraIngress.enabled=true`. The Ingress +is named as the default Ingress with the `-extra` suffix and supports the same +settings as the default Ingress. + +## Resources + +### Memory requests/limits + +Each pod spawns an amount of workers equal to `workerProcesses`, who each use +some baseline amount of memory. 
We recommend: + +- A minimum of 1.25GB per worker (`requests.memory`) +- A maximum of 1.5GB per worker, plus 1GB for the primary (`limits.memory`) + +Note that required resources are dependent on the workload generated by users +and may change in the future based on changes or upgrades in the GitLab application. + +Default: + +```yaml +workerProcesses: 2 +resources: + requests: + memory: 2.5G # = 2 * 1.25G +# limits: +# memory: 4G # = (2 * 1.5G) + 950M +``` + +With 4 workers configured: + +```yaml +workerProcesses: 4 +resources: + requests: + memory: 5G # = 4 * 1.25G +# limits: +# memory: 7G # = (4 * 1.5G) + 950M +``` + +## External Services + +### Redis + +The Redis documentation has been consolidated in the [globals](../../globals.md#configure-redis-settings) +page. Please consult this page for the latest Redis configuration options. + +### PostgreSQL + +The PostgreSQL documentation has been consolidated in the [globals](../../globals.md#configure-postgresql-settings) +page. Please consult this page for the latest PostgreSQL configuration options. + +### Gitaly + +Gitaly is configured by [global settings](../../globals.md). Please see the +[Gitaly configuration documentation](../../globals.md#configure-gitaly-settings). + +### MinIO + +```yaml +minio: + serviceName: 'minio-svc' + port: 9000 +``` + +| Name | Type | Default | Description | +| :------------ | :-----: | :---------- | :------------------------------------------------------ | +| `port` | Integer | `9000` | Port number to reach the MinIO `Service` on. | +| `serviceName` | String | `minio-svc` | Name of the `Service` that is exposed by the MinIO pod. 
| + +### Registry + +```yaml +registry: + host: registry.example.com + port: 443 + api: + protocol: http + host: registry.example.com + serviceName: registry + port: 5000 + tokenIssuer: gitlab-issuer + certificate: + secret: gitlab-registry + key: registry-auth.key +``` + +| Name | Type | Default | Description | +| :------------------- | :-----: | :-------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `api.host` | String | | The hostname of the Registry server to use. This can be omitted in lieu of `api.serviceName`. | +| `api.port` | Integer | `5000` | The port on which to connect to the Registry API. | +| `api.protocol` | String | | The protocol Webservice should use to reach the Registry API. | +| `api.serviceName` | String | `registry` | The name of the `service` which is operating the Registry server. If this is present, and `api.host` is not, the chart will template the hostname of the service (and current `.Release.Name`) in place of the `api.host` value. This is convenient when using Registry as a part of the overall GitLab chart. | +| `certificate.key` | String | | The name of the `key` in the `Secret` which houses the certificate bundle that will be provided to the [registry](https://hub.docker.com/_/registry/) container as `auth.token.rootcertbundle`. | +| `certificate.secret` | String | | The name of the [Kubernetes Secret](https://kubernetes.io/docs/concepts/configuration/secret/) that houses the certificate bundle to be used to verify the tokens created by the GitLab instance(s). | +| `host` | String | | The external hostname to use for providing Docker commands to users in the GitLab UI. Falls back to the value set in the `registry.hostname` template. 
Which determines the registry hostname based on the values set in `global.hosts`. See the [Globals Documentation](../../globals.md) for more information. | +| `port` | Integer | | The external port used in the hostname. Using port `80` or `443` will result in the URLs being formed with `http`/`https`. Other ports will all use `http` and append the port to the end of hostname, for example `http://registry.example.com:8443`. | +| `tokenIssuer` | String | `gitlab-issuer` | The name of the auth token issuer. This must match the name used in the Registry's configuration, as it incorporated into the token when it is sent. The default of `gitlab-issuer` is the same default we use in the Registry chart. | + +## Chart settings + +The following values are used to configure the Webservice Pods. + +| Name | Type | Default | Description | +| :---------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `workerProcesses` | Integer | `2` | The number of Webservice workers to run per pod. You must have at least `2` workers available in your cluster in order for GitLab to function properly. Note that increasing the `workerProcesses` will increase the memory required by approximately `400MB` per worker, so you should update the pod `resources` accordingly. | +| `minReplicas` | Integer | `2` | Minimum number of replicas | +| `maxReplicas` | Integer | `10` | Maximum number of replicas | +| `maxUnavailable` | Integer | `1` | Limit of maximum number of Pods to be unavailable | + +### Metrics + +Metrics can be enabled with the `metrics.enabled` value and use the GitLab +monitoring exporter to expose a metrics port. 
Pods are either given Prometheus +annotations or if `metrics.serviceMonitor.enabled` is `true` a Prometheus +Operator ServiceMonitor is created. Metrics can alternatively be scraped from +the `/-/metrics` endpoint, but this requires [GitLab Prometheus metrics](https://docs.gitlab.com/administration/monitoring/prometheus/gitlab_metrics/) +to be enabled in the Admin area. The GitLab Workhorse metrics can also be +exposed via `workhorse.metrics.enabled` but these can't be collected using the +Prometheus annotations so either require +`workhorse.metrics.serviceMonitor.enabled` to be `true` or external Prometheus +configuration. + +### GitLab Shell + +GitLab Shell uses an Auth Token in its communication with Webservice. Share the token +with GitLab Shell and Webservice using a shared Secret. + +```yaml +shell: + authToken: + secret: gitlab-shell-secret + key: secret + port: +``` + +| Name | Type | Default | Description | +| :----------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------ | +| `authToken.key` | String | | Defines the name of the key in the secret (below) that contains the authToken. | +| `authToken.secret` | String | | Defines the name of the Kubernetes `Secret` to pull from. | +| `port` | Integer | `22` | The port number to use in the generation of SSH URLs within the GitLab UI. Controlled by `global.shell.port`. | + +### WebServer options + +Current version of chart supports Puma web server. 
+ +Puma unique options: + +| Name | Type | Default | Description | +| :--------------------- | :-----: | :------ | :----------------------------------------------------------- | +| `puma.workerMaxMemory` | Integer | | The maximum memory (in megabytes) for the Puma worker killer | +| `puma.threads.min` | Integer | `4` | The minimum amount of Puma threads | +| `puma.threads.max` | Integer | `4` | The maximum amount of Puma threads | + +## Configuring the `networkpolicy` + +This section controls the +[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/). +This configuration is optional and is used to limit Egress and Ingress of the +Pods to specific endpoints. + +| Name | Type | Default | Description | +| :---------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | This setting enables the `NetworkPolicy` | +| `ingress.enabled` | Boolean | `false` | When set to `true`, the `Ingress` network policy will be activated. This will block all Ingress connections unless rules are specified. | +| `ingress.rules` | Array | `[]` | Rules for the Ingress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below | +| `egress.enabled` | Boolean | `false` | When set to `true`, the `Egress` network policy will be activated. This will block all egress connections unless rules are specified. 
| +| `egress.rules` | Array | `[]` | Rules for the egress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below | + +### Example Network Policy + +The webservice service requires Ingress connections for the Prometheus +exporter if enabled, traffic coming from the NGINX Ingress and several GitLab pods. +Typically it requires Egress connections to various places. +This example adds the following network policy: + +- Allows Ingress requests: + - From the pods `gitaly`, `gitlab-pages`, `gitlab-shell`, `kas`, `mailroom` and `nginx-ingress` to port `8181` + - From the `Prometheus` pod to port `8080`, `8083` and `9229` +- Allows Egress requests: + - To the `gitaly` pod to port `8075` + - To the `kas` pod to port `8153` + - To `kube-dns` to port `53` + - To the `registry` pod to port `5000` + - To external database `172.16.0.10/32` to port `5432` + - To external Redis `172.16.0.11/32` to port `6379` + - To the internet `0.0.0.0/0` to port `443` + - To endpoints like AWS VPC endpoint for S3 or STS `172.16.1.0/24` to port `443` + +_Note the example provided is only an example and may not be complete_ + +_Note the Webservice requires outbound connectivity to the public internet +for images on [external object storage](../../../advanced/external-object-storage)_ + +The example is based on the assumption that `kube-dns` was deployed +to the namespace `kube-system`, `prometheus` was deployed to the namespace +`monitoring` and `nginx-ingress` was deployed to the namespace `nginx-ingress`. 
+ +```yaml +networkpolicy: + enabled: true + ingress: + enabled: true + rules: + - from: + - podSelector: + matchLabels: + app: gitaly + ports: + - port: 8181 + - from: + - podSelector: + matchLabels: + app: gitlab-pages + ports: + - port: 8181 + - from: + - podSelector: + matchLabels: + app: gitlab-shell + ports: + - port: 8181 + - from: + - podSelector: + matchLabels: + app: kas + ports: + - port: 8181 + - from: + - podSelector: + matchLabels: + app: mailroom + ports: + - port: 8181 + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: nginx-ingress + podSelector: + matchLabels: + app: nginx-ingress + component: controller + ports: + - port: 8181 + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: prometheus + component: server + release: gitlab + ports: + - port: 9229 + - port: 8080 + - port: 8083 + egress: + enabled: true + rules: + - to: + - podSelector: + matchLabels: + app: gitaly + ports: + - port: 8075 + - to: + - podSelector: + matchLabels: + app: kas + ports: + - port: 8153 + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + ports: + - port: 443 + - to: + - ipBlock: + cidr: 172.16.0.10/32 + ports: + - port: 5432 + - to: + - ipBlock: + cidr: 172.16.0.11/32 + ports: + - port: 6379 + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: UDP +``` + +### LoadBalancer Service + +If the `service.type` is set to `LoadBalancer`, you can optionally specify `service.loadBalancerIP` to create +the `LoadBalancer` with a user-specified IP (if your cloud provider supports it). + +When the `service.type` is set to `LoadBalancer` you must also set `service.loadBalancerSourceRanges` to restrict +the CIDR ranges that can access the `LoadBalancer` (if your cloud provider supports it). 
+This is currently required due to an issue where [metric ports are exposed](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2500). + +Additional information about the `LoadBalancer` service type can be found in +[the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/#loadbalancer) + +```yaml +service: + type: LoadBalancer + loadBalancerIP: 1.2.3.4 + loadBalancerSourceRanges: + - 10.0.0.0/8 +``` + +## Configuring KEDA + +This `keda` section enables the installation of [KEDA](https://keda.sh/) `ScaledObjects` instead of regular `HorizontalPodAutoscalers`. +This configuration is optional and can be used when there is a need for autoscaling based on custom or external metrics. + +Most settings default to the values set in the `hpa` section where applicable. + +If the following are true, CPU and memory triggers are added automatically based on the CPU and memory thresholds set in the `hpa` section: + +- `triggers` is not set. +- The corresponding `request.cpu.request` or `request.memory.request` setting is also set to a non-zero value. + +If no triggers are set, the `ScaledObject` is not created. + +Refer to the [KEDA documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/) for more details about those settings. 
+ +| Name | Type | Default | Description | +| :---------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `pollingInterval` | Integer | `30` | The interval to check each trigger on | +| `cooldownPeriod` | Integer | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `minReplicaCount` | Integer | | Minimum number of replicas KEDA will scale the resource down to, defaults to `minReplicas` | +| `maxReplicaCount` | Integer | | Maximum number of replicas KEDA will scale the resource up to, defaults to `maxReplicas` | +| `fallback` | Map | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `hpaName` | String | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `restoreToOriginalReplicaCount` | Boolean | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `behavior` | Map | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `triggers` | Array | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | diff --git a/chart/doc/charts/globals.md b/chart/doc/charts/globals.md index 4f67a15443f8fb733df1b48fcf1b623115036058..3c3952a46386ce1d098d222e35771fbc4e27c43d 100644 --- a/chart/doc/charts/globals.md +++ b/chart/doc/charts/globals.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see 
https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure charts using globals --- -# Configure charts using globals +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} To reduce configuration duplication when installing our wrapper Helm chart, several configuration settings are available to be set in the `global` section of `values.yaml`. @@ -82,8 +84,8 @@ global: | Name | Type | Default | Description | | :------------------------ | :-------: | :------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `domain` | String | `example.com` | The base domain. GitLab and Registry will be exposed on the subdomain of this setting. This defaults to `example.com`, but is not used for hosts that have their `name` property configured. See the `gitlab.name`, `minio.name`, and `registry.name` sections below. | -| `externalIP` | | `nil` | Set the external IP address that will be claimed from the provider. This will be templated into the [NGINX chart](nginx/index.md#configuring-nginx), in place of the more complex `nginx.service.loadBalancerIP`. | -| `externalGeoIP` | | `nil` | Same as `externalIP` but for the [NGINX Geo chart](nginx/index.md#gitlab-geo). Needed to configure a static IP for [GitLab Geo](../advanced/geo/index.md) sites using a unified URL. Must be different from `externalIP`. | +| `externalIP` | | `nil` | Set the external IP address that will be claimed from the provider. This will be templated into the [NGINX chart](nginx/_index.md#configuring-nginx), in place of the more complex `nginx.service.loadBalancerIP`. 
| +| `externalGeoIP` | | `nil` | Same as `externalIP` but for the [NGINX Geo chart](nginx/_index.md#gitlab-geo). Needed to configure a static IP for [GitLab Geo](../advanced/geo/_index.md) sites using a unified URL. Must be different from `externalIP`. | | `https` | Boolean | `true` | If set to true, you will need to ensure the NGINX chart has access to the certificates. In cases where you have TLS-termination in front of your Ingresses, you probably want to look at [`global.ingress.tls.enabled`](#configure-ingress-settings). Set to false for external URLs to use `http://` instead of `https`. | | `hostSuffix` | String | | [See Below](#hostsuffix). | | `gitlab.https` | Boolean | `false` | If `hosts.https` or `gitlab.https` are `true`, the GitLab external URL will use `https://` instead of `http://`. | @@ -188,7 +190,7 @@ For those users who need to have their `path` definitions end in `/*` to match t `ingress.class: alb` in AWS, or another such provider. This setting ensures that all `path` entries in Ingress resources throughout this chart are rendered with this. -The only exception is when populating the [`gitlab/webservice` deployments settings](gitlab/webservice/index.md#deployments-settings), where `path` must be specified. +The only exception is when populating the [`gitlab/webservice` deployments settings](gitlab/webservice/_index.md#deployments-settings), where `path` must be specified. ### Cloud provider LoadBalancers @@ -230,11 +232,14 @@ on enabling this, see [release notes](../releases/7_0.md#bundled-certmanager). ## GitLab Version -NOTE: +{{< alert type="note" >}} + This value should only used for development purposes, or by explicit request of GitLab support. 
Please avoid using this value on production environments and set the version as described in [Deploy using Helm](../installation/deployment.md#deploy-using-helm) +{{< /alert >}} + The GitLab version used in the default image tag for the charts can be changed using the `global.gitlabVersion` key: @@ -271,7 +276,7 @@ GitLab is using two database connections: one for `main` database and one for `ci`. By default, they point to the same PostgreSQL database. The values under `global.psql` are defaults and are applied to both database -configurations. If you want to use [two databases](https://docs.gitlab.com/ee/administration/postgresql/multiple_databases.html), +configurations. If you want to use [two databases](https://docs.gitlab.com/administration/postgresql/multiple_databases/), you can specify the connection details in `global.psql.main` and `global.psql.ci`. ```yaml @@ -340,11 +345,14 @@ from the global, by design. ### PostgreSQL SSL -NOTE: +{{< alert type="note" >}} + SSL support is mutual TLS only. See [issue #2034](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2034) and [issue #1817](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1817). +{{< /alert >}} + If you want to connect GitLab with a PostgreSQL database over mutual TLS, create a secret containing the client key, client certificate and server certificate authority as different secret keys. Then describe the secret's structure using the `global.psql.ssl` mapping. @@ -379,11 +387,11 @@ global: ### PostgreSQL load balancing This feature requires the use of an -[external PostgreSQL](../advanced/external-db/index.md), as this chart does not +[external PostgreSQL](../advanced/external-db/_index.md), as this chart does not deploy PostgreSQL in an HA fashion. The Rails components in GitLab have the ability to -[make use of PostgreSQL clusters to load balance read-only queries](https://docs.gitlab.com/ee/administration/postgresql/database_load_balancing.html). 
+[make use of PostgreSQL clusters to load balance read-only queries](https://docs.gitlab.com/administration/postgresql/database_load_balancing/). This feature can be configured in two fashions: @@ -404,8 +412,8 @@ global: Configuration of service discovery can be more complex. For complete details of this configuration, the parameters and their associated -behaviors, see [Service Discovery](https://docs.gitlab.com/ee/administration/postgresql/database_load_balancing.html#service-discovery) -in the [GitLab Administration documentation](https://docs.gitlab.com/ee/administration/index.html). +behaviors, see [Service Discovery](https://docs.gitlab.com/administration/postgresql/database_load_balancing/#service-discovery) +in the [GitLab Administration documentation](https://docs.gitlab.com/administration/). ```yaml global: @@ -424,7 +432,7 @@ global: ``` Further tuning is also available, in regards to the -[handling of stale reads](https://docs.gitlab.com/ee/administration/postgresql/database_load_balancing.html#handling-stale-reads). +[handling of stale reads](https://docs.gitlab.com/administration/postgresql/database_load_balancing/#handling-stale-reads). The GitLab Administration documentation covers these items in detail, and those properties can be added directly under `load_balancing`. @@ -439,7 +447,11 @@ global: ### Configure multiple database connections -> - The `gitlab:db:decomposition:connection_status` Rake task was [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/111927) in GitLab 15.11. +{{< history >}} + +- The `gitlab:db:decomposition:connection_status` Rake task was [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/111927) in GitLab 15.11. + +{{< /history >}} In GitLab 16.0, GitLab defaults to using two database connections that point to the same PostgreSQL database. @@ -452,7 +464,7 @@ By default we use a single, non-replicated Redis instance. 
If a highly available Redis is required, we recommend using an external Redis instance. You can bring an external Redis instance by setting `redis.install=false`, and -following our [advanced documentation](../advanced/external-redis/index.md) for +following our [advanced documentation](../advanced/external-redis/_index.md) for configuration. ```yaml @@ -548,7 +560,7 @@ continue to apply with the Sentinel support unless re-specified in the table abo #### Redis Sentinel password support -> - [Introduced](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/3792) in GitLab 17.1. +{{< history >}} + +- [Introduced](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/3792) in GitLab 17.1. + +{{< /history >}} ```yaml redis: @@ -583,7 +599,7 @@ global: for all Sentinel instances. Note that `sentinelAuth` cannot be overridden with [Redis instance-specific settings](#multiple-redis-support) -or [`global.redis.redisYmlOverride`](../advanced/external-redis/index.md#redisyml-override). +or [`global.redis.redisYmlOverride`](../advanced/external-redis/_index.md#redisyml-override). ### Multiple Redis support @@ -606,7 +622,7 @@ for different persistence classes, currently: Any number of the instances may be specified. Any instances not specified will be handled by the primary Redis instance specified by `global.redis.host` or use the deployed Redis instance from the chart. -The only exception is for the [GitLab agent server (KAS)](gitlab/kas/index.md), which looks for Redis configuration in the following order: +The only exception is for the [GitLab agent server (KAS)](gitlab/kas/_index.md), which looks for Redis configuration in the following order: 1. `global.redis.kas` 1. `global.redis.sharedState` @@ -775,9 +791,9 @@ global: ``` -For more details on `bucket`, `certificate`, `httpSecret`, and `notificationSecret` settings, see the documentation within the [registry chart](registry/index.md). 
+For more details on `bucket`, `certificate`, `httpSecret`, and `notificationSecret` settings, see the documentation within the [registry chart](registry/_index.md). -For details on `enabled`, `host`, `api` and `tokenIssuer` see documentation for [command line options](../installation/command-line-options.md) and [webcervice](gitlab/webservice/index.md) +For details on `enabled`, `host`, `api` and `tokenIssuer` see documentation for [command line options](../installation/command-line-options.md) and [webservice](gitlab/webservice/_index.md) `host` is used to override autogenerated external registry hostname reference. @@ -848,11 +864,11 @@ RPC access to Git repositories, which handles all Git calls made by GitLab. Administrators can choose to use Gitaly nodes in the following ways: -- [Internal to the chart](#internal), as part of a `StatefulSet` via the [Gitaly chart](gitlab/gitaly/index.md). +- [Internal to the chart](#internal), as part of a `StatefulSet` via the [Gitaly chart](gitlab/gitaly/_index.md). - [External to the chart](#external), as external pets. - [Mixed environment](#mixed) using both internal and external nodes. -See [Repository Storage Paths](https://docs.gitlab.com/ee/administration/repository_storage_paths.html) +See [Repository Storage Paths](https://docs.gitlab.com/administration/repository_storage_paths/) documentation for details on managing which nodes will be used for new projects. If `gitaly.host` is provided, `gitaly.internal` and `gitaly.external` properties will _be ignored_. @@ -865,14 +881,14 @@ See [issue #1992](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1992) for #### Internal The `internal` key currently consists of only one key, `names`, which is a list of -[storage names](https://docs.gitlab.com/ee/administration/repository_storage_paths.html) +[storage names](https://docs.gitlab.com/administration/repository_storage_paths/) to be managed by the chart. 
For each listed name, _in logical order_, one pod will be spawned, named `${releaseName}-gitaly-${ordinal}`, where `ordinal` is the index within the `names` list. If dynamic provisioning is enabled, the `PersistentVolumeClaim` will match. This list defaults to `['default']`, which provides for 1 pod related to one -[storage path](https://docs.gitlab.com/ee/administration/repository_storage_paths.html). +[storage path](https://docs.gitlab.com/administration/repository_storage_paths/). Manual scaling of this item is required, by adding or removing entries in `gitaly.internal.names`. When scaling down, any repository that has not been moved @@ -889,18 +905,18 @@ can be found in the examples folder. The `external` key provides a configuration for Gitaly nodes external to the cluster. Each item of this list has 3 keys: -- `name`: The name of the [storage](https://docs.gitlab.com/ee/administration/repository_storage_paths.html). - An entry with [`name: default` is required](https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#gitlab-requires-a-default-repository-storage). +- `name`: The name of the [storage](https://docs.gitlab.com/administration/repository_storage_paths/). + An entry with [`name: default` is required](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#gitlab-requires-a-default-repository-storage). - `hostname`: The host of Gitaly services. - `port`: (optional) The port number to reach the host on. Defaults to `8075`. - `tlsEnabled`: (optional) Override `global.gitaly.tls.enabled` for this particular entry. -We provide an [advanced configuration](../advanced/index.md) guide for -[using an external Gitaly service](../advanced/external-gitaly/index.md). You can also +We provide an [advanced configuration](../advanced/_index.md) guide for +[using an external Gitaly service](../advanced/external-gitaly/_index.md). 
You can also find sample [configuration of multiple external services](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/examples/gitaly/values-multiple-external.yaml) in the examples folder. -You may use an external [Praefect](https://docs.gitlab.com/ee/administration/gitaly/praefect.html) +You may use an external [Praefect](https://docs.gitlab.com/administration/gitaly/praefect/) to provide highly available Gitaly services. Configuration of the two is interchangeable, as from the viewpoint of the clients, there is no difference. @@ -908,7 +924,7 @@ interchangeable, as from the viewpoint of the clients, there is no difference. It is possible to use both internal and external Gitaly nodes, but be aware that: -- There [must always be a node named `default`](https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#gitlab-requires-a-default-repository-storage), which Internal provides by default. +- There [must always be a node named `default`](https://docs.gitlab.com/administration/gitaly/configure_gitaly/#gitlab-requires-a-default-repository-storage), which Internal provides by default. - External nodes will be populated first, then Internal. A sample [configuration of mixed internal and external nodes](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/examples/gitaly/values-multiple-mixed.yaml) @@ -945,7 +961,7 @@ Praefect is disabled by default. When enabled with no extra settings, 3 Gitaly r To enable Praefect with default settings, set `global.praefect.enabled=true`. -See the [Praefect documentation](https://docs.gitlab.com/ee/administration/gitaly/praefect.html) for details on how to operate a Gitaly cluster using Praefect. +See the [Praefect documentation](https://docs.gitlab.com/administration/gitaly/praefect/) for details on how to operate a Gitaly cluster using Praefect. 
### Global settings for Praefect @@ -964,7 +980,7 @@ global: | Name | Type | Default | Description | | ---- | ---- | ------- | ----------- | | enabled | Boolean | false | Whether or not to enable Praefect | -| virtualStorages | List | See [multiple virtual storages](https://docs.gitlab.com/ee/administration/gitaly/praefect.html#multiple-virtual-storages) above. | The list of desired virtual storages (each backed by a Gitaly StatefulSet) | +| virtualStorages | List | See [multiple virtual storages](https://docs.gitlab.com/administration/gitaly/praefect/#multiple-virtual-storages) above. | The list of desired virtual storages (each backed by a Gitaly StatefulSet) | | dbSecret.secret | String | | The name of the secret to use for authenticating with the database | | dbSecret.key | String | | The name of the key in `dbSecret.secret` to use | | psql.host | String | | The hostname of the database server to use (when using an external database) | @@ -975,7 +991,7 @@ global: ## Configure MinIO settings The GitLab global MinIO settings are located under the `global.minio` key. For more -details on these settings, see the documentation within the [MinIO chart](minio/index.md). +details on these settings, see the documentation within the [MinIO chart](minio/_index.md). ```yaml global: @@ -986,8 +1002,8 @@ global: ## Configure appConfig settings -The [Webservice](gitlab/webservice/index.md), [Sidekiq](gitlab/sidekiq/index.md), and -[Gitaly](gitlab/gitaly/index.md) charts share multiple settings, which are configured +The [Webservice](gitlab/webservice/_index.md), [Sidekiq](gitlab/sidekiq/_index.md), and +[Gitaly](gitlab/gitaly/_index.md) charts share multiple settings, which are configured with the `global.appConfig` key. 
```yaml @@ -1147,18 +1163,18 @@ application are described below: |:----------------------------------- |:-------:|:------- |:----------- | | `cdnHost` | String | (empty) | Sets a base URL for a CDN to serve static assets (for example, `https://mycdnsubdomain.fictional-cdn.com`). | | `contentSecurityPolicy` | Struct | | [See below](#content-security-policy). | -| `enableUsagePing` | Boolean | `true` | A flag to disable the [usage ping support](https://docs.gitlab.com/ee/administration/settings/usage_statistics.html). | -| `enableSeatLink` | Boolean | `true` | A flag to disable the [seat link support](https://docs.gitlab.com/ee/subscriptions/#seat-link). | -| `enableImpersonation` | | `nil` | A flag to disable [user impersonation by Administrators](https://docs.gitlab.com/ee/api/index.html#disable-impersonation). | -| `applicationSettingsCacheSeconds` | Integer | 60 | An interval value (in seconds) to invalidate the [application settings cache](https://docs.gitlab.com/ee/administration/application_settings_cache.html). | +| `enableUsagePing` | Boolean | `true` | A flag to disable the [usage ping support](https://docs.gitlab.com/administration/settings/usage_statistics/). | +| `enableSeatLink` | Boolean | `true` | A flag to disable the [seat link support](https://docs.gitlab.com/subscriptions/#seat-link). | +| `enableImpersonation` | | `nil` | A flag to disable [user impersonation by Administrators](https://docs.gitlab.com/api/#disable-impersonation). | +| `applicationSettingsCacheSeconds` | Integer | 60 | An interval value (in seconds) to invalidate the [application settings cache](https://docs.gitlab.com/administration/application_settings_cache/). | | `usernameChangingEnabled` | Boolean | `true` | A flag to decide if users are allowed to change their username. | -| `issueClosingPattern` | String | (empty) | [Pattern to close issues automatically](https://docs.gitlab.com/ee/administration/issue_closing_pattern.html). 
| +| `issueClosingPattern` | String | (empty) | [Pattern to close issues automatically](https://docs.gitlab.com/administration/issue_closing_pattern/). | | `defaultTheme` | Integer | | [Numeric ID of the default theme for the GitLab instance](https://gitlab.com/gitlab-org/gitlab-foss/blob/master/lib/gitlab/themes.rb#L17-27). It takes a number, denoting the ID of the theme. | | `defaultColorMode` | Integer | | [Default color mode for the GitLab instance](https://gitlab.com/gitlab-org/gitlab/-/blob/66788a1de8c3dd3c5566d0f30fe1c2a1bae64bf9/lib/gitlab/color_modes.rb#L17-19). It takes a number, denoting the ID of the color mode. | | `defaultSyntaxHighlightingTheme` | Integer | | [Default syntax highlighting theme for the GitLab instance](https://gitlab.com/gitlab-org/gitlab/-/blob/66788a1de8c3dd3c5566d0f30fe1c2a1bae64bf9/lib/gitlab/color_schemes.rb#L12-17). It takes a number, denoting the ID of the syntax highlighting theme. | | `defaultProjectsFeatures.*feature*` | Boolean | `true` | [See below](#defaultprojectsfeatures). | -| `webhookTimeout` | Integer | (empty) | Waiting time in seconds before a [hook is deemed to have failed](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#webhook-fails-or-multiple-webhook-requests-are-triggered). | -| `graphQlTimeout` | Integer | (empty) | Time in seconds the Rails has to [complete a GraphQL request](https://docs.gitlab.com/ee/api/graphql/#limits). | +| `webhookTimeout` | Integer | (empty) | Waiting time in seconds before a [hook is deemed to have failed](https://docs.gitlab.com/user/project/integrations/webhooks/#webhook-fails-or-multiple-webhook-requests-are-triggered). | +| `graphQlTimeout` | Integer | (empty) | Time in seconds the Rails has to [complete a GraphQL request](https://docs.gitlab.com/api/graphql/#limits). 
| #### Content Security Policy @@ -1218,8 +1234,8 @@ However, a custom Libravatar service can also be used if needed: | Name | Type | Default | Description | |:------------------- |:------:|:------- |:----------- | -| `gravatar.plainURL` | String | (empty) | [HTTP URL to Libravatar instance (instead of using gravatar.com)](https://docs.gitlab.com/ee/administration/libravatar.html). | -| `gravatar.sslUrl` | String | (empty) | [HTTPS URL to Libravatar instance (instead of using gravatar.com)](https://docs.gitlab.com/ee/administration/libravatar.html). | +| `gravatar.plainURL` | String | (empty) | [HTTP URL to Libravatar instance (instead of using gravatar.com)](https://docs.gitlab.com/administration/libravatar/). | +| `gravatar.sslUrl` | String | (empty) | [HTTPS URL to Libravatar instance (instead of using gravatar.com)](https://docs.gitlab.com/administration/libravatar/). | ### Hooking Analytics services to the GitLab instance @@ -1305,7 +1321,7 @@ You can use these defaults or configure the bucket names: #### storage_options The `storage_options` are used to configure -[S3 Server Side Encryption](https://docs.gitlab.com/ee/administration/object_storage.html#server-side-encryption-headers). +[S3 Server Side Encryption](https://docs.gitlab.com/administration/object_storage/#server-side-encryption-headers). Setting a default encryption on an S3 bucket is the easiest way to enable encryption, but you may want to @@ -1363,7 +1379,7 @@ This property has two sub-keys: `secret` and `key`. - `secret` is the name of a Kubernetes Secret. This value is required to use external object storage. - `key` is the name of the key in the secret which houses the YAML block. Defaults to `connection`. 
-Valid configuration keys can be found in the [GitLab Job Artifacts Administration](https://docs.gitlab.com/ee/administration/job_artifacts.html#s3-compatible-connection-settings) +Valid configuration keys can be found in the [GitLab Job Artifacts Administration](https://docs.gitlab.com/administration/cicd/job_artifacts/#s3-compatible-connection-settings) documentation. This matches to [Fog](https://github.com/fog/fog.github.com), and is different between provider modules. @@ -1385,13 +1401,13 @@ kubectl create secret generic gitlab-rails-storage \ #### when (only for External MR Diffs) `externalDiffs` setting has an additional key `when` to -[conditionally store specific diffs on object storage](https://docs.gitlab.com/ee/administration/merge_request_diffs.html#alternative-in-database-storage). +[conditionally store specific diffs on object storage](https://docs.gitlab.com/administration/merge_request_diffs/#alternative-in-database-storage). This setting is left empty by default in the charts, for a default value to be assigned by the Rails code. #### cdn (only for CI Artifacts) -`artifacts` setting has an additional key `cdn` [to configure Google CDN in front of a Google Cloud Storage bucket](../advanced/external-object-storage/index.md#google-cloud-cdn). +`artifacts` setting has an additional key `cdn` [to configure Google CDN in front of a Google Cloud Storage bucket](../advanced/external-object-storage/_index.md#google-cloud-cdn). ### Incoming email settings @@ -1473,7 +1489,7 @@ Prerequisites: - Use [GitLab 15.5.1 or later](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101571#note_1146419137). You can set your GitLab version with `global.gitlabVersion: <version>`. If you need to force an image update after an initial deployment, also set `global.image.pullPolicy: Always`. -- [Create the certificate authority](../advanced/internal-tls/index.md) and certificates that your `kas` pods will trust. 
+- [Create the certificate authority](../advanced/internal-tls/_index.md) and certificates that your `kas` pods will trust. To configure `kas` to use the certificates you created, set the following values. @@ -1504,9 +1520,12 @@ global: ### Suggested Reviewers settings -NOTE: -The Suggested Reviewers secret is created automatically and only used on GitLab SaaS. -This secret is not needed on self-managed GitLab instances. +{{< alert type="note" >}} + +The Suggested Reviewers secret is created automatically and only used on GitLab.com. +This secret is not needed on GitLab Self-Managed instances. + +{{< /alert >}} One can optionally customize the Suggested Reviewers `secret` name as well as `key`, either by using Helm's `--set variable` option: @@ -1530,7 +1549,7 @@ If you'd like to customize the secret value, refer to the [secrets documentation ### LDAP -The `ldap.servers` setting allows for the configuration of [LDAP](https://docs.gitlab.com/ee/administration/auth/ldap/) +The `ldap.servers` setting allows for the configuration of [LDAP](https://docs.gitlab.com/administration/auth/ldap/) user authentication. It is presented as a map, which will be translated into the appropriate LDAP servers configuration in `gitlab.yml`, as with an installation from source. @@ -1569,16 +1588,19 @@ Example `--set` configuration items, when using the global chart: --set global.appConfig.ldap.servers.main.password.key='the-key-containing-the-password' ``` -NOTE: +{{< alert type="note" >}} + Commas are considered [special characters](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set) within Helm `--set` items. Be sure to escape commas in values such as `bind_dn`: `--set global.appConfig.ldap.servers.main.bind_dn='cn=administrator\,cn=Users\,dc=domain\,dc=net'`. +{{< /alert >}} + #### Disable LDAP web sign in It can be useful to prevent using LDAP credentials through the web UI when an alternative such as SAML is preferred. 
This allows LDAP to be used for group sync, while also allowing your SAML identity provider to handle additional checks like custom 2FA. -When LDAP web sign in is disabled, users will not see a LDAP tab on the sign in page. This does not disable [using LDAP credentials for Git access.](https://docs.gitlab.com/ee/administration/auth/ldap/#git-password-authentication) +When LDAP web sign in is disabled, users will not see a LDAP tab on the sign in page. This does not disable [using LDAP credentials for Git access.](https://docs.gitlab.com/administration/auth/ldap/#git-password-authentication) To disable the use of LDAP for web sign-in, set `global.appConfig.ldap.preventSignin: true`. @@ -1611,16 +1633,19 @@ If the LDAP server uses a custom CA or self-signed certificate, you must: This ensures that the CA certificate is mounted in the relevant pods at `/etc/ssl/certs/unique_name.pem` and specifies its use in the LDAP configuration. -NOTE: +{{< alert type="note" >}} + In GitLab 15.9 and later, the certificate in `/etc/ssl/certs/` is not prefixed with `ca-cert-` anymore. This was the old behavior due to the use of Alpine for the container that prepared the certificate secrets for deployed pods. The `gitlab-base` container is now used for this operation, which is based on Debian. +{{< /alert >}} + See [Custom Certificate Authorities](#custom-certificate-authorities) for more info. ### DuoAuth -Use these settings to enable [two-factor authentication (2FA) with GitLab Duo](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html#enable-one-time-password). +Use these settings to enable [two-factor authentication (2FA) with GitLab Duo](https://docs.gitlab.com/user/profile/account/two_factor_authentication/#enable-one-time-password). 
```yaml global: @@ -1654,7 +1679,7 @@ kubectl create secret generic <secret_object_name> --from-literal=secretKey=<duo ### OmniAuth GitLab can leverage OmniAuth to allow users to sign in using GitHub, Google, -and other popular services. Expanded documentation can be found in the [OmniAuth documentation](https://docs.gitlab.com/ee/integration/omniauth.html#configure-common-settings) +and other popular services. Expanded documentation can be found in the [OmniAuth documentation](https://docs.gitlab.com/integration/omniauth/#configure-common-settings) for GitLab. ```yaml @@ -1695,7 +1720,7 @@ omniauth: `providers` is presented as an array of maps that are used to populate `gitlab.yml` as when installed from source. See GitLab documentation for the available selection -of [Supported Providers](https://docs.gitlab.com/ee/integration/omniauth.html#supported-providers). +of [Supported Providers](https://docs.gitlab.com/integration/omniauth/#supported-providers). Defaults to `[]`. This property has two sub-keys: `secret` and `key`: @@ -1708,11 +1733,11 @@ Alternatively, if the provider has no other configuration than its name, you may use a second form with only a 'name' attribute, and optionally a `label` or `icon` attribute. The eligible providers are: -- [`group_saml`](https://docs.gitlab.com/ee/integration/saml.html#configure-group-saml-sso-on-a-self-managed-instance) -- [`kerberos`](https://docs.gitlab.com/ee/integration/saml.html#configure-group-saml-sso-on-a-self-managed-instance) +- [`group_saml`](https://docs.gitlab.com/integration/saml/#configure-group-saml-sso-on-a-self-managed-instance) +- [`kerberos`](https://docs.gitlab.com/integration/saml/#configure-group-saml-sso-on-a-self-managed-instance) The `Secret` for these entries contains YAML or JSON formatted blocks, as described -in [OmniAuth Providers](https://docs.gitlab.com/ee/integration/omniauth.html). To +in [OmniAuth Providers](https://docs.gitlab.com/integration/omniauth/). 
To create this secret, follow the appropriate instructions for retrieval of these items, and create a YAML or JSON file. @@ -1770,7 +1795,7 @@ omniauth: - secret: gitlab-cas3 ``` -[Group SAML](https://docs.gitlab.com/ee/integration/saml.html#configuring-group-saml-on-a-self-managed-gitlab-instance) configuration example: +[Group SAML](https://docs.gitlab.com/integration/saml/#configuring-group-saml-on-a-self-managed-gitlab-instance) configuration example: ```yaml omniauth: @@ -1884,10 +1909,10 @@ The routing rules list is an ordered array of tuples of query and corresponding queue: - The query is following the - [worker matching query](https://docs.gitlab.com/ee/administration/sidekiq/processing_specific_job_classes.html#worker-matching-query) syntax. -- The `<queue_name>` must match a valid Sidekiq queue name `sidekiq.pods[].queues` defined under [`sidekiq.pods`](gitlab/sidekiq/index.md#per-pod-settings). If the queue name + [worker matching query](https://docs.gitlab.com/administration/sidekiq/processing_specific_job_classes/#worker-matching-query) syntax. +- The `<queue_name>` must match a valid Sidekiq queue name `sidekiq.pods[].queues` defined under [`sidekiq.pods`](gitlab/sidekiq/_index.md#per-pod-settings). If the queue name is `nil`, or an empty string, the worker is routed to the queue generated - by the name of the worker instead. See [Full example of Sidekiq configuration](gitlab/sidekiq/index.md#full-example-of-sidekiq-configuration) as a reference. + by the name of the worker instead. See [Full example of Sidekiq configuration](gitlab/sidekiq/_index.md#full-example-of-sidekiq-configuration) as a reference. The query supports wildcard matching `*`, which matches all workers. 
As a result, the wildcard query must stay at the end of the list or the later rules @@ -1930,7 +1955,7 @@ global: | Name | Type | Default | Description | | :---------- | :------ | :------ | :---------- | -| serviceName | String | `webservice-default` | Name of service to direct internal API traffic to. Do not include the Release name, as it will be templated in. Should match an entry in `gitlab.webservice.deployments`. See [`gitlab/webservice` chart](gitlab/webservice/index.md#deployments-settings) | +| serviceName | String | `webservice-default` | Name of service to direct internal API traffic to. Do not include the Release name, as it will be templated in. Should match an entry in `gitlab.webservice.deployments`. See [`gitlab/webservice` chart](gitlab/webservice/_index.md#deployments-settings) | | scheme | String | `http` | Scheme of the API endpoint | | host | String | | Fully qualified hostname or IP address of an API endpoint. Overrides the presence of `serviceName`. | | port | Integer | `8181` | Port number of associated API server. | @@ -1948,7 +1973,7 @@ When possible, we recommend leaving this enabled. ## Configure GitLab Shell -There are several items for the global configuration of [GitLab Shell](gitlab/gitlab-shell/index.md) +There are several items for the global configuration of [GitLab Shell](gitlab/gitlab-shell/_index.md) chart. ```yaml @@ -1964,8 +1989,8 @@ global: | Name | Type | Default | Description | |:--------------------- |:-------:|:------- |:----------- | | `port` | Integer | `22` | See [port](#port) below for specific documentation. | -| `authToken` | | | See [authToken](gitlab/gitlab-shell/index.md#authtoken) in the GitLab Shell chart specific documentation. | -| `hostKeys` | | | See [hostKeys](gitlab/gitlab-shell/index.md#hostkeyssecret) in the GitLab Shell chart specific documentation. | +| `authToken` | | | See [authToken](gitlab/gitlab-shell/_index.md#authtoken) in the GitLab Shell chart specific documentation. 
| +| `hostKeys` | | | See [hostKeys](gitlab/gitlab-shell/_index.md#hostkeyssecret) in the GitLab Shell chart specific documentation. | | `tcp.proxyProtocol` | Boolean | `false` | See [TCP proxy protocol](#tcp-proxy-protocol) below for specific documentation. | ### Port @@ -2062,7 +2087,7 @@ global: | `localStore.path` | String | `/srv/gitlab/shared/pages` | Path where pages files will be stored; only used if localStore is set to true. | | `apiSecret.secret` | String | | Secret containing 32 bit API key in Base64 encoded form. | | `apiSecret.key` | String | | Key within the API key secret where the API key is stored. | -| `namespaceInPath` | Boolean | False | (Beta) Enable or disable namespace in the URL path to support without wildcard DNS setup. For more information, see the [Pages domain without wildcard DNS documentation](gitlab/gitlab-pages/index.md#pages-domain-without-wildcard-dns). | +| `namespaceInPath` | Boolean | False | (Beta) Enable or disable namespace in the URL path to support without wildcard DNS setup. For more information, see the [Pages domain without wildcard DNS documentation](gitlab/gitlab-pages/_index.md#pages-domain-without-wildcard-dns). | ## Configure Webservice @@ -2093,9 +2118,12 @@ gitlab: ## Custom Certificate Authorities -NOTE: +{{< alert type="note" >}} + These settings do not affect charts from outside of this repository, via `requirements.yaml`. +{{< /alert >}} + Some users may need to add custom certificate authorities, such as when using internally issued SSL certificates for TLS services. To provide this functionality, we provide a mechanism for injecting these custom root certificate authorities into the application through Secrets or ConfigMaps. @@ -2127,7 +2155,8 @@ global: - unique_name_2.crt ``` -NOTE: +{{< alert type="note" >}} + The `.crt` extension in the Secret's key name is important for the [Debian update-ca-certificates package](https://manpages.debian.org/bullseye/ca-certificates/update-ca-certificates.8.en.html). 
This step ensures that the custom CA file is mounted with that extension and is processed @@ -2137,6 +2166,8 @@ even though the [documentation](https://gitlab.alpinelinux.org/alpine/ca-certifi says that it is. The UBI-based `update-ca-trust` utility does not seem to have the same requirement. +{{< /alert >}} + You can provide any number of Secrets or ConfigMaps, each containing any number of keys that hold PEM-encoded CA certificates. These are configured as entries under `global.certificates.customCAs`. All keys are mounted unless `keys:` is provided with a list of specific keys to be mounted. All mounted keys across all Secrets and ConfigMaps must be unique. @@ -2203,11 +2234,14 @@ global: - Setting `global.serviceAccount.name` controls the ServiceAccount object name and the name referenced by each component. - Setting `global.serviceAccount.automountServiceAccountToken` controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). -NOTE: +{{< alert type="note" >}} + Do not use `global.serviceAccount.create=true` with `global.serviceAccount.name`, as it instructs the charts to create multiple ServiceAccount objects with the same name. Instead, use `global.serviceAccount.create=false` if specifying a global name. +{{< /alert >}} + ## Annotations Custom annotations can be applied to Deployment, Service, and Ingress objects. @@ -2238,11 +2272,14 @@ global: disktype: ssd ``` -NOTE: +{{< alert type="note" >}} + Charts that are maintained externally do not respect the `global.nodeSelector` at this time and may need to be configured separately based on available chart values. This includes Prometheus, cert-manager, Redis, etc. +{{< /alert >}} + ## Labels ### Common Labels @@ -2284,7 +2321,7 @@ deployment will also recieve the label set `baz: bat`. Refer to the Sidekiq and Webservice charts for additional details. 
Some charts that we depend on are excluded from this label configuration. Only -the [GitLab component sub-charts](gitlab/index.md) will receive these +the [GitLab component sub-charts](gitlab/_index.md) will receive these extra labels. ### Pod @@ -2325,7 +2362,7 @@ global: urlTemplate: 'http://jaeger-ui.example.com/search?service={{ service }}&tags=%7B"correlation_id"%3A"{{ correlation_id }}"%7D' ``` -- `global.tracing.connection.string` is used to configure where tracing spans would be sent. You can read more about that in [GitLab tracing documentation](https://docs.gitlab.com/ee/development/distributed_tracing.html) +- `global.tracing.connection.string` is used to configure where tracing spans would be sent. You can read more about that in [GitLab tracing documentation](https://docs.gitlab.com/development/distributed_tracing/) - `global.tracing.urlTemplate` is used as a template for tracing info URL rendering in GitLab performance bar. ## extraEnv @@ -2374,9 +2411,12 @@ gitlab: # optional: boolean ``` -NOTE: +{{< alert type="note" >}} + The implementation does not support re-using a value name with different content types. You can override the same name with similar content, but no not mix sources like `secretKeyRef`, `configMapKeyRef`, etc. +{{< /alert >}} + ## Configure OAuth settings OAuth integration is configured out-of-the box for services which support it. @@ -2415,7 +2455,7 @@ You can create a secret using the following snippet (assuming that you are insta kubectl create secret generic gitlab-kerberos-keytab --namespace=gitlab --from-file=keytab=./gitlab.keytab ``` -Kerberos integration for Git is enabled by setting `global.appConfig.kerberos.enabled=true`. This will also add the `kerberos` provider to the list of enabled [OmniAuth](https://docs.gitlab.com/ee/integration/omniauth.html) providers for ticket-based authentication in the browser. +Kerberos integration for Git is enabled by setting `global.appConfig.kerberos.enabled=true`. 
This will also add the `kerberos` provider to the list of enabled [OmniAuth](https://docs.gitlab.com/integration/omniauth/) providers for ticket-based authentication in the browser. If left as `false` the Helm chart will still mount the `keytab` in the toolbox, Sidekiq, and webservice Pods, which can be used with manually configured [OmniAuth settings](#omniauth) for Kerberos. @@ -2441,11 +2481,11 @@ global: - example.com ``` -Check the [Kerberos documentation](https://docs.gitlab.com/ee/integration/kerberos.html) for more details. +Check the [Kerberos documentation](https://docs.gitlab.com/integration/kerberos/) for more details. ### Dedicated port for Kerberos -GitLab supports the use of a [dedicated port for Kerberos negotiation](https://docs.gitlab.com/ee/integration/kerberos.html#http-git-access-with-kerberos-token-passwordless-authentication) when using the HTTP protocol for Git operations to workaround a limitation in Git falling back to Basic Authentication when presented with the `negotiate` headers in the authentication exchange. +GitLab supports the use of a [dedicated port for Kerberos negotiation](https://docs.gitlab.com/integration/kerberos/#http-git-access-with-kerberos-token-passwordless-authentication) when using the HTTP protocol for Git operations to workaround a limitation in Git falling back to Basic Authentication when presented with the `negotiate` headers in the authentication exchange. Use of the dedicated port is currently required when using GitLab CI/CD - as the GitLab Runner helper relies on in-URL credentials to clone from GitLab. @@ -2464,12 +2504,15 @@ global: This enables an additional clone URL in the GitLab UI that is dedicated for Kerberos negotiation. The `https: true` setting is for URL generation only, and doesn't expose any additional TLS configuration. TLS is terminated and configured in the Ingress for GitLab. 
-NOTE: +{{< alert type="note" >}} + Due to a current limitation with [our fork of the `nginx-ingress` Helm chart](nginx/fork.md) - specifying a `dedicatedPort` will not currently expose the port for use in the chart's `nginx-ingress` controller. Cluster operators will need to expose this port themselves. Follow [this charts issue](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3531) for more details and potential workarounds. +{{< /alert >}} + ### LDAP custom allowed realms -The `global.appConfig.kerberos.simpleLdapLinkingAllowedRealms` can be used to specify a set of domains used to link LDAP and Kerberos identities together when a user's LDAP DN does not match the user's Kerberos realm. See the [Custom allowed realms section in the Kerberos integration documentation](https://docs.gitlab.com/ee/integration/kerberos.html#custom-allowed-realms) for additional details. +The `global.appConfig.kerberos.simpleLdapLinkingAllowedRealms` can be used to specify a set of domains used to link LDAP and Kerberos identities together when a user's LDAP DN does not match the user's Kerberos realm. See the [Custom allowed realms section in the Kerberos integration documentation](https://docs.gitlab.com/integration/kerberos/#custom-allowed-realms) for additional details. ## Outgoing email @@ -2558,7 +2601,11 @@ global: ## Log rotation -> - [Introduced](https://gitlab.com/gitlab-org/cloud-native/gitlab-logger/-/merge_requests/10) in GitLab 15.6. +{{< history >}} + +- [Introduced](https://gitlab.com/gitlab-org/cloud-native/gitlab-logger/-/merge_requests/10) in GitLab 15.6. + +{{< /history >}} By default, the GitLab Helm chart does not rotate logs. This can cause ephemeral storage issues for containers that run for a long time. 
diff --git a/chart/doc/charts/haproxy/_index.md b/chart/doc/charts/haproxy/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..b1f11485fe13880c584547d719e7f94f63d1ddaa
--- /dev/null
+++ b/chart/doc/charts/haproxy/_index.md
@@ -0,0 +1,50 @@
+---
+stage: Systems
+group: Distribution
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#designated-technical-writers
+title: Using HAProxy
+---
+
+{{< details >}}
+
+- Tier: Free, Premium, Ultimate
+- Offering: GitLab Self-Managed
+
+{{< /details >}}
+
+The [HAProxy Helm Chart](https://github.com/haproxytech/helm-charts/tree/main/kubernetes-ingress) can replace the
+[bundled NGINX Helm chart](../nginx/_index.md) as the Ingress controller, and is documented in Kubernetes'
+[list of additional Ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/#additional-controllers).
+
+HAProxy will also support Git over SSH.
+
+We default to [NGINX](../nginx/_index.md) mostly due to historical experience with the tool, but HAProxy is a valid alternative that may be
+preferable to those who have more experience with HAProxy specifically. Additionally, it offers [FIPS compliance](#fips-compliant-haproxy)
+while the [NGINX Ingress controller](https://github.com/kubernetes/ingress-nginx) currently does not.
+
+## Configuring HAProxy
+
+See the [HAProxy Helm chart documentation](https://www.haproxy.com/documentation/kubernetes-ingress/enterprise/configuration-reference/)
+or the [Helm values file](https://github.com/haproxytech/helm-charts/blob/main/kubernetes-ingress/values.yaml)
+for configuration details.
+
+See the [HAProxy example configuration](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/values-haproxy-ingress.yaml)
+for detailed YAML for values tested with the GitLab Helm Charts.
+ +### Global Settings + +We share some common global settings among our charts. See the [Global Ingress documentation](../globals.md#configure-ingress-settings) +for common configuration options, such as GitLab and Registry hostnames. + +### FIPS-compliant HAProxy + +[HAProxy Enterprise](https://www.haproxy.com/products/haproxy-enterprise-kubernetes-ingress-controller) provides FIPS compliance. +Note that HAProxy Enterprise requires a license. + +Following are links for more information on HAProxy Enterprise: + +- [HAProxy Enterprise landing page](https://www.haproxy.com/products/haproxy-enterprise) +- [HAProxy FIPS compliance blog post](https://www.haproxy.com/blog/become-fips-compliant-with-haproxy-enterprise-on-red-hat-enterprise-linux-8) +- [Certified OpenShift Operator](https://catalog.redhat.com/software/container-stacks/detail/5ec3f9fc110f56bd24f2dd57) +- [How to use an image from a private registry](https://github.com/haproxytech/helm-charts/blob/kubernetes-ingress-1.22.0/haproxy/README.md#installing-from-a-private-registry) +- [How to find the HAProxy Enterprise image](https://www.haproxy.com/documentation/haproxy-enterprise/getting-started/installation/docker/) diff --git a/chart/doc/charts/minio/_index.md b/chart/doc/charts/minio/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..ed64ac4126f9cd628c990bc77f12e883a34a6300 --- /dev/null +++ b/chart/doc/charts/minio/_index.md @@ -0,0 +1,301 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using MinIO for Object storage +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +This chart is based on [`stable/minio`](https://github.com/helm/charts/tree/master/stable/minio) +version 
[`0.4.3`](https://github.com/helm/charts/tree/aaaf98b5d25c26cc2d483925f7256f2ce06be080/stable/minio), +and inherits most settings from there. + +## Design Choices + +Design choices related to the [upstream chart](https://github.com/helm/charts/tree/master/stable/minio) +can be found in the project's README. + +GitLab chose to alter that chart in order to simplify configuration of the secrets, +and to remove all use of secrets in environment variables. GitLab added `initContainer`s +to control the population of secrets into the `config.json`, and a chart-wide `enabled` flag. + +This chart makes use of only one secret: + +- `global.minio.credentials.secret`: A global secret containing the `accesskey` and + `secretkey` values that will be used for authentication to the bucket(s). + +## Configuration + +We will describe all the major sections of the configuration below. When configuring +from the parent chart, these values will be: + +```yaml +minio: + init: + ingress: + enabled: + apiVersion: + tls: + enabled: + secretName: + annotations: + configureCertmanager: + proxyReadTimeout: + proxyBodySize: + proxyBuffering: + tolerations: + persistence: # Upstream + volumeName: + matchLabels: + matchExpressions: + serviceType: # Upstream + servicePort: # Upstream + defaultBuckets: + minioConfig: # Upstream +``` + +### Installation command line options + +The table below contains all the possible charts configurations that can be supplied +to the `helm install` command using the `--set` flags: + +| Parameter | Default | Description | +|----------------------------------------------------------|--------------------------------|-----------------------------------------------------------------------------------------------------------------------------------| +| `common.labels` | `{}` | Supplemental labels that are applied to all objects created by this chart. 
| +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `defaultBuckets` | `[{"name": "registry"}]` | MinIO default buckets | +| `deployment.strategy` | { `type`: `Recreate` } | Allows one to configure the update strategy utilized by the deployment | +| `image` | `minio/minio` | MinIO image | +| `imagePullPolicy` | `Always` | MinIO image pull policy | +| `imageTag` | `RELEASE.2017-12-28T01-21-00Z` | MinIO image tag | +| `minioConfig.browser` | `on` | MinIO browser flag | +| `minioConfig.domain` | | MinIO domain | +| `minioConfig.region` | `us-east-1` | MinIO region | +| `minioMc.image` | `minio/mc` | MinIO mc image | +| `minioMc.tag` | `latest` | MinIO mc image tag | +| `mountPath` | `/export` | MinIO configuration file mount path | +| `persistence.accessMode` | `ReadWriteOnce` | MinIO persistence access mode | +| `persistence.enabled` | `true` | MinIO enable persistence flag | +| `persistence.matchExpressions` | | MinIO label-expression matches to bind | +| `persistence.matchLabels` | | MinIO label-value matches to bind | +| `persistence.size` | `10Gi` | MinIO persistence volume size | +| `persistence.storageClass` | | MinIO storageClassName for provisioning | +| `persistence.subPath` | | MinIO persistence volume mount path | +| `persistence.volumeName` | | MinIO existing persistent volume name | +| `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. 
| +| `pullSecrets` | | Secrets for the image repository | +| `resources.requests.cpu` | `250m` | MinIO minimum CPU requested | +| `resources.requests.memory` | `256Mi` | MinIO minimum memory requested | +| `securityContext.fsGroup` | `1000` | Group ID to start the pod with | +| `securityContext.runAsUser` | `1000` | User ID to start the pod with | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext.runAsUser` | `1000` | Allow to overwrite the specific security context under which the container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the Gitaly container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `servicePort` | `9000` | MinIO service port | +| `serviceType` | `ClusterIP` | MinIO service type | +| `tolerations` | `[]` | Toleration labels for pod assignment | +| `jobAnnotations` | `{}` | Annotations for the job spec | + +## Chart configuration examples + +### pullSecrets + +`pullSecrets` allows you to authenticate to a private registry to pull images for a pod. + +Additional details about private registries and their authentication methods can be +found in [the Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). 
+
+Below is an example use of `pullSecrets`:
+
+```yaml
+image: my.minio.repository
+imageTag: latest
+imagePullPolicy: Always
+pullSecrets:
+- name: my-secret-name
+- name: my-secondary-secret-name
+```
+
+### serviceAccount
+
+This section controls if the default ServiceAccount access token should be mounted in pods.
+
+| Name | Type | Default | Description |
+| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). |
+
+### tolerations
+
+`tolerations` allow you to schedule pods on tainted worker nodes
+
+Below is an example use of `tolerations`:
+
+```yaml
+tolerations:
+- key: "node_label"
+  operator: "Equal"
+  value: "true"
+  effect: "NoSchedule"
+- key: "node_label"
+  operator: "Equal"
+  value: "true"
+  effect: "NoExecute"
+```
+
+## Enable the sub-chart
+
+The way we've chosen to implement compartmentalized sub-charts includes the ability
+to disable the components that you may not want in a given deployment. For this reason,
+the first setting you should decide on is `enabled:`.
+
+By default, MinIO is enabled out of the box, but is not recommended for production use.
+When you are ready to disable it, run `--set global.minio.enabled=false`.
+
+## Configure the initContainer
+
+While rarely altered, the `initContainer` behaviors can be changed via the following items:
+
+```yaml
+init:
+  image:
+    repository:
+    tag:
+    pullPolicy: IfNotPresent
+  script:
+```
+
+### initContainer image
+
+The initContainer image settings are just as with a normal image configuration.
+By default, chart-local values are left empty, and the global settings +`global.gitlabBase.image.repository` and the image tag associated with the current +`global.gitlabVersion` will be used to populate the initContainer image. +The global configuration can be overridden by chart-local values (e.g. `minio.init.image.tag`). + +### initContainer script + +The initContainer is passed the following items: + +- The secret containing authentication items mounted in `/config`, usually `accesskey` + and `secretkey`. +- The ConfigMap containing the `config.json` template, and `configure` containing a + script to be executed with `sh`, mounted in `/config`. +- An `emptyDir` mounted at `/minio` that will be passed to the daemon's container. + +The initContainer is expected to populate `/minio/config.json` with a completed configuration, +using `/config/configure` script. When the `minio-config` container has completed +that task, the `/minio` directory will be passed to the `minio` container, and used +to provide the `config.json` to the [MinIO](https://min.io) server. + +## Configuring the Ingress + +These settings control the MinIO Ingress. + +| Name | Type | Default | Description | +|:---------------- |:-------:|:------- |:----------- | +| `apiVersion` | String | | Value to use in the `apiVersion` field. | +| `annotations` | String | | This field is an exact match to the standard `annotations` for [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/). | +| `enabled` | Boolean | `false` | Setting that controls whether to create Ingress objects for services that support them. When `false` the `global.ingress.enabled` setting is used. | +| `configureCertmanager` | Boolean | | Toggles Ingress annotation `cert-manager.io/issuer` and `acme.cert-manager.io/http01-edit-in-place`.. For more information see the [TLS requirement for GitLab Pages](../../installation/tls.md). 
| +| `tls.enabled` | Boolean | `true` | When set to `false`, you disable TLS for MinIO. This is mainly useful when you cannot use TLS termination at Ingress-level, like when you have a TLS-terminating proxy before the Ingress Controller. | +| `tls.secretName` | String | | The name of the Kubernetes TLS Secret that contains a valid certificate and key for the MinIO URL. When not set, the `global.ingress.tls.secretName` is used instead. | + +## Configuring the image + +The `image`, `imageTag` and `imagePullPolicy` defaults are +[documented upstream](https://github.com/helm/charts/tree/master/stable/minio#configuration). + +## Persistence + +This chart provisions a `PersistentVolumeClaim` and mounts a corresponding persistent +volume to default location `/export`. You'll need physical storage available in the +Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable `PersistentVolumeClaim` +by: `persistence.enabled: false`. + +The behaviors for [`persistence`](https://github.com/helm/charts/tree/master/stable/minio#persistence) +are [documented upstream](https://github.com/helm/charts/tree/master/stable/minio#configuration). + +GitLab has added a few items: + +```yaml +persistence: + volumeName: + matchLabels: + matchExpressions: +``` + +| Name | Type | Default | Description | +|:------------------ |:-------:|:------- |:----------- | +| `volumeName` | String | `false` | When `volumeName` is provided, the `PersistentVolumeClaim` will use the provided `PersistentVolume` by name, in place of creating a `PersistentVolume` dynamically. This overrides the upstream behavior. | +| `matchLabels` | Map | `true` | Accepts a Map of label names and label values to match against when choosing a volume to bind. This is used in the `PersistentVolumeClaim` `selector` section. See the [volumes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector). 
| +| `matchExpressions` | Array | | Accepts an array of label condition objects to match against when choosing a volume to bind. This is used in the `PersistentVolumeClaim` `selector` section. See the [volumes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector). | + +## defaultBuckets + +`defaultBuckets` provides a mechanism to automatically create buckets on the MinIO +pod at *installation*. This property contains an array of items, each with up to three +properties: `name`, `policy`, and `purge`. + +```yaml +defaultBuckets: + - name: public + policy: public + purge: true + - name: private + - name: public-read + policy: download +``` + +| Name | Type | Default | Description | +|:-------- |:-------:|:--------|:------------| +| `name` | String | | The name of the bucket that is created. The provided value should conform to [AWS bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html), meaning that it should be compliant with DNS and contain only the characters a-z, 0-9, and – (hyphen) in strings between 3 and 63 characters in length. The `name` property is *required* for all entries. | +| `policy` | | `none` | The value of `policy` controls the access policy of the bucket on MinIO. The `policy` property is not required, and the default value is `none`. In regards to **anonymous** access, possible values are: `none` (no anonymous access), `download` (anonymous read-only access), `upload` (anonymous write-only access) or `public` (anonymous read/write access). | +| `purge` | Boolean | | The `purge` property is provided as a means to cause any existing bucket to be removed with force, at installation time. This only comes into play when using a pre-existing `PersistentVolume` for the volumeName property of [persistence](#persistence). 
If you make use of a dynamically created `PersistentVolume`, this will have no valuable effect as it only happens at chart installation and there will be no data in the `PersistentVolume` that was just created. This property is not required, but you may specify this property with a value of `true` in order to cause a bucket to be purged with force `mc rm -r --force`. |
+
+## Security Context
+
+These options allow control over which `user` and/or `group` is used to start the pod.
+
+For in-depth information about security context, please refer to the official
+[Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/).
+
+## Service Type and Port
+
+These are [documented upstream](https://github.com/helm/charts/tree/master/stable/minio#configuration),
+and the key summary is:
+
+```yaml
+## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service).
+## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
+## ref: http://kubernetes.io/docs/user-guide/services/
+##
+serviceType: LoadBalancer
+servicePort: 9000
+```
+
+The chart does not expect to be of the `type: NodePort`, so **do not** set it as such.
+
+## Upstream items
+
+The [upstream documentation](https://github.com/helm/charts/tree/master/stable/minio)
+for the following also applies completely to this chart:
+
+- `resources`
+- `nodeSelector`
+- `minioConfig`
+
+Further explanation of the `minioConfig` settings can be found in the
+[MinIO notify documentation](https://min.io/docs/minio/kubernetes/upstream/index.html).
+This includes details on publishing notifications when Bucket Objects are accessed or changed.
diff --git a/chart/doc/charts/minio/index.md b/chart/doc/charts/minio/index.md index 887789d1c286cb5aec9a35e23fc5dd42a8858b3c..6a5af5ef763bfe38c96844eb34cbf9533d54cd1b 100644 --- a/chart/doc/charts/minio/index.md +++ b/chart/doc/charts/minio/index.md @@ -258,7 +258,7 @@ defaultBuckets: | Name | Type | Default | Description | |:-------- |:-------:|:--------|:------------| -| `name` | String | | The name of the bucket that is created. The provided value should conform to [AWS bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html), meaning that it should be compliant with DNS and contain only the characters a-z, 0-9, and – (hyphen) in strings between 3 and 63 characters in length. The `name` property is _required_ for all entries. | +| `name` | String | | The name of the bucket that is created. The provided value should conform to [AWS bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html), meaning that it should be compliant with DNS and contain only the characters a-z, 0-9, and – (hyphen) in strings between 3 and 63 characters in length. The `name` property is *required* for all entries. | | `policy` | | `none` | The value of `policy` controls the access policy of the bucket on MinIO. The `policy` property is not required, and the default value is `none`. In regards to **anonymous** access, possible values are: `none` (no anonymous access), `download` (anonymous read-only access), `upload` (anonymous write-only access) or `public` (anonymous read/write access). | | `purge` | Boolean | | The `purge` property is provided as a means to cause any existing bucket to be removed with force, at installation time. This only comes into play when using a pre-existing `PersistentVolume` for the volumeName property of [persistence](#persistence). 
If you make use of a dynamically created `PersistentVolume`, this will have no valuable effect as it only happens at chart installation and there will be no data in the `PersistentVolume` that was just created. This property is not required, but you may specify this property with a value of `true` in order to cause a bucket to be purged with force `mc rm -r --force`. |
diff --git a/chart/doc/charts/nginx/_index.md b/chart/doc/charts/nginx/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..c4470f5c3626b8952cfd38e5a3f14c02057ce28c
--- /dev/null
+++ b/chart/doc/charts/nginx/_index.md
@@ -0,0 +1,106 @@
+---
+stage: Systems
+group: Distribution
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
+title: Using NGINX
+---
+
+{{< details >}}
+
+- Tier: Free, Premium, Ultimate
+- Offering: GitLab Self-Managed
+
+{{< /details >}}
+
+We provide a complete NGINX deployment to be used as an Ingress Controller, to ensure
+compatibility, because not all Kubernetes providers natively support the NGINX
+[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls).
+
+{{< alert type="note" >}}
+
+Our [fork](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/nginx-ingress) of the NGINX chart was pulled from
+[GitHub](https://github.com/kubernetes/ingress-nginx). See [Our NGINX fork](fork.md) for details on what was modified in our fork.
+
+{{< /alert >}}
+
+{{< alert type="note" >}}
+
+Only one `global.hosts.domain` value is possible.
+Support for multiple domains is being tracked in [issue 3147](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3147).
+{{< /alert >}}
+
+## Configuring NGINX
+
+See [NGINX chart documentation](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/charts/nginx-ingress/README.md#configuration)
+for configuration details.
+ +### Global settings + +We share some common global settings among our charts. See the [Globals Documentation](../globals.md) +for common configuration options, such as GitLab and Registry hostnames. + +## Configure hosts using the Global settings + +The hostnames for the GitLab Server and the Registry Server can be configured using +our [Global settings](../globals.md) chart. + +## GitLab Geo + +A second NGINX subchart is bundled and preconfigured for GitLab Geo traffic, +which supports the same settings as the default controller. The controller can be +enabled with `nginx-ingress-geo.enabled=true`. + +This controller is configured to not modify any incoming `X-Forwarded-*` headers. +Make sure to do the same if you want to use a different provider for Geo traffic. + +The default controller value (`nginx-ingress-geo.controller.ingressClassResource.controllerValue`) +is set to `k8s.io/nginx-ingress-geo` and the IngressClass name to `{ReleaseName}-nginx-geo` +to avoid interference with the default controller. The IngressClass name can be overridden +with `global.geo.ingressClass`. + +The custom header handling is only required for primary Geo sites to handle traffic +forwarded from secondary sites. It only needs to be used on secondaries if the +site is about to be promoted to a primary. + +Note, that changing the IngressClass during a failover will cause the other controller +to handle incoming traffic. Since the other controller has a different loadbalancer IP +assigned, this may require additional changes to your DNS configuration. + +This can be avoided by enabling the Geo Ingress controller on all Geo sites and +by configuring default and extra webservice Ingresses to use the associated +IngressClass (`useGeoClass=true`). + +## Annotation value word blocklist + +{{< history >}} + +- Introduced in [GitLab Helm chart 6.6](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/2713). 
+ +{{< /history >}} + +In situations where cluster operators need greater control over the generated +NGINX configuration, the NGINX Ingress allows for [configuration snippets](https://kubernetes.github.io/ingress-nginx/examples/customization/configuration-snippets/) +which inserts "snippets" of raw NGINX configuration not addressed by the +standard annotations and ConfigMap entries. + +The drawback of these configuration snippets is that it allows cluster +operators to deploy Ingress objects that include LUA scripting and similar +configurations that can compromise the security of your GitLab installation +and the cluster itself, including exposing serviceaccount tokens and secrets. + +See [CVE-2021-25742](https://nvd.nist.gov/vuln/detail/CVE-2021-25742) and +[this upstream `ingress-nginx` issue](https://github.com/kubernetes/ingress-nginx/issues/7837) +for additional details. + +In order to mitigate CVE-2021-25742 in Helm chart deployments of GitLab - we +set an [annotation-value-word-blocklist](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/v6.6.0/values.yaml#L836) +using the [suggested settings from the `nginx-ingress` community](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#annotation-value-word-blocklist) + +If you are making use of configuration snippets in your GitLab Ingress +configuration, or are using GitLab NGINX Ingress Controller with third-party +Ingress objects that use configuration snippets, you may experience `404` +errors when trying to visit your GitLab third-party domains and "invalid word" +errors in your `nginx-controller` logs. In that case, review and adjust your +`nginx-ingress.controller.config.annotation-value-word-blocklist` setting. + +See also ["Invalid Word" errors in the `nginx-controller` logs and `404` errors in our chart troubleshooting docs](../../troubleshooting/_index.md#invalid-word-errors-in-the-nginx-controller-logs-and-404-errors). 
diff --git a/chart/doc/charts/nginx/fork.md b/chart/doc/charts/nginx/fork.md index 8626b134605a0d635753ad3e5f8b2b430571c738..119a31fecb94f0460d7904de181c3f476a14a5bb 100644 --- a/chart/doc/charts/nginx/fork.md +++ b/chart/doc/charts/nginx/fork.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Our NGINX fork --- -# Our NGINX fork +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} Our [fork](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/nginx-ingress) of the NGINX chart was pulled from [GitHub](https://github.com/kubernetes/ingress-nginx). diff --git a/chart/doc/charts/registry/_index.md b/chart/doc/charts/registry/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..6ed3d35f0155002062535ae2e743edfdba0b096f --- /dev/null +++ b/chart/doc/charts/registry/_index.md @@ -0,0 +1,1354 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the Container Registry +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The `registry` sub-chart provides the Registry component to a complete cloud-native +GitLab deployment on Kubernetes. This sub-chart is based on the +[upstream chart](https://github.com/docker/distribution-library-image) +and contains the GitLab [Container Registry](https://gitlab.com/gitlab-org/container-registry). 
+ +This chart is composed of 3 primary parts: + +- [Service](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/charts/registry/templates/service.yaml), +- [Deployment](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/charts/registry/templates/deployment.yaml), +- [ConfigMap](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/charts/registry/templates/configmap.yaml). + +All configuration is handled according to the +[Registry configuration documentation](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md?ref_type=heads) +using `/etc/docker/registry/config.yml` variables provided to the `Deployment` populated +from the `ConfigMap`. The `ConfigMap` overrides the upstream defaults, but is +[based on them](https://github.com/docker/distribution-library-image/blob/master/config-example.yml). +See below for more details: + +- [`distribution/cmd/registry/config-example.yml`](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml) +- [`distribution-library-image/config-example.yml`](https://github.com/docker/distribution-library-image/blob/master/config-example.yml) + +## Design Choices + +A Kubernetes `Deployment` was chosen as the deployment method for this chart to allow +for simple scaling of instances, while allowing for +[rolling updates](https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/). + +This chart makes use of two required secrets and one optional: + +### Required + +- `global.registry.certificate.secret`: A global secret that will contain the public + certificate bundle to verify the authentication tokens provided by the associated + GitLab instance(s). See [documentation](https://docs.gitlab.com/administration/packages/container_registry/#use-an-external-container-registry-with-gitlab-as-an-auth-endpoint) + on using GitLab as an auth endpoint. 
+- `global.registry.httpSecret.secret`: A global secret that will contain the + [shared secret](https://distribution.github.io/distribution/about/configuration/#http) between registry pods. + +### Optional + +- `profiling.stackdriver.credentials.secret`: If Stackdriver profiling is enabled and + you need to provide explicit service account credentials, then the value in this secret + (in the `credentials` key by default) is the GCP service account JSON credentials. + If you are using GKE and are providing service accounts to your workloads using + [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) + (or node service accounts, although this is not recommended), then this secret is not required + and should not be supplied. In either case, the service account requires the role + `roles/cloudprofiler.agent` or equivalent [manual permissions](https://cloud.google.com/profiler/docs/iam#roles) + +## Configuration + +We will describe all the major sections of the configuration below. 
When configuring +from the parent chart, these values will be: + +```yaml +registry: + enabled: + maintenance: + readonly: + enabled: false + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + image: + tag: 'v4.15.2-gitlab' + pullPolicy: IfNotPresent + annotations: + service: + type: ClusterIP + name: registry + httpSecret: + secret: + key: + authEndpoint: + tokenIssuer: + certificate: + secret: gitlab-registry + key: registry-auth.crt + deployment: + terminationGracePeriodSeconds: 30 + draintimeout: '0' + hpa: + minReplicas: 2 + maxReplicas: 10 + cpu: + targetAverageUtilization: 75 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + storage: + secret: + key: storage + extraKey: + validation: + disabled: true + manifests: + referencelimit: 0 + payloadsizelimit: 0 + urls: + allow: [] + deny: [] + notifications: {} + tolerations: [] + affinity: {} + ingress: + enabled: false + tls: + enabled: true + secretName: redis + annotations: + configureCertmanager: + proxyReadTimeout: + proxyBodySize: + proxyBuffering: + networkpolicy: + enabled: false + egress: + enabled: false + rules: [] + ingress: + enabled: false + rules: [] + serviceAccount: + create: false + automountServiceAccountToken: false + tls: + enabled: false + secretName: + verify: true + caSecretName: + cipherSuites: +``` + +If you chose to deploy this chart as a standalone, remove the `registry` at the top level. 
+ +## Installation parameters + +| Parameter | Default | Description | +| -------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `annotations` | | Pod annotations | +| `podLabels` | | Supplemental Pod labels. Will not be used for selectors. | +| `common.labels` | | Supplemental labels that are applied to all objects created by this chart. | +| `authAutoRedirect` | `true` | Auth auto-redirect (must be true for Windows clients to work) | +| `authEndpoint` | `global.hosts.gitlab.name` | Auth endpoint (only host and port) | +| `certificate.secret` | `gitlab-registry` | JWT certificate | +| `debug.addr.port` | `5001` | Debug port | +| `debug.tls.enabled` | `false` | Enable TLS for the debug port for the registry. Impacts liveness and readiness probes, as well as the metrics endpoint (if enabled) | +| `debug.tls.secretName` | | The name of the Kubernetes TLS Secret that contains a valid certificate and key for the registry debug endpoint. When not set and `debug.tls.enabled=true` - the debug TLS configuration will default to the registry's TLS certificate. 
| +| `debug.prometheus.enabled` | `false` | **DEPRECATED** Use `metrics.enabled` | +| `debug.prometheus.path` | `""` | **DEPRECATED** Use `metrics.path` | +| `metrics.enabled` | `false` | If a metrics endpoint should be made available for scraping | +| `metrics.path` | `/metrics` | Metrics endpoint path | +| `metrics.serviceMonitor.enabled` | `false` | If a ServiceMonitor should be created to enable Prometheus Operator to manage the metrics scraping, note that enabling this removes the `prometheus.io` scrape annotations | +| `metrics.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the ServiceMonitor | +| `metrics.serviceMonitor.endpointConfig` | `{}` | Additional endpoint configuration for the ServiceMonitor | +| `deployment.terminationGracePeriodSeconds` | `30` | Optional duration in seconds the pod needs to terminate gracefully. | +| `deployment.strategy` | `{}` | Allows one to configure the update strategy utilized by the deployment | +| `draintimeout` | `'0'` | Amount of time to wait for HTTP connections to drain after receiving a SIGTERM signal (e.g. `'10s'`) | +| `relativeurls` | `false` | Enable the registry to return relative URLs in Location headers. 
| +| `enabled` | `true` | Enable registry flag | +| `hpa.behavior` | `{scaleDown: {stabilizationWindowSeconds: 300 }}` | Behavior contains the specifications for up- and downscaling behavior (requires `autoscaling/v2beta2` or higher) | +| `hpa.customMetrics` | `[]` | Custom metrics contains the specifications for which to use to calculate the desired replica count (overrides the default use of Average CPU Utilization configured in `targetAverageUtilization`) | +| `hpa.cpu.targetType` | `Utilization` | Set the autoscaling CPU target type, must be either `Utilization` or `AverageValue` | +| `hpa.cpu.targetAverageValue` | | Set the autoscaling CPU target value | +| `hpa.cpu.targetAverageUtilization` | `75` | Set the autoscaling CPU target utilization | +| `hpa.memory.targetType` | | Set the autoscaling memory target type, must be either `Utilization` or `AverageValue` | +| `hpa.memory.targetAverageValue` | | Set the autoscaling memory target value | +| `hpa.memory.targetAverageUtilization` | | Set the autoscaling memory target utilization | +| `hpa.minReplicas` | `2` | Minimum number of replicas | +| `hpa.maxReplicas` | `10` | Maximum number of replicas | +| `httpSecret` | | Https secret | +| `extraEnvFrom` | | List of extra environment variables from other data sources to expose | +| `image.pullPolicy` | | Pull policy for the registry image | +| `image.pullSecrets` | | Secrets to use for image repository | +| `image.repository` | `registry.gitlab.com/gitlab-org/build/cng/gitlab-container-registry` | Registry image | +| `image.tag` | `v4.15.2-gitlab` | Version of the image to use | +| `init.image.repository` | | initContainer image | +| `init.image.tag` | | initContainer image tag | +| `init.containerSecurityContext` | | initContainer specific [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) | +| `init.containerSecurityContext.runAsUser` | `1000` | initContainer specific: User ID under which the container 
should be started | +| `init.containerSecurityContext.allowPrivilegeEscalation` | `false` | initContainer specific: Controls whether a process can gain more privileges than its parent process | +| `init.containerSecurityContext.runAsNonRoot` | `true` | initContainer specific: Controls whether the container runs with a non-root user | +| `init.containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | initContainer specific: Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the container | +| `keda.enabled` | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `keda.pollingInterval` | `30` | The interval to check each trigger on | +| `keda.cooldownPeriod` | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `keda.minReplicaCount` | | Minimum number of replicas KEDA will scale the resource down to, defaults to `hpa.minReplicas` | +| `keda.maxReplicaCount` | | Maximum number of replicas KEDA will scale the resource up to, defaults to `hpa.maxReplicas` | +| `keda.fallback` | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `keda.hpaName` | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `keda.restoreToOriginalReplicaCount` | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| `keda.behavior` | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `keda.triggers` | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | +| `log` | `{level: info, fields: {service: registry}}` | Configure the logging options | +| `minio.bucket` | `global.registry.bucket` | Legacy registry bucket name | +| `maintenance.readonly.enabled` | 
`false` | Enable registry's read-only mode | +| `maintenance.uploadpurging.enabled` | `true` | Enable upload purging | +| `maintenance.uploadpurging.age` | `168h` | Purge uploads older than the specified age | +| `maintenance.uploadpurging.interval` | `24h` | Frequency at which upload purging is performed | +| `maintenance.uploadpurging.dryrun` | `false` | Only list which uploads will be purged without deleting | +| `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods. | +| `reporting.sentry.enabled` | `false` | Enable reporting using Sentry | +| `reporting.sentry.dsn` | | The Sentry DSN (Data Source Name) | +| `reporting.sentry.environment` | | The Sentry [environment](https://docs.sentry.io/concepts/key-terms/environments/) | +| `profiling.stackdriver.enabled` | `false` | Enable continuous profiling using Stackdriver | +| `profiling.stackdriver.credentials.secret` | `gitlab-registry-profiling-creds` | Name of the secret containing credentials | +| `profiling.stackdriver.credentials.key` | `credentials` | Secret key in which the credentials are stored | +| `profiling.stackdriver.service` | `RELEASE-registry` (templated Service name) | Name of the Stackdriver service to record profiles under | +| `profiling.stackdriver.projectid` | GCP project where running | GCP project to report profiles to | +| `database.configure` | `false` | Populate database configuration in the registry chart without enabling it. Required when [migrating an existing registry](metadata_database.md#existing-registries). | +| `database.enabled` | `false` | Enable metadata database. This is an experimental feature and must not be used in production environments. | +| `database.host` | `global.psql.host` | The database server hostname. | +| `database.port` | `global.psql.port` | The database server port. | +| `database.user` | | The database username. 
| +| `database.password.secret` | `RELEASE-registry-database-password` | Name of the secret containing the database password. | +| `database.password.key` | `password` | Secret key in which the database password is stored. | +| `database.name` | | The database name. | +| `database.sslmode` | | The SSL mode. Can be one of `disable`, `allow`, `prefer`, `require`, `verify-ca` or `verify-full`. | +| `database.ssl.secret` | `global.psql.ssl.secret` | A secret containing client certificate, key and certificate authority. Defaults to the main PostgreSQL SSL secret. | +| `database.ssl.clientCertificate` | `global.psql.ssl.clientCertificate` | The key inside the secret referring the client certificate. | +| `database.ssl.clientKey` | `global.psql.ssl.clientKey` | The key inside the secret referring the client key. | +| `database.ssl.serverCA` | `global.psql.ssl.serverCA` | The key inside the secret referring the certificate authority (CA). | +| `database.connecttimeout` | `0` | Maximum time to wait for a connection. Zero or not specified means waiting indefinitely. | +| `database.draintimeout` | `0` | Maximum time to wait to drain all connections on shutdown. Zero or not specified means waiting indefinitely. | +| `database.preparedstatements` | `false` | Enable prepared statements. Disabled by default for compatibility with PgBouncer. | +| `database.primary` | `false` | Target primary database server. This is used to specify a dedicated FQDN to target when running registry `database.migrations`. The `host` will be used to run `database.migrations` when not specified. | +| `database.pool.maxidle` | `0` | The maximum number of connections in the idle connection pool. If `maxopen` is less than `maxidle`, then `maxidle` is reduced to match the `maxopen` limit. Zero or not specified means no idle connections. | +| `database.pool.maxopen` | `0` | The maximum number of open connections to the database. 
If `maxopen` is less than `maxidle`, then `maxidle` is reduced to match the `maxopen` limit. Zero or not specified means unlimited open connections. | +| `database.pool.maxlifetime` | `0` | The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. Zero or not specified means unlimited reuse. | +| `database.pool.maxidletime` | `0` | The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. Zero or not specified means unlimited duration. | +| `database.loadBalancing.enabled` | `false` | Enable database load balancing. This is an experimental feature and must not be used in production environments. | +| `database.loadBalancing.nameserver.host` | `localhost` | The host of the nameserver to use for looking up the DNS record. | +| `database.loadBalancing.nameserver.port` | `8600` | The port of the nameserver to use for looking up the DNS record. | +| `database.loadBalancing.record` | | The SRV record to look up. This option is required for service discovery to work. | +| `database.loadBalancing.replicaCheckInterval` | `1m` | The minimum amount of time between checking the status of a replica. | +| `database.migrations.enabled` | `true` | Enable the migrations job to automatically run migrations upon initial deployment and upgrades of the Chart. Note that migrations can also be run manually from within any running Registry pods. | +| `database.migrations.activeDeadlineSeconds` | `3600` | Set the [activeDeadlineSeconds](https://kubernetes.io/docs/concepts/workloads/controllers/job/#job-termination-and-cleanup) on the migrations job. | +| `database.migrations.annotations` | `{}` | Additional annotations to add to the migrations job. | +| `database.migrations.backoffLimit` | `6` | Set the [backoffLimit](https://kubernetes.io/docs/concepts/workloads/controllers/job/#job-termination-and-cleanup) on the migrations job. 
| +| `database.backgroundMigrations.enabled` | `false` | Enable background migrations for the database. This is an experimental feature for the Registry metadata database. Do not use in production. See the [specification](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/spec/gitlab/database-background-migrations.md?ref_type=heads) for a detailed explanation of how it works. | +| `database.backgroundMigrations.jobInterval` | | The sleep interval between each background migration job worker run. When not specified [a default value is set by the registry](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md?ref_type=heads#backgroundmigrations). | +| `database.backgroundMigrations.maxJobRetries` | | The maximum number of retries for a failed background migration job. When not specified [a default value is set by the registry](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md?ref_type=heads#backgroundmigrations). | +| `gc.disabled` | `true` | When set to `true`, the online GC workers are disabled. | +| `gc.maxbackoff` | `24h` | The maximum exponential backoff duration used to sleep between worker runs when an error occurs. Also applied when there are no tasks to be processed unless `gc.noidlebackoff` is `true`. Please note that this is not the absolute maximum, as a randomized jitter factor of up to 33% is always added. | +| `gc.noidlebackoff` | `false` | When set to `true`, disables exponential backoffs between worker runs when there are no tasks to be processed. | +| `gc.transactiontimeout` | `10s` | The database transaction timeout for each worker run. Each worker starts a database transaction at the start. The worker run is canceled if this timeout is exceeded to avoid stalled or long-running transactions. | +| `gc.blobs.disabled` | `false` | When set to `true`, the GC worker for blobs is disabled. 
| +| `gc.blobs.interval` | `5s` | The initial sleep interval between each worker run. | +| `gc.blobs.storagetimeout` | `5s` | The timeout for storage operations. Used to limit the duration of requests to delete dangling blobs on the storage backend. | +| `gc.manifests.disabled` | `false` | When set to `true`, the GC worker for manifests is disabled. | +| `gc.manifests.interval` | `5s` | The initial sleep interval between each worker run. | +| `gc.reviewafter` | `24h` | The minimum amount of time after which the garbage collector should pick up a record for review. `-1` means no wait. | +| `securityContext.fsGroup` | `1000` | Group ID under which the pod should be started | +| `securityContext.runAsUser` | `1000` | User ID under which the pod should be started | +| `securityContext.fsGroupChangePolicy` | | Policy for changing ownership and permission of the volume (requires Kubernetes 1.23) | +| `securityContext.seccompProfile.type` | `RuntimeDefault` | Seccomp profile to use | +| `containerSecurityContext` | | Override container [securityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core) under which the container is started | +| `containerSecurityContext.runAsUser` | `1000` | Allow to overwrite the specific security context user ID under which the container is started | +| `containerSecurityContext.allowPrivilegeEscalation` | `false` | Controls whether a process of the Gitaly container can gain more privileges than its parent process | +| `containerSecurityContext.runAsNonRoot` | `true` | Controls whether the container runs with a non-root user | +| `containerSecurityContext.capabilities.drop` | `[ "ALL" ]` | Removes [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) for the Gitaly container | +| `serviceAccount.automountServiceAccountToken` | `false` | Indicates whether or not the default ServiceAccount access token should be mounted in pods | +| `serviceAccount.enabled` | `false` | 
Indicates whether or not to use a ServiceAccount | +| `serviceLabels` | `{}` | Supplemental service labels | +| `tokenService` | `container_registry` | JWT token service | +| `tokenIssuer` | `gitlab-issuer` | JWT token issuer | +| `tolerations` | `[]` | Toleration labels for pod assignment | +| `affinity` | `{}` | Affinity rules for pod assignment | +| `middleware.storage` | | configuration layer for middleware storage ([s3 for instance](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#example-middleware-configuration)) | +| `redis.cache.enabled` | `false` | When set to `true`, the Redis cache is enabled. This feature is dependent on the [metadata database](#database) being enabled. Repository metadata will be cached on the configured Redis instance. | +| `redis.cache.host` | `<Redis URL>` | The hostname of the Redis instance. If empty, the value will be filled as `global.redis.host:global.redis.port`. | +| `redis.cache.port` | `6379` | The port of the Redis instance. | +| `redis.cache.sentinels` | `[]` | List sentinels with host and port. | +| `redis.cache.mainname` | | The main server name. Only applicable for Sentinel. | +| `redis.cache.password.enabled` | `false` | Indicates whether the Redis cache used by the Registry is password protected. | +| `redis.cache.password.secret` | `gitlab-redis-secret` | Name of the secret containing the Redis password. This will be automatically created if not provided, when the `shared-secrets` feature is enabled. | +| `redis.cache.password.key` | `redis-password` | Secret key in which the Redis password is stored. | +| `redis.cache.sentinelpassword.enabled` | `false` | Indicates whether Redis Sentinels are password protected. If `redis.cache.sentinelpassword` is empty, the values from `global.redis.sentinelAuth` are used. Only used when `redis.cache.sentinels` is defined. 
| +| `redis.cache.sentinelpassword.secret` | `gitlab-redis-secret` | Name of the secret containing the Redis Sentinel password. | +| `redis.cache.sentinelpassword.key` | `redis-sentinel-password` | Secret key in which the Redis Sentinel password is stored. | +| `redis.cache.db` | `0` | The name of the database to use for each connection. | +| `redis.cache.dialtimeout` | `0s` | The timeout for connecting to the Redis instance. Defaults to no timeout. | +| `redis.cache.readtimeout` | `0s` | The timeout for reading from the Redis instance. Defaults to no timeout. | +| `redis.cache.writetimeout` | `0s` | The timeout for writing to the Redis instance. Defaults to no timeout. | +| `redis.cache.tls.enabled` | `false` | Set to `true` to enable TLS. | +| `redis.cache.tls.insecure` | `false` | Set to `true` to disable server name verification when connecting over TLS. | +| `redis.cache.pool.size` | `10` | The maximum number of socket connections. Default is 10 connections. | +| `redis.cache.pool.maxlifetime` | `1h` | The connection age at which client retires a connection. Default is to not close aged connections. | +| `redis.cache.pool.idletimeout` | `300s` | How long to wait before closing inactive connections. | +| `redis.rateLimiting.enabled` | `false` | When set to `true`, the Redis rate limiter is enabled. This feature is under development. | +| `redis.rateLimiting.host` | `<Redis URL>` | The hostname of the Redis instance. If empty, the value will be filled as `global.redis.host:global.redis.port`. | +| `redis.rateLimiting.port` | `6379` | The port of the Redis instance. | +| `redis.rateLimiting.cluster` | `[]` | List of addresses with host and port. | +| `redis.rateLimiting.sentinels` | `[]` | List sentinels with host and port. | +| `redis.rateLimiting.mainname` | | The main server name. Only applicable for Sentinel. | +| `redis.rateLimiting.username` | | The username used to connect to the Redis instance. 
| +| `redis.rateLimiting.password.enabled` | `false` | Indicates whether the Redis instance is password protected. | +| `redis.rateLimiting.password.secret` | `gitlab-redis-secret` | Name of the secret containing the Redis password. This will be automatically created if not provided, when the `shared-secrets` feature is enabled. | +| `redis.rateLimiting.password.key` | `redis-password` | Secret key in which the Redis password is stored. | +| `redis.rateLimiting.db` | `0` | The name of the database to use for each connection. | +| `redis.rateLimiting.dialtimeout` | `0s` | The timeout for connecting to the Redis instance. Defaults to no timeout. | +| `redis.rateLimiting.readtimeout` | `0s` | The timeout for reading from the Redis instance. Defaults to no timeout. | +| `redis.rateLimiting.writetimeout` | `0s` | The timeout for writing to the Redis instance. Defaults to no timeout. | +| `redis.rateLimiting.tls.enabled` | `false` | Set to `true` to enable TLS. | +| `redis.rateLimiting.tls.insecure` | `false` | Set to `true` to disable server name verification when connecting over TLS. | +| `redis.rateLimiting.pool.size` | `10` | The maximum number of socket connections. | +| `redis.rateLimiting.pool.maxlifetime` | `1h` | The connection age at which the client retires a connection. Default is to not close aged connections. | +| `redis.rateLimiting.pool.idletimeout` | `300s` | How long to wait before closing inactive connections. | + +## Chart configuration examples + +### pullSecrets + +`pullSecrets` allows you to authenticate to a private registry to pull images for a pod. + +Additional details about private registries and their authentication methods can be +found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). 
+ +Below is an example use of `pullSecrets`: + +```yaml +image: + repository: my.registry.repository + tag: latest + pullPolicy: Always + pullSecrets: + - name: my-secret-name + - name: my-secondary-secret-name +``` + +### serviceAccount + +This section controls if a ServiceAccount should be created and if the default access token should be mounted in pods. + +| Name | Type | Default | Description | +| :----------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `automountServiceAccountToken` | Boolean | `false` | Controls if the default ServiceAccount access token should be mounted in pods. You should not enable this unless it is required by certain sidecars to work properly (for example, Istio). | +| `enabled` | Boolean | `false` | Indicates whether or not to use a ServiceAccount. | + +### tolerations + +`tolerations` allow you schedule pods on tainted worker nodes + +Below is an example use of `tolerations`: + +```yaml +tolerations: +- key: "node_label" + operator: "Equal" + value: "true" + effect: "NoSchedule" +- key: "node_label" + operator: "Equal" + value: "true" + effect: "NoExecute" +``` + +### affinity + +`affinity` is an optional parameter that allows you to set either or both: + +- `podAntiAffinity` rules to: + - Not schedule pods in the same domain as the pods that match the expression corresponding to the `topology key`. + - Set two modes of `podAntiAffinity` rules: required (`requiredDuringSchedulingIgnoredDuringExecution`) and preferred + (`preferredDuringSchedulingIgnoredDuringExecution`). Using the variable `antiAffinity` in `values.yaml`, set the setting to `soft` so that the preferred mode is + applied or set it to `hard` so that the required mode is applied. +- `nodeAffinity` rules to: + - Schedule pods to nodes that belong to a specific zone or zones. 
+ - Set two modes of `nodeAffinity` rules: required (`requiredDuringSchedulingIgnoredDuringExecution`) and preferred + (`preferredDuringSchedulingIgnoredDuringExecution`). When set to `soft`, the preferred mode is applied. When set to `hard`, the required mode is applied. This + rule is implemented only for the `registry` chart and the `gitlab` chart along with all its subcharts except `webservice` and `sidekiq`. + +`nodeAffinity` only implements the [`In` operator](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#operators). + +For more information, see [the relevant Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity). + +The following example sets `affinity`, with both `nodeAffinity` and `antiAffinity` set to `hard`: + +```yaml +nodeAffinity: "hard" +antiAffinity: "hard" +affinity: + nodeAffinity: + key: "test.com/zone" + values: + - us-east1-a + - us-east1-b + podAntiAffinity: + topologyKey: "test.com/hostname" +``` + +### annotations + +`annotations` allows you to add annotations to the registry pods. + +Below is an example use of `annotations` + +```yaml +annotations: + kubernetes.io/example-annotation: annotation-value +``` + +## Enable the sub-chart + +The way we've chosen to implement compartmentalized sub-charts includes the ability +to disable the components that you may not want in a given deployment. For this reason, +the first setting you should decide on is `enabled`. + +By default, Registry is enabled out of the box. Should you wish to disable it, set `enabled: false`. + +## Configuring the `image` + +This section details the settings for the container image used by this sub-chart's +[Deployment](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/charts/registry/templates/deployment.yaml). +You can change the included version of the Registry and `pullPolicy`.
+ +Default settings: + +- `tag: 'v4.15.2-gitlab'` +- `pullPolicy: 'IfNotPresent'` + +## Configuring the `service` + +This section controls the name and type of the [Service](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/charts/registry/templates/service.yaml). +These settings will be populated by [`values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/charts/registry/values.yaml). + +By default, the Service is configured as: + +| Name | Type | Default | Description | +| :--------------- | :----: | :---------- | :-------------------------------------------------------------------- | +| `name` | String | `registry` | Configures the name of the service | +| `type` | String | `ClusterIP` | Configures the type of the service | +| `externalPort` | Int | `5000` | Port exposed by the Service | +| `internalPort` | Int | `5000` | Port utilized by the Pod to accept request from the service | +| `clusterIP` | String | `null` | Allows one to configure a custom Cluster IP as necessary | +| `loadBalancerIP` | String | `null` | Allows one to configure a custom LoadBalancer IP address as necessary | + +## Configuring the `ingress` + +This section controls the registry Ingress. + +| Name | Type | Default | Description | +| :--------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `apiVersion` | String | | Value to use in the `apiVersion` field. | +| `annotations` | String | | This field is an exact match to the standard `annotations` for [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/). | +| `configureCertmanager` | Boolean | | Toggles Ingress annotation `cert-manager.io/issuer` and `acme.cert-manager.io/http01-edit-in-place`. 
For more information see the [TLS requirement for GitLab Pages](../../installation/tls.md). | +| `enabled` | Boolean | `false` | Setting that controls whether to create Ingress objects for services that support them. When `false` the `global.ingress.enabled` setting is used. | +| `tls.enabled` | Boolean | `true` | When set to `false`, you disable TLS for the Registry subchart. This is mainly useful for cases in which you cannot use TLS termination at `ingress-level`, like when you have a TLS-terminating proxy before the Ingress Controller. | +| `tls.secretName` | String | | The name of the Kubernetes TLS Secret that contains a valid certificate and key for the registry URL. When not set, the `global.ingress.tls.secretName` is used instead. Defaults to not being set. | +| `tls.cipherSuites` | Array | `[]` | The list of cipher suites that Container registry should present to the client during TLS handshake. | + +## Configuring TLS + +Container Registry supports TLS which secures its communication with other components, +including `nginx-ingress`. + +Prerequisites to configure TLS: + +- The TLS certificate must include the Registry Service host name + (for example, `RELEASE-registry.default.svc`) in the Common + Name (CN) or Subject Alternate Name (SAN). +- After the TLS certificate generates: + - Create a [Kubernetes TLS Secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) + - Create another Secret that only contains the CA certificate of the TLS certificate with `ca.crt` key. + +To enable TLS: + +1. Set `registry.tls.enabled` to `true`. +1. Set `global.hosts.registry.protocol` to `https`. +1. Pass the Secret names to `registry.tls.secretName` and `global.certificates.customCAs` accordingly. + +When `registry.tls.verify` is `true`, you must pass the CA certificate Secret +name to `registry.tls.caSecretName`. This is necessary for self-signed +certificates and custom Certificate Authorities. 
This Secret is used by NGINX to verify the TLS +certificate of Registry. + +For example: + +```yaml +global: + certificates: + customCAs: + - secret: registry-tls-ca + hosts: + registry: + protocol: https + +registry: + tls: + enabled: true + secretName: registry-tls + verify: true + caSecretName: registry-tls-ca +``` + +### Container Registry cipher suites + +Normally `tls.cipherSuites` option should be used only in some very unusual configurations where registry is deployed in a standalone mode and/or some non-default Ingress is used that does not support modern cipher suites. +In a standard GitLab deployment, the NGINX Ingress will choose the highest supported TLS version by the container-registry backend, which is TLS1.3 at the moment. +TLS1.3 does not allow for configuring ciphers and is secure by default. +In case when for some reason TLS1.3 is unavailable, the default TLS1.2 ciphers list that Container Registry is using is also compatible with NGINX Ingress default settings and is secure as well. + +### Configuring TLS for the debug port + +The Registry debug port also supports TLS. The debug port is used for the +Kubernetes liveness and readiness checks as well as exposing a `/metrics` +endpoint for Prometheus (if enabled). + +TLS can be enabled by setting `registry.debug.tls.enabled` to `true`. +A [Kubernetes TLS Secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) +can be provided in `registry.debug.tls.secretName` dedicated for use in +the debug port's TLS configuration. If a dedicated secret is not specified, +the debug configuration will fall back to sharing `registry.tls.secretName` with +the registry's regular TLS configuration. + +For Prometheus to scrape the `/metrics/` endpoint using `https` - additional +configuration is required for the certificate's CommonName attribute or +a SubjectAlternativeName entry.
See +[Configuring Prometheus to scrape TLS-enabled endpoints](../../installation/tools.md#configure-prometheus-to-scrape-tls-enabled-endpoints) +for those requirements. + +## Configuring the `networkpolicy` + +This section controls the registry +[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/). +This configuration is optional and is used to limit egress and Ingress of the registry to specific endpoints. + +| Name | Type | Default | Description | +| :---------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | This setting enables the `NetworkPolicy` for registry | +| `ingress.enabled` | Boolean | `false` | When set to `true`, the `Ingress` network policy will be activated. This will block all Ingress connections unless rules are specified. | +| `ingress.rules` | Array | `[]` | Rules for the Ingress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below | +| `egress.enabled` | Boolean | `false` | When set to `true`, the `Egress` network policy will be activated. This will block all egress connections unless rules are specified. | +| `egress.rules` | Array | `[]` | Rules for the egress policy, for details see <https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource> and the example below | + +### Example policy for preventing connections to all internal endpoints + +The Registry service normally requires egress connections to object storage, +Ingress connections from Docker clients, and kube-dns for DNS lookups.
This +adds the following network restrictions to the Registry service: + +- Allows Ingress requests: + - From the pods `sidekiq` , `webservice` and `nginx-ingress` to port `5000` + - From the `Prometheus` pod to port `9235` +- Allows Egress requests: + - To `kube-dns` to port `53` + - To endpoints like AWS VPC endpoint for S3 or STS `172.16.1.0/24` to port `443` + - To the internet `0.0.0.0/0` to port `443` + +_Note that the registry service requires outbound connectivity to the public +internet for images on [external object storage](../../advanced/external-object-storage) if no endpoint is used_ + +The example is based on the assumption that `kube-dns` was deployed +to the namespace `kube-system`, `prometheus` was deployed to the namespace +`monitoring` and `nginx-ingress` was deployed to the namespace `nginx-ingress`. + +```yaml +networkpolicy: + enabled: true + ingress: + enabled: true + rules: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: nginx-ingress + podSelector: + matchLabels: + app: nginx-ingress + component: controller + ports: + - port: 5000 + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: prometheus + component: server + release: gitlab + ports: + - port: 9235 + - from: + - podSelector: + matchLabels: + app: sidekiq + ports: + - port: 5000 + - from: + - podSelector: + matchLabels: + app: webservice + ports: + - port: 5000 + egress: + enabled: true + rules: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: UDP + - to: + - ipBlock: + cidr: 172.16.1.0/24 + ports: + - port: 443 + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 +``` + +## Configuring KEDA + +This `keda` section enables the installation of [KEDA](https://keda.sh/) `ScaledObjects` instead of regular `HorizontalPodAutoscalers`. 
+This configuration is optional and can be used when there is a need for autoscaling based on custom or external metrics. + +Most settings default to the values set in the `hpa` section where applicable. + +If the following are true, CPU and memory triggers are added automatically based on the CPU and memory thresholds set in the `hpa` section: + +- `triggers` is not set. +- The corresponding `request.cpu.request` or `request.memory.request` setting is also set to a non-zero value. + +If no triggers are set, the `ScaledObject` is not created. + +Refer to the [KEDA documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/) for more details about those settings. + +| Name | Type | Default | Description | +| :---------------------------- | :-----: | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `enabled` | Boolean | `false` | Use [KEDA](https://keda.sh/) `ScaledObjects` instead of `HorizontalPodAutoscalers` | +| `pollingInterval` | Integer | `30` | The interval to check each trigger on | +| `cooldownPeriod` | Integer | `300` | The period to wait after the last trigger reported active before scaling the resource back to 0 | +| `minReplicaCount` | Integer | | Minimum number of replicas KEDA will scale the resource down to, defaults to `hpa.minReplicas` | +| `maxReplicaCount` | Integer | | Maximum number of replicas KEDA will scale the resource up to, defaults to `hpa.maxReplicas` | +| `fallback` | Map | | KEDA fallback configuration, see the [documentation](https://keda.sh/docs/2.10/concepts/scaling-deployments/#fallback) | +| `hpaName` | String | | The name of the HPA resource KEDA will create, defaults to `keda-hpa-{scaled-object-name}` | +| `restoreToOriginalReplicaCount` | Boolean | | Specifies whether the target resource should be scaled back to original replicas count after the `ScaledObject` is deleted | +| 
`behavior` | Map | | The specifications for up- and downscaling behavior, defaults to `hpa.behavior` | +| `triggers` | Array | | List of triggers to activate scaling of the target resource, defaults to triggers computed from `hpa.cpu` and `hpa.memory` | + +### Example policy for preventing connections to all internal endpoints + +The Registry service normally requires egress connections to object storage, +Ingress connections from Docker clients, and kube-dns for DNS lookups. This +adds the following network restrictions to the Registry service: + +- All egress requests to the local network on `10.0.0.0/8` port 53 are allowed (for kubeDNS) +- Other egress requests to the local network on `10.0.0.0/8` are restricted +- Egress requests outside of the `10.0.0.0/8` are allowed + +_Note that the registry service requires outbound connectivity to the public +internet for images on [external object storage](../../advanced/external-object-storage)_ + +```yaml +networkpolicy: + enabled: true + egress: + enabled: true + # The following rules enable traffic to all external + # endpoints, except the local + # network (except DNS requests) + rules: + - to: + - ipBlock: + cidr: 10.0.0.0/8 + ports: + - port: 53 + protocol: UDP + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 +``` + +## Defining the Registry Configuration + +The following properties of this chart pertain to the configuration of the underlying +[registry](https://hub.docker.com/_/registry/) container. Only the most critical values +for integration with GitLab are exposed. For this integration, we make use of the `auth.token.x` +settings of [Docker Distribution](https://github.com/docker/distribution), controlling +authentication to the registry via JWT [authentication tokens](https://distribution.github.io/distribution/spec/auth/token/). + +### httpSecret + +Field `httpSecret` is a map that contains two items: `secret` and `key`. 
+ +The content of the key this references correlates to the `http.secret` value of +[registry](https://hub.docker.com/_/registry/). This value should be populated with +a cryptographically generated random string. + +The `shared-secrets` job will automatically create this secret if not provided. It will be +filled with a securely generated 128 character alpha-numeric string that is base64 encoded. + +To create this secret manually: + +```shell +kubectl create secret generic gitlab-registry-httpsecret --from-literal=secret=strongrandomstring +``` + +### Notification Secret + +Notification Secret is utilized for calling back to the GitLab application in various ways, +such as for Geo to help manage syncing Container Registry data between primary and secondary sites. + +The `notificationSecret` secret object will be automatically created if +not provided, when the `shared-secrets` feature is enabled. + +To create this secret manually: + +```shell +kubectl create secret generic gitlab-registry-notification --from-literal=secret=[\"strongrandomstring\"] +``` + +Then proceed to set + +```yaml +global: + # To provide your own secret + registry: + notificationSecret: + secret: gitlab-registry-notification + key: secret + + # If utilising Geo, and wishing to sync the container registry. + # Define this in the primary site configs only. + geo: + registry: + replication: + enabled: true + primaryApiUrl: <URL to primary registry> +``` + +Ensuring the `secret` value is set to the name of the secret created above + +### Redis cache Secret + +The Redis cache Secret is used when `global.redis.auth.enabled` is set to `true`. + +When the `shared-secrets` feature is enabled, the `gitlab-redis-secret` secret object +is automatically created if not provided. + +To create this secret manually, see the [Redis password instructions](../../installation/secrets.md#redis-password). 
+ +### authEndpoint + +The `authEndpoint` field is a string, providing the URL to the GitLab instance(s) that +the [registry](https://hub.docker.com/_/registry/) will authenticate to. + +The value should include the protocol and hostname only. The chart template will automatically +append the necessary request path. The resulting value will be populated to `auth.token.realm` +inside the container. For example: `authEndpoint: "https://gitlab.example.com"` + +By default this field is populated with the GitLab hostname configuration set by the +[Global Settings](../globals.md). + +### certificate + +The `certificate` field is a map containing two items: `secret` and `key`. + +`secret` is a string containing the name of the [Kubernetes Secret](https://kubernetes.io/docs/concepts/configuration/secret/) +that houses the certificate bundle to be used to verify the tokens created by the GitLab instance(s). + +`key` is the name of the `key` in the `Secret` which houses the certificate +bundle that will be provided to the [registry](https://hub.docker.com/_/registry/) +container as `auth.token.rootcertbundle`. + +Default Example: + +```yaml +certificate: + secret: gitlab-registry + key: registry-auth.crt +``` + +### readiness and liveness probe + +By default there is a readiness and liveness probe configured to +check `/debug/health` on port `5001` which is the debug port. + +### validation + +The `validation` field is a map that controls the Docker image validation +process in the registry. When image validation is enabled the registry rejects +windows images with foreign layers, unless the `manifests.urls.allow` field +within the validation stanza is explicitly set to allow those layer urls. + +Validation only happens during manifest push, so images already present in the +registry are not affected by changes to the values in this section. + +The image validation is turned off by default. 
+ +To enable image validation you need to explicitly set `registry.validation.disabled: false`. + +#### manifests + +The `manifests` field allows configuration of validation policies particular to +manifests. + +The `urls` section contains both `allow` and `deny` fields. For manifest layers +which contain URLs to pass validation, that layer must match one of the regular +expressions in the `allow` field, while not matching any regular expression in +the `deny` field. + +| Name | Type | Default | Description | +| :----------------: | :---: | :------ | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| `referencelimit` | Int | `0` | The maximum number of references, such as layers, image configurations, and other manifests, that a single manifest may have. When set to `0` (default) this validation is disabled. | +| `payloadsizelimit` | Int | `0` | The maximum data size in bytes of manifest payloads. When set to `0` (default) this validation is disabled. | +| `urls.allow` | Array | `[]` | List of regular expressions that enables URLs in the layers of manifests. When left empty (default), layers with any URLs will be rejected. | +| `urls.deny` | Array | `[]` | List of regular expressions that restricts the URLs in the layers of manifests. When left empty (default), no layer with URLs which passed the `urls.allow` list will be rejected | + +### notifications + +The `notifications` field is used to configure [Registry notifications](https://distribution.github.io/distribution/about/notifications/#configuration). +It has an empty hash as default value. 
+ +| Name | Type | Default | Description | +| :---------: | :---: | :------ | :------------------------------------------------------------------------------------------------------------------: | +| `endpoints` | Array | `[]` | List of items where each item correspond to an [endpoint](https://distribution.github.io/distribution/about/configuration/#endpoints) | +| `events` | Hash | `{}` | Information provided in [event](https://distribution.github.io/distribution/about/configuration/#events) notifications | + +An example setting will look like the following: + +```yaml +notifications: + endpoints: + - name: FooListener + url: https://foolistener.com/event + timeout: 500ms + # DEPRECATED: use `maxretries` instead https://gitlab.com/gitlab-org/container-registry/-/issues/1243. + # When using `maxretries`, `threshold` is ignored: https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md?ref_type=heads#endpoints + threshold: 10 + maxretries: 10 + backoff: 1s + - name: BarListener + url: https://barlistener.com/event + timeout: 100ms + # DEPRECATED: use `maxretries` instead https://gitlab.com/gitlab-org/container-registry/-/issues/1243. + # When using `maxretries`, `threshold` is ignored: https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md?ref_type=heads#endpoints + threshold: 3 + maxretries: 5 + backoff: 1s + events: + includereferences: true +``` + +<!-- vale gitlab.Spelling = NO --> + +### hpa + +<!-- vale gitlab.Spelling = YES --> + +The `hpa` field is an object, controlling the number of [registry](https://hub.docker.com/_/registry/) +instances to create as a part of the set. This defaults to a `minReplicas` value +of `2`, a `maxReplicas` value of 10, and configures the +`cpu.targetAverageUtilization` to 75%. + +### storage + +```yaml +storage: + secret: + key: config + extraKey: +``` + +The `storage` field is a reference to a Kubernetes Secret and associated key. 
The content +of this secret is taken directly from [Registry Configuration: `storage`](https://distribution.github.io/distribution/about/configuration/#storage). +Please refer to that documentation for more details. + +Examples for [AWS s3](https://distribution.github.io/distribution/storage-drivers/s3/) and +[Google GCS](https://distribution.github.io/distribution/storage-drivers/gcs/) drivers can be +found in [`examples/objectstorage`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage): + +- [`registry.s3.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/registry.s3.yaml) +- [`registry.gcs.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/objectstorage/registry.gcs.yaml) + +For S3, make sure you give the correct +[permissions for registry storage](https://distribution.github.io/distribution/storage-drivers/s3/#s3-permission-scopes). For more information about storage configuration, see +[Container Registry storage driver](https://docs.gitlab.com/administration/packages/container_registry/#container-registry-storage-driver) in the administration documentation. + +Place the _contents_ of the `storage` block into the secret, and provide the following +as items to the `storage` map: + +- `secret`: name of the Kubernetes Secret housing the YAML block. +- `key`: name of the key in the secret to use. Defaults to `config`. +- `extraKey`: _(optional)_ name of an extra key in the secret, which will be mounted + to `/etc/docker/registry/storage/${extraKey}` within the container. This can be + used to provide the `keyfile` for the `gcs` driver. 
+ +```shell +# Example using S3 +kubectl create secret generic registry-storage \ + --from-file=config=registry-storage.yaml + +# Example using GCS with JSON key +# - Note: `registry.storage.extraKey=gcs.json` +kubectl create secret generic registry-storage \ + --from-file=config=registry-storage.yaml \ + --from-file=gcs.json=example-project-382839-gcs-bucket.json +``` + +You can [disable the redirect for the storage driver](https://docs.gitlab.com/administration/packages/container_registry/#disable-redirect-for-storage-driver), +ensuring that all traffic flows through the Registry service instead of redirecting to another backend: + +```yaml +storage: + secret: example-secret + key: config + redirect: + disable: true +``` + +If you chose to use the `filesystem` driver: + +- You will need to provide persistent volumes for this data. +- [`hpa.minReplicas`](#hpa) should be set to `1` +- [`hpa.maxReplicas`](#hpa) should be set to `1` + +For the sake of resiliency and simplicity, it is recommended to make use of an +external service, such as `s3`, `gcs`, `azure` or other compatible Object Storage. + +{{< alert type="note" >}} + +The chart will populate `delete.enabled: true` into this configuration +by default if not specified by the user. This keeps expected behavior in line with +the default use of MinIO, as well as the Linux package. Any user provided value +will supersede this default. + +{{< /alert >}} + +### middleware.storage + +Configuration of `middleware.storage` follows [upstream convention](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#middleware): + +Configuration is fairly generic and follows similar pattern: + +```yaml +middleware: + # See https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#middleware + storage: + - name: cloudfront + options: + baseurl: https://abcdefghijklmn.cloudfront.net/ + # `privatekey` is auto-populated with the content from the privatekey Secret. 
+ privatekeySecret: + secret: cloudfront-secret-name + # "key" value is going to be used to generate filename for PEM storage: + # /etc/docker/registry/middleware.storage/<index>/<key> + key: private-key-ABC.pem + keypairid: ABCEDFGHIJKLMNOPQRST +``` + +Within above code `options.privatekeySecret` is a `generic` Kubernetes secret contents of which corresponds to PEM file contents: + +```shell +kubectl create secret generic cloudfront-secret-name --type=kubernetes.io/ssh-auth --from-file=private-key-ABC.pem=pk-ABCEDFGHIJKLMNOPQRST.pem +``` + +`privatekey` used upstream is being auto-populated by chart from the privatekey Secret and will be **ignored** if specified. + +#### `keypairid` variants + +Various vendors use different field names for the same construct: + +| Vendor | field name | +| :--------: | :---------: | +| Google CDN | `keyname` | +| CloudFront | `keypairid` | + +{{< alert type="note" >}} + +Only configuration of `middleware.storage` section is supported at this time. + +{{< /alert >}} + +### debug + +The debug port is enabled by default and is used for the liveness/readiness +probe. Additionally, Prometheus metrics can be enabled via the `metrics` values. + +```yaml +debug: + addr: + port: 5001 + +metrics: + enabled: true +``` + +### health + +The `health` property is optional, and contains preferences for +a periodic health check on the storage driver's backend storage. +For more details, see Docker's [configuration documentation](https://distribution.github.io/distribution/about/configuration/#health). 
+ +```yaml +health: + storagedriver: + enabled: false + interval: 10s + threshold: 3 +``` + +### reporting + +The `reporting` property is optional and enables [reporting](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#reporting) + +```yaml +reporting: + sentry: + enabled: true + dsn: 'https://<key>@sentry.io/<project>' + environment: 'production' +``` + +### profiling + +The `profiling` property is optional and enables [continuous profiling](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#profiling) + +```yaml +profiling: + stackdriver: + enabled: true + credentials: + secret: gitlab-registry-profiling-creds + key: credentials + service: gitlab-registry +``` + +### database + +{{< history >}} + +- [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/5521) in GitLab 16.4 as a [beta](https://docs.gitlab.com/policy/development_stages_support/#beta) feature. +- [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/423459) in GitLab 17.3. + +{{< /history >}} + +The `database` property is optional and enables the [metadata database](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#database). + +See the [administration documentation](https://docs.gitlab.com/administration/packages/container_registry_metadata_database/) +before enabling this feature. + +{{< alert type="note" >}} + +This feature requires PostgreSQL 13 or newer. 
+ +{{< /alert >}} + +```yaml +database: + enabled: true + host: registry.db.example.com + port: 5432 + user: registry + password: + secret: gitlab-postgresql-password + key: postgresql-registry-password + dbname: registry + sslmode: verify-full + ssl: + secret: gitlab-registry-postgresql-ssl + clientKey: client-key.pem + clientCertificate: client-cert.pem + serverCA: server-ca.pem + connecttimeout: 5s + draintimeout: 2m + preparedstatements: false + primary: 'primary.record.fqdn' + pool: + maxidle: 25 + maxopen: 25 + maxlifetime: 5m + maxidletime: 5m + migrations: + enabled: true + activeDeadlineSeconds: 3600 + backoffLimit: 6 + backgroundMigrations: + enabled: true + maxJobRetries: 3 + jobInterval: 10s +``` + +#### Load balancing + +{{< alert type="warning" >}} + +This is an experimental feature under active development and must not be used in production. + +{{< /alert >}} + +The `loadBalancing` section allows configuring [database load balancing](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#loadbalancing). The [Redis cache](#redis-cache) must be enabled for this feature to work. + +#### Manage the database + +See the [Container registry metadata database](metadata_database.md) page for +more information about creating the database. + +### `gc` property + +The `gc` property provides [online garbage collection](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#gc) +options. + +Online garbage collection requires the [metadata database](#database) to be enabled. You must use online garbage collection when using the database, though +you can temporarily disable online garbage collection for maintenance and debugging. 
+ +```yaml +gc: + disabled: false + maxbackoff: 24h + noidlebackoff: false + transactiontimeout: 10s + reviewafter: 24h + manifests: + disabled: false + interval: 5s + blobs: + disabled: false + interval: 5s + storagetimeout: 5s +``` + +### Redis cache + +{{< alert type="note" >}} + +The Redis cache is a beta feature from version 16.4 and later. Please +review the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/423459) +and associated documentation before enabling this feature. + +{{< /alert >}} + +The `redis.cache` property is optional and provides options related to the +[Redis cache](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#cache-1). +To use `redis.cache` with the registry, the [metadata database](#database) must be enabled. + +For example: + +```yaml +redis: + cache: + enabled: true + host: localhost + port: 16379 + password: + secret: gitlab-redis-secret + key: redis-password + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + tls: + enabled: true + insecure: true + pool: + size: 10 + maxlifetime: 1h + idletimeout: 300s +``` + +#### Cluster + +The `redis.rateLimiting.cluster` property is a list of hosts and ports +to connect to a Redis cluster. For example: + +```yaml +redis: + cache: + enabled: true + host: redis.example.com + cluster: + - host: host1.example.com + port: 6379 + - host: host2.example.com + port: 6379 +``` + +#### Sentinels + +The `redis.cache` can use the `global.redis.sentinels` configuration. Local values can be provided and +will take precedence over the global values. For example: + +```yaml +redis: + cache: + enabled: true + host: redis.example.com + sentinels: + - host: sentinel1.example.com + port: 16379 + - host: sentinel2.example.com + port: 16379 +``` + +#### Sentinel password support + +{{< history >}} + +- [Introduced](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/3805) in GitLab 17.2. 
+ +{{< /history >}} + +The `redis.cache` can also use the [`global.redis.sentinelAuth` configuration](../globals.md#redis-sentinel-password-support) +to use an authentication password for Redis Sentinel. Local values can +be provided and take precedence over the global values. For example: + +```yaml +redis: + cache: + enabled: true + host: redis.example.com + sentinels: + - host: sentinel1.example.com + port: 16379 + - host: sentinel2.example.com + port: 16379 + sentinelpassword: + enabled: true + secret: registry-redis-sentinel + key: password +``` + +### Redis rate-limiter + +{{< alert type="warning" >}} + +The Redis rate-limiting is [under development](https://gitlab.com/groups/gitlab-org/-/epics/13237). +More functionality details will be added to this section as they become available. + +{{< /alert >}} + +The `redis.rateLimiting` property is optional and provides options related to the +[Redis rate-limiter](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#ratelimiter). + +For example: + +```yaml +redis: + rateLimiting: + enabled: true + host: localhost + port: 16379 + username: registry + password: + secret: gitlab-redis-secret + key: redis-password + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + tls: + enabled: true + insecure: true + pool: + size: 10 + maxlifetime: 1h + idletimeout: 300s +``` + +## Garbage Collection + +The Docker Registry will build up extraneous data over time which can be freed using +[garbage collection](https://distribution.github.io/distribution/about/garbage-collection/). +As of [now](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1586) there is no +fully automated or scheduled way to run the garbage collection with this Chart. + +{{< alert type="warning" >}} + +You must use [online garbage collection](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/configuration.md#gc) with the +[metadata database](#database). 
Using manual garbage collection with the metadata database will lead to data loss.
+Online garbage collection fully replaces the need to manually run garbage collection.
+
+{{< /alert >}}
+
+### Manual Garbage Collection
+
+Manual garbage collection requires the registry to be in read-only mode first. Let's assume that you've already
+installed the GitLab chart by using Helm, named it `mygitlab`, and installed it in the namespace `gitlabns`.
+Replace these values in the commands below according to your actual configuration.
+
+```shell
+# Because of https://github.com/helm/helm/issues/2948 we can't rely on --reuse-values, so let's get our current config.
+helm get values mygitlab > mygitlab.yml
+# Upgrade Helm installation and configure the registry to be read-only.
+# The --wait parameter makes Helm wait until all resources are in ready state, so we are safe to continue.
+helm upgrade mygitlab gitlab/gitlab -f mygitlab.yml --set registry.maintenance.readonly.enabled=true --wait
+# Our registry is in r/o mode now, so let's get the name of one of the registry Pods.
+# Note down the Pod name and replace the '<registry-pod>' placeholder below with that value.
+# Replace the single quotes with double quotes (' => ") if you are using this with Windows' cmd.exe.
+kubectl get pods -n gitlabns -l app=registry -o jsonpath='{.items[0].metadata.name}'
+# Run the actual garbage collection. Check the registry's manual if you really want the '-m' parameter.
+kubectl exec -n gitlabns <registry-pod> -- /bin/registry garbage-collect -m /etc/docker/registry/config.yml
+# Reset registry back to original state.
+helm upgrade mygitlab gitlab/gitlab -f mygitlab.yml --wait
+# All done :)
+```
+
+### Running administrative commands against the Container Registry
+
+The administrative commands can be run against the Container Registry
+only from a Registry pod, where both the `registry` binary as well as necessary
+configuration is available. 
[Issue #2629](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2629) +is open to discuss how to provide this functionality from the toolbox pod. + +To run administrative commands: + +1. Connect to a Registry pod: + + ```shell + kubectl exec -it <registry-pod> -- bash + ``` + +1. Once inside the Registry pod, the `registry` binary is available in `PATH` and + can be used directly. The configuration file is available at + `/etc/docker/registry/config.yml`. The following example checks the status + of the database migration: + + ```shell + registry database migrate status /etc/docker/registry/config.yml + ``` + +For further details and other available commands, refer to the relevant +documentation: + +- [General Registry documentation](https://docs.docker.com/registry/) +- [GitLab-specific Registry documentation](https://gitlab.com/gitlab-org/container-registry/-/tree/master/docs-gitlab) diff --git a/chart/doc/charts/registry/metadata_database.md b/chart/doc/charts/registry/metadata_database.md index 998896e6bc49f5b5147b6a61dae8565565a94f42..b4e2bbbadbb5fff54ed8b8855d04c50ceea386ad 100644 --- a/chart/doc/charts/registry/metadata_database.md +++ b/chart/doc/charts/registry/metadata_database.md @@ -2,16 +2,22 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Manage the container registry metadata database --- -# Manage the container registry metadata database +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed -> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/5521) in GitLab 16.4 as a [beta](https://docs.gitlab.com/ee/policy/development_stages_support.html#beta) feature. -> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/423459) in GitLab 17.3. 
+{{< /details >}} + +{{< history >}} + +- [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/5521) in GitLab 16.4 as a [beta](https://docs.gitlab.com/policy/development_stages_support/#beta) feature. +- [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/423459) in GitLab 17.3. + +{{< /history >}} The metadata database enables many new registry features, including online garbage collection, and increases the efficiency of many registry operations. @@ -23,17 +29,20 @@ You can migrate existing registries to the metadata database, and use online gar Some database-enabled features are only enabled for GitLab.com and automatic database provisioning for the registry database is not available. Review the feature support section in the -[administration documentation](https://docs.gitlab.com/ee/administration/packages/container_registry_metadata_database.html#metadata-database-feature-support) +[administration documentation](https://docs.gitlab.com/administration/packages/container_registry_metadata_database/#metadata-database-feature-support) for the status of features related to the container registry database. ## Create the database Follow the steps below to manually create the database and role. -NOTE: +{{< alert type="note" >}} + These instructions assume you are using the bundled PostgreSQL server. If you are using your own server, there will be some variation in how you connect. +{{< /alert >}} + 1. Create the secret with the database password: ```shell @@ -99,12 +108,15 @@ Follow the instructions that match your situation: - [One-step migration](#one-step-migration). Only recommended for relatively small registries or no requirement to avoid downtime. - [Three-step migration](#three-step-migration). Recommended for larger container registries. 
-NOTE: +{{< alert type="note" >}} + For a list of import times for various test and user registries, see [this table in issue 423459](https://gitlab.com/gitlab-org/gitlab/-/issues/423459#completed-tests-and-user-reports). Your registry deployment is unique, and your import times might be longer than those reported in the issue. +{{< /alert >}} + ### Before you start -Read the [before you start](https://docs.gitlab.com/ee/administration/packages/container_registry_metadata_database.html#before-you-start) +Read the [before you start](https://docs.gitlab.com/administration/packages/container_registry_metadata_database/#before-you-start) section of the Registry administration guide. ### New installations @@ -171,9 +183,12 @@ A few factors affect the duration of the migration: - The number of registry pods running in your cluster. - Network latency between the registry, PostgresSQL and your configured Object Storage. -NOTE: +{{< alert type="note" >}} + Work to automate the migration process is being tracked in [issue 5293](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/5293). +{{< /alert >}} + #### Requirements You must complete the following steps before attempting the one-step or @@ -292,20 +307,26 @@ To migrate in three steps, you must: 1. Import all repository data 1. Import common blobs -NOTE: +{{< alert type="note" >}} + Users have reported step one import completed at [rates of 2 to 4 TB per hour](https://gitlab.com/gitlab-org/gitlab/-/issues/423459). At the slower speed, registries with over 100TB of data could take longer than 48 hours. +{{< /alert >}} + ##### Step 1. Pre-import repositories For larger instances, this process can take hours or even days to complete, depending on the size of your registry. You can still use the registry during this process. 
-WARNING: +{{< alert type="warning" >}} + It is [not yet possible](https://gitlab.com/gitlab-org/container-registry/-/issues/1162) to restart the migration, so it's important to let the migration run to completion. If you must halt the operation, you have to restart this step. +{{< /alert >}} + 1. Follow the steps described in the [requirements section](#requirements). 1. Find the `registry:` section in the `values.yml` file and add the `database` section. Set: @@ -355,12 +376,15 @@ If you must halt the operation, you have to restart this step. The first step is complete when the `registry import complete` displays. -NOTE: +{{< alert type="note" >}} + You should try to schedule the following step as soon as possible to reduce the amount of downtime required. Ideally, less than one week after step one completes. Any new data written to the registry before the next step causes that step to take more time. +{{< /alert >}} + ##### Step 2. Import all repository data This step requires the registry to be set in `read-only` mode. @@ -460,3 +484,42 @@ cd ~ ``` After the command completes successfully, the registry is now fully migrated to the database! + +## Troubleshooting + +### Error: `panic: interface conversion: interface {} is nil, not bool` + +When importing [existing registries](#existing-registries), you might see this error: + +```shell +panic: interface conversion: interface {} is nil, not bool +``` + +This is a known [issue](https://gitlab.com/gitlab-org/container-registry/-/merge_requests/2041) +that is fixed in registry version `v4.15.2-gitlab` and in GitLab 17.9 and later. + +To work around this issue, upgrade your registry version: + +1. In your `values.yml` file, set the registry image tag: + + ```yaml + registry: + image: + tag: v4.15.2-gitlab + ``` + +1. 
Upgrade your Helm installation: + + ```shell + helm upgrade gitlab -f values.yml + ``` + +Alternatively, you can manually update the registry configuration: + +- In `/etc/docker/registry/config.yml`, set `parallelwalk` to `false` for your storage provider. For example, with S3: + + ```yaml + storage: + s3: + parallelwalk: false + ``` diff --git a/chart/doc/charts/shared-secrets.md b/chart/doc/charts/shared-secrets.md index f6e599563365baade147018feeba4e1ee616fe08..14925176e0e22f5215cc371df53f1fb7b2cf998c 100644 --- a/chart/doc/charts/shared-secrets.md +++ b/chart/doc/charts/shared-secrets.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Using the Shared-Secrets Job --- -# Using the Shared-Secrets Job +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} The `shared-secrets` job is responsible for provisioning a variety of secrets used across the installation, unless otherwise manually specified. This includes: @@ -19,7 +21,7 @@ used across the installation, unless otherwise manually specified. This includes 1. MinIO, Registry, GitLab Shell, and Gitaly secrets 1. Redis and PostgreSQL passwords 1. SSH host keys -1. GitLab Rails secret for [encrypted credentials](https://docs.gitlab.com/ee/administration/encrypted_configuration.html) +1. 
GitLab Rails secret for [encrypted credentials](https://docs.gitlab.com/administration/encrypted_configuration/) ## Installation command line options @@ -39,12 +41,12 @@ the `helm install` command using the `--set` flag: | `priorityClassName` | | [Priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) assigned to pods | | `rbac.create` | `true` | Create RBAC roles and bindings | | `resources` | | resource requests, limits | -| `securitContext.fsGroup` | `65534` | User ID to mount filesystems as | -| `securitContext.runAsUser` | `65534` | User ID to run the container as | -| `selfsign.caSubject` | `GitLab Helm Chart` | selfsign CA Subject | -| `selfsign.image.repository` | `registry.gitlab.com/gitlab-org/build/cnf/cfssl-self-sign` | selfsign image repository | -| `selfsign.image.pullSecrets` | | Secrets for the image repository | -| `selfsign.image.tag` | | selfsign image tag | +| `securityContext.fsGroup` | `65534` | User ID to mount filesystems as | +| `securityContext.runAsUser` | `65534` | User ID to run the container as | +| `selfsign.caSubject` | `GitLab Helm Chart` | selfsign CA Subject | +| `selfsign.image.repository` | `registry.gitlab.com/gitlab-org/build/cnf/cfssl-self-sign` | selfsign image repository | +| `selfsign.image.pullSecrets` | | Secrets for the image repository | +| `selfsign.image.tag` | | selfsign image tag | | `selfsign.keyAlgorithm` | `rsa` | selfsign cert key algorithm | | `selfsign.keySize` | `4096` | selfsign cert key size | | `serviceAccount.enabled` | `true` | Define serviceAccountName on job(s) | @@ -85,7 +87,10 @@ shared-secrets: enabled: false ``` -NOTE: +{{< alert type="note" >}} + If you disable this job, you **must** manually create all secrets, and provide all necessary secret content. See [installation/secrets](../installation/secrets.md#manual-secret-creation-optional) for further details. 
+ +{{< /alert >}} diff --git a/chart/doc/charts/traefik/_index.md b/chart/doc/charts/traefik/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..0e1356b84a65e62fa4f396984a83c1423579f996 --- /dev/null +++ b/chart/doc/charts/traefik/_index.md @@ -0,0 +1,49 @@ +--- +stage: Enablement +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#designated-technical-writers +title: Using Traefik +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +The [Traefik Helm chart](https://artifacthub.io/packages/helm/traefik/traefik) can replace the +[bundled NGINX Helm chart](../nginx/_index.md) as the Ingress controller. + +Traefik will [translate the native Kubernetes Ingress](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) objects into +[IngressRoute](https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-ingressroute) objects. + +Traefik also supports Git over SSH via +[IngressRouteTCP](https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-ingressroutetcp) +objects, which are deployed by the GitLab Shell chart when [`global.ingress.provider`](../globals.md#configure-ingress-settings) is configured as `traefik`. + +## Configuring Traefik + +See the [Traefik Helm chart documentation](https://github.com/traefik/traefik-helm-chart/tree/master/traefik) +for configuration details. + +See the [Traefik example configuration](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/values-traefik-ingress.yaml) +for detailed YAML for values tested with the GitLab Helm Charts. + +### Global Settings + +We share some common global settings among our charts. See the [Global Ingress documentation](../globals.md#configure-ingress-settings) +for common configuration options, such as GitLab and Registry hostnames. 
+ +### FIPS-compliant Traefik + +[Traefik Enterprise](https://doc.traefik.io/traefik-enterprise/) provides FIPS compliance. Note that Traefik Enterprise requires +a license, which is not included as part of this chart. + +Following are links for more information on Traefik Enterprise: + +- [Traefik Enterprise features](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) +- [Traefik Enterprise FIPS image](https://doc.traefik.io/traefik-enterprise/operations/fips-image/) +- [Traefik Enterprise Helm chart](https://doc.traefik.io/traefik-enterprise/installing/kubernetes/helm/) +- [Traefik Enterprise Operator on ArtifactHub](https://artifacthub.io/packages/olm/community-operators/traefikee-operator) +- [Traefik Enterprise Certified OpenShift Operator on RedHat Catalog](https://catalog.redhat.com/software/container-stacks/detail/5e98745a6c5dcb34dfbb1a0a) diff --git a/chart/doc/development/_index.md b/chart/doc/development/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..51c97d205879502589225c782a3d14c22dc4cbec --- /dev/null +++ b/chart/doc/development/_index.md @@ -0,0 +1,201 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Contribute to Helm chart development +--- + +Our contribution policies can be found in [CONTRIBUTING.md](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/CONTRIBUTING.md) + +Contributing documentation changes to the charts requires only a text editor. Documentation is stored in the [`doc/`](https://gitlab.com/gitlab-org/charts/gitlab/-/tree/master/doc) directory. + +## Architecture + +Before starting development, it is helpful to review the goals, architecture, and design decisions for the charts. + +See [Architecture of GitLab Helm charts](../architecture/_index.md) for this information. 
+ +## Environment setup + +See [setting up your development environment](environment_setup.md) to prepare your workstation for charts development. + +## Style guide + +See the [chart development style guide](style_guide.md) for guidelines and best practices for chart development. + +## Writing and running tests + +We run several different types of tests to validate the charts work as intended. + +### Developing RSpec tests + +Unit tests are written in RSpec and stored in the `spec/` directory of the chart repository. + +Read the notes on [creating RSpec tests](rspec.md) to validate the +functionality of the chart. + +### Developing bats tests + +Unit tests for functions in shell scripts are written in [bats](https://bats-core.readthedocs.io/en/stable/) and stored next to the script file they are testing in the `scripts/` directory of the chart repository. + +Read the notes on [creating bats tests](bats.md) to validate functions in the scripts used in this project. + +### Running GitLab QA + +[GitLab QA](https://gitlab.com/gitlab-org/gitlab-qa) can be used to run integrations and functional tests against a deployed cloud-native GitLab installation. + +[Read more in the GitLab QA chart docs](gitlab-qa/_index.md). + +### ChaosKube + +ChaosKube can be used to test the fault tolerance of highly available cloud-native GitLab installations. + +[Read more in the ChaosKube chart docs](chaoskube/_index.md). + +### ClickHouse + +[Instructions](clickhouse.md) for configuring an external ClickHouse server with GitLab. + +## Versioning and Release + +Details on the version scheme, branching and tags can be found in [release document](release.md). + +## Changelog Entries + +All `CHANGELOG.md` entries should be created via the [changelog entries](changelog.md) workflow. 
+ +## Pipelines + +GitLab CI pipelines run on pipelines for: + +- Merge requests +- Default branch +- Stable branches +- Tags + +The configuration for these CI pipelines is managed in: + +- [`.gitlab-ci.yml`](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/.gitlab-ci.yml) +- Files under [`.gitlab/ci/`](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/.gitlab/ci/) + +### Review apps + +We use [Review apps](https://docs.gitlab.com/ci/review_apps/) in CI to +deploy running instances of the Helm Charts and test against them. + +We deploy these Review apps to our EKS and GKE clusters, confirm that the Helm +release is created successfully, and then run [GitLab QA](gitlab-qa/_index.md) +and other [RSpec tests](rspec.md). + +For merge requests specifically, we make use of +[`vcluster`](https://www.vcluster.com) to create ephemeral clusters. This +allows us to test against newer versions of Kubernetes more quickly due to the +ease of configuration and simplified environments that do not include External +DNS or Cert Manager dependencies. In this case, we simply deploy the Helm +Charts, confirm the release was created successfully, and validate that +Webservice is in the `Ready` state. This approach takes advantage of +[Kubernetes readiness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) +to ensure that the application is in a healthy state. See +[issue 5013](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/5013) for +more information on our `vcluster` implementation plan. + +### Managing Review apps + +Review apps will stay active for two hours by default, at which time they will be stopped automatically +by associated CI jobs. The process works as follows: + +1. `create_review_*` jobs create the Review App environment. + - These jobs only `echo` environment information. 
This ensures that these jobs do not fail, meaning we
+     can create environments consistently and avoid leaving them in a broken state where they cannot be
+     automatically stopped by future CI Jobs.
+1. `review_*` jobs install the Helm Chart to the environment.
+1. `stop_review_*` jobs run after the duration defined in the variable named `REVIEW_APPS_AUTO_STOP_IN`.
+
+If you notice that one or more of the `review_*` jobs have failed and need to debug the environment, you can:
+
+1. Find the associated `create_review_*` job.
+1. At the top of the job page, click the environment link titled something like `This job is deployed to <cluster>/<commit>`.
+1. At the top right of the environment page, you will see buttons to:
+   - Pin the environment: marked by a pin icon, this button will prevent the environment from being stopped automatically.
+     If you click this, it will cancel the `stop_review_*` job. Be sure to run that job manually when you have finished debugging.
+     This option is helpful if you need more time to debug a failed environment.
+   - View deployment: this button will open the environment URL of the running instance of GitLab.
+   - Stop: this button will run the associated `stop_review_*` job.
+
+## When to fork upstream charts
+
+### No changes, no fork
+
+Let it be stated that any chart that does not require changes to function
+for our use *should not* be forked into this repository.
+
+### Guidelines for forking
+
+#### Sensitive information
+
+If a given chart expects that sensitive communication secrets will be presented
+from within environment, such as passwords or cryptographic keys,
+[we prefer to use `initContainers`](../architecture/decisions.md#preference-of-secrets-in-initcontainer-over-environment).
+
+#### Extending functionality
+
+There are some cases where it is needed to extend the functionality of a chart in
+such a way that an upstream may not accept.
+
+## Handling configuration deprecations
+
+There are times in development when changes in behavior require a functionally breaking change. We try to avoid such changes, but some items cannot be handled without such a change.
+
+To handle this, we have implemented the [deprecations template](deprecations.md). This template is designed to recognize properties that need to be replaced or relocated, and inform the user of the actions they need to take. This template will compile all messages into a list, and then cause the deployment to stop via a `fail` call. This provides a method to inform the user at the same time as preventing the deployment of the chart in a broken or unexpected state.
+
+See the documentation of the [deprecations template](deprecations.md) for further information on the design, functionality, and how to add new deprecations.
+
+## Attempt to catch problematic configurations
+
+Due to the complexity of these charts and their level of flexibility, there are some overlaps where it is possible to produce a configuration that would lead to an unpredictable, or entirely non-functional deployment. In an effort to prevent known problematic settings combinations, we have the following two patterns in place:
+
+- We use [schema validations](https://helm.sh/docs/topics/charts/#schema-files) for all
+  our sub-charts to ensure the user-specified values meet expectations. See
+  [the documentation](validation.md) to learn more.
+- We implement template logic designed to detect and warn the user that their
+  configuration will not work. See the documentation of the
+  [`checkConfig` template](checkconfig.md) for further information on the design and
+  functionality, and how to add new configuration checks.
+
+## Verifying registry
+
+In development mode, verifying Registry with Docker clients can be difficult. This is partly due to issues with certificate of
+the registry. 
You can either [add the certificate](https://distribution.github.io/distribution/about/insecure/#use-self-signed-certificates) or +[expose the registry over HTTP](https://distribution.github.io/distribution/about/insecure/#deploy-a-plain-http-registry) (see `global.hosts.registry.https`). +Note that adding the certificate is more secure than the insecure registry solution. + +Please keep in mind that Registry uses the external domain name of MinIO service (see `global.hosts.minio.name`). You may +encounter an error when using internal domain names, e.g. with custom TLDs for development environment. The common symptom +is that you can log in to the Registry but you can't push or pull images. This is generally because the Registry container(s) +can not resolve the MinIO domain name and find the correct endpoint (you can see the errors in container logs). + +## Troubleshooting a development environment + +Developers may encounter unique issues while working on new chart features. +[Refer to the troubleshooting guide](troubleshooting.md) for +information if your ***development*** cluster seems to have strange issues. + +{{< alert type="note" >}} + +The troubleshooting steps outlined in the link above are for development +clusters only. Do not use these procedures in a production environment or +data will be lost. + +{{< /alert >}} + +## Additional Helm information + +Some information on how all the inner Helm workings behave: + +- The Distribution Team has a [training presentation for Helm charts](https://docs.google.com/presentation/d/1CStgh5lbS-xOdKdi3P8N9twaw7ClkvyqFN3oZrM1SNw/present). +- Templating in Helm is done via Go [text/template](https://pkg.go.dev/text/template) + and [sprig](https://pkg.go.dev/github.com/Masterminds/sprig?utm_source=godoc%27). +- Helm repository has some additional information on developing with Helm in its + [tips and tricks section](https://helm.sh/docs/howto/charts_tips_and_tricks/). 
+- [Functions and Pipelines](https://helm.sh/docs/chart_template_guide/functions_and_pipelines/). +- [Subcharts and Globals](https://helm.sh/docs/chart_template_guide/subcharts_and_globals/). diff --git a/chart/doc/development/autoflow.md b/chart/doc/development/autoflow.md new file mode 100644 index 0000000000000000000000000000000000000000..8a12aa10fa3732adc8e8eb5fe76a7b0761276824 --- /dev/null +++ b/chart/doc/development/autoflow.md @@ -0,0 +1,117 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: AutoFlow +--- + +The KAS chart can be configured with AutoFlow support. +Currently, the AutoFlow support is limited to what is strictly required +for the [GitLab internal-use experiment](https://gitlab.com/groups/gitlab-org/-/epics/16181). + +This specifically means that only a subset of configuration +options are available and only Temporal Cloud via +mTLS worker authentication and with data encryption +is supported. + +## Configuration + +AutoFlow is configured with the `autoflow` value node in the KAS subchart. + +A minimal working configuration has the following anatomy: + +```yaml +autoflow: + enabled: true + temporal: + namespace: <unique temporal cloud namespace name> + workerMtls: + secretName: <name of the k8s secret with the mTLS worker certs> + workflowDataEncryption: + codecServer: + authorizedUserEmails: <list of authorized temporal cloud users> +``` + +The values within `<>` need to be provided by the user of the chart: + +- `<unique temporal cloud namespace name>`: this is the unique name of the + Temporal Cloud namespace. + The namespace must be created before by the KAS / AutoFlow maintainers. + Reach out to `#f_autoflow` to get help with that. 
+- `<name of the k8s secret with the mTLS worker certs>`: this is the name of an
+  already existing Kubernetes secret the KAS deployment will have access to.
+  The secret must be of type `tls` and contain the `tls.crt` and `tls.key`
+  data values. It can be created with a command like this:
+  `kubectl create secret tls kas-autoflow-temporal-worker-mtls --cert <path-to-worker-mtls.crt> --key <path-to-worker-mtls.key>`.
+  The mTLS certificate and key can be generated by following [this guide](https://docs.temporal.io/cloud/certificates#option-2-you-dont-have-certificate-management-infrastructure).
+  The generated CA certificate must be configured in the Temporal namespace settings.
+- `<list of authorized temporal cloud users>`: this is a list of email
+  addresses that should have access to the AutoFlow [codec server](https://docs.temporal.io/production-deployment/data-encryption)
+  and have already been granted access to the configured namespace.
+
+### Manual secret creation (optional)
+
+This section is an addition to the official [manual secret creation section in the installation guide](../installation/secrets.md#manual-secret-creation-optional).
+
+#### GitLab KAS AutoFlow Temporal Workflow Data Encryption Secret (experimental)
+
+You can leave it to the chart to auto-generate the secret, or you can create this secret manually (replace `<name>` with the name of the release):
+
+```shell
+openssl rand 32 > secret.bin
+kubectl create secret generic <name>-kas-autoflow-temporal-workflow-data-encryption-secret --from-file=kas_autoflow_temporal_workflow_data_encryption=secret.bin
+shred --remove secret.bin
+```
+
+This secret is referenced by the `gitlab.kas.autoflow.temporal.workflowDataEncryption.secret` setting.
+
+## Verification
+
+The [Configuration](#configuration) section must be followed in order
+to verify (smoke test) the AutoFlow functionality.
+Follow this step-by-step guide:
+
+1. 
After installing the chart make sure the KAS pods are running without + logging any errors. +1. Create a new project in the GitLab instance +1. Create a AutoFlow script at `.gitlab/autoflow/main.star` with the following + contents: (you don't really need to understand it at this point) + + ```python + # -*- mode: python -*- + + def handle_event(w, ev): + print("Handling event: {}".format(ev["type"])) + + on_event( + type="com.gitlab.events.issue_updated", + handler=handle_event, + ) + ``` + +1. Enable feature flags in Rails for AutoFlow using the `gitlab-rails console` in the toolbox pod: + + ```ruby + Feature.enable(:autoflow_enabled) + Feature.enable(:autoflow_issue_events_enabled) + ``` + +1. Update an existing issue in that project and verify the KAS logs + that it contains logs about handling that event and running the + workflow script. + +1. You can also emit events manually via the rails console. + + ```ruby + client = Gitlab::Kas::Client.new() + # Replace the issue and project IDs to match your setup. + client.send_autoflow_event( + project: Project.find(1), + type: "com.gitlab.events.issue_updated", + id: "1", + data: {"project": {"id": 1}, "issue": {"iid": 1}} + ) + ``` + +If you are interested in running more complex workflows, +see this snippet: <https://gitlab.com/-/snippets/4800564>. 
diff --git a/chart/doc/development/bats.md b/chart/doc/development/bats.md index 63ee4ddbc7fb0804d14884499d4057b867c50d90..d54988fa24422b30d07089b5b5d7e1ba7c4dee45 100644 --- a/chart/doc/development/bats.md +++ b/chart/doc/development/bats.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Writing bats tests for scripts used in the charts project --- -# Writing bats tests for scripts used in the charts project - The following are notes and conventions used for creating bats tests for the GitLab chart. diff --git a/chart/doc/development/changelog.md b/chart/doc/development/changelog.md index 8cdc4da648cb2a5c30ab68bb8121e2d495317dff..4b1677cb7057f6aca2606bbc53fa683ef89a545a 100644 --- a/chart/doc/development/changelog.md +++ b/chart/doc/development/changelog.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Changelog entries --- -# Changelog entries - This guide contains instructions for when and how to generate a changelog entry file, as well as information and history about our changelog process. @@ -166,4 +165,4 @@ found the workflow to be appealing and familiar. 
--- -[Return to Development documentation](index.md) +[Return to Development documentation](_index.md) diff --git a/chart/doc/development/chaoskube/_index.md b/chart/doc/development/chaoskube/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..1260ea447541d213aadea65654b3a52723c48bd0 --- /dev/null +++ b/chart/doc/development/chaoskube/_index.md @@ -0,0 +1,52 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: ChaosKube +--- + +[ChaosKube](https://github.com/linki/chaoskube) is similar to +Netflix's [chaos monkey](https://github.com/Netflix/chaosmonkey) for Kubernetes +clusters. It schedules random termination of pods in order to test the fault tolerance +of a highly available system. + +## Why + +As a part of our charts development we needed a way to test the fault tolerance +of our deployments. + +## How + +Using ChaosKube is a manual step we do after our weekly demos. The intended +use case of ChaosKube is to kill pods randomly at random times during a +working day to test the ability to recover. The way we use it is a bit different, +we manually launch ChaosKube in debug mode and manually identify the weak +points of our deployment. + +Later, we intend to integrate it into our CI pipeline, so whenever new changes +are rolled out we have a ChaosKube run for that release. + +## Usage + +The [`deploy_chaoskube.sh`](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/scripts/deploy_chaoskube.sh) +installs and unleashes ChaosKube by scheduling a run 10m after installing ChaosKube by default. It also sets up +the needed service account and role if RBAC is enabled. 
+ +After you clone the charts repository, to install and unleash ChaosKube, run: + +```shell +scripts/deploy_chaoskube.sh up +``` + +## Configuration + +ChaosKube can be configured by editing the `scripts/chaoskube-resources/values.yaml` +file. For more info read the official [ChaosKube docs](https://github.com/linki/chaoskube). + +You can also configure the deployment with flags on the script. To find all available options, run: + +```shell +scripts/deploy_chaoskube.sh -h +``` + +Visit the [README's values section](https://github.com/helm/charts/tree/master/stable/chaoskube#configuration) for a full list of options to pass via `--set` arguments. diff --git a/chart/doc/development/checkconfig.md b/chart/doc/development/checkconfig.md index 1fecdddc3c85a9e66c56194abfbf46635664557d..c3750ba5d8d9b431eeec24f6d46479d989d9fb37 100644 --- a/chart/doc/development/checkconfig.md +++ b/chart/doc/development/checkconfig.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: checkConfig template --- -# checkConfig template - The purpose of this template is to provide a means to prevent users from deploying the Helm chart, or updates to it, in what would be a broken state due to known problematic configurations. The design makes use of multiple templates, providing a modular method of declaring and managing checks. This aids in simplification of both development and maintenance. 
diff --git a/chart/doc/development/ci.md b/chart/doc/development/ci.md index 7990f69778543e63a28a7fa3b68752c16ccf719e..c72ff88ba63add5eafe61d187f3a4582ef28eb90 100644 --- a/chart/doc/development/ci.md +++ b/chart/doc/development/ci.md @@ -1,4 +1,9 @@ -# CI setup and use +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: CI setup and use +--- ## CI Variables diff --git a/chart/doc/development/clickhouse.md b/chart/doc/development/clickhouse.md index 9575247dfe9e300b75bf81e3a5183bbc29df127c..d5fbea3da46df8019a45dc764b5b856d7ea9401f 100644 --- a/chart/doc/development/clickhouse.md +++ b/chart/doc/development/clickhouse.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: ClickHouse database --- -# ClickHouse database - The GitLab chart can be configured to set up GitLab with an external ClickHouse database via the HTTP interface. Required parameters: | Parameter | Description | @@ -16,9 +15,12 @@ The GitLab chart can be configured to set up GitLab with an external ClickHouse | `global.clickhouse.main.password.key` | Which key to use as the password within the secret | | `global.clickhouse.main.database` | Database name | -WARNING: +{{< alert type="warning" >}} + Using ClickHouse is intended for experimenting and testing purposes only at the moment. 
+{{< /alert >}} + ## Configuring the password The password can be set manually using the `kubectl` CLI tool: diff --git a/chart/doc/development/deploy.md b/chart/doc/development/deploy.md index 362e93faaa7f23bc74ccca74d5b7f3b38744c679..f5c670005bc423968d1a28a7d9ab5b407a8dde0c 100644 --- a/chart/doc/development/deploy.md +++ b/chart/doc/development/deploy.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Deploy Development Branch --- -# Deploy Development Branch - First ensure that your development environment is set up for charts development. See the [Development environment setup](environment_setup.md) page for instructions. @@ -25,7 +24,7 @@ relative path would be `file://../gitlab-runner/` and the absolute path would be `file:///home/USER/charts/gitlab-runner/`. Pay close attention with absolute paths as it is very easy to miss the leading slash on the file path. -Other steps from the [installation documentation](../installation/index.md) still apply. The difference is when deploying +Other steps from the [installation documentation](../installation/_index.md) still apply. The difference is when deploying a development branch, you need to add additional upstream repositories and update the local dependencies, then pass the local Git repository location to the Helm command. 
diff --git a/chart/doc/development/deprecations.md b/chart/doc/development/deprecations.md index 8cc67d725e394a14d12549d57d011764d7e42708..fd26312dce715a63a4b1ce30ccc3c06efe0ef4fc 100644 --- a/chart/doc/development/deprecations.md +++ b/chart/doc/development/deprecations.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Deprecations and removals --- -# Deprecations and removals - ## Deprecations Deprecated features are features that are still supported, but are scheduled for removal in a later diff --git a/chart/doc/development/environment_setup.md b/chart/doc/development/environment_setup.md index 8fbb8dc56b2ef41b9103bba5eb0d67d535a75ca4..444bfb3499cf4cf484e66b2b3d790e01c8b0c14b 100644 --- a/chart/doc/development/environment_setup.md +++ b/chart/doc/development/environment_setup.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Environment setup --- -# Environment setup - To set up for charts development, command line tools and a Kubernetes cluster are required. @@ -51,7 +50,7 @@ Tool name | Benefits | Example use case | Link(s) -|-|-|- `asdf` | Easily switch between versions of your favorite runtimes and CLI tools. | Switching between Helm 3.7 and Helm 3.9 binaries. | [GitHub](https://github.com/asdf-vm/asdf) `kubectx` & `kubens` | Manage and switch between Kubernetes contexts and namespaces. | Setting default namespace per selected cluster context. | [GitHub](https://github.com/ahmetb/kubectx) -`k3s` | Lightweight Kubernetes installation (<40MB). | Quick and reliable local chart testing. | [Homepage](https://k3s.io) +`k3s` | Lightweight Kubernetes installation (<40 MB). 
| Quick and reliable local chart testing. | [Homepage](https://k3s.io) `k9s` | Greatly reduced typing of `kubectl` commands. | Navigate and manage cluster resources quickly in a command line interface. | [GitHub](https://github.com/derailed/k9s) `lens` | Highly visual management and navigation of clusters. | Navigate and manage cluster resources quickly in a standalone desktop application. | [Homepage](https://k8slens.dev/) `stern` | Easily follow logs from multiple pods. | See logs from a set of GitLab pods together. | [GitHub](https://github.com/stern/stern) @@ -64,19 +63,22 @@ A cloud or local Kubernetes cluster may be used for development. For simple issues, a local cluster is often enough to test deployments. When dealing with networking, storage, or other complex issues, a cloud Kubernetes cluster allows you to more accurately recreate a production environment. -WARNING: +{{< alert type="warning" >}} + Official GitLab images are built with the x86-64 architecture. -For local development, Apple silicon users can use an [alternate Docker setup](kind/index.md#apple-silicon-m1m2) +For local development, Apple silicon users can use an [alternate Docker setup](kind/_index.md#apple-silicon-m1m2) to emulate a compatible architecture. Support for multiple architectures, including AArch64/ARM64, is under active development. See [issue 2899](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2899) for more information. 
+{{< /alert >}} + ### Local cluster The following local cluster options are supported: -- [minikube](minikube/index.md) - Cluster in virtual machines -- [KinD (Kubernetes in Docker)](kind/index.md) - Cluster in Docker containers +- [minikube](minikube/_index.md) - Cluster in virtual machines +- [KinD (Kubernetes in Docker)](kind/_index.md) - Cluster in Docker containers ### Cloud cluster diff --git a/chart/doc/development/gitlab-qa/_index.md b/chart/doc/development/gitlab-qa/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..094def77b709c028173f32909d2b684776d85f6d --- /dev/null +++ b/chart/doc/development/gitlab-qa/_index.md @@ -0,0 +1,126 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Running GitLab QA +--- + +The following documentation is meant to provide instructions for running +[GitLab QA](https://gitlab.com/gitlab-org/gitlab-qa) against a deployed cloud +native GitLab installation. These steps are performed as a part of the +[CI for this project](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/.gitlab-ci.yml) +but manual runs may be requested during development or a demo. + +## Preparation + +Before running GitLab QA, there are a few things to do. + +### Determine running version of GitLab + +From your deployed GitLab chart, visit `/admin` and see the Components panel +for the version of GitLab that is running. If this is `X.Y.Z-pre`, then you +will want the `nightly` image. If this is `X.Y.Z-ee`, then you will want this +version of GitLab QA image. + +Export `GITLAB_VERSION` based on what you have observed: + +```shell +export GITLAB_VERSION=11.0.3-ee +``` + +or: + +```shell +export GITLAB_VERSION=nightly +``` + +### Network access + +To run GitLab QA, you will need sustained network access to the deployed instance. 
+Ensure this by visiting the deployment from any browser, or via cURL. + +## Running GitLab QA in pipeline + +To run GitLab QA tests against the deployed instance you can use [GitLab QA Executor](https://gitlab.com/gitlab-org/quality/gitlab-qa-executor). This project contains CI configuration to run GitLab QA against GitLab Self-Managed environments with parallelization that automates the following manual steps for running GitLab QA from a local machine. + +## Running GitLab QA from local machine + +Follow below instructions to run GitLab QA against the deployed instance +from your local machine. + +### Install the `gitlab-qa` gem + +Ensure you have a functional version of Ruby, preferably of the `3.0` branch. +Install the `gitlab-qa` gem: + +```shell +gem install gitlab-qa +``` + +For more info, see the [GitLab QA documentation](https://gitlab.com/gitlab-org/gitlab-qa#how-can-you-use-it). + +### Docker + +GitLab QA makes use of Docker, so you will need to have an operational +installation. Ensure that the daemon is running. If you have set `GITLAB_VERSION=nightly`, +pull the GitLab QA nightly image to ensure that the latest nightly is used for +testing, in conjunction with the nightly builds of the CNG containers: + +```shell +docker pull gitlab/gitlab-ee-qa:$GITLAB_VERSION +``` + +### Configuration + +Items needed for execution, which +[will be set as environment variables](https://gitlab.com/gitlab-org/gitlab-qa#supported-environment-variables): + +- `GITLAB_VERSION`: The version of GitLab QA version to run. See [determine running version of GitLab](#determine-running-version-of-gitlab) above. +- `GITLAB_USERNAME`: This will be `root`. +- `GITLAB_PASSWORD`: This will be the password for the `root` user. +- `GITLAB_ADMIN_USERNAME`: This will be `root`. +- `GITLAB_ADMIN_PASSWORD`: This will be the password for the `root` user. +- `GITLAB_URL`: The fully-qualified URL to the deployed instance. This should be + in the form of `https://gitlab.domain.tld`. 
+- `EE_LICENSE`: A string containing a GitLab EE license. This can be handled + via `export EE_LICENSE=$(cat GitLab.gitlab-license)`. + +Retrieve the above items, and export them as environment variables. + +### Select test suite + +GitLab QA has multiple test suites to run against the standalone environment. Suite consists of subset of tests +when end-to-end tests are grouped by various [RSpec metadata](https://docs.gitlab.com/development/testing_guide/end_to_end/rspec_metadata_tests/): + +- _Smoke suite_: small [subset of fast end-to-end functional tests](https://docs.gitlab.com/development/testing_guide/smoke/) +to quickly ensure that basic functionality is working + - Enable this suite via `export QA_OPTIONS="--tag smoke"` +- _Full suite_: running all tests against the environment. Test run will take more than an hour. + - Enable this suite via `--tag ~skip_live_env --tag ~orchestrated --tag ~requires_praefect --tag ~github --tag ~requires_git_protocol_v2 --tag ~transient` + +Selecting a test suite depends on the use case. In the majority of cases, running +Smoke suite should give quick and consistent test results +as well as a good test coverage. This suite is being used as a sanity +check in [GitLab.com deployments](https://handbook.gitlab.com/handbook/engineering/deployments-and-releases/deployments/#gitlabcom-deployments-process). + +Full suite should be used to get full test results on the environment. It can be resource +intensive to run this suite from a local machine. Use `export CHROME_DISABLE_DEV_SHM=true` +when running Full suite from a single machine. 
+ +## Execution + +Assuming you have set the environment variables from the +[Configuration](#configuration) step and selected [test suite](#select-test-suite), +the following command will perform the tests against the deployed GitLab instance: + +```shell +gitlab-qa Test::Instance::Any EE:$GITLAB_VERSION $GITLAB_URL -- $QA_OPTIONS +``` + +{{< alert type="note" >}} + +The above command runs with _nightly_ because the containers used as a +part of this chart are currently based on nightly builds of the `master` branches +of `gitlab-(ee|ce)` repositories. + +{{< /alert >}} diff --git a/chart/doc/development/index.md b/chart/doc/development/index.md index e5c4b3311bc67103fc58ac89563e4d525254cb45..499b93e4312d55bb224f1aee27210ba2e1f0830d 100644 --- a/chart/doc/development/index.md +++ b/chart/doc/development/index.md @@ -179,7 +179,7 @@ can not resolve the MinIO domain name and find the correct endpoint (you can see Developers may encounter unique issues while working on new chart features. [Refer to the troubleshooting guide](troubleshooting.md) for -information if your **_development_** cluster seems to have strange issues. +information if your ***development*** cluster seems to have strange issues. NOTE: The troubleshooting steps outlined in the link above are for development diff --git a/chart/doc/development/kind/_index.md b/chart/doc/development/kind/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..c27cec1976a5032735d01557b701f8d2ed332316 --- /dev/null +++ b/chart/doc/development/kind/_index.md @@ -0,0 +1,216 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Developing for Kubernetes with KinD +--- + +This guide is meant to serve as a cross-platform resource for setting up a local Kubernetes development environment. 
+In this guide, we'll be using [KinD](https://kind.sigs.k8s.io). It creates a Kubernetes cluster using Docker, and provides easy mechanisms for deploying different versions as well as multiple nodes. + +We will also make use of [nip.io](https://nip.io), which lets us map any IP address to a hostname using a format like this: `192.168.1.250.nip.io`, which maps to `192.168.1.250`. No installation is required. + +{{< alert type="note" >}} + +With the SSL-enabled installation options below, if you want to clone repositories and push changes, you will have to do so over HTTPS instead of SSH. We are planning to address this with an update to GitLab Shell's service exposure via NodePorts. + +{{< /alert >}} + +## Apple silicon (M1/M2) + +`kind` can be used with [`colima`](https://github.com/abiosoft/colima) to provide a local Kubernetes development environment on macOS, including `M1` and `M2` variants. + +### Installing dependencies + +- Make sure that you're running MacOS >= 13 (Ventura). +- Install [`colima`](https://github.com/abiosoft/colima#installation). +- Install [`Rosetta`](https://support.apple.com/en-us/102527): + + ```shell + softwareupdate --install-rosetta + ``` + +### Building the VM + +Create the `colima` VM: + +```shell +colima start --cpu 6 --memory 16 --disk 40 --profile docker --arch aarch64 --vm-type=vz --vz-rosetta +``` + +When ready, you can follow the [preparation](#preparation) below to install GitLab with `kind`. + +### Managing the VM + +To stop the `colima` VM: + +```shell +colima stop --profile docker +``` + +To start again the VM: + +```shell +colima start --profile docker +``` + +To remove and clean up the local system: + +```shell +colima delete --profile docker +``` + +## Preparation + +### Required information + +All of the following installation options require knowing your host IP. 
Here are a couple options to find this information: + +- Linux: `hostname -i` +- MacOS: `ipconfig getifaddr en0` + +{{< alert type="note" >}} + +Most MacOS systems use `en0` as the primary interface. If using a system with a different primary interface, please substitute that interface name for `en0`. + +{{< /alert >}} + +### Using namespaces + +It is considered best practice to install applications in namespaces other than `default`. Create a namespace **prior** to running `helm install` with **kubectl**: + +```shell +kubectl create namespace YOUR_NAMESPACE +``` + +Add `--namespace YOUR_NAMESPACE` to all future **kubectl** commands to use the namespace. Alternatively, use `kubens` from the [kubectx project](https://github.com/ahmetb/kubectx) to contextually switch into the namespace and skip the extra typing. + +### Installing dependencies + +You can use `asdf` ([more info](../environment_setup.md#additional-developer-tools)) to install the following tools: + +- `kubectl` +- `helm` +- `kind` + +Note that `kind` uses Docker to run local Kubernetes clusters, so be sure to [install Docker](https://docs.docker.com/get-docker/). + +### Obtaining configuration examples + +The GitLab charts repository contains every example referenced in the following steps. Clone the repository or update an existing checkout to get the latest versions: + +```shell +git clone https://gitlab.com/gitlab-org/charts/gitlab.git +``` + +### Spin up the Kind cluster + +There are a few example configurations in `doc/examples/kind` pending your desires and needs for testing. +Please review these configurations and make adjustments as necessary. +You can now spin up the cluster. 
Example: + +```shell +kind create cluster --config examples/kind/kind-ssl.yaml +``` + +### Adding GitLab Helm chart + +Follow these commands to set up your system to access the GitLab Helm charts: + +```shell +helm repo add gitlab https://charts.gitlab.io/ +helm repo update +``` + +## Deployment options + +Select from one of the following deployment options based on your needs. + +{{< alert type="note" >}} + +The first full deployment process may take around 10 minutes depending on network and system resources while the cloud-native GitLab images are downloaded. Confirm GitLab is running with the following command: + +{{< /alert >}} + +```shell +kubectl --namespace YOUR_NAMESPACE get pods +``` + +GitLab is fully deployed when the `webservice` pod shows a `READY` state with `2/2` containers. + +### NGINX Ingress NodePort with SSL + +In this method, we will use `kind` to expose the NGINX controller service's NodePorts to ports on your local machine with SSL enabled. + +```shell +kind create cluster --config examples/kind/kind-ssl.yaml +helm upgrade --install gitlab gitlab/gitlab \ + --set global.hosts.domain=(your host IP).nip.io \ + -f examples/kind/values-base.yaml \ + -f examples/kind/values-ssl.yaml +``` + +You can then access GitLab at `https://gitlab.(your host IP).nip.io`. + +#### (Optional) Add root CA + +In order for your browser to trust our self-signed certificate, download the root CA and trust it: + +```shell +kubectl get secret gitlab-wildcard-tls-ca -ojsonpath='{.data.cfssl_ca}' | base64 --decode > gitlab.(your host IP).nip.io.ca.pem +``` + +Now that the root CA is downloaded, you can add it to your local chain (instructions vary per platform and are readily available online). + +{{< alert type="note" >}} + +If you need to log into the registry with `docker login`, you will need to take additional steps to configure the registry to work with your self-signed certificates. 
More instructions can be found in: + +{{< /alert >}} + +- [Run an externally-accessible registry](https://distribution.github.io/distribution/about/deploying/#run-an-externally-accessible-registry) +- [Adding self-signed registry certificates to Docker and Docker for macOS](https://blog.container-solutions.com/adding-self-signed-registry-certs-docker-mac). + +### NGINX Ingress NodePort without SSL + +In this method, we will use `kind` to expose the NGINX controller service's NodePorts to ports on your local machine with SSL disabled. + +```shell +kind create cluster --config examples/kind/kind-no-ssl.yaml +helm upgrade --install gitlab gitlab/gitlab \ + --set global.hosts.domain=(your host IP).nip.io \ + -f examples/kind/values-base.yaml \ + -f examples/kind/values-no-ssl.yaml +``` + +Access GitLab at `http://gitlab.(your host IP).nip.io`. + +{{< alert type="note" >}} + +If you need to log into the registry with `docker login`, you will need to tell Docker to [trust your insecure registry](https://distribution.github.io/distribution/about/insecure/#deploy-a-plain-http-registry). + +{{< /alert >}} + +### Handling DNS + +This guide assumes you have network access to [nip.io](https://nip.io). If this is not available to you, please refer to the [handling DNS](../minikube/_index.md#handling-dns) section in the minikube documentation which will also work for KinD. + +{{< alert type="note" >}} + +When editing **/etc/hosts**, remember to use the [host computer's IP address](#required-information) rather than the output of `$(minikube ip)`. + +{{< /alert >}} + +## Cleaning up + +When you're ready to clean up your local system, run this command: + +```shell +kind delete cluster +``` + +{{< alert type="note" >}} + +If you named your cluster upon creation, or if you are running multiple clusters, you can delete specific ones with the `--name` flag. 
+ +{{< /alert >}} diff --git a/chart/doc/development/minikube/_index.md b/chart/doc/development/minikube/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..8997e7091f8aa7ed308590bc487c872e7dc7b38c --- /dev/null +++ b/chart/doc/development/minikube/_index.md @@ -0,0 +1,254 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Developing for Kubernetes with minikube +--- + +This guide is meant to serve as a cross-platform resource for setting up a local +Kubernetes development environment. In this guide, we'll be using +[minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/) as it is the accepted standard. + +## Getting Started with minikube + +We'll extract and expound on the official documentation from the +[Kubernetes project](https://kubernetes.io/), +[Running Kubernetes Locally with minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/). + +### Installing kubectl + +The official documentation provides several options, but the result is that you +can do one of three things: + +- Download as a part of the Google Cloud SDK from Google Cloud Platform's + [Cloud SDK](https://cloud.google.com/sdk/) page. 
Once you have `gcloud` + installed, you can install `kubectl`: + + ```shell + sudo gcloud components install kubectl + ``` + + If you've already installed `kubectl` via this method, ensure it is updated: + + ```shell + sudo gcloud components update + ``` + +- Install with cURL or with the appropriate package management system for each OS: + - [Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) + - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#install-with-homebrew-on-macos) + - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#install-on-windows-using-chocolatey-or-scoop) + +### Installing minikube + +See the [Kubernetes documentation](https://minikube.sigs.k8s.io/docs/start/) +where they suggest directly installing from the [releases on GitHub](https://github.com/kubernetes/minikube/releases). + +### Choosing a VM driver + +For the purposes of cross-platform compatibility in this guide, we'll stick +with VirtualBox, however there are drivers for VMware Fusion, HyperV, KVM, and Xhyve. + +### Starting / Stopping minikube + +minikube resource requests must be set higher than the default for developing +the GitLab chart. The key configuration items can be found with +`minikube start --help`. A selection is provided below, for what we may want to +change according to the pieces being tested, and the requirements as listed: + +- `--cpus int`: Number of CPUs allocated to the minikube VM (default `2`). + The absolute minimum necessary CPU is `3`. Deploying the _complete_ chart requires `4`. +- `--memory int`: Amount of RAM allocated to the minikube VM (default `2048`). + The absolute same minimum is `6144` (6 GB). Recommendation is `10240` (10 GB). +- `--disk-size string`: Disk size allocated to the minikube VM (format: `<number>[<unit>]`, + where unit = `b`, `k`, `m` or `g`) (default `20g`). 
See the GitLab + [storage](https://docs.gitlab.com/install/requirements/#storage) and + [database](https://docs.gitlab.com/install/requirements/#database) + requirements. + + {{< alert type="note" >}} + +This is created in your home directory under `~/.minikube/machines/minikube/`. + + {{< /alert >}} + +- `--kubernetes-version string`: The Kubernetes version that the minikube VM will use (e.g., `v1.2.3`). +- `--registry-mirror stringSlice`: Registry mirrors to pass to the Docker daemon. + +{{< alert type="note" >}} + +Changing these values in a second `start` command requires first deleting +the existing instance with `minikube delete`, or manually altering the +properties with VirtualBox Manager. + +{{< /alert >}} + +Once you have all the tools installed and configured, starting and stopping minikube +can be done with: + +```shell +minikube start --cpus 4 --memory 10240 +``` + +This command should output something similar to: + +```plaintext +Starting local Kubernetes v1.7.0 cluster... +Starting VM... +Downloading Minikube ISO + 97.80 MB / 97.80 MB [==============================================] 100.00% 0s +Getting VM IP address... +Moving files into cluster... +Setting up certs... +Starting cluster components... +Connecting to cluster... +Setting up kubeconfig... +Kubectl is now configured to use the cluster. +[helm.gitlab.io]$ minikube ip +192.168.99.100 +[helm.gitlab.io]$ minikube stop +Stopping local Kubernetes cluster... +Machine stopped. +``` + +Take note of the result from running the `minikube ip` command. If the output is not `192.168.99.100`, the output IP will be needed later. + +## Using minikube + +minikube can be used directly as a Kubernetes installation, and treated as a +single node cluster. There are some behaviors that are slightly different between +minikube and full-fledged Kubernetes clusters, such as [Google Container Engine (GKE)](https://cloud.google.com/). + +Different: + +- Persistent Volumes: `hostPath` only. 
+ +Unavailable: + +- Load Balancers (requires cloud provider). +- Advanced Scheduling Policies (requires multiple nodes). + +### Gotcha: Persistent Volumes + +minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) +of the `hostPath` type, which are mapped to directories inside the VM. As minikube +boots into a `tmpfs`, most directories will not persist across reboots via `minikube stop`. + +Further details and listings of directories that do persist can be found +in the [minikube getting started guide](https://kubernetes.io/docs/setup/learning-environment/minikube/#persistent-volumes). + +### Enable Add-ons + +minikube handles some features apart from the base configuration. For the +development of this project, we'll need access to `Ingress`: + +```shell +minikube addons enable ingress +``` + +### Connecting to the dashboard + +You can find the URL for the dashboard by calling: + +```shell +minikube dashboard --url +``` + +## Deploying the chart + +When deploying this chart into minikube, some chart resources need to be reduced or disabled. +It is not possible to use the `nginx-ingress` chart to provide ports `22`, `80`, +`443`. It's best to disable it and set the Ingress class by setting +`nginx-ingress.enabled=false,global.ingress.class="nginx"`. + +The `certmanager` chart can not be used with minikube. You must disable this by +setting `certmanager.install=false,global.ingress.configureCertmanager=false`. +As a result, if you don't provide your own SSL certificates, self-signed +certificates will be generated. The `gitlab-runner` chart will accept the self-signed +certificates via `gitlab-runner.certsSecretName`. Assuming your release name is `gitlab`, +the certificate name will be `gitlab-wildcard-tls-chain`. + +The `gitlab-shell` chart can be used with minikube, but requires mapping to a port other +than 22 as it is used by minikube already. 
You can configure `gitlab.gitlab-shell.service.type=NodePort` +and `gitlab.gitlab-shell.service.nodePort=<high-numbered port>`, which will allow cloning a repository +via the specified port. To ensure this port is reflected in the clone link in the UI, configure +`global.shell.port=<high-numbered port>`. + +In the following sections, we'll show how to install these charts from your local Git clone. +Be sure that you have checked out the desired branch or tag, and are at the base folder of +that checkout. + +### Clone GitLab chart repo + +```shell +git clone https://gitlab.com/gitlab-org/charts/gitlab.git +cd gitlab +``` + +### Deploying GitLab with recommended settings + +When using the recommended 4 CPU and 10 GB of RAM, use +[`values-minikube.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/examples/values-minikube.yaml) +as a base. + +```shell +helm dependency update +helm upgrade --install gitlab . \ + --timeout 600s \ + -f https://gitlab.com/gitlab-org/charts/gitlab/raw/master/examples/values-minikube.yaml +``` + +### Deploying GitLab with minimal settings + +If using _absolute minimum_ resources, 3 CPU and 6GB of RAM, you must reduce all replicas +and disable unneeded services. See [`values-minikube-minimum.yaml`](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/examples/values-minikube-minimum.yaml) +as a reasonable base. + +```shell +helm dependency update +helm upgrade --install gitlab . 
\ + --timeout 600s \ + -f https://gitlab.com/gitlab-org/charts/gitlab/raw/master/examples/values-minikube-minimum.yaml +``` + +If the output of `minikube ip` was not `192.168.99.100`, add these arguments to override the IP endpoints in the example configuration files: + +```shell + --set global.hosts.domain=$(minikube ip).nip.io \ + --set global.hosts.externalIP=$(minikube ip) +``` + +### Handling DNS + +The example configurations provided, configure the domain as `192.168.99.100.nip.io` +in an attempt to reduce the overhead of handling alterations to host files, or +other domain name resolution services. However, this relies on the network +reachability of [nip.io](https://nip.io). + +If this is not available to you, then you may need to make alterations to your +`/etc/hosts` file, or provide another means of DNS resolution. + +Example `/etc/hosts` file addition: + +```plaintext +192.168.99.100 gitlab.some.domain registry.some.domain minio.some.domain +``` + +### Incorporating Self-Signed CA + +Once the chart is deployed, if using self-signed certificates, the user will be +given the notice on how to fetch the CA certificate that was generated. This +certificate can be added to the system store, so that all browsers, Docker daemon, +and `git` command recognize the deployed certificates as trusted. The method +depends on your operating system. + +[BounCA](https://www.bounca.org) has a [good tutorial](https://www.bounca.org/tutorials/install_root_certificate.html), +covering most operating systems. + +### Logging in + +You can access the GitLab instance by visiting the domain specified, `https://gitlab.192.168.99.100.nip.io` is used in these examples. If you manually created the secret for initial root password, you can use that to sign in as root user. If not, GitLab automatically created a random password for the root user. This can be extracted by the following command (replace `<name>` by name of the release - which is `gitlab` if you used the command above). 
+ +```shell +kubectl get secret <name>-gitlab-initial-root-password -ojsonpath='{.data.password}' | base64 --decode ; echo +``` diff --git a/chart/doc/development/preparation/_index.md b/chart/doc/development/preparation/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..e87cb6a1d6a2d54c547a62b47610592548bc5b67 --- /dev/null +++ b/chart/doc/development/preparation/_index.md @@ -0,0 +1,126 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Pre-install preparations +--- + +This document covers our weekly demos preparation steps but can also be useful +to anyone who tries to install using the charts before going through the +[installation](../../installation/_index.md). + +The person giving the demo needs to go through this document before the demo, +and should perform the setup the day prior to the demo itself: + +- [GKE setup](#gke-setup) +- [External resources](#external-resources) +- [OmniAuth for Google OAuth2](#omniauth-for-google-oauth2) + +## GKE setup + +Make sure to have a `gcloud` user with permissions to access the `cloud-native` +project. All the [installation procedures](../../installation/_index.md) will +need to be done in this project. + +1. You will need to have the [`gcloud`](https://cloud.google.com/sdk/gcloud/) tool + installed on your system: + + ```shell + mkdir gcloud-build && cd gcloud-build; + wget https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-189.0.0-linux-x86_64.tar.gz; + tar -xzf google-cloud-sdk-189.0.0-linux-x86_64.tar.gz + ./google-cloud-sdk/install.sh + source google-cloud-sdk/path.bash.inc && echo "source google-cloud-sdk/path.bash.inc" >> $HOME/.profile + ``` + +1. 
Run `gcloud` and interactively go through its authentication and + initialization: + + ```shell + ./google-cloud-sdk/bin/gcloud init + ``` + +### Domain name + +During the demo you will need a valid domain name that will resolve to our +cluster load balancer through a wild card entry. Make sure to have one of the +Domain names ready for the demo either by creating a new one or by using an +existing one. + +We usually use `cloud-native-win` or `k8s-ftw`. + +## ChaosKube + +Follow our [ChaosKube](../chaoskube/_index.md) guide for running ChaosKube, +this is usually done after the demo. + +## Git LFS + +In order to test LFS storage in the chart, you will need to have the ability to +use `git lfs`: + +1. Start by [installing `git-lfs`](https://git-lfs.com/). +1. Next, have a non-text file on hand to add to your test repository via LFS. + A good example is [the GitLab logo](https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo.png): + + ```shell + git clone URL + cd project + curl -JLO "https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo.png" + git lfs track "*.png" + git add .gitattributes + git add logo.png + git commit -m "Add logo via LFS" + git push origin master + ``` + +## External resources + +As a part of the demo, we also wish to provide for testing the use of external +resources for PostgreSQL and Redis. + +Ensure that these external sources will be reachable from the deployed +cluster, which may mean configuring firewall rules. The `cloud-native` GCP +project used for our CI has firewall rules in place, which can be used by +applying the `demo-pgsql` and `demo-redis` tags to any VM instance created +within the project. + +### PostgreSQL + +Preparation of chart-external PostgreSQL services (as a pet or SaaS), can +be found in [advanced/external-db](../../advanced/external-db/_index.md). This +can be done several ways documented there. 
Once that is configured, the chart +should be configured with the external service by making use of the `globals.psql` +properties section of the global chart. + +### Redis + +Preparation of chart-external Redis services (as a pet or SaaS), can +be found in [`advanced/external-redis`](../../advanced/external-redis/_index.md). +This can be done as documented there. Once that is configured, the chart should +be configured with the external service by making use of the `globals.redis` +properties section of the global chart. + +### Gitaly + +Preparation of chart-external Gitaly services can +be found in [`advanced/external-gitaly`](../../advanced/external-gitaly/_index.md). +This can be done as documented there. Once that is configured, the chart should +be configured with the external service by making use of the `globals.gitaly` +properties section of the global chart. + +## OmniAuth for Google OAuth2 + +Configuring a deployment with the capability to integrate with GKE requires +the use of OmniAuth. You will need to ensure that a set of +**OAuth Client ID** credentials have been created for the hostname of the GitLab +endpoint in your cluster. + +Cursory instructions for [creating a set of OAuth credentials can be found here](https://support.google.com/cloud/answer/6158849?hl=en). + +The credentials from GCP can be added per the +[`globals` chart's `omniauth.providers` configuration documentation](../../charts/globals.md#omniauth). 
+ +## Run GitLab QA + +As preparation for the demo, one should also [run GitLab QA against the deployed chart](../gitlab-qa/_index.md) diff --git a/chart/doc/development/release.md b/chart/doc/development/release.md index f680a4aeb50e25bc047dc468e831abe49b6aab7a..40a76bcbeede4770ab6f82ab8eca63e1496e0818 100644 --- a/chart/doc/development/release.md +++ b/chart/doc/development/release.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Helm chart releases --- -# Helm chart releases - ## Chart Versioning ### Major releases diff --git a/chart/doc/development/rspec.md b/chart/doc/development/rspec.md index 8ce16c979a1c44003b5d843fcb18c9c06040efe6..e54977ebafa9b37d1ff64ea44ffd0523c36b850d 100644 --- a/chart/doc/development/rspec.md +++ b/chart/doc/development/rspec.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Writing RSpec tests for charts --- -# Writing RSpec tests for charts - The following are notes and conventions used for creating RSpec tests for the GitLab chart. @@ -52,10 +51,13 @@ obj.dig('ConfigMap/test-gitaly', 'data', 'config.toml.tpl') This will return the contents of the `config.toml.tpl` file contained in the `test-gitaly` ConfigMap. -NOTE: +{{< alert type="note" >}} + Using the `HelmTemplate` class will always use the release name of "test" when executing the `helm template` command. 
+{{< /alert >}} + ## Chart inputs The input parameter to the `HelmTemplate` class constructor is a dictionary diff --git a/chart/doc/development/style_guide.md b/chart/doc/development/style_guide.md index df2dc620e398ab0ed79e2714143cb45a2ae680bd..db2a1fd9461cb4f08f2b44c8225f9b381b367d6b 100644 --- a/chart/doc/development/style_guide.md +++ b/chart/doc/development/style_guide.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Style guide --- -# Style guide - This document describes various guidelines and best practices for GitLab Helm chart development. ## Naming Conventions diff --git a/chart/doc/development/troubleshooting.md b/chart/doc/development/troubleshooting.md index bc541698c5b2466df4bf6db0ad6821176c133547..a870992c6aed276cdec53a19417a7fdb458dc679 100644 --- a/chart/doc/development/troubleshooting.md +++ b/chart/doc/development/troubleshooting.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Troubleshooting GitLab chart development environment --- -# Troubleshooting GitLab chart development environment - All steps noted here are for **DEVELOPMENT ENVIRONMENTS ONLY**. Administrators may find the information insightful, but the outlined fixes are destructive and would have a major negative impact on production @@ -30,10 +29,13 @@ claims. kubectl delete secrets,pvc -lrelease=RELEASE_NAME ``` -NOTE: +{{< alert type="note" >}} + This deletes all Kubernetes secrets including TLS certificates and all data in the database. This should not be performed in a production instance. 
+{{< /alert >}} + ## Database is broken and needs reset The database environment can be reset in a development environment by: @@ -42,10 +44,13 @@ The database environment can be reset in a development environment by: 1. Delete the PostgreSQL PersistentVolumeClaim 1. Deploy GitLab again with `helm upgrade --install` -NOTE: +{{< alert type="note" >}} + This will delete all data in the databases and should not be run in production. +{{< /alert >}} + ## CI clusters are low on available resources You may notice one or more CI clusters run low on available resources like CPU diff --git a/chart/doc/development/upgrade_stop.md b/chart/doc/development/upgrade_stop.md index bf484876e4cb02c04e20a8ad10c19492b2f40a13..af5af8b66740648f32a282282c3eeb40c0db19a6 100644 --- a/chart/doc/development/upgrade_stop.md +++ b/chart/doc/development/upgrade_stop.md @@ -2,12 +2,11 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Maintaining the upgrade stop in the charts project --- -# Maintaining the upgrade stop in the charts project - The GitLab chart creates a pre-upgrade hook that checks if the upgrade follows a -[valid upgrade path](https://docs.gitlab.com/ee/update/#upgrade-paths). +[valid upgrade path](https://docs.gitlab.com/update/#upgrade-paths). If the upgrade path is invalid, the upgrade will be aborted. 
diff --git a/chart/doc/development/validation.md b/chart/doc/development/validation.md index 035d7bc6720c68ddba36e44ec2790aeda532b4be..4bffc120e31f888e7027940f272c529556d6b8b2 100644 --- a/chart/doc/development/validation.md +++ b/chart/doc/development/validation.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Validations of values using JSON Schema --- -# Validations of values using JSON Schema - Helm 3 introduced support for validation of values using [schema files](https://helm.sh/docs/topics/charts/#schema-files) which follow [JSON Schema](https://json-schema.org/). Helm charts in this repository also makes use diff --git a/chart/doc/installation/_index.md b/chart/doc/installation/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..d32d069d8ce459b303592514542948d6797d533d --- /dev/null +++ b/chart/doc/installation/_index.md @@ -0,0 +1,69 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Installing GitLab by using Helm +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +Install GitLab on Kubernetes by using the cloud native GitLab Helm chart. + +Assuming you already have the [prerequisites](tools.md) installed and configured, +you can [deploy GitLab](deployment.md) with the `helm` command. + +{{< alert type="warning" >}} + +The default Helm chart configuration is **not intended for production**. +The default chart creates a proof of concept (PoC) implementation where all GitLab +services are deployed in the cluster. 
For production deployments, you must follow the +[Cloud Native Hybrid reference architecture](#use-the-reference-architectures). + +{{< /alert >}} + +For a production deployment, you should have strong working knowledge of Kubernetes. +This method of deployment has different management, observability, and concepts than traditional deployments. + +In a production deployment: + +- The stateful components, like PostgreSQL or Gitaly (a Git repository storage dataplane), + must run outside the cluster on PaaS or compute instances. This configuration is required + to scale and reliably service the variety of workloads found in production GitLab environments. +- You should use Cloud PaaS for PostgreSQL, Redis, and object storage for all non-Git repository storage. + +If Kubernetes is not required for your GitLab instance, see the +[reference architectures](https://docs.gitlab.com/administration/reference_architectures/) +for simpler alternatives. + +## Configure the Helm chart to use external stateful data + +You can configure the GitLab Helm chart to point to external stateful storage +for items like PostgreSQL, Redis, all non-Git repository storage, and Git repository storage (Gitaly). + +The following Infrastructure as Code (IaC) options use this approach. + +For production-grade implementation, the appropriate chart parameters should be used to +point to prebuilt, externalized state stores that align with the chosen +[reference architecture](https://docs.gitlab.com/administration/reference_architectures/). + +### Use the reference architectures + +The reference architecture for deploying GitLab instances to Kubernetes is called [Cloud Native Hybrid](https://docs.gitlab.com/administration/reference_architectures/#cloud-native-hybrid) specifically because not all GitLab services can run in the cluster for production-grade implementations. All stateful GitLab components must be deployed outside the Kubernetes cluster. 
+ +Available Cloud Native Hybrid reference architectures sizes +are listed at [Reference architectures](https://docs.gitlab.com/administration/reference_architectures/#cloud-native-hybrid) page. +For example, here is the [Cloud Native Hybrid reference architecture](https://docs.gitlab.com/administration/reference_architectures/3k_users/#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) for the 3,000 user count. + +### Use Infrastructure as Code (IaC) and builder resources + +GitLab develops Infrastructure as Code that is capable of configuring the combination of Helm charts and supplemental cloud infrastructure: + +- [GitLab Environment Toolkit IaC](https://gitlab.com/gitlab-org/gitlab-environment-toolkit). +- [Implementation pattern: Provision GitLab cloud native hybrid on AWS EKS](https://docs.gitlab.com/solutions/cloud/aws/gitlab_instance_on_aws/): + This resource provides a Bill of Materials tested with the GitLab Performance Toolkit, + and uses the AWS Cost Calculator for budgeting. diff --git a/chart/doc/installation/chart-provenance.md b/chart/doc/installation/chart-provenance.md index 4ad2814bb76323cf42f7b972a1f1bcbd779f297f..2e04a2e2ad6ad8445c522eb9fa1d59642843c683 100644 --- a/chart/doc/installation/chart-provenance.md +++ b/chart/doc/installation/chart-provenance.md @@ -2,15 +2,14 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: GitLab Helm chart provenance --- -# GitLab Helm chart provenance - You can verify the integrity and origin of GitLab Helm charts by using [Helm provenance](https://helm.sh/docs/topics/provenance/). -The GitLab Helm charts are signed with a GNUPG keypair. The public portion of -the keypair must be downloaded and possibly exported before it can be used to +The GitLab Helm charts are signed with a GNUPG key pair. 
The public portion of +the key pair must be downloaded and possibly exported before it can be used to verify the charts. The [GNU Privacy Handbook](https://www.gnupg.org/gph/en/manual/x56.html) has detailed instructions on how to manage GPG keys. diff --git a/chart/doc/installation/cloud/_index.md b/chart/doc/installation/cloud/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..ab6526d5439dafe20df13154f00bc469d1cb8f52 --- /dev/null +++ b/chart/doc/installation/cloud/_index.md @@ -0,0 +1,67 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Cloud provider setup for the GitLab chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +Before you deploy the GitLab chart, you must configure resources for +the cloud provider you choose. + +The GitLab chart is intended to fit in a cluster with at least 8 vCPU +and 30 GB of RAM. If you are trying to deploy a non-production instance, +you can reduce the defaults to fit into a smaller cluster. 
+ +## Supported Kubernetes releases + +The GitLab Helm chart supports the following Kubernetes releases: + +| Kubernetes release | Status | Minimum GitLab version | Architectures | End of life | +|--------------------|-------------|------------------------|---------------|-------------| +| 1.31 | [In development/qualification](https://gitlab.com/gitlab-org/distribution/team-tasks/-/issues/1602) | | x86-64 | 2025-10-28 | +| 1.30 | Supported | 17.6 | x86-64 | 2025-06-28 | +| 1.29 | Supported | 17.0 | x86-64 | 2025-02-28 | +| 1.28 | Deprecated | 17.0 | x86-64 | 2024-10-28 | +| 1.27 | Unsupported | 16.6 | x86-64 | 2024-06-28 | +| 1.26 | Unsupported | 16.5 | x86-64 | 2024-02-28 | +| 1.25 | Unsupported | 16.5 | x86-64 | 2023-10-28 | +| 1.24 | Unsupported | 16.5 | x86-64 | 2023-07-28 | +| 1.23 | Unsupported | 16.5 | x86-64 | 2023-02-28 | +| 1.22 | Unsupported | 16.5 | x86-64 | 2022-10-28 | + +The GitLab Helm Chart aims to support new minor Kubernetes releases three months after their initial release. +We welcome reports made to our [issue tracker](https://gitlab.com/gitlab-org/charts/gitlab/-/issues) about compatibility issues in releases newer than those listed above. + +Some GitLab features might not work on deprecated releases or releases older than the releases listed above. + +For some components, like the [agent for Kubernetes](https://docs.gitlab.com/user/clusters/agent/) and [GitLab Operator](https://docs.gitlab.com/operator/installation/), GitLab might support different cluster releases. + +{{< alert type="warning" >}} + +Kubernetes nodes must use the x86-64 architecture. +Support for multiple architectures, including AArch64/ARM64, is under active development. +See [issue 2899](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2899) for more information. 
+ +{{< /alert >}} + +- For cluster topology recommendations for an environment, see the + [reference architectures](https://docs.gitlab.com/administration/reference_architectures/#available-reference-architectures). +- For an example of tuning the resources to fit in a 3 vCPU 12 GB cluster, see the + [minimal GKE example values file](https://gitlab.com/gitlab-org/charts/gitlab/tree/master/examples/values-gke-minimum.yaml). + +## Instructions for specific Cloud providers + +Create and connect to a Kubernetes cluster in your environment: + +- [Azure Kubernetes Service](aks.md) +- [Amazon EKS](eks.md) +- [Google Kubernetes Engine](gke.md) +- [OpenShift](openshift.md) +- [Oracle Container Engine for Kubernetes](oke.md) diff --git a/chart/doc/installation/cloud/aks.md b/chart/doc/installation/cloud/aks.md index 3c75afebb2203ccac051afed964a478269307941..e73d4a9b4ca28f4d8510e66f6722cb2273bea461 100644 --- a/chart/doc/installation/cloud/aks.md +++ b/chart/doc/installation/cloud/aks.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Preparing AKS resources for the GitLab chart --- -# Preparing AKS resources for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} For a fully functional GitLab instance, you need a few resources before deploying the GitLab chart to [Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/what-is-aks). 
diff --git a/chart/doc/installation/cloud/eks.md b/chart/doc/installation/cloud/eks.md index 49ec02b6bd62e51b98174abb1984a11920e078ec..9ac8970984b2912b7d6ed64bba0961f786e88201 100644 --- a/chart/doc/installation/cloud/eks.md +++ b/chart/doc/installation/cloud/eks.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Preparing EKS resources for the GitLab chart --- -# Preparing EKS resources for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} For a fully functional GitLab instance, you need a few resources before deploying the GitLab chart. @@ -76,11 +78,14 @@ Administrators may also want to consider the [new AWS Service Operator for Kubernetes](https://aws.amazon.com/blogs/opensource/aws-service-operator-kubernetes-available/) to simplify this process. -NOTE: +{{< alert type="note" >}} + Enabling the AWS Service Operator requires a method of managing roles within the cluster. The initial services handling that management task are provided by third party developers. Administrators should keep that in mind when planning for deployment. +{{< /alert >}} + ## Persistent Volume Management There are two methods to manage volume claims on Kubernetes: @@ -126,11 +131,14 @@ and then mapping your desired DNS name to the created ELB using a CNAME record. Since the ELB must be created first before its hostname can be retrieved, follow the next instructions to install GitLab. -NOTE: +{{< alert type="note" >}} + For environments where AWS LoadBalancers are required, [Amazon's Elastic Load Balancers](https://docs.aws.amazon.com/eks/latest/userguide/load-balancing.html) require specialized configuration. 
See [Cloud provider LoadBalancers](../../charts/globals.md#cloud-provider-loadbalancers) +{{< /alert >}} + ## Next Steps Continue with the [installation of the chart](../deployment.md) once you diff --git a/chart/doc/installation/cloud/gke.md b/chart/doc/installation/cloud/gke.md index 355dc2739db05a31b3a231a018c25c81cc81f2fa..62e274cb0edac78f037ffdf21bb6abfdab175458 100644 --- a/chart/doc/installation/cloud/gke.md +++ b/chart/doc/installation/cloud/gke.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Preparing GKE resources for the GitLab chart --- -# Preparing GKE resources for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} For a fully functional GitLab instance, you will need a few resources before deploying the GitLab chart. 
The following is how these charts are deployed diff --git a/chart/doc/installation/cloud/oke.md b/chart/doc/installation/cloud/oke.md index d6b4fbeb3944d33100f80b543351cf74388d20f7..b000d4f0eacec6cae543b8bdd5fc963d1e687a92 100644 --- a/chart/doc/installation/cloud/oke.md +++ b/chart/doc/installation/cloud/oke.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Preparing OKE resources for the GitLab chart --- -# Preparing OKE resources for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} For a fully functional GitLab instance, you need a few resources before deploying the GitLab chart to [Oracle Container Engine for Kubernetes (OKE)](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm). Check how to [prepare](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengprerequisites.htm) your Oracle Cloud Infrastructure tenancy before creating the OKE cluster. 
diff --git a/chart/doc/installation/cloud/openshift.md b/chart/doc/installation/cloud/openshift.md index b166e8aa1ab0a25f63a646b9b72d3c5046233159..06c64b98b0c4f78e192a5a8884e6f63eb6c1205a 100644 --- a/chart/doc/installation/cloud/openshift.md +++ b/chart/doc/installation/cloud/openshift.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Preparing OpenShift resources for the GitLab chart --- -# Preparing OpenShift resources for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} This document walks you through using the automation scripts in this project to create an OpenShift cluster in Google Cloud. @@ -82,9 +84,12 @@ All options have defaults, so no options are required. |`LOG_LEVEL`|Verbosity of `openshift-install` output|`info`| |`INSTALL_DIR`|Directory for install assets, useful for launching multiple clusters|`install-$CLUSTER_NAME`| -NOTE: +{{< alert type="note" >}} + The variables `CLUSTER_NAME` and `BASE_DOMAIN` are combined to build the domain name for the cluster. 
+{{< /alert >}} + ## Destroy your OpenShift cluster To destroy the OpenShift cluster: diff --git a/chart/doc/installation/command-line-options.md b/chart/doc/installation/command-line-options.md index c93e34a46e60b9a27e9f9e8ff9469c882978f76d..777e1171e523177ec8545885c8618514ce0fd07e 100644 --- a/chart/doc/installation/command-line-options.md +++ b/chart/doc/installation/command-line-options.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: GitLab Helm chart deployment options --- -# GitLab Helm chart deployment options +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} This page lists commonly used values of the GitLab chart. For a complete list of the available options, refer to the documentation for each subchart. @@ -108,7 +110,7 @@ helm inspect values gitlab/gitlab ### Common settings -See [incoming email configuration examples documentation](https://docs.gitlab.com/ee/administration/incoming_email.html#configuration-examples) +See [incoming email configuration examples documentation](https://docs.gitlab.com/administration/incoming_email/#configuration-examples) for more information. | Parameter | Description | Default | @@ -155,7 +157,7 @@ See the [instructions for creating secrets](secrets.md). As a requirement for Service Desk, the Incoming Mail must be [configured](#incoming-email-configuration). Note that the email address for both Incoming Mail and Service Desk must use -[email sub-addressing](https://docs.gitlab.com/ee/administration/incoming_email.html#email-sub-addressing). +[email sub-addressing](https://docs.gitlab.com/administration/incoming_email/#email-sub-addressing). 
When setting the email addresses in each section the tag added to the username must be `+%{key}`. @@ -240,7 +242,7 @@ might need to add specific RBAC rules Prefix NGINX Ingress values with `nginx-ingress`. For example, set the controller image tag using `nginx-ingress.controller.image.tag`. -See [`nginx-ingress` chart](../charts/nginx/index.md). +See [`nginx-ingress` chart](../charts/nginx/_index.md). ## Advanced in-cluster Redis configuration @@ -317,7 +319,7 @@ settings from the [Redis chart](https://github.com/bitnami/charts/tree/main/bitn | `gitlab-runner.resources.requests.memory` | runner resources | | | `gitlab-runner.runners.privileged` | run in privileged mode, needed for `dind` | false | | `gitlab-runner.runners.cache.secretName` | secret to get `accesskey` and `secretkey` from | `gitlab-minio` | -| `gitlab-runner.runners.config` | Runner configuration as string | See [Chart documentation](../charts/gitlab/gitlab-runner/index.md#default-runner-configuration) | +| `gitlab-runner.runners.config` | Runner configuration as string | See [Chart documentation](../charts/gitlab/gitlab-runner/_index.md#default-runner-configuration) | | `gitlab-runner.unregisterRunners` | Unregisters all runners in the local `config.toml` when the chart is installed. If the token is prefixed with `glrt-`, the runner manager is deleted, not the runner. The runner manager is identified by the runner and the machine that contains the `config.toml`. If the runner was registered with a registration token, the runner is deleted. | true | | `gitlab.geo-logcursor.securityContext.fsGroup` | Group ID under which the pod should be started | `1000` | | `gitlab.geo-logcursor.securityContext.runAsUser` | User ID under which the pod should be started | `1000` | @@ -494,4 +496,4 @@ for the exhaustive list of configuration options. In certain scenarios (i.e. offline environment), you may want to bring your own images rather than pulling them down from the Internet. 
This requires specifying your own Docker image registry/repository for each of the charts that make up the GitLab release. -Refer to the [custom images documentation](../advanced/custom-images/index.md) for more information. +Refer to the [custom images documentation](../advanced/custom-images/_index.md) for more information. diff --git a/chart/doc/installation/database_upgrade.md b/chart/doc/installation/database_upgrade.md index 58b96aec8f8a51b9088095ce4f3d1afa7fdc0a16..f2cdfd41063d148248d647d5a26d812fc864783d 100644 --- a/chart/doc/installation/database_upgrade.md +++ b/chart/doc/installation/database_upgrade.md @@ -2,46 +2,62 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Upgrade the bundled PostgreSQL version --- -# Upgrade the bundled PostgreSQL version +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +{{< alert type="note" >}} -NOTE: These steps are if you are using the bundled PostgreSQL chart (`postgresql.install` is not false), and not for external PostgreSQL setups. +{{< /alert >}} + Changing to a new major version of PostgreSQL using the bundle PostgreSQL chart is done via a backup on the existing database, then restoring to the new database. -NOTE: +{{< alert type="note" >}} + As part of the `7.0.0` release of this chart, we upgraded the default PostgreSQL version from `12.7.0` to `14.8.0`. This is done by upgrading [PostgreSQL chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) version from `8.9.4` to `12.5.2`. +{{< /alert >}} + This is NOT a drop in replacement. Manual steps need to be performed to upgrade the database. The steps have been documented in the [upgrade steps](#steps-for-upgrading-the-bundled-postgresql). 
-NOTE: +{{< alert type="note" >}} + As part of the `5.0.0` release of this chart, we upgraded the bundled PostgreSQL version from `11.9.0` to `12.7.0`. This is not a drop in replacement. Manual steps need to be performed to upgrade the database. The steps have been documented in the [upgrade steps](#steps-for-upgrading-the-bundled-postgresql). -NOTE: +{{< /alert >}} + +{{< alert type="note" >}} + As part of the `4.0.0` release of this chart, we upgraded the bundled [PostgreSQL chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) from `7.7.0` to `8.9.4`. This is not a drop in replacement. Manual steps need to be performed to upgrade the database. The steps have been documented in the [upgrade steps](#steps-for-upgrading-the-bundled-postgresql). +{{< /alert >}} ## Steps for upgrading the bundled PostgreSQL -NOTE: +{{< alert type="note" >}} + Starting from `7.0.0`, GitLab chart not longer mounts PostgreSQL credentials as files inside of the PostgreSQL instance. This is done by setting `postgresql.auth.usePasswordFiles` to `false`. This means that database credentials are passed as environment variables instead of password files, only for this component. +{{< /alert >}} + This is due to [an issue](https://github.com/bitnami/charts/issues/16707) in upstream PostgreSQL chart. If you do not want to use environment variables for PostgreSQL passwords and prefer to use files you need to follow the instructions for manual [editing the existing PostgreSQL passwords Secret](#edit-the-existing-postgresql-passwords-secret) and @@ -72,14 +88,20 @@ curl -s "https://gitlab.com/gitlab-org/charts/gitlab/-/raw/${GITLAB_RELEASE}/scr ### Delete existing PostgreSQL data -NOTE: +{{< alert type="note" >}} + Since the PostgreSQL data format has changed, upgrading requires removing the existing PostgreSQL StatefulSet before upgrading the release. The StatefulSet will be recreated in the next step. 
-WARNING: +{{< /alert >}} + +{{< alert type="warning" >}} + Ensure that you have created a database backup in the previous step. Without a backup, GitLab data will be lost. +{{< /alert >}} + ```shell kubectl delete statefulset RELEASE-NAME-postgresql kubectl delete pvc data-RELEASE_NAME-postgresql-0 @@ -133,11 +155,14 @@ Note the following: ## Edit the existing PostgreSQL passwords Secret -NOTE: +{{< alert type="note" >}} + This is only for `7.0.0` upgrade, and only when you want enforce the use password files inside of the PostgreSQL service containers. -The new version of [PostgreSQL chart](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) uses different +{{< /alert >}} + +The new version of [PostgreSQL chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) uses different keys to reference passwords in a Secrets. Instead of `postgresql-password` and `postgresql-postgres-password` it now uses `password` and `postgres-password`. These keys must be changed in `RELEASE-postgresql-password` Secret _WITHOUT_ changing their values. diff --git a/chart/doc/installation/deployment.md b/chart/doc/installation/deployment.md index 52715cf4148e2ed17637c195abcd3d68ff0ffd75..c3818d5075e4138a90e51a4c96d224fbb46af94e 100644 --- a/chart/doc/installation/deployment.md +++ b/chart/doc/installation/deployment.md @@ -2,24 +2,29 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Deploy the GitLab Helm chart --- -# Deploy the GitLab Helm chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} Before running `helm install`, you need to make some decisions about how you will run GitLab. 
Options can be specified using Helm's `--set option.name=value` command-line option. This guide will cover required values and common options. For a complete list of options, read [Installation command line options](command-line-options.md). -WARNING: +{{< alert type="warning" >}} + The default Helm chart configuration is **not intended for production**. The default chart creates a proof of concept (PoC) implementation where all GitLab services are deployed in the cluster. For production deployments, you must follow the -[Cloud Native Hybrid reference architecture](index.md#use-the-reference-architectures). +[Cloud Native Hybrid reference architecture](_index.md#use-the-reference-architectures). + +{{< /alert >}} For a production deployment, you should have strong working knowledge of Kubernetes. This method of deployment has different management, observability, and concepts than traditional deployments. @@ -106,10 +111,10 @@ specifying `--set global.edition=ce`. If you also specified individual images (for example, `--set gitlab.unicorn.image.repository=registry.gitlab.com/gitlab-org/build/cng/gitlab-unicorn-ce`), you need to omit any occurrence of those images. -After the deployment, you can [activate your Enterprise Edition license](https://docs.gitlab.com/ee/administration/license.html). +After the deployment, you can [activate your Enterprise Edition license](https://docs.gitlab.com/administration/license/). ## Recommended next steps After completing your installation, consider taking the -[recommended next steps](https://docs.gitlab.com/ee/install/next_steps.html), +[recommended next steps](https://docs.gitlab.com/install/next_steps/), including authentication options and sign-up restrictions. 
diff --git a/chart/doc/installation/migration/_index.md b/chart/doc/installation/migration/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..8df168e7363f88eb446029fb4e5690521403c896 --- /dev/null +++ b/chart/doc/installation/migration/_index.md @@ -0,0 +1,24 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Migration guides for the GitLab Helm chart +--- + +{{< details >}} + +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} + +Migrate from and to the Helm chart: + +- [Migrate from the Linux package to the Helm chart.](package_to_helm.md) +- [Migrate from the Helm chart to the Linux package.](helm_to_package.md) + +Other migrations: + +- [Migrate between Helm versions.](helm.md) +- [Migrate to the built-in MinIO service for object storage.](minio.md) +- [Migrate from Gitaly chart to external Gitaly](../../advanced/external-gitaly/_index.md#migrate-from-gitaly-chart-to-external-gitaly) diff --git a/chart/doc/installation/migration/helm.md b/chart/doc/installation/migration/helm.md index 4f0b4e492f95d9acc9d6285b855f5e6c94911fb6..fd30788f06c5b5debf09d09d5860a225a0bb62f3 100644 --- a/chart/doc/installation/migration/helm.md +++ b/chart/doc/installation/migration/helm.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Migrating from Helm v2 to Helm v3 --- -# Migrating from Helm v2 to Helm v3 +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} [Helm v2 was officially 
deprecated](https://helm.sh/blog/helm-v2-deprecation-timeline/) in November of 2020. Starting from GitLab Helm chart version 5.0 (GitLab App version 14.0), installation and upgrades using Helm v2.x are no longer supported. To get future GitLab updates, you will need to migrate to Helm v3. @@ -54,10 +56,13 @@ on some Deployments and StatefulSets are immutable and can not be changed from ` To work around this use the following instructions: -NOTE: +{{< alert type="note" >}} + These instructions _forcefully replace resources_, notably Redis StatefulSet. You need to ensure that the attached data volume to this StatefulSet is safe and remains intact. +{{< /alert >}} + 1. Replace cert-manager Deployments (when enabled). ```shell diff --git a/chart/doc/installation/migration/helm_to_package.md b/chart/doc/installation/migration/helm_to_package.md index 6fe985caa340e146378cd1dfbba1ad9c504c7f6a..6cc8b34b9127da775950f3194affda726ed06130 100644 --- a/chart/doc/installation/migration/helm_to_package.md +++ b/chart/doc/installation/migration/helm_to_package.md @@ -2,22 +2,24 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Migrate from the Helm chart to the Linux package --- -# Migrate from the Helm chart to the Linux package +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} To migrate from a Helm installation to a Linux package (Omnibus) installation: 1. On the left sidebar, at the bottom, select **Admin Area**. 1. Select **Overview > Components** to check your current version of GitLab. 1. 
Prepare a clean machine and - [install the Linux package](https://docs.gitlab.com/ee/update/package/index.html) + [install the Linux package](https://docs.gitlab.com/update/package/) that matches your GitLab Helm chart version. -1. [Verify the integrity of Git repositories](https://docs.gitlab.com/ee/administration/raketasks/check.html) +1. [Verify the integrity of Git repositories](https://docs.gitlab.com/administration/raketasks/check/) on your GitLab Helm chart instance before the migration. 1. Create [a backup of your GitLab Helm chart instance](../../backup-restore/backup.md), and make sure to [back up the secrets](../../backup-restore/backup.md#back-up-the-secrets) @@ -41,9 +43,9 @@ To migrate from a Helm installation to a Linux package (Omnibus) installation: sudo gitlab-ctl reconfigure ``` -1. In the Linux package instance, configure [object storage](https://docs.gitlab.com/ee/administration/object_storage.html), +1. In the Linux package instance, configure [object storage](https://docs.gitlab.com/administration/object_storage/), and make sure it works by testing LFS, artifacts, uploads, and so on. -1. If you use the Container Registry, [configure its object storage separately](https://docs.gitlab.com/ee/administration/packages/container_registry.html#use-object-storage). It does not support +1. If you use the Container Registry, [configure its object storage separately](https://docs.gitlab.com/administration/packages/container_registry/#use-object-storage). It does not support the consolidated object storage. 1. Sync the data from your object storage connected to the Helm chart instance with the new storage connected to the Linux package instance. A couple of notes: @@ -58,8 +60,8 @@ To migrate from a Helm installation to a Linux package (Omnibus) installation: you uninstall GitLab Helm chart if you are using the built-in MinIO instance. 1. 
Copy the GitLab Helm backup to `/var/opt/gitlab/backups` on your Linux package instance, and - [perform the restore](https://docs.gitlab.com/ee/administration/backup_restore/restore_gitlab.html#restore-for-linux-package-installations). -1. After the restore is complete, run the [doctor Rake tasks](https://docs.gitlab.com/ee/administration/raketasks/check.html) + [perform the restore](https://docs.gitlab.com/administration/backup_restore/restore_gitlab/#restore-for-linux-package-installations). +1. After the restore is complete, run the [doctor Rake tasks](https://docs.gitlab.com/administration/raketasks/check/) to make sure that the secrets are valid. 1. After everything is verified, you may [uninstall](../uninstall.md) the GitLab Helm chart instance. diff --git a/chart/doc/installation/migration/minio.md b/chart/doc/installation/migration/minio.md index 59cb511b358b0fd2c59f8db0efe0bf00b0c155bc..fa1fd9e5e0d8b893c00ca8ffc329e7391eaa8c5c 100644 --- a/chart/doc/installation/migration/minio.md +++ b/chart/doc/installation/migration/minio.md @@ -2,19 +2,21 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Use the built-in MinIO service for object storage --- -# Use the built-in MinIO service for object storage +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} This migration guide is for when you migrate from a [package-based installation](package_to_helm.md) to the Helm chart and you want to use the built-in MinIO service for object storage. This is better suited for testing purposes. 
For production use, you are advised to set up an -[external object storage](../../advanced/external-object-storage/index.md) +[external object storage](../../advanced/external-object-storage/_index.md) The easiest way to figure out the access details to built-in MinIO cluster is to look at the `gitlab.yml` file that is generated in Sidekiq, Webservice and @@ -55,12 +57,15 @@ To grab it from the Sidekiq pod: ``` 1. Use this information to - [configure the object storage](https://docs.gitlab.com/ee/administration/uploads.html#s3-compatible-connection-settings) + [configure the object storage](https://docs.gitlab.com/administration/uploads/#s3-compatible-connection-settings) in the `/etc/gitlab/gitlab.rb` file of the package-based deployment. - NOTE: - For connecting to the MinIO service from outside the cluster, the + {{< alert type="note" >}} + +For connecting to the MinIO service from outside the cluster, the MinIO host URL alone is enough. Helm charts based installations are configured to redirect requests coming to that URL automatically to the corresponding endpoint. So, you don't need to set the `endpoint` value in the connection settings in `/etc/gitlab/gitlab.rb`. 
+ +{{< /alert >}} diff --git a/chart/doc/installation/migration/package_to_helm.md b/chart/doc/installation/migration/package_to_helm.md index 74089495d25e134cbb8ecb9b6c6d2a610043687d..73b913b00f833b9d72f103b2c886bf0a43c4cfb9 100644 --- a/chart/doc/installation/migration/package_to_helm.md +++ b/chart/doc/installation/migration/package_to_helm.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Migrate from the Linux package to the Helm chart --- -# Migrate from the Linux package to the Helm chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} This guide will help you migrate from a package-based GitLab installation to the Helm chart. @@ -20,12 +22,12 @@ Before the migration, a few prerequisites must be met: - The package-based GitLab instance must be up and running. Run `gitlab-ctl status` and confirm no services report a `down` state. - It is a good practice to - [verify the integrity](https://docs.gitlab.com/ee/administration/raketasks/check.html) + [verify the integrity](https://docs.gitlab.com/administration/raketasks/check/) of Git repositories prior to the migration. - A Helm charts based deployment running the same GitLab version as the package-based installation is required. - You need to set up the object storage which the Helm chart based deployment - will use. For production use, we recommend you use an [external object storage](../../advanced/external-object-storage/index.md) + will use. For production use, we recommend you use an [external object storage](../../advanced/external-object-storage/_index.md) and have the login credentials to access it ready. 
If you are using the built-in MinIO service, [read the docs](minio.md) on how to grab the login credentials from it. @@ -35,18 +37,18 @@ Before the migration, a few prerequisites must be met: 1. Migrate any existing data from the package-based installation to object storage: - 1. [Migrate to object storage](https://docs.gitlab.com/ee/administration/object_storage.html#migrate-to-object-storage). + 1. [Migrate to object storage](https://docs.gitlab.com/administration/object_storage/#migrate-to-object-storage). 1. Visit the package-based GitLab instance and make sure the migrated data are available. For example check if user, group and project avatars are rendered fine, image and other files added to issues load correctly, etc. -1. [Create a backup tarball](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html) and [exclude all the already migrated directories](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html#excluding-specific-directories-from-the-backup). +1. [Create a backup tarball](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/) and [exclude all the already migrated directories](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/#excluding-specific-directories-from-the-backup). For local backups (default), the backup file is stored under `/var/opt/gitlab/backups`, unless you [explicitly changed the location](https://docs.gitlab.com/omnibus/settings/backups.html#manually-manage-backup-directory). - For [remote storage backups](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html#upload-backups-to-a-remote-cloud-storage), + For [remote storage backups](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/#upload-backups-to-a-remote-cloud-storage), the backup file is stored in the configured bucket. 1. [Restore from the package-based installation](../../backup-restore/restore.md) to the Helm chart, starting with the secrets. 
You will need to migrate the diff --git a/chart/doc/installation/rbac.md b/chart/doc/installation/rbac.md index 55b3c0c84a653fd513a71671d723567e05797e4a..94288a42318aae80d1a78d009c89b5252d44849e 100644 --- a/chart/doc/installation/rbac.md +++ b/chart/doc/installation/rbac.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure RBAC for the GitLab chart --- -# Configure RBAC for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} Until Kubernetes 1.7, there were no permissions within a cluster. With the launch of 1.7, there is now a role based access control system ([RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) which determines what services can perform actions within a cluster. diff --git a/chart/doc/installation/secrets.md b/chart/doc/installation/secrets.md index d59af5c63a0065ce3dc98fa68bf80478d0abed8d..62757c8275660f5be131dd135d7718436a1b3874 100644 --- a/chart/doc/installation/secrets.md +++ b/chart/doc/installation/secrets.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure secrets for the GitLab chart --- -# Configure secrets for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} GitLab requires a variety of secrets to operate: @@ -148,9 +150,12 @@ If this secret is rotated, all SSH clients will see `hostname mismatch` errors. 
### Initial Enterprise license -WARNING: +{{< alert type="warning" >}} + This method will only add a license at the time of installation. Use the Admin Area in the web user interface to renew or upgrade licenses. +{{< /alert >}} + Create a Kubernetes secret for storing the Enterprise license for the GitLab instance. Replace `<name>` with the name of the release. @@ -224,7 +229,11 @@ This secret is referenced by the `global.praefect.authToken.secret` setting. ### GitLab Rails secret -> - The `active_record_encryption_*` keys were added in [GitLab 17.8](../releases/8_0.md#upgrade-to-880). +{{< history >}} + +- The `active_record_encryption_*` keys were added in [GitLab 17.8](../releases/8_0.md#upgrade-to-880). + +{{< /history >}} Replace `<name>` with the name of the release. @@ -250,7 +259,7 @@ kubectl create secret generic <name>-rails-secret --from-file=secrets.yml This secret is referenced by the `global.railsSecrets.secret` setting. It is **not recommended** to rotate this secret as it contains the database encryption keys. If the secret is -rotated, the result will be the same behavior exhibited [when the secrets file is lost](https://docs.gitlab.com/ee/administration/backup_restore/backup_gitlab.html#when-the-secrets-file-is-lost). +rotated, the result will be the same behavior exhibited [when the secrets file is lost](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/#when-the-secrets-file-is-lost). ### GitLab Workhorse secret @@ -307,9 +316,12 @@ This secret is referenced by the `gitlab.kas.websocketToken.secret` setting. ### GitLab Suggested Reviewers secret -NOTE: -The Suggested Reviewers secret is created automatically and only used on GitLab SaaS. -This secret is not needed on self-managed GitLab instances. +{{< alert type="note" >}} + +The Suggested Reviewers secret is created automatically and only used on GitLab.com. +This secret is not needed on GitLab Self-Managed. 
+ +{{< /alert >}} GitLab Rails requires that a secret for Suggested Reviewers is present. You can leave it to the chart to auto-generate the secret, or you can create this secret @@ -347,10 +359,13 @@ This secret is referenced by the `global.psql.password.secret` setting. #### Changing the PostgreSQL password for the bundled PostgreSQL subchart -WARNING: +{{< alert type="warning" >}} + The default Helm chart configuration is **not intended for production**, which includes the bundled PostgreSQL subchart. +{{< /alert >}} + The bundled PostgreSQL subchart only configures the database with the passwords from the secret when the database is initially created. Additional steps need to be taken to change the passwords in an existing database. @@ -428,7 +443,7 @@ Some charts have further secrets to enable functionality that can not be automat ### OmniAuth -In order to enable the use of [OmniAuth Providers](https://docs.gitlab.com/ee/integration/omniauth.html) with the deployed GitLab, please follow the [instructions in the Globals chart](../charts/globals.md#omniauth) +In order to enable the use of [OmniAuth Providers](https://docs.gitlab.com/integration/omniauth/) with the deployed GitLab, please follow the [instructions in the Globals chart](../charts/globals.md#omniauth) ### LDAP Password @@ -441,9 +456,12 @@ kubectl create secret generic ldap-main-password --from-literal=password=yourpas Then use `--set global.appConfig.ldap.servers.main.password.secret=ldap-main-password` to inject the password into your configuration. -NOTE: +{{< alert type="note" >}} + Use the `Secret` name, not the _actual password_ when configuring the Helm property. +{{< /alert >}} + ### SMTP password If you are using an SMTP server that requires authentication, store the password @@ -455,15 +473,18 @@ kubectl create secret generic smtp-password --from-literal=password=yourpassword Then use `--set global.smtp.password.secret=smtp-password` in your Helm command. 
-NOTE: +{{< alert type="note" >}} + Use the `Secret` name, not the _actual password_ when configuring the Helm property. +{{< /alert >}} + ### IMAP password for incoming emails GitLab uses authentication strings such as app passwords, tokens, or IMAP passwords to access incoming emails. -[Find your email provider in the GitLab incoming email documentation](https://docs.gitlab.com/ee/administration/incoming_email.html) +[Find your email provider in the GitLab incoming email documentation](https://docs.gitlab.com/administration/incoming_email/) and set its required authentication string as a Kubernetes secret. ```shell @@ -473,16 +494,19 @@ kubectl create secret generic incoming-email-password --from-literal="password=a Then use `--set global.appConfig.incomingEmail.password.secret=incoming-email-password` in your Helm command along with other required settings as specified [in the docs](command-line-options.md#incoming-email-configuration). -NOTE: +{{< alert type="note" >}} + Use the `Secret` name, not the _actual password_ when configuring the Helm property. +{{< /alert >}} + ### IMAP password for Service Desk emails GitLab uses authentication strings such as app passwords, tokens, or IMAP passwords to access -[Service Desk emails](https://docs.gitlab.com/ee/user/project/service_desk/configure.html#custom-email-address). +[Service Desk emails](https://docs.gitlab.com/user/project/service_desk/configure/#custom-email-address). -[Find your email provider in the GitLab incoming email documentation](https://docs.gitlab.com/ee/administration/incoming_email.html) +[Find your email provider in the GitLab incoming email documentation](https://docs.gitlab.com/administration/incoming_email/) and set its required authentication string as a Kubernetes secret. 
```shell @@ -492,9 +516,12 @@ kubectl create secret generic service-desk-email-password --from-literal="passwo Then use `--set global.appConfig.serviceDeskEmail.password.secret=service-desk-email-password` in your Helm command along with other required settings as specified [in the docs](command-line-options.md#service-desk-email-configuration). -NOTE: +{{< alert type="note" >}} + Use the `Secret` name, not the _actual password_ when configuring the Helm property. +{{< /alert >}} + ### GitLab incoming email auth token When incoming email is configured to use webhook delivery method, there should @@ -534,7 +561,7 @@ This secret is referenced by the `gitlab.zoekt.gateway.basicAuth.secretName` set ### Microsoft Graph client secret for incoming emails -To let GitLab have access to [incoming emails](https://docs.gitlab.com/ee/administration/incoming_email.html) +To let GitLab have access to [incoming emails](https://docs.gitlab.com/administration/incoming_email/) store the password of the IMAP account in a Kubernetes secret: ```shell @@ -544,12 +571,15 @@ kubectl create secret generic incoming-email-client-secret --from-literal=secret Then, use `--set global.appConfig.incomingEmail.clientSecret.secret=incoming-email-client-secret` in your Helm command along with other required settings as specified [in the docs](command-line-options.md#incoming-email-configuration). -NOTE: +{{< alert type="note" >}} + Use the `Secret` name, not the _actual password_ when configuring the Helm property. 
+{{< /alert >}} + ### Microsoft Graph client secret for Service Desk emails -To let GitLab have access to [service_desk emails](https://docs.gitlab.com/ee/user/project/service_desk/configure.html#custom-email-address) +To let GitLab have access to [service_desk emails](https://docs.gitlab.com/user/project/service_desk/configure/#custom-email-address) store the password of the IMAP account in a Kubernetes secret: ```shell @@ -559,9 +589,12 @@ kubectl create secret generic service-desk-email-client-secret --from-literal=se Then, use `--set global.appConfig.serviceDeskEmail.clientSecret.secret=service-desk-email-client-secret` in your Helm command along with other required settings as specified [in the docs](command-line-options.md#service-desk-email-configuration). -NOTE: +{{< alert type="note" >}} + Use the `Secret` name, not the _actual password_ when configuring the Helm property. +{{< /alert >}} + ### Microsoft Graph client secret for outgoing emails Store the password in a Kubernetes secret: @@ -573,9 +606,12 @@ kubectl create secret generic microsoft-graph-mailer-client-secret --from-litera Then, use `--set global.appConfig.microsoft_graph_mailer.client_secret.secret=microsoft-graph-mailer-client-secret` in your Helm command. -NOTE: +{{< alert type="note" >}} + Use the `Secret` name, not the _actual password_ when configuring the Helm property. +{{< /alert >}} + ### S/MIME Certificate Outgoing email messages can be digitally signed using the [S/MIME](https://en.wikipedia.org/wiki/S/MIME) standard. @@ -597,7 +633,7 @@ secret that contains the S/MIME certificate. ### Smartcard Authentication -[Smartcard authentication](https://docs.gitlab.com/ee/administration/auth/smartcard.html) +[Smartcard authentication](https://docs.gitlab.com/administration/auth/smartcard/) uses a custom Certificate Authority (CA) to sign client certificates. 
The certificate of this custom CA needs to be injected to the Webservice pod for it to verify whether a client certificate is valid or not. This is provided as a diff --git a/chart/doc/installation/storage.md b/chart/doc/installation/storage.md index 9c47c395aecce416e7b68a2cba8bb4c82410e496..195df8acbde143707fe7e14183b2c952caf9f23b 100644 --- a/chart/doc/installation/storage.md +++ b/chart/doc/installation/storage.md @@ -2,20 +2,22 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure storage for the GitLab chart --- -# Configure storage for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} The following applications within the GitLab chart require persistent storage to maintain state. -- [Gitaly](../charts/gitlab/gitaly/index.md) (persists the Git repositories) +- [Gitaly](../charts/gitlab/gitaly/_index.md) (persists the Git repositories) - [PostgreSQL](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) (persists the GitLab database data) - [Redis](https://github.com/bitnami/charts/tree/main/bitnami/redis) (persists GitLab job data) -- [MinIO](../charts/minio/index.md) (persists the object storage data) +- [MinIO](../charts/minio/_index.md) (persists the object storage data) The administrator may choose to provision this storage using [dynamic](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic) or [static](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static) volume provisioning. 
@@ -72,8 +74,8 @@ helm install -upgrade gitlab gitlab/gitlab -f HELM_OPTIONS_YAML_FILE Follow the links below for further reading and additional persistence options: -- [Gitaly persistence configuration](../charts/gitlab/gitaly/index.md#git-repository-persistence) -- [MinIO persistence configuration](../charts/minio/index.md#persistence) +- [Gitaly persistence configuration](../charts/gitlab/gitaly/_index.md#git-repository-persistence) +- [MinIO persistence configuration](../charts/minio/_index.md#persistence) - [Redis persistence configuration](https://github.com/bitnami/charts/tree/main/bitnami/redis#persistence) - [Upstream PostgreSQL chart configuration](https://github.com/bitnami/charts/tree/main/bitnami/postgresql#configuration-and-installation-details) @@ -101,11 +103,14 @@ kubectl create -f *PV_YAML_FILE* ### Using Amazon EKS -NOTE: +{{< alert type="note" >}} + If you need to deploy in multiple zones, you should review [Amazon's own documentation on storage classes](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html) when defining your storage solution. +{{< /alert >}} + 1. [Create a persistent disk in the cluster.](https://kubernetes.io/docs/concepts/storage/volumes/#creating-an-ebs-volume) ```shell @@ -165,8 +170,8 @@ After the initial installation, storage changes like migrating to new volumes, or changing disk sizes, require editing the Kubernetes objects outside of the Helm upgrade command. -See the [managing persistent volumes documentation](../advanced/persistent-volumes/index.md). +See the [managing persistent volumes documentation](../advanced/persistent-volumes/_index.md). ## Optional volumes -For larger installations, you may need to add persistent storage to the Toolbox to get backups/restores working. See our [troubleshooting documentation](../backup-restore/index.md#pod-eviction-issues) for a guide on how to do this. +For larger installations, you may need to add persistent storage to the Toolbox to get backups/restores working. 
See our [troubleshooting documentation](../backup-restore/_index.md#pod-eviction-issues) for a guide on how to do this. diff --git a/chart/doc/installation/tls.md b/chart/doc/installation/tls.md index 788099c14c6bae8912a096a258bc98c40b303eca..7efe07f5d095bf6c1ce46e5c8c212f65a33984bd 100644 --- a/chart/doc/installation/tls.md +++ b/chart/doc/installation/tls.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Configure TLS for the GitLab chart --- -# Configure TLS for the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} This chart is capable of doing TLS termination using the NGINX Ingress Controller. You have the choice of how to acquire the TLS certificates for your deployment. Extensive details can be found in [global Ingress settings](../charts/globals.md#configure-ingress-settings). @@ -111,19 +113,25 @@ helm install gitlab gitlab/gitlab \ --set gitlab.kas.ingress.tls.secretName=RELEASE-kas-tls ``` -NOTE: +{{< alert type="note" >}} + If you are configuring your GitLab instance to talk with other services, it may be necessary to [provide the certificate chains](../charts/globals.md#custom-certificate-authorities) for those services to GitLab through the Helm chart as well. +{{< /alert >}} + ## Option 4: Use auto-generated self-signed wildcard certificate These charts also provide the capability to provide a auto-generated self-signed wildcard certificate. This can be useful in environments where Let's Encrypt is not an option, but security via SSL is still desired. This functionality is provided by the [shared-secrets](../charts/shared-secrets.md) job. 
-NOTE: +{{< alert type="note" >}} + The `gitlab-runner` chart does not function properly with self-signed certificates. We recommend disabling it, as shown below. +{{< /alert >}} + ```shell helm install gitlab gitlab/gitlab \ --set certmanager.install=false \ @@ -140,7 +148,7 @@ also use directly for GitLab Runner via `gitlab-runner.certsSecretName=RELEASE-w ## TLS requirement for GitLab Pages -For [GitLab Pages with TLS support](https://docs.gitlab.com/ee/administration/pages/#wildcard-domains-with-tls-support), +For [GitLab Pages with TLS support](https://docs.gitlab.com/administration/pages/#wildcard-domains-with-tls-support), a wildcard certificate applicable for `*.<pages domain>` (default value of `<pages domain>` is `pages.<base domain>`) is required. diff --git a/chart/doc/installation/tools.md b/chart/doc/installation/tools.md index 6787d9b3e65a2e79f1bf6750e8a018a4b75e2ed4..d525e0dcf9fe40ed9955011707b7ae554de11c16 100644 --- a/chart/doc/installation/tools.md +++ b/chart/doc/installation/tools.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: GitLab chart prerequisites --- -# GitLab chart prerequisites +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} Before you deploy GitLab in a Kubernetes cluster, install the following prerequisites and decide on the options to use when you install. @@ -32,7 +34,7 @@ is provided by [`bitnami/PostgreSQL`](https://artifacthub.io/packages/helm/bitna This deployment is for trial purposes only and **not recommended for use in production**. You should set up an -[external, production-ready PostgreSQL instance](../advanced/external-db/index.md). 
+[external, production-ready PostgreSQL instance](../advanced/external-db/_index.md). Recommended default versions: - PostgreSQL 13 since GitLab chart 6.0. @@ -48,7 +50,7 @@ is provided by [`bitnami/Redis`](https://artifacthub.io/packages/helm/bitnami/re This deployment is for trial purposes only and **not recommended for use in production**. You should set up an -[external, production-ready Redis instance](../advanced/external-redis/index.md). +[external, production-ready Redis instance](../advanced/external-redis/_index.md). For all the available configuration settings, see the [Redis globals documentation](../charts/globals.md#configure-redis-settings). @@ -58,10 +60,10 @@ not enabled by default. Such functionality has not been load tested by GitLab. ### Gitaly By default, the GitLab chart includes an in-cluster Gitaly deployment. For production, running Gitaly in Kubernetes is not supported. -[Gitaly is only supported on conventional virtual machines](https://docs.gitlab.com/ee/administration/reference_architectures/index.html#stateful-components-in-kubernetes). +[Gitaly is only supported on conventional virtual machines](https://docs.gitlab.com/administration/reference_architectures/#stateful-components-in-kubernetes). You should set up an -[external, production-ready Gitaly instance](../advanced/external-gitaly/index.md). +[external, production-ready Gitaly instance](../advanced/external-gitaly/_index.md). For all the available configuration settings, see the [Gitaly globals documentation](../charts/globals.md#configure-gitaly-settings). @@ -107,12 +109,15 @@ you don't need any additional DNS configuration for GitLab. However, you must de [has a comprehensive guide](https://github.com/kubernetes-sigs/external-dns#deploying-to-a-cluster) for each supported provider. -NOTE: +{{< alert type="note" >}} + If you enable custom domain support for GitLab Pages, `external-dns` no longer works for the Pages domain (`pages.<global.hosts.domain>` by default). 
You must manually configure the DNS entry to point the domain to the external IP address dedicated to Pages. +{{< /alert >}} + If you provision a [GKE cluster](cloud/gke.md) by using the provided script, `external-dns` is automatically installed in your cluster. @@ -147,10 +152,13 @@ dynamic provisioner creates the underlying persistent volumes. If you would like to customize the `storageClass` or manually create and assign volumes, review the [storage documentation](storage.md). -NOTE: +{{< alert type="note" >}} + After the initial deployment, making changes to your storage settings requires manually editing Kubernetes objects. Therefore, it's best to plan ahead before deploying your production instance to avoid extra storage migration work. +{{< /alert >}} + ### TLS certificates You should be running GitLab with HTTPS, which requires TLS certificates. By default, the @@ -293,21 +301,21 @@ Prometheus `tls_config.server_name`. | Service | Metrics Port(default) | Supports TLS? | Notes/Docs/Issue | | --- | --- | --- | --- | -| [Gitaly](../charts/gitlab/gitaly/index.md) | 9236 | YES | Enabled using `global.gitaly.tls.enabled=true` <br>Default Secret: `RELEASE-gitaly-tls` <br>[Docs: Running Gitaly over TLS](../charts/gitlab/gitaly/index.md#running-gitaly-over-tls) | -| [GitLab Exporter](../charts/gitlab/gitlab-exporter/index.md) | 9168 | YES | Enabled using `gitlab.gitlab-exporter.tls.enabled=true` <br>Default Secret: `RELEASE-gitlab-exporter-tls` | -| [GitLab Pages](../charts/gitlab/gitlab-pages/index.md) | 9235 | YES | Enabled using `gitlab.gitlab-pages.metrics.tls.enabled=true` <br>Default Secret: `RELEASE-pages-metrics-tls` <br>[Docs: General settings](../charts/gitlab/gitlab-pages/index.md#general-settings) | -| [GitLab Runner](../charts/gitlab/gitlab-runner/index.md) | 9252 | NO | [Issue - Add TLS Support for Metrics Endpoint](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29176) | -| [GitLab Shell](../charts/gitlab/gitlab-shell/index.md) | 9122 | NO | The 
GitLab Shell metrics exporter is only enabled when using [`gitlab-sshd`](https://docs.gitlab.com/ee/administration/operations/gitlab_sshd.html). OpenSSH is recommended for environments that require TLS | -| [KAS](../charts/gitlab/kas/index.md) | 8151 | YES | Can be configured using `global.kas.customConfig.observability.listen.certificate_file` and `global.kas.customConfig.observability.listen.key_file` options | -| [Praefect](../charts/gitlab/praefect/index.md) | 9236 | YES | Enabled using `global.praefect.tls.enabled=true` <br>Default Secret: `RELEASE-praefect-tls` <br>[Docs: Running Praefect over TLS](../charts/gitlab/praefect/index.md#running-praefect-over-tls) | -| [Registry](../charts/registry/index.md) | 5100 | YES | Enabled using `registry.debug.tls.enabled=true` <br>[Docs: Registry - Configuring TLS for the debug port](../charts/registry/index.md#configuring-tls-for-the-debug-port) | -| [Sidekiq](../charts/gitlab/sidekiq/index.md) | 3807 | YES | Enabled using `gitlab.sidekiq.metrics.tls.enabled=true` <br>Default Secret: `RELEASE-sidekiq-metrics-tls` <br>[Docs: Installation command line options](../charts/gitlab/sidekiq/index.md#installation-command-line-options) | -| [Webservice](../charts/gitlab/sidekiq/index.md) | 8083 | YES | Enabled using `gitlab.webservice.metrics.tls.enabled=true` <br>Default Secret: `RELEASE-webservice-metrics-tls` <br>[Docs: Installation command line options](../charts/gitlab/webservice/index.md#installation-command-line-options) | -| [Ingress-NGINX](../charts/nginx/index.md) | 10254 | NO | Does not support TLS on metrics/healthcheck port | +| [Gitaly](../charts/gitlab/gitaly/_index.md) | 9236 | YES | Enabled using `global.gitaly.tls.enabled=true` <br>Default Secret: `RELEASE-gitaly-tls` <br>[Docs: Running Gitaly over TLS](../charts/gitlab/gitaly/_index.md#running-gitaly-over-tls) | +| [GitLab Exporter](../charts/gitlab/gitlab-exporter/_index.md) | 9168 | YES | Enabled using `gitlab.gitlab-exporter.tls.enabled=true` <br>Default 
Secret: `RELEASE-gitlab-exporter-tls` | +| [GitLab Pages](../charts/gitlab/gitlab-pages/_index.md) | 9235 | YES | Enabled using `gitlab.gitlab-pages.metrics.tls.enabled=true` <br>Default Secret: `RELEASE-pages-metrics-tls` <br>[Docs: General settings](../charts/gitlab/gitlab-pages/_index.md#general-settings) | +| [GitLab Runner](../charts/gitlab/gitlab-runner/_index.md) | 9252 | NO | [Issue - Add TLS Support for Metrics Endpoint](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29176) | +| [GitLab Shell](../charts/gitlab/gitlab-shell/_index.md) | 9122 | NO | The GitLab Shell metrics exporter is only enabled when using [`gitlab-sshd`](https://docs.gitlab.com/administration/operations/gitlab_sshd/). OpenSSH is recommended for environments that require TLS | +| [KAS](../charts/gitlab/kas/_index.md) | 8151 | YES | Can be configured using `global.kas.customConfig.observability.listen.certificate_file` and `global.kas.customConfig.observability.listen.key_file` options | +| [Praefect](../charts/gitlab/praefect/_index.md) | 9236 | YES | Enabled using `global.praefect.tls.enabled=true` <br>Default Secret: `RELEASE-praefect-tls` <br>[Docs: Running Praefect over TLS](../charts/gitlab/praefect/_index.md#running-praefect-over-tls) | +| [Registry](../charts/registry/_index.md) | 5100 | YES | Enabled using `registry.debug.tls.enabled=true` <br>[Docs: Registry - Configuring TLS for the debug port](../charts/registry/_index.md#configuring-tls-for-the-debug-port) | +| [Sidekiq](../charts/gitlab/sidekiq/_index.md) | 3807 | YES | Enabled using `gitlab.sidekiq.metrics.tls.enabled=true` <br>Default Secret: `RELEASE-sidekiq-metrics-tls` <br>[Docs: Installation command line options](../charts/gitlab/sidekiq/_index.md#installation-command-line-options) | +| [Webservice](../charts/gitlab/webservice/_index.md) | 8083 | YES | Enabled using `gitlab.webservice.metrics.tls.enabled=true` <br>Default Secret: `RELEASE-webservice-metrics-tls` <br>[Docs: Installation command line 
options](../charts/gitlab/webservice/_index.md#installation-command-line-options) | +| [Ingress-NGINX](../charts/nginx/_index.md) | 10254 | NO | Does not support TLS on metrics/healthcheck port | For the webservice pod, the exposed port is the standalone webrick exporter in the webservice container. The workhorse container port is not scraped. See the -[Webservice Metrics documentation](../charts/gitlab/webservice/index.md#metrics) +[Webservice Metrics documentation](../charts/gitlab/webservice/_index.md#metrics) for additional details. ### Outgoing email @@ -326,12 +334,12 @@ If your Kubernetes cluster is on GKE, be aware that SMTP ### Incoming email The configuration of incoming email is documented in the -[mailroom chart](../charts/gitlab/mailroom/index.md#incoming-email). +[mailroom chart](../charts/gitlab/mailroom/_index.md#incoming-email). ### Service Desk email The configuration of incoming email is documented in the -[mailroom chart](../charts/gitlab/mailroom/index.md#service-desk-email). +[mailroom chart](../charts/gitlab/mailroom/_index.md#service-desk-email). ### RBAC @@ -347,4 +355,4 @@ have RBAC enabled, you must disable these settings: ## Next steps -[Set up your cloud provider and create your cluster](cloud/index.md). +[Set up your cloud provider and create your cluster](cloud/_index.md). 
diff --git a/chart/doc/installation/uninstall.md b/chart/doc/installation/uninstall.md index 798ff8d0cb364d89bd4761c93785792703238696..6182b1439fa1d96c3e0aa1a5ae2b685d98a55efc 100644 --- a/chart/doc/installation/uninstall.md +++ b/chart/doc/installation/uninstall.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Uninstall the GitLab Helm chart --- -# Uninstall the GitLab Helm chart - To uninstall the GitLab Helm chart, run the following command: ```shell diff --git a/chart/doc/installation/upgrade.md b/chart/doc/installation/upgrade.md index fc4e3e0df4f8922fa11fe459d681ba29ec9b1dbf..ac78f95d1833961b501315b7d19ff5f0d1018793 100644 --- a/chart/doc/installation/upgrade.md +++ b/chart/doc/installation/upgrade.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Upgrade the GitLab chart --- -# Upgrade the GitLab chart +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} Before upgrading your GitLab installation, you need to check the [changelog](https://gitlab.com/gitlab-org/charts/gitlab/blob/master/CHANGELOG.md) @@ -16,14 +18,17 @@ corresponding to the specific release you want to upgrade to and look for any [release notes](version_mappings.md#release-notes-for-each-supported-version) that might pertain to the new GitLab chart version. -Upgrades have to follow a supported [upgrade path](https://docs.gitlab.com/ee/update/#upgrade-paths). +Upgrades have to follow a supported [upgrade path](https://docs.gitlab.com/update/#upgrade-paths). 
Because the GitLab chart versions don't follow the same numbering as GitLab versions, see the [version mappings](version_mappings.md) between them. -NOTE: +{{< alert type="note" >}} + **Zero-downtime upgrades** are not available with the GitLab charts but can be achieved by using [GitLab Operator](https://docs.gitlab.com/operator/gitlab_upgrades.html). -We also recommend that you take a [backup](../backup-restore/index.md) first. Also note that you +{{< /alert >}} + +We also recommend that you take a [backup](../backup-restore/_index.md) first. Also note that you must provide all values using `helm upgrade --set key=value` syntax or `-f values.yaml` instead of using `--reuse-values`, because some of the current values might be deprecated. @@ -35,11 +40,14 @@ This safely replaces the behavior of `--reuse-values` ## Steps -NOTE: +{{< alert type="note" >}} + If you're upgrading to the `7.0` version of the chart, follow the [manual upgrade steps for 7.0](#upgrade-to-version-70). If you're upgrading to the `6.0` version of the chart, follow the [manual upgrade steps for 6.0](#upgrade-to-version-60). If you're upgrading to an older version of the chart, follow the [upgrade steps for older versions](#older-upgrade-instructions). +{{< /alert >}} + Before you upgrade, reflect on your set values and if you've possibly "over-configured" your settings. We expect you to maintain a small list of modified values, and leverage most of the chart defaults. If you've explicitly set a large number of settings by: - Copying computed settings @@ -73,10 +81,13 @@ Ensure that you explicitly set it back to `true` for future updates. ## Upgrade the bundled PostgreSQL chart -NOTE: +{{< alert type="note" >}} + If you aren't using the bundled PostgreSQL chart (`postgresql.install` is false), you do not need to perform this step. +{{< /alert >}} + ### Upgrade the bundled PostgreSQL to version 13 PostgreSQL 13 is supported by GitLab 14.1 and later. 
[PostgreSQL 13 brings significant performance improvements](https://www.postgresql.org/about/news/postgresql-13-released-2077/). @@ -90,11 +101,14 @@ To upgrade the bundled PostgreSQL to version 13, the following steps are require ## Upgrade to version 7.0 -WARNING: +{{< alert type="warning" >}} + If you are upgrading from the `6.x` version of the chart to the latest `7.0` release, you need to first update to the latest `6.11.x` patch release in order for the upgrade to work. The [7.0 release notes](../releases/7_0.md) describe the supported upgrade path. +{{< /alert >}} + The `7.0.x` release may require manual steps in order to perform the upgrade. - If using the bundled [`bitnami/Redis`](https://artifacthub.io/packages/helm/bitnami/redis) sub-chart @@ -160,11 +174,14 @@ behavior for pre-existing Ingresses. ## Upgrade to version 6.0 -WARNING: +{{< alert type="warning" >}} + If you are upgrading from the `5.x` version of the chart to the latest `6.0` release, you need to first update to the latest `5.10.x` patch release in order for the upgrade to work. The [6.0 release notes](../releases/6_0.md) describe the supported upgrade path. +{{< /alert >}} + To upgrade to the `6.0` release you must first be on the latest `5.10.x` patch release. There isn't any additional manual changes required in `6.0` so you can [follow the regular release upgrade steps](#steps). 
diff --git a/chart/doc/installation/verify_cng_images.md b/chart/doc/installation/verify_cng_images.md index 6cbd9c90eaa3ca9433dc26d6792b7bfec0f0b60a..6d9ff68588b215e791c494d505264331c76c2f79 100644 --- a/chart/doc/installation/verify_cng_images.md +++ b/chart/doc/installation/verify_cng_images.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Verifying integrity of CNG images --- -# Verifying integrity of CNG images - To ensure the CNG images aren't tampered with after they are pushed to the registry, their digests are signed using [`cosign`](https://github.com/sigstore/cosign). `cosign` uses ECDSA-P256 keys @@ -13,12 +12,15 @@ and SHA256 hashes. Keys are stored in PEM-encoded PKCS8 format. These digests can be verified using `cosign verify` command as described below: -NOTE: +{{< alert type="note" >}} + The images are signed using a private key and can be only verified locally using the corresponding public key. Moving to a keyless signing/verification with GitLab.com OIDC provider is being discussed in [issue 638](https://gitlab.com/gitlab-org/build/CNG/-/issues/638). +{{< /alert >}} + 1. 
Download the public key used for signing from [https://charts.gitlab.io/cosign.pub](https://charts.gitlab.io/cosign.pub): ```shell diff --git a/chart/doc/installation/version_mappings.md b/chart/doc/installation/version_mappings.md index 33d29f606578b638dd68b076092018c85a353d85..a32bfaa75d7f2622bb448abb030b20817dff9d30 100644 --- a/chart/doc/installation/version_mappings.md +++ b/chart/doc/installation/version_mappings.md @@ -2,13 +2,15 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: GitLab chart versions --- -# GitLab chart versions +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} The GitLab chart doesn't have the same version number as GitLab itself. This means that breaking changes can be introduced to the chart independent of GitLab. 
@@ -23,6 +25,7 @@ helm search repo -l gitlab/gitlab ## Release notes for each supported version +- [9.0](../releases/9_0.md) - [8.0](../releases/8_0.md) - [7.0](../releases/7_0.md) - [6.0](../releases/6_0.md) @@ -33,12 +36,19 @@ The table below maps some of the key previous supported chart versions and suppo | Chart version | GitLab version | |---------------|----------------| +| 8.9.1 | 17.9.1 | +| 8.9.0 | 17.9.0 | +| 8.8.2 | 17.8.2 | | 8.8.1 | 17.8.1 | | 8.8.0 | 17.8.0 | +| 8.7.6 | 17.7.4 | +| 8.7.5 | 17.7.3 | | 8.7.4 | 17.7.2 | | 8.7.3 | 17.7.1 | | 8.7.2 | 17.7.0 | | 8.7.0 | 17.7.0 | +| 8.6.5 | 17.6.5 | +| 8.6.4 | 17.6.4 | | 8.6.3 | 17.6.3 | | 8.6.2 | 17.6.2 | | 8.6.1 | 17.6.1 | diff --git a/chart/doc/quickstart/_index.md b/chart/doc/quickstart/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..48b4166904a63db53c5fa64155357f4e2957ed39 --- /dev/null +++ b/chart/doc/quickstart/_index.md @@ -0,0 +1,173 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Test the GitLab chart on GKE or EKS +--- + +This guide serves as a concise but complete documentation about how to install the +GitLab chart with default values on Google Kubernetes Engine (GKE) +or Amazon Elastic Kubernetes Service (EKS). + +By default, the GitLab chart includes an in-cluster PostgreSQL, Redis, and +MinIO deployment. Those are for trial purposes only and +**not recommended for use in production environments**. +If you wish to deploy these charts into production under sustained load, you +should follow the complete [installation guide](../installation/_index.md). + +## Prerequisites + +To complete this guide, you must have the following: + +- A domain you own, to which you can add a DNS record. +- A Kubernetes cluster. +- A working installation of `kubectl`. +- A working installation of Helm v3. 
+ +### Available domain + +You must have access to an internet-accessible domain to which you can add +a DNS record. This can be a sub-domain such as `poc.domain.com`, but the +Let's Encrypt servers must be able to resolve the addresses in order to +issue certificates. + +### Create a Kubernetes cluster + +A cluster with a total of at least eight virtual CPUs and 30 GB of RAM is recommended. + +You can either refer to your cloud providers' instructions on how to create a Kubernetes cluster, +or use the GitLab-provided scripts to [automate the cluster creation](../installation/cloud/_index.md). + +{{< alert type="warning" >}} + +Kubernetes nodes must use the x86-64 architecture. +Support for multiple architectures, including AArch64/ARM64, is under active development. +See [issue 2899](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2899) for more information. + +{{< /alert >}} + +### Install kubectl + +To install kubectl, see the [Kubernetes installation documentation](https://kubernetes.io/docs/tasks/tools/). +The documentation covers most operating systems and the Google +Cloud SDK, which you may have installed during the previous step. + +After you create the cluster, you must +[configure `kubectl`](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#generate_kubeconfig_entry) +before you can interact with the cluster from the command line. + +### Install Helm + +For this guide, we use the latest release of Helm v3 (v3.9.4 or later). +To install Helm, see the [Helm installation documentation](https://helm.sh/docs/intro/install/). + +## Add the GitLab Helm repository + +Add the GitLab Helm repository to `helm`'s configuration: + +```shell +helm repo add gitlab https://charts.gitlab.io/ +``` + +## Install GitLab + +Here's the beauty of what this chart is capable of. One command. Poof! All +of GitLab installed, and configured with SSL. + +To configure the chart, you need: + +- The domain or subdomain for GitLab to operate under. 
+- Your email address, so Let's Encrypt can issue a certificate. + +To install the chart, run the install command with two +`--set` arguments: + +```shell +helm install gitlab gitlab/gitlab \ + --set global.hosts.domain=DOMAIN \ + --set certmanager-issuer.email=me@example.com +``` + +This step can take several minutes in order for all resources +to be allocated, services to start, and access made available. + +After it's completed, you can proceed to collect the IP address that has +been dynamically allocated for the installed NGINX Ingress. + +## Retrieve the IP address + +You can use `kubectl` to fetch the address that has been dynamically been +allocated by GKE to the NGINX Ingress you've just installed and configured as +a part of the GitLab chart: + +```shell +kubectl get ingress -lrelease=gitlab +``` + +The output should look something like the following: + +```plaintext +NAME HOSTS ADDRESS PORTS AGE +gitlab-minio minio.domain.tld 35.239.27.235 80, 443 118m +gitlab-registry registry.domain.tld 35.239.27.235 80, 443 118m +gitlab-webservice gitlab.domain.tld 35.239.27.235 80, 443 118m +``` + +You'll notice that there are three entries, all with the same IP address. +Take this IP address, and add it to your DNS for the domain +you have chosen to use. You can add multiple records of type `A`, but for +simplicity we recommend a single "wildcard" record: + +- In Google Cloud DNS, create an `A` record with the name `*`. We also + suggest setting the TTL to `1` minute instead of `5` minutes. +- On AWS EKS, the address will be a URL rather than an IP address. + [Create a Route 53 alias record](https://repost.aws/knowledge-center/route-53-create-alias-records) + `*.domain.tld` pointing to this URL. + +## Sign in to GitLab + +You can access GitLab at `gitlab.domain.tld`. For example, if you set +`global.hosts.domain=my.domain.tld`, then you would visit `gitlab.my.domain.tld`. + +To sign in, you must collect the password for the `root` user. 
+This is automatically generated at installation time and stored in a Kubernetes +Secret. Let's fetch that password from the secret and decode it: + +```shell +kubectl get secret gitlab-gitlab-initial-root-password -ojsonpath='{.data.password}' | base64 --decode ; echo +``` + +You can now sign in to GitLab with username `root`, and the retrieved password. +You can change this password through the user preferences after logged in, we only +generate it so that we can secure the first login on your behalf. + +## Troubleshooting + +If you experience issues during this guide, here are a few likely items you should +be sure are working: + +1. The `gitlab.my.domain.tld` resolves to the IP address of the Ingress you retrieved. +1. If you get a certificate warning, there has been a problem with Let's Encrypt, + usually related to DNS, or the requirement to retry. + +For further troubleshooting tips, see our [troubleshooting](../troubleshooting/_index.md) guide. + +### Helm install returns `roles.rbac.authorization.k8s.io "gitlab-shared-secrets" is forbidden` + +After running: + +```shell +helm install gitlab gitlab/gitlab \ + --set global.hosts.domain=DOMAIN \ + --set certmanager-issuer.email=user@example.com +``` + +You might see an error similar to: + +```shell +Error: failed pre-install: warning: Hook pre-install templates/shared-secrets-rbac-config.yaml failed: roles.rbac.authorization.k8s.io "gitlab-shared-secrets" is forbidden: user "some-user@some-domain.com" (groups=["system:authenticated"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:[""], Resources:["secrets"], Verbs:["get" "list" "create" "patch"]} +``` + +This means that the `kubectl` context that you are using to connect to the cluster +does not have the permissions needed to create [RBAC](../installation/rbac.md) resources. 
diff --git a/chart/doc/releases/6_0.md b/chart/doc/releases/6_0.md index 0f9db5344e24cba512deb2810f665f368af0ef8c..45416d87345ddf5ee9b948d13d67a49a4a7c974d 100644 --- a/chart/doc/releases/6_0.md +++ b/chart/doc/releases/6_0.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: GitLab Helm chart 6.0 --- -# GitLab Helm chart 6.0 - Along with the `15.0` release of GitLab, we have bumped the chart version to `6.0`. ## Summary of major changes @@ -49,10 +48,13 @@ Please follow the [normal upgrade steps](../installation/upgrade.md). PostgreSQL 13 is the recommended version, but PostgreSQL 12.x is still supported. -NOTE: +{{< alert type="note" >}} + Although it is not required for this major release, you should start planning for an upgrade to PostgreSQL 13. +{{< /alert >}} + ## Release cadence We will be releasing a new version of the chart with each new GitLab patch. diff --git a/chart/doc/releases/7_0.md b/chart/doc/releases/7_0.md index 2cbaf8bd7f2dcbe5f1915717e0cc87ffccdb2ca7..dec93c744909a8e1341bda16b934893edc9d1ad9 100644 --- a/chart/doc/releases/7_0.md +++ b/chart/doc/releases/7_0.md @@ -2,11 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#designated-technical-writers - +title: GitLab Cloud Native Chart 7.0 --- -# GitLab Cloud Native Chart 7.0 - Along with the `16.0` release of GitLab, we have bumped the chart version to `7.0`. ## Summary of major changes @@ -21,7 +19,7 @@ release of the chart. Check the [version mapping details](../installation/versio GitLab now defaults to using two database connections. 
Prior to upgrading, you can check that PostgreSQL `max_connections` is high enough (using more than 50% of the available max connections). -You can verify this by running the following Rake task using [the Toolbox container](../charts/gitlab/toolbox/index.md#toolbox-included-tools): +You can verify this by running the following Rake task using [the Toolbox container](../charts/gitlab/toolbox/_index.md#toolbox-included-tools): ```shell gitlab-rake gitlab:db:decomposition:connection_status @@ -46,10 +44,13 @@ is done by upgrading [PostgreSQL chart](https://github.com/bitnami/charts/tree/m This is not a drop in replacement. Manual steps need to be performed to upgrade the database. The steps have been documented in the [upgrade steps](../installation/database_upgrade.md#steps-for-upgrading-the-bundled-postgresql). -NOTE: +{{< alert type="note" >}} + Note that PostgreSQL 13 is the minimum required PostgreSQL version in GitLab 16.0. PostgreSQL 12 is no longer supported by GitLab 16.0 and later. +{{< /alert >}} + ### Bundled certmanager The bundled certmanager chart is upgraded from 1.5.4 to 1.11.1. Depending on your cluster and tooling this diff --git a/chart/doc/releases/8_0.md b/chart/doc/releases/8_0.md index 2bb5f5a9ba943fbc2a9f323bdb8414ead2236150..ce5190e636d27b8dff8491e3e3b1ee43e3d61e26 100644 --- a/chart/doc/releases/8_0.md +++ b/chart/doc/releases/8_0.md @@ -2,10 +2,9 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#designated-technical-writers +title: GitLab Cloud Native Chart 8.0 --- -# GitLab Cloud Native Chart 8.0 - Along with the `17.0` release of GitLab, we have bumped the chart version to `8.0`. ## Summary of major changes @@ -13,7 +12,7 @@ Along with the `17.0` release of GitLab, we have bumped the chart version to `8. - The legacy runner registration workflow is now disabled by default. 
[Manual action is needed to migrate to the new registration workflow.](#runner-workflow-changes). - Support for PostgreSQL 13 has been removed. Make sure you are running PostgreSQL 14 or newer before upgrading. -See [GitLab 17 changes](https://docs.gitlab.com/ee/update/versions/gitlab_17_changes.html#1700) for all upgrade relevant changes. +See [GitLab 17 changes](https://docs.gitlab.com/update/versions/gitlab_17_changes/#1700) for all upgrade relevant changes. ## Upgrade path from 7.x @@ -95,8 +94,8 @@ If you're setting `nginx-ingress-geo.rbac.create: false`, the same applies. ### Runner workflow changes The legacy runner registration workflow is now disabled by default. You must -[migrate to the new registration workflow](https://docs.gitlab.com/ee/tutorials/automate_runner_creation/index.html) -or [re-enable the legacy workflow](https://docs.gitlab.com/ee/administration/settings/continuous_integration.html#enable-runner-registrations-tokens). +[migrate to the new registration workflow](https://docs.gitlab.com/tutorials/automate_runner_creation/) +or [re-enable the legacy workflow](https://docs.gitlab.com/administration/settings/continuous_integration/#enable-runner-registrations-tokens). -Refer to the [runner sub-chart documentation](../charts/gitlab/gitlab-runner/index.md#requirements) +Refer to the [runner sub-chart documentation](../charts/gitlab/gitlab-runner/_index.md#requirements) for migration instructions. 
diff --git a/chart/doc/releases/9_0.md b/chart/doc/releases/9_0.md new file mode 100644 index 0000000000000000000000000000000000000000..1f81f05d1c7e77935d63385edfac69c2fa8a6eed --- /dev/null +++ b/chart/doc/releases/9_0.md @@ -0,0 +1,53 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#designated-technical-writers +title: GitLab Cloud Native Chart 9.0 +--- + +Along with the `18.0` release of GitLab, we have bumped the chart version to `9.0`. + +## Summary of major changes + +- The bundled Prometheus chart was updated from 15.3 to 27.1. +- Along with the Prometheus chart upgrade, the Prometheus version was updated from 2.38 to 3.0. + +## Upgrade path from 8.x + +### Prometheus upgrade + +You can skip this section if you're not using the Prometheus subchart bundled with the GitLab chart. + +The bundled Prometheus subchart was updated from 15.3 to 27.1, which now bundles +Prometheus 3 instead of Prometheus 2.x. +Please check the [Prometheus 3 migration guide](https://prometheus.io/docs/prometheus/3.0/migration/), +if any of the features you use are impacted. + +We are highlighting here some information we consider most critical, but +for a fully comprehensive list of changes, please refer to the upstream +[Prometheus chart upgrade documentation](https://github.com/prometheus-community/helm-charts/tree/3aa3bbb4815854836033f42ff7fc41ed27d2904d/charts/prometheus#upgrading-chart). + +- The Prometheus chart updates several (selector) labels to align with + Helm and Kubernetes labeling best practices. Before upgrading you need + to delete the old workloads. 
+
+  ```shell
+  kubectl delete deployment -l app=prometheus,heritage=Helm,release=<release name>
+  kubectl delete statefulset -l app=prometheus,heritage=Helm,release=<release name>
+  kubectl delete daemonset -l app=prometheus,heritage=Helm,release=<release name>
+  ```
+
+  If you have other services depending on the labels on the Prometheus resources,
+  please update these accordingly.
+
+- If you enabled the bundled kube-state-metrics, alertmanager, node exporter
+  or pushgateway, you need to update your values per the upstream upgrade
+  changelogs:
+
+  - [16.0 changes](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#to-160),
+  - [17.0 changes](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#to-170),
+  - [18.0 changes](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#to-180), and
+  - [19.0 changes](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#to-190).
+
+- The `configmapReload.prometheus.extraArgs` setting is no longer compatible, as per the
+  upgrade to [20.0](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#to-200).
diff --git a/chart/doc/troubleshooting/_index.md b/chart/doc/troubleshooting/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..2f89a966f9704d39dcc847d5c8429c64b676bddf --- /dev/null +++ b/chart/doc/troubleshooting/_index.md @@ -0,0 +1,696 @@ +--- +stage: Systems +group: Distribution +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments +title: Troubleshooting the GitLab chart +--- + +## UPGRADE FAILED: Job failed: BackoffLimitExceeded + +If you received this error when [upgrading to the 6.0 version of the chart](../releases/6_0.md#upgrade-path-from-5x), +then it's probably because you didn't follow the right upgrade path, as you first need to upgrade to the latest 5.10.x version: + +1. List all your releases to identify your GitLab Helm release name (you will need to include `-n <namespace>` if your release was not deployed to the `default` K8s namespace): + + ```shell + helm ls + ``` + +1. Assuming that your GitLab Helm release is called `gitlab` you then need to look at the release history and identify the last successful revision (you can see the status of a revision under `DESCRIPTION`): + + ```shell + helm history gitlab + ``` + +1. Assuming your most recent successful revision is `1` use this command to roll back: + + ```shell + helm rollback gitlab 1 + ``` + +1. Re-run the upgrade command by replacing `<x>` with the appropriate chart version: + + ```shell + helm upgrade --version=5.10.<x> + ``` + +1. At this point you can use the `--version` option to pass a specific 6.x.x chart version or remove the option for upgrading to the latest version of GitLab: + + ```shell + helm upgrade --install gitlab gitlab/gitlab <other_options> + ``` + +More information about command line arguments can be found in our [Deploy using Helm](../installation/deployment.md#deploy-using-helm) section. 
+For mappings between chart versions and GitLab versions, read [GitLab version mappings](../installation/version_mappings.md). + +## UPGRADE FAILED: "$name" has no deployed releases + +This error occurs on your second install/upgrade if your initial install failed. + +If your initial install completely failed, and GitLab was never operational, you +should first purge the failed install before installing again. + +```shell +helm uninstall <release-name> +``` + +If instead, the initial install command timed out, but GitLab still came up successfully, +you can add the `--force` flag to the `helm upgrade` command to ignore the error +and attempt to update the release. + +Otherwise, if you received this error after having previously had successful deploys +of the GitLab chart, then you are encountering a bug. Please open an issue on our +[issue tracker](https://gitlab.com/gitlab-org/charts/gitlab/-/issues), and also check out +[issue #630](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/630) where we recovered our +CI server from this problem. + +## Error: this command needs 2 arguments: release name, chart path + +An error like this could occur when you run `helm upgrade` +and there are some spaces in the parameters. In the following +example, `Test Username` is the culprit: + +```shell +helm upgrade gitlab gitlab/gitlab --timeout 600s --set global.email.display_name=Test Username ... +``` + +To fix it, pass the parameters in single quotes: + +```shell +helm upgrade gitlab gitlab/gitlab --timeout 600s --set global.email.display_name='Test Username' ... +``` + +## Application containers constantly initializing + +If you experience Sidekiq, Webservice, or other Rails based containers in a constant +state of Initializing, you're likely waiting on the `dependencies` container to +pass. 
+
+If you check the logs of a given Pod specifically for the `dependencies` container,
+you may see the following repeated:
+
+```plaintext
+Checking database connection and schema version
+WARNING: This version of GitLab depends on gitlab-shell 8.7.1, ...
+Database Schema
+Current version: 0
+Codebase version: 20190301182457
+```
+
+This is an indication that the `migrations` Job has not yet completed. The purpose
+of this Job is to ensure both that the database is seeded and that all
+relevant migrations are in place. The application containers are attempting to
+wait for the database to be at or above their expected database version. This is
+to ensure that the application does not malfunction due to the schema not matching
+the expectations of the codebase.
+
+1. Find the `migrations` Job. `kubectl get job -lapp=migrations`
+1. Find the Pod being run by the Job. `kubectl get pod -lbatch.kubernetes.io/job-name=<job-name>`
+1. Examine the output, checking the `STATUS` column.
+
+If the `STATUS` is `Running`, continue. If the `STATUS` is `Completed`, the application containers should start shortly after the next check passes.
+
+Examine the logs from this pod. `kubectl logs <pod-name>`
+
+Any failures during the run of this job should be addressed. These will block
+the use of the application until resolved. Possible problems are:
+
+- Unreachable or failed authentication to the configured PostgreSQL database
+- Unreachable or failed authentication to the configured Redis services
+- Failure to reach a Gitaly instance
+
+## Applying configuration changes
+
+The following command will perform the necessary operations to apply any updates made to `gitlab.yaml`:
+
+```shell
+helm upgrade <release name> <chart path> -f gitlab.yaml
+```
+
+## Included GitLab Runner failing to register
+
+This can happen when the runner registration token has been changed in GitLab. (This often happens after you have restored a backup)
+
+1. 
Find the new shared runner token located on the `admin/runners` webpage of your GitLab installation. +1. Find the name of existing runner token Secret stored in Kubernetes + + ```shell + kubectl get secrets | grep gitlab-runner-secret + ``` + +1. Delete the existing secret + + ```shell + kubectl delete secret <runner-secret-name> + ``` + +1. Create the new secret with two keys, (`runner-registration-token` with your shared token, and an empty `runner-token`) + + ```shell + kubectl create secret generic <runner-secret-name> --from-literal=runner-registration-token=<new-shared-runner-token> --from-literal=runner-token="" + ``` + +## Too many redirects + +This can happen when you have TLS termination before the NGINX Ingress, and the tls-secrets are specified in the configuration. + +1. Update your values to set `global.ingress.annotations."nginx.ingress.kubernetes.io/ssl-redirect": "false"` + + Via a values file: + + ```yaml + # values.yaml + global: + ingress: + annotations: + "nginx.ingress.kubernetes.io/ssl-redirect": "false" + ``` + + Via the Helm CLI: + + ```shell + helm ... --set-string global.ingress.annotations."nginx.ingress.kubernetes.io/ssl-redirect"=false + ``` + +1. Apply the change. + +{{< alert type="note" >}} + +When using an external service for SSL termination, that service is responsible for redirecting to https (if so desired). + +{{< /alert >}} + +## Upgrades fail with Immutable Field Error + +### spec.clusterIP + +Prior to the 3.0.0 release of these charts, the `spec.clusterIP` property +[had been populated into several Services](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1710) +despite having no actual value (`""`). This was a bug, and causes problems with Helm 3's three-way +merge of properties. 
+ +Once the chart was deployed with Helm 3, there would be _no possible upgrade path_ unless one +collected the `clusterIP` properties from the various Services and populated those into the values +provided to Helm, or the affected services are removed from Kubernetes. + +The [3.0.0 release of this chart corrected this error](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1710), but it requires manual correction. + +This can be solved by simply removing all of the affected services. + +1. Remove all affected services: + + ```shell + kubectl delete services -lrelease=RELEASE_NAME + ``` + +1. Perform an upgrade via Helm. +1. Future upgrades will not face this error. + +{{< alert type="note" >}} + +This will change any dynamic value for the `LoadBalancer` for NGINX Ingress from this chart, if in use. +See [global Ingress settings documentation](../charts/globals.md#configure-ingress-settings) for more +details regarding `externalIP`. You may be required to update DNS records! + +{{< /alert >}} + +### spec.selector + +Sidekiq pods did not receive a unique selector prior to chart release +`3.0.0`. [The problems with this were documented in](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/663). + +Upgrades to `3.0.0` using Helm will automatically delete the old Sidekiq deployments and create new ones by appending `-v1` to the +name of the Sidekiq `Deployments`,`HPAs`, and `Pods`. + +If you continue to run into this error on the Sidekiq deployment when installing `3.0.0`, resolve these with the following +steps: + +1. Remove Sidekiq services + + ```shell + kubectl delete deployment --cascade -lrelease=RELEASE_NAME,app=sidekiq + ``` + +1. Perform an upgrade via Helm. + +### cannot patch "RELEASE-NAME-cert-manager" with kind Deployment + +Upgrading from **CertManager** version `0.10` introduced a number of +breaking changes. The old Custom Resource Definitions must be uninstalled +and removed from Helm's tracking and then re-installed. 
+ +The Helm chart attempts to do this by default but if you encounter this error +you may need to take manual action. + +If this error message was encountered, then upgrading requires one more step +than normal in order to ensure the new Custom Resource Definitions are +actually applied to the deployment. + +1. Remove the old **CertManager** Deployment. + + ```shell + kubectl delete deployments -l app=cert-manager --cascade + ``` + +1. Run the upgrade again. This time install the new Custom Resource Definitions + + ```shell + helm upgrade --install --values - YOUR-RELEASE-NAME gitlab/gitlab < <(helm get values YOUR-RELEASE-NAME) + ``` + +### cannot patch `gitlab-kube-state-metrics` with kind Deployment + +Upgrading from **Prometheus** version `11.16.9` to `15.0.4` changes the selector labels +used on the [kube-state-metrics Deployment](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics), +which is disabled by default (`prometheus.kubeStateMetrics.enabled=false`). + +If this error message is encountered, meaning `prometheus.kubeStateMetrics.enabled=true`, then upgrading +requires [an additional step](https://artifacthub.io/packages/helm/prometheus-community/prometheus#to-15-0): + +1. Remove the old **kube-state-metrics** Deployment. + + ```shell + kubectl delete deployments.apps -l app.kubernetes.io/instance=RELEASE_NAME,app.kubernetes.io/name=kube-state-metrics --cascade=orphan + ``` + +1. Perform an upgrade via Helm. + +## `ImagePullBackOff`, `Failed to pull image` and `manifest unknown` errors + +If you are using [`global.gitlabVersion`](../charts/globals.md#gitlab-version), +start by removing that property. +Check the [version mappings between the chart and GitLab](../installation/version_mappings.md) +and specify a compatible version of the `gitlab/gitlab` chart in your `helm` command. + +## UPGRADE FAILED: "cannot patch ..." after `helm 2to3 convert` + +This is a known issue. 
After migrating a Helm 2 release to Helm 3, the subsequent upgrades may fail. +You can find the full explanation and workaround in [Migrating from Helm v2 to Helm v3](../installation/migration/helm.md#known-issues). + +## UPGRADE FAILED: type mismatch on mailroom: `%!t(<nil>)` + +An error like this can happen if you do not provide a valid map for a key that expects a map. + +For example, the configuration below will cause this error: + +```yaml +gitlab: + mailroom: +``` + +To fix this, either: + +1. Provide a valid map for `gitlab.mailroom`. +1. Remove the `mailroom` key entirely. + +Note that for optional keys, an empty map (`{}`) is a valid value. + +<!-- markdownlint-disable line-length --> + +## Restoration failure: `ERROR: cannot drop view pg_stat_statements because extension pg_stat_statements requires it` + +You may face this error when restoring a backup on your Helm chart instance. Use the following steps as a workaround: + +1. Inside your `toolbox` pod open the DB console: + + ```shell + /srv/gitlab/bin/rails dbconsole -p + ``` + +1. Drop the extension: + + ```shell + DROP EXTENSION pg_stat_statements; + ``` + +1. Perform the restoration process. +1. After the restoration is complete, re-create the extension in the DB console: + + ```shell + CREATE EXTENSION pg_stat_statements; + ``` + +If you encounter the same issue with the `pg_buffercache` extension, +follow the same steps above to drop and re-create it. + +You can find more details about this error in issue [#2469](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2469). 
+ +<!-- markdownlint-enable line-length --> + +## Bundled PostgreSQL pod fails to start: `database files are incompatible with server` + +The following error message may appear in the bundled PostgreSQL pod after upgrading to a new version of the GitLab Helm chart: + +```plaintext +gitlab-postgresql FATAL: database files are incompatible with server +gitlab-postgresql DETAIL: The data directory was initialized by PostgreSQL version 11, which is not compatible with this version 12.7. +``` + +To address this, perform a [Helm rollback](https://helm.sh/docs/helm/helm_rollback/) to the previous +version of the chart and then follow the steps in the [upgrade guide](../installation/upgrade.md) to +upgrade the bundled PostgreSQL version. Once PostgreSQL is properly upgraded, try the GitLab Helm +chart upgrade again. + +## Bundled NGINX Ingress pod fails to start: `Failed to watch *v1beta1.Ingress` + +The following error message may appear in the bundled NGINX Ingress controller pod if running Kubernetes version 1.22 or later: + +```plaintext +Failed to watch *v1beta1.Ingress: failed to list *v1beta1.Ingress: the server could not find the requested resource +``` + +To address this, ensure the Kubernetes version is 1.21 or older. See +[#2852](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2852) for +more information regarding NGINX Ingress support for Kubernetes 1.22 or later. + +## Increased load on `/api/v4/jobs/request` endpoint + +You may face this issue if the option `workhorse.keywatcher` was set to `false` for the deployment servicing `/api/*`. +Use the following steps to verify: + +1. Access the container `gitlab-workhorse` in the pod serving `/api/*`: + + ```shell + kubectl exec -it --container=gitlab-workhorse <gitlab_api_pod> -- /bin/bash + ``` + +1. Inspect the file `/srv/gitlab/config/workhorse-config.toml`. 
The `[redis]` configuration might be missing:
+
+   ```shell
+   grep '\[redis\]' /srv/gitlab/config/workhorse-config.toml
+   ```
+
+If the `[redis]` configuration is not present, the `workhorse.keywatcher` flag was set to `false` during deployment
+thus causing the extra load in the `/api/v4/jobs/request` endpoint. To fix this, enable the `keywatcher` in the
+`webservice` chart:
+
+```yaml
+workhorse:
+  keywatcher: true
+```
+
+## Git over SSH: `the remote end hung up unexpectedly`
+
+Git operations over SSH might fail intermittently with the following error:
+
+```plaintext
+fatal: the remote end hung up unexpectedly
+fatal: early EOF
+fatal: index-pack failed
+```
+
+There are a number of potential causes for this error:
+
+- **Network timeouts**:
+
+  Git clients sometimes open a connection and leave it idling, like when compressing objects.
+  Settings like `timeout client` in HAProxy might cause these idle connections to be terminated.
+
+  To keep these connections alive, you can set a keepalive in `sshd`:
+
+  ```yaml
+  gitlab:
+    gitlab-shell:
+      config:
+        clientAliveInterval: 15
+  ```
+
+- **`gitlab-shell` memory**:
+
+  By default, the chart does not set a limit on GitLab Shell memory.
+  If `gitlab.gitlab-shell.resources.limits.memory` is set too low, Git operations over SSH may fail with these errors.
+
+  Run `kubectl describe nodes` to confirm that this is caused by memory limits rather than
+  timeouts over the network.
+ + ```plaintext + System OOM encountered, victim process: gitlab-shell + Memory cgroup out of memory: Killed process 3141592 (gitlab-shell) + ``` + +## YAML configuration: `mapping values are not allowed in this context` + +The following error message may appear when YAML configuration contains leading spaces: + +```plaintext +template: /var/opt/gitlab/templates/workhorse-config.toml.tpl:16:98: + executing \"/var/opt/gitlab/templates/workhorse-config.toml.tpl\" at <data.YAML>: + error calling YAML: + yaml: line 2: mapping values are not allowed in this context +``` + +To address this, ensure that there are no leading spaces in configuration. + +For example, change this: + +```yaml + key1: value1 + key2: value2 +``` + +... to this: + +```yaml +key1: value1 +key2: value2 +``` + +## TLS and certificates + +If your GitLab instance needs to trust a private TLS certificate authority, GitLab might +fail to handshake with other services like object storage, Elasticsearch, Jira, or Jenkins: + +```plaintext +error: certificate verify failed (unable to get local issuer certificate) +``` + +Partial trust of certificates signed by private certificate authorities can occur if: + +- The supplied certificates are not in separate files. +- The certificates init container doesn't perform all the required steps. + +Also, GitLab is mostly written in Ruby on Rails and Go, and each language's +TLS libraries work differently. This difference can result in issues like job logs +failing to render in the GitLab UI but raw job logs downloading without issue. + +Additionally, depending on the `proxy_download` configuration, your browser is +redirected to the object storage with no issues if the trust store is correctly configured. +At the same time, TLS handshakes by one or more GitLab components could still fail. + +### Certificate trust setup and troubleshooting + +As part of troubleshooting certificate issues, be sure to: + +- Create secrets for each certificate you need to trust. 
+- Provide only one certificate per file.
+
+  ```plaintext
+  kubectl create secret generic custom-ca --from-file=unique_name=/path/to/cert
+  ```
+
+  In this example, the certificate is stored using the key name `unique_name`.
+
+If you supply a bundle or a chain, some GitLab components won't work.
+
+Query secrets with `kubectl get secrets` and `kubectl describe secrets/secretname`,
+which shows the key name for the certificate under `Data`.
+
+Supply additional certificates to trust using `global.certificates.customCAs`
+[in the chart globals](../charts/globals.md#custom-certificate-authorities).
+
+When a pod is deployed, an init container mounts the certificates and sets them up so the GitLab
+components can use them. The init container is `registry.gitlab.com/gitlab-org/build/cng/alpine-certificates`.
+
+Additional certificates are mounted into the container at `/usr/local/share/ca-certificates`,
+using the secret key name as the certificate filename.
+
+The init container runs `/scripts/bundle-certificates` ([source](https://gitlab.com/gitlab-org/build/CNG-mirror/-/blob/master/certificates/scripts/bundle-certificates)).
+In that script, `update-ca-certificates`:
+
+1. Copies custom certificates from `/usr/local/share/ca-certificates` to `/etc/ssl/certs`.
+1. Compiles a bundle `ca-certificates.crt`.
+1. Generates hashes for each certificate and creates a symlink using the hash,
+   which is required for Rails. Certificate bundles are skipped with a warning:
+
+   ```plaintext
+   WARNING: unique_name does not contain exactly one certificate or CRL: skipping
+   ```
+
+[Troubleshoot the init container's status and logs](https://kubernetes.io/docs/tasks/debug/debug-application/debug-init-containers/).
+For example, to view the logs for the certificates init container and check for warnings: + +```plaintext +kubectl logs gitlab-webservice-default-pod -c certificates +``` + +### Check on the Rails console + +Use the toolbox pod to verify if Rails trusts the certificates you supplied. + +1. Start a Rails console (replace `<namespace>` with the namespace where GitLab is installed): + + ```shell + kubectl exec -ti $(kubectl get pod -n <namespace> -lapp=toolbox -o jsonpath='{.items[0].metadata.name}') -n <namespace> -- bash + /srv/gitlab/bin/rails console + ``` + +1. Verify the location Rails checks for certificate authorities: + + ```ruby + OpenSSL::X509::DEFAULT_CERT_DIR + ``` + +1. Execute an HTTPS query in the Rails console: + + ```ruby + ## Configure a web server to connect to: + uri = URI.parse("https://myservice.example.com") + + require 'openssl' + require 'net/http' + Rails.logger.level = 0 + OpenSSL.debug=1 + http = Net::HTTP.new(uri.host, uri.port) + http.set_debug_output($stdout) + http.use_ssl = true + + http.verify_mode = OpenSSL::SSL::VERIFY_PEER + # http.verify_mode = OpenSSL::SSL::VERIFY_NONE # TLS verification disabled + + response = http.request(Net::HTTP::Get.new(uri.request_uri)) + ``` + +### Troubleshoot the init container + +Run the certificates container using Docker. + +1. Set up a directory structure and populate it with your certificates: + + ```shell + mkdir -p etc/ssl/certs usr/local/share/ca-certificates + + # The secret name is: my-root-ca + # The key name is: corporate_root + + kubectl get secret my-root-ca -ojsonpath='{.data.corporate_root}' | \ + base64 --decode > usr/local/share/ca-certificates/corporate_root + + # Check the certificate is correct: + + openssl x509 -in usr/local/share/ca-certificates/corporate_root -text -noout + ``` + +1. Determine the correct container version: + + ```shell + kubectl get deployment -lapp=webservice -ojsonpath='{.items[0].spec.template.spec.initContainers[0].image}' + ``` + +1. 
Run container, which performs the preparation of `etc/ssl/certs` content: + + ```shell + docker run -ti --rm \ + -v $(pwd)/etc/ssl/certs:/etc/ssl/certs \ + -v $(pwd)/usr/local/share/ca-certificates:/usr/local/share/ca-certificates \ + registry.gitlab.com/gitlab-org/build/cng/gitlab-base:v15.10.3 + ``` + +1. Check your certificates have been correctly built: + + - `etc/ssl/certs/corporate_root.pem` should have been created. + - There should be a hashed filename, which is a symlink to the certificate itself (such as `etc/ssl/certs/1234abcd.0`). + - The file and the symbolic link should display with: + + ```shell + ls -l etc/ssl/certs/ | grep corporate_root + ``` + + For example: + + ```plaintext + lrwxrwxrwx 1 root root 20 Oct 7 11:34 28746b42.0 -> corporate_root.pem + -rw-r--r-- 1 root root 1948 Oct 7 11:34 corporate_root.pem + ``` + +## `308: Permanent Redirect` causing a redirect loop + +`308: Permanent Redirect` can happen if your Load Balancer is configured to send unencrypted traffic (HTTP) to NGINX. +Because NGINX defaults to redirecting `HTTP` to `HTTPS`, you may end up in a "redirect loop". + +To fix this, [enable NGINX's `use-forwarded-headers` setting](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#use-forwarded-headers). 
+ +## "Invalid Word" errors in the `nginx-controller` logs and `404` errors + +After upgrading to Helm chart 6.6 or later, you might experience `404` return +codes when visiting your GitLab or third-party domains for applications installed +in your cluster and are also seeing "invalid word" errors in the +`gitlab-nginx-ingress-controller` logs: + +```console +gitlab-nginx-ingress-controller-899b7d6bf-688hr controller W1116 19:03:13.162001 7 store.go:846] skipping ingress gitlab/gitlab-minio: nginx.ingress.kubernetes.io/configuration-snippet annotation contains invalid word proxy_pass +gitlab-nginx-ingress-controller-899b7d6bf-688hr controller W1116 19:03:13.465487 7 store.go:846] skipping ingress gitlab/gitlab-registry: nginx.ingress.kubernetes.io/configuration-snippet annotation contains invalid word proxy_pass +gitlab-nginx-ingress-controller-899b7d6bf-lqcks controller W1116 19:03:12.233577 6 store.go:846] skipping ingress gitlab/gitlab-kas: nginx.ingress.kubernetes.io/configuration-snippet annotation contains invalid word proxy_pass +gitlab-nginx-ingress-controller-899b7d6bf-lqcks controller W1116 19:03:12.536534 6 store.go:846] skipping ingress gitlab/gitlab-webservice-default: nginx.ingress.kubernetes.io/configuration-snippet annotation contains invalid word proxy_pass +gitlab-nginx-ingress-controller-899b7d6bf-lqcks controller W1116 19:03:12.848844 6 store.go:846] skipping ingress gitlab/gitlab-webservice-default-smartcard: nginx.ingress.kubernetes.io/configuration-snippet annotation contains invalid word proxy_pass +gitlab-nginx-ingress-controller-899b7d6bf-lqcks controller W1116 19:03:13.161640 6 store.go:846] skipping ingress gitlab/gitlab-minio: nginx.ingress.kubernetes.io/configuration-snippet annotation contains invalid word proxy_pass +gitlab-nginx-ingress-controller-899b7d6bf-lqcks controller W1116 19:03:13.465425 6 store.go:846] skipping ingress gitlab/gitlab-registry: nginx.ingress.kubernetes.io/configuration-snippet annotation contains invalid word 
proxy_pass +``` + +In that case, review your GitLab values and any third-party Ingress objects for the use +of [configuration snippets](https://kubernetes.github.io/ingress-nginx/examples/customization/configuration-snippets/). +You may need to adjust or modify the `nginx-ingress.controller.config.annotation-value-word-blocklist` +setting. + +See [Annotation value word blocklist](../charts/nginx/_index.md#annotation-value-word-blocklist) for additional details. + +### Volume mount takes a long time + +Mounting large volumes, such as the `gitaly` or `toolbox` chart volumes, can take a long time because Kubernetes +recursively changes the permissions of the volume's contents to match the Pod's `securityContext`. + +Starting with Kubernetes 1.23 you can set the `securityContext.fsGroupChangePolicy` to `OnRootMismatch` to mitigate +this issue. This flag is supported by all GitLab subcharts. + +For example for the Gitaly subchart: + +```yaml +gitlab: + gitaly: + securityContext: + fsGroupChangePolicy: "OnRootMismatch" +``` + +See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods), +for more details. + +For Kubernetes versions not supporting `fsGroupChangePolicy` you can mitigate the +issue by changing or fully deleting the settings for the `securityContext`. + +```yaml +gitlab: + gitaly: + securityContext: + fsGroup: "" + runAsUser: "" +``` + +{{< alert type="note" >}} + +The example syntax eliminates the `securityContext` setting entirely. +Setting `securityContext: {}` or `securityContext:` does not work due +to the way Helm merges default values with user provided configuration. + +{{< /alert >}} + +### Intermittent 502 errors + +When a request being handled by a Puma worker crosses the memory limit threshold, it is killed by the node's OOMKiller. +However, killing the request does not necessarily kill or restart the webservice pod itself. 
This situation causes the request to return a `502` timeout. +In the logs, this appears as a Puma worker being created shortly after the `502` error is logged. + +```shell +2024-01-19T14:12:08.949263522Z {"correlation_id":"XXXXXXXXXXXX","duration_ms":1261,"error":"badgateway: failed to receive response: context canceled".... +2024-01-19T14:12:24.214148186Z {"component": "gitlab","subcomponent":"puma.stdout","timestamp":"2024-01-19T14:12:24.213Z","pid":1,"message":"- Worker 2 (PID: 7414) booted in 0.84s, phase: 0"} +``` + +To solve this problem, [raise memory limits for the webservice pods](../charts/gitlab/webservice/_index.md#memory-requestslimits). diff --git a/chart/doc/troubleshooting/kubernetes_cheat_sheet.md b/chart/doc/troubleshooting/kubernetes_cheat_sheet.md index a5d5499b315e7308330636dfb31e269406cfd92e..0f5864971f29204f49b4991c26c84a258c5fdb40 100644 --- a/chart/doc/troubleshooting/kubernetes_cheat_sheet.md +++ b/chart/doc/troubleshooting/kubernetes_cheat_sheet.md @@ -2,22 +2,26 @@ stage: Systems group: Distribution info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments -ignore_in_report: true +title: Kubernetes cheat sheet --- -# Kubernetes cheat sheet +{{< details >}} -DETAILS: -**Tier:** Free, Premium, Ultimate -**Offering:** Self-managed +- Tier: Free, Premium, Ultimate +- Offering: GitLab Self-Managed + +{{< /details >}} This is a list of useful information regarding Kubernetes that the GitLab Support Team sometimes uses while troubleshooting. GitLab is making this public, so that anyone can make use of the Support team's collected knowledge -WARNING: +{{< alert type="warning" >}} + These commands **can alter or break** your Kubernetes components so use these at your own risk. 
+{{< /alert >}} + If you are on a [paid tier](https://about.gitlab.com/pricing/) and are not sure how to use these commands, it is best to [contact Support](https://about.gitlab.com/support/) and they will assist you with any issues you are having. @@ -225,7 +229,7 @@ all Kubernetes resources and dependent charts: ## Installation of minimal GitLab configuration via minikube on macOS -This section is based on [Developing for Kubernetes with minikube](../development/minikube/index.md) +This section is based on [Developing for Kubernetes with minikube](../development/minikube/_index.md) and [Helm](../installation/tools.md). Refer to those documents for details. @@ -298,20 +302,26 @@ but commented out to help encourage others to add to it in the future. --> ## Patching the Rails code in the `toolbox` pod -WARNING: +{{< alert type="warning" >}} + This task is not something that should be regularly performed. Use it at your own risk. +{{< /alert >}} + Patching operational GitLab service pods requires building new images, with the modified source code inside. These can _not_ be directly patched. -The [`toolbox` / `task-runner` pod](../charts/gitlab/toolbox/index.md) has everything needed to operate as a Rails-based pod, without interfering with other normal service operations. You can use it to run independent tasks, and to modify the source code temporarily to perform some tasks. +The [`toolbox` / `task-runner` pod](../charts/gitlab/toolbox/_index.md) has everything needed to operate as a Rails-based pod, without interfering with other normal service operations. You can use it to run independent tasks, and to modify the source code temporarily to perform some tasks. + +{{< alert type="note" >}} -NOTE: If you make any changes using the `toolbox` pod, those will not be persisted if the pod is restarted. They're only present for the life of the container's operation. +{{< /alert >}} + To patch the source code in the `toolbox` pod: 1. 
Fetch the desired `.patch` file to be applied: - - Either download the diff of a merge request directly as a [patch file](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#download-merge-request-changes-as-a-patch-file). + - Either download the diff of a merge request directly as a [patch file](https://docs.gitlab.com/user/project/merge_requests/reviews/#download-merge-request-changes-as-a-patch-file). - Or, fetch the diff directly using `curl`. Replace `<mr_iid>` below with the IID of the merge request, or change the URL to point to a raw snippet: ```shell diff --git a/chart/examples/objectstorage/rails.azurerm.yaml b/chart/examples/objectstorage/rails.azurerm.yaml index 04658ceda530fa7d99836f75a6ffbedabd67db07..5ca9ee67413c0983156964521a21f6f1b198f127 100644 --- a/chart/examples/objectstorage/rails.azurerm.yaml +++ b/chart/examples/objectstorage/rails.azurerm.yaml @@ -6,5 +6,7 @@ provider: AzureRM # Specify storage account/access azure_storage_account_name: YOUR_AZURE_STORAGE_ACCOUNT_NAME +# This can be omitted if a workload or managed identity is used azure_storage_access_key: YOUR_AZURE_STORAGE_ACCOUNT_KEY +# Optional azure_storage_domain: blob.core.windows.net diff --git a/chart/examples/values-external-objectstorage.yaml b/chart/examples/values-external-objectstorage.yaml index a2e7ec2d421436b96ea747cc0a7442d6752ddf35..72bb6ea1e14480a4a16dc26cbab53242ffd9a114 100644 --- a/chart/examples/values-external-objectstorage.yaml +++ b/chart/examples/values-external-objectstorage.yaml @@ -3,32 +3,44 @@ global: minio: enabled: false - registry: - bucket: gitlab-registry-storage appConfig: - lfs: - bucket: gitlab-lfs-storage - connection: # https://gitlab.com/gitlab-org/charts/gitlab/blob/master/doc/charts/globals.md#connection - secret: objectstore-lfs - key: connection artifacts: bucket: gitlab-artifacts-storage + backups: + bucket: gitlab-backup-storage + tmpBucket: gitlab-tmp-storage + ciSecureFiles: + bucket: gitlab-ci-secure-files-storage + enabled: 
true + dependencyProxy: + bucket: gitlab-dependency-proxy + enabled: true + externalDiffs: + bucket: gitlab-external-diffs + enabled: true + lfs: + bucket: gitlab-lfs-storage + object_store: connection: - secret: objectstore-artifacts - key: connection - uploads: - bucket: gitlab-uploads-storage - connection: - secret: objectstore-uploads - key: connection + secret: gitlab-object-storage + enabled: true + proxy_download: false packages: bucket: gitlab-packages-storage + terraformState: + bucket: gitlab-terraform-state-storage + enabled: true + uploads: + bucket: gitlab-uploads-storage + pages: + enabled: true + objectStore: + enabled: true + bucket: gitlab-pages-storage connection: - secret: objectstore-packages - key: connection - backups: - bucket: gitlab-backup-storage - tmpBucket: gitlab-tmp-storage + secret: objectstore-pages + registry: + bucket: gitlab-registry-storage gitlab: toolbox: backups: diff --git a/chart/requirements.lock b/chart/requirements.lock index 9b15b2a410f083229da88b4c1782d73f8fd1daed..d6b5d85169c1c3ce1467a464db0d03e224873859 100644 --- a/chart/requirements.lock +++ b/chart/requirements.lock @@ -13,7 +13,7 @@ dependencies: version: '*.*.*' - name: cert-manager repository: https://charts.jetstack.io/ - version: v1.12.14 + version: v1.12.15 - name: prometheus repository: https://prometheus-community.github.io/helm-charts version: 15.18.0 @@ -34,7 +34,7 @@ dependencies: version: '*.*.*' - name: gitlab-zoekt repository: https://charts.gitlab.io/ - version: 1.4.3 + version: 1.5.0 - name: gluon repository: oci://registry1.dso.mil/bigbang version: 0.5.14 @@ -44,5 +44,5 @@ dependencies: - name: kubernetes-ingress repository: https://haproxytech.github.io/helm-charts version: 1.32.0 -digest: sha256:c426626626761ac60ff8acc301c56da4af0a16aaf92c915c0ff04c40a30ea109 -generated: "2025-02-14T16:09:31.555691-05:00" +digest: sha256:31256a86c719d1bb6b85acd528330ec34ef40559de04edd06b7ec16d1ea021b7 +generated: "2025-02-28T16:52:22.986303-06:00" diff --git 
a/chart/requirements.yaml b/chart/requirements.yaml index 81c452430c5bc2176e9d6f7261465e893742c68a..92449dc171c6e172dae5010044d8477405cf5d48 100644 --- a/chart/requirements.yaml +++ b/chart/requirements.yaml @@ -8,7 +8,7 @@ dependencies: - name: registry version: '*.*.*' - name: cert-manager - version: v1.12.14 + version: v1.12.15 repository: https://charts.jetstack.io/ condition: certmanager.install alias: certmanager @@ -36,7 +36,7 @@ dependencies: version: '*.*.*' alias: nginx-ingress-geo - name: gitlab-zoekt - version: 1.4.3 + version: 1.5.0 repository: https://charts.gitlab.io/ condition: gitlab-zoekt.install - name: gluon diff --git a/chart/scripts/ci/autodevops.sh b/chart/scripts/ci/autodevops.sh old mode 100644 new mode 100755 diff --git a/chart/scripts/ci/feature_spec_setup.sh b/chart/scripts/ci/feature_spec_setup.sh new file mode 100755 index 0000000000000000000000000000000000000000..2e816cfc4970e25f0f486c1f2b022d4f0e51c5fe --- /dev/null +++ b/chart/scripts/ci/feature_spec_setup.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +mkdir -p /etc/gitlab/minio + +kubectl get secret ${RELEASE_NAME}-minio-secret -o jsonpath='{.data.accesskey}' | base64 --decode > /etc/gitlab/minio/accesskey +kubectl get secret ${RELEASE_NAME}-minio-secret -o jsonpath='{.data.secretkey}' | base64 --decode > /etc/gitlab/minio/secretkey diff --git a/chart/scripts/ci/install_spec_dependencies.sh b/chart/scripts/ci/install_spec_dependencies.sh new file mode 100755 index 0000000000000000000000000000000000000000..9c40f8bec7466244c57126df17f431e2efc1ba5a --- /dev/null +++ b/chart/scripts/ci/install_spec_dependencies.sh @@ -0,0 +1,82 @@ +#!/bin/bash +set -e + +export DEBIAN_FRONTEND=noninteractive +HELM_VERSION=${HELM_VERSION:-3.10.3} +GOMPLATE_VERSION=${GOMPLATE_VERSION:-v3.11.4} +DOCKER_VERSION="24.0.9-1" +DEBIAN_VERSION_NUMBER=${DEBIAN_VERSION_NUMBER:-12} +DEBIAN_VERSION=${DEBIAN_VERSION:-"bookworm"} +DOCKER_DEB_VERSION="5:${DOCKER_VERSION}~debian.${DEBIAN_VERSION_NUMBER}~${DEBIAN_VERSION}"
+KUBECTL_VERSION=${KUBECTL_VERSION:-1.28.3} +TARGET_DIR=${TARGET_DIR:-"/usr/local/bin"} + +apt-get update -qq +apt-get install -y --no-install-recommends \ + curl ca-certificates + +DOCKER_INSTALLED_VERSION="" +if command -v docker; then + DOCKER_INSTALLED_VERSION=$(docker version --format '{{ .Client.Version }}') + echo "Docker ${DOCKER_INSTALLED_VERSION} already installed" + echo "Expected version: ${DOCKER_VERSION}" +fi + +if [ "${STRICT_VERSIONS:-false}" == "true" ] && [ "${DOCKER_INSTALLED_VERSION}" != "${DOCKER_VERSION}" ] || [ -z "${DOCKER_INSTALLED_VERSION}" ]; then + echo "Installing Docker version ${DOCKER_DEB_VERSION}" + curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null + + apt-get update -qq + apt install -y docker-ce-cli=${DOCKER_DEB_VERSION} +fi +# Sometimes, `docker:dind` service is not ready yet, causing exit code of 1 +# We only care about the client, anyways! 
+docker version --format 'Effective: docker-{{ .Client.Version }}' || true + +GOMPLATE_INSTALLED_VERSION="" +if command -v gomplate; then + GOMPLATE_INSTALLED_VERSION=$(gomplate -v | cut -d' ' -f3) + echo "gomplate-${GOMPLATE_INSTALLED_VERSION} already installed" + echo "Expected version: ${GOMPLATE_VERSION}" +fi + +if [ "${STRICT_VERSIONS:-false}" == "true" ] && [ "${GOMPLATE_INSTALLED_VERSION}" != "${GOMPLATE_VERSION}" ] || [ -z "${GOMPLATE_INSTALLED_VERSION}" ]; then + echo "Installing gomplate-${GOMPLATE_VERSION}" + curl -o gomplate -sSL https://github.com/hairyhenderson/gomplate/releases/download/${GOMPLATE_VERSION}/gomplate_linux-amd64 + chmod +x gomplate + mv gomplate ${TARGET_DIR}/gomplate +fi +echo -n "Effective: "; gomplate -v + + +HELM_INSTALLED_VERSION="" +if command -v helm; then + echo "Helm already installed" + echo "Expected version: ${HELM_VERSION}" + HELM_INSTALLED_VERSION=$(helm version --template '{{.Version}}' | sed -e 's/^v//' ) + echo "Installed version: ${HELM_INSTALLED_VERSION}" +fi + +if [ "${STRICT_VERSIONS:-false}" == "true" ] && [ "${HELM_INSTALLED_VERSION}" != "${HELM_VERSION}" ] || [ -z "${HELM_INSTALLED_VERSION}" ]; then + echo "Installing helm-${HELM_VERSION}" + curl -Ls https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz | tar zxf - + chmod +x linux-amd64/helm + mv linux-amd64/helm ${TARGET_DIR}/helm + rm -rf linux-amd64/ +fi + +KUBECTL_INSTALLED_VERSION="" +if command -v kubectl; then + echo "Kubectl already installed" + echo "Expected version: ${KUBECTL_VERSION}" + KUBECTL_INSTALLED_VERSION=$(kubectl version --client=true -o yaml | awk '/gitVersion/ { sub("^v","",$2); print $2; }') + echo "Installed kubectl version: ${KUBECTL_INSTALLED_VERSION}" +fi + +if [ "${STRICT_VERSIONS:-false}" == "true" ] && [ "${KUBECTL_INSTALLED_VERSION}" != "${KUBECTL_VERSION}" ] || [ -z "${KUBECTL_INSTALLED_VERSION}" ]; then + echo "Installing kubectl-${KUBECTL_VERSION}" + curl -LsO 
https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl + chmod +x kubectl + mv kubectl ${TARGET_DIR}/kubectl +fi diff --git a/chart/scripts/ci/integration_spec_setup.sh b/chart/scripts/ci/integration_spec_setup.sh new file mode 100755 index 0000000000000000000000000000000000000000..425248a23870a857a17508ff2c24ae4962e7d023 --- /dev/null +++ b/chart/scripts/ci/integration_spec_setup.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +MAX_HELM_REPO_UPDATE_ATTEMPTS=3 +HELM_REPO_WAIT_TIMER=5 + +while (( MAX_HELM_REPO_UPDATE_ATTEMPTS >= 0 )); do + if helm dependency update; then + exit 0 + fi + + echo "Failed to update dependency list, trying again in ${HELM_REPO_WAIT_TIMER} seconds..." + sleep "${HELM_REPO_WAIT_TIMER}" + (( MAX_HELM_REPO_UPDATE_ATTEMPTS-- )) +done + +# Something has gone very wrong. +echo "Failed to update helm dependencies." +exit 1 diff --git a/chart/scripts/ci/qa.sh b/chart/scripts/ci/qa.sh old mode 100644 new mode 100755 diff --git a/chart/scripts/ci/run_specs.sh b/chart/scripts/ci/run_specs.sh new file mode 100755 index 0000000000000000000000000000000000000000..f3c057f46cf0fd338913ab2a8d3f02973a439bf7 --- /dev/null +++ b/chart/scripts/ci/run_specs.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +if [[ -n "${VARIABLES_FILE}" ]]; then + source "${VARIABLES_FILE}" + ./scripts/ci/feature_spec_setup.sh +else + ./scripts/ci/integration_spec_setup.sh +fi + +bundle config set --local path 'gems' +bundle config set --local frozen 'true' +bundle install -j $(nproc) + +# For tests not being run on a cluster, use knapsack for parallelizing +if [[ "${RSPEC_TAGS}" == "~type:feature" ]]; then + echo "Here" + echo "{}" > knapsack_rspec_report.json + bundle exec rake "knapsack:rspec[--color --format documentation --tag '${RSPEC_TAGS}']" +else + bundle exec rspec -c -f d spec -t "${RSPEC_TAGS}" +fi diff --git a/chart/scripts/ci/vcluster.sh b/chart/scripts/ci/vcluster.sh index 
ef6148415424b79853d4fcc0ad8d8a5f4fc84a88..1f01b361b2d52d775fbb2757860fb156c33d2634 100755 --- a/chart/scripts/ci/vcluster.sh +++ b/chart/scripts/ci/vcluster.sh @@ -9,18 +9,33 @@ function cluster_connect() { fi } +function vcluster_install() { + if [ -z "${VCLUSTER_VERSION}" ] || [ "${VCLUSTER_VERSION,,}" == "default" ]; then + echo "No version specified, using default image version" + else + echo "Install vcluster version ${VCLUSTER_VERSION}" + curl -Lo /tmp/vcluster "https://github.com/loft-sh/vcluster/releases/download/v${VCLUSTER_VERSION}/vcluster-linux-amd64" + install -c -m 0755 /tmp/vcluster /usr/local/bin + fi + vcluster version +} + function vcluster_name() { printf ${VCLUSTER_NAME:0:52} } function vcluster_create() { + envsubst '$VCLUSTER_K8S_VERSION' < ./scripts/ci/vcluster.template.yaml > ./vcluster.yaml + cat vcluster.yaml + local vcluster_name=$(vcluster_name) vcluster create ${vcluster_name} \ --upgrade \ --namespace=${vcluster_name} \ - --kubernetes-version=${VCLUSTER_K8S_VERSION} \ --connect=false \ - --update-current=false + --values ./vcluster.yaml + + kubectl annotate namespace ${vcluster_name} janitor/ttl=2d } function vcluster_run() { @@ -44,7 +59,7 @@ function vcluster_helm_rollout_status() { } function vcluster_delete() { - vcluster delete $(vcluster_name) + vcluster delete $(vcluster_name) --delete-configmap --delete-namespace --ignore-not-found } function vcluster_info() { diff --git a/chart/scripts/ci/vcluster.template.yaml b/chart/scripts/ci/vcluster.template.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d009b44aee8c32ddb9b6a4d51d068a3677807829 --- /dev/null +++ b/chart/scripts/ci/vcluster.template.yaml @@ -0,0 +1,21 @@ +controlPlane: + backingStore: + etcd: + deploy: + enabled: true + distro: + k8s: + enabled: true + version: "$VCLUSTER_K8S_VERSION" + statefulSet: + scheduling: + podManagementPolicy: OrderedReady +sync: + toHost: + ingresses: + enabled: true +integrations: + metricsServer: + enabled: true + 
nodes: true + pods: true diff --git a/chart/spec/configuration/certmanager_issuer_spec.rb b/chart/spec/configuration/certmanager_issuer_spec.rb index af35dbecc3fc610361f33f8f69a0ba896458f5c4..3e703d5a5ea17c8006a53d365ff76928e82104b9 100644 --- a/chart/spec/configuration/certmanager_issuer_spec.rb +++ b/chart/spec/configuration/certmanager_issuer_spec.rb @@ -30,9 +30,6 @@ describe 'certmanager_issuer configuration' do # Expectation for the metadata name prefix expect(issuer_job["metadata"]["name"]).to match(/^test-issuer-[a-f0-9]+$/) - # Expectation for the container image needs to be a regex to work for master and stable branches. - expect(issuer_job["spec"]["template"]["spec"]["containers"][0]["image"]).to match(%r{^registry\.gitlab\.com/gitlab-org/build/cng/kubectl:(v\d+\.\d+\.\d+|master)$}) - # Expectation for the rest of the structure expect(issuer_job).to include( "apiVersion" => "batch/v1", @@ -41,7 +38,7 @@ describe 'certmanager_issuer configuration' do "namespace" => "default", "labels" => { "app" => "certmanager-issuer", - "chart" => "certmanager-issuer-0.2.0", + "chart" => "certmanager-issuer-0.2.1", "release" => "test", "heritage" => "Helm" } @@ -50,7 +47,14 @@ describe 'certmanager_issuer configuration' do "activeDeadlineSeconds" => 300, "ttlSecondsAfterFinished" => 1800, "template" => include( - "metadata" => { "labels" => { "app" => "certmanager-issuer", "release" => "test" } }, + "metadata" => include( + "labels" => include( + "app" => "certmanager-issuer", + "chart" => "certmanager-issuer-0.2.1", + "release" => "test", + "heritage" => "Helm" + ) + ), "spec" => include( "securityContext" => { "runAsUser" => 65534, "fsGroup" => 65534, "seccompProfile" => { "type" => "RuntimeDefault" } }, "serviceAccountName" => "test-certmanager-issuer", @@ -59,6 +63,7 @@ describe 'certmanager_issuer configuration' do include( "name" => "create-issuer", "command" => ["/bin/bash", "/scripts/create-issuer", "/scripts/issuer.yml"], + "image" => 
"registry.gitlab.com/gitlab-org/build/cng/kubectl:v42.0.0", "securityContext" => { "allowPrivilegeEscalation" => false, "capabilities" => { "drop" => ["ALL"] }, diff --git a/chart/spec/configuration/database_decomposition_spec.rb b/chart/spec/configuration/database_decomposition_spec.rb index efaa44d29e2831f9459f4187557c209e3750b0c5..ccaaf4c52428f6fb5dd59701a8a6969e71248a19 100644 --- a/chart/spec/configuration/database_decomposition_spec.rb +++ b/chart/spec/configuration/database_decomposition_spec.rb @@ -287,6 +287,14 @@ describe 'Database configuration' do applicationName: embedding host: embedding.host.name load_balancing: false + sec: + username: sec-user + password: + secret: sec-password + preparedStatements: false + databaseTasks: false + applicationName: sec + postgresql: install: false ))) @@ -297,7 +305,7 @@ describe 'Database configuration' do expect(t.exit_code).to eq(0), "Unexpected error code #{t.exit_code} -- #{t.stderr}" db_config = database_config(t, 'webservice') - expect(db_config['production'].keys).to contain_exactly('main', 'ci', 'embedding') + expect(db_config['production'].keys).to contain_exactly('main', 'ci', 'embedding', 'sec') # check `main` stanza main_config = db_config['production']['main'] @@ -329,12 +337,22 @@ describe 'Database configuration' do expect(embedding_config['database_tasks']).to eq(true) expect(embedding_config['load_balancing']).to eq(nil) + # check `sec` stanza + sec_config = db_config['production']['sec'] + expect(sec_config['host']).to eq('global-server') + expect(sec_config['port']).to eq(5432) + expect(sec_config['username']).to eq('sec-user') + expect(sec_config['application_name']).to eq('sec') + expect(sec_config['prepared_statements']).to eq(false) + expect(sec_config['database_tasks']).to eq(false) + expect(ci_config['load_balancing']).to eq({ 'hosts' => ['a.secondary.global', 'b.secondary.global'] }) + # Check the secret mounts webservice_secret_mounts = 
t.projected_volume_sources('Deployment/test-webservice-default', 'init-webservice-secrets').select do |item| item['secret']['items'][0]['key'] == 'postgresql-password' end psql_secret_mounts = webservice_secret_mounts.map { |x| x['secret']['name'] } - expect(psql_secret_mounts).to contain_exactly('main-password', 'ci-password', 'embedding-password') + expect(psql_secret_mounts).to contain_exactly('main-password', 'ci-password', 'embedding-password', 'sec-password') end end diff --git a/chart/spec/configuration/gitaly_spec.rb b/chart/spec/configuration/gitaly_spec.rb index 98a9bc28a4bed0186acf69b09a7fea70feb75010..c303c3fe51c82680433c952443ccba79a4fdaa7d 100644 --- a/chart/spec/configuration/gitaly_spec.rb +++ b/chart/spec/configuration/gitaly_spec.rb @@ -370,6 +370,50 @@ describe 'Gitaly configuration' do end end + context 'timeout' do + context 'when enabled' do + let(:values) do + YAML.safe_load(%( + gitlab: + gitaly: + timeout: + uploadPackNegotiation: 10m + uploadArchiveNegotiation: 20m + )).merge(default_values) + end + + let(:template) { HelmTemplate.new(values) } + + it 'populates a timeout section in config.toml.tpl' do + config_toml = template.dig('ConfigMap/test-gitaly','data','config.toml.tpl') + + pack_objects_cache_section = <<~CONFIG + [timeout] + upload_pack_negotiation = "10m" + upload_archive_negotiation = "20m" + CONFIG + + expect(config_toml).to include(pack_objects_cache_section) + end + end + + context 'when not enabled' do + let(:values) do + YAML.safe_load(%( + gitlab: + gitaly: + )).merge(default_values) + end + let(:template) { HelmTemplate.new(values) } + + it 'does not populate a timeout section in config.toml.tpl' do + config_toml = template.dig('ConfigMap/test-gitaly','data','config.toml.tpl') + + expect(config_toml).not_to match(/^\[timeout\]/) + end + end + end + context 'gpg signing' do let(:values) do HelmTemplate.with_defaults %( @@ -682,7 +726,7 @@ describe 'Gitaly configuration' do expect(gitaly_startup_probe).to include( 
'initialDelaySeconds' => 5, - 'exec' => { "command" => ["/scripts/healthcheck"] }, + 'grpc' => { "port" => 8075 }, 'failureThreshold' => 60, 'periodSeconds' => 1, 'timeoutSeconds' => 2, diff --git a/chart/spec/configuration/gitlab-yml-erb_spec.rb b/chart/spec/configuration/gitlab-yml-erb_spec.rb index fb044651e680a026cf7aa0f95f7ba7a4549e68b2..f129253a298ea86462984b11b9bdae0389371ed6 100644 --- a/chart/spec/configuration/gitlab-yml-erb_spec.rb +++ b/chart/spec/configuration/gitlab-yml-erb_spec.rb @@ -35,7 +35,7 @@ describe 'gitlab.yml.erb configuration' do object_src: "'none'" script_src: "'self' 'unsafe-inline' 'unsafe-eval'" style_src: "'self'" - )).merge(default_values) + )).deep_merge!(default_values) end let(:no_directives) do @@ -44,7 +44,7 @@ describe 'gitlab.yml.erb configuration' do appConfig: contentSecurityPolicy: enabled: true - )).merge(default_values) + )).deep_merge!(default_values) end it 'populates the gitlab.yml.erb' do @@ -73,7 +73,7 @@ describe 'gitlab.yml.erb configuration' do appConfig: extra: matomoDisableCookies: #{value} - )).merge(default_values) + )).deep_merge!(default_values) end context 'when true' do @@ -127,7 +127,7 @@ describe 'gitlab.yml.erb configuration' do appConfig: extra: oneTrustId: #{value} - )).merge(default_values) + )).deep_merge!(default_values) end context 'when configured' do @@ -166,7 +166,7 @@ describe 'gitlab.yml.erb configuration' do appConfig: extra: bizible: #{value} - )).merge(default_values) + )).deep_merge!(default_values) end context 'when true' do @@ -219,7 +219,7 @@ describe 'gitlab.yml.erb configuration' do global: appConfig: cdnHost: #{value} - )).merge(default_values) + )).deep_merge!(default_values) end context 'when configured' do @@ -273,7 +273,7 @@ describe 'gitlab.yml.erb configuration' do context 'sidekiq.routingRules on web' do let(:required_values) do - value.merge(default_values) + value.deep_merge!(default_values) end context 'when empty array' do @@ -343,7 +343,7 @@ describe 'gitlab.yml.erb 
configuration' do context 'sidekiq.routingRules on Sidekiq' do let(:required_values) do - value.merge(default_values) + value.deep_merge!(default_values) end context 'when empty array' do diff --git a/chart/spec/configuration/image_tag_spec.rb b/chart/spec/configuration/image_tag_spec.rb index d2932e5b2e98ea334a91ff9c5d23ee2f7b91ab20..adc624e2ed482a8362479dc244308aaabb45c629 100644 --- a/chart/spec/configuration/image_tag_spec.rb +++ b/chart/spec/configuration/image_tag_spec.rb @@ -67,7 +67,7 @@ end describe 'image tag configuration' do context 'no global.gitlabVersion configured' do begin - values = HelmTemplate.with_defaults %( + values = HelmTemplate.certmanager_issuer.deep_merge!(%( global: pages: enabled: true @@ -78,7 +78,7 @@ describe 'image tag configuration' do ingress: # To ensure the cfsl-self-sign image is used configureCertmanager: false - ) + )) template = HelmTemplate.new values rescue StandardError # Skip these examples when helm or chart dependencies are missing diff --git a/chart/spec/configuration/kas_spec.rb b/chart/spec/configuration/kas_spec.rb index ffec4c9b3fddba8335f2f84a060dd1eb320819e7..db87730001681092f9ac2dbece8aabe92c2e704d 100644 --- a/chart/spec/configuration/kas_spec.rb +++ b/chart/spec/configuration/kas_spec.rb @@ -83,7 +83,7 @@ describe 'kas configuration' do serviceLabels: service: true global: service - )).merge(default_values) + )).deep_merge!(default_values) end it 'Populates the additional labels in the expected manner' do @@ -231,6 +231,39 @@ describe 'kas configuration' do expect(config_yaml_data['private_api']).to eq(expected_config) end + + context 'when AutoFlow enabled' do + let(:kas_values) do + default_kas_values.deep_merge!(YAML.safe_load(%( + global: + kas: + tls: + enabled: true + gitlab: + kas: + autoflow: + enabled: true + temporal: + namespace: some-namespace.id42 + workerMtls: + secretName: worker-mtls + workflowDataEncryption: + codecServer: + authorizedUserEmails: [] + ))) + end + + it 'configures the 
AutoFlow codec server with certificate files' do + expected_config = { + "network" => "tcp", + "address" => :"8142", + "certificate_file" => "/etc/kas/tls.crt", + "key_file" => "/etc/kas/tls.key" + } + + expect(config_yaml_data['autoflow']['temporal']['workflow_data_encryption']['codec_server']['listen']).to eq(expected_config) + end + end end end @@ -238,6 +271,50 @@ describe 'kas configuration' do expect(config_yaml_data['agent']['kubernetes_api']['websocket_token_secret_file']).to eq('/etc/kas/.gitlab_kas_websocket_token_secret') end + context 'when AutoFlow is enabled' do + let(:kas_values) do + default_kas_values.deep_merge!(YAML.safe_load(%( + gitlab: + kas: + autoflow: + enabled: true + temporal: + namespace: some-namespace.id42 + workerMtls: + secretName: worker-mtls + workflowDataEncryption: + codecServer: + authorizedUserEmails: ["maintainer@gitlab.example.com"] + ))) + end + + it 'configures the autoflow config node' do + expected_config = { + "temporal" => { + "host_port" => "some-namespace.id42.tmprl.cloud:7233", + "namespace" => "some-namespace.id42", + "enable_tls" => true, + "certificate_file" => "/etc/kas/temporal-worker-client-mtls.crt", + "key_file" => "/etc/kas/temporal-worker-client-mtls.key", + "workflow_data_encryption" => { + "secret_key_file" => "/etc/kas/.gitlab_kas_autoflow_temporal_workflow_data_encryption_secret", + "codec_server" => { + "listen" => { + "address" => :"8142", + "network" => "tcp" + }, + "temporal_oidc_url" => "https://login.tmprl.cloud/.well-known/openid-configuration", + "temporal_web_ui_url" => "https://cloud.temporal.io", + "authorized_user_emails" => ["maintainer@gitlab.example.com"] + } + } + } + } + + expect(config_yaml_data['autoflow']).to eq(expected_config) + end + end + context 'when customConfig is given' do let(:custom_config) do YAML.safe_load(%( @@ -724,6 +801,48 @@ describe 'kas configuration' do end end end + + context 'when autoflow.enabled is given' do + let(:autoflow_enabled) { true } + 
let(:kas_values) do + default_kas_values.deep_merge!( + 'gitlab' => { + 'kas' => { + 'autoflow' => { + 'enabled' => autoflow_enabled, + 'temporal' => { + 'namespace' => 'some-namespace.id42', + 'workerMtls' => { + 'secretName' => 'worker-mtls' + }, + 'workflowDataEncryption' => { + 'codecServer' => { + 'authorizedUserEmails' => [] + } + } + } + } + } + } + ) + end + + context 'when autoflow.enabled is true' do + let(:autoflow_enabled) { true } + + it 'exports autoflow codec server port' do + expect(service['spec']['ports']).to include(include("name" => "tcp-kas-autoflow-codec-server-api")) + end + end + + context 'when autoflow.enabled is false' do + let(:autoflow_enabled) { false } + + it 'exports no autoflow codec server port' do + expect(service['spec']['ports']).not_to include(include("name" => "tcp-kas-autoflow-codec-server-api")) + end + end + end end describe 'templates/deployment.yaml' do @@ -833,6 +952,75 @@ describe 'kas configuration' do ) end + context 'when AutoFlow is enabled' do + let(:kas_values) do + default_kas_values.deep_merge!(YAML.safe_load(%( + gitlab: + kas: + autoflow: + enabled: true + temporal: + namespace: some-namespace.id42 + workerMtls: + secretName: worker-mtls + workflowDataEncryption: + codecServer: + authorizedUserEmails: ["maintainer@gitlab.example.com"] + ))) + end + + it 'creates AutoFlow Temporal Workflow Data Encryption secret volume' do + init_etc_kas_volume = deployment['spec']['template']['spec']['volumes'].find do |volume| + volume['name'] == 'init-etc-kas' + end + + expect(init_etc_kas_volume['projected']['sources']).to include( + { + "secret" => { + "name" => "test-kas-autoflow-temporal-workflow-data-encryption-secret", + "items" => [ + { + "key" => "kas_autoflow_temporal_workflow_data_encryption", + "path" => ".gitlab_kas_autoflow_temporal_workflow_data_encryption_secret" + } + ] + } + } + ) + end + end + + context 'when AutoFlow is disabled' do + let(:kas_values) do + default_kas_values.deep_merge!(YAML.safe_load(%( + 
gitlab: + kas: + autoflow: + enabled: false + ))) + end + + it 'does not create AutoFlow Temporal Workflow Data Encryption secret volume' do + init_etc_kas_volume = deployment['spec']['template']['spec']['volumes'].find do |volume| + volume['name'] == 'init-etc-kas' + end + + expect(init_etc_kas_volume['projected']['sources']).not_to include( + { + "secret" => { + "name" => "test-kas-autoflow-temporal-workflow-data-encryption-secret", + "items" => [ + { + "key" => "kas_autoflow_temporal_workflow_data_encryption", + "path" => ".gitlab_kas_autoflow_temporal_workflow_data_encryption_secret" + } + ] + } + } + ) + end + end + + describe 'tls' do context 'when global.kas.tls is enabled' do let(:kas_values) do diff --git a/chart/spec/configuration/labels_spec.rb b/chart/spec/configuration/labels_spec.rb index 1900337e85fa45d48314416a85e3e393fe1fbabd..c6b87f43a841abcd73a7078bee739ff68d3b379a 100644 --- a/chart/spec/configuration/labels_spec.rb +++ b/chart/spec/configuration/labels_spec.rb @@ -15,6 +15,7 @@ describe 'Labels configuration' do let(:ignored_charts) do [ + 'Job/test-certmanager-startupapicheck', 'Deployment/test-certmanager-cainjector', 'Deployment/test-certmanager-webhook', 'Deployment/test-certmanager', @@ -49,7 +50,18 @@ describe 'Labels configuration' do resources_by_kind = t.resources_by_kind('Deployment').reject{ |key, _| ignored_charts.include? key } resources_by_kind.each do |key, _| - expect(t.dig(key, 'spec', 'template', 'metadata', 'labels')).to include(default_values['global']['pod']['labels']) + expect(t.dig(key, 'spec', 'template', 'metadata', 'labels')).to include(default_values['global']['pod']['labels']), key + end + end + + it 'Populates labels for all Job templates' do + t = HelmTemplate.new(default_values) + expect(t.exit_code).to eq(0) + + resources_by_kind = t.resources_by_kind('Job').reject{ |key, _| ignored_charts.include? 
key } + + resources_by_kind.each do |key, _| + expect(t.dig(key, 'spec', 'template', 'metadata', 'labels')).to include(default_values['global']['pod']['labels']), key end end diff --git a/chart/spec/configuration/redis_spec.rb b/chart/spec/configuration/redis_spec.rb index 5f5720c1eacbf7ab30883d655c80396e342a003d..00ef31cc92b835416b76731b6069dfa301af0531 100644 --- a/chart/spec/configuration/redis_spec.rb +++ b/chart/spec/configuration/redis_spec.rb @@ -35,7 +35,7 @@ describe 'Redis configuration' do connectTimeout: 3 readTimeout: 4 writeTimeout: 5 - )).merge(default_values) + )).deep_merge!(default_values) end it 'renders {connect,read,write}_timeout values' do @@ -56,7 +56,7 @@ describe 'Redis configuration' do database: 4 redis: install: false - )).deep_merge(default_values) + )).deep_merge!(default_values) end it 'configures Redis' do @@ -74,7 +74,7 @@ describe 'Redis configuration' do redis: auth: enabled: true - )).merge(default_values) + )).deep_merge!(default_values) end context 'when true' do @@ -92,7 +92,7 @@ describe 'Redis configuration' do redis: auth: enabled: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'do not populate password' do @@ -110,7 +110,7 @@ describe 'Redis configuration' do redis: sentinelAuth: enabled: true - )).merge(default_values) + )).deep_merge!(default_values) end context 'when true' do @@ -128,7 +128,7 @@ describe 'Redis configuration' do redis: senntinelAuth: enabled: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'do not populate password' do @@ -157,7 +157,7 @@ describe 'Redis configuration' do redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'skips render only for invalid secret' do @@ -209,7 +209,7 @@ describe 'Redis configuration' do key: password redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'renders both secret without namespace clash' do @@ -253,7 +253,7 @@ describe 'Redis configuration' 
do key: password redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'renders custom enabled secrets as a volume alongside other secrets' do @@ -301,7 +301,7 @@ describe 'Redis configuration' do redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'replaces password with ERB string where required' do @@ -352,7 +352,7 @@ describe 'Redis configuration' do foo: bar redis: install: true - )).merge(default_values) + )).deep_merge!(default_values) end it 'fails to template (checkConfig)' do @@ -378,7 +378,7 @@ describe 'Redis configuration' do password: <%= File.read('/path/to/password').chomp %> redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'renders arbitrary values' do @@ -405,7 +405,7 @@ describe 'Redis configuration' do host: cache.redis redis: install: true - )).merge(default_values) + )).deep_merge!(default_values) end it 'fails to template (checkConfig)' do @@ -426,7 +426,7 @@ describe 'Redis configuration' do host: cache.redis redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'sub-queue inherits all of password from global.redis' do @@ -457,7 +457,7 @@ describe 'Redis configuration' do secret: rspec-cache redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'sub-queue inherits from global' do @@ -491,7 +491,7 @@ describe 'Redis configuration' do secret: rspec-cache redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'sub-queue uses password, global does not' do @@ -525,7 +525,7 @@ describe 'Redis configuration' do secret: rspec-cache redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'sub-queue does not use password, global does' do @@ -557,7 +557,7 @@ describe 'Redis configuration' do host: cache.redis redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'global 
uses user' do @@ -579,7 +579,7 @@ describe 'Redis configuration' do port: 9999 redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'sub-queue uses port, global host' do @@ -612,7 +612,7 @@ describe 'Redis configuration' do port: 26379 redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'separate sentinels are populated' do @@ -642,7 +642,7 @@ describe 'Redis configuration' do port: 26379 redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'sub-queue sentinels are populated' do @@ -674,7 +674,7 @@ describe 'Redis configuration' do - host: s2.cluster-cache.redis redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'Only nested redis cluster is populated' do @@ -704,7 +704,7 @@ describe 'Redis configuration' do - host: s2.cluster-cache.redis redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'Only nested redis cluster is populated' do @@ -739,7 +739,7 @@ describe 'Redis configuration' do - host: s2.cluster-cache.redis redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end let(:redis_cluster_yml_erb) { template.dig('ConfigMap/test-webservice', 'data', 'redis.cluster_cache.yml.erb') } @@ -768,7 +768,7 @@ describe 'Redis configuration' do - host: s2.cluster-cache.redis redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'No values are inherited by nested redis cluster' do diff --git a/chart/spec/configuration/topology_spread_constraints_spec.rb b/chart/spec/configuration/topology_spread_constraints_spec.rb new file mode 100644 index 0000000000000000000000000000000000000000..3c7f62abd99be3c93741ad4da6eed9d84ca6ffe4 --- /dev/null +++ b/chart/spec/configuration/topology_spread_constraints_spec.rb @@ -0,0 +1,176 @@ +require 'spec_helper' +require 'helm_template_helper' +require 'yaml' +require 'hash_deep_merge' + 
+IGNORED_DEPLOYMENTS = [ + 'Deployment/test-certmanager', + 'Deployment/test-certmanager-cainjector', + 'Deployment/test-certmanager-webhook', + 'Deployment/test-gitlab-exporter', + 'Deployment/test-gitlab-runner', + 'Deployment/test-nginx-ingress-controller', + 'Deployment/test-prometheus-server' +].freeze + +SUPPORTED_STATEFULSETS = [ + 'Statefulset/test-praefect' +].freeze + +describe 'local topologySpreadConstraints configuration' do + let(:supported_statefulsets) do + SUPPORTED_STATEFULSETS + end + + let(:ignored_deployments) do + IGNORED_DEPLOYMENTS + end + + let(:default_values) do + HelmTemplate.defaults + end + + let(:values_with_override) do + HelmTemplate.with_defaults(%( + gitlab: + geo-logcursor: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + gitlab-pages: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + gitlab-shell: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + kas: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + mailroom: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + praefect: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + sidekiq: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + spamcheck: + topologySpreadConstraints: + 
- labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + toolbox: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + webservice: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + minio: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + registry: + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: test + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + )) + end + + context 'when left with default values' do + it 'does not specify topologySpreadConstraints' do + t = HelmTemplate.new(default_values) + expect(t.exit_code).to eq(0) + + deployments = t.resources_by_kind('Deployment').reject { |key, _| ignored_deployments.include? key } + deployments.each do |key, _| + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')).not_to be_present + end + + statefulsets = t.resources_by_kind('Statefulset').select { |key, _| supported_statefulsets.include? key } + statefulsets.each do |key, _| + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')).not_to be_present + end + end + end + + context 'when setting a local topologySpreadConstraints override' do + it 'applies to a single Deployment' do + t = HelmTemplate.new(values_with_override) + expect(t.exit_code).to eq(0), "Unexpected error code #{t.exit_code} -- #{t.stderr}" + + deployments = t.resources_by_kind('Deployment').reject { |key, _| ignored_deployments.include? 
key } + deployments.each do |key, _| + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')).to be_present + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')[0]['labelSelector']['matchLabels']['app']).to eq('test') + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')[0]['maxSkew']).to eq(1) + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')[0]['topologyKey']).to eq('topology.kubernetes.io/zone') + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')[0]['whenUnsatisfiable']).to eq('DoNotSchedule') + end + + statefulsets = t.resources_by_kind('Statefulset').select { |key, _| supported_statefulsets.include? key } + statefulsets.each do |key, _| + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')).to be_present + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')[0]['labelSelector']['matchLabels']['app']).to eq('test') + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')[0]['maxSkew']).to eq(1) + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')[0]['topologyKey']).to eq('topology.kubernetes.io/zone') + expect(t.dig(key, 'spec', 'template', 'spec', 'topologySpreadConstraints')[0]['whenUnsatisfiable']).to eq('DoNotSchedule') + end + end + end +end diff --git a/chart/spec/configuration/webservice_spec.rb b/chart/spec/configuration/webservice_spec.rb index 5f19662516586408b9de33a1facff9f3ebb5def1..5b17a4f80d22f88fad0aaa1cf858c0d192055ea0 100644 --- a/chart/spec/configuration/webservice_spec.rb +++ b/chart/spec/configuration/webservice_spec.rb @@ -77,4 +77,55 @@ describe 'webservice configuration' do expect(extra_ingress['spec']['tls'][0]['secretName']).to eql('another-local-tls') end end + + context 'setting nginx service-upstream annotation' do + let(:values) do + YAML.safe_load(%( + gitlab: + webservice: + ingress: + serviceUpstream: true + 
)).deep_merge(super()) + end + + it 'nginx service-upstream annotation is added' do + expect(default_ingress["metadata"]["annotations"]).to include( + "nginx.ingress.kubernetes.io/service-upstream" => "true" + ) + end + end + + context 'providing own nginx annotation' do + let(:values) do + YAML.safe_load(%( + gitlab: + webservice: + ingress: + serviceUpstream: true + annotations: + nginx.ingress.kubernetes.io/service-upstream: "false" + )).deep_merge(super()) + end + + it 'provided annotation takes precedence over local setting' do + expect(default_ingress["metadata"]["annotations"]).to include( + "nginx.ingress.kubernetes.io/service-upstream" => "false" + ) + end + end + + context 'using traefik as ingress provider' do + let(:values) do + YAML.safe_load(%( + global: + ingress: + provider: traefik + )).deep_merge(super()) + end + + it 'does not contain any nginx.ingress annotations' do + nginx_annotations = default_ingress["metadata"]["annotations"].keys.select { |key| key.include?("nginx.ingress") } + expect(nginx_annotations).to be_empty + end + end end diff --git a/chart/spec/configuration/workhorse_spec.rb b/chart/spec/configuration/workhorse_spec.rb index 7240bf9b34f82d5e58d7141b13af360cdebcc51c..5adb2f9d09e59e7b63895a1264b8241fd6ec46b1 100644 --- a/chart/spec/configuration/workhorse_spec.rb +++ b/chart/spec/configuration/workhorse_spec.rb @@ -118,10 +118,10 @@ describe 'Workhorse configuration' do end context 'with AzureRM configured' do - let(:s3_config) { File.read('examples/objectstorage/rails.azurerm.yaml') } + let(:azure_config) { File.read('examples/objectstorage/rails.azurerm.yaml') } it 'renders a TOML configuration file' do - toml = render_toml(raw_toml, s3_config) + toml = render_toml(raw_toml, azure_config) expect(toml.keys).to match_array(%w[shutdown_timeout listeners object_storage image_resizer redis]) @@ -131,6 +131,27 @@ describe 'Workhorse configuration' do expect(object_storage['azurerm']['azure_storage_account_name']).to 
eq('YOUR_AZURE_STORAGE_ACCOUNT_NAME') expect(object_storage['azurerm']['azure_storage_access_key']).to eq('YOUR_AZURE_STORAGE_ACCOUNT_KEY') end + + context 'with a blank access key' do + let(:azure_config) do + <<CFG +provider: AzureRM +azure_storage_account_name: YOUR_AZURE_STORAGE_ACCOUNT_NAME +CFG + end + + it 'renders a TOML configuration file' do + toml = render_toml(raw_toml, azure_config) + + expect(toml.keys).to match_array(%w[shutdown_timeout listeners object_storage image_resizer redis]) + + object_storage = toml['object_storage'] + expect(object_storage.keys).to match_array(%w[provider azurerm]) + expect(object_storage['azurerm'].keys).to match_array(%w[azure_storage_account_name azure_storage_access_key]) + expect(object_storage['azurerm']['azure_storage_account_name']).to eq('YOUR_AZURE_STORAGE_ACCOUNT_NAME') + expect(object_storage['azurerm']['azure_storage_access_key']).to be_empty + end + end end context 'with GCS configured' do @@ -166,7 +187,7 @@ describe 'Workhorse configuration' do secret: global-secret redis: install: false - )).deep_merge(default_values) + )).deep_merge!(default_values) end it 'renders the global redis config' do @@ -198,7 +219,7 @@ describe 'Workhorse configuration' do secret: global-secret redis: install: false - )).deep_merge(default_values) + )).deep_merge!(default_values) end it 'renders the global redis config' do @@ -234,7 +255,7 @@ describe 'Workhorse configuration' do secret: workhorse redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'overrides global redis config' do @@ -281,7 +302,7 @@ describe 'Workhorse configuration' do user: redis-user redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) it "adds the username to the URL" do toml = render_toml(raw_toml) @@ -318,7 +339,7 @@ describe 'Workhorse configuration' do user: workhorse-redis-user redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it "overrides global 
redis config" do @@ -358,7 +379,7 @@ describe 'Workhorse configuration' do secret: workhorse redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end let(:webservice_config) { template.dig('ConfigMap/test-webservice', 'data') } @@ -375,13 +396,28 @@ describe 'Workhorse configuration' do redis_config = toml['redis'] expect(redis_config.keys).to match_array(%w[Password SentinelMaster Sentinel DB]) expect(redis_config['SentinelMaster']).to eq('workhorse.redis') - expect(redis_config['Sentinel']).to match_array(%w[tcp://s1.workhorse.redis:26379 tcp://s2.workhorse.redis:26379]) + # Workhorse sentinels don't use global rediss scheme + expect(redis_config['Sentinel']).to match_array(%w[redis://s1.workhorse.redis:26379 redis://s2.workhorse.redis:26379]) expect(redis_config['Password']).to eq(workhorse_redis_password) expect(redis_config['DB']).to eq(9) expect(template.dig("ConfigMap/test-workhorse-default", "data", 'workhorse-config.toml.tpl')).to include('redis/workhorse-password') expect(template.dig('ConfigMap/test-workhorse-default', 'data', 'configure')).to include('init-config/redis/workhorse-password') end + context 'with workhorse rediss scheme' do + before do + values["global"]["redis"]["workhorse"]["scheme"] = 'rediss' + end + + it 'uses the rediss scheme' do + expect(template.exit_code).to eq(0), "Unexpected error code #{template.exit_code} -- #{template.stderr}" + + toml = render_toml(raw_toml) + redis_config = toml['redis'] + expect(redis_config['Sentinel']).to match_array(%w[rediss://s1.workhorse.redis:26379 rediss://s2.workhorse.redis:26379]) + end + end + context 'when workhorse redis does not have password' do before do values["global"]["redis"]["workhorse"]["password"]["enabled"] = false @@ -395,7 +431,7 @@ describe 'Workhorse configuration' do redis_config = toml['redis'] expect(redis_config.keys).to match_array(%w[SentinelMaster Sentinel DB]) expect(redis_config['SentinelMaster']).to eq('workhorse.redis') - 
expect(redis_config['Sentinel']).to match_array(%w[tcp://s1.workhorse.redis:26379 tcp://s2.workhorse.redis:26379]) + expect(redis_config['Sentinel']).to match_array(%w[redis://s1.workhorse.redis:26379 redis://s2.workhorse.redis:26379]) end end @@ -428,7 +464,7 @@ describe 'Workhorse configuration' do key: password redis: install: false - )).merge(default_values) + )).deep_merge!(default_values) end it 'uses global redis config' do @@ -439,7 +475,7 @@ describe 'Workhorse configuration' do redis_config = toml['redis'] expect(redis_config.keys).to match_array(%w[Password SentinelMaster Sentinel SentinelPassword DB]) expect(redis_config['SentinelMaster']).to eq('workhorse.redis') - expect(redis_config['Sentinel']).to match_array(%w[tcp://s1.workhorse.redis:26379 tcp://s2.workhorse.redis:26379]) + expect(redis_config['Sentinel']).to match_array(%w[redis://s1.workhorse.redis:26379 redis://s2.workhorse.redis:26379]) expect(redis_config['Password']).to eq(workhorse_redis_password) expect(redis_config['SentinelPassword']).to eq(global_redis_sentinel_password) expect(redis_config['DB']).to eq(0) diff --git a/chart/spec/helm_template_helper.rb b/chart/spec/helm_template_helper.rb index 474cfd6b34f638d38cb918b09c4c6f4be7865042..8df24480167f8a54bc5b078269696d386ee438e2 100644 --- a/chart/spec/helm_template_helper.rb +++ b/chart/spec/helm_template_helper.rb @@ -43,14 +43,17 @@ class HelmTemplate { "certmanager-issuer" => { "email" => "test@example.com" } } end + # The final defaults stubs the stable version so that the spec values are dependent on which branch + # the tests are running on, since on stable versions will have a semVer value, while the default branch + # will have `master`. def self.defaults - HelmTemplate.certmanager_issuer + HelmTemplate.certmanager_issuer.deep_merge!({ 'global' => { 'gitlabVersion' => "v42.0.0" } }) end def self.with_defaults(yaml) yaml ||= {} hash = yaml.is_a?(Hash) ? 
yaml : YAML.safe_load(yaml) - hash.deep_merge!(HelmTemplate.defaults) + HelmTemplate.defaults.deep_merge!(hash) end attr_reader :mapped diff --git a/chart/spec/integration/check_config/duo_auth_spec.rb b/chart/spec/integration/check_config/duo_auth_spec.rb index 699fda2e4457eae0acf519724d04bec4fe764a3e..8a59ac1e70f63666847816206cfe09de3a101f97 100644 --- a/chart/spec/integration/check_config/duo_auth_spec.rb +++ b/chart/spec/integration/check_config/duo_auth_spec.rb @@ -16,7 +16,7 @@ describe 'checkConfig duo' do secretKey: secret: SecretName key: KeyName - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -25,7 +25,7 @@ describe 'checkConfig duo' do appConfig: duoAuth: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling Duo Auth requires hostname to be present' } @@ -47,7 +47,7 @@ describe 'checkConfig duo' do secretKey: secret: SecretName key: KeyName - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -57,7 +57,7 @@ describe 'checkConfig duo' do duoAuth: enabled: true hostname: test.api.hostname - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling Duo Auth requires integrationKey to be present' } @@ -79,7 +79,7 @@ describe 'checkConfig duo' do secretKey: secret: SecretName key: KeyName - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -90,7 +90,7 @@ describe 'checkConfig duo' do enabled: true hostname: test.api.hostname integrationKey: dummy_integration_key - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling Duo Auth requires secretKey.secret to be present' } diff --git a/chart/spec/integration/check_config/geo_spec.rb b/chart/spec/integration/check_config/geo_spec.rb index 
2f995ba981b5b19bf4e63c707b94952e18d12f29..b03f9b194777cd54c3d2eaf56055736f073adab8 100644 --- a/chart/spec/integration/check_config/geo_spec.rb +++ b/chart/spec/integration/check_config/geo_spec.rb @@ -14,7 +14,7 @@ describe 'checkConfig geo' do host: foo password: secret: bar - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -22,7 +22,7 @@ describe 'checkConfig geo' do global: geo: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Geo was configured but no database was provided' } @@ -42,7 +42,7 @@ describe 'checkConfig geo' do host: foo password: secret: bar - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -55,7 +55,7 @@ describe 'checkConfig geo' do host: foo password: secret: bar - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Geo was configured with `role: secondary`, but no database was provided' } @@ -80,7 +80,7 @@ describe 'checkConfig geo' do }, 'psql' => { 'host' => 'foo', 'password' => { 'secret' => 'bar' } } } - }.merge(default_required_values) + }.deep_merge!(default_required_values) end let(:error_values) do @@ -97,7 +97,7 @@ describe 'checkConfig geo' do }, 'psql' => { 'host' => 'foo', 'password' => { 'secret' => 'bar' } } } - }.merge(default_required_values) + }.deep_merge!(default_required_values) end let(:error_output) { 'Registry replication is enabled for GitLab Geo, but no primary API URL is specified.' 
} diff --git a/chart/spec/integration/check_config/gitaly_spec.rb b/chart/spec/integration/check_config/gitaly_spec.rb index 0a3992e818fa9b31bb82bcf37fa3b8a1cdf3b7c0..7ea10c74bd8ef6d6fbfc49f2637274e588e0551d 100644 --- a/chart/spec/integration/check_config/gitaly_spec.rb +++ b/chart/spec/integration/check_config/gitaly_spec.rb @@ -23,7 +23,7 @@ describe 'checkConfig gitaly' do enabled: true tls: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -43,7 +43,7 @@ describe 'checkConfig gitaly' do enabled: true tls: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'global.praefect.virtualStorages[1].tlsSecretName not specified (\'vs2\')' } @@ -62,7 +62,7 @@ describe 'checkConfig gitaly' do external: - name: default hostname: bar - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -71,7 +71,7 @@ describe 'checkConfig gitaly' do gitaly: enabled: false external: [] - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'external Gitaly repos needs to be specified if global.gitaly.enabled is not set' } @@ -92,7 +92,7 @@ describe 'checkConfig gitaly' do external: - name: foo hostname: bar - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -106,7 +106,7 @@ describe 'checkConfig gitaly' do external: - name: foo hostname: bar - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Each storage name must be unique.' 
} @@ -130,7 +130,7 @@ describe 'checkConfig gitaly' do replaceInternalGitaly: false virtualStorages: - name: defaultPraefect - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -146,7 +146,7 @@ describe 'checkConfig gitaly' do replaceInternalGitaly: false virtualStorages: - name: foo - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Each storage name must be unique.' } @@ -167,7 +167,7 @@ describe 'checkConfig gitaly' do external: - name: external1 hostname: foo - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -180,7 +180,7 @@ describe 'checkConfig gitaly' do external: - name: bar hostname: baz - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'There must be one (and only one) storage named \'default\'.' } @@ -206,7 +206,7 @@ describe 'checkConfig gitaly' do replaceInternalGitaly: false virtualStorages: - name: praefect1 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -224,7 +224,7 @@ describe 'checkConfig gitaly' do replaceInternalGitaly: false virtualStorages: - name: praefect1 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'There must be one (and only one) storage named \'default\'.' } @@ -244,7 +244,7 @@ describe 'checkConfig gitaly' do - name: default gitalyReplicas: 3 defaultReplicationFactor: 2 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -256,7 +256,7 @@ describe 'checkConfig gitaly' do - name: default gitalyReplicas: 2 defaultReplicationFactor: 3 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { '\'defaultReplicationFactor\' is not correct.' 
} diff --git a/chart/spec/integration/check_config/gitlab_shell_spec.rb b/chart/spec/integration/check_config/gitlab_shell_spec.rb index 57255d75d88a83cb5063a652ab85522d68c9b42e..90d0ff6bf5f03c9bb80ce58653b82851fab6f27e 100644 --- a/chart/spec/integration/check_config/gitlab_shell_spec.rb +++ b/chart/spec/integration/check_config/gitlab_shell_spec.rb @@ -12,7 +12,7 @@ describe 'checkConfig gitlab-shell' do config: proxyProtocol: true proxyPolicy: use - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -22,7 +22,7 @@ describe 'checkConfig gitlab-shell' do config: proxyProtocol: true proxyPolicy: reject - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Either disable proxyProtocol or set proxyPolicy to "use", "require", or "ignore".' } @@ -40,7 +40,7 @@ describe 'checkConfig gitlab-shell' do metrics: enabled: true sshDaemon: gitlab-sshd - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -50,7 +50,7 @@ describe 'checkConfig gitlab-shell' do metrics: enabled: true sshDaemon: openssh - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Either disable metrics or set sshDaemon to "gitlab-sshd".' 
} diff --git a/chart/spec/integration/check_config/mailroom_spec.rb b/chart/spec/integration/check_config/mailroom_spec.rb index c162b6d879a4c3eff0d2cc41b1019247a28b72b2..ba012d7a1e7d654513ed38c7e4589333a097eca0 100644 --- a/chart/spec/integration/check_config/mailroom_spec.rb +++ b/chart/spec/integration/check_config/mailroom_spec.rb @@ -16,7 +16,7 @@ describe 'checkConfig mailroom' do clientId: MY-CLIENT-ID clientSecret: secret: secret - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -28,7 +28,7 @@ describe 'checkConfig mailroom' do inboxMethod: microsoft_graph clientSecret: secret: secret - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'be sure to specify the tenant ID' } @@ -57,7 +57,7 @@ describe 'checkConfig mailroom' do clientId: MY-CLIENT-ID clientSecret: secret: secret - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -76,7 +76,7 @@ describe 'checkConfig mailroom' do inboxMethod: microsoft_graph clientSecret: secret: secret - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'be sure to specify the tenant ID' } diff --git a/chart/spec/integration/check_config/nginx_spec.rb b/chart/spec/integration/check_config/nginx_spec.rb index f02007f67a92db1d272dd78a6a52feaf5ad1afc4..c8f6a32e661b66e2820991144a3542617b840ac8 100644 --- a/chart/spec/integration/check_config/nginx_spec.rb +++ b/chart/spec/integration/check_config/nginx_spec.rb @@ -9,7 +9,7 @@ describe 'checkConfig nginx' do nginx-ingress: rbac: scope: false - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -17,7 +17,7 @@ describe 'checkConfig nginx' do nginx-ingress: rbac: scope: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Namespaced IngressClasses do not exist' } diff 
--git a/chart/spec/integration/check_config/omniauth_spec.rb b/chart/spec/integration/check_config/omniauth_spec.rb index 4fc85a332d1c2d0ba2f27f6fe3c1a3dabe11e02f..c198413b2607b5b3888abcb742252bbb13ef676e 100644 --- a/chart/spec/integration/check_config/omniauth_spec.rb +++ b/chart/spec/integration/check_config/omniauth_spec.rb @@ -17,7 +17,7 @@ describe 'checkConfig omniauth' do - name: kerberos label: Kerberos icon: "https://example.org/kerberos.png" - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -29,7 +29,7 @@ describe 'checkConfig omniauth' do - name: oauth2_generic app_id: id app_secret: secret - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { "each provider should only contain either:" } diff --git a/chart/spec/integration/check_config/postgresql_spec.rb b/chart/spec/integration/check_config/postgresql_spec.rb index 3eaf672545cb610210470617da636476deac2e19..7d7253c7db0b8de13b5fc8f58a4c53ec421a1673 100644 --- a/chart/spec/integration/check_config/postgresql_spec.rb +++ b/chart/spec/integration/check_config/postgresql_spec.rb @@ -16,7 +16,7 @@ describe 'checkConfig postgresql' do hosts: [a, b, c] postgresql: install: false - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -30,7 +30,7 @@ describe 'checkConfig postgresql' do hosts: [a, b, c] postgresql: install: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'PostgreSQL is set to install, but database load balancing is also enabled' } @@ -51,7 +51,7 @@ describe 'checkConfig postgresql' do hosts: [a, b, c] postgresql: install: false - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -65,7 +65,7 @@ describe 'checkConfig postgresql' do invalid: item postgresql: install: false - )).merge(default_required_values) + 
)).deep_merge!(default_required_values) end let(:error_output) { 'You must specify `load_balancing.hosts` or `load_balancing.discover`' } @@ -87,7 +87,7 @@ describe 'checkConfig postgresql' do hosts: [a, b, c] postgresql: install: false - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -101,7 +101,7 @@ describe 'checkConfig postgresql' do hosts: a postgresql: install: false - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Database load balancing using `hosts` is configured, but does not appear to be a list' } @@ -124,7 +124,7 @@ describe 'checkConfig postgresql' do record: secondary postgresql: install: false - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -138,7 +138,7 @@ describe 'checkConfig postgresql' do discover: true postgresql: install: false - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Database load balancing using `discover` is configured, but does not appear to be a map' } @@ -155,7 +155,7 @@ describe 'checkConfig postgresql' do postgresql: image: tag: 13 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -163,7 +163,7 @@ describe 'checkConfig postgresql' do postgresql: image: tag: 12 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'The minimum required version is PostgreSQL 13.' 
} diff --git a/chart/spec/integration/check_config/registry_spec.rb b/chart/spec/integration/check_config/registry_spec.rb index 1974a055bd920344e06a6b85686975b99bc78b6c..0940fc9ad2b9d1125acd4bb1a0a08488ef4ff15f 100644 --- a/chart/spec/integration/check_config/registry_spec.rb +++ b/chart/spec/integration/check_config/registry_spec.rb @@ -14,7 +14,7 @@ describe 'checkConfig registry' do registry: database: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -26,7 +26,7 @@ describe 'checkConfig registry' do registry: database: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'PostgreSQL 13 is the minimum required version' } @@ -47,7 +47,7 @@ describe 'checkConfig registry' do database: enabled: true sslmode: disable - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -60,7 +60,7 @@ describe 'checkConfig registry' do database: enabled: true sslmode: testing - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Invalid SSL mode' } @@ -86,7 +86,7 @@ describe 'checkConfig registry' do loadBalancing: enabled: true record: db-replica-registry.service.consul - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -103,7 +103,7 @@ describe 'checkConfig registry' do enabled: true loadBalancing: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { '`database.loadBalancing` requires `record` to be provided' } @@ -129,7 +129,7 @@ describe 'checkConfig registry' do loadBalancing: enabled: true record: db-replica-registry.service.consul - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -147,7 +147,7 @@ describe 'checkConfig registry' do loadBalancing: enabled: true record: 
db-replica-registry.service.consul - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling database load balancing requires the metadata database to be enabled.' } @@ -173,7 +173,7 @@ describe 'checkConfig registry' do loadBalancing: enabled: true record: db-replica-registry.service.consul - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -191,7 +191,7 @@ describe 'checkConfig registry' do loadBalancing: enabled: true record: db-replica-registry.service.consul - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling database load balancing requires Redis caching to be enabled.' } @@ -209,7 +209,7 @@ describe 'checkConfig registry' do sentry: enabled: true dsn: somedsn - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -218,7 +218,7 @@ describe 'checkConfig registry' do reporting: sentry: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'When enabling sentry, you must configure at least one DSN.' 
} @@ -237,7 +237,7 @@ describe 'checkConfig registry' do redis: cache: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -248,7 +248,7 @@ describe 'checkConfig registry' do redis: cache: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling the Redis cache requires the metadata database to be enabled' } @@ -272,7 +272,7 @@ describe 'checkConfig registry' do cache: enabled: true host: 'localhost' - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -288,7 +288,7 @@ describe 'checkConfig registry' do cache: enabled: true host: '' - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling the Redis cache requires the host to not be empty' } @@ -317,7 +317,7 @@ describe 'checkConfig registry' do port: 26379 - host: sentinel2.example.com port: 26379 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -338,7 +338,7 @@ describe 'checkConfig registry' do port: 26379 - host: sentinel2.example.com port: 26379 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling the Redis cache with sentinels requires the registry.redis.cache.host to be set.' } @@ -366,7 +366,7 @@ describe 'checkConfig registry' do enabled: true secret: registry-redis-cache-secret key: password - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -385,7 +385,7 @@ describe 'checkConfig registry' do password: enabled: true secret: '' - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { ' Enabling the Redis cache password requires \'registry.redis.cache.password.secret\' to be set.' 
} @@ -413,7 +413,7 @@ describe 'checkConfig registry' do enabled: true secret: registry-redis-cache-secret key: password - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -433,7 +433,7 @@ describe 'checkConfig registry' do enabled: true secret: registry-redis-cache-secret key: '' - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { ' Enabling the Redis cache password requires \'registry.redis.cache.password.key\' to be set.' } @@ -455,7 +455,7 @@ describe 'checkConfig registry' do rateLimiting: enabled: true host: 'localhost' - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -469,7 +469,7 @@ describe 'checkConfig registry' do rateLimiting: enabled: true host: '' - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling the Redis rate-limiter requires the host to not be empty' } @@ -496,7 +496,7 @@ describe 'checkConfig registry' do port: 26379 - host: sentinel2.example.com port: 26379 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -515,7 +515,7 @@ describe 'checkConfig registry' do port: 26379 - host: sentinel2.example.com port: 26379 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling the Redis rate-limiter with sentinels requires the registry.redis.rateLimiting.host to be set.' 
} @@ -541,7 +541,7 @@ describe 'checkConfig registry' do enabled: true secret: registry-redis-cache-secret key: password - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -558,7 +558,7 @@ describe 'checkConfig registry' do password: enabled: true secret: '' - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { ' Enabling the Redis rate-limiter password requires \'registry.redis.rateLimiting.password.secret\' to be set.' } @@ -584,7 +584,7 @@ describe 'checkConfig registry' do enabled: true secret: registry-redis-cache-secret key: password - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -602,7 +602,7 @@ describe 'checkConfig registry' do enabled: true secret: registry-redis-cache-secret key: '' - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { ' Enabling the Redis rate-limiter password requires \'registry.redis.rateLimiting.password.key\' to be set.' 
} @@ -622,7 +622,7 @@ describe 'checkConfig registry' do registry: tls: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -630,7 +630,7 @@ describe 'checkConfig registry' do registry: tls: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Enabling the service level TLS requires \'global.hosts.registry.protocol\'' } @@ -648,7 +648,7 @@ describe 'checkConfig registry' do tls: enabled: true secretName: example-tls - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -657,7 +657,7 @@ describe 'checkConfig registry' do debug: tls: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'secret is required when not enabling TLS for the non-debug Registry endpoint.' } diff --git a/chart/spec/integration/check_config/sidekiq_spec.rb b/chart/spec/integration/check_config/sidekiq_spec.rb index d3b5e7e34dd02021e114720f8d0a419a71eaaced..2e64dd2b4d93e3ce75004e904119adfd1e2036bc 100644 --- a/chart/spec/integration/check_config/sidekiq_spec.rb +++ b/chart/spec/integration/check_config/sidekiq_spec.rb @@ -11,7 +11,7 @@ describe 'checkConfig sidekiq' do pods: - name: valid-1 queues: merge,post_receive - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -21,7 +21,7 @@ describe 'checkConfig sidekiq' do pods: - name: invalid-1 queues: [merge] - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'not a string' } @@ -40,7 +40,7 @@ describe 'checkConfig sidekiq' do deployment: terminationGracePeriodSeconds: 30 timeout: 10 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -50,7 +50,7 @@ describe 'checkConfig sidekiq' do deployment: terminationGracePeriodSeconds: 30 timeout: 
40 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'You must set `terminationGracePeriodSeconds` (30) longer than `timeout` (40) for pod `all-in-1`.' } @@ -68,7 +68,7 @@ describe 'checkConfig sidekiq' do pods: - name: 'valid-1' timeout: 10 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -78,7 +78,7 @@ describe 'checkConfig sidekiq' do pods: - name: 'valid-1' timeout: 50 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'You must set `terminationGracePeriodSeconds` (30) longer than `timeout` (50) for pod `valid-1`.' } @@ -96,7 +96,7 @@ describe 'checkConfig sidekiq' do pods: - name: 'valid-1' terminationGracePeriodSeconds: 50 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -106,7 +106,7 @@ describe 'checkConfig sidekiq' do pods: - name: 'valid-1' terminationGracePeriodSeconds: 1 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'You must set `terminationGracePeriodSeconds` (1) longer than `timeout` (25) for pod `valid-1`.' } @@ -125,7 +125,7 @@ describe 'checkConfig sidekiq' do - name: 'valid-1' terminationGracePeriodSeconds: 50 timeout: 10 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -136,7 +136,7 @@ describe 'checkConfig sidekiq' do - name: 'valid-1' terminationGracePeriodSeconds: 50 timeout: 60 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'You must set `terminationGracePeriodSeconds` (50) longer than `timeout` (60) for pod `valid-1`.' 
} @@ -159,7 +159,7 @@ describe 'checkConfig sidekiq' do appConfig: sidekiq: routingRules: [] - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'succeeds' do @@ -181,7 +181,7 @@ describe 'checkConfig sidekiq' do - ["feature_category=search", "search"] - ["feature_category=memory|resource_boundary=memory", "memory-bound"] - ["*", "default", "default"] - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'succeeds' do @@ -198,7 +198,7 @@ describe 'checkConfig sidekiq' do appConfig: sidekiq: routingRules: 'hello' - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'returns an error' do @@ -217,7 +217,7 @@ describe 'checkConfig sidekiq' do routingRules: - ["resource_boundary=cpu", "cpu_boundary"] - "feature_category=pages" - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'returns an error' do @@ -236,7 +236,7 @@ describe 'checkConfig sidekiq' do routingRules: - ["resource_boundary=cpu", "cpu_boundary"] - [] - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'returns an error' do @@ -255,7 +255,7 @@ describe 'checkConfig sidekiq' do routingRules: - ["resource_boundary=cpu", "cpu_boundary"] - ["hello"] - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'returns an error' do @@ -274,7 +274,7 @@ describe 'checkConfig sidekiq' do routingRules: - ["resource_boundary=cpu", "cpu_boundary"] - ["resource_boundary=cpu", "cpu_boundary", "something", "something"] - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'returns an error' do @@ -293,7 +293,7 @@ describe 'checkConfig sidekiq' do routingRules: - ["resource_boundary=cpu", "cpu_boundary"] - ["rule", 123] - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'returns an error' do @@ -312,7 +312,7 @@ describe 'checkConfig 
sidekiq' do routingRules: - ["resource_boundary=cpu", "cpu_boundary"] - ["rule", "default", 123] - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'returns an error' do @@ -331,7 +331,7 @@ describe 'checkConfig sidekiq' do routingRules: - ["resource_boundary=cpu", "cpu_boundary"] - [123, 'valid-queue'] - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end it 'returns an error' do @@ -355,7 +355,7 @@ describe 'checkConfig sidekiq' do port: 8082 health_checks: port: 8083 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -367,7 +367,7 @@ describe 'checkConfig sidekiq' do port: 8082 health_checks: port: 8082 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end include_examples 'config validation', @@ -385,7 +385,7 @@ describe 'checkConfig sidekiq' do port: 8082 health_checks: port: 8082 - )).deep_merge(default_required_values) + )).deep_merge!(default_required_values) end include_examples 'config validation', diff --git a/chart/spec/integration/check_config/toolbox_spec.rb b/chart/spec/integration/check_config/toolbox_spec.rb index 0217a0509c501d4c603d1cddc2d576ef14622549..1e6beb4e08f0eebbb5a9f1a2e6623d57e07ac441 100644 --- a/chart/spec/integration/check_config/toolbox_spec.rb +++ b/chart/spec/integration/check_config/toolbox_spec.rb @@ -11,7 +11,7 @@ describe 'checkConfig toolbox' do replicas: 1 persistence: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -21,7 +21,7 @@ describe 'checkConfig toolbox' do replicas: 2 persistence: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'more than 1 replica, but also with a PersistentVolumeClaim' } @@ -43,7 +43,7 @@ describe 'checkConfig toolbox' do config: secret: s3cmd-config key: config - )).merge(default_required_values) 
+ )).deep_merge!(default_required_values) end let(:error_values) do @@ -56,7 +56,7 @@ describe 'checkConfig toolbox' do config: # secret: s3cmd-config key: config - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'A valid object storage config secret is needed for backups.' } @@ -77,7 +77,7 @@ describe 'checkConfig toolbox' do config: # secret: s3cmd-config key: config - )).merge(default_required_values) + )).deep_merge!(default_required_values) end include_examples 'config validation', @@ -96,7 +96,7 @@ describe 'checkConfig toolbox' do config: # secret: s3cmd-config key: config - )).merge(default_required_values) + )).deep_merge!(default_required_values) end include_examples 'config validation', diff --git a/chart/spec/integration/check_config/webservice_spec.rb b/chart/spec/integration/check_config/webservice_spec.rb index 5b269868dfa23ef5a8d743fc6186b88ef190812f..db9cf39c3edd22a5e4a65b9521e5aa472c48a3d3 100644 --- a/chart/spec/integration/check_config/webservice_spec.rb +++ b/chart/spec/integration/check_config/webservice_spec.rb @@ -11,7 +11,7 @@ describe 'checkConfig webservice' do maxRequestDurationSeconds: 50 webservice: workerTimeout: 60 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -21,7 +21,7 @@ describe 'checkConfig webservice' do maxRequestDurationSeconds: 70 webservice: workerTimeout: 60 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'global.appConfig.maxRequestDurationSeconds (70) is greater than or equal to global.webservice.workerTimeout (60)' } @@ -40,7 +40,7 @@ describe 'checkConfig webservice' do terminationGracePeriodSeconds: 50 shutdown: blackoutSeconds: 10 - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -51,7 +51,7 @@ describe 'checkConfig webservice' do terminationGracePeriodSeconds: 5 shutdown: blackoutSeconds: 20 - 
)).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'You must set terminationGracePeriodSeconds (5) longer than blackoutSeconds (20)' } diff --git a/chart/spec/integration/check_config/workhorse_spec.rb b/chart/spec/integration/check_config/workhorse_spec.rb index ab0d89f3a29df9e900af8341177b75b0f03b56b2..06d13ff57886e31283b605a4e27c411f7b6cedad 100644 --- a/chart/spec/integration/check_config/workhorse_spec.rb +++ b/chart/spec/integration/check_config/workhorse_spec.rb @@ -17,7 +17,7 @@ describe 'checkConfig workhorse' do exporter: tls: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -33,7 +33,7 @@ describe 'checkConfig workhorse' do exporter: tls: enabled: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'The monitoring exporter TLS depends on the main workhorse listener using TLS.' } diff --git a/chart/spec/integration/check_config_spec.rb b/chart/spec/integration/check_config_spec.rb index ed6a03147c9221fa7864dc564bf97b520b345447..3068c65c2b9cf8bc1327682ab2a7b474ad2d8b73 100644 --- a/chart/spec/integration/check_config_spec.rb +++ b/chart/spec/integration/check_config_spec.rb @@ -21,7 +21,7 @@ describe 'checkConfig template' do YAML.safe_load(%( redis: install: true - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -32,7 +32,7 @@ describe 'checkConfig template' do redis: cache: host: foo - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'If configuring multiple Redis servers, you can not use the in-chart Redis server' } @@ -50,7 +50,7 @@ describe 'checkConfig template' do enabled: true create: false name: myaccount - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_values) do @@ -60,7 +60,7 @@ describe 'checkConfig template' do 
enabled: true create: true name: myaccount - )).merge(default_required_values) + )).deep_merge!(default_required_values) end let(:error_output) { 'Please set `global.serviceAccount.create=false`' } diff --git a/chart/templates/NOTES.txt b/chart/templates/NOTES.txt index 1db16bc7dc8022c80482d5ccb8fda00ea05881fd..d0465eda9f9b2043ebcf6fd7502c1691e143e975 100644 --- a/chart/templates/NOTES.txt +++ b/chart/templates/NOTES.txt @@ -2,6 +2,12 @@ {{- $WARNING := "\n=== WARNING" -}} {{- $CRITICAL := "\n=== CRITICAL" -}} +{{ $NOTICE }} +GitLab 18/GitLab chart 9.0 will release in May 2025. +Please check the upcoming deprecations and removals at: +* https://docs.gitlab.com/ee/update/deprecations.html#gitlab-180, and +* https://docs.gitlab.com/charts/releases/9_0.html. + {{- /* If any development subchart is enabled, note it is not production ready */}} {{- $enabledNonProdCharts := fromJsonArray (include "gitlab.nonProdCharts.enabledNames" .) -}} {{- if not (empty $enabledNonProdCharts) }} diff --git a/chart/templates/_checkConfig.tpl b/chart/templates/_checkConfig.tpl index a4ecd1f75a49c674de34d65c7c0299faea52ec6f..73e1dd048d68748c8e6ab7b8ce3a865e7f7523ec 100644 --- a/chart/templates/_checkConfig.tpl +++ b/chart/templates/_checkConfig.tpl @@ -105,6 +105,10 @@ Due to gotpl scoping, we can't make use of `range`, so we have to add action lin {{/* _checkConfig_omniauth.tpl*/}} {{- $messages = append $messages (include "gitlab.checkConfig.omniauth.providerFormat" .) -}} +{{/* _checkConfig_kas.tpl*/}} +{{- $messages = append $messages (include "gitlab.checkConfig.kas.autoflowTemporalNamespace" .) -}} +{{- $messages = append $messages (include "gitlab.checkConfig.kas.autoflowTemporalWorkerMtls" .) -}} + {{/* other checks */}} {{- $messages = append $messages (include "gitlab.checkConfig.multipleRedis" .) -}} {{- $messages = append $messages (include "gitlab.checkConfig.redisYmlOverride" .) 
-}} diff --git a/chart/templates/_checkConfig_kas.tpl b/chart/templates/_checkConfig_kas.tpl new file mode 100644 index 0000000000000000000000000000000000000000..7e38abc5adf0289ad51d859683b69d4fcacba67f --- /dev/null +++ b/chart/templates/_checkConfig_kas.tpl @@ -0,0 +1,29 @@ +{{/* +Ensures that Temporal namespace is configured when AutoFlow is enabled + +*/}} +{{- define "gitlab.checkConfig.kas.autoflowTemporalNamespace" -}} +{{- $kas := .Values.gitlab.kas }} +{{- if ($kas.autoflow).enabled -}} +{{- if not ($kas.autoflow.temporal).namespace -}} +kas: + Temporal namespace is required when AutoFlow is enabled. Set `autoflow.temporal.namespace` to your unique namespace value +{{- end -}} +{{- end -}} +{{- end -}} +{{/* END gitlab.checkConfig.kas.autoflowTemporalNamespace */}} + +{{/* +Ensures that Temporal Worker mTLS certificates are configured when AutoFlow is enabled + +*/}} +{{- define "gitlab.checkConfig.kas.autoflowTemporalWorkerMtls" -}} +{{- $kas := .Values.gitlab.kas }} +{{- if ($kas.autoflow).enabled -}} +{{- if not (($kas.autoflow.temporal).workerMtls).secretName -}} +kas: + Temporal worker mTLS secret Name is required when AutoFlow is enabled. 
Use `autoflow.temporal.workerMtls.secretName` to specify the name of the Kubernetes secret containing the worker mTLS key and cert +{{- end -}} +{{- end -}} +{{- end -}} +{{/* END gitlab.checkConfig.kas.autoflowTemporalWorkerMtls */}} diff --git a/chart/templates/_kas.tpl b/chart/templates/_kas.tpl index 26a4155da9caed55a22e1ed1ef50b4dd1577fd70..ad440bbc4437cec5c154ea02de1f3e410aa900df 100644 --- a/chart/templates/_kas.tpl +++ b/chart/templates/_kas.tpl @@ -59,3 +59,27 @@ Return the gitlab-kas WebSocket Token secret {{- end -}} {{- default "kas_websocket_token_secret" $key | quote -}} {{- end -}} + +{{/* +Return the gitlab-kas AutoFlow Temporal Workflow data encryption secret +*/}} + +{{- define "gitlab.kas.autoflow.temporal.workflowDataEncryption.secret" -}} +{{- $secret := "" -}} +{{- if eq .Chart.Name "kas" -}} +{{- $secret = ((.Values.autoflow.temporal).workflowDataEncryption).secret -}} +{{- else -}} +{{- $secret = ((.Values.gitlab.kas.autoflow.temporal).workflowDataEncryption).secret -}} +{{- end -}} +{{- default (printf "%s-kas-autoflow-temporal-workflow-data-encryption-secret" .Release.Name) $secret | quote -}} +{{- end -}} + +{{- define "gitlab.kas.autoflow.temporal.workflowDataEncryption.key" -}} +{{- $key := "" -}} +{{- if eq .Chart.Name "kas" -}} +{{- $key = ((.Values.autoflow.temporal).workflowDataEncryption).key -}} +{{- else -}} +{{- $key = ((.Values.gitlab.kas.autoflow.temporal).workflowDataEncryption).key -}} +{{- end -}} +{{- default "kas_autoflow_temporal_workflow_data_encryption" $key | quote -}} +{{- end -}} diff --git a/chart/templates/_runcheck.tpl b/chart/templates/_runcheck.tpl index 6d17a765110f58a7b73e774ef298900dbf7e1170..6d472461d5eaa9275ec12a0f0c6fbcbff536ac65 100644 --- a/chart/templates/_runcheck.tpl +++ b/chart/templates/_runcheck.tpl @@ -35,8 +35,8 @@ if [ -d "${secrets_dir}" ]; then fi fi fi -MIN_VERSION=17.5 -CHART_MIN_VERSION=8.5 +MIN_VERSION=17.8 +CHART_MIN_VERSION=8.8 # Remove 'v' prefix from GitLab version if present (set in 
Chart.yaml appVersions) GITLAB_VERSION=${GITLAB_VERSION#v} diff --git a/chart/templates/shared-secrets/_generate_secrets.sh.tpl b/chart/templates/shared-secrets/_generate_secrets.sh.tpl index 7bdeace16e6aa500fbbf63840e4bb5741f080180..0238b44da46f79cc3e9555a5c49b2fcfd92a824b 100644 --- a/chart/templates/shared-secrets/_generate_secrets.sh.tpl +++ b/chart/templates/shared-secrets/_generate_secrets.sh.tpl @@ -134,6 +134,13 @@ generate_secret_if_needed {{ template "gitlab.kas.privateApi.secret" . }} --from # Gitlab-kas WebSocket Token secret generate_secret_if_needed {{ template "gitlab.kas.websocketToken.secret" . }} --from-literal={{ template "gitlab.kas.websocketToken.key" . }}=$(gen_random_base64 72) + +{{ if (.Values.gitlab.kas.autoflow).enabled -}} +# Gitlab-kas AutoFlow Temporal Workflow Data Encryption Secret +trap 'shred --remove autoflow-temporal-workflow-data-encryption-secret.bin' EXIT +openssl rand 32 > autoflow-temporal-workflow-data-encryption-secret.bin +generate_secret_if_needed {{ template "gitlab.kas.autoflow.temporal.workflowDataEncryption.secret" . }} --from-file={{ template "gitlab.kas.autoflow.temporal.workflowDataEncryption.key" . }}=autoflow-temporal-workflow-data-encryption-secret.bin +{{ end }} {{ end }} # Gitlab-suggested-reviewers secret diff --git a/chart/templates/shared-secrets/self-signed-cert-job.yml b/chart/templates/shared-secrets/self-signed-cert-job.yml index f5159b7208465b98729f20a382ea04f72d32a0c3..651e645350794ea35e79d4c136751c58a275443f 100644 --- a/chart/templates/shared-secrets/self-signed-cert-job.yml +++ b/chart/templates/shared-secrets/self-signed-cert-job.yml @@ -21,8 +21,9 @@ spec: template: metadata: labels: - app: {{ template "name" . }} - release: {{ .Release.Name }} + {{- include "gitlab.standardLabels" . | nindent 8 }} + {{- include "gitlab.commonLabels" . | nindent 8 }} + {{- include "gitlab.podLabels" . 
| nindent 8 }} annotations: {{- range $key, $value := $sharedSecretValues.annotations }} {{ $key }}: {{ $value | quote }} diff --git a/chart/templates/upgrade_check_hook.yaml b/chart/templates/upgrade_check_hook.yaml index aeef33984d6161cf1eb330163be0ea22b580a0ae..fa4cd5de23a4fcb91e6f2b74171b259f1d173bff 100644 --- a/chart/templates/upgrade_check_hook.yaml +++ b/chart/templates/upgrade_check_hook.yaml @@ -36,8 +36,9 @@ spec: template: metadata: labels: - app: {{ template "name" . }} - release: {{ .Release.Name }} + {{- include "gitlab.standardLabels" . | nindent 8 }} + {{- include "gitlab.commonLabels" . | nindent 8 }} + {{- include "gitlab.podLabels" . | nindent 8 }} {{- if .Values.upgradeCheck.annotations }} {{- range $key, $value := .Values.upgradeCheck.annotations }} annotations: diff --git a/chart/values.yaml b/chart/values.yaml index 616dbca65d4bfbea85f3f110893afaa3990b97b2..644a81e1f585286e48970a4db8b399b98aa3dda9 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -58,7 +58,7 @@ global: edition: ee ## https://docs.gitlab.com/charts/charts/globals#gitlab-version - gitlabVersion: "17.8.2" + gitlabVersion: "17.9.1" ## https://docs.gitlab.com/charts/charts/globals#application-resource application: @@ -827,7 +827,7 @@ global: certificates: image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/certificates - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry init: @@ -876,7 +876,7 @@ global: kubectl: image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/kubectl - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry securityContext: @@ -893,7 +893,7 @@ global: # 1. UBI does not have the newly required /scripts/set-config template generator in its entrypoint. # a. 
trying gitlab-base per https://repo1.dso.mil/dsop/gitlab/gitlab/gitlab-base/-/issues/77 repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-base - tag: "17.8.2" + tag: "17.9.1" pullSecrets: - name: private-registry @@ -1391,7 +1391,7 @@ postgresql: image: registry: registry1.dso.mil repository: ironbank/opensource/postgres/postgresql - tag: "14.16" + tag: "14.17" pullSecrets: - private-registry auth: @@ -1480,7 +1480,7 @@ registry: memory: 1024Mi image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-container-registry - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry ingress: @@ -1620,7 +1620,7 @@ gitlab: app: gitaly image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-toolbox - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry init: @@ -1697,7 +1697,7 @@ gitlab: - ALL image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-exporter - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry metrics: @@ -1742,7 +1742,7 @@ gitlab: memory: 1.5G image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-toolbox - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry securityContext: @@ -1791,7 +1791,7 @@ gitlab: memory: 2.5G # = 2 * 1.25G assuming there are 2 workerProcesses configured image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-webservice - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry workhorse: @@ -1804,7 +1804,7 @@ gitlab: cpu: 600m memory: 2.5G image: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-workhorse - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry metrics: @@ -1828,7 +1828,7 @@ gitlab: sidekiq: image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-sidekiq - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry init: @@ -1865,7 +1865,7 @@ gitlab: gitaly: image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitaly - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: 
private-registry init: @@ -1906,7 +1906,7 @@ gitlab: gitlab-shell: image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-shell - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry init: @@ -1950,7 +1950,7 @@ gitlab: mailroom: image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-mailroom - tag: 17.8.2 + tag: 17.9.1 pullSecrets: - name: private-registry containerSecurityContext: @@ -1967,7 +1967,7 @@ gitlab: type: ClusterIP image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-pages - tag: 17.8.2 + tag: 17.9.1 containerSecurityContext: capabilities: drop: @@ -1978,7 +1978,7 @@ gitlab: praefect: image: repository: registry1.dso.mil/ironbank/gitlab/gitlab/gitaly - tag: 17.8.2 + tag: 17.9.1 init: resources: limits: diff --git a/tests/images.txt b/tests/images.txt index 1dc99134108cda670053cc01151eb602205439ca..fddaac648d8522105c5710835d20d903cd540266 100644 --- a/tests/images.txt +++ b/tests/images.txt @@ -1,2 +1,2 @@ -registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-exporter:17.8.2 -registry1.dso.mil/ironbank/gitlab/gitlab/kubectl:17.8.2 +registry1.dso.mil/ironbank/gitlab/gitlab/gitlab-exporter:17.9.1 +registry1.dso.mil/ironbank/gitlab/gitlab/kubectl:17.9.1