UNCLASSIFIED

Commit 74512f88 authored by bhearn's avatar bhearn
Browse files

update redis dep

parent 4cfc9c82
...@@ -3,6 +3,12 @@ ...@@ -3,6 +3,12 @@
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
--- ---
## [1.12.15-bb.1]
### Changed
- Updated Redis dependency to 14.1.0-bb.0
### UPGRADE NOTICE
- A clean upgrade job will run which requires complete deletion of the previous redis instance, which means downtime can be expected for Anchore Enterprise UI users. Multiple values were changed and shifted around — most importantly, `anchore-ui-redis.password` is now `anchore-ui-redis.auth.password`. By default, your old password (whatever is in the secret) will be used and will override any values specified
## [1.12.15-bb.0] ## [1.12.15-bb.0]
### Changed ### Changed
- Updated docs for BB documentation standards - Updated docs for BB documentation standards
......
...@@ -7,9 +7,9 @@ dependencies: ...@@ -7,9 +7,9 @@ dependencies:
version: 1.0.1 version: 1.0.1
- name: redis - name: redis
repository: file://./deps/redis repository: file://./deps/redis
version: 12.8.3-bb.0 version: 14.1.0-bb.0
- name: bb-test-lib - name: bb-test-lib
repository: oci://registry.dso.mil/platform-one/big-bang/pipeline-templates/pipeline-templates repository: oci://registry.dso.mil/platform-one/big-bang/pipeline-templates/pipeline-templates
version: 0.4.0 version: 0.4.0
digest: sha256:cdf6e2694ba10c26845caffc96343262185f697595fdcb658c1c6e9796ddb029 digest: sha256:95fc02eb4c73428f58530043f2ccea983eb2de36c3e2bed6566deaff6552285c
generated: "2021-05-11T11:23:19.071211-04:00" generated: "2021-06-11T14:51:29.578969-04:00"
apiVersion: v2 apiVersion: v2
name: anchore-engine name: anchore-engine
version: 1.12.15-bb.0 version: 1.12.15-bb.1
appVersion: 0.9.4 appVersion: 0.9.4
description: Anchore container analysis and policy evaluation engine service description: Anchore container analysis and policy evaluation engine service
keywords: keywords:
...@@ -33,7 +33,7 @@ dependencies: ...@@ -33,7 +33,7 @@ dependencies:
condition: anchore-feeds-db.enabled,anchoreEnterpriseGlobal.enabled condition: anchore-feeds-db.enabled,anchoreEnterpriseGlobal.enabled
alias: anchore-feeds-db alias: anchore-feeds-db
- name: redis - name: redis
version: "12.8.3-bb.0" version: "14.1.0-bb.0"
repository: "file://./deps/redis" repository: "file://./deps/redis"
condition: anchore-ui-redis.enabled,anchoreEnterpriseGlobal.enabled condition: anchore-ui-redis.enabled,anchoreEnterpriseGlobal.enabled
alias: anchore-ui-redis alias: anchore-ui-redis
......
dependencies: dependencies:
- name: common - name: common
repository: https://charts.bitnami.com/bitnami repository: https://charts.bitnami.com/bitnami
version: 1.4.1 version: 1.5.2
digest: sha256:81be4c0ebd0a81952423b24268e82697231b8c07991ee60b23b950ff1db003a2 digest: sha256:7b5a8ece9b57d70ef47eb7ed27e6f66b059fb0fc1f2ca59a15bb495e32366690
generated: "2021-02-24T06:54:40.099558726Z" generated: "2021-06-07T12:05:28.337668-06:00"
annotations: annotations:
category: Database category: Database
apiVersion: v2 apiVersion: v2
appVersion: 6.0.12 appVersion: 6.2.2
dependencies: dependencies:
- name: common - name: common
repository: https://charts.bitnami.com/bitnami repository: https://charts.bitnami.com/bitnami
...@@ -25,4 +25,4 @@ name: redis ...@@ -25,4 +25,4 @@ name: redis
sources: sources:
- https://github.com/bitnami/bitnami-docker-redis - https://github.com/bitnami/bitnami-docker-redis
- http://redis.io/ - http://redis.io/
version: 12.8.3-bb.0 version: 14.1.0-bb.0
...@@ -5,7 +5,7 @@ metadata: ...@@ -5,7 +5,7 @@ metadata:
upstream: upstream:
type: git type: git
git: git:
commit: ba3a0e31485ed629e379487ceff44ff4863e28ef commit: 424349e5f1d571a2dbddf8c6c0621db120986c1f
repo: https://repo1.dso.mil/platform-one/big-bang/apps/sandbox/redis repo: https://repo1.dso.mil/platform-one/big-bang/apps/sandbox/redis
directory: /chart directory: /chart
ref: main ref: 14.1.0-bb.0
...@@ -59,255 +59,374 @@ The command removes all the Kubernetes components associated with the chart and ...@@ -59,255 +59,374 @@ The command removes all the Kubernetes components associated with the chart and
## Parameters ## Parameters
The following table lists the configurable parameters of the Redis<sup>TM</sup> chart and their default values. ### Global parameters
| Parameter | Description | Default | | Name | Description | Value |
|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| | ------------------------- | ----------------------------------------------------- | ----- |
| `global.imageRegistry` | Global Docker image registry | `nil` | | `global.imageRegistry` | Global Docker image registry | `nil` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | | `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | | `global.storageClass` | Global StorageClass for Persistent Volume(s) | `nil` |
| `global.redis.password` | Redis<sup>TM</sup> password (overrides `password`) | `nil` | | `global.redis.password` | Global Redis(TM) password (overrides `auth.password`) | `nil` |
| `image.registry` | Redis<sup>TM</sup> Image registry | `docker.io` |
| `image.repository` | Redis<sup>TM</sup> Image name | `bitnami/redis` |
| `image.tag` | Redis<sup>TM</sup> Image tag | `{TAG_NAME}` | ### Common parameters
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | | Name | Description | Value |
| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | | ------------------- | -------------------------------------------------- | --------------- |
| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | | `kubeVersion` | Override Kubernetes version | `nil` |
| `cluster.enabled` | Use master-slave topology | `true` | | `nameOverride` | String to partially override common.names.fullname | `nil` |
| `cluster.slaveCount` | Number of slaves | `2` | | `fullnameOverride` | String to fully override common.names.fullname | `nil` |
| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | | `commonLabels` | Labels to add to all deployed objects | `{}` |
| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | | `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
| `usePassword` | Use password | `true` | | `clusterDomain` | Kubernetes cluster domain name | `cluster.local` |
| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | | `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `password` | Redis<sup>TM</sup> password (ignored if existingSecret set) | Randomly generated |
| `configmap` | Additional common Redis<sup>TM</sup> node configuration (this value is evaluated as a template) | See values.yaml |
| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | ### Redis(TM) Image parameters
| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | | Name | Description | Value |
| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | | ------------------- | ---------------------------------------------------- | --------------------- |
| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | | `image.registry` | Redis(TM) image registry | `docker.io` |
| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | | `image.repository` | Redis(TM) image repository | `bitnami/redis` |
| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | | `image.tag` | Redis(TM) image tag (immutable tags are recommended) | `6.2.1-debian-10-r36` |
| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | | `image.pullPolicy` | Redis(TM) image pull policy | `IfNotPresent` |
| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | | `image.pullSecrets` | Redis(TM) image pull secrets | `[]` |
| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | | `image.debug` | Enable image debug mode | `false` |
| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | ### Redis(TM) common configuration parameters
| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. | `nil` |
| `rbac.create` | Specifies whether RBAC resources should be created | `false` | | Name | Description | Value |
| `rbac.role.rules` | Rules to create | `[]` | | -------------------------------- | ------------------------------------------------------------------------------------ | ------------- |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` | | `architecture` | Redis(TM) architecture. Allowed values: `standalone` or `replication` | `replication` |
| `metrics.image.registry` | Redis<sup>TM</sup> exporter image registry | `docker.io` | | `auth.enabled` | Enable password authentication | `true` |
| `metrics.image.repository` | Redis<sup>TM</sup> exporter image name | `bitnami/redis-exporter` | | `auth.sentinel` | Enable password authentication on sentinels too | `true` |
| `metrics.image.tag` | Redis<sup>TM</sup> exporter image tag | `{TAG_NAME}` | | `auth.password` | Redis(TM) password | `""` |
| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | | `auth.existingSecret` | The name of an existing secret with Redis(TM) credentials | `nil` |
| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | | `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `nil` |
| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | | `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` |
| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | | `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Redis(TM) nodes | `nil` |
| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} |
| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` |
| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | ### Redis(TM) master configuration parameters
| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` |
| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | | Name | Description | Value |
| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | | ------------------------------------------- | ------------------------------------------------------------------------------------------------ | --------------- |
| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | | `master.configuration` | Configuration for Redis(TM) master nodes | `nil` |
| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. Value is evaluated as a template | `[]` | | `master.disableCommands` | Array with Redis(TM) commands to disable on master nodes | `[]` |
| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | | `master.command` | Override default container command (useful when using custom images) | `[]` |
| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | | `master.args` | Override default container args (useful when using custom images) | `[]` |
| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | | `master.preExecCmds` | Additional commands to run prior to starting Redis(TM) master | `[]` |
| `metrics.service.labels` | Additional labels for the metrics service | {} | | `master.extraFlags` | Array with additional command line flags for Redis(TM) master | `[]` |
| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | | `master.extraEnvVars` | Array with extra environment variables to add to Redis(TM) master nodes | `[]` |
| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` | | `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis(TM) master nodes | `nil` |
| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | | `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis(TM) master nodes | `nil` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | | `master.containerPort` | Container port to open on Redis(TM) master nodes | `6379` |
| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | | `master.livenessProbe.enabled` | Enable livenessProbe on Redis(TM) master nodes | `true` |
| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | | `master.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | | `master.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | | `master.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `master.hostAliases` | Add deployment host aliases | `[]` | | `master.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | | `master.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | | `master.readinessProbe.enabled` | Enable readinessProbe on Redis(TM) master nodes | `true` |
| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | | `master.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | | `master.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
| `master.persistence.size` | Size of data volume | `8Gi` | | `master.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | | `master.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | | `master.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` | | `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` | | `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` | | `master.resources.limits` | The resources limits for the Redis(TM) master containers | `{}` |
| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | | `master.resources.requests` | The requested resources for the Redis(TM) master containers | `{}` |
| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | | `master.podSecurityContext.enabled` | Enabled Redis(TM) master pods' Security Context | `true` |
| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` | | `master.podSecurityContext.fsGroup` | Set Redis(TM) master pod's Security Context fsGroup | `1001` |
| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` | | `master.containerSecurityContext.enabled` | Enabled Redis(TM) master containers' Security Context | `true` |
| `master.podLabels` | Additional labels for Redis<sup>TM</sup> master pod | {} | | `master.containerSecurityContext.runAsUser` | Set Redis(TM) master containers' Security Context runAsUser | `1001` |
| `master.podAnnotations` | Additional annotations for Redis<sup>TM</sup> master pod | {} | | `master.schedulerName` | Alternate scheduler for Redis(TM) master pods | `nil` |
| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set | `[]` | | `master.updateStrategy.type` | Redis(TM) master statefulset strategy type | `RollingUpdate` |
| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the master's stateful set | `[]` | | `master.priorityClassName` | Redis(TM) master pods' priorityClassName | `""` |
| `master.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the master's stateful set | `[]` | | `master.hostAliases` | Redis(TM) master pods host aliases | `[]` |
| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | | `master.podLabels` | Extra labels for Redis(TM) master pods | `{}` |
| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | | `master.podAnnotations` | Annotations for Redis(TM) master pods | `{}` |
| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` | | `master.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis(TM) master pods | `false` |
| `redisPort` | Redis<sup>TM</sup> port (in both master and slaves) | `6379` | | `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `tls.enabled` | Enable TLS support for replication traffic | `false` | | `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `tls.authClients` | Require clients to authenticate or not | `true` | | `master.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | | `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` |
| `tls.certFilename` | Certificate filename | `nil` | | `master.nodeAffinityPreset.values` | Node label values to match. Ignored if `master.affinity` is set | `[]` |
| `tls.certKeyFilename` | Certificate key filename | `nil` | | `master.affinity` | Affinity for Redis(TM) master pods assignment | `{}` |
| `tls.certCAFilename` | CA Certificate filename | `nil` | | `master.nodeSelector` | Node labels for Redis(TM) master pods assignment | `{}` |
| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` | | `master.tolerations` | Tolerations for Redis(TM) master pods assignment | `[]` |
| `master.command` | Redis<sup>TM</sup> master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | | `master.spreadConstraints` | Spread Constraints for Redis(TM) master pod assignment | `{}` |
| `master.preExecCmds` | Text to insert into the startup script immediately prior to `master.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | | `master.lifecycleHooks` | Lifecycle hooks for the Redis(TM) master container(s) to automate configuration before or after startup | `{}` |
| `master.configmap` | Additional Redis<sup>TM</sup> configuration for the master nodes (this value is evaluated as a template) | `nil` | | `master.extraVolumes` | Optionally specify extra list of additional volumes for the Redis(TM) master pod(s) | `[]` |
| `master.disableCommands` | Array of Redis<sup>TM</sup> commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | | `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis(TM) master container(s) | `[]` |
| `master.extraFlags` | Redis<sup>TM</sup> master additional command line flags | [] | | `master.sidecars` | Add additional sidecar containers to the Redis(TM) master pod(s) | `{}` |
| `master.nodeSelector` | Redis<sup>TM</sup> master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | | `master.initContainers` | Add additional init containers to the Redis(TM) master pod(s) | `{}` |
| `master.tolerations` | Toleration labels for Redis<sup>TM</sup> master pod assignment | [] | | `master.persistence.enabled` | Enable persistence on Redis(TM) master nodes using Persistent Volume Claims | `true` |
| `master.affinity` | Affinity settings for Redis<sup>TM</sup> master pod assignment | {} | | `master.persistence.path` | The path the volume will be mounted at on Redis(TM) master containers | `/data` |
| `master.schedulerName` | Name of an alternate scheduler | `nil` | | `master.persistence.subPath` | The subdirectory of the volume to mount on Redis(TM) master containers | `""` |
| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | | `master.persistence.storageClass` | Persistent Volume storage class | `nil` |
| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | | `master.persistence.accessModes` | Persistent Volume access modes | `[]` |
| `master.service.port` | Kubernetes Service port (redis master) | `6379` | | `master.persistence.size` | Persistent Volume size | `8Gi` |
| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | | `master.persistence.annotations` | Additional custom annotations for the PVC | `{}` |
| `master.service.annotations` | annotations for redis master service | {} | | `master.persistence.selector` | Additional labels to match for the PVC | `{}` |
| `master.service.labels` | Additional labels for redis master service | {} | | `master.persistence.existingClaim` | Use a existing PVC which must be created manually before bound | `nil` |
| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | | `master.service.type` | Redis(TM) master service type | `ClusterIP` |
| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | | `master.service.port` | Redis(TM) master service port | `6379` |
| `master.resources` | Redis<sup>TM</sup> master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | | `master.service.nodePort` | Node port for Redis(TM) master | `nil` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | | `master.service.externalTrafficPolicy` | Redis(TM) master service external traffic policy | `Cluster` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | | `master.service.clusterIP` | Redis(TM) master service Cluster IP | `nil` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | | `master.service.loadBalancerIP` | Redis(TM) master service Load Balancer IP | `nil` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | | `master.service.loadBalancerSourceRanges` | Redis(TM) master service Load Balancer sources | `[]` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | | `master.service.annotations` | Additional custom annotations for Redis(TM) master service | `{}` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | | `master.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-master pods | `30` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | ### Redis(TM) replicas configuration parameters
| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | | Name | Description | Value |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | | -------------------------------------------- | ------------------------------------------------------------------------------------------------- | --------------- |
| `master.shareProcessNamespace` | Redis<sup>TM</sup> Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | | `replica.replicaCount` | Number of Redis(TM) replicas to deploy | `3` |
| `master.priorityClassName` | Redis<sup>TM</sup> Master pod priorityClassName | `nil` | | `replica.configuration` | Configuration for Redis(TM) replicas nodes | `nil` |
| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | | `replica.disableCommands` | Array with Redis(TM) commands to disable on replicas nodes | `[]` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | | `replica.command` | Override default container command (useful when using custom images) | `[]` |
| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | | `replica.args` | Override default container args (useful when using custom images) | `[]` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `"10"` | | `replica.preExecCmds` | Additional commands to run prior to starting Redis(TM) replicas | `[]` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | | `replica.extraFlags` | Array with additional command line flags for Redis(TM) replicas | `[]` |
| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | | `replica.extraEnvVars` | Array with extra environment variables to add to Redis(TM) replicas nodes | `[]` |
| `volumePermissions.securityContext.*` | Security context of the init container | `{}` | | `replica.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis(TM) replicas nodes | `nil` |
| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 | | `replica.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis(TM) replicas nodes | `nil` |
| `slave.hostAliases` | Add deployment host aliases | `[]` | | `replica.containerPort` | Container port to open on Redis(TM) replicas nodes | `6379` |
| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | | `replica.livenessProbe.enabled` | Enable livenessProbe on Redis(TM) replicas nodes | `true` |
| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | | `replica.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | | `replica.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
| `slave.service.annotations` | annotations for redis slave service | {} | | `replica.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `slave.service.labels` | Additional labels for redis slave service | {} | | `replica.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | | `replica.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis<sup>TM</sup> slave service type is `LoadBalancer` | `nil` | | `replica.readinessProbe.enabled` | Enable readinessProbe on Redis(TM) replicas nodes | `true` |
| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis<sup>TM</sup> slave service type is `LoadBalancer` | `nil` | | `replica.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `slave.command` | Redis<sup>TM</sup> slave entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | | `replica.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
| `slave.preExecCmds` | Text to insert into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | | `replica.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
| `slave.configmap` | Additional Redis<sup>TM</sup> configuration for the slave nodes (this value is evaluated as a template) | `nil` | | `replica.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
| `slave.disableCommands` | Array of Redis<sup>TM</sup> commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | | `replica.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `slave.extraFlags` | Redis<sup>TM</sup> slave additional command line flags | `[]` | | `replica.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | | `replica.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | | `replica.resources.limits` | The resources limits for the Redis(TM) replicas containers | `{}` |
| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | | `replica.resources.requests` | The requested resources for the Redis(TM) replicas containers | `{}` |
| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | | `replica.podSecurityContext.enabled` | Enabled Redis(TM) replicas pods' Security Context | `true` |
| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | | `replica.podSecurityContext.fsGroup` | Set Redis(TM) replicas pod's Security Context fsGroup | `1001` |
| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | | `replica.containerSecurityContext.enabled` | Enabled Redis(TM) replicas containers' Security Context | `true` |
| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | | `replica.containerSecurityContext.runAsUser` | Set Redis(TM) replicas containers' Security Context runAsUser | `1001` |
| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | | `replica.schedulerName` | Alternate scheduler for Redis(TM) replicas pods | `nil` |
| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | | `replica.updateStrategy.type` | Redis(TM) replicas statefulset strategy type | `RollingUpdate` |
| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | | `replica.priorityClassName` | Redis(TM) replicas pods' priorityClassName | `""` |
| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | | `replica.hostAliases` | Redis(TM) replicas pods host aliases | `[]` |
| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `5` | | `replica.podLabels` | Extra labels for Redis(TM) replicas pods | `{}` |
| `slave.shareProcessNamespace` | Redis<sup>TM</sup> slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | | `replica.podAnnotations` | Annotations for Redis(TM) replicas pods | `{}` |
| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | | `replica.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis(TM) replicas pods | `false` |
| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | | `replica.podAffinityPreset` | Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | | `replica.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | | `replica.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | | `replica.nodeAffinityPreset.key` | Node label key to match. Ignored if `replica.affinity` is set | `""` |
| `slave.persistence.size` | Size of data volume | `8Gi` | | `replica.nodeAffinityPreset.values` | Node label values to match. Ignored if `replica.affinity` is set | `[]` |
| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | | `replica.affinity` | Affinity for Redis(TM) replicas pods assignment | `{}` |
| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | | `replica.nodeSelector` | Node labels for Redis(TM) replicas pods assignment | `{}` |
| `slave.statefulset.labels` | Additional labels for redis slave StatefulSet | `{}` | | `replica.tolerations` | Tolerations for Redis(TM) replicas pods assignment | `[]` |
| `slave.statefulset.annotations` | Additional annotations for redis slave StatefulSet | `{}` | | `replica.spreadConstraints` | Spread Constraints for Redis(TM) replicas pod assignment | `{}` |
| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | | `replica.lifecycleHooks` | for the Redis(TM) replica container(s) to automate configuration before or after startup | `{}` |
| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | | `replica.extraVolumes` | Optionally specify extra list of additional volumes for the Redis(TM) replicas pod(s) | `[]` |
| `slave.statefulset.volumeClaimTemplates.labels` | Additional labels for redis slave StatefulSet volumeClaimTemplates | `{}` | | `replica.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis(TM) replicas container(s) | `[]` |
| `slave.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis slave StatefulSet volumeClaimTemplates | `{}` | | `replica.sidecars` | Add additional sidecar containers to the Redis(TM) replicas pod(s) | `{}` |
| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set | `[]` | | `replica.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis(TM) replicas pods | `false` |
| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the slave's stateful set | `[]` | | `replica.persistence.enabled` | Enable persistence on Redis(TM) replicas nodes using Persistent Volume Claims | `true` |
| `slave.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` | | `replica.persistence.path` | The path the volume will be mounted at on Redis(TM) replicas containers | `/data` |
| `slave.podLabels` | Additional labels for Redis<sup>TM</sup> slave pod | `master.podLabels` | | `replica.persistence.subPath` | The subdirectory of the volume to mount on Redis(TM) replicas containers | `""` |
| `slave.podAnnotations` | Additional annotations for Redis<sup>TM</sup> slave pod | `master.podAnnotations` | | `replica.persistence.storageClass` | Persistent Volume storage class | `nil` |
| `slave.schedulerName` | Name of an alternate scheduler | `nil` | | `replica.persistence.accessModes` | Persistent Volume access modes | `[]` |
| `slave.resources` | Redis<sup>TM</sup> slave CPU/Memory resource requests/limits | `{}` | | `replica.persistence.size` | Persistent Volume size | `8Gi` |
| `slave.affinity` | Enable node/pod affinity for slaves | {} | | `replica.persistence.annotations` | Additional custom annotations for the PVC | `{}` |
| `slave.tolerations` | Toleration labels for Redis<sup>TM</sup> slave pod assignment | [] | | `replica.persistence.selector` | Additional labels to match for the PVC | `{}` |
| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for Redis<sup>TM</sup> slave pod | {} | | `replica.service.type` | Redis(TM) replicas service type | `ClusterIP` |
| `slave.priorityClassName` | Redis<sup>TM</sup> Slave pod priorityClassName | `nil` | | `replica.service.port` | Redis(TM) replicas service port | `6379` |
| `sentinel.enabled` | Enable sentinel containers | `false` | | `replica.service.nodePort` | Node port for Redis(TM) replicas | `nil` |
| `sentinel.usePassword` | Use password for sentinel containers | `true` | | `replica.service.externalTrafficPolicy` | Redis(TM) replicas service external traffic policy | `Cluster` |
| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | | `replica.service.clusterIP` | Redis(TM) replicas service Cluster IP | `nil` |
| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | | `replica.service.loadBalancerIP` | Redis(TM) replicas service Load Balancer IP | `nil` |
| `sentinel.quorum` | Quorum for electing a new master | `2` | | `replica.service.loadBalancerSourceRanges` | Redis(TM) replicas service Load Balancer sources | `[]` |
| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis<sup>TM</sup> node is down | `60000` | | `replica.service.annotations` | Additional custom annotations for Redis(TM) replicas service | `{}` |
| `sentinel.failoverTimeout` | Timeout for performing an election failover | `18000` | | `replica.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-replicas pods | `30` |
| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` |
| `sentinel.port` | Redis<sup>TM</sup> Sentinel port | `26379` |
| `sentinel.cleanDelaySeconds` | Delay seconds before issuing the cleaning in the next node | `5` | ### Redis(TM) Sentinel configuration parameters
| `sentinel.configmap` | Additional Redis<sup>TM</sup> configuration for the sentinel nodes (this value is evaluated as a template) | `nil` |
| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | | Name | Description | Value |
| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | | --------------------------------------------- | ------------------------------------------------------------------------------------------------ | ------------------------ |
| `sentinel.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | | `sentinel.enabled` | Use Redis(TM) Sentinel on Redis(TM) pods. | `false` |
| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | | `sentinel.image.registry` | Redis(TM) Sentinel image registry | `docker.io` |
| `sentinel.service.annotations` | annotations for redis sentinel service | {} | | `sentinel.image.repository` | Redis(TM) Sentinel image repository | `bitnami/redis-sentinel` |
| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | | `sentinel.image.tag` | Redis(TM) Sentinel image tag (immutable tags are recommended) | `6.0.9-debian-10-r38` |
| `sentinel.service.redisPort` | Kubernetes Service port for Redis<sup>TM</sup> read only operations | `6379` | | `sentinel.image.pullPolicy` | Redis(TM) Sentinel image pull policy | `IfNotPresent` |
| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis<sup>TM</sup> sentinel | `26379` | | `sentinel.image.pullSecrets` | Redis(TM) Sentinel image pull secrets | `[]` |
| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis<sup>TM</sup> read only operations | `` | | `sentinel.image.debug` | Enable image debug mode | `false` |
| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis<sup>TM</sup> sentinel | `` | | `sentinel.masterSet` | Master set name | `mymaster` |
| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis<sup>TM</sup> sentinel service type is `LoadBalancer` | `nil` | | `sentinel.failoverTimeout` | Timeout for performing an election failover | `18000` |
| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | | `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis(TM) node is down | `60000` |
| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | | `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` |
| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | | `sentinel.cleanDelaySeconds` | Delay seconds when cleaning nodes IPs | `5` |
| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | | `sentinel.parallelSyncs` | Number of replicas that can be reconfigured in parallel to use the new master after a failover | `1` |
| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | | `sentinel.staticID` | Enable static Sentinel IDs for each replica | `false` |
| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | | `sentinel.configuration` | Configuration for Redis(TM) Sentinel nodes | `nil` |
| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | | `sentinel.command` | Override default container command (useful when using custom images) | `[]` |
| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | | `sentinel.args` | Override default container args (useful when using custom images) | `[]` |
| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | | `sentinel.preExecCmds` | Additional commands to run prior to starting Redis(TM) Sentinel | `[]` |
| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | | `sentinel.containerPort` | Container port to open on Redis(TM) Sentinel nodes | `26379` |
| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | | `sentinel.livenessProbe.enabled` | Enable livenessProbe on Redis(TM) Sentinel nodes | `true` |
| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | | `sentinel.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
| `sentinel.resources` | Redis<sup>TM</sup> sentinel CPU/Memory resource requests/limits | `{}` | | `sentinel.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
| `sentinel.image.registry` | Redis<sup>TM</sup> Sentinel Image registry | `docker.io` | | `sentinel.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `sentinel.image.repository` | Redis<sup>TM</sup> Sentinel Image name | `bitnami/redis-sentinel` | | `sentinel.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
| `sentinel.image.tag` | Redis<sup>TM</sup> Sentinel Image tag | `{TAG_NAME}` | | `sentinel.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | | `sentinel.readinessProbe.enabled` | Enable readinessProbe on Redis(TM) Sentinel nodes | `true` |
| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | | `sentinel.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set | `[]` | | `sentinel.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the sentinel node stateful set | `[]` | | `sentinel.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` | | `sentinel.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
| `sentinel.preExecCmds` | Text to insert into the startup script immediately prior to `sentinel.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | | `sentinel.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | | `sentinel.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
| `sysctlImage.command` | sysctlImage command to execute | [] | | `sentinel.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | | `sentinel.resources.limits` | The resources limits for the Redis(TM) Sentinel containers | `{}` |
| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/bitnami-shell` | | `sentinel.resources.requests` | The requested resources for the Redis(TM) Sentinel containers | `{}` |
| `sysctlImage.tag` | sysctlImage Init container tag | `"10"` | | `sentinel.containerSecurityContext.enabled` | Enabled Redis(TM) Sentinel containers' Security Context | `true` |
| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | | `sentinel.containerSecurityContext.runAsUser` | Set Redis(TM) Sentinel containers' Security Context runAsUser | `1001` |
| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | | `sentinel.lifecycleHooks` | for the Redis(TM) sentinel container(s) to automate configuration before or after startup | `{}` |
| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | | `sentinel.extraVolumes` | Optionally specify extra list of additional volumes for the Redis(TM) Sentinel | `[]` |
| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | | `sentinel.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis(TM) Sentinel container(s) | `[]` |
| `sentinel.service.type` | Redis(TM) Sentinel service type | `ClusterIP` |
| `sentinel.service.port` | Redis(TM) service port for Redis(TM) | `6379` |
| `sentinel.service.sentinelPort` | Redis(TM) service port for Sentinel | `26379` |
| `sentinel.service.nodePorts.redis` | Node port for Redis(TM) | `nil` |
| `sentinel.service.nodePorts.sentinel` | Node port for Sentinel | `nil` |
| `sentinel.service.externalTrafficPolicy` | Redis(TM) Sentinel service external traffic policy | `Cluster` |
| `sentinel.service.clusterIP` | Redis(TM) Sentinel service Cluster IP | `nil` |
| `sentinel.service.loadBalancerIP` | Redis(TM) Sentinel service Load Balancer IP | `nil` |
| `sentinel.service.loadBalancerSourceRanges` | Redis(TM) Sentinel service Load Balancer sources | `[]` |
| `sentinel.service.annotations` | Additional custom annotations for Redis(TM) Sentinel service | `{}` |
| `sentinel.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-node pods | `30` |
### Other Parameters
| Name | Description | Value |
| --------------------------------------- | ------------------------------------------------------------------- | ------- |
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` |
| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` |
| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` |
| `rbac.create` | Specifies whether RBAC resources should be created | `false` |
| `rbac.rules` | Custom RBAC rules to set | `[]` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
| `pdb.create` | Specifies whether a PodDisruptionBudget should be created | `false` |
| `pdb.minAvailable` | Min number of pods that must still be available after the eviction | `1` |
| `pdb.maxUnavailable` | Max number of pods that can be unavailable after the eviction | `nil` |
| `tls.enabled` | Enable TLS traffic | `false` |
| `tls.authClients` | Require clients to authenticate | `true` |
| `tls.certificatesSecret` | The name of the existing secret that contains the TLS certificates | `nil` |
| `tls.certFilename` | Certificate filename | `nil` |
| `tls.certKeyFilename` | Certificate Key filename | `nil` |
| `tls.certCAFilename` | CA Certificate filename | `nil` |
| `tls.dhParamsFilename` | File containing DH params (in order to support DH based ciphers) | `nil` |
### Metrics Parameters
| Name | Description | Value |
| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------ | --------------------------------- |
| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis(TM) metrics | `false` |
| `metrics.image.registry` | Redis(TM) Exporter image registry | `docker.io` |
| `metrics.image.repository` | Redis(TM) Exporter image repository | `bitnami/redis-exporter` |
| `metrics.image.tag` | Redis(TM) Exporter image tag (immutable tags are recommended) | `1.20.0-debian-10-r16` |
| `metrics.image.pullPolicy` | Redis(TM) Exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Redis(TM) Exporter image pull secrets | `[]` |
| `metrics.redisTargetHost` | A way to specify an alternative Redis(TM) hostname | `localhost` |
| `metrics.extraArgs` | Extra arguments for Redis(TM) exporter, for example: | `{}` |
| `metrics.containerSecurityContext.enabled` | Enabled Redis(TM) exporter containers' Security Context | `true` |
| `metrics.containerSecurityContext.runAsUser` | Set Redis(TM) exporter containers' Security Context runAsUser | `1001` |
| `metrics.resources.limits` | The resources limits for the Redis(TM) exporter container | `{}` |
| `metrics.resources.requests` | The requested resources for the Redis(TM) exporter container | `{}` |
| `metrics.podLabels` | Extra labels for Redis(TM) exporter pods | `{}` |
| `metrics.podAnnotations` | Annotations for Redis(TM) exporter pods | `{}` |
| `metrics.service.type` | Redis(TM) exporter service type | `ClusterIP` |
| `metrics.service.port` | Redis(TM) exporter service port | `9121` |
| `metrics.service.externalTrafficPolicy` | Redis(TM) exporter service external traffic policy | `Cluster` |
| `metrics.service.loadBalancerIP` | Redis(TM) exporter service Load Balancer IP | `""` |
| `metrics.service.loadBalancerSourceRanges` | Redis(TM) exporter service Load Balancer sources | `[]` |
| `metrics.service.annotations` | Additional custom annotations for Redis(TM) exporter service | `{}` |
| `metrics.sentinel.enabled` | Start a sidecar prometheus exporter to expose Redis(TM) Sentinel metrics | `false` |
| `metrics.sentinel.image.registry` | Redis(TM) Sentinel Exporter image registry | `docker.io` |
| `metrics.sentinel.image.repository` | Redis(TM) Sentinel Exporter image repository | `bitnami/redis-sentinel-exporter` |
| `metrics.sentinel.image.tag` | Redis(TM) Sentinel Exporter image tag (immutable tags are recommended) | `1.7.1-debian-10-r109` |
| `metrics.sentinel.image.pullPolicy` | Redis(TM) Sentinel Exporter image pull policy | `IfNotPresent` |
| `metrics.sentinel.image.pullSecrets` | Redis(TM) Sentinel Exporter image pull secrets | `[]` |
| `metrics.sentinel.extraArgs` | Extra arguments for Redis(TM) Sentinel exporter, for example: | `{}` |
| `metrics.sentinel.containerSecurityContext.enabled` | Enabled Redis(TM) Sentinel exporter containers' Security Context | `true` |
| `metrics.sentinel.containerSecurityContext.runAsUser` | Set Redis(TM) Sentinel exporter containers' Security Context runAsUser | `1001` |
| `metrics.sentinel.resources.limits` | The resources limits for the Redis(TM) Sentinel exporter container | `{}` |
| `metrics.sentinel.resources.requests` | The requested resources for the Redis(TM) Sentinel exporter container | `{}` |
| `metrics.sentinel.service.type` | Redis(TM) Sentinel exporter service type | `ClusterIP` |
| `metrics.sentinel.service.port` | Redis(TM) Sentinel exporter service port | `9355` |
| `metrics.sentinel.service.externalTrafficPolicy` | Redis(TM) Sentinel exporter service external traffic policy | `Cluster` |
| `metrics.sentinel.service.loadBalancerIP` | Redis(TM) Sentinel exporter service Load Balancer IP | `""` |
| `metrics.sentinel.service.loadBalancerSourceRanges` | Redis(TM) Sentinel exporter service Load Balancer sources | `[]` |
| `metrics.sentinel.service.annotations` | Additional custom annotations for Redis(TM) Sentinel exporter service | `{}` |
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | `false` |
| `metrics.serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created | `nil` |
| `metrics.serviceMonitor.interval` | The interval at which metrics should be scraped | `30s` |
| `metrics.serviceMonitor.scrapeTimeout` | The timeout after which the scrape is ended | `nil` |
| `metrics.serviceMonitor.relabellings` | Metrics relabellings to add to the scrape endpoint | `[]` |
| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.enabled` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.prometheusRule.namespace` | The namespace in which the prometheusRule will be created | `nil` |
| `metrics.prometheusRule.additionalLabels` | Additional labels for the prometheusRule | `{}` |
| `metrics.prometheusRule.rules` | Custom Prometheus rules | `[]` |
### Init Container Parameters
| Name | Description | Value |
| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------------- |
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `10` |
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `Always` |
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` |
| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` |
| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` |
| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` |
| `sysctl.image.registry` | Bitnami Shell image registry | `docker.io` |
| `sysctl.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `10` |
| `sysctl.image.pullPolicy` | Bitnami Shell image pull policy | `Always` |
| `sysctl.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
| `sysctl.command` | Override default init-sysctl container command (useful when using custom images) | `[]` |
| `sysctl.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` |
| `sysctl.resources.limits` | The resources limits for the init container | `{}` |
| `sysctl.resources.requests` | The requested resources for the init container | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash ```bash
$ helm install my-release \ $ helm install my-release \
--set password=secretpassword \ --set auth.password=secretpassword \
bitnami/redis bitnami/redis
``` ```
...@@ -323,8 +442,6 @@ $ helm install my-release -f values.yaml bitnami/redis ...@@ -323,8 +442,6 @@ $ helm install my-release -f values.yaml bitnami/redis
> **Tip**: You can use the default [values.yaml](values.yaml) > **Tip**: You can use the default [values.yaml](values.yaml)
> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the Redis<sup>TM</sup> pod as it attempts to write to the `/bitnami` directory. Consider installing Redis<sup>TM</sup> with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information.
## Configuration and installation details ## Configuration and installation details
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) ### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
...@@ -339,31 +456,39 @@ To modify the Redis<sup>TM</sup> version used in this chart you can specify a [v ...@@ -339,31 +456,39 @@ To modify the Redis<sup>TM</sup> version used in this chart you can specify a [v
### Cluster topologies ### Cluster topologies
#### Default: Master-Slave #### Default: Master-Replicas
When installing the chart with `architecture=replication`, it will deploy a Redis<sup>TM</sup> master StatefulSet (only one master node allowed) and a Redis<sup>TM</sup> replicas StatefulSet. The replicas will be read-replicas of the master. Two services will be exposed:
- Redis<sup>TM</sup> Master service: Points to the master, where read-write operations can be performed
- Redis<sup>TM</sup> Replicas service: Points to the replicas, where only read operations are allowed.
When installing the chart with `cluster.enabled=true`, it will deploy a Redis<sup>TM</sup> master StatefulSet (only one master node allowed) and a Redis<sup>TM</sup> slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: In case the master crashes, the replicas will wait until the master node is respawned again by the Kubernetes Controller Manager.
- Redis<sup>TM</sup> Master service: Points to the master, where read-write operations can be performed #### Standalone
- Redis<sup>TM</sup> Slave service: Points to the slaves, where only read operations are allowed.
In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. When installing the chart with `architecture=standalone`, it will deploy a standalone Redis<sup>TM</sup> StatefulSet (only one node allowed). A single service will be exposed:
#### Master-Slave with Sentinel - Redis<sup>TM</sup> Master service: Points to the master, where read-write operations can be performed
When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis<sup>TM</sup> master StatefulSet (only one master allowed) and a Redis<sup>TM</sup> slave StatefulSet. In this case, the pods will contain an extra container with Redis<sup>TM</sup> Sentinel. This container will form a cluster of Redis<sup>TM</sup> Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed: #### Master-Replicas with Sentinel
- Redis<sup>TM</sup> service: Exposes port 6379 for Redis<sup>TM</sup> read-only operations and port 26379 for accessing Redis<sup>TM</sup> Sentinel. When installing the chart with `architecture=replication` and `sentinel.enabled=true`, it will deploy a Redis<sup>TM</sup> master StatefulSet (only one master allowed) and a Redis<sup>TM</sup> replicas StatefulSet. In this case, the pods will contain an extra container with Redis<sup>TM</sup> Sentinel. This container will form a cluster of Redis<sup>TM</sup> Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed:
For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis<sup>TM</sup> Sentinel cluster and query the current master using the command below (using redis-cli or similar: - Redis<sup>TM</sup> service: Exposes port 6379 for Redis<sup>TM</sup> read-only operations and port 26379 for accessing Redis<sup>TM</sup> Sentinel.
For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis<sup>TM</sup> Sentinel cluster and query the current master using the command below (using redis-cli or similar):
``` ```
SENTINEL get-master-addr-by-name <name of your MasterSet. Example: mymaster> SENTINEL get-master-addr-by-name <name of your MasterSet. e.g: mymaster>
``` ```
This command will return the address of the current master, which can be accessed from inside the cluster. This command will return the address of the current master, which can be accessed from inside the cluster.
In case the current master crashes, the Sentinel containers will elect a new master node. In case the current master crashes, the Sentinel containers will elect a new master node.
### Using password file ### Using password file
To use a password file for Redis<sup>TM</sup> you need to create a secret containing the password. To use a password file for Redis<sup>TM</sup> you need to create a secret containing the password.
> *NOTE*: It is important that the file with the password must be called `redis-password` > *NOTE*: It is important that the file with the password must be called `redis-password`
...@@ -371,9 +496,9 @@ To use a password file for Redis<sup>TM</sup> you need to create a secret contai ...@@ -371,9 +496,9 @@ To use a password file for Redis<sup>TM</sup> you need to create a secret contai
And then deploy the Helm Chart using the secret name as parameter: And then deploy the Helm Chart using the secret name as parameter:
```console ```console
usePassword=true auth.enabled=true
usePasswordFile=true auth.usePasswordFiles=true
existingSecret=redis-password-file auth.existingSecret=redis-password-file
sentinels.enabled=true sentinels.enabled=true
metrics.enabled=true metrics.enabled=true
``` ```
...@@ -390,7 +515,7 @@ TLS support can be enabled in the chart by specifying the `tls.` parameters whil ...@@ -390,7 +515,7 @@ TLS support can be enabled in the chart by specifying the `tls.` parameters whil
For example: For example:
First, create the secret with the cetificates files: First, create the secret with the certificates files:
```console ```console
kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem
...@@ -422,8 +547,7 @@ tls-ca-cert-file ...@@ -422,8 +547,7 @@ tls-ca-cert-file
### Host Kernel Settings ### Host Kernel Settings
Redis<sup>TM</sup> may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. Redis<sup>TM</sup> may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example:
To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example:
``` ```
sysctlImage: sysctlImage:
...@@ -459,7 +583,7 @@ By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/use ...@@ -459,7 +583,7 @@ By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/use
3. Install the chart 3. Install the chart
```bash ```bash
$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis $ helm install my-release --set master.persistence.existingClaim=PVC_NAME bitnami/redis
``` ```
## Backup and restore ## Backup and restore
...@@ -486,9 +610,7 @@ $ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis ...@@ -486,9 +610,7 @@ $ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis
### Restore ### Restore
To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume. To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume. Follow the following steps:
Follow the following steps:
- First you will need to set in the `values.yaml` the parameter `appendonly` to `no`, if it is already `no` you can skip this step. - First you will need to set in the `values.yaml` the parameter `appendonly` to `no`, if it is already `no` you can skip this step.
...@@ -505,7 +627,7 @@ configmap: |- ...@@ -505,7 +627,7 @@ configmap: |-
For example, : For example, :
```bash ```bash
helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 helm install new-redis -f values.yaml . --set architecture=replication --set replica.replicaCount=3
``` ```
- Now that the PVC were created, stop it and copy the `dump.rdp` on the persisted data by using a helping pod. - Now that the PVC were created, stop it and copy the `dump.rdp` on the persisted data by using a helping pod.
...@@ -551,23 +673,18 @@ $ kubectl delete pod volpod ...@@ -551,23 +673,18 @@ $ kubectl delete pod volpod
- Start again the cluster: - Start again the cluster:
``` ```
helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 helm install new-redis -f values.yaml . --set architecture=replication --set replica.replicaCount=3
``` ```
## NetworkPolicy ## NetworkPolicy
To enable network policy for Redis<sup>TM</sup>, install To enable network policy for Redis<sup>TM</sup>, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin),
and set `networkPolicy.enabled` to `true`.
For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
With NetworkPolicy enabled, only pods with the generated client label will be With NetworkPolicy enabled, only pods with the generated client label will be able to connect to Redis<sup>TM</sup>. This label will be displayed in the output after a successful install.
able to connect to Redis<sup>TM</sup>. This label will be displayed in the output
after a successful install.
With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set:
...@@ -580,15 +697,89 @@ networkPolicy: ...@@ -580,15 +697,89 @@ networkPolicy:
redis-client: true redis-client: true
``` ```
### Setting Pod's affinity
This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more infomation about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
## Troubleshooting ## Troubleshooting
Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading an existing Release to a new major version ## Upgrading
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
incompatible breaking change needing manual actions. incompatible breaking change needing manual actions.
### To 14.0.0
- Several parameters were renamed or disappeared in favor of new ones on this major version:
- The term *slave* has been replaced by the term *replica*. Therefore, parameters prefixed with `slave` are now prefixed with `replicas`.
- Credentials parameter are reorganized under the `auth` parameter.
- `cluster.enabled` parameter is deprecated in favor of `architecture` parameter that accepts two values: `standalone` and `replication`.
- `securityContext.*` is deprecated in favor of `XXX.podSecurityContext` and `XXX.containerSecurityContext`.
- `sentinel.metrics.*` parameters are deprecated in favor of `metrics.sentinel.*` ones.
- New parameters to add custom command, environment variables, sidecars, init containers, etc. were added.
- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels).
- values.yaml metadata was adapted to follow the format supported by [readmenator](https://github.com/bitnami-labs/readmenator).
Consequences:
Backwards compatibility is not guaranteed. To upgrade to `14.0.0`, install a new release of the Redis<sup>TM</sup> chart, and migrate the data from your previous release. You have 2 alternatives to do so:
- Create a backup of the database, and restore it on the new release as explained in the [Backup and restore](#backup-and-restore) section.
- Reuse the PVC used to hold the master data on your previous release. To do so, use the `master.persistence.existingClaim` parameter. The following example assumes that the release name is `redis`:
```bash
$ helm install redis bitnami/redis --set auth.password=[PASSWORD] --set master.persistence.existingClaim=[EXISTING_PVC]
```
| Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[PASSWORD]_ with the password used in your previous release.
### To 13.0.0
This major version updates the Redis<sup>TM</sup> docker image version used from `6.0` to `6.2`, the new stable version. There are no major changes in the chart and there shouldn't be any breaking changes in it as `6.2` is basically a stricter superset of `6.0`. For more information, please refer to [Redis<sup>TM</sup> 6.2 release notes](https://raw.githubusercontent.com/redis/redis/6.2/00-RELEASENOTES).
### To 12.3.0
This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
### To 12.0.0
[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
**What changes were introduced in this major version?**
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
**Considerations when upgrading to this version**
- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
**Useful links**
- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
- https://helm.sh/docs/topics/v2_v3_migration/
- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
### To 11.0.0
When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for this nodes are given through the `slave.xxxx` parameters in `values.yaml`
### To 9.0.0
The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis<sup>TM</sup> exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
### To 7.0.0
In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all.
This version also allows enabling Redis<sup>TM</sup> Sentinel containers inside of the Redis<sup>TM</sup> Pods (feature disabled by default). In case the master crashes, a new Redis<sup>TM</sup> node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel).
### To 11.0.0 ### To 11.0.0
When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then a restore the data in this new version. When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then a restore the data in this new version.
...@@ -670,40 +861,3 @@ And edit the Redis<sup>TM</sup> slave (and metrics if enabled) deployment: ...@@ -670,40 +861,3 @@ And edit the Redis<sup>TM</sup> slave (and metrics if enabled) deployment:
kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
``` ```
## Upgrading
### To 12.0.0
[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
**What changes were introduced in this major version?**
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
**Considerations when upgrading to this version**
- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
**Useful links**
- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
- https://helm.sh/docs/topics/v2_v3_migration/
- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
### To 11.0.0
When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for this nodes are given through the `slave.xxxx` parameters in `values.yaml`
### To 9.0.0
The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis<sup>TM</sup> exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
### To 7.0.0
In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all.
This version also allows enabling Redis<sup>TM</sup> Sentinel containers inside of the Redis<sup>TM</sup> Pods (feature disabled by default). In case the master crashes, a new Redis<sup>TM</sup> node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel).
...@@ -3,9 +3,10 @@ master: ...@@ -3,9 +3,10 @@ master:
- --maxmemory-policy allkeys-lru - --maxmemory-policy allkeys-lru
persistence: persistence:
enabled: false enabled: false
slave: replica:
extraFlags: extraFlags:
- --maxmemory-policy allkeys-lru - --maxmemory-policy allkeys-lru
persistence: persistence:
enabled: false enabled: false
usePassword: false auth:
enabled: false
sentinel:
enabled: true
metrics:
enabled: true
sentinel:
enabled: true
** Please be patient while the chart is being deployed ** ** Please be patient while the chart is being deployed **
{{- if contains .Values.master.service.type "LoadBalancer" }} {{- if contains .Values.master.service.type "LoadBalancer" }}
{{- if not .Values.usePassword }} {{- if not .Values.auth.enabled }}
{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} {{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }}
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
WARNING WARNING
By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have By specifying "master.service.type=LoadBalancer" and "auth.enabled=false" you have
most likely exposed the Redis(TM) service externally without any authentication most likely exposed the Redis(TM) service externally without any authentication
mechanism. mechanism.
For security reasons, we strongly suggest that you switch to "ClusterIP" or For security reasons, we strongly suggest that you switch to "ClusterIP" or
"NodePort". As alternative, you can also switch to "usePassword=true" "NodePort". As alternative, you can also switch to "auth.enabled=true"
providing a valid password on "password" parameter. providing a valid password on "password" parameter.
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
...@@ -20,117 +20,128 @@ ...@@ -20,117 +20,128 @@
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}} {{- if eq .Values.architecture "replication" }}
-------------------------------------------------------------------------------
WARNING
Using redis sentinel without a cluster is not supported. A single pod with
standalone redis has been deployed.
To deploy redis sentinel, please use the values "cluster.enabled=true" and
"sentinel.enabled=true".
-------------------------------------------------------------------------------
{{- end }}
{{- if .Values.cluster.enabled }}
{{- if .Values.sentinel.enabled }} {{- if .Values.sentinel.enabled }}
Redis(TM) can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster:
{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations Redis(TM) can be accessed via port {{ .Values.sentinel.service.port }} on the following DNS name from within your cluster:
{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations
For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above.
{{- else }} {{- else }}
Redis(TM) can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster:
{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations Redis(TM) can be accessed on the following DNS names from within your cluster:
{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations
{{- end }} {{ printf "%s-master.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read/write operations (port {{ .Values.master.service.port }})
{{ printf "%s-replicas.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read-only operations (port {{ .Values.replica.service.port }})
{{- end }}
{{- else }} {{- else }}
Redis(TM) can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster:
{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} Redis(TM) can be accessed via port {{ .Values.master.service.port }} on the following DNS name from within your cluster:
{{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
{{- end }} {{- end }}
{{ if .Values.usePassword }} {{ if .Values.auth.enabled }}
To get your password run: To get your password run:
export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode)
{{- end }} {{- end }}
To connect to your Redis(TM) server: To connect to your Redis(TM) server:
1. Run a Redis(TM) pod that you can use as a client: 1. Run a Redis(TM) pod that you can use as a client:
kubectl run --namespace {{ .Release.Namespace }} redis-client --restart='Never' {{ if .Values.auth.enabled }} --env REDIS_PASSWORD=$REDIS_PASSWORD {{ end }} --image {{ template "redis.image" . }} --command -- sleep infinity
{{- if .Values.tls.enabled }} {{- if .Values.tls.enabled }}
kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity
Copy your TLS certificates to the pod: Copy your TLS certificates to the pod:
kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.cert redis-client:/tmp/client.cert
kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.key redis-client:/tmp/client.key
kubectl cp --namespace {{ .Release.Namespace }} /path/to/CA.cert {{ template "redis.fullname" . }}-client:/tmp/CA.cert kubectl cp --namespace {{ .Release.Namespace }} /path/to/CA.cert redis-client:/tmp/CA.cert
{{- end }}
Use the following command to attach to the pod: Use the following command to attach to the pod:
kubectl exec --tty -i {{ template "redis.fullname" . }}-client \ kubectl exec --tty -i redis-client \
{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "common.names.fullname" . }}-client=true" \{{- end }}
--namespace {{ .Release.Namespace }} -- bash --namespace {{ .Release.Namespace }} -- bash
{{- else }}
kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \
{{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }}
{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }}
--image {{ template "redis.image" . }} -- bash
{{- end }}
2. Connect using the Redis(TM) CLI: 2. Connect using the Redis(TM) CLI:
{{- if .Values.cluster.enabled }} {{- if eq .Values.architecture "replication" }}
{{- if .Values.sentinel.enabled }} {{- if .Values.sentinel.enabled }}
redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.port }}{{ if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations
redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access
{{- else }} {{- else }}
redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} redis-cli -h {{ printf "%s-master" (include "common.names.fullname" .) }}{{ if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} redis-cli -h {{ printf "%s-replicas" (include "common.names.fullname" .) }}{{ if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
{{- end }} {{- end }}
{{- else }} {{- else }}
redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} redis-cli -h {{ template "common.names.fullname" . }}-master{{ if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
{{- end }} {{- end }}
{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
Note: Since NetworkPolicy is enabled, only pods with label
{{ template "redis.fullname" . }}-client=true" Note: Since NetworkPolicy is enabled, only pods with label {{ template "common.names.fullname" . }}-client=true" will be able to connect to redis.
will be able to connect to redis.
{{- else -}} {{- else }}
To connect to your database from outside the cluster execute the following commands: To connect to your database from outside the cluster execute the following commands:
{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }}
{{- if contains "NodePort" .Values.sentinel.service.type }}
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
{{- else if contains "LoadBalancer" .Values.sentinel.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
redis-cli -h $SERVICE_IP -p {{ .Values.sentinel.service.port }} {{- if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
{{- else if contains "ClusterIP" .Values.sentinel.service.type }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "common.names.fullname" . }} {{ .Values.sentinel.service.port }}:{{ .Values.sentinel.service.port }} &
redis-cli -h 127.0.0.1 -p {{ .Values.sentinel.service.port }} {{- if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
{{- end }}
{{- else }}
{{- if contains "NodePort" .Values.master.service.type }} {{- if contains "NodePort" .Values.master.service.type }}
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ printf "%s-master" (include "common.names.fullname" .) }})
redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
{{- else if contains "LoadBalancer" .Values.master.service.type }} {{- else if contains "LoadBalancer" .Values.master.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available. NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ printf "%s-master" (include "common.names.fullname" .) }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
{{- else if contains "ClusterIP" .Values.master.service.type }} {{- else if contains "ClusterIP" .Values.master.service.type }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-master" (include "common.names.fullname" .) }} {{ .Values.master.service.port }}:{{ .Values.master.service.port }} &
redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} redis-cli -h 127.0.0.1 -p {{ .Values.master.service.port }} {{- if .Values.auth.enabled }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{ include "redis.checkRollingTags" . }} {{- end }}
{{- include "redis.checkRollingTags" . }}
{{- include "redis.validateValues" . }} {{- include "redis.validateValues" . }}
{{/* vim: set filetype=mustache: */}} {{/* vim: set filetype=mustache: */}}
{{/* {{/*
Expand the name of the chart. Return the proper Redis image name
*/}} */}}
{{- define "redis.name" -}} {{- define "redis.image" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
{{- end -}} {{- end -}}
{{/* {{/*
Expand the chart plus release name (used by the chart label) Return the proper Redis Sentinel image name
*/}} */}}
{{- define "redis.chart" -}} {{- define "redis.sentinel.image" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version -}} {{ include "common.images.image" (dict "imageRoot" .Values.sentinel.image "global" .Values.global) }}
{{- end -}} {{- end -}}
{{/* {{/*
Create a default fully qualified app name. Return the proper image name (for the metrics image)
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}} */}}
{{- define "redis.fullname" -}} {{- define "redis.metrics.image" -}}
{{- if .Values.fullnameOverride -}} {{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return the appropriate apiVersion for networkpolicy. Return the proper image name (for the metrics image)
*/}} */}}
{{- define "networkPolicy.apiVersion" -}} {{- define "redis.metrics.sentinel.image" -}}
{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} {{ include "common.images.image" (dict "imageRoot" .Values.metrics.sentinel.image "global" .Values.global) }}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return the appropriate apiGroup for PodSecurityPolicy. Return the proper image name (for the init container volume-permissions image)
*/}} */}}
{{- define "podSecurityPolicy.apiGroup" -}} {{- define "redis.volumePermissions.image" -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} {{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
{{- print "policy" -}}
{{- else -}}
{{- print "extensions" -}}
{{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return the appropriate apiVersion for PodSecurityPolicy. Return sysctl image
*/}} */}}
{{- define "podSecurityPolicy.apiVersion" -}} {{- define "redis.sysctl.image" -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} {{ include "common.images.image" (dict "imageRoot" .Values.sysctl.image "global" .Values.global) }}
{{- print "policy/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return the proper Redis(TM) image name Return the proper Docker Image Registry Secret Names
*/}} */}}
{{- define "redis.image" -}} {{- define "redis.imagePullSecrets" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/* {{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope, Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option Also, we can not use a single if because lazy evaluation is not an option
*/}} */}}
{{- if .Values.global }} {{- if .Values.global }}
{{- if .Values.global.imageRegistry }} {{- if .Values.global.imagePullSecrets }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} imagePullSecrets:
{{- else -}} {{- range .Values.global.imagePullSecrets }}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} {{- $credType := typeOf . -}}
{{- end -}} {{ if eq $credType "map[string]interface {}" }}
{{- else -}} - name: {{ get . "name" }}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} {{ else }}
- name: {{ . }}
{{ end }}
{{- end }}
{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.metrics.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.sysctlImage.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.volumePermissions.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end -}}
{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.metrics.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.sysctlImage.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.volumePermissions.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return the proper Redis(TM) Sentinel image name Return the appropriate apiVersion for networkpolicy.
*/}}
{{- define "sentinel.image" -}}
{{- $registryName := .Values.sentinel.image.registry -}}
{{- $repositoryName := .Values.sentinel.image.repository -}}
{{- $tag := .Values.sentinel.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}} */}}
{{- if .Values.global }} {{- define "networkPolicy.apiVersion" -}}
{{- if .Values.global.imageRegistry }} {{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} {{- print "extensions/v1beta1" -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}} {{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} {{- print "networking.k8s.io/v1" -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return the proper image name (for the metrics image) Return the appropriate apiGroup for PodSecurityPolicy.
*/}}
{{- define "redis.metrics.image" -}}
{{- $registryName := .Values.metrics.image.registry -}}
{{- $repositoryName := .Values.metrics.image.repository -}}
{{- $tag := .Values.metrics.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}} */}}
{{- if .Values.global }} {{- define "podSecurityPolicy.apiGroup" -}}
{{- if .Values.global.imageRegistry }} {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} {{- print "policy" -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}} {{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} {{- print "extensions" -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return the proper image name (for the init container volume-permissions image) Return the appropriate apiVersion for PodSecurityPolicy.
*/}}
{{- define "redis.volumePermissions.image" -}}
{{- $registryName := .Values.volumePermissions.image.registry -}}
{{- $repositoryName := .Values.volumePermissions.image.repository -}}
{{- $tag := .Values.volumePermissions.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}} */}}
{{- if .Values.global }} {{- define "podSecurityPolicy.apiVersion" -}}
{{- if .Values.global.imageRegistry }} {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} {{- print "policy/v1beta1" -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}} {{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} {{- print "extensions/v1beta1" -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
...@@ -191,202 +162,78 @@ Create the name of the service account to use ...@@ -191,202 +162,78 @@ Create the name of the service account to use
*/}} */}}
{{- define "redis.serviceAccountName" -}} {{- define "redis.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}} {{- if .Values.serviceAccount.create -}}
{{ default (include "redis.fullname" .) .Values.serviceAccount.name }} {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
{{- else -}} {{- else -}}
{{ default "default" .Values.serviceAccount.name }} {{ default "default" .Values.serviceAccount.name }}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Get the password secret. Return the configuration configmap name
*/}} */}}
{{- define "redis.secretName" -}} {{- define "redis.configmapName" -}}
{{- if .Values.existingSecret -}} {{- if .Values.existingConfigmap -}}
{{- printf "%s" .Values.existingSecret -}} {{- printf "%s" (tpl .Values.existingConfigmap $) -}}
{{- else -}} {{- else -}}
{{- printf "%s" (include "redis.fullname" .) -}} {{- printf "%s-configuration" (include "common.names.fullname" .) -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Get the password key to be retrieved from Redis(TM) secret. Return true if a configmap object should be created
*/}} */}}
{{- define "redis.secretPasswordKey" -}} {{- define "redis.createConfigmap" -}}
{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} {{- if empty .Values.existingConfigmap }}
{{- printf "%s" .Values.existingSecretPasswordKey -}} {{- true -}}
{{- else -}}
{{- printf "redis-password" -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return Redis(TM) password Get the password secret.
*/}} */}}
{{- define "redis.password" -}} {{- define "redis.secretName" -}}
{{- if not (empty .Values.global.redis.password) }} {{- if .Values.auth.existingSecret -}}
{{- .Values.global.redis.password -}} {{- printf "%s" .Values.auth.existingSecret -}}
{{- else if not (empty .Values.password) -}}
{{- .Values.password -}}
{{- else -}} {{- else -}}
{{- randAlphaNum 10 -}} {{- printf "%s" (include "common.names.fullname" .) -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return sysctl image Get the password key to be retrieved from Redis(TM) secret.
*/}}
{{- define "redis.sysctl.image" -}}
{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}}
{{- $repositoryName := .Values.sysctlImage.repository -}}
{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}} */}}
{{- if .Values.global }} {{- define "redis.secretPasswordKey" -}}
{{- if .Values.global.imageRegistry }} {{- if and .Values.auth.existingSecret .Values.auth.existingSecretPasswordKey -}}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} {{- printf "%s" .Values.auth.existingSecretPasswordKey -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}} {{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} {{- printf "redis-password" -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
Return the proper Docker Image Registry Secret Names Return Redis(TM) password
*/}}
{{- define "redis.imagePullSecrets" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
Also, we can not use a single if because lazy evaluation is not an option
*/}} */}}
{{- if .Values.global }} {{- define "redis.password" -}}
{{- if .Values.global.imagePullSecrets }} {{- $secretName := include "redis.secretName" . -}}
imagePullSecrets: {{- $secret := (lookup "v1" "Secret" .Release.Namespace $secretName ) -}}
{{- range .Values.global.imagePullSecrets }} {{- if $secret -}}
{{- $credType := typeOf . -}} {{- with $secret -}}
{{ if eq $credType "map[string]interface {}" }} {{- get .data "redis-password" | b64dec -}}
- name: {{ get . "name" }}
{{ else }}
- name: {{ . }}
{{ end }}
{{- end }}
{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.metrics.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.sysctlImage.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.volumePermissions.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end -}} {{- end -}}
{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} {{- else if not (empty .Values.global.redis.password) }}
imagePullSecrets: {{- .Values.global.redis.password -}}
{{- range .Values.image.pullSecrets }} {{- else if not (empty .Values.auth.password) -}}
- name: {{ . }} {{- .Values.auth.password -}}
{{- end }} {{- else -}}
{{- range .Values.metrics.image.pullSecrets }} {{- randAlphaNum 10 -}}
- name: {{ . }}
{{- end }}
{{- range .Values.sysctlImage.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.volumePermissions.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* Check if there are rolling tags in the images */}} {{/* Check if there are rolling tags in the images */}}
{{- define "redis.checkRollingTags" -}} {{- define "redis.checkRollingTags" -}}
{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} {{- include "common.warnings.rollingTag" .Values.image }}
WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. {{- include "common.warnings.rollingTag" .Values.sentinel.image }}
+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ {{- include "common.warnings.rollingTag" .Values.metrics.image }}
{{- end }}
{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
{{- end }}
{{- end -}}
{{/*
Return the proper Storage Class for master
*/}}
{{- define "redis.master.storageClass" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
*/}}
{{- if .Values.global -}}
{{- if .Values.global.storageClass -}}
{{- if (eq "-" .Values.global.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.global.storageClass -}}
{{- end -}}
{{- else -}}
{{- if .Values.master.persistence.storageClass -}}
{{- if (eq "-" .Values.master.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- if .Values.master.persistence.storageClass -}}
{{- if (eq "-" .Values.master.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper Storage Class for slave
*/}}
{{- define "redis.slave.storageClass" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
*/}}
{{- if .Values.global -}}
{{- if .Values.global.storageClass -}}
{{- if (eq "-" .Values.global.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.global.storageClass -}}
{{- end -}}
{{- else -}}
{{- if .Values.slave.persistence.storageClass -}}
{{- if (eq "-" .Values.slave.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- if .Values.slave.persistence.storageClass -}}
{{- if (eq "-" .Values.slave.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}} {{- end -}}
{{/* {{/*
...@@ -395,6 +242,7 @@ Compile all warnings into a single message, and call fail. ...@@ -395,6 +242,7 @@ Compile all warnings into a single message, and call fail.
{{- define "redis.validateValues" -}} {{- define "redis.validateValues" -}}
{{- $messages := list -}} {{- $messages := list -}}
{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}} {{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}}
{{- $messages := append $messages (include "redis.validateValues.architecture" .) -}}
{{- $messages := without $messages "" -}} {{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}} {{- $message := join "\n" $messages -}}
...@@ -405,22 +253,24 @@ Compile all warnings into a single message, and call fail. ...@@ -405,22 +253,24 @@ Compile all warnings into a single message, and call fail.
{{/* Validate values of Redis(TM) - spreadConstrainsts K8s version */}} {{/* Validate values of Redis(TM) - spreadConstrainsts K8s version */}}
{{- define "redis.validateValues.spreadConstraints" -}} {{- define "redis.validateValues.spreadConstraints" -}}
{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}} {{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.replica.spreadConstraints -}}
redis: spreadConstraints redis: spreadConstraints
Pod Topology Spread Constraints are only available on K8s >= 1.16 Pod Topology Spread Constraints are only available on K8s >= 1.16
Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/* {{/* Validate values of Redis(TM) - must provide a valid architecture */}}
Renders a value that contains template. {{- define "redis.validateValues.architecture" -}}
Usage: {{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replication") -}}
{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} redis: architecture
*/}} Invalid architecture selected. Valid values are "standalone" and
{{- define "redis.tplValue" -}} "replication". Please set a valid architecture (--set architecture="xxxx")
{{- if typeIs "string" .value }} {{- end -}}
{{- tpl .value .context }} {{- if and .Values.sentinel.enabled (not (eq .Values.architecture "replication")) }}
{{- else }} redis: architecture
{{- tpl (.value | toYaml) .context }} Using redis sentinel on standalone mode is not supported.
{{- end }} To deploy redis sentinel, please select the "replication" mode
(--set "architecture=replication,sentinel.enabled=true")
{{- end -}}
{{- end -}} {{- end -}}
{{- if .Values.cleanUpgrade.enabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: redis-upgrade-sa
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-10"
"helm.sh/hook-delete-policy": hook-succeeded,hook-failed,before-hook-creation
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: redis-upgrade-role
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-10"
"helm.sh/hook-delete-policy": hook-succeeded,hook-failed,before-hook-creation
rules:
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["get", "list", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: redis-upgrade-role-binding
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-10"
"helm.sh/hook-delete-policy": hook-succeeded,hook-failed,before-hook-creation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: redis-upgrade-role
subjects:
- kind: ServiceAccount
name: redis-upgrade-sa
namespace: {{ .Release.Namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: redis-clean-upgrade
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
metadata:
name: redis-clean-upgrade
annotations:
sidecar.istio.io/inject: 'false'
spec:
serviceAccountName: redis-upgrade-sa
imagePullSecrets:
{{- if .Values.global.imagePullSecrets }}
{{- range .Values.global.imagePullSecrets }}
{{- $credType := typeOf . -}}
{{- if eq $credType "map[string]interface {}" }}
- name: {{ get . "name" }}
{{- else }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end }}
restartPolicy: OnFailure
containers:
- name: redis-clean-upgrade
image: "registry1.dso.mil/ironbank/gitlab/gitlab/kubectl:13.9.0"
command:
- /bin/sh
- -c
- |
set -e
if [[ $(kubectl get statefulset -l app={{ include "common.names.name" . }} -n {{ .Release.Namespace }} 2> /dev/null | wc -l) -gt 0 ]]; then
kubectl delete statefulset -n {{ .Release.Namespace }} -l app={{ include "common.names.name" . }}
echo "Statefulsets cleaned up."
else
echo "No statefulsets to clean up."
fi
if [[ $(kubectl get pvc -l app={{ include "common.names.name" . }} -n {{ .Release.Namespace }} 2> /dev/null | wc -l) -gt 0 ]]; then
kubectl delete pvc -n {{ .Release.Namespace }} -l app={{ include "common.names.name" . }}
echo "PVCs cleaned up."
else
echo "No PVCs to clean up."
fi
echo "Done with upgrade steps."
{{- end }}
{{- if (include "redis.createConfigmap" .) }}
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
name: {{ template "redis.fullname" . }} name: {{ printf "%s-configuration" (include "common.names.fullname" .) }}
namespace: {{ .Release.Namespace | quote }} namespace: {{ .Release.Namespace | quote }}
labels: labels: {{- include "common.labels.standard" . | nindent 4 }}
app: {{ template "redis.name" . }} {{- if .Values.commonLabels }}
chart: {{ template "redis.chart" . }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
heritage: {{ .Release.Service }} {{- end }}
release: {{ .Release.Name }} {{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data: data:
redis.conf: |- redis.conf: |-
{{- if .Values.configmap }} # User-supplied common configuration:
# User-supplied configuration: {{- if .Values.commonConfiguration }}
{{- tpl .Values.configmap . | nindent 4 }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonConfiguration "context" $ ) | nindent 4 }}
{{- end }} {{- end }}
# End of common configuration
master.conf: |- master.conf: |-
dir {{ .Values.master.persistence.path }} dir {{ .Values.master.persistence.path }}
{{- if .Values.master.configmap }}
# User-supplied master configuration: # User-supplied master configuration:
{{- tpl .Values.master.configmap . | nindent 4 }} {{- if .Values.master.configuration }}
{{- end }} {{- include "common.tplvalues.render" ( dict "value" .Values.master.configuration "context" $ ) | nindent 4 }}
{{- if .Values.master.disableCommands }} {{- end }}
{{- range .Values.master.disableCommands }} {{- if .Values.master.disableCommands }}
{{- range .Values.master.disableCommands }}
rename-command {{ . }} "" rename-command {{ . }} ""
{{- end }} {{- end }}
{{- end }} {{- end }}
# End of master configuration
replica.conf: |- replica.conf: |-
dir {{ .Values.slave.persistence.path }} dir {{ .Values.replica.persistence.path }}
slave-read-only yes slave-read-only yes
{{- if .Values.slave.configmap }} # User-supplied replica configuration:
# User-supplied slave configuration: {{- if .Values.replica.configuration }}
{{- tpl .Values.slave.configmap . | nindent 4 }} {{- include "common.tplvalues.render" ( dict "value" .Values.replica.configuration "context" $ ) | nindent 4 }}
{{- end }} {{- end }}
{{- if .Values.slave.disableCommands }} {{- if .Values.replica.disableCommands }}
{{- range .Values.slave.disableCommands }} {{- range .Values.replica.disableCommands }}
rename-command {{ . }} "" rename-command {{ . }} ""
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if .Values.sentinel.enabled }} # End of replica configuration
{{- if .Values.sentinel.enabled }}
sentinel.conf: |- sentinel.conf: |-
dir "/tmp" dir "/tmp"
bind 0.0.0.0 bind 0.0.0.0
port {{ .Values.sentinel.port }} port {{ .Values.sentinel.containerPort }}
sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "common.names.fullname" . }}-node-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.sentinel.service.port }} {{ .Values.sentinel.quorum }}
sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }}
sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }}
sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }}
{{- if .Values.sentinel.configmap }}
# User-supplied sentinel configuration: # User-supplied sentinel configuration:
{{- tpl .Values.sentinel.configmap . | nindent 4 }} {{- if .Values.sentinel.configuration }}
{{- end }} {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.configuration "context" $ ) | nindent 4 }}
{{- end }}
# End of sentinel configuration
{{- end }}
{{- end }} {{- end }}
{{- range .Values.extraDeploy }}
---
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
{{- end }}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment