Compare revisions

Commits on Source (9)
@@ -4,6 +4,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
---
## [0.1.7-bb.0] - 2021-04-07
### Added
- Updated istio labels for the elasticsearch resource
## [0.1.6-bb.0] - 2021-03-30
### Added
- `kibana.count` to chart values
### Changed
- Modified the values for affinity and nodeSelector to allow greater flexibility
## [0.1.5-bb.0] - 2021-03-05
### Added
@@ -27,4 +37,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
### Added
- Adds the ability to create pod anti-affinity and node affinity.
- Adds the ability to add labels, annotations, a list of gateways and hosts for the kibana virtualservice.
apiVersion: v2
name: logging
version: 0.1.5-bb.0
version: 0.1.7-bb.0
appVersion: 7.9.2
@@ -68,9 +68,9 @@ spec:
podTemplate:
metadata:
annotations:
fluentbit.io/exclude-istio-proxy: "true"
prometheus.istio.io/merge-metrics: "false"
sidecar.istio.io/rewriteAppHTTPProbers: "true"
traffic.sidecar.istio.io/includeInboundPorts: "*"
traffic.sidecar.istio.io/excludeInboundPorts: "9300"
traffic.sidecar.istio.io/excludeOutboundPorts: "9300"
spec:
@@ -79,33 +79,13 @@ spec:
{{- toYaml .securityContext | nindent 10 }}
{{- end }}
{{- if or .antiAffinity .nodeAffinity }}
affinity:
{{- end }}
{{- if eq .antiAffinity "hard" }}
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
elasticsearch.k8s.elastic.co/statefulset-name: "{{ $.Release.Name }}-es-master"
elasticsearch.k8s.elastic.co/cluster-name: "{{ $.Release.Name }}"
elasticsearch.k8s.elastic.co/node-master: "true"
{{- else if eq .antiAffinity "soft" }}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
elasticsearch.k8s.elastic.co/statefulset-name: "{{ $.Release.Name }}-es-master"
elasticsearch.k8s.elastic.co/cluster-name: "{{ $.Release.Name }}"
elasticsearch.k8s.elastic.co/node-master: "true"
{{- if .nodeSelector }}
nodeSelector:
{{ toYaml .nodeSelector | nindent 10 }}
{{- end }}
{{- with .nodeAffinity }}
nodeAffinity:
{{ toYaml . | nindent 12 }}
{{- if .affinity }}
affinity:
{{ toYaml .affinity | nindent 10 }}
{{- end }}
{{- if .initContainers }}
@@ -184,9 +164,9 @@ spec:
podTemplate:
metadata:
annotations:
fluentbit.io/exclude-istio-proxy: "true"
prometheus.istio.io/merge-metrics: "false"
sidecar.istio.io/rewriteAppHTTPProbers: "true"
traffic.sidecar.istio.io/includeInboundPorts: "*"
traffic.sidecar.istio.io/excludeInboundPorts: "9300"
traffic.sidecar.istio.io/excludeOutboundPorts: "9300"
@@ -196,36 +176,15 @@ spec:
{{- toYaml .securityContext | nindent 10 }}
{{- end }}
{{- if or .antiAffinity .nodeAffinity }}
affinity:
{{- if .nodeSelector }}
nodeSelector:
{{ toYaml .nodeSelector | nindent 10 }}
{{- end }}
{{- if eq .antiAffinity "hard" }}
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
elasticsearch.k8s.elastic.co/statefulset-name: "{{ $.Release.Name }}-es-data"
elasticsearch.k8s.elastic.co/cluster-name: "{{ $.Release.Name }}"
elasticsearch.k8s.elastic.co/node-data: "true"
{{- else if eq .antiAffinity "soft" }}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
elasticsearch.k8s.elastic.co/statefulset-name: "{{ $.Release.Name }}-es-data"
elasticsearch.k8s.elastic.co/cluster-name: "{{ $.Release.Name }}"
elasticsearch.k8s.elastic.co/node-data: "true"
{{- end }}
{{- with .nodeAffinity }}
nodeAffinity:
{{ toYaml . | nindent 12 }}
{{- if .affinity }}
affinity:
{{ toYaml .affinity | nindent 10 }}
{{- end }}
{{- if .initContainers }}
initContainers:
{{ toYaml .initContainers | nindent 10 }}
@@ -5,7 +5,7 @@ metadata:
namespace: {{ .Release.Namespace }}
spec:
version: {{ .Values.kibana.version }}
count: 1
count: {{ .Values.kibana.count }}
image: {{ .Values.kibana.image.repository }}:{{ .Values.kibana.image.tag }}
{{- with .Values.sso }}
@@ -51,6 +51,16 @@ spec:
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.istio.enabled }}
automountServiceAccountToken: true
{{- end }}
{{- with .Values.kibana }}
{{- if .nodeSelector }}
nodeSelector:
{{ toYaml .nodeSelector | nindent 10 }}
{{- end }}
{{- if .affinity }}
affinity:
{{ toYaml .affinity | nindent 10 }}
{{- end }}
{{- end }}
@@ -6,6 +6,9 @@ kibana:
repository: registry1.dso.mil/ironbank/elastic/kibana/kibana
tag: 7.9.2
# Number of Kibana replicas
count: 3
securityContext:
runAsUser: 1000
runAsGroup: 1000
@@ -21,6 +24,25 @@ kibana:
# memory: 2Gi
# cpu: 2
affinity: {}
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# dont-schedule-with: kibana
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: node-type
# operator: In
# values:
# - "kibana"
nodeSelector: {}
# node-type: kibana
elasticsearch:
version: 7.9.2
image:
@@ -46,17 +68,24 @@ elasticsearch:
runAsGroup: 1000
fsGroup: 1000
# Available options are "soft" and "hard"
antiAffinity: ""
nodeAffinity: {}
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: type
# operator: In
# values:
# - "ecs-optimized"
affinity: {}
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# dont-schedule-with: elastic-master
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: node-type
# operator: In
# values:
# - "elastic-master"
nodeSelector: {}
# node-type: elastic-master
count: 3
persistence:
@@ -92,17 +121,24 @@ elasticsearch:
runAsGroup: 1000
fsGroup: 1000
# Available options are "soft" and "hard"
antiAffinity: ""
nodeAffinity: {}
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: type
# operator: In
# values:
# - "ecs-optimized"
affinity: {}
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# dont-schedule-with: elastic-data
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: node-type
# operator: In
# values:
# - "elastic-data"
nodeSelector: {}
# node-type: elastic-data
count: 4
persistence:
# Node Affinity & Anti-Affinity with Elastic/Kibana
This package exposes affinity through its values. To schedule pods onto specific nodes, use the `nodeSelector` value and, as needed, the `affinity` value. Additional guidance on configuring these is provided below.
It is good to have a basic knowledge of node affinity and the options available to you before customizing in this way; the upstream Kubernetes documentation [has a good walkthrough of this](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity).
## Values for NodeSelector
The `nodeSelector` value, available at multiple levels, provides basic node selection for deployments. The example below schedules each pod type only to nodes whose `node-type` label matches that pod type:
```yaml
kibana:
nodeSelector:
node-type: kibana
elasticsearch:
master:
nodeSelector:
node-type: elastic-master
data:
nodeSelector:
node-type: elastic-data
```
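With the values above, the chart passes each map straight through to the pod template via `toYaml`, so the rendered Kibana resource would contain roughly the following fragment (a sketch of the relevant part, not the full manifest):
```yaml
spec:
  podTemplate:
    spec:
      nodeSelector:
        node-type: kibana
```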
## Values for Affinity
The `affinity` value, available at the same levels, specifies affinity. The format matches what you would specify at the pod/deployment level. The example below schedules the pods only to nodes whose `node-type` label matches the pod type:
```yaml
kibana:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-type
operator: In
values:
- "kibana"
elasticsearch:
master:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-type
operator: In
values:
- "elastic-master"
data:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-type
operator: In
values:
- "elastic-data"
```
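Note that `requiredDuringSchedulingIgnoredDuringExecution` is a hard constraint: pods stay unschedulable until a matching node exists. If that is too strict, the standard Kubernetes preferred form can be supplied through the same value; a minimal sketch for Kibana:
```yaml
kibana:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: node-type
            operator: In
            values:
            - "kibana"
```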
## Values for Anti-Affinity
The `affinity` value can be set in the same way to schedule pods based on anti-affinity. The example below keeps pods off nodes that already run pods carrying the matching `dont-schedule-with` label:
```yaml
kibana:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
dont-schedule-with: kibana
elasticsearch:
master:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
dont-schedule-with: elastic-master
data:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
dont-schedule-with: elastic-data
```
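Earlier chart versions generated a "soft" (preferred) pod anti-affinity when `antiAffinity: "soft"` was set. If you are migrating from that value, an approximate equivalent under the new passthrough, matching on the statefulset label ECK applies automatically and assuming a release named `logging`, would be:
```yaml
elasticsearch:
  master:
    affinity:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 1
          podAffinityTerm:
            topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                elasticsearch.k8s.elastic.co/statefulset-name: logging-es-master
```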
@@ -2,6 +2,8 @@ kibana:
imagePullSecrets:
- name: private-registry-mil
count: 1
elasticsearch:
imagePullSecrets:
- name: private-registry-mil