UNCLASSIFIED

Commit fdeea492 authored by Grant Duncklee's avatar Grant Duncklee
Browse files

Merge branch 'bb-29/bigbang-addon' into 'main'

29.1.0-bb.0

See merge request !1
parents 41dfdebe bcc14499
Pipeline #351576 passed with stages
in 3 minutes and 31 seconds
{{- if .Values.service.enabled -}} {{- if .Values.service.enabled -}}
---
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
{{- if .Values.service.name }} name: {{ include "nexus.fullname" . }}
name: {{ .Values.service.name }}
{{- else }}
name: {{ template "nexus.name" . }}-service
{{- end }}
namespace: {{ template "nexus.namespace" . }}
labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- if .Values.service.annotations }} {{- if .Values.service.annotations }}
annotations: annotations:
{{ toYaml .Values.service.annotations | indent 4 }} {{ toYaml .Values.service.annotations | indent 4 }}
{{- end }} {{- end }}
labels:
{{- include "nexus.labels" . | nindent 4 }}
{{- if .Values.nexus.extraLabels }}
{{- with .Values.nexus.extraLabels }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
spec: spec:
type: {{ .Values.service.type }}
ports: ports:
{{- if .Values.service.portName }} - port: {{ .Values.nexus.nexusPort }}
- name: {{ .Values.service.portName }} protocol: TCP
port: {{ .Values.service.port }} name: nexus-ui
targetPort: {{ .Values.service.targetPort }}
{{- end }}
{{- with .Values.service.ports }}
{{ toYaml . | indent 2 }}
{{- end }}
selector: selector:
app: {{ template "nexus.name" . }} {{- include "nexus.selectorLabels" . | nindent 4 }}
release: {{ .Release.Name }} {{- if .Values.nexus.extraSelectorLabels }}
type: {{ .Values.service.type }} {{- with .Values.nexus.extraSelectorLabels }}
{{ if .Values.service.loadBalancerSourceRanges }} {{ toYaml . | indent 4 }}
loadBalancerSourceRanges: {{- end }}
{{- range .Values.service.loadBalancerSourceRanges }} {{- end }}
- {{ . }}
{{- if .Values.nexus.docker.enabled }}
{{- range $registry := .Values.nexus.docker.registries }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "nexus.fullname" $ | trunc 49 }}-docker-{{ $registry.port }}
{{- if $.Values.service.annotations }}
annotations:
{{ toYaml $.Values.service.annotations | indent 4 }}
{{- end }}
labels:
{{- include "nexus.labels" $ | nindent 4 }}
{{- if $.Values.nexus.extraLabels }}
{{- with $.Values.nexus.extraLabels }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }} {{- end }}
{{ end }} spec:
{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} type: {{ $.Values.service.type }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }} ports:
{{- end }} - port: {{ $registry.port }}
{{- end}} protocol: TCP
name: docker-{{ $registry.port }}
selector:
{{- include "nexus.selectorLabels" $ | nindent 4 }}
{{- if $.Values.nexus.extraSelectorLabels }}
{{- with $.Values.nexus.extraSelectorLabels }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.serviceAccount.create }} {{- if .Values.serviceAccount.create -}}
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
{{- if .Values.serviceAccount.name }} name: {{ include "nexus.serviceAccountName" . }}
name: {{ .Values.serviceAccount.name }} labels: {{- include "nexus.labels" . | nindent 4 }}
{{- else }} {{- if .Values.nexus.extraLabels }}
name: {{ template "nexus.fullname" . }} {{- with .Values.nexus.extraLabels }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }} {{- end }}
namespace: {{ template "nexus.namespace" . }} {{- with .Values.serviceAccount.annotations }}
{{- if .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }}
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }} {{- end }}
{{- end }} {{- end }}
## Overrides for generated resource names # -- Big Bang Additions
# namespaceOverride: hostname: nexus
domain: bigbang.dev
istio:
enabled: false
nexus:
gateways:
- "istio-system/main"
monitoring:
enabled: false
license_key: ""
license:
mountPath: /nexus-data/sonatype-license.lic
subPath: sonatype-license.lic
sso:
enabled: false
idp_data:
entityId: ""
usernameAttribute: ""
firstNameAttribute: ""
lastNameAttribute: ""
emailAttribute: ""
groupsAttribute: ""
validateResponseSignature: true
validateAssertionSignature: true
idpMetadata: ''
realm:
- "NexusAuthenticatingRealm"
- "NexusAuthorizingRealm"
- "SamlRealm"
role:
id: "nexus"
name: "nexus"
description: "nexus group"
privileges:
- "nx-all"
roles:
- "nx-admin"
# -- End of BigBang Additions
ingress:
enabled: true
annotations: {kubernetes.io/ingress.class: nginx}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hostPath: /
hostRepo: repo.demo
tls: []
# - secretName: nexus-local-tls
# hosts:
# - nexus.local
# - nexus-docker.local
# - nexus-docker-hosted.local
statefulset: statefulset:
# This is not supported
enabled: false enabled: false
replicaCount: 1
# By default deploymentStrategy is set to rollingUpdate with maxSurge of 25% and maxUnavailable of 25% . you can change type to `Recreate` or can uncomment `rollingUpdate` specification and adjust them to your usage. # By default deploymentStrategy is set to rollingUpdate with maxSurge of 25% and maxUnavailable of 25% . you can change type to `Recreate` or can uncomment `rollingUpdate` specification and adjust them to your usage.
deploymentStrategy: {} deploymentStrategy: Recreate
# rollingUpdate: image:
# maxSurge: 25% # IB Nexus Image
# maxUnavailable: 25% repository: registry1.dso.mil/ironbank/sonatype/nexus/nexus
# type: RollingUpdate tag: 3.29.0-02
pullPolicy: IfNotPresent
# If enabled, a Job will be launched after the chart is installed to initialize the admin password of your choice
initAdminPassword:
enabled: false
# Set this in the instance where default admin password is different
defaultPasswordOverride:
password: "admin321"
nexus: nexus:
imageName: quay.io/travelaudience/docker-nexus docker:
imageTag: 3.27.0 enabled: false
imagePullPolicy: IfNotPresent registries: []
# Uncomment this to scheduler pods on priority # - host: chart.local
# priorityClassName: "high-priority" # port: 5000
# secretName: registrySecret
env: env:
- name: INSTALL4J_ADD_VM_PARAMS - name: install4jAddVmParams
value: "-Xms1200M -Xmx1200M -XX:MaxDirectMemorySize=2G -XX:ActiveProcessorCount=4" value: "-Xms1200M -Xmx1200M -XX:MaxDirectMemorySize=2G -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap"
- name: NEXUS_SECURITY_RANDOMPASSWORD - name: NEXUS_SECURITY_RANDOMPASSWORD
value: "false" value: "true"
# nodeSelector: properties:
override: true
data: {}
# data:
# nexus.licenseFile: /nexus-data/sonatype-license.override.lic
# nexus.scripts.allowCreation: true
# See this article for LDAP configuration options https://support.sonatype.com/hc/en-us/articles/216597138-Setting-Advanced-LDAP-Connection-Properties-in-Nexus-Repository-Manager
# nexus.ldap.env.java.naming.security.authentication: simple
nodeSelector: {}
# cloud.google.com/gke-nodepool: default-pool # cloud.google.com/gke-nodepool: default-pool
affinity: {}
resources: {} resources: {}
# requests: # requests:
## Based on https://support.sonatype.com/hc/en-us/articles/115006448847#mem ## Based on https://support.sonatype.com/hc/en-us/articles/115006448847#mem
...@@ -44,40 +99,22 @@ nexus: ...@@ -44,40 +99,22 @@ nexus:
# cpu: 250m # cpu: 250m
# memory: 4800Mi # memory: 4800Mi
# The ports should only be changed if the nexus image uses a different port # The ports should only be changed if the nexus image uses a different port
dockerPort: 5003
nexusPort: 8081 nexusPort: 8081
additionalPorts: []
service:
type: NodePort
# clusterIP: None
# annotations: {}
## When using LoadBalancer service type, use the following AWS certificate from ACM
## https://aws.amazon.com/documentation/acm/
# service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:eu-west-1:123456789:certificate/abc123-abc123-abc123-abc123"
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "https"
# service.beta.kubernetes.io/aws-load-balancer-backend-port: "https"
## When using LoadBalancer service type, whitelist these source IP ranges
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
# loadBalancerSourceRanges:
# - 192.168.1.10/32
# labels: {}
## Configures the requested IP on the loadBalancer when using LoadBalancer service type
# loadBalancerIP: "192.168.1.10"
securityContextEnabled: true
securityContext: securityContext:
fsGroup: 200 fsGroup: 2000
podAnnotations: {} podAnnotations: {}
livenessProbe: livenessProbe:
initialDelaySeconds: 300 initialDelaySeconds: 30
periodSeconds: 30 periodSeconds: 30
failureThreshold: 6 failureThreshold: 6
# timeoutSeconds: 10 timeoutSeconds: 10
path: / path: /
readinessProbe: readinessProbe:
initialDelaySeconds: 30 initialDelaySeconds: 30
periodSeconds: 30 periodSeconds: 30
failureThreshold: 6 failureThreshold: 6
# timeoutSeconds: 10 timeoutSeconds: 10
path: / path: /
# hostAliases allows the modification of the hosts file inside a container # hostAliases allows the modification of the hosts file inside a container
hostAliases: [] hostAliases: []
...@@ -85,10 +122,48 @@ nexus: ...@@ -85,10 +122,48 @@ nexus:
# hostnames: # hostnames:
# - "example.com" # - "example.com"
# - "www.example.com" # - "www.example.com"
context: imagePullSecrets: []
# When using nexus it is important that all the files in the data directory have the proper owner configured. Therefore this
# value defaults to true to apply chown -R nexus:nexus to the mounted directory at every startup of the container. nameOverride: ""
chownNexusData: true fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
deployment:
# # Add annotations in deployment to enhance deployment configurations
annotations: {}
# # Add init containers. e.g. to be used to give specific permissions for nexus-data.
# # Add your own init container or uncomment and modify the given example.
initContainers:
# - name: fmp-volume-permission
# image: busybox
# imagePullPolicy: IfNotPresent
# command: ['chown','-R', '200', '/nexus-data']
# volumeMounts:
# - name: nexus-data
# mountPath: /nexus-data
# # Uncomment and modify this to run a command after starting the nexus container.
postStart:
command: # '["/bin/sh", "-c", "ls"]'
preStart:
command: # '["/bin/rm", "-f", "/path/to/lockfile"]'
terminationGracePeriodSeconds: 120
additionalContainers:
additionalVolumes:
additionalVolumeMounts:
service:
name: nexus3
enabled: true
labels: {}
annotations: {}
serviceType: ClusterIP
route: route:
enabled: false enabled: false
...@@ -98,37 +173,6 @@ route: ...@@ -98,37 +173,6 @@ route:
annotations: annotations:
# path: /docker # path: /docker
nexusProxy:
enabled: true
# svcName: proxy-svc
imageName: quay.io/travelaudience/docker-nexus-proxy
imageTag: 2.6.0
imagePullPolicy: IfNotPresent
port: 8080
targetPort: 8080
# labels: {}
env:
nexusDockerHost:
nexusHttpHost:
enforceHttps: false
cloudIamAuthEnabled: false
## If cloudIamAuthEnabled is set to true uncomment the variables below and remove this line
# clientId: ""
# clientSecret: ""
# organizationId: ""
# redirectUrl: ""
# requiredMembershipVerification: "true"
# secrets:
# keystore: ""
# password: ""
resources: {}
# requests:
# cpu: 100m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 512Mi
nexusProxyRoute: nexusProxyRoute:
enabled: false enabled: false
labels: labels:
...@@ -153,183 +197,29 @@ persistence: ...@@ -153,183 +197,29 @@ persistence:
# pdName: nexus-data-disk # pdName: nexus-data-disk
# fsType: ext4 # fsType: ext4
nexusBackup:
enabled: false
imageName: dbcc/docker-nexus-backup
imageTag: 0.0.1
imagePullPolicy: IfNotPresent
env:
rcloneRemote:
targetBucket:
nexusAuthorization:
# Size of the data chunk to stream to the remote
streamingUploadCutoff: "5000000"
offlineRepos: "maven-central maven-public maven-releases maven-snapshots"
gracePeriod: 60
# This should match the value of `initAdminPassword.password` if `initAdminPassword.enabled` is true
nexusAdminPassword: "admin123"
persistence:
enabled: true
# existingClaim:
# annotations:
# "helm.sh/resource-policy": keep
accessMode: ReadWriteOnce
# See comment above for information on setting the backup storageClass
# storageClass: "-"
storageSize: 8Gi
# If PersistentDisk already exists you can create a PV for it by including the 2 following keypairs.
# pdName: nexus-backup-disk
# fsType: ext4
resources: {}
# requests:
# cpu: 100m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 512Mi
rcloneConfig:
rclone.conf: |
[AWS]
type = s3
provider = AWS
env_auth = true
region = us-east-1
acl = authenticated-read
nexusCloudiam:
enabled: false
persistence:
enabled: true
# existingClaim:
# annotations:
# "helm.sh/resource-policy": keep
accessMode: ReadWriteOnce
# See comment above for information on setting the backup storageClass
# storageClass: "-"
storageSize: 1Mi
# If PersistentDisk already exists you can create a PV for it by including the 2 following keypairs.
# pdName: nexus-cloudiam-path
# fsType: ext4
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
annotations: {}
rbac:
create: false
roleRef: {}
annotations: {}
ingress:
enabled: false
path: /
labels: {}
annotations: {}
# # NOTE: Can't use 'false' due to https://github.com/jetstack/kube-lego/issues/173.
# kubernetes.io/ingress.allow-http: true
# kubernetes.io/ingress.class: gce
# kubernetes.io/ingress.global-static-ip-name: ""
# kubernetes.io/tls-acme: true
tls:
enabled: true
secretName: nexus-tls
# Specify custom rules in addition to or instead of the nexus-proxy rules
rules:
# - host: http://nexus.127.0.0.1.nip.io
# http:
# paths:
# - backend:
# serviceName: additional-svc
# servicePort: 80
ingressDocker:
enabled: false
path: /
labels: {}
annotations: {}
# # NOTE: Can't use 'false' due to https://github.com/jetstack/kube-lego/issues/173.
# kubernetes.io/ingress.allow-http: true
# kubernetes.io/ingress.class: gce
# kubernetes.io/ingress.global-static-ip-name: ""
# kubernetes.io/tls-acme: true
tls:
enabled: true
secretName: nexus-tls
# Specify custom rules in addition to or instead of the nexus-proxy rules
rules:
# - host: http://nexus.127.0.0.1.nip.io
# http:
# paths:
# - backend:
# serviceName: additional-svc
# servicePort: 80
affinity: {}
tolerations: [] tolerations: []
# # Enable configmap and add data in configmap # # Enable configmap and add data in configmap
config: config:
enabled: false enabled: false
mountPath: /sonatype-nexus-conf mountPath: /sonatype-nexus-conf
data: data: []
deployment:
# # Add annotations in deployment to enhance deployment configurations
annotations: {}
# # Add init containers. e.g. to be used to give specific permissions for nexus-data.
# # Add your own init container or uncomment and modify the given example.
initContainers:
# - name: fmp-volume-permission
# image: busybox
# imagePullPolicy: IfNotPresent
# command: ['chown','-R', '200', '/nexus-data']
# volumeMounts:
# - name: nexus-data
# mountPath: /nexus-data
# # Uncomment and modify this to run a command after starting the nexus container.
postStart:
command: # '["/bin/sh", "-c", "ls"]'
# # Enable nexus scripts settings, disabled by default for versions >= 3.21.2
# command: '["/bin/sh", "-c", "if [[ -z $(grep ^nexus.scripts.allowCreation=true /nexus-data/etc/nexus.properties) ]];then echo nexus.scripts.allowCreation=true >> /nexus-data/etc/nexus.properties;fi"]'
additionalContainers:
additionalVolumes:
additionalVolumeMounts:
# # To use an additional secret, set enable to true and add data # # To use an additional secret, set enable to true and add data
secret: secret:
enabled: false enabled: true
mountPath: /etc/secret-volume mountPath: /nexus-data/admin.password
subPath: admin.password
readOnly: true readOnly: true
data:
# # To use an additional service, set enable to true serviceAccount:
service: # Specifies whether a service account should be created
type: ClusterIP create: true
# name: additional-svc # Annotations to add to the service account
enabled: false
labels: {}
annotations: {} annotations: {}
ports: # The name of the service account to use.
- name: nexus-service # If not set and create is true, a name is generated using the fullname template
targetPort: 80 name: ""
port: 80
## Configures the requested IP on the loadBalancer when using LoadBalancer service type
# loadBalancerIP: "192.168.1.10"
additionalConfigMaps: [] psp:
# - name: maven-central create: false
# labels:
# nexus-type: repository
# data:
# recipe: 'MavenProxy'
# remoteUrl: 'https://repo.maven.apache.org/maven2/'
# blobStoreName: 'default'
# strictContentTypeValidation: 'true'
# versionPolicy: 'RELEASE'
# layoutPolicy: 'STRICT'
# Nexus Artifact Repository Pro Docs # Sonatype Nexus Repository Manager (NXRM) Documentation
## Usage ## Table of Contents
- [NXRM SSO Integration](docs/keycloak.md)
- [NXRM High Availability](docs/general.md#high-availability)
- [NXRM Storage](docs/general.md#storage)
- [NXRM Database](docs/general.md#database)
- [NXRM Dependent Packages](#nxrm-dependent-packages)
- [NXRM BigBang Caveats, Notes, etc.](#bigbang-additions-comments-and-important-information)
### Prerequisites ## Iron Bank
You can `pull` the Iron Bank image [here](https://registry1.dso.mil/harbor/projects/3/repositories/sonatype%2Fnexus%2Fnexus) and view the container approval [here](https://ironbank.dso.mil/repomap/sonatype/nexus).
### Deployment ## Helm
Please reference complete list of providable variables [here](https://github.com/sonatype/helm3-charts/tree/master/charts/nexus-repository-manager#configuration)
### Nexus Post Install Configuration Notes ```bash
git clone https://repo1.dso.mil/platform-one/big-bang/apps/developer-tools/nexus-repository-manager.git
helm install nexus-repository-manager chart
```
## BigBang Additions, Comments, and Important Information
#### Allow SSO login ### Random Admin Password
NXRM's upstream chart ships with a standardized password and an optional values parameter to randomize a password. The
problem with this approach is that the user would be required to `exec` into the pod to retrieve the password. We are
leveraging the existing `nexus.env['NEXUS_SECURITY_RANDOMPASSWORD']` item to force the creation of the random password
on the pod. However, we are generating a random password via `randAlphaNum` and creating a Kubernetes secret. This
method allows us to overwrite the generated file containing the Nexus generated random password with a Kubernetes
secret to enable programmatic ingestion.
Ensure the following is present to enable the randomized Kubernetes password:
```bash
# values.yaml
nexus:
env:
- name: NEXUS_SECURITY_RANDOMPASSWORD
key: "true"
...
secret:
enabled: true
mountPath: /nexus-data/admin.password
subPath: admin.password
readOnly: true
```
### License
We expect you to secure your license; the license will be provided as a binary. Encode the binary file as a base64
encoded string, secure with sops, and place in `.Values.addons.nexusRepositoryManager.license_key`. The `_helpers.tpl`
will create a named template and generate the appropriate secret within the namespace. The chart will reference the
license via a secret volumeMount to ensure the application starts licensed.
### NXRM Dependent Packages
Nexus IQ Server requires Nexus Repository Manager.
# Node Affinity & Anti-Affinity for Nexus
Affinity is exposed through values options for this package. If you want to schedule your pods to deploy on specific nodes you can do that through the `nodeSelector` value and as needed the `affinity` value. Additional info is provided below as well to help in configuring this.
It is good to have a basic knowledge of node affinity and available options to you before customizing in this way - the upstream kubernetes documentation [has a good walkthrough of this](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity).
## Values for NodeSelector
The `nodeSelector` value at the top level can be set to do basic node selection for deployments. See the below example, which schedules pods only to nodes with the label `node-type` equal to `operator`:
```yaml
nodeSelector:
node-type: operator
```
## Values for Affinity
The `affinity` value at the top level should be used to specify affinity. The format to include follows what you'd specify at a pod/deployment level. See the example below for scheduling the operator pods only to nodes with the label `node-type` equal to `operator`:
```yaml
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-type
operator: In
values:
- operator
```
## Values for Anti-Affinity
The `affinity` value at the top level can be set in the same way to schedule pods based on anti-affinity. See the below example, which prevents pods from being scheduled onto nodes that already have pods with the `dont-schedule-with: operator` label:
```yaml
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
dont-schedule-with: operator
```
# NXRM Storage, Database, and High Availability
## Storage
## Blob Store
Can be a shared file system or a cloud object store.
[Blob Stores](https://help.sonatype.com/repomanager3/high-availability/configuring-blob-stores)
### Recommended Shared File Systems
- NFS v4
- AWS EFS
- AWS S3
## Database
Nexus 3 uses builtin DB OrientDB for holding metadata and pointers for blob objects.
## High Availability
Discussing with Sonatype to ensure their HA-C solution is compatible with our deployment.
The upstream charts have the replica count hard-coded to `1`, possibly due to a limitation.
## Monitoring Node Health
NXRM provides two endpoints to monitor health status. Success is represented as `HTTP 200 OK`, failure is represented
as `HTTP 503 SERVICE UNAVAILABLE`.
- `http://<hostname>:<port>/service/rest/v1/status`
Verifies that a node can handle read requests.
- `http://<hostname>:<port>/service/rest/v1/status/writable`
Verifies that a node can handle read and write requests.
# NXRM Keycloak Configuration
**SAML/SSO integration is a *Pro* license feature.**
BigBang requires/prefers SAML/SSO integration out of the box; unfortunately, the upstream Helm chart did not have a solution at the drafting of this integration. To achieve our goal, we added a Kubernetes job that handles the SAML/SSO integration as part of the NXRM Helm installation. To enable this functionality, ensure `sso.enabled` is set to `true`; you will additionally require a Keycloak instance, the IDP metadata file, along with other parameters you may define in `sso.idp_data`.
Our implementation closely follows the [Sonatype SAML Integration](https://support.sonatype.com/hc/en-us/articles/1500000976522-SAML-integration-for-Nexus-Repository-Manager-Pro-3-and-Nexus-IQ-Server-with-Keycloak) documentation.
## Download Keycloak IdP Metadata
1. Login to the Keycloak Admin Console i.e. <KeycloakURL>/auth/admin/master/console/
2. From the left-side menu, click on *Realm Settings*.
3. From the General tab, right-click on SAML 2.0 Identity Provider Metadata under the Endpoints field and save the link/file locally. This is the Keycloak IdP metadata that will be needed when configuring NXRM/IQ.
## Configure Users and Groups in Keycloak
4. To add groups, via the left-side menu, under *Manage*, select *Groups* and then *New*.
5. In the next screen enter a group name and select *Save*. This will create a group that will be used for role mapping on the NXRM/IQ side.
6. To add users, via the left-side menu, under *Manage*, select *Users* and then *Add user*.
8. In the next screen, enter a *Username*, *First Name*, *Last Name* and *Email*, then click *Save*.
8. Once saved, the user will be created but will not have a default password set or be assigned to any groups. To set the password, click on the *Credentials* tab, set a password and click *Reset Password*.
9. To add the user to a group, click on the Groups tab and from the *Available Groups* field enter the name of the group created in Step 5 and click *Join*.
## NXRM Configuration
```
# values.yaml
sso:
enabled: false
idp_data:
entityId: "{{ base_url }}/service/rest/v1/security/saml/metadata"
usernameAttribute: "username"
firstNameAttribute: "firstName"
lastNameAttribute: "lastName"
emailAttribute: "email"
groupsAttribute: "groups"
validateResponseSignature: true
validateAssertionSignature: true
idpMetadata: 'string'
realm:
- "NexusAuthenticatingRealm"
- "NexusAuthorizingRealm"
- "SamlRealm"
role:
id: "nexus"
name: "nexus"
description: "nexus group"
privileges:
- "nx-all"
roles:
- "nx-admin"
# Retrieve a list of all available privileges:
# curl -X GET "https://{{ base_url }}/service/rest/v1/security/privileges" -H "accept: application/json"
```
10. Obtain a copy of the NXRM 3 SAML Metadata by opening the Entity ID URI i.e. <NXRMBaseURL>/service/rest/v1/security/saml/metadata and saving the XML to file
## Configure Keycloak - Client Config and Attribute Mapping
11. Further to configuring the NXRM/IQ side, to import the NXRM or IQ SAML metadata into Keycloak, via the Keycloak Admin Console select Clients from the left-side menu, then click *Create*.
12. In the Add Client screen, click *Select file* from the Import field, upload the NXRM or IQ SAML metadata that was obtained when configuring the NXRM/IQ side and click *Save*.
13. After saving, in the next screen, for the Client SAML Endpoint field, enter the Nexus instance's Assertion Consumer Service (ACS) URL i.e. <NXRMBaseURL>/saml for NXRM 3 or <IQBaseURL>/saml for Nexus IQ Server and click *Save*.
14. If in the Configure Nexus Applications section, the *Validate Response Signature* and *Validate Assertion Signature* fields are set to "Default" or "True", then in the Clients → Settings tab ensure that the *Sign Documents* and *Sign Assertions* fields are enabled.
Once the client has been created and the Client SAML Endpoint has been set, an attribute for each of the mappable fields that were configured in the Configure Nexus Applications section i.e. username, firstName, lastName, email and groups, will need to be created.
15. To map an attribute, select the Mappers tab and then click on 'Create'.
16. Create a mapper for each of the mappable attributes with the values shown here:
| Name | Mapper Type | Property | Friendly Name | SAML Attribute Name | SAML Attribute NameFormat |
|-------------|---------------|-----------|---------------|---------------------|---------------------------|
| username | User Property | username | username | username | Basic |
| First Name | User Property | firstName | firstName | firstName | Basic |
| Last Name | User Property | lastName | lastName | lastName | Basic |
| Email | User Property | email | email | email | Basic |
| Groups | Group list | groups | groups | *N/A* | Basic |
Cypress tests run headless calling the script cy-run.sh
\ No newline at end of file
{
"pluginsFile": false,
"supportFile": false,
"fixturesFolder": false,
"env": {
"nexus_url": "nexus.bigbang.dev"
}
}
// Smoke test: the Nexus UI landing page loads at the configured URL.
// (Names fixed: the original was copy-pasted from a Prometheus test
// but visits the `nexus_url` Cypress environment variable.)
describe('Basic nexus', function () {
  it('Visits the nexus sign in page', function () {
    cy.visit(Cypress.env('nexus_url'))
  })
})
# Istio ingress Gateway for the *.bigbang.dev wildcard domain.
# Indentation was restored here — the pasted manifest had its leading
# whitespace stripped, which is structural in YAML.
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: main
  namespace: istio-system
spec:
  selector:
    # Bind to the default Istio ingress gateway workload.
    istio: ingressgateway
  servers:
    # Plain HTTP on 80 is redirected to HTTPS.
    - hosts:
        - '*'
      port:
        name: http
        number: 80
        protocol: HTTP
      tls:
        httpsRedirect: true
    # HTTPS on 443, terminated with the pre-provisioned wildcard cert
    # (a TLS secret named "wildcard-cert" in the gateway's namespace).
    - hosts:
        - '*.bigbang.dev'
      port:
        name: https
        number: 443
        protocol: HTTPS
      tls:
        credentialName: wildcard-cert
        mode: SIMPLE
# Chart values override: enable the Istio VirtualService and pull the
# Iron Bank image with the registry credentials secret.
# Indentation restored — the pasted snippet had leading whitespace stripped.
istio:
  enabled: true
nexus:
  imagePullSecrets:
    - name: private-registry-mil
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment