Running with gitlab-runner 14.1.0 (8925d9a0)
  on gitlab-runners-bigbang-gl-packages-privileged-gitlab-runneb7dvc L_wAsvdS
section_start:1630091942:resolve_secrets
Resolving secrets
section_end:1630091942:resolve_secrets
section_start:1630091942:prepare_executor
Preparing the "kubernetes" executor
Using Kubernetes namespace: gitlab-runners
Using Kubernetes executor with image registry.dso.mil/platform-one/big-bang/pipeline-templates/pipeline-templates/k3d-builder:0.0.5 ...
Using attach strategy to execute scripts...
section_end:1630091942:prepare_executor
section_start:1630091942:prepare_script
Preparing environment
Waiting for pod gitlab-runners/runner-lwasvds-project-4906-concurrent-09lx74 to be running, status is Pending
Running on runner-lwasvds-project-4906-concurrent-09lx74 via gitlab-runners-bigbang-gl-packages-privileged-gitlab-runneb7dvc...
section_end:1630091949:prepare_script
section_start:1630091949:get_sources
Getting source from Git repository
Fetching changes with git depth set to 50...
Initialized empty Git repository in /builds/L_wAsvdS/0/platform-one/big-bang/apps/developer-tools/nexus/.git/
Created fresh repository.
Checking out 6474b645 as refs/merge-requests/16/head...
Skipping Git submodules setup
section_end:1630091950:get_sources
section_start:1630091950:step_script
Executing "step_script" stage of the job script
$ echo -e "\e[0Ksection_start:`date +%s`:cluster_setup[collapsed=true]\r\e[0KCluster Setup"
section_start:1630091950:cluster_setup[collapsed=true]
Cluster Setup
$ if [ -z ${PIPELINE_REPO_BRANCH} ]; then # collapsed multi-line command
$ git clone -b ${PIPELINE_REPO_BRANCH} ${PIPELINE_REPO} ${PIPELINE_REPO_DESTINATION}
Cloning into '../pipeline-repo'...
$ source ${WAIT_PATH}
$ i=0; while [ "$i" -lt 12 ]; do docker info &>/dev/null && break; sleep 5; i=$(( i + 1 )) ; done
$ docker network create ${CI_JOB_ID} --driver=bridge -o "com.docker.network.driver.mtu"="1450"
6bdb176c48eaa06d5843772df04ab4e49c9ed1c7a1e0f973ede4bb90d1819f22
$ k3d cluster create ${CI_JOB_ID} --config ${K3D_CONFIG_PATH} --network ${CI_JOB_ID}
INFO[0000] Using config file ../pipeline-repo/jobs/k3d-ci/config.yaml
INFO[0000] Prep: Network
INFO[0000] Network with name '5994279' already exists with ID '6bdb176c48eaa06d5843772df04ab4e49c9ed1c7a1e0f973ede4bb90d1819f22'
INFO[0000] Created volume 'k3d-5994279-images'
INFO[0001] Creating node 'k3d-5994279-server-0'
INFO[0002] Pulling image 'docker.io/rancher/k3s:v1.20.4-k3s1'
INFO[0004] Creating LoadBalancer 'k3d-5994279-serverlb'
INFO[0005] Pulling image 'docker.io/rancher/k3d-proxy:v4.3.0'
INFO[0007] Starting cluster '5994279'
INFO[0007] Starting servers...
INFO[0007] Starting Node 'k3d-5994279-server-0'
INFO[0013] Starting agents...
INFO[0013] Starting helpers...
INFO[0013] Starting Node 'k3d-5994279-serverlb'
INFO[0013] (Optional) Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access
INFO[0016] Successfully added host record to /etc/hosts in 2/2 nodes and to the CoreDNS ConfigMap
INFO[0016] Cluster '5994279' created successfully!
INFO[0016] --kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false
INFO[0016] You can now use it like this:
kubectl config use-context k3d-5994279
kubectl cluster-info
$ until kubectl get deployment coredns -n kube-system -o go-template='{{.status.availableReplicas}}' | grep -v -e '<no value>'; do sleep 1s; done
1
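To reproduce this cluster-setup stage outside CI, the commands above reduce to the sketch below; JOB_ID and K3D_CONFIG are hypothetical stand-ins for the ${CI_JOB_ID} and ${K3D_CONFIG_PATH} values the pipeline supplies:

  # stand-ins for the CI-provided variables (assumptions, not pipeline values)
  JOB_ID=local-test
  K3D_CONFIG=../pipeline-repo/jobs/k3d-ci/config.yaml
  # dedicated bridge network; MTU 1450 keeps cluster traffic under the runner's overlay MTU
  docker network create "$JOB_ID" --driver=bridge -o "com.docker.network.driver.mtu"="1450"
  k3d cluster create "$JOB_ID" --config "$K3D_CONFIG" --network "$JOB_ID"
  # block until CoreDNS has an available replica (the go-template prints '<no value>' until then)
  until kubectl get deployment coredns -n kube-system \
    -o go-template='{{.status.availableReplicas}}' | grep -v -e '<no value>'; do sleep 1s; done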
$ if [ ! -z ${PROJECT_NAME} ]; then # collapsed multi-line command
namespace/nexus created
secret/private-registry created
secret/private-registry-mil created
$ echo -e "\e[0Ksection_end:`date +%s`:cluster_setup\r\e[0K"
section_end:1630091989:cluster_setup
$ echo -e "\e[0Ksection_start:`date +%s`:dependency_clean[collapsed=true]\r\e[0KDependency Install and Wait"
section_start:1630091989:dependency_clean[collapsed=true]
Dependency Install and Wait
$ if [ -f "tests/dependencies.yaml" ]; then # collapsed multi-line command
$ if [ -f "tests/dependencies.yaml" ]; then # collapsed multi-line command
$ echo -e "\e[0Ksection_end:`date +%s`:dependency_clean\r\e[0K"
section_end:1630091989:dependency_clean
$ if [ ! -z ${PROJECT_NAME} ]; then # collapsed multi-line command
Helm installing nexus/chart into nexus namespace using nexus/tests/test-values.yaml for values
Release "nexus" does not exist. Installing it now.
NAME: nexus
LAST DEPLOYED: Fri Aug 27 19:19:49 2021
NAMESPACE: nexus
STATUS: deployed
REVISION: 1
NOTES:
1. Get the application URL by running these commands:
  export POD_NAME=$(kubectl get pods --namespace nexus -l "app.kubernetes.io/name=nexus-repository-manager,app.kubernetes.io/instance=nexus" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1 to use your application"
  kubectl --namespace nexus port-forward $POD_NAME 8081:80
$ sleep 10 # collapsed multi-line command
$ if [ -d "chart/templates/tests" ]; then # collapsed multi-line command
NAME: nexus
LAST DEPLOYED: Fri Aug 27 19:19:49 2021
NAMESPACE: nexus
STATUS: deployed
REVISION: 1
TEST SUITE:     nexus-repository-manager-cypress-sa
Last Started:   Fri Aug 27 19:21:08 2021
Last Completed: Fri Aug 27 19:21:08 2021
Phase:          Succeeded
TEST SUITE:     nexus-repository-manager-cypress-config
Last Started:   Fri Aug 27 19:21:08 2021
Last Completed: Fri Aug 27 19:21:08 2021
Phase:          Succeeded
TEST SUITE:     nexus-repository-manager-cypress-role
Last Started:   Fri Aug 27 19:21:08 2021
Last Completed: Fri Aug 27 19:21:08 2021
Phase:          Succeeded
TEST SUITE:     nexus-repository-manager-cypress-rolebinding
Last Started:   Fri Aug 27 19:21:08 2021
Last Completed: Fri Aug 27 19:21:08 2021
Phase:          Succeeded
TEST SUITE:     nexus-repository-manager-cypress-test
Last Started:   Fri Aug 27 19:21:08 2021
Last Completed: Fri Aug 27 19:21:55 2021
Phase:          Failed
NOTES:
1. Get the application URL by running these commands:
  export POD_NAME=$(kubectl get pods --namespace nexus -l "app.kubernetes.io/name=nexus-repository-manager,app.kubernetes.io/instance=nexus" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1 to use your application"
  kubectl --namespace nexus port-forward $POD_NAME 8081:80
Error: pod nexus-repository-manager-cypress-test failed
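The collapsed `if [ ! -z ${PROJECT_NAME} ]` and `if [ -d "chart/templates/tests" ]` commands above boil down to a Helm install followed by the chart's Helm tests. A minimal sketch, assuming standard Helm 3 invocations (the real collapsed scripts live in the pipeline-templates repo and may add flags):

  # install or upgrade the chart under test with the repo's CI values
  helm upgrade --install nexus chart --namespace nexus --values tests/test-values.yaml
  # run the test hooks rendered from chart/templates/tests (the five cypress-* suites above)
  helm test nexus --namespace nexus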
***** Start Helm Test Logs *****
====================================================================================================

  (Run Starting)

  ┌────────────────────────────────────────────────────────────────────────────────────────────────┐
  │ Cypress:    5.0.0                                                                               │
  │ Browser:    Chrome 83 (headless)                                                                │
  │ Specs:      1 found (nexus-healthspec.js)                                                       │
  └────────────────────────────────────────────────────────────────────────────────────────────────┘

────────────────────────────────────────────────────────────────────────────────────────────────────

  Running:  nexus-healthspec.js                                                             (1 of 1)

Browserslist: caniuse-lite is outdated. Please run: npx browserslist@latest --update-db

  Basic Nexus
    1) Visit the Nexus sign in page

  0 passing (8s)
  1 failing

  1) Basic Nexus
       Visit the Nexus sign in page:
     AssertionError: Timed out retrying: Expected to find element: `a[class="x-btn x-unselectable x-box-item x-toolbar-item x-btn-nx-primary-small x-item-disabled x-btn-disabled"]`, but never found it.
      at Context.eval (http://nexus-nexus-repository-manager:8081/__cypress/tests?p=cypress/integration/nexus-healthspec.js:105:8)

  (Results)

  ┌────────────────────────────────────────────────────────────────────────────────────────────────┐
  │ Tests:        1                                                                                 │
  │ Passing:      0                                                                                 │
  │ Failing:      1                                                                                 │
  │ Pending:      0                                                                                 │
  │ Skipped:      0                                                                                 │
  │ Screenshots:  1                                                                                 │
  │ Video:        true                                                                              │
  │ Duration:     7 seconds                                                                         │
  │ Spec Ran:     nexus-healthspec.js                                                               │
  └────────────────────────────────────────────────────────────────────────────────────────────────┘

  (Screenshots)

  -  /test/cypress/screenshots/nexus-healthspec.js/Basic Nexus -- Visit the Nexus sign in page (failed).png     (1280x720)

  (Video)

  -  Started processing:  Compressing to 32 CRF
  -  Finished processing: /test/cypress/videos/nexus-healthspec.js.mp4                    (0 seconds)

====================================================================================================

  (Run Finished)

       Spec                                              Tests  Passing  Failing  Pending  Skipped
  ┌────────────────────────────────────────────────────────────────────────────────────────────────┐
  │ ✖  nexus-healthspec.js                      00:07        1        -        1        -        -  │
  └────────────────────────────────────────────────────────────────────────────────────────────────┘
    ✖  1 of 1 failed (100%)                     00:07        1        -        1        -        -

tar: Removing leading `/' from member names
configmap/cypress-screenshots created
tar: Removing leading `/' from member names
configmap/cypress-videos created
***** End Helm Test Logs *****
section_end:1630092116:step_script
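The tar warnings and `configmap/... created` lines show the harness pulling the Cypress screenshots and videos out of the test pod and snapshotting them as ConfigMaps. One plausible reconstruction of that step; the in-pod paths come from the log, everything else here is an assumption:

  # kubectl cp streams a tar archive, which is what emits "Removing leading `/' from member names"
  kubectl cp nexus/nexus-repository-manager-cypress-test:/test/cypress/screenshots cypress-artifacts/screenshots
  kubectl cp nexus/nexus-repository-manager-cypress-test:/test/cypress/videos cypress-artifacts/videos
  # snapshot the artifacts as configmaps, matching the "configmap/... created" output
  kubectl create configmap cypress-screenshots -n nexus --from-file=cypress-artifacts/screenshots
  kubectl create configmap cypress-videos -n nexus --from-file=cypress-artifacts/videos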
section_start:1630092116:after_script
Running after_script
Running after script...
$ if [ -e success ]; then # collapsed multi-line command
Job Failed, cluster status:
NAMESPACE     NAME                                                READY   STATUS    RESTARTS   AGE
kube-system   pod/local-path-provisioner-5ff76fc89d-m6nz5         1/1     Running   0          2m16s
kube-system   pod/metrics-server-86cbb8457f-svftg                 1/1     Running   0          2m16s
kube-system   pod/coredns-854c77959c-6tm2j                        1/1     Running   0          2m16s
nexus         pod/nexus-nexus-repository-manager-dc7fc5f85-m58bj  1/1     Running   0          2m6s
nexus         pod/nexus-repository-manager-cypress-test           0/1     Error     0          48s
NAMESPACE     NAME                                     TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default       service/kubernetes                       ClusterIP   10.43.0.1       <none>        443/TCP                  2m33s
kube-system   service/kube-dns                         ClusterIP   10.43.0.10      <none>        53/UDP,53/TCP,9153/TCP   2m31s
kube-system   service/metrics-server                   ClusterIP   10.43.196.164   <none>        443/TCP                  2m31s
nexus         service/nexus-nexus-repository-manager   ClusterIP   10.43.205.33    <none>        8081/TCP                 2m6s
NAMESPACE     NAME                                             READY   UP-TO-DATE   AVAILABLE   AGE
kube-system   deployment.apps/local-path-provisioner           1/1     1            1           2m31s
kube-system   deployment.apps/metrics-server                   1/1     1            1           2m31s
kube-system   deployment.apps/coredns                          1/1     1            1           2m31s
nexus         deployment.apps/nexus-nexus-repository-manager   1/1     1            1           2m6s
NAMESPACE     NAME                                                        DESIRED   CURRENT   READY   AGE
kube-system   replicaset.apps/local-path-provisioner-5ff76fc89d           1         1         1       2m16s
kube-system   replicaset.apps/metrics-server-86cbb8457f                   1         1         1       2m16s
kube-system   replicaset.apps/coredns-854c77959c                          1         1         1       2m16s
nexus         replicaset.apps/nexus-nexus-repository-manager-dc7fc5f85    1         1         1       2m6s
$ echo -e "\e[0Ksection_start:`date +%s`:cluster_clean[collapsed=true]\r\e[0KCluster Cleanup"
section_start:1630092116:cluster_clean[collapsed=true]
Cluster Cleanup
$ k3d cluster delete ${CI_JOB_ID}
INFO[0000] Deleting cluster '5994279'
INFO[0000] Deleted k3d-5994279-serverlb
INFO[0004] Deleted k3d-5994279-server-0
INFO[0004] Deleting image volume 'k3d-5994279-images'
INFO[0004] Removing cluster details from default kubeconfig...
INFO[0004] Removing standalone kubeconfig file (if there is one)...
INFO[0004] Successfully deleted cluster 5994279!
$ docker network rm ${CI_JOB_ID}
5994279
$ echo -e "\e[0Ksection_end:`date +%s`:cluster_clean\r\e[0K"
section_end:1630092121:cluster_clean
section_end:1630092121:after_script
section_start:1630092121:upload_artifacts_on_failure
Uploading artifacts for failed job
Uploading artifacts...
WARNING: images.txt: no matching files
WARNING: tests/cypress/screenshots: no matching files
WARNING: tests/cypress/videos: no matching files
cypress-artifacts: found 6 matching files and directories
Uploading artifacts as "archive" to coordinator... ok  id=5994279 responseStatus=201 Created token=QHEAywpz
section_end:1630092122:upload_artifacts_on_failure
section_start:1630092122:cleanup_file_variables
Cleaning up file based variables
section_end:1630092122:cleanup_file_variables
ERROR: Job failed: command terminated with exit code 1
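For context on the collapsed `if [ -e success ]` at the top of after_script: this is a marker-file pattern, where step_script is expected to create a `success` file only on a clean run, so after_script can tell pass from fail before tearing the cluster down. A sketch under that assumption (the actual collapsed script lives in the pipeline-templates repo):

  if [ -e success ]; then
    # marker file present: step_script completed every stage (assumption about the contract)
    echo "Job succeeded"
  else
    echo "Job Failed, cluster status:"
    # matches the four tables printed above: pods, services, deployments, replicasets
    kubectl get pods,services,deployments,replicasets --all-namespaces
  fi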