Running with gitlab-runner 14.1.0 (8925d9a0)
  on gitlab-runners-bigbang-gl-packages-privileged-gitlab-runneb7dvc L_wAsvdS
Resolving secrets
Preparing the "kubernetes" executor
Using Kubernetes namespace: gitlab-runners
Using Kubernetes executor with image registry.dso.mil/platform-one/big-bang/pipeline-templates/pipeline-templates/k3d-builder:0.0.5 ...
Using attach strategy to execute scripts...
Preparing environment
Waiting for pod gitlab-runners/runner-lwasvds-project-4906-concurrent-09gslp to be running, status is Pending
Running on runner-lwasvds-project-4906-concurrent-09gslp via gitlab-runners-bigbang-gl-packages-privileged-gitlab-runneb7dvc...
Getting source from Git repository
Fetching changes with git depth set to 50...
Initialized empty Git repository in /builds/L_wAsvdS/0/platform-one/big-bang/apps/developer-tools/nexus/.git/
Created fresh repository.
Checking out 9f6060f2 as refs/merge-requests/16/head...
Skipping Git submodules setup
Executing "step_script" stage of the job script
$ if [ -z ${PIPELINE_REPO_BRANCH} ]; then # collapsed multi-line command
$ git clone -b ${PIPELINE_REPO_BRANCH} ${PIPELINE_REPO} ${PIPELINE_REPO_DESTINATION}
Cloning into '../pipeline-repo'...
$ source ${WAIT_PATH}
$ i=0; while [ "$i" -lt 12 ]; do docker info &>/dev/null && break; sleep 5; i=$(( i + 1 )) ; done
$ docker network create ${CI_JOB_ID} --driver=bridge -o "com.docker.network.driver.mtu"="1450"
f4937d49eb3b7f88ffc51cbcdf61c2e71f68c4494ca93f7c1344af76e535f915
$ k3d cluster create ${CI_JOB_ID} --config ${K3D_CONFIG_PATH} --network ${CI_JOB_ID}
INFO[0000] Using config file ../pipeline-repo/jobs/k3d-ci/config.yaml
INFO[0000] Prep: Network
INFO[0000] Network with name '5990488' already exists with ID 'f4937d49eb3b7f88ffc51cbcdf61c2e71f68c4494ca93f7c1344af76e535f915'
INFO[0000] Created volume 'k3d-5990488-images'
INFO[0001] Creating node 'k3d-5990488-server-0'
INFO[0002] Pulling image 'docker.io/rancher/k3s:v1.20.4-k3s1'
INFO[0004] Creating LoadBalancer 'k3d-5990488-serverlb'
INFO[0005] Pulling image 'docker.io/rancher/k3d-proxy:v4.3.0'
INFO[0007] Starting cluster '5990488'
INFO[0007] Starting servers...
INFO[0007] Starting Node 'k3d-5990488-server-0'
INFO[0012] Starting agents...
INFO[0012] Starting helpers...
INFO[0012] Starting Node 'k3d-5990488-serverlb'
INFO[0012] (Optional) Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access
INFO[0016] Successfully added host record to /etc/hosts in 2/2 nodes and to the CoreDNS ConfigMap
INFO[0016] Cluster '5990488' created successfully!
INFO[0016] --kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false
INFO[0016] You can now use it like this:
kubectl config use-context k3d-5990488
kubectl cluster-info
$ until kubectl get deployment coredns -n kube-system -o go-template='{{.status.availableReplicas}}' | grep -v -e '<no value>'; do sleep 1s; done
1
$ if [ ! -z ${PROJECT_NAME} ]; then # collapsed multi-line command
namespace/nexus created
secret/private-registry created
secret/private-registry-mil created
$ if [[ "${CI_PROJECT_NAME}" != *"istio"* ]]; then # collapsed multi-line command
- Processing resources for Istio core.
✔ Istio core installed
- Processing resources for Istiod. Waiting for Deployment/istio-system/istiod
✔ Istiod installed
- Processing resources for Ingress gateways. Waiting for Deployment/istio-system/istio-ingressgat...
✔ Ingress gateways installed
- Pruning removed resources
✔ Installation complete
namespace/istio-system labeled
$ if [[ "${PACKAGE_NAMESPACE}" != "istio-operator" ]]; then # collapsed multi-line command
Generating a RSA private key
........................................+++++
........+++++
writing new private key to 'tls.key'
-----
secret/wildcard-cert created
$ if [ -f "tests/main-test-gateway.yaml" ]; then # collapsed multi-line command
$ if [ -f "tests/dependencies.yaml" ]; then # collapsed multi-line command
$ sleep 10
$ kubectl wait --for=condition=established --timeout 60s -A crd --all > /dev/null
$ if [ -f tests/dependencies.yaml ]; then # collapsed multi-line command
$ wait_sts
$ wait_daemonset
$ kubectl wait --for=condition=available --timeout 600s -A deployment --all > /dev/null
$ kubectl wait --for=condition=ready --timeout 600s -A pods --all --field-selector status.phase=Running > /dev/null
$ echo "Package install"
Package install
$ if [ ! -z ${PROJECT_NAME} ]; then # collapsed multi-line command
$ if [ $(ls -1 tests/test-values.y*ml 2>/dev/null | wc -l) -gt 0 ]; then # collapsed multi-line command
Helm installing nexus/chart into nexus namespace using nexus/tests/test-values.yaml for values
NAME: nexus
LAST DEPLOYED: Fri Aug 27 15:01:46 2021
NAMESPACE: nexus
STATUS: deployed
REVISION: 1
NOTES:
1. Get the application URL by running these commands:
  export POD_NAME=$(kubectl get pods --namespace nexus -l "app.kubernetes.io/name=nexus-repository-manager,app.kubernetes.io/instance=nexus" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1 to use your application"
  kubectl --namespace nexus port-forward $POD_NAME 8081:80
$ sleep 10
$ kubectl wait --for=condition=established --timeout 60s -A crd --all > /dev/null
$ if [ -f tests/wait.sh ]; then # collapsed multi-line command
$ wait_sts
$ wait_daemonset
$ kubectl wait --for=condition=available --timeout 600s -A deployment --all > /dev/null
$ kubectl wait --for=condition=ready --timeout 600s -A pods --all --field-selector status.phase=Running > /dev/null
$ echo "Package tests"
Package tests
$ if [ ! -z $(kubectl get services -n istio-system istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}' &> /dev/null) ] && [ ! -z $(kubectl get vs -A -o jsonpath='{.items[0].spec.hosts[0]}' &> /dev/null) ]; then # collapsed multi-line command
$ if [ -f "tests/cypress.json" ]; then # collapsed multi-line command
$ if [ -d "chart/templates/tests" ]; then # collapsed multi-line command
NAME: nexus
LAST DEPLOYED: Fri Aug 27 15:01:46 2021
NAMESPACE: nexus
STATUS: deployed
REVISION: 1
TEST SUITE:     nexus-repository-manager-cypress-sa
Last Started:   Fri Aug 27 15:02:50 2021
Last Completed: Fri Aug 27 15:02:50 2021
Phase:          Succeeded
TEST SUITE:     nexus-repository-manager-cypress-config
Last Started:   Fri Aug 27 15:02:50 2021
Last Completed: Fri Aug 27 15:02:50 2021
Phase:          Succeeded
TEST SUITE:     nexus-repository-manager-cypress-role
Last Started:   Fri Aug 27 15:02:50 2021
Last Completed: Fri Aug 27 15:02:50 2021
Phase:          Succeeded
TEST SUITE:     nexus-repository-manager-cypress-rolebinding
Last Started:   Fri Aug 27 15:02:50 2021
Last Completed: Fri Aug 27 15:02:50 2021
Phase:          Succeeded
TEST SUITE:     nexus-repository-manager-cypress-test
Last Started:   Fri Aug 27 15:02:50 2021
Last Completed: Fri Aug 27 15:03:35 2021
Phase:          Failed
NOTES:
1. Get the application URL by running these commands:
  export POD_NAME=$(kubectl get pods --namespace nexus -l "app.kubernetes.io/name=nexus-repository-manager,app.kubernetes.io/instance=nexus" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1 to use your application"
  kubectl --namespace nexus port-forward $POD_NAME 8081:80
Error: pod nexus-repository-manager-cypress-test failed
***** Start Helm Test Logs *****
====================================================================================================
  (Run Starting)
  ┌────────────────────────────────────────────────────────────────────────────────────────────────┐
  │ Cypress:    5.0.0                                                                              │
  │ Browser:    Chrome 83 (headless)                                                               │
  │ Specs:      1 found (nexus-healthspec.js)                                                      │
  └────────────────────────────────────────────────────────────────────────────────────────────────┘
────────────────────────────────────────────────────────────────────────────────────────────────────
  Running:  nexus-healthspec.js                                                             (1 of 1)
Browserslist: caniuse-lite is outdated. Please run: npx browserslist@latest --update-db
  Basic Nexus
    1) Visit the Nexus sign in page
  0 passing (9s)
  1 failing
  1) Basic Nexus
       Visit the Nexus sign in page:
     CypressError: Timed out retrying: `cy.click()` failed because this element is not visible: `...`
This element `` is not visible because its parent `` has CSS property: `visibility: hidden`
Fix this problem, or use `{force: true}` to disable error checking.
https://on.cypress.io/element-cannot-be-interacted-with
  at $Cy.ensureVisibility (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:160819:24)
  at runAllChecks (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:149841:14)
  at retryActionability (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:149884:16)
  at tryCatcher (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:9852:23)
  at Function.Promise.attempt.Promise.try (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:7126:29)
  at tryFn (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:164075:24)
  at whenStable (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:164116:12)
  at http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:163634:16
  at tryCatcher (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:9852:23)
  at Promise._settlePromiseFromHandler (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:7787:31)
  at Promise._settlePromise (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:7844:18)
  at Promise._settlePromise0 (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:7889:10)
  at Promise._settlePromises (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:7969:18)
  at Promise._fulfill (http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:7913:18)
  at http://nexus-nexus-repository-manager:8081/__cypress/runner/cypress_runner.js:9527:46
From Your Spec Code:
  at Context.eval (http://nexus-nexus-repository-manager:8081/__cypress/tests?p=cypress/integration/nexus-healthspec.js:118:27)
  (Results)
  ┌────────────────────────────────────────────────────────────────────────────────────────────────┐
  │ Tests:        1                                                                                │
  │ Passing:      0                                                                                │
  │ Failing:      1                                                                                │
  │ Pending:      0                                                                                │
  │ Skipped:      0                                                                                │
  │ Screenshots:  1                                                                                │
  │ Video:        true                                                                             │
  │ Duration:     9 seconds                                                                        │
  │ Spec Ran:     nexus-healthspec.js                                                              │
  └────────────────────────────────────────────────────────────────────────────────────────────────┘
  (Screenshots)
  - /test/cypress/screenshots/nexus-healthspec.js/Basic Nexus -- Visit the Nexus sign in page (failed).png (1280x720)
  (Video)
  - Started processing:  Compressing to 32 CRF
  - Finished processing: /test/cypress/videos/nexus-healthspec.js.mp4 (0 seconds)
====================================================================================================
  (Run Finished)
       Spec                                              Tests  Passing  Failing  Pending  Skipped
  ┌────────────────────────────────────────────────────────────────────────────────────────────────┐
  │ ✖  nexus-healthspec.js                      00:09        1        -        1        -        - │
  └────────────────────────────────────────────────────────────────────────────────────────────────┘
    ✖  1 of 1 failed (100%)                     00:09        1        -        1        -        -
tar: Removing leading `/' from member names
configmap/cypress-screenshots created
tar: Removing leading `/' from member names
configmap/cypress-videos created
***** End Helm Test Logs *****
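***** Editor's Note: Debugging the Cypress Failure *****
The failed suite above is the chart's Cypress health check: `cy.click()` timed out because the target element's parent still had `visibility: hidden`, which typically means the Nexus UI had not finished rendering when the click fired. As the error text itself suggests, passing { force: true } to the click in nexus-healthspec.js would bypass the visibility check, though waiting until the element is actually visible is usually the sounder fix. A minimal sketch for reproducing this outside CI, assuming the same release and namespace shown in this log (helm test --logs requires Helm 3.1+):

    # Re-run the chart's Helm test suites against the deployed release,
    # dumping the test pod logs when they finish.
    helm test nexus -n nexus --logs

    # Inspect the failed Cypress test pod directly; the pod name matches
    # the "Error: pod nexus-repository-manager-cypress-test failed" line above.
    kubectl logs -n nexus nexus-repository-manager-cypress-test
    kubectl describe pod -n nexus nexus-repository-manager-cypress-test
***** End Editor's Note *****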
Running after_script
Running after script...
$ if [ -e success ]; then # collapsed multi-line command
Job Failed
Printing Debug Logs
kubectl get all -A
NAMESPACE      NAME                                                  READY   STATUS    RESTARTS   AGE
kube-system    pod/metrics-server-86cbb8457f-2wn58                   1/1     Running   0          2m29s
kube-system    pod/local-path-provisioner-5ff76fc89d-477qg           1/1     Running   0          2m29s
kube-system    pod/coredns-854c77959c-t6vth                          1/1     Running   0          2m29s
istio-system   pod/istiod-7b57d88d9c-455wr                           1/1     Running   0          2m17s
istio-system   pod/svclb-istio-ingressgateway-csnpb                  5/5     Running   0          2m11s
istio-system   pod/istio-ingressgateway-69c8589df9-6g57j             1/1     Running   0          2m11s
nexus          pod/nexus-nexus-repository-manager-7dbbc479d6-clgml   1/1     Running   0          110s
nexus          pod/nexus-repository-manager-cypress-test             0/1     Error     0          46s

NAMESPACE      NAME                                     TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                                                                      AGE
default        service/kubernetes                       ClusterIP      10.43.0.1       <none>        443/TCP                                                                      2m45s
kube-system    service/kube-dns                         ClusterIP      10.43.0.10      <none>        53/UDP,53/TCP,9153/TCP                                                       2m42s
kube-system    service/metrics-server                   ClusterIP      10.43.166.244   <none>        443/TCP                                                                      2m42s
istio-system   service/istiod                           ClusterIP      10.43.126.27    <none>        15010/TCP,15012/TCP,443/TCP,15014/TCP                                        2m17s
istio-system   service/istio-ingressgateway             LoadBalancer   10.43.126.94    172.18.0.2    15021:30620/TCP,80:32497/TCP,443:30353/TCP,15012:32421/TCP,15443:32413/TCP   2m11s
nexus          service/nexus-nexus-repository-manager   ClusterIP      10.43.224.22    <none>        8081/TCP                                                                     110s

NAMESPACE      NAME                                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
istio-system   daemonset.apps/svclb-istio-ingressgateway   1         1         1       1            1           <none>          2m11s

NAMESPACE      NAME                                             READY   UP-TO-DATE   AVAILABLE   AGE
kube-system    deployment.apps/metrics-server                   1/1     1            1           2m42s
kube-system    deployment.apps/local-path-provisioner           1/1     1            1           2m42s
kube-system    deployment.apps/coredns                          1/1     1            1           2m42s
istio-system   deployment.apps/istiod                           1/1     1            1           2m17s
istio-system   deployment.apps/istio-ingressgateway             1/1     1            1           2m11s
nexus          deployment.apps/nexus-nexus-repository-manager   1/1     1            1           110s

NAMESPACE      NAME                                                        DESIRED   CURRENT   READY   AGE
kube-system    replicaset.apps/metrics-server-86cbb8457f                   1         1         1       2m29s
kube-system    replicaset.apps/local-path-provisioner-5ff76fc89d           1         1         1       2m29s
kube-system    replicaset.apps/coredns-854c77959c                          1         1         1       2m29s
istio-system   replicaset.apps/istiod-7b57d88d9c                           1         1         1       2m17s
istio-system   replicaset.apps/istio-ingressgateway-69c8589df9             1         1         1       2m11s
nexus          replicaset.apps/nexus-nexus-repository-manager-7dbbc479d6   1         1         1       110s

NAMESPACE      NAME                                                       REFERENCE                         TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
istio-system   horizontalpodautoscaler.autoscaling/istiod                 Deployment/istiod                 0%/80%    1         5         1          2m17s
istio-system   horizontalpodautoscaler.autoscaling/istio-ingressgateway   Deployment/istio-ingressgateway   17%/80%   1         5         1          2m11s
$ docker exec -i k3d-${CI_JOB_ID}-server-0 crictl images -o json | jq -r '.images[].repoTags[0] | select(. != null)' > images.txt
$ sed -i '/docker.io\/istio\//d' images.txt
$ sed -i '/docker.io\/rancher\//d' images.txt
$ if [ -f tests/images.txt ]; then # collapsed multi-line command
$ k3d cluster delete ${CI_JOB_ID}
INFO[0000] Deleting cluster '5990488'
INFO[0000] Deleted k3d-5990488-serverlb
INFO[0004] Deleted k3d-5990488-server-0
INFO[0004] Deleting image volume 'k3d-5990488-images'
INFO[0004] Removing cluster details from default kubeconfig...
INFO[0004] Removing standalone kubeconfig file (if there is one)...
INFO[0004] Successfully deleted cluster 5990488!
$ docker network rm ${CI_JOB_ID}
5990488
Uploading artifacts for failed job
Uploading artifacts...
images.txt: found 1 matching files and directories
WARNING: tests/cypress/screenshots: no matching files
WARNING: tests/cypress/videos: no matching files
cypress-artifacts: found 6 matching files and directories
Uploading artifacts as "archive" to coordinator... ok  id=5990488 responseStatus=201 Created token=DNznGA4s
Cleaning up file based variables
ERROR: Job failed: command terminated with exit code 1
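***** Editor's Note: Retrieving the Failure Artifacts *****
The upload step above archived images.txt plus the cypress-artifacts directory (the screenshot and video captured for the failed spec). A sketch for pulling the archive down for inspection, assuming repo1.dso.mil is the GitLab host for this project; the project id (4906) and job id (5990488) are read from the log, and <your-token> is a placeholder for a personal access token with read_api scope:

    # Download the artifact archive for job 5990488 via the GitLab jobs API.
    curl --header "PRIVATE-TOKEN: <your-token>" \
      "https://repo1.dso.mil/api/v4/projects/4906/jobs/5990488/artifacts" \
      --output artifacts.zip

    # Unpack and look under the cypress-artifacts path named in the upload step.
    unzip artifacts.zip -d artifacts
***** End Editor's Note *****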