Running with gitlab-runner 17.5.4 (d8d88d9e)
  on graduated-runner-graduated-runner-gitlab-runner-c8fd66bb8-7t2sw t2_cmeAC, system ID: r_RjKnXc9h57C2
Resolving secrets

Preparing the "kubernetes" executor
Using Kubernetes namespace: graduated-runner
Using Kubernetes executor with image registry1.dso.mil/bigbang-ci/bb-ci:2.21.0 ...
Using attach strategy to execute scripts...

Preparing environment
Using FF_USE_POD_ACTIVE_DEADLINE_SECONDS, the Pod activeDeadlineSeconds will be set to the job timeout: 1h0m0s...
Waiting for pod graduated-runner/runner-t2cmeac-project-6751-concurrent-0-thjf02nf to be running, status is Pending
  ContainersNotInitialized: "containers with incomplete status: [istio-proxy init-permissions]"
  ContainersNotReady: "containers with unready status: [istio-proxy build helper svc-0]"
Waiting for pod graduated-runner/runner-t2cmeac-project-6751-concurrent-0-thjf02nf to be running, status is Pending
  ContainersNotReady: "containers with unready status: [build helper svc-0]"
Running on runner-t2cmeac-project-6751-concurrent-0-thjf02nf via graduated-runner-graduated-runner-gitlab-runner-c8fd66bb8-7t2sw...

Getting source from Git repository
Fetching changes with git depth set to 20...
Initialized empty Git repository in /builds/big-bang/product/packages/gluon/.git/
Created fresh repository.
Checking out 8a25d50e as detached HEAD (ref is refs/merge-requests/105/head)...
Skipping Git submodules setup

Executing "step_script" stage of the job script

K3D Cluster Create

$ git clone -b ${PIPELINE_REPO_BRANCH} ${PIPELINE_REPO} ${PIPELINE_REPO_DESTINATION}
Cloning into '../pipeline-repo'...
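The rest of the job is driven by shell libraries from this shared pipeline repository. To replay the step outside CI, the three variables can be exported by hand; the values below are illustrative assumptions (the real ones come from the project's CI/CD variables), not taken from this job:

# Hypothetical values for a local replay; the job gets these from CI/CD variables.
export PIPELINE_REPO="https://repo1.dso.mil/big-bang/pipeline-templates/pipeline-templates.git"
export PIPELINE_REPO_BRANCH="master"
export PIPELINE_REPO_DESTINATION="../pipeline-repo"
git clone -b "${PIPELINE_REPO_BRANCH}" "${PIPELINE_REPO}" "${PIPELINE_REPO_DESTINATION}"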
$ source ${PIPELINE_REPO_DESTINATION}/library/templates.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/bigbang-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/package-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/k8s-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/rds-functions.sh
$ package_auth_setup
$ i=0; while [ "$i" -lt 12 ]; do docker info &>/dev/null && break; sleep 5; i=$(( i + 1 )); done
$ docker network create --opt com.docker.network.bridge.name=${CI_JOB_ID} ${CI_JOB_ID} --driver=bridge -o "com.docker.network.driver.mtu"="1450" --subnet=172.20.0.0/16 --gateway 172.20.0.1
407c4c71ee9afe6a8fc308327dab70022afd3846f2e397cd53f5cf1b401d89cf
$ chmod +x ${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/k3d/deploy_k3d.sh; echo "Executing ${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/k3d/deploy_k3d.sh..."; ./${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/k3d/deploy_k3d.sh
Executing ../pipeline-repo/clusters/k3d/dependencies/k3d/deploy_k3d.sh...
Creating k3d cluster with default metrics server
INFO[0000] Using config file ../pipeline-repo/clusters/k3d/dependencies/k3d/config.yaml (k3d.io/v1alpha4#simple)
WARN[0000] Default config apiVersion is 'k3d.io/v1alpha5', but you're using 'k3d.io/v1alpha4': consider migrating.
INFO[0000] portmapping '80:80' targets the loadbalancer: defaulting to [servers:*:proxy agents:*:proxy]
INFO[0000] portmapping '443:443' targets the loadbalancer: defaulting to [servers:*:proxy agents:*:proxy]
INFO[0000] Prep: Network
INFO[0000] Re-using existing network '41583920' (407c4c71ee9afe6a8fc308327dab70022afd3846f2e397cd53f5cf1b401d89cf)
INFO[0000] Created image volume k3d-41583920-images
INFO[0000] Starting new tools node...
INFO[0000] Pulling image 'ghcr.io/k3d-io/k3d-tools:5.7.5'
INFO[0001] Creating node 'k3d-41583920-server-0'
INFO[0001] Starting node 'k3d-41583920-tools'
INFO[0002] Pulling image 'rancher/k3s:v1.31.4-k3s1'
INFO[0005] Creating LoadBalancer 'k3d-41583920-serverlb'
INFO[0006] Pulling image 'ghcr.io/k3d-io/k3d-proxy:5.7.5'
INFO[0008] Using the k3d-tools node to gather environment information
INFO[0008] HostIP: using network gateway 172.20.0.1 address
INFO[0008] Starting cluster '41583920'
INFO[0008] Starting servers...
INFO[0009] Starting node 'k3d-41583920-server-0'
INFO[0013] All agents already running.
INFO[0013] Starting helpers...
INFO[0013] Starting node 'k3d-41583920-serverlb'
INFO[0020] Injecting records for hostAliases (incl. host.k3d.internal) and for 2 network members into CoreDNS configmap...
INFO[0022] Cluster '41583920' created successfully!
INFO[0022] You can now use it like this:
kubectl cluster-info
$ until kubectl get deployment coredns -n kube-system -o go-template='{{.status.availableReplicas}}' | grep -v -e '<no value>'; do sleep 1s; done
1
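The cluster above is built by deploy_k3d.sh from a simple-type config file in the pipeline repo. A CLI-flag equivalent, consistent with the options visible in the output (a sketch, not the script's actual contents; the real config.yaml may set fields not shown here):

# Sketch: single-server cluster on the job's docker network, with HTTP/HTTPS
# published through the k3d load balancer, matching the log above.
k3d cluster create "${CI_JOB_ID}" \
  --servers 1 \
  --image rancher/k3s:v1.31.4-k3s1 \
  --network "${CI_JOB_ID}" \
  --port "80:80@loadbalancer" \
  --port "443:443@loadbalancer"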
$ chmod +x ${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/metallb/install_metallb.sh; echo "Executing ${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/metallb/install_metallb.sh..."; ./${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/metallb/install_metallb.sh
Executing ../pipeline-repo/clusters/k3d/dependencies/metallb/install_metallb.sh...
namespace/metallb-system created
namespace/metallb-system labeled
secret/private-registry created
customresourcedefinition.apiextensions.k8s.io/addresspools.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bfdprofiles.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bgpadvertisements.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bgppeers.metallb.io created
customresourcedefinition.apiextensions.k8s.io/communities.metallb.io created
customresourcedefinition.apiextensions.k8s.io/ipaddresspools.metallb.io created
customresourcedefinition.apiextensions.k8s.io/l2advertisements.metallb.io created
serviceaccount/controller created
serviceaccount/speaker created
role.rbac.authorization.k8s.io/controller created
role.rbac.authorization.k8s.io/pod-lister created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
rolebinding.rbac.authorization.k8s.io/controller created
rolebinding.rbac.authorization.k8s.io/pod-lister created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
configmap/metallb-excludel2 created
secret/webhook-server-cert created
service/webhook-service created
deployment.apps/controller created
daemonset.apps/speaker created
validatingwebhookconfiguration.admissionregistration.k8s.io/metallb-webhook-configuration created
Waiting on MetalLB controller/webhook...
deployment.apps/controller condition met
ipaddresspool.metallb.io/default created
l2advertisement.metallb.io/l2advertisement1 created
Waiting for daemon set "speaker" rollout to finish: 0 of 1 updated pods are available...
daemon set "speaker" successfully rolled out
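The script's final step creates the two Layer-2 objects named above. A sketch of what they look like, assuming an address range carved from the job's 172.20.0.0/16 docker network (the actual range is chosen by install_metallb.sh):

# Hypothetical pool range; object names and kinds match the log above.
kubectl apply -f - <<EOF
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
    - 172.20.1.240-172.20.1.250
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2advertisement1
  namespace: metallb-system
spec:
  ipAddressPools:
    - default
EOF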
$ get_all

All Cluster Resources

NAMESPACE        NAME                                          READY   STATUS    RESTARTS   AGE
kube-system      pod/coredns-ccb96694c-svfs9                   1/1     Running   0          31s
kube-system      pod/local-path-provisioner-5cf85fd84d-xt92g   1/1     Running   0          31s
kube-system      pod/metrics-server-5985cbc9d7-g86gp           1/1     Running   0          31s
metallb-system   pod/controller-5f67f69db-2bwbt                1/1     Running   0          24s
metallb-system   pod/speaker-gjqs2                             1/1     Running   0          24s

NAMESPACE        NAME                      TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default          service/kubernetes        ClusterIP   172.20.0.1      <none>        443/TCP                  38s
kube-system      service/kube-dns          ClusterIP   172.20.0.10     <none>        53/UDP,53/TCP,9153/TCP   34s
kube-system      service/metrics-server    ClusterIP   172.20.52.138   <none>        443/TCP                  33s
metallb-system   service/webhook-service   ClusterIP   172.20.16.88    <none>        443/TCP                  24s

NAMESPACE        NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
metallb-system   daemonset.apps/speaker   1         1         1       1            1           kubernetes.io/os=linux   24s

NAMESPACE        NAME                                     READY   UP-TO-DATE   AVAILABLE   AGE
kube-system      deployment.apps/coredns                  1/1     1            1           34s
kube-system      deployment.apps/local-path-provisioner   1/1     1            1           34s
kube-system      deployment.apps/metrics-server           1/1     1            1           33s
metallb-system   deployment.apps/controller               1/1     1            1           24s

NAMESPACE        NAME                                                DESIRED   CURRENT   READY   AGE
kube-system      replicaset.apps/coredns-ccb96694c                   1         1         1       31s
kube-system      replicaset.apps/local-path-provisioner-5cf85fd84d   1         1         1       31s
kube-system      replicaset.apps/metrics-server-5985cbc9d7           1         1         1       31s
metallb-system   replicaset.apps/controller-5f67f69db                1         1         1       24s

$ helm dependency update ${VALIDATION_CHART_NAME}
Saving 1 charts
Deleting outdated charts
$ kubectl create namespace ${PACKAGE_NAMESPACE}
namespace/gluon created
$ kubectl create -n ${PACKAGE_NAMESPACE} secret docker-registry private-registry --docker-server="https://${HARBOR_BB_REGISTRY}" --docker-username="${BB_CI_PUSH_USER}" --docker-password="${BB_CI_PUSH_TOKEN}"
secret/private-registry created
$ helm install ${PACKAGE_NAMESPACE} ${VALIDATION_CHART_NAME} -n ${PACKAGE_NAMESPACE}
NAME: gluon
LAST DEPLOYED: Wed Jan 15 18:35:11 2025
NAMESPACE: gluon
STATUS: deployed
REVISION: 1
$ package_wait

Package Wait

Waiting on CRDs ... done.
Waiting on stateful sets ... done.
Waiting on daemon sets ... done.
Waiting on deployments ... done.
Waiting on terminating pods ... done.
done.

$ cypressTestBasePath="/" # collapsed multi-line command
NAME: gluon
LAST DEPLOYED: Wed Jan 15 18:35:11 2025
NAMESPACE: gluon
STATUS: deployed
REVISION: 1
TEST SUITE:     validate-chart-cypress-config
Last Started:   Wed Jan 15 18:35:34 2025
Last Completed: Wed Jan 15 18:35:34 2025
Phase:          Succeeded
TEST SUITE:     validate-chart-script-config
Last Started:   Wed Jan 15 18:35:34 2025
Last Completed: Wed Jan 15 18:35:34 2025
Phase:          Succeeded
TEST SUITE:     validate-chart-cypress-test
Last Started:   Wed Jan 15 18:35:35 2025
Last Completed: Wed Jan 15 18:36:20 2025
Phase:          Succeeded
TEST SUITE:     validate-chart-script-test
Last Started:   Wed Jan 15 18:36:20 2025
Last Completed: Wed Jan 15 18:36:23 2025
Phase:          Succeeded
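The suites above are the chart's Helm test hooks, run by the collapsed command. Against the same release outside CI, the equivalent invocation is roughly (a sketch; the pipeline command additionally captures and post-processes the pod output):

# Re-run the chart's test hooks and stream their pod logs (names as above).
helm test gluon -n gluon --logs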
***** Start Helm Test Logs *****
--2025-01-15 18:35:56--  https://repo1.dso.mil/big-bang/product/packages/gluon/-/raw/master/common/commands.js
Resolving repo1.dso.mil (repo1.dso.mil)... 15.205.173.153
Connecting to repo1.dso.mil (repo1.dso.mil)|15.205.173.153|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 4656 (4.5K) [text/plain]
Saving to: '/test/cypress/common/commands.js'

     0K ....                                                  100% 61.5M=0s

2025-01-15 18:35:56 (61.5 MB/s) - '/test/cypress/common/commands.js' saved [4656/4656]

DevTools listening on ws://127.0.0.1:43607/devtools/browser/e2e43dfa-819d-4b47-8512-d3a33584a78c

This folder is not writable: /test
Writing to this directory is required by Cypress in order to store screenshots and videos.
Enable write permissions to this directory to ensure screenshots and videos are stored.
If you don't require screenshots or videos to be stored you can safely ignore this warning.

tput: No value for $TERM and no -T specified
================================================================================

  (Run Starting)

  ┌────────────────────────────────────────────────────────────────────────┐
  │ Cypress:        13.17.0                                                │
  │ Browser:        Chrome 132 (headless)                                  │
  │ Node Version:   v22.13.0 (/usr/local/bin/node)                         │
  │ Specs:          1 found (01-health.cy.js)                              │
  │ Searched:       cypress/e2e/**/*.cy.{js,jsx,ts,tsx}                    │
  └────────────────────────────────────────────────────────────────────────┘

──────────────────────────────────────────────────────────────────────────────

  Running:  01-health.cy.js                                          (1 of 1)

  Dummy Cypress Test
    ✓ Dummy Cypress Test (1039ms)

  1 passing (1s)

  (Results)

  ┌────────────────────────────────────────────────────────────────────────┐
  │ Tests:        1                                                        │
  │ Passing:      1                                                        │
  │ Failing:      0                                                        │
  │ Pending:      0                                                        │
  │ Skipped:      0                                                        │
  │ Screenshots:  0                                                        │
  │ Video:        true                                                    │
  │ Duration:     1 second                                                 │
  │ Spec Ran:     01-health.cy.js                                          │
  └────────────────────────────────────────────────────────────────────────┘

  (Video)

  - Started compressing: Compressing to 35 CRF
  - Finished compressing: 0 seconds
  - Video output: /test/cypress/videos/01-health.cy.js.mp4

tput: No value for $TERM and no -T specified
================================================================================

  (Run Finished)

       Spec                    Tests  Passing  Failing  Pending  Skipped
  ┌────────────────────────────────────────────────────────────────────────┐
  │ ✔  01-health.cy.js   00:01     1        1        -        -        -   │
  └────────────────────────────────────────────────────────────────────────┘
    ✔  All specs passed! 00:01     1        1        -        -        -

npm notice
npm notice New major version of npm available! 10.9.2 -> 11.0.0
npm notice Changelog: https://github.com/npm/cli/releases/tag/v11.0.0
npm notice To update run: npm install -g npm@11.0.0
npm notice
found cypress logs from the pod
no cypress screenshots found from the pod
found cypress videos from the pod
--- Running test.sh...
--- Hello from Test Script
***** End Helm Test Logs *****
Cypress test logs found from the pipe
No cypress test screenshots found from the pipe
Cypress test videos found from the pipe
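Both hook pods are left behind in the package namespace as Completed (visible in the resource dump below), so their output can also be re-read directly from the cluster; pod names are as shown in the logs above:

# Re-read the test hook output straight from the completed pods.
kubectl logs -n gluon validate-chart-cypress-test
kubectl logs -n gluon validate-chart-script-test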
Running after_script

Running after script...
$ source ${PIPELINE_REPO_DESTINATION}/library/templates.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/bigbang-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/package-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/k8s-functions.sh
$ get_ns

Namespaces

NAME              STATUS   AGE    LABELS
default           Active   112s   kubernetes.io/metadata.name=default
gluon             Active   72s    kubernetes.io/metadata.name=gluon
kube-node-lease   Active   112s   kubernetes.io/metadata.name=kube-node-lease
kube-public       Active   112s   kubernetes.io/metadata.name=kube-public
kube-system       Active   112s   kubernetes.io/metadata.name=kube-system
metallb-system    Active   97s    app=metallb,kubernetes.io/metadata.name=metallb-system

$ get_all

All Cluster Resources

NAMESPACE        NAME                                          READY   STATUS      RESTARTS   AGE
gluon            pod/gluon-validate-chart-74cf74c849-qvj8f     1/1     Running     0          72s
gluon            pod/validate-chart-cypress-test               0/1     Completed   0          48s
gluon            pod/validate-chart-script-test                0/1     Completed   0          3s
kube-system      pod/coredns-ccb96694c-svfs9                   1/1     Running     0          104s
kube-system      pod/local-path-provisioner-5cf85fd84d-xt92g   1/1     Running     0          104s
kube-system      pod/metrics-server-5985cbc9d7-g86gp           1/1     Running     0          104s
metallb-system   pod/controller-5f67f69db-2bwbt                1/1     Running     0          97s
metallb-system   pod/speaker-gjqs2                             1/1     Running     0          97s

NAMESPACE        NAME                           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default          service/kubernetes             ClusterIP   172.20.0.1      <none>        443/TCP                  111s
gluon            service/gluon-validate-chart   ClusterIP   172.20.191.58   <none>        80/TCP                   72s
kube-system      service/kube-dns               ClusterIP   172.20.0.10     <none>        53/UDP,53/TCP,9153/TCP   107s
kube-system      service/metrics-server         ClusterIP   172.20.52.138   <none>        443/TCP                  106s
metallb-system   service/webhook-service        ClusterIP   172.20.16.88    <none>        443/TCP                  97s

NAMESPACE        NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
metallb-system   daemonset.apps/speaker   1         1         1       1            1           kubernetes.io/os=linux   97s

NAMESPACE        NAME                                     READY   UP-TO-DATE   AVAILABLE   AGE
gluon            deployment.apps/gluon-validate-chart     1/1     1            1           72s
kube-system      deployment.apps/coredns                  1/1     1            1           107s
kube-system      deployment.apps/local-path-provisioner   1/1     1            1           107s
kube-system      deployment.apps/metrics-server           1/1     1            1           106s
metallb-system   deployment.apps/controller               1/1     1            1           97s

NAMESPACE        NAME                                                DESIRED   CURRENT   READY   AGE
gluon            replicaset.apps/gluon-validate-chart-74cf74c849     1         1         1       72s
kube-system      replicaset.apps/coredns-ccb96694c                   1         1         1       104s
kube-system      replicaset.apps/local-path-provisioner-5cf85fd84d   1         1         1       104s
kube-system      replicaset.apps/metrics-server-5985cbc9d7           1         1         1       104s
metallb-system   replicaset.apps/controller-5f67f69db                1         1         1       97s

$ get_events

Cluster Event Log

NOTICE: Cluster events can be found in artifact events.txt

$ bigbang_pipeline
Pipeline type is not BB, skipping
$ get_debug
Debug not enabled, skipping
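The after_script runs the teardown below even when the job fails. When replaying the pipeline locally, an exit trap gives the same guarantee (a sketch; the CI_JOB_ID value is a hypothetical stand-in for the job's own ID):

# Hypothetical local stand-in for the CI job ID used throughout this job.
export CI_JOB_ID="41583920"
# Tear down the cluster and its network on any shell exit, success or failure.
trap 'k3d cluster delete "${CI_JOB_ID}"; docker network rm "${CI_JOB_ID}"' EXIT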
$ k3d cluster delete ${CI_JOB_ID}
INFO[0000] Deleting cluster '41583920'
INFO[0004] Deleting 1 attached volumes...
INFO[0004] Removing cluster details from default kubeconfig...
INFO[0004] Removing standalone kubeconfig file (if there is one)...
INFO[0004] Successfully deleted cluster 41583920!
$ docker network rm ${CI_JOB_ID}
41583920

Uploading artifacts for successful job

Uploading artifacts...
events.txt: found 1 matching artifact files and directories
WARNING: get_cpumem.txt: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/packages/gluon)
WARNING: images.txt: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/packages/gluon)
WARNING: pod_logs: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/packages/gluon)
WARNING: cluster_info_dump.txt: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/packages/gluon)
WARNING: kubectl_describes: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/packages/gluon)
cypress-artifacts: found 8 matching artifact files and directories
Uploading artifacts as "archive" to coordinator... 201 Created  id=41583920 responseStatus=201 Created token=glcbt-64

Cleaning up project directory and file based variables

Job succeeded