Running with gitlab-runner 17.8.0 (e4f782b3)
  on graduated-runner-graduated-runner-gitlab-runner-6d4f7b8ccczfg95 t2_t1jsF, system ID: r_nSetl0DZjXdE
Resolving secrets

Preparing the "kubernetes" executor
Using Kubernetes namespace: graduated-runner
Using Kubernetes executor with image registry1.dso.mil/bigbang-ci/bb-ci:2.21.2 ...
Using attach strategy to execute scripts...

Preparing environment
Using FF_USE_POD_ACTIVE_DEADLINE_SECONDS, the Pod activeDeadlineSeconds will be set to the job timeout: 1h0m0s...
Waiting for pod graduated-runner/runner-t2t1jsf-project-4986-concurrent-1-i4rzx2vh to be running, status is Pending
Waiting for pod graduated-runner/runner-t2t1jsf-project-4986-concurrent-1-i4rzx2vh to be running, status is Pending
	ContainersNotInitialized: "containers with incomplete status: [init-permissions]"
	ContainersNotReady: "containers with unready status: [istio-proxy build helper svc-0]"
	ContainersNotReady: "containers with unready status: [istio-proxy build helper svc-0]"
Running on runner-t2t1jsf-project-4986-concurrent-1-i4rzx2vh via graduated-runner-graduated-runner-gitlab-runner-6d4f7b8ccczfg95...

Getting source from Git repository
Fetching changes with git depth set to 20...
Initialized empty Git repository in /builds/big-bang/product/maintained/redis/.git/
Created fresh repository.
Checking out 280f208c as detached HEAD (ref is refs/merge-requests/162/head)...
Skipping Git submodules setup

Executing "step_script" stage of the job script

K3D Cluster Create
$ git clone -b ${PIPELINE_REPO_BRANCH} ${PIPELINE_REPO} ${PIPELINE_REPO_DESTINATION}
Cloning into '../pipeline-repo'...
$ source ${PIPELINE_REPO_DESTINATION}/library/templates.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/bigbang-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/package-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/k8s-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/rds-functions.sh
$ package_auth_setup
$ i=0; while [ "$i" -lt 12 ]; do docker info &>/dev/null && break; sleep 5; i=$(( i + 1 )); done
$ docker network create --opt com.docker.network.bridge.name=${CI_JOB_ID} ${CI_JOB_ID} --driver=bridge -o "com.docker.network.driver.mtu"="1450" --subnet=172.20.0.0/16 --gateway 172.20.0.1
f9442bc64f03a28627903e2a9493a93faa1964ad8c1ff4fa7e148d688b5eedc6
$ chmod +x ${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/k3d/deploy_k3d.sh; echo "Executing ${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/k3d/deploy_k3d.sh..."; ./${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/k3d/deploy_k3d.sh
Executing ../pipeline-repo/clusters/k3d/dependencies/k3d/deploy_k3d.sh...
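Note: deploy_k3d.sh drives k3d from the declarative config referenced in the output below (config.yaml, schema k3d.io/v1alpha5#simple). The file itself never appears in this log; a minimal sketch of such a config, with values inferred from the log (one server, rancher/k3s:v1.31.4-k3s1, the pre-created job network, the 80/443 loadbalancer port mappings), might look like the following. The agent count is not visible here; the wrapper waits for three agent containers but k3d only creates server-0 in this run, so no agents field is shown.

    # Sketch only; approximates ../pipeline-repo/clusters/k3d/dependencies/k3d/config.yaml
    cat > k3d-config.yaml <<EOF
    apiVersion: k3d.io/v1alpha5
    kind: Simple
    servers: 1
    image: rancher/k3s:v1.31.4-k3s1
    network: "${CI_JOB_ID}"   # reuse the docker network created above
    ports:
      - port: 80:80
        nodeFilters:
          - loadbalancer
      - port: 443:443
        nodeFilters:
          - loadbalancer
    EOF
    k3d cluster create "${CI_JOB_ID}" --config k3d-config.yaml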
Creating k3d cluster with default metrics server
Configuring DNS for k3d-43791259-agent-0
Configuring DNS for k3d-43791259-agent-2
Configuring DNS for k3d-43791259-agent-1
Waiting for k3d-43791259-agent-0 to start... (0s elapsed)
Waiting for k3d-43791259-agent-1 to start... (0s elapsed)
Waiting for k3d-43791259-agent-2 to start... (0s elapsed)
INFO[0000] Using config file ../pipeline-repo/clusters/k3d/dependencies/k3d/config.yaml (k3d.io/v1alpha5#simple)
INFO[0000] portmapping '80:80' targets the loadbalancer: defaulting to [servers:*:proxy agents:*:proxy]
INFO[0000] portmapping '443:443' targets the loadbalancer: defaulting to [servers:*:proxy agents:*:proxy]
INFO[0000] Prep: Network
INFO[0000] Re-using existing network '43791259' (f9442bc64f03a28627903e2a9493a93faa1964ad8c1ff4fa7e148d688b5eedc6)
INFO[0000] Created image volume k3d-43791259-images
INFO[0000] Starting new tools node...
INFO[0000] Pulling image 'ghcr.io/k3d-io/k3d-tools:5.7.5'
INFO[0001] Creating node 'k3d-43791259-server-0'
INFO[0001] Starting node 'k3d-43791259-tools'
Waiting for k3d-43791259-agent-2 to start... (2s elapsed)
Waiting for k3d-43791259-agent-0 to start... (2s elapsed)
Waiting for k3d-43791259-agent-1 to start... (2s elapsed)
INFO[0002] Pulling image 'rancher/k3s:v1.31.4-k3s1'
Waiting for k3d-43791259-agent-2 to start... (4s elapsed)
Waiting for k3d-43791259-agent-0 to start... (4s elapsed)
Waiting for k3d-43791259-agent-1 to start... (4s elapsed)
INFO[0005] Creating LoadBalancer 'k3d-43791259-serverlb'
Waiting for k3d-43791259-agent-2 to start... (6s elapsed)
Waiting for k3d-43791259-agent-0 to start... (6s elapsed)
Waiting for k3d-43791259-agent-1 to start... (6s elapsed)
INFO[0006] Pulling image 'ghcr.io/k3d-io/k3d-proxy:5.7.5'
INFO[0007] Using the k3d-tools node to gather environment information
Waiting for k3d-43791259-agent-2 to start... (8s elapsed)
Waiting for k3d-43791259-agent-1 to start... (8s elapsed)
Waiting for k3d-43791259-agent-0 to start... (8s elapsed)
INFO[0008] HostIP: using network gateway 172.20.0.1 address
INFO[0008] Starting cluster '43791259'
INFO[0008] Starting servers...
INFO[0008] Starting node 'k3d-43791259-server-0'
Waiting for k3d-43791259-agent-2 to start... (10s elapsed)
Waiting for k3d-43791259-agent-1 to start... (10s elapsed)
Waiting for k3d-43791259-agent-0 to start... (10s elapsed)
Waiting for k3d-43791259-agent-2 to start... (12s elapsed)
Waiting for k3d-43791259-agent-0 to start... (12s elapsed)
Waiting for k3d-43791259-agent-1 to start... (12s elapsed)
INFO[0012] All agents already running.
INFO[0012] Starting helpers...
INFO[0012] Starting node 'k3d-43791259-serverlb'
Waiting for k3d-43791259-agent-2 to start... (14s elapsed)
Waiting for k3d-43791259-agent-1 to start... (14s elapsed)
Waiting for k3d-43791259-agent-0 to start... (14s elapsed)
Timeout waiting for k3d-43791259-agent-2, skipping DNS configuration
Timeout waiting for k3d-43791259-agent-0, skipping DNS configuration
Timeout waiting for k3d-43791259-agent-1, skipping DNS configuration
INFO[0018] Injecting records for hostAliases (incl. host.k3d.internal) and for 2 network members into CoreDNS configmap...
INFO[0020] Cluster '43791259' created successfully!
INFO[0021] You can now use it like this:
kubectl cluster-info
K3d cluster creation completed
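Note: k3d merges the new cluster into the default kubeconfig, so subsequent kubectl calls in this job target it directly. A quick manual spot-check at this point (standard kubectl commands, not part of the pipeline script):

    kubectl cluster-info
    kubectl get nodes -o wide           # expect k3d-43791259-server-0 in Ready state
    kubectl get pods -n kube-system     # coredns, metrics-server, local-path-provisioner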
$ until kubectl get deployment coredns -n kube-system -o go-template='{{.status.availableReplicas}}' | grep -v -e '<no value>'; do sleep 1s; done
1
$ chmod +x ${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/metallb/install_metallb.sh; echo "Executing ${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/metallb/install_metallb.sh..."; ./${PIPELINE_REPO_DESTINATION}/clusters/k3d/dependencies/metallb/install_metallb.sh
Executing ../pipeline-repo/clusters/k3d/dependencies/metallb/install_metallb.sh...
namespace/metallb-system created
namespace/metallb-system labeled
secret/private-registry created
customresourcedefinition.apiextensions.k8s.io/addresspools.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bfdprofiles.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bgpadvertisements.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bgppeers.metallb.io created
customresourcedefinition.apiextensions.k8s.io/communities.metallb.io created
customresourcedefinition.apiextensions.k8s.io/ipaddresspools.metallb.io created
customresourcedefinition.apiextensions.k8s.io/l2advertisements.metallb.io created
serviceaccount/controller created
serviceaccount/speaker created
role.rbac.authorization.k8s.io/controller created
role.rbac.authorization.k8s.io/pod-lister created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
rolebinding.rbac.authorization.k8s.io/controller created
rolebinding.rbac.authorization.k8s.io/pod-lister created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
configmap/metallb-excludel2 created
secret/webhook-server-cert created
service/webhook-service created
deployment.apps/controller created
daemonset.apps/speaker created
validatingwebhookconfiguration.admissionregistration.k8s.io/metallb-webhook-configuration created
Waiting on MetalLB controller/webhook...
deployment.apps/controller condition met
ipaddresspool.metallb.io/default created
l2advertisement.metallb.io/l2advertisement1 created
Waiting for daemon set "speaker" rollout to finish: 0 of 1 updated pods are available...
daemon set "speaker" successfully rolled out
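Note: the IPAddressPool and L2Advertisement created above are what let LoadBalancer services get an address on the job's docker network. Their manifests are not shown in the log; a minimal sketch, where the address range is an assumption inside the 172.20.0.0/16 subnet created earlier:

    kubectl apply -f - <<EOF
    apiVersion: metallb.io/v1beta1
    kind: IPAddressPool
    metadata:
      name: default
      namespace: metallb-system
    spec:
      addresses:
        - 172.20.1.240-172.20.1.250   # assumed range; must sit inside the job network's subnet
    ---
    apiVersion: metallb.io/v1beta1
    kind: L2Advertisement
    metadata:
      name: l2advertisement1
      namespace: metallb-system
    EOF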
$ get_all

All Cluster Resources
NAMESPACE        NAME                                          READY   STATUS    RESTARTS   AGE
kube-system      pod/coredns-ccb96694c-l9vk9                   1/1     Running   0          32s
kube-system      pod/local-path-provisioner-5cf85fd84d-mlb5l   1/1     Running   0          32s
kube-system      pod/metrics-server-5985cbc9d7-l8bvf           1/1     Running   0          32s
metallb-system   pod/controller-5f67f69db-4b5kb                1/1     Running   0          24s
metallb-system   pod/speaker-vc24t                             1/1     Running   0          24s

NAMESPACE        NAME                      TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default          service/kubernetes        ClusterIP   172.20.0.1      <none>        443/TCP                  39s
kube-system      service/kube-dns          ClusterIP   172.20.0.10     <none>        53/UDP,53/TCP,9153/TCP   35s
kube-system      service/metrics-server    ClusterIP   172.20.67.67    <none>        443/TCP                  35s
metallb-system   service/webhook-service   ClusterIP   172.20.189.26   <none>        443/TCP                  25s

NAMESPACE        NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
metallb-system   daemonset.apps/speaker   1         1         1       1            1           kubernetes.io/os=linux   24s

NAMESPACE        NAME                                     READY   UP-TO-DATE   AVAILABLE   AGE
kube-system      deployment.apps/coredns                  1/1     1            1           35s
kube-system      deployment.apps/local-path-provisioner   1/1     1            1           35s
kube-system      deployment.apps/metrics-server           1/1     1            1           35s
metallb-system   deployment.apps/controller               1/1     1            1           25s

NAMESPACE        NAME                                                DESIRED   CURRENT   READY   AGE
kube-system      replicaset.apps/coredns-ccb96694c                   1         1         1       33s
kube-system      replicaset.apps/local-path-provisioner-5cf85fd84d   1         1         1       33s
kube-system      replicaset.apps/metrics-server-5985cbc9d7           1         1         1       33s
metallb-system   replicaset.apps/controller-5f67f69db                1         1         1       25s

$ echo "Installing ${CI_PROJECT_NAME} from ${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}"
Installing redis from main

Package Checkout
$ git fetch && git checkout ${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}
From https://repo1.dso.mil/big-bang/product/maintained/redis
 * [new branch]      gitlab-testing -> origin/gitlab-testing
 * [new branch]      main           -> origin/main
 * [new branch]      redis-fix-244  -> origin/redis-fix-244
Previous HEAD position was 280f208 helm dependency update
Switched to a new branch 'main'
branch 'main' set up to track 'origin/main'.

$ rds_create

RDS Database Dependency Creation

$ dependency_install

Dependency Install

$ dependency_wait

Dependency Wait

$ package_install

Package Install
namespace/redis created
namespace/redis labeled
secret/private-registry created
Helm installing redis/chart into redis namespace using redis/tests/test-values.yaml for values
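Note: package_install wraps Helm; its exact invocation is not shown in the log. Inferred from the lines immediately above and below, a plausible equivalent (flags are assumptions; the helper has already created the redis namespace and registry pull secret itself):

    helm upgrade --install redis ./chart \
      --namespace redis \
      --values tests/test-values.yaml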
Release "redis" does not exist. Installing it now.
NAME: redis
LAST DEPLOYED: Fri Mar 28 18:35:42 2025
NAMESPACE: redis
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: redis
CHART VERSION: 20.11.4-bb.0
APP VERSION: 7.4.2

Did you know there are enterprise versions of the Bitnami catalog? For enhanced secure software supply chain features, unlimited pulls from Docker, LTS support, or application customization, see Bitnami Premium or Tanzu Application Catalog. See https://www.arrow.com/globalecs/na/vendors/bitnami for more information.

** Please be patient while the chart is being deployed **

Redis® can be accessed on the following DNS names from within your cluster:

    redis-master.redis.svc.cluster.local for read/write operations (port 6379)
    redis-replicas.redis.svc.cluster.local for read-only operations (port 6379)

To get your password run:

    export REDIS_PASSWORD=$(kubectl get secret --namespace redis redis -o jsonpath="{.data.redis-password}" | base64 -d)

To connect to your Redis® server:

1. Run a Redis® pod that you can use as a client:

   kubectl run --namespace redis redis-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image registry1.dso.mil/ironbank/bitnami/redis:7.4.2 --command -- sleep infinity

   Use the following command to attach to the pod:

   kubectl exec --tty -i redis-client --namespace redis -- bash

2. Connect using the Redis® CLI:

   REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h redis-master
   REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h redis-replicas

To connect to your database from outside the cluster execute the following commands:

    kubectl port-forward --namespace redis svc/redis-master 6379:6379 &
    REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h 127.0.0.1 -p 6379

WARNING: Rolling tag detected (ironbank/bitnami/redis:7.4.2), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html

WARNING: Rolling tag detected (ironbank/bitnami/analytics/redis-exporter:1.69.0), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html

⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.

Substituted images detected:
  - registry1.dso.mil/ironbank/bitnami/redis:7.4.2
  - docker.io/bitnami/redis-sentinel:7.4.2-debian-12-r6
  - registry1.dso.mil/ironbank/bitnami/analytics/redis-exporter:1.69.0
  - docker.io/bitnami/os-shell:12-debian-12-r40
  - docker.io/bitnami/kubectl:1.32.3-debian-12-r1
  - docker.io/bitnami/os-shell:12-debian-12-r40
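Note: the chart NOTES above condense into a single smoke test, runnable from the job or any workstation holding the same kubeconfig (commands taken directly from the NOTES; only the trailing ping is added):

    export REDIS_PASSWORD=$(kubectl get secret --namespace redis redis -o jsonpath="{.data.redis-password}" | base64 -d)
    kubectl port-forward --namespace redis svc/redis-master 6379:6379 &
    REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h 127.0.0.1 -p 6379 ping   # expect: PONG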
$ package_wait

Package Wait
Waiting on CRDs ... done.
Waiting on stateful sets ... done.
Waiting on daemon sets ... done.
Waiting on deployments ... done.
Waiting on terminating pods ... done.
done.

$ post_install_packages

Post Install Packages

$ post_install_wait

Post Install Wait

$ package_test

Package Test

$ echo "Upgrading ${CI_PROJECT_NAME} to ${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME}"
Upgrading redis to redis-fix-244

Package Upgrade Checkout
$ git reset --hard && git clean -fd -e db_values.yaml
HEAD is now at 1d4e7fe Merge branch 'renovate/ironbank' into 'main'
$ git checkout ${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME}
Switched to a new branch 'redis-fix-244'
branch 'redis-fix-244' set up to track 'origin/redis-fix-244'.

$ dependency_install

Dependency Install

$ dependency_wait

Dependency Wait

$ package_install

Package Install
NAME    STATUS   AGE
redis   Active   49s
NAME               TYPE                             DATA   AGE
private-registry   kubernetes.io/dockerconfigjson   1      49s
Helm installing redis/chart into redis namespace using redis/tests/test-values.yaml for values
Release "redis" has been upgraded. Happy Helming!
NAME: redis
LAST DEPLOYED: Fri Mar 28 18:36:31 2025
NAMESPACE: redis
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
CHART NAME: redis
CHART VERSION: 20.11.4-bb.1
APP VERSION: 7.4.2

(The remaining chart NOTES for revision 2 repeat the connection instructions printed after the initial install above verbatim and are omitted here.)
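Note: REVISION: 2 confirms this was an in-place upgrade of the existing release (20.11.4-bb.0 to 20.11.4-bb.1) rather than a reinstall. Standard Helm commands to inspect what changed, not part of the pipeline script:

    helm history redis -n redis      # revision 1 -> revision 2, chart versions and statuses
    helm get values redis -n redis   # values the upgraded release was rendered with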
WARNING: Rolling tag detected (ironbank/bitnami/redis:7.4.2), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html

WARNING: Rolling tag detected (ironbank/bitnami/analytics/redis-exporter:v1.69.0), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html

⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.

Substituted images detected:
  - registry1.dso.mil/ironbank/bitnami/redis:7.4.2
  - docker.io/bitnami/redis-sentinel:7.4.2-debian-12-r6
  - registry1.dso.mil/ironbank/bitnami/analytics/redis-exporter:v1.69.0
  - docker.io/bitnami/os-shell:12-debian-12-r40
  - docker.io/bitnami/kubectl:1.32.3-debian-12-r1
  - docker.io/bitnami/os-shell:12-debian-12-r40

$ package_wait

Package Wait
Waiting on CRDs ... done.
Waiting on stateful sets ... done.
Waiting on daemon sets ... done.
Waiting on deployments ... done.
Waiting on terminating pods ... done.
done.
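Note: package_wait's internals are not shown in the log. For the stateful set phase above, a plausible shape under assumed helper logic (namespace and timeout are guesses):

    # block until every stateful set in the package namespace finishes rolling out
    for sts in $(kubectl get statefulsets -n redis -o name); do
      kubectl rollout status -n redis "$sts" --timeout=300s
    done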
$ post_install_packages

Post Install Packages

$ post_install_wait

Post Install Wait

$ package_upgrade_test

Package Re-Test

$ cluster_deprecation_check

In Cluster Deprecation Check
6:37PM INF >>> Kube No Trouble `kubent` <<<
6:37PM INF version 0.7.3 (git sha 57480c07b3f91238f12a35d0ec88d9368aae99aa)
6:37PM INF Initializing collectors and retrieving data
6:37PM INF Target K8s version is 1.31.6-eks-bc803b4
6:37PM INF Retrieved 0 resources from collector name=Cluster
6:37PM ERR Failed to retrieve data from collector error="list: failed to list: secrets is forbidden: User \"system:serviceaccount:graduated-runner:default\" cannot list resource \"secrets\" in API group \"\" at the cluster scope" name="Helm v3"
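Note: the ERR above means kubent's Helm v3 collector could not enumerate release secrets: the service account the check ran under (graduated-runner:default) lacks cluster-scope read access to secrets, and the EKS target version string suggests the check hit the runner's host cluster rather than the k3d cluster. If that collector were wanted, the grant would look roughly like the sketch below (names are hypothetical; review before applying, since cluster-wide secrets read is a broad permission):

    kubectl apply -f - <<EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: kubent-helm-reader        # hypothetical name
    rules:
      - apiGroups: [""]
        resources: ["secrets"]
        verbs: ["get", "list"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: kubent-helm-reader        # hypothetical name
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: kubent-helm-reader
    subjects:
      - kind: ServiceAccount
        name: default
        namespace: graduated-runner
    EOF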
$ package_control_validate

Package Control Validation

$ touch $CI_PROJECT_DIR/success

Running after_script
Running after script...
$ source ${PIPELINE_REPO_DESTINATION}/library/templates.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/bigbang-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/package-functions.sh
$ source ${PIPELINE_REPO_DESTINATION}/library/k8s-functions.sh
$ get_ns

Namespaces
NAME              STATUS   AGE     LABELS
default           Active   2m22s   kubernetes.io/metadata.name=default
kube-node-lease   Active   2m22s   kubernetes.io/metadata.name=kube-node-lease
kube-public       Active   2m22s   kubernetes.io/metadata.name=kube-public
kube-system       Active   2m22s   kubernetes.io/metadata.name=kube-system
metallb-system    Active   2m7s    app=metallb,kubernetes.io/metadata.name=metallb-system
redis             Active   102s    app.kubernetes.io/name=redis,kubernetes.io/metadata.name=redis

$ get_all

All Cluster Resources
NAMESPACE        NAME                                          READY   STATUS    RESTARTS   AGE
kube-system      pod/coredns-ccb96694c-l9vk9                   1/1     Running   0          2m14s
kube-system      pod/local-path-provisioner-5cf85fd84d-mlb5l   1/1     Running   0          2m14s
kube-system      pod/metrics-server-5985cbc9d7-l8bvf           1/1     Running   0          2m14s
metallb-system   pod/controller-5f67f69db-4b5kb                1/1     Running   0          2m6s
metallb-system   pod/speaker-vc24t                             1/1     Running   0          2m6s
redis            pod/redis-master-0                            1/1     Running   0          51s
redis            pod/redis-replicas-0                          1/1     Running   0          51s

NAMESPACE        NAME                      TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default          service/kubernetes        ClusterIP   172.20.0.1      <none>        443/TCP                  2m21s
kube-system      service/kube-dns          ClusterIP   172.20.0.10     <none>        53/UDP,53/TCP,9153/TCP   2m17s
kube-system      service/metrics-server    ClusterIP   172.20.67.67    <none>        443/TCP                  2m17s
metallb-system   service/webhook-service   ClusterIP   172.20.189.26   <none>        443/TCP                  2m7s
redis            service/redis-headless    ClusterIP   None            <none>        6379/TCP                 102s
redis            service/redis-master      ClusterIP   172.20.12.188   <none>        6379/TCP                 102s
redis            service/redis-replicas    ClusterIP   172.20.205.86   <none>        6379/TCP                 102s

NAMESPACE        NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
metallb-system   daemonset.apps/speaker   1         1         1       1            1           kubernetes.io/os=linux   2m6s

NAMESPACE        NAME                                     READY   UP-TO-DATE   AVAILABLE   AGE
kube-system      deployment.apps/coredns                  1/1     1            1           2m17s
kube-system      deployment.apps/local-path-provisioner   1/1     1            1           2m17s
kube-system      deployment.apps/metrics-server           1/1     1            1           2m17s
metallb-system   deployment.apps/controller               1/1     1            1           2m7s

NAMESPACE        NAME                                                DESIRED   CURRENT   READY   AGE
kube-system      replicaset.apps/coredns-ccb96694c                   1         1         1       2m15s
kube-system      replicaset.apps/local-path-provisioner-5cf85fd84d   1         1         1       2m15s
kube-system      replicaset.apps/metrics-server-5985cbc9d7           1         1         1       2m15s
metallb-system   replicaset.apps/controller-5f67f69db                1         1         1       2m7s

NAMESPACE   NAME                              READY   AGE
redis       statefulset.apps/redis-master     1/1     102s
redis       statefulset.apps/redis-replicas   1/1     102s

NAMESPACE   NAME                                                  REFERENCE                    TARGETS              MINPODS   MAXPODS   REPLICAS   AGE
redis       horizontalpodautoscaler.autoscaling/redis-replicas   StatefulSet/redis-replicas   cpu: <unknown>/80%   1         11        1          102s

$ get_events

Cluster Event Log
NOTICE: Cluster events can be found in artifact events.txt
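Note: get_events writes the event log to the events.txt artifact instead of the job output. A plausible equivalent of what it collects (assumed helper logic):

    kubectl get events --all-namespaces --sort-by=.metadata.creationTimestamp > events.txt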
$ bigbang_pipeline
Pipeline type is not BB, skipping
$ get_debug
Debug not enabled, skipping
$ k3d cluster delete ${CI_JOB_ID}
INFO[0000] Deleting cluster '43791259'
INFO[0002] Deleting 1 attached volumes...
INFO[0002] Removing cluster details from default kubeconfig...
INFO[0002] Removing standalone kubeconfig file (if there is one)...
INFO[0002] Successfully deleted cluster 43791259!
$ docker network rm ${CI_JOB_ID}
43791259

Uploading artifacts for successful job
Uploading artifacts...
events.txt: found 1 matching artifact files and directories
WARNING: db_values.yaml: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/maintained/redis)
WARNING: get_cpumem.txt: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/maintained/redis)
WARNING: pod_logs: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/maintained/redis)
WARNING: cluster_info_dump.txt: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/maintained/redis)
WARNING: kubectl_describes: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/maintained/redis)
WARNING: oscal-assessment-results.yaml: no matching files. Ensure that the artifact path is relative to the working directory (/builds/big-bang/product/maintained/redis)
Uploading artifacts as "archive" to coordinator... 201 Created  id=43791259 responseStatus=201 Created token=glcbt-64

Cleaning up project directory and file based variables
Job succeeded