Running with gitlab-runner 14.1.0 (8925d9a0)
  on gitlab-runners-bigbang-gl-packages-privileged-gitlab-runneb7dvc L_wAsvdS

Resolving secrets

Preparing the "kubernetes" executor
Using Kubernetes namespace: gitlab-runners
Using Kubernetes executor with image registry.dso.mil/platform-one/big-bang/pipeline-templates/pipeline-templates/k3d-builder:0.0.5 ...
Using attach strategy to execute scripts...

Preparing environment
Waiting for pod gitlab-runners/runner-lwasvds-project-3874-concurrent-0bk4dz to be running, status is Pending
Running on runner-lwasvds-project-3874-concurrent-0bk4dz via gitlab-runners-bigbang-gl-packages-privileged-gitlab-runneb7dvc...

Getting source from Git repository
Fetching changes with git depth set to 50...
Initialized empty Git repository in /builds/L_wAsvdS/0/platform-one/big-bang/apps/developer-tools/haproxy/.git/
Created fresh repository.
Checking out 4793283d as refs/merge-requests/3/head...
Skipping Git submodules setup

Executing "step_script" stage of the job script
$ if [ -z ${PIPELINE_REPO_BRANCH} ]; then # collapsed multi-line command
$ git clone -b ${PIPELINE_REPO_BRANCH} ${PIPELINE_REPO} ${PIPELINE_REPO_DESTINATION}
Cloning into '../pipeline-repo'...
$ source ${WAIT_PATH}
$ i=0; while [ "$i" -lt 12 ]; do docker info &>/dev/null && break; sleep 5; i=$(( i + 1 )) ; done
$ docker network create ${CI_JOB_ID} --driver=bridge -o "com.docker.network.driver.mtu"="1450"
38adcece1eb1d8f672977ed29a3de295be8ecbcda52e9a0af5fcb5c79be8228a
$ k3d cluster create ${CI_JOB_ID} --config ${K3D_CONFIG_PATH} --network ${CI_JOB_ID}
INFO[0000] Using config file ../pipeline-repo/jobs/k3d-ci/config.yaml
INFO[0000] Prep: Network
INFO[0000] Network with name '5917274' already exists with ID '38adcece1eb1d8f672977ed29a3de295be8ecbcda52e9a0af5fcb5c79be8228a'
INFO[0000] Created volume 'k3d-5917274-images'
INFO[0001] Creating node 'k3d-5917274-server-0'
INFO[0002] Pulling image 'docker.io/rancher/k3s:v1.20.4-k3s1'
INFO[0004] Creating LoadBalancer 'k3d-5917274-serverlb'
INFO[0005] Pulling image 'docker.io/rancher/k3d-proxy:v4.3.0'
INFO[0007] Starting cluster '5917274'
INFO[0007] Starting servers...
INFO[0007] Starting Node 'k3d-5917274-server-0'
INFO[0012] Starting agents...
INFO[0012] Starting helpers...
INFO[0012] Starting Node 'k3d-5917274-serverlb'
INFO[0013] (Optional) Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access
INFO[0016] Successfully added host record to /etc/hosts in 2/2 nodes and to the CoreDNS ConfigMap
INFO[0016] Cluster '5917274' created successfully!
INFO[0016] --kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false
INFO[0016] You can now use it like this:
kubectl config use-context k3d-5917274
kubectl cluster-info
$ until kubectl get deployment coredns -n kube-system -o go-template='{{.status.availableReplicas}}' | grep -v -e '<no value>'; do sleep 1s; done
1
$ if [ ! -z ${PROJECT_NAME} ]; then # collapsed multi-line command
namespace/haproxy created
secret/private-registry created
secret/private-registry-mil created
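Note: the collapsed `if [ ! -z ${PROJECT_NAME} ]` block above only shows what it created. A sketch of what it plausibly runs follows; every variable name below except PROJECT_NAME is an illustrative assumption, not a value taken from this job:

# Hypothetical expansion of the collapsed block; registry URLs and credential
# variable names are assumed, not confirmed by this log.
kubectl create namespace "${PROJECT_NAME}"
kubectl create secret docker-registry private-registry \
  --docker-server="${REGISTRY_URL}" \
  --docker-username="${REGISTRY_USERNAME}" \
  --docker-password="${REGISTRY_PASSWORD}" \
  --namespace="${PROJECT_NAME}"
kubectl create secret docker-registry private-registry-mil \
  --docker-server="${REGISTRY_MIL_URL}" \
  --docker-username="${REGISTRY_MIL_USERNAME}" \
  --docker-password="${REGISTRY_MIL_PASSWORD}" \
  --namespace="${PROJECT_NAME}"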
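Note: the k3d config consumed earlier (${K3D_CONFIG_PATH}, which the INFO line resolves to ../pipeline-repo/jobs/k3d-ci/config.yaml) is never printed. A minimal sketch consistent with the log (k3d v4.x "Simple" config schema, one server node, no agents, k3s v1.20.4-k3s1) might look like this; the exact contents are an assumption:

# Hypothetical reconstruction of jobs/k3d-ci/config.yaml, written from a shell
# step for illustration; not taken from this log.
cat <<'EOF' > config.yaml
apiVersion: k3d.io/v1alpha2                 # config schema used by k3d v4.x
kind: Simple
servers: 1                                  # the log creates only k3d-<id>-server-0
agents: 0                                   # no agent nodes appear in the log
image: docker.io/rancher/k3s:v1.20.4-k3s1   # image pulled during cluster create
EOF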
$ if [[ "${CI_PROJECT_NAME}" != *"istio"* ]]; then # collapsed multi-line command
- Processing resources for Istio core.
✔ Istio core installed
- Processing resources for Istiod.
- Processing resources for Istiod. Waiting for Deployment/istio-system/istiod
✔ Istiod installed
- Processing resources for Ingress gateways.
- Processing resources for Ingress gateways. Waiting for Deployment/istio-system/istio-ingressgat...
✔ Ingress gateways installed
- Pruning removed resources
✔ Installation complete
namespace/istio-system labeled
$ if [[ "${PACKAGE_NAMESPACE}" != "istio-operator" ]]; then # collapsed multi-line command
Generating a RSA private key
.........................................+++++
.........................+++++
writing new private key to 'tls.key'
-----
secret/wildcard-cert created
$ if [ -f "tests/main-test-gateway.yaml" ]; then # collapsed multi-line command
gateway.networking.istio.io/main created
$ if [ -f "tests/dependencies.yaml" ]; then # collapsed multi-line command
$ sleep 10
$ kubectl wait --for=condition=established --timeout 60s -A crd --all > /dev/null
$ if [ -f tests/dependencies.yaml ]; then # collapsed multi-line command
$ wait_sts
$ wait_daemonset
$ kubectl wait --for=condition=available --timeout 600s -A deployment --all > /dev/null
$ kubectl wait --for=condition=ready --timeout 600s -A pods --all --field-selector status.phase=Running > /dev/null
$ echo "Package install"
Package install
$ if [ ! -z ${PROJECT_NAME} ]; then # collapsed multi-line command
$ if [ $(ls -1 tests/test-values.y*ml 2>/dev/null | wc -l) -gt 0 ]; then # collapsed multi-line command
Helm installing haproxy/chart into haproxy namespace using haproxy/tests/test-values.yaml for values
NAME: haproxy
LAST DEPLOYED: Tue Aug 24 22:40:07 2021
NAMESPACE: haproxy
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
HAProxy has been successfully installed.

This Chart is used to run HAProxy as a regular application, as opposed to the HAProxy Ingress Controller Chart.

Controller image deployed is: "registry1.dso.mil/ironbank/opensource/haproxy/haproxy22:v2.2.2".
Your HAProxy app is of a "Deployment" kind.

Service ports mapped are:
  - name: http
    containerPort: 8080
    protocol: TCP
  - name: https
    containerPort: 8443
    protocol: TCP
  - name: stat
    containerPort: 10024
    protocol: TCP

To be able to bind to privileged ports as non-root, the following is required:

securityContext:
  enabled: true
  runAsUser: 1000
  runAsGroup: 1000
initContainers:
  - name: sysctl
    image: "busybox:musl"
    command:
      - /bin/sh
      - -c
      - sysctl -w net.ipv4.ip_unprivileged_port_start=0
    securityContext:
      privileged: true

Node IP can be found with:
$ kubectl --namespace haproxy get nodes -o jsonpath="{.items[0].status.addresses[1].address}"

For more examples and up to date documentation, please visit:
* Helm chart documentation: https://github.com/haproxytech/helm-charts/tree/master/haproxy
* HAProxy Alpine Docker container documentation: https://github.com/haproxytech/haproxy-docker-alpine
* HAProxy documentation: https://www.haproxy.org/download/2.2/doc/configuration.txt

$ sleep 10
$ kubectl wait --for=condition=established --timeout 60s -A crd --all > /dev/null
$ if [ -f tests/wait.sh ]; then # collapsed multi-line command
$ wait_sts
$ wait_daemonset
$ kubectl wait --for=condition=available --timeout 600s -A deployment --all > /dev/null
$ kubectl wait --for=condition=ready --timeout 600s -A pods --all --field-selector status.phase=Running > /dev/null
$ echo "Package tests"
Package tests
$ if [ ! -z $(kubectl get services -n istio-system istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}' &> /dev/null) ] && [ ! -z $(kubectl get vs -A -o jsonpath='{.items[0].spec.hosts[0]}' &> /dev/null) ]; then # collapsed multi-line command
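Note: the Istio step is collapsed, but its progress lines ("- Processing resources for Istio core." / "✔ Istio core installed", and so on) are the output format of istioctl install. A plausible sketch follows; the profile, flags, and the label applied to istio-system are assumptions:

# Hypothetical: an istioctl invocation that produces this style of progress output.
istioctl install -y --set profile=default
# The log then shows "namespace/istio-system labeled"; the key/value are assumed.
kubectl label namespace istio-system istio-injection=disabled --overwrite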
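Note: the certificate step is also collapsed. The openssl chatter ("Generating a RSA private key", "writing new private key to 'tls.key'") plus "secret/wildcard-cert created" point at a throwaway self-signed wildcard certificate, roughly as sketched here; the subject CN and target namespace are assumptions:

# Hypothetical: self-signed wildcard cert for the test ingress gateway.
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout tls.key -out tls.crt \
  -subj "/CN=*.bigbang.dev"    # CN is an assumption
kubectl create secret tls wildcard-cert \
  --cert=tls.crt --key=tls.key \
  --namespace=istio-system     # namespace is an assumption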
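Note: wait_sts and wait_daemonset come from the script sourced at ${WAIT_PATH}; their bodies are never shown. Given that the surrounding kubectl wait calls cover CRDs, deployments, and pods, the helpers presumably handle the remaining workload kinds, along these lines:

# Hypothetical reconstructions; the real definitions live in ${WAIT_PATH}.
wait_sts() {
  # Block until every StatefulSet in every namespace finishes rolling out.
  kubectl get statefulsets -A --no-headers 2>/dev/null |
  while read -r ns name _; do
    kubectl rollout status "statefulset/${name}" -n "${ns}" --timeout=600s
  done
}

wait_daemonset() {
  # Same idea for DaemonSets.
  kubectl get daemonsets -A --no-headers 2>/dev/null |
  while read -r ns name _; do
    kubectl rollout status "daemonset/${name}" -n "${ns}" --timeout=600s
  done
}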
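Note: the Helm step is collapsed apart from its summary line ("Helm installing haproxy/chart into haproxy namespace using haproxy/tests/test-values.yaml for values"). A sketch consistent with that line and with the release shown in the NOTES output; the exact flags are assumptions:

# Hypothetical expansion of the collapsed Helm step.
helm upgrade --install "${CI_PROJECT_NAME}" chart \
  --namespace "${PACKAGE_NAMESPACE}" \
  --values tests/test-values.yaml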
$ if [ -f "tests/cypress.json" ]; then # collapsed multi-line command
$ if [ -d "chart/templates/tests" ]; then # collapsed multi-line command
$ touch $CI_PROJECT_DIR/success

Running after_script
Running after script...
$ if [ -e success ]; then # collapsed multi-line command
Job Succeeded
$ docker exec -i k3d-${CI_JOB_ID}-server-0 crictl images -o json | jq -r '.images[].repoTags[0] | select(. != null)' > images.txt
$ sed -i '/docker.io\/istio\//d' images.txt
$ sed -i '/docker.io\/rancher\//d' images.txt
$ if [ -f tests/images.txt ]; then # collapsed multi-line command
$ k3d cluster delete ${CI_JOB_ID}
INFO[0000] Deleting cluster '5917274'
INFO[0000] Deleted k3d-5917274-serverlb
INFO[0001] Deleted k3d-5917274-server-0
INFO[0001] Deleting image volume 'k3d-5917274-images'
INFO[0001] Removing cluster details from default kubeconfig...
INFO[0001] Removing standalone kubeconfig file (if there is one)...
INFO[0001] Successfully deleted cluster 5917274!
$ docker network rm ${CI_JOB_ID}
5917274

Uploading artifacts for successful job
Uploading artifacts...
images.txt: found 1 matching files and directories
WARNING: tests/cypress/screenshots: no matching files
WARNING: tests/cypress/videos: no matching files
WARNING: cypress-artifacts: no matching files
Uploading artifacts as "archive" to coordinator... ok id=5917274 responseStatus=201 Created token=NSsYFEMj

Cleaning up file based variables
Job succeeded
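Note: the `tests/cypress.json` and `chart/templates/tests` branches produced no output here, and the artifact warnings show where Cypress results would have landed (tests/cypress/screenshots, tests/cypress/videos). If a package did ship a Cypress config, the branch presumably runs something like the sketch below; the exact invocation is an assumption:

# Hypothetical: run package end-to-end tests when a Cypress config exists.
if [ -f "tests/cypress.json" ]; then
  cypress run --project tests   # assumed flags; writes results under tests/cypress/
fi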
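Note: in the after_script, the crictl/jq pipeline dumps the first tag of every image the k3s node pulled (select(. != null) drops untagged images), and the two sed calls strip the istio and rancher infrastructure images, leaving only package images in the images.txt artifact. The collapsed `if [ -f tests/images.txt ]` branch is not shown; one plausible reading, purely an assumption, is that it merges a package-supplied list into that artifact:

# Hypothetical expansion; the real branch body is collapsed in the log.
if [ -f tests/images.txt ]; then
  cat tests/images.txt >> images.txt
  sort -u -o images.txt images.txt   # assumed de-duplication of the merged list
fi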