From a5ac16d99bb0de764e3213576420659ec777aba5 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Mon, 25 May 2020 15:44:24 -0400 Subject: [PATCH 01/27] Initial commit --- Dockerfile | 58 +++++++++++++++ Jenkinsfile | 2 + LICENSE | 177 +++++++++++++++++++++++++++++++++++++++++++++ README.md | 20 ++++- download.yaml | 19 +++++ files/run_nginx.sh | 102 ++++++++++++++++++++++++++ 6 files changed, 377 insertions(+), 1 deletion(-) create mode 100644 Dockerfile create mode 100644 Jenkinsfile create mode 100644 LICENSE create mode 100644 download.yaml create mode 100755 files/run_nginx.sh diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..7abd98c --- /dev/null +++ b/Dockerfile @@ -0,0 +1,58 @@ + +ARG BASE_REGISTRY=nexus-docker-secure.levelup-nexus.svc.cluster.local:18082 +ARG BASE_IMAGE=redhat/ubi/ubi8 +ARG BASE_TAG=8.2 + + +# Fist build a container with the data we need (to reduce bloat) +FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} as collector + +ARG jitt_version + +COPY /jitt-${jitt_version}.tar.gz /opt/jitt-${jitt_version}.tar.gz + +RUN set -x \ + && mkdir /jitt \ + && tar -xvzf /opt/jitt-${jitt_version}.tar.gz -C /jitt + + +# The container we want to ship +FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} + +LABEL name="SD Elements Just In Time Training (JITT) Container" \ + maintainer="devops-support@securitycompass.com" \ + vendor="Security Compass Ltd." \ + version='5.5.23' \ + release='1' \ + summary="SD Elements Automatically Builds In And Enables Compliance Throughout The Software Lifecycle." \ + description="SD Elements automatically identifies and classifies risks and translates complex requirements into actionable tasks that are assigned to your personnel to improve your security posture. It automates Risk Assessments, Threat Modeling, Secure Development, and Regulatory Compliance - at scale." 
+ +ENV VENDOR=security-compass + +RUN set -x \ + && dnf -y upgrade \ + && dnf -y install iproute gettext-libs procps-ng \ + && dnf -y install nss_wrapper gettext \ + && dnf -y install nginx \ + && dnf clean all \ + && mkdir -p /var/nginx/proxy_temp \ + && mkdir -p /var/nginx/client_body_temp \ + && chown nginx:root /var/nginx/proxy_temp \ + && chown nginx:root /var/nginx/client_body_temp \ + && rm -f /etc/nginx/conf.d/* \ + && rm -f /etc/nginx/nginx.conf \ + && rm -f /var/log/nginx/access.log \ + && rm -f /var/log/nginx/error.log + +# Default to local build context files +ARG rtenvsub_file=rtenvsub.sh +ARG shtdlib_file=shtdlib.sh + +ADD ${rtenvsub_file} /bin/rtenvsub.sh +ADD ${shtdlib_file} /bin/shtdlib.sh +COPY /files/run_nginx.sh /bin/run_nginx.sh +COPY --from=collector /jitt /jitt + +USER nginx + +HEALTHCHECK --interval=15s --timeout=10s --retries=3 CMD which nginx diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..99963ea --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,2 @@ +@Library('DCCSCR@master') _ +dccscrPipeline(version: "0.0.1") diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..b728b68 --- /dev/null +++ b/LICENSE @@ -0,0 +1,177 @@ +SD ELEMENTS END USER LICENSE AGREEMENT + +This End User License Agreement (this “Agreement”) is a legal contract between you, as either an +individual, Entity or Government Agency (as per the Order), and Infotek Solutions Inc. dba Security +Compass, or its affiliates (collectively “Security Compass”). + +THIS SOFTWARE IS COPYRIGHTED AND IT IS LICENSED TO YOU UNDER THIS AGREEMENT, NOT +SOLD TO YOU. BY DOWNLOADING, INSTALLING, OBTAINING A LICENSE KEY, OR OTHERWISE +ACCESSING OR USING THIS SOFTWARE, YOU ACKNOWLEDGE THAT YOU HAVE READ THIS +AGREEMENT, YOU UNDERSTAND IT, AND THAT YOU ACCEPT AND AGREE TO BE BOUND BY ITS +TERMS. 
+ +IF YOU ARE ACCEPTING THIS AGREEMENT ON BEHALF OF A COMPANY, ORGANIZATION, OR +OTHER LEGAL ENTITY (AN “ENTITY”), YOU REPRESENT AND WARRANT THAT YOU HAVE FULL +POWER AND AUTHORITY TO BIND SUCH ENTITY TO THESE TERMS, AND REFERENCES TO “YOU” +OR “YOUR” HEREIN REFER TO BOTH YOU, THE INDIVIDUAL END USER, AND THE ENTITY ON +WHOSE BEHALF YOU ARE ACCEPTING THIS AGREEMENT. + +1. Intellectual Property Rights. Security Compass or its licensors retain ownership of all intellectual +property rights in and to the Software, including any modifications, translations, or derivatives thereof, +even if unauthorized, and all applicable rights in patents, copyrights, trade secrets, and trademarks. +The Software is valuable, proprietary, and unique, and you agree to be bound by and observe the +proprietary nature thereof. The Software contains material that is protected by patent, copyright, and +trade secret laws. Your rights to use the Software are limited to those expressly granted by this +Agreement. All rights not granted to you in this Agreement are reserved to Security Compass. No +ownership of the Software passes to you. Security Compass may make changes to the Software at any +time without notice. You may not remove any proprietary notice of Security Compass or any third party +from the Software. + + +2. Protection and Restrictions. + +2.1. You agree to take all reasonable steps to safeguard access to the Software to ensure that no +unauthorized person has access thereto and that no unauthorized copy, publication, disclosure, +or distribution, in whole or in part, in any form is made. + +2.2. You acknowledge that the Software contains valuable, confidential information and trade secrets +and that unauthorized use and/or copying is harmful to Security Compass. You also understand +and agree that the copying or modifying of the Documentation provided with or as part of the +Software is strictly prohibited. 
Any third-party software included in the Software may not be used +independently from the Software. + +2.3. You will not, and will not allow a third party to, directly or indirectly: sell, sublicense, transfer, assign, +publish, display, disclose, rent, lease, timeshare, modify, loan, distribute, market, commercialize, +or create derivative works based on the Software or any part thereof, incorporate the Software into +or with other products, or use the Software for timesharing or service bureau purposes. + +2.4. You will not reverse engineer, decompile, translate, adapt, or disassemble the Software, nor will +you attempt to reconstruct or discover any source code, underlying ideas, algorithms, file formats +or programming interfaces of the Software by any means whatsoever (except and only to the +extent that applicable law prohibits or restricts reverse engineering restrictions, and then only with +prior written notice to Security Compass). + + +3. Limitation of Liability. TO THE FULLEST EXTENT PERMITTED BY LAW, UNDER NO +CIRCUMSTANCES WILL SECURITY COMPASS, ITS AFFILIATES, ITS LICENSORS OR +RESELLERS BE LIABLE FOR ANY INDIRECT, CONSEQUENTIAL, SPECIAL, PUNITIVE OR + +SD Elements Corporate End User License Agreement (July 2017) +INCIDENTAL DAMAGES, WHETHER FORESEEABLE OR UNFORESEEABLE, ARISING OUT OF +OR RELATED TO THIS AGREEMENT INCLUDING, BUT NOT LIMITED TO CLAIMS FOR +INACCURACY, LOSS OF DATA, COST OF PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES, GOODWILL, OPPORTUNITY, REVENUE, PROFITS, OR USE OF THE PRODUCTS, +INTERRUPTION IN USE OR AVAILABILITY OF DATA, STOPPAGE OF OTHER WORK OR +IMPAIRMENT OF OTHER ASSETS OR OTHER BUSINESS LOSS, PRIVACY, NEGLIGENCE, +BREACH OF CONTRACT, TORT OR OTHERWISE AND THIRD PARTY CLAIMS, EVEN IF +SECURITY COMPASS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
IN NO +EVENT WILL SECURITY COMPASS’ AGGREGATE LIABILITY ARISING OUT OF OR RELATED TO +THIS AGREEMENT, BASED ON ANY LEGAL THEORY, INCLUDING BUT NOT LIMITED TO +CONTRACT, TORT, BREACH OF WARRANTY INFRINGEMENT OR OTHERWISE, EXCEED THE +TOTAL AMOUNT ACTUALLY PAID BY YOU TO SECURITY COMPASS FOR THE LICENSE. + + +4. Usage Review. Where you host the Software, Security Compass may at its option request from you a +record of your usage to review and ensure compliance with this Agreement. You agree to cooperate +with Security Compass’ audit and provide reasonable assistance and access to information. Any such +audit shall not unreasonably interfere with your normal business operations. If any audit reveals a +breach of this Agreement by you, you will reimburse any amount revealed to be due to Security +Compass as a result of such breach within thirty (30) days after receipt of an invoice. +5. SD Elements Specific Terms. Your use of SD Elements shall be subject to Licensor’s per-application usage +and pricing terms and conditions as set out in Schedule A to this Agreement. + + +SCHEDULE A + +PER-APPLICATION PRICING TERMS AND CONDITIONS + +Additional or alternate terms and conditions that apply to SD Elements are provided below and form part of the +Agreement. + + +1. Definitions + +1.1. “Active Application(s)” shall mean an Application being developed within the SD Elements Software, which +has not been archived, and for which at least one (1) Project has been created. + +1.2. “Application” shall have the meaning set out in Section 3.1 below. + +1.3. “Archived Application(s)” shall mean an Active Application which has been moved to an archive within the +Software, whereupon it shall cease to be an Active Application. + +1.4. “Licensee” shall mean the individual, entity or government agency entering into this Agreement + +1.5. “License Year” shall mean a license year within the License Term + +1.6. 
“Project” shall mean an instance, component or release of Licensee’s software code base(s) being +developed/managed within an Application + +1.7. All Capitalized terms not defined in this Schedule shall have the meanings assigned to such terms in the +Agreement + + +2. License Metric + +2.1. The License granted to the SD Elements Software shall entitle Licensee to utilize the Software in the +development of a maximum number of Applications stated in the Order Form (hereinafter the “License +Limit”). + +2.2. Active Applications shall apply towards the usage of the License Limit. Archiving an Active Application shall +not free up the license for the Archived Application in the current License Year. + +2.3. The License Limit utilization cycle shall be reset upon the expiry of a License Year. As of the first day of the +renewal License Year, only Active Applications shall apply towards the License Limit. + + +3. Application + +3.1. For the purpose of the Agreement, an “Application” is a set of software instructions (source code, bytecode), +which compile and/or execute in a single run time environment within the Software, subject to any exception +stated below: + +(a) Licensee may create an unlimited number of new releases as Projects within an Application. Such +new releases shall not count as additional usage against the License Limit + +(b) Where Licensee utilizes the Software in the development of a web application, the browser space +code and server side code may be considered different parts of the same Application where the +technical profile of each code base is intended to produce a single list of requirements within the SD +Elements Software. + +(c) Technologies that operate as independent Licensee Applications shall be considered separate +Applications. This includes but is not limited to Java applets and browser plugins. 
The development +of the same Application for different mobile operating systems shall be considered to be separate +Applications, whereby each such Application shall apply as usage against the License Limit. + +(d) Server side applications which include components that run in a different run time space may be +considered the same Application where (i) a similar technology stack is utilized; and (ii) a single list +of requirements is intended for all components. + +(e) Where the Software is used to develop micro services architecture, all services shall be considered +to be a single application for the purpose of licensing where (i) all services use a similar technology +stack; and (ii) a single list of requirements is intended for all services. + + +4. Usage reporting obligations and auditing + +4.1. Where Licensee hosts the SD Elements Software On-Site, Licensee shall be required to report the number +of Applications developed using the Software, once at the end of each quarter in each License Year. A quarter +shall be measured as each three (3) month period starting from the License Effective Date stated on the +Order Form. Licensor reserves the right to refuse access to Standard Technical Support and Software +Updates until Licensee usage data is provided to Licensor. Usage reports shall be sent to +usagereport@sdelements.com + + +5. Pricing + +5.1. Pricing for the SD Elements Software is stated in the Order Form. Prices represent the License Limit and +SD Elements Corporate End User License Agreement (July 2017) +type of license granted. All prices are in United States Dollars, and are based upon an annual subscription +with a minimum one (1) year License Term. + + +6. Over-Usage + +6.1. At any time during the License Term, where Licensee’s usage exceeds the License Limit, Licensee shall pay +Licensor over-usage fees for the number of Active Applications used in excess of the License Limit at the per +Application rate set forth in the Order. 
Over-usage fees shall be calculated and invoiced annually after each +License Year. diff --git a/README.md b/README.md index a997260..4f3ca75 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,20 @@ -# Nginx +# nginx-jitt +## Summary + +This container hosts SDElements Just In Time Training (JITT) content using Nginx to serve the static content + + +## Local build + +Use this command to build locally: + +```bash +docker build . -t ubi_nginx-jitt \ + --build-arg BASE_REGISTRY="registry.access.redhat.com" \ + --build-arg BASE_IMAGE="ubi8/ubi" \ + --build-arg BASE_TAG="8.2" \ + --build-arg rtenvsub_file="https://raw.githubusercontent.com/sdelements/shtdlib/master/docker_utils/rtenvsub.sh" \ + --build-arg shtdlib_file="https://raw.githubusercontent.com/sdelements/shtdlib/master/shtdlib.sh" \ + --build-arg jitt_version="5.5.23" +``` diff --git a/download.yaml b/download.yaml new file mode 100644 index 0000000..fcdee7b --- /dev/null +++ b/download.yaml @@ -0,0 +1,19 @@ +resources: + - url: "https://raw.githubusercontent.com/sdelements/shtdlib/master/shtdlib.sh" + filename: "shtdlib.sh" + validation: + type: sha256 + value: "a1a529dbb641e1f9f2470be710aaebd9ef5ed650fbd6cd25971d7050749ec86b" + - url: "https://raw.githubusercontent.com/sdelements/shtdlib/master/docker_utils/rtenvsub.sh" + filename: "rtenvsub.sh" + validation: + type: sha256 + value: "52a4620abac7ee2046ff58965ae466f5b8117a4db903f64e4dfb3a7af682e444" + - url: "https://anvil.sdelements.com/pulp/isos/Default_Organization/Library/custom/sde/SDElements_Dependency_RPMs/jitt-5.5.23.tar.gz" + filename: "jitt-5.5.23.tar.gz" + validation: + type: sha256 + value: "472ad942998b0a444e51637ccf8bda039c475ee4f0bccc714bd620485bb2d631" + auth: + type: basic + id: scompass diff --git a/files/run_nginx.sh b/files/run_nginx.sh new file mode 100755 index 0000000..3a09020 --- /dev/null +++ b/files/run_nginx.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# +# Copyright (c) 2018 SD Elements Inc. +# +# All Rights Reserved. 
+# +# NOTICE: All information contained herein is, and remains +# the property of SD Elements Incorporated and its suppliers, +# if any. The intellectual and technical concepts contained +# herein are proprietary to SD Elements Incorporated +# and its suppliers and may be covered by U.S., Canadian and other Patents, +# patents in process, and are protected by trade secret or copyright law. +# Dissemination of this information or reproduction of this material +# is strictly forbidden unless prior written permission is obtained +# from SD Elements Inc.. + +# Set strict mode +set -eu + +# Version +# shellcheck disable=2034 +version='0.0.1' + +# Set verbose logging for shell script +#export verbosity=10 + +default_library_name='shtdlib.sh' +default_base_download_url='https://raw.githubusercontent.com/sdelements/shtdlib/master' +default_install_path='/usr/local/bin' + +# Library download function, optionally accepts a full path/name and URL +function download_lib { + tmp_path="${1:-$(mktemp)}" + lib_url="${2:-${default_base_download_url}/${default_library_name}}" + curl -s -l -o "${tmp_path}" "${lib_url}" || wget --no-verbose "${lib_url}" --output-document "${tmp_path}" || return 1 +} + +# Library install function, optionally accepts a URL and a full path/name +# shellcheck disable=SC2120,SC2119 +function install_lib { + lib_path="${1:-${default_install_path}/${default_library_name}}" + lib_name="${2:-$(basename "${lib_path}")}" + tmp_path="${3:-$(mktemp)}" + + echo "Installing library ${lib_name} to ${lib_path}" + download_lib "${tmp_path}" "${default_base_download_url}/${lib_name}" + mv "${tmp_path}" "${lib_path}" || sudo mv "${tmp_path}" "${lib_path}" || return 1 + chmod 755 "${lib_path}" || sudo chmod 755 "${lib_path}" || return 1 + # shellcheck disable=SC1091,SC1090 + source "${lib_path}m" + color_echo green "Installed ${lib_name} to ${lib_path} successfully" +} + +# Library import function, accepts one optional parameter, name of the file to import +# shellcheck 
disable=SC2120,SC2119 +function import_lib { + lib_name="${1:-${default_library_name}}" + full_path="$(readlink -f "${BASH_SOURCE[0]}" 2> /dev/null || realpath "${BASH_SOURCE[0]}" 2> /dev/null || greadlink -f "${BASH_SOURCE[1]}" 2> /dev/null:-"${0}")" + # Search current dir and walk down to see if we can find the library in a + # parent directory or sub directories of parent directories named lib/bin + while true; do + pref_pattern=( "${full_path}/${lib_name}" "${full_path}/$(basename -s .sh "${lib_name}")/${lib_name}" "${full_path}/lib/${lib_name}" "${full_path}/bin/${lib_name}" ) + for pref_lib in "${pref_pattern[@]}" ; do + if [ -e "${pref_lib}" ] ; then + echo "Importing ${pref_lib}" + # shellcheck disable=SC1091,SC1090 + source "${pref_lib}" + return 0 + fi + done + full_path="$(dirname "${full_path}")" + if [ "${full_path}" == '/' ] ; then + # If we haven't found the library try the PATH or install if needed + # shellcheck disable=SC1091,SC1090 + source "${lib_name}" 2> /dev/null || install_lib "${default_install_path}/${lib_name}" "${lib_name}" && return 0 + # If nothing works then we fail + echo "Unable to import ${lib_name}" + return 1 + fi + done +} + +# Import the shell standard library +# shellcheck disable=SC2119 +import_lib + +# Dynamically figure add resolvers for nginx +export NAMESERVERS="resolver $(grep nameserver /etc/resolv.conf | awk '{print $2}') 127.0.0.11 valid=10s;" +# Create config files +rtenvsub.sh --nofifo --overlay --process nginx --daemon /etc/nginx /run/nginx & + +color_echo green 'Waiting for config to become available ' +until test -e '/etc/nginx/nginx.conf' && test -d '/etc/nginx/sites-enabled'; do + add_on_break 'exit' + echo -n . 
+ sleep 1 +done + +# Run nginx +color_echo green 'Starting nginx' +nginx -g 'daemon off;' || exit_on_fail +debug 2 "Nginx exited with return code: ${?}" -- GitLab From 440d10593416c7023dd89057467672e18e02f547 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Mon, 25 May 2020 21:25:23 -0400 Subject: [PATCH 02/27] Simplify multi-stage build by using ADD --- Dockerfile | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7abd98c..ae7fc5d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,20 +3,6 @@ ARG BASE_REGISTRY=nexus-docker-secure.levelup-nexus.svc.cluster.local:18082 ARG BASE_IMAGE=redhat/ubi/ubi8 ARG BASE_TAG=8.2 - -# Fist build a container with the data we need (to reduce bloat) -FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} as collector - -ARG jitt_version - -COPY /jitt-${jitt_version}.tar.gz /opt/jitt-${jitt_version}.tar.gz - -RUN set -x \ - && mkdir /jitt \ - && tar -xvzf /opt/jitt-${jitt_version}.tar.gz -C /jitt - - -# The container we want to ship FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} LABEL name="SD Elements Just In Time Training (JITT) Container" \ @@ -47,11 +33,12 @@ RUN set -x \ # Default to local build context files ARG rtenvsub_file=rtenvsub.sh ARG shtdlib_file=shtdlib.sh +ARG jitt_version ADD ${rtenvsub_file} /bin/rtenvsub.sh ADD ${shtdlib_file} /bin/shtdlib.sh +ADD /jitt-${jitt_version}.tar.gz /jitt COPY /files/run_nginx.sh /bin/run_nginx.sh -COPY --from=collector /jitt /jitt USER nginx -- GitLab From 82878315790b98c7572af68d8f33a5773557e7f8 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Wed, 27 May 2020 08:41:52 -0400 Subject: [PATCH 03/27] Add .gitignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9258361 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +# Ignore build artifacts +*.tar.gz -- GitLab From eec3e15b78036ebff26bf09937823fb3c04e447d Mon Sep 17 
00:00:00 2001 From: Hrdayesh Patel Date: Wed, 27 May 2020 14:24:40 -0400 Subject: [PATCH 04/27] Use proper URL --- download.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/download.yaml b/download.yaml index fcdee7b..934ce04 100644 --- a/download.yaml +++ b/download.yaml @@ -9,7 +9,7 @@ resources: validation: type: sha256 value: "52a4620abac7ee2046ff58965ae466f5b8117a4db903f64e4dfb3a7af682e444" - - url: "https://anvil.sdelements.com/pulp/isos/Default_Organization/Library/custom/sde/SDElements_Dependency_RPMs/jitt-5.5.23.tar.gz" + - url: "https://tar.sdelements.com/pulp/isos/Default_Organization/Library/custom/sde/SDElements_Dependency_RPMs/jitt-5.5.23.tar.gz" filename: "jitt-5.5.23.tar.gz" validation: type: sha256 -- GitLab From 9c15785203952dc0da1f5aff22406c576415bf1e Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Thu, 28 May 2020 10:31:09 -0400 Subject: [PATCH 05/27] Move script to scripts directory --- Dockerfile | 2 +- {files => scripts}/run_nginx.sh | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename {files => scripts}/run_nginx.sh (100%) diff --git a/Dockerfile b/Dockerfile index ae7fc5d..bda3b43 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,7 +38,7 @@ ARG jitt_version ADD ${rtenvsub_file} /bin/rtenvsub.sh ADD ${shtdlib_file} /bin/shtdlib.sh ADD /jitt-${jitt_version}.tar.gz /jitt -COPY /files/run_nginx.sh /bin/run_nginx.sh +COPY /scripts/run_nginx.sh /bin/run_nginx.sh USER nginx diff --git a/files/run_nginx.sh b/scripts/run_nginx.sh similarity index 100% rename from files/run_nginx.sh rename to scripts/run_nginx.sh -- GitLab From 7a9b41e7bffc405133a6b8f712ce02109d287750 Mon Sep 17 00:00:00 2001 From: mchum Date: Mon, 15 Jun 2020 21:03:47 +0000 Subject: [PATCH 06/27] updating base image, hard coding scripts, update docs --- Dockerfile | 21 +- README.md | 17 +- download.yaml | 10 - scripts/rtenvsub.sh | 525 ++++++++ scripts/shtdlib.sh | 3050 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 3596 insertions(+), 27 
deletions(-) create mode 100644 scripts/rtenvsub.sh create mode 100644 scripts/shtdlib.sh diff --git a/Dockerfile b/Dockerfile index bda3b43..f386995 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,14 +1,17 @@ ARG BASE_REGISTRY=nexus-docker-secure.levelup-nexus.svc.cluster.local:18082 -ARG BASE_IMAGE=redhat/ubi/ubi8 -ARG BASE_TAG=8.2 +ARG BASE_IMAGE=opensource/nginx/nginx +ARG BASE_TAG=1.19.0 FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} +# Default to local build context files +ARG jitt_version + LABEL name="SD Elements Just In Time Training (JITT) Container" \ maintainer="devops-support@securitycompass.com" \ vendor="Security Compass Ltd." \ - version='5.5.23' \ + version="${jitt_version}" \ release='1' \ summary="SD Elements Automatically Builds In And Enables Compliance Throughout The Software Lifecycle." \ description="SD Elements automatically identifies and classifies risks and translates complex requirements into actionable tasks that are assigned to your personnel to improve your security posture. It automates Risk Assessments, Threat Modeling, Secure Development, and Regulatory Compliance - at scale." 
@@ -19,7 +22,6 @@ RUN set -x \ && dnf -y upgrade \ && dnf -y install iproute gettext-libs procps-ng \ && dnf -y install nss_wrapper gettext \ - && dnf -y install nginx \ && dnf clean all \ && mkdir -p /var/nginx/proxy_temp \ && mkdir -p /var/nginx/client_body_temp \ @@ -30,16 +32,11 @@ RUN set -x \ && rm -f /var/log/nginx/access.log \ && rm -f /var/log/nginx/error.log -# Default to local build context files -ARG rtenvsub_file=rtenvsub.sh -ARG shtdlib_file=shtdlib.sh -ARG jitt_version - -ADD ${rtenvsub_file} /bin/rtenvsub.sh -ADD ${shtdlib_file} /bin/shtdlib.sh +ADD /scripts/rtenvsub.sh /bin/rtenvsub.sh +ADD /scripts/shtdlib.sh /bin/shtdlib.sh ADD /jitt-${jitt_version}.tar.gz /jitt COPY /scripts/run_nginx.sh /bin/run_nginx.sh USER nginx -HEALTHCHECK --interval=15s --timeout=10s --retries=3 CMD which nginx +HEALTHCHECK --interval=15s --timeout=10s --retries=3 CMD pgrep -lf nginx || exit 1 diff --git a/README.md b/README.md index 4f3ca75..be0f233 100644 --- a/README.md +++ b/README.md @@ -7,14 +7,21 @@ This container hosts SDElements Just In Time Training (JITT) content using Nginx ## Local build +Download NGINX version 1.19.0 as a tarball from `https://dcar.dsop.io/repomap/opensource/nginx/nginx` +Follow the instructions under "Downloading and Running the image". For this example, we will use `nginx-1.19.0.tar` as the file downloaded. + +Load the tarball into docker + +```bash +docker load -i nginx-1.19.0.tar +``` + Use this command to build locally: ```bash docker build . 
-t ubi_nginx-jitt \ - --build-arg BASE_REGISTRY="registry.access.redhat.com" \ - --build-arg BASE_IMAGE="ubi8/ubi" \ - --build-arg BASE_TAG="8.2" \ - --build-arg rtenvsub_file="https://raw.githubusercontent.com/sdelements/shtdlib/master/docker_utils/rtenvsub.sh" \ - --build-arg shtdlib_file="https://raw.githubusercontent.com/sdelements/shtdlib/master/shtdlib.sh" \ + --build-arg BASE_REGISTRY="nexus-docker-secure.levelup-nexus.svc.cluster.local:18082" \ + --build-arg BASE_IMAGE="opensource/nginx/nginx" \ + --build-arg BASE_TAG="1.19.0" \ --build-arg jitt_version="5.5.23" ``` diff --git a/download.yaml b/download.yaml index 934ce04..f82cc63 100644 --- a/download.yaml +++ b/download.yaml @@ -1,14 +1,4 @@ resources: - - url: "https://raw.githubusercontent.com/sdelements/shtdlib/master/shtdlib.sh" - filename: "shtdlib.sh" - validation: - type: sha256 - value: "a1a529dbb641e1f9f2470be710aaebd9ef5ed650fbd6cd25971d7050749ec86b" - - url: "https://raw.githubusercontent.com/sdelements/shtdlib/master/docker_utils/rtenvsub.sh" - filename: "rtenvsub.sh" - validation: - type: sha256 - value: "52a4620abac7ee2046ff58965ae466f5b8117a4db903f64e4dfb3a7af682e444" - url: "https://tar.sdelements.com/pulp/isos/Default_Organization/Library/custom/sde/SDElements_Dependency_RPMs/jitt-5.5.23.tar.gz" filename: "jitt-5.5.23.tar.gz" validation: diff --git a/scripts/rtenvsub.sh b/scripts/rtenvsub.sh new file mode 100644 index 0000000..e6f640f --- /dev/null +++ b/scripts/rtenvsub.sh @@ -0,0 +1,525 @@ +#!/bin/bash +# +# Copyright (c) 2018 SD Elements Inc. +# +# All Rights Reserved. +# +# NOTICE: All information contained herein is, and remains +# the property of SD Elements Incorporated and its suppliers, +# if any. The intellectual and technical concepts contained +# herein are proprietary to SD Elements Incorporated +# and its suppliers and may be covered by U.S., Canadian and other Patents, +# patents in process, and are protected by trade secret or copyright law. 
+# Dissemination of this information or reproduction of this material +# is strictly forbidden unless prior written permission is obtained +# from SD Elements Inc.. +# Version + +version='0.1' + +# Set a safe umask +umask 0077 + +dev_mode="${DEV_MODE:-false}" +default_library_name='shtdlib.sh' +default_base_download_url='https://raw.githubusercontent.com/sdelements/shtdlib/master' +default_install_path='/usr/local/bin' + +# Temporary debug function +type -t import | grep -q '^function$' || function debug { echo "${@:2}" ; } + +# Import or source +function import_or_source { + if type -t import | grep -q '^function$' ; then + debug 10 "Importing ${1}" + import "${1}" + else + debug 10 "Sourcing ${1}" + # shellcheck disable=1090 + source "${1}" + fi +} + +# Library download function, optionally accepts a full path/name and URL +function download_lib { + local tmp_path="${1:-$(mktemp)}" + local lib_url="${2:-${default_base_download_url}/${default_library_name}}" + curl -s -l -o "${tmp_path}" "${lib_url}" || wget --no-verbose "${lib_url}" --output-document "${tmp_path}" || return 1 +} + +# Library install function, optionally accepts a URL and a full path/name +# shellcheck disable=SC2120,SC2119 +function install_lib { + local lib_path="${1:-${default_install_path}/${default_library_name}}" + local lib_name="${2:-$(basename "${lib_path}")}" + local tmp_path="${3:-$(mktemp)}" + + echo "Installing library ${lib_name} to ${lib_path}" + download_lib "${tmp_path}" "${default_base_download_url}/${lib_name}" + mv "${tmp_path}" "${lib_path}" || sudo mv "${tmp_path}" "${lib_path}" || return 1 + chmod 755 "${lib_path}" || sudo chmod 755 "${lib_path}" || return 1 + import_or_source "${lib_path}" + color_echo green "Installed ${lib_name} to ${lib_path} successfully" +} + +# Library import function, accepts one optional parameter, name of the file to import +# shellcheck disable=SC2120,SC2119 +function import_lib { + local full_path + local lib_name="${1:-${default_library_name}}" 
+ local lib_no_ext="${lib_name%.*}" + local lib_basename_s="${lib_no_ext##*/}" + full_path="$(readlink -f "${BASH_SOURCE[0]}" 2> /dev/null || realpath "${BASH_SOURCE[0]}" 2> /dev/null || greadlink -f "${BASH_SOURCE[0]}" 2> /dev/null || true)" + full_path="${full_path:-${0}}" + # Search current dir and walk down to see if we can find the library in a + # parent directory or sub directories of parent directories named lib/bin + while true; do + local pref_pattern=( "${full_path}/${lib_name}" "${full_path}/${lib_basename_s}/${lib_name}" "${full_path}/lib/${lib_name}" "${full_path}/bin/${lib_name}" ) + for pref_lib in "${pref_pattern[@]}" ; do + if [ -e "${pref_lib}" ] ; then + debug 10 "Found ${pref_lib}, attempting to import/source" + import_or_source "${pref_lib}" && return 0 + echo "Unable to import/source ${pref_lib}!" + fi + done + full_path="$(dirname "${full_path}")" + if [ "${full_path}" == '/' ] ; then + # If we haven't found the library try the PATH or install if needed + debug 10 "Attempting to import/source ${lib_name}" + import_or_source "${lib_name}" 2> /dev/null || install_lib "${default_install_path}/${lib_name}" "${lib_name}" && return 0 + # If nothing works then we fail + echo "Unable to import ${lib_name}" + return 1 + fi + done +} + +# Import the shell standard library +# shellcheck disable=SC2119 +import_lib + +debug 10 "Running ${0} with PID: ${$}" + +if ! whichs envsubst ; then + color_echo red "Unable to locate envsubst command, please make sure it's available" + color_echo cyan 'Perhaps this can be fixed with: apt-get -y install gettext-base' + exit 1 +fi +if ! 
whichs inotifywait && ${dev_mode} ; then + color_echo red "Unable to locate the inotifywait command, please make sure it's available" + color_echo cyan 'Perhaps this can be fixed with: apt-get install inotify-tools' + exit 1 +fi + +# Print usage and argument list +function print_usage { +cat << EOF +usage: ${0} destination_path file(s) director(y/ies) + +rtenvsub.sh + +Real time environment variable based templating engine + +This script uses the Linux inotify interface in combination with the envsubst +program and optionally named pipes to mirror directories and files replacing environment +variables in realtime in an efficent manner. In addition any changes to the +template files can trigger service/process reload or restart by signaling them +(default SIGHUP). + +To refresh environment variables loaded by this script you can send it the HUP signal. + +For more info see: + +man inotifywait +man mkfifo +man envsubst +man kill +man pgrep/pkill + +OPTIONS: + -p, --process Process PID or name to signal if config files change + -s, --signal Signal to send (defaults to HUP, see man kill for details) + -o, --overlay Set up mirror even if the destination directory contains files/subdirectories + -n, --nofifo Write to files instead of using named pipes + -h, --help Show this message + -d, --daemon Daemonize, run in the background + -v, --verbose {verbosity_level} Set verbose mode (optionally accepts a integer level) + -t, --test Run unit tests + +Examples: +${0} /etc/nginx /usr/share/doc/nginx # Recursively map all files and directories from /usr/share/doc/nginx to /etc/nginx +${0} /etc /usr/share/doc/ntp.conf -p ntpd # Map /usr/share/doc/ntp.conf to /etc/ntp.conf and send a HUP signal to the ntpd process if the file changes + +Version: ${version:-${shtdlib_version}} +EOF +} + +# Store all parameters as an array for `parse_opt_arg` +# shellcheck disable=2034 +parameter_array=( "${@}" ) +# Parse command line arguments +function parse_arguments { + debug 5 "Parse Arguments 
got argument: ${1}" + case ${1} in + '-') + # This uses the parse_arguments logic to parse a tag and it's value + # The value is passed on in the OPTARG variable which is standard + # when parsing arguments with optarg. + tag="${OPTARG}" + debug 10 "Found long argument/option" + parse_opt_arg OPTARG '' + parse_arguments "${tag}" + ;; + 'p'|'process') + export process="${OPTARG}" + debug 5 "Set process name to signal to: ${process}" + ;; + 's'|'signal') + export signal="${OPTARG}" + debug 5 "Set signal to: ${signal}" + ;; + 'o'|'overlay') + overlay='true' + debug 5 "Overlay enabled!" + ;; + 'n'|'nofifo') + nofifo='true' + debug 5 "Named pipes disabled, using files instead!" + ;; + 'd'|'daemon') + daemonize='true' + debug 5 "Daemon mode selected!" + ;; + 'v'|'verbose') + parse_opt_arg verbosity '10' + export verbose=true + # shellcheck disable=SC2154 + debug 1 "Set verbosity to: ${verbosity}" + debug 1 "Set verbose to: ${verbose}" + ;; + 'h'|'help'|'version') # Help + print_usage + exit 0 + ;; + 't'|'test') # Unit tests + run_unit_tests='true' + ;; + '?') # Invalid option specified + color_echo red "Invalid option '${OPTARG}'" + print_usage + exit 64 + ;; + ':') # Expecting an argument but none provided + color_echo red "Missing option argument for option '${OPTARG}'" + print_usage + exit 64 + ;; + '*') # Anything else + color_echo red "Unknown error while processing options" + print_usage + exit 64 + ;; + esac +} + +# Process arguments/parameters/options +while getopts ":-:p:s:ndotvh" opt; do + parse_arguments "${opt}" +done + +all_arguments=( "${@}" ) +declare -a non_argument_parameters +for (( index=${#@}-1 ; index>=0 ; index-- )) ; do + # shellcheck disable=SC2004 + if ! [[ "${all_arguments[$index]}" =~ -[-:alphanum:]* ]] && ! 
in_array "${all_arguments[$(($index - 1))]}" '--signal' '--process' '--verbose' '-p' '-s' '-v' ; then
        non_argument_parameters[(${index})]="${all_arguments[${index}]}"
    else
        break
    fi
done
debug 10 "Non-argument parameters:" "${non_argument_parameters[*]:-}"

# Apply defaults for every runtime option so strict mode (set -u) never
# trips on an unset variable later on
export run_unit_tests="${run_unit_tests:-false}"
export signal="${signal:-SIGHUP}"
export process="${process:-}"
export overlay="${overlay:-false}"
export daemonize="${daemonize:-false}"
export nofifo="${nofifo:-false}"

# At least one source and a destination are required unless only running tests
if [ "${#@}" -lt 2 ] && ! "${run_unit_tests}" ; then
    color_echo red "You need to supply at least one source dir/file and a destination directory"
    print_usage
    exit 64
fi

# Create a named pipe and set up envsubst loop to feed it
# Arguments:
#   1: destination (mirror) directory
#   2: template file to mirror
#   3: source root path (prefix stripped from the file when mirroring)
function setup_named_pipe {
    local destination="${1}"
    local file="${2}"
    local path="${3}"
    debug 10 "Creating named pipe: ${destination}/${file#${path}} with permissions identical to ${file}"
    # Create a named pipe for each file with same permissions, then
    # set up an inotifywait process to monitor and trigger envsubst
    mkfifo -m "$(stat -c '%a' "${file}")" "${destination}/${file#${path}}"

    # Loop envsubst until the destination or source file no longer exist
    while [ -d "${destination}" ] && [ -f "${file}" ] ; do
        # BUGFIX: was "${path}}" - the stray closing brace appended a literal
        # '}' to the source root, so render_file's ${file#${path}} prefix
        # strip never matched and output landed at the wrong mirror path
        render_file "${destination}" "${file}" "${path}"
    done
}

# Render configuration template to a file using envsubst
# Arguments:
#   1: destination (mirror) directory
#   2: template file to render
#   3: source root path (prefix stripped from the file when mirroring)
function render_file {
    local destination="${1}"
    local file="${2}"
    local path="${3}"
    debug 10 "Rendering file: ${destination}/${file#${path}} from template: ${file}"
    # compgen -v lists all defined variable names; the sed/tr pipeline turns
    # them into the $VAR,$VAR,... SHELL-FORMAT list envsubst accepts, limiting
    # substitution to variables that actually exist in this environment
    envsubst < "${file}" > "${destination}/${file#${path}}" "$(compgen -v | sed -e 's/^/\$/g' | tr '\n' ',')"
}

# Create a directory to mirror a source
# shellcheck disable=SC2174
function create_directory_structure {
    local destination="${1}"
    local dir="${2}"
    local path="${3}"
    debug 10 "Creating directory ${destination}/${dir#${path}} with permissions identical to ${dir}"
    # Create each directory in the mirror with
same permissions + mkdir -m "$(stat -c '%a' "${dir}")" -p "${destination}/${dir#${path}}" +} + +# Loops inotify on a given source and makes sure it's mirrored and templates +# rendered to the destination +function inotify_looper { + local destination="${1}" + local full_path="${2}" + # Set up notifications for each path and fork watching + inotifywait --monitor --recursive --format '%w %f %e' "${full_path}" \ + --event 'modify' --event 'close_write' \ + --event 'moved_to' --event 'create' \ + --event 'moved_from' --event 'delete' --event 'move_self' \ + --event 'delete_self' --event 'unmount' \ + | while read -r -a dir_file_events; do + for event in "${dir_file_events[@]:2}"; do + case "${event}" in + 'ACCESS'|'CLOSE_NOWRITE'|'OPEN') #Non events + color_echo red "Non mutable event on: ${dir_file_events[*]}, this should not happen since we don't subscribe to these" + exit 1 + ;; + 'MODIFY'|'CLOSE_WRITE') # File modified events + debug 6 "File modification event on: ${dir_file_events[*]}" + if ${nofifo} ; then + render_file "${destination}" "${dir_file_events[0]}/${dir_file_events[1]}" "${full_path}" + fi + if [ -n "${process}" ] ; then + signal_process "${process}" "${signal}" + fi + ;; + 'MOVED_TO'|'CREATE') # New file events + debug 6 "New file event on: ${dir_file_events[*]} ${event}" + create_directory_structure "${destination}" "${dir_file_events[0]}" "${full_path}" + if ${nofifo} ; then + render_file "${destination}" "${dir_file_events[0]}/${dir_file_events[1]}" "${full_path}" + else + setup_named_pipe "${destination}" "${dir_file_events[0]}/${dir_file_events[1]}" "${full_path}" & + fi + if [ -n "${process}" ] ; then + signal_process "${process}" "${signal}" + fi + ;; + 'MOVED_FROM'|'DELETE'|'MOVE_SELF') # File/Directory deletion events + fs_object="${dir_file_events[0]}/${dir_file_events[1]}" + mirror_object="${destination}/${fs_object#${full_path}}" + debug 5 "Filesystem object removed from source, removing from mirror" + debug 5 "Source: ${fs_object} Pipe: 
${mirror_object}"
                    if [ -f "${fs_object}" ] ; then
                        rm -f "${mirror_object}"
                    elif [ -d "${fs_object}" ] ; then
                        rmdir "${mirror_object}"
                    fi
                    if [ -n "${process}" ] ; then
                        signal_process "${process}" "${signal}"
                    fi
                ;;
                'DELETE_SELF'|'UNMOUNT') # Stop/exit/cleanup events
                    # BUGFIX: ${dir_file_events[0:1]} is not valid bash - the
                    # subscript is evaluated arithmetically and '0:1' raises an
                    # expansion error. Use substring expansion to print the
                    # first two fields (watched dir and filename) instead.
                    color_echo red "Received fatal event: ${dir_file_events[*]:0:2} ${event}, exiting!"
                    if [ -n "${process}" ] ; then
                        signal_process "${process}" "${signal}"
                    fi
                    exit 1
                ;;
            esac
        done
    done
}


# Mirrors given path(s) of directories, files and symlinks to a destination path
# using name pipes or files substituting environment variables found in files in realtime
# Ignores filesystem objects that are not files, directories or symlinks
# Arguments: 1: destination directory, 2..N: source files/directories
function mirror_envsubst_paths {
    declare -a sources
    destination="$(readlink -m "${1}")"
    sources=("${@:2}")
    if ! [ -d "${destination}" ] ; then
        color_echo red "Destination path: ${destination} is not a directory, exiting!"
        exit 1
    fi
    declare -a looper_pids
    # Iterate over each source file/directory, exclude root dir if specified
    for path in "${sources[@]}"; do
        if ! [ -e "${path}" ] ; then
            color_echo red "Source path: ${path} does not exist, exiting!"
            exit 1
        fi
        full_path="$(readlink -m "${path}")"

        # Refuse when either path is a prefix of the other (same directory or
        # nested either way) - mirroring into itself would loop forever
        if [ "${full_path#${destination}}" != "${full_path}" ] || [ "${destination#${full_path}}" != "${destination}" ] ; then
            color_echo red "Source/Destination directories can't be subdirectories of each other or the same directory"
            exit 64
        fi

        # Resolve everything up front so later prefix-stripping is consistent
        mapfile -t directories < <(find "${full_path}" -mindepth 1 -type d -exec readlink -m {} \;)
        mapfile -t files < <(find "${full_path}" -type f -exec readlink -m {} \;)
        mapfile -t links < <(find "${full_path}" -type l)

        # Create directory structure, check if destination is empty
        if [ -n "$(ls -A "${destination}")" ] && !
${overlay} ; then
            color_echo red "Destination directory is not empty, if you still want to overlay into it please use the -o/--overlay option"
            print_usage
            exit 1
        else
            for dir in "${directories[@]}"; do
                create_directory_structure "${destination}" "${dir}" "${full_path}"
            done

        fi

        # Create named pipes / files and set up cleanup on signals for them
        if [ -z "${files[*]}" ] ; then
            color_echo magenta "Destination directory does not contain any files, no pipes created for ${full_path}!"
        else
            for file in "${files[@]:-}"; do
                if ${dev_mode} ; then
                    add_on_sig "rm -f ${destination}${file#${full_path}}"
                fi
                if ${nofifo} ; then
                    render_file "${destination}" "${file}" "${full_path}"
                else
                    # Feeder loop blocks on the fifo, so run it detached
                    setup_named_pipe "${destination}" "${file}" "${full_path}" &
                fi
            done
        fi

        # Create symbolic links as needed and set up cleanup
        for link in "${links[@]}" ; do
            color_echo green "Processing symbolic link ${link}"
            target="${destination}${link#${full_path}}"
            # BUGFIX: quote ${link} so paths with spaces/globs resolve intact
            ln --symbolic "$(readlink "${link}")" "${target}"
            add_on_sig "unlink ${target}"
        done

        if ${dev_mode} ; then
            # Set up safe cleanup for directory structure (needs to be done in
            # reverse order to ensure safety of operation without recursive rm
            local index
            for (( index=${#directories[@]}-1 ; index>=0 ; index-- )) ; do
                add_on_sig "rmdir ${destination}${directories[${index}]#${full_path}}"
            done

            # Run update loop and detach it
            # NOTE(review): both branches below are identical (both detach with
            # '&'), and the looper only runs when dev_mode is true - confirm
            # whether the looper was meant to run unconditionally and whether
            # the non-daemon branch should stay in the foreground.
            if ${daemonize} ; then
                inotify_looper "${destination}" "${full_path}" &
            else
                inotify_looper "${destination}" "${full_path}" &
            fi
            looper_pids+=( "${!}" )
        fi
    done
    if ! ${daemonize} ; then
        debug 8 "Waiting for looper pids: ${looper_pids[*]}"
        # BUGFIX: was wait "${looper_pids[*]}" which joins all PIDs into one
        # word ("123 456") - bash's wait rejects that as an invalid job spec.
        # Expand with [@] so each PID is passed as its own argument.
        wait "${looper_pids[@]}"
    fi
}

# Unit tests
# shellcheck disable=SC2046,SC2154,SC2016,SC2034,SC2064
function unit_tests {
    export verbosity=10
    debug 5 "Running unit tests!"
+ # Basic setup + export TEST_VARIABLE1='/dev/null' + export TEST_VARIABLE2='example.com' + create_secure_tmp tmp_source_test_dir 'dir' + create_secure_tmp tmp_dest_test_dir 'dir' + create_secure_tmp tmp_source_test_file 'file' "${tmp_source_test_dir}" + test_string=$(tr -dc '[:alnum:]' < /dev/urandom | fold -w 1024 | head -n 1) + export signal='SIGUSR1' + # Set up a proces to listen to signals and perform actions + signal_test_file="${tmp_source_test_dir}/signal_test_file" + process="$(signal_processor "${signal}" "test -f ${signal_test_file} && echo ${test_string} > ${signal_test_file}")" + export process + + # Test setting up a named pipe + setup_named_pipe "${tmp_dest_test_dir}" "${tmp_source_test_file}" "${tmp_source_test_dir}" & + echo "${test_string}" > "${tmp_source_test_file}" & + sleep 1 + read_test_string="$(cat "${tmp_dest_test_dir}/${tmp_source_test_file#${tmp_source_test_dir}}")" + assert [ "${test_string}" == "${read_test_string}" ] + + # Test creating directory structure + mkdir "${tmp_source_test_dir}/sub_dir" + create_directory_structure "${tmp_dest_test_dir}" "${tmp_source_test_dir}/sub_dir" "${tmp_source_test_dir}" + assert [ "$(basename $(find "${tmp_dest_test_dir}" -mindepth 1 -type d))" == "$(basename $(find "${tmp_source_test_dir}" -mindepth 1 -type d))" ] + + # Test mirroring a more complicated structure + create_secure_tmp tmp_mirror_test_dir 'dir' + mkdir "${tmp_source_test_dir}/sub_dir/sub_sub_dir" + touch "${tmp_source_test_dir}/test_file" + touch "${tmp_source_test_dir}/sub_dir/sub_file" + touch "${tmp_source_test_dir}/sub_dir/sub_sub_dir/sub_sub_file" + + mirror_envsubst_paths "${tmp_mirror_test_dir}" "${tmp_source_test_dir}" & + + sleep 1 + mapfile -t files < <(find "${tmp_source_test_dir}" -type f) + mapfile -t pipes < <(find "${tmp_mirror_test_dir}" -type p) + assert [ "${#files}" -eq "${#pipes}" ] + + # Check each file matches + for (( index=0 ; index<${#files[@]} ; index++ )) ; do + assert diff "${files[${index}]}" 
"${pipes[${index}]}" + done + + # Test dynamically adding a file with variables + echo 'setting1=${TEST_VARIABLE1}' > "${tmp_source_test_dir}/settings_file" + sleep 1 + assert [ "$(cat "${tmp_mirror_test_dir}/settings_file")" == "$(cat "${tmp_mirror_test_dir}/settings_file")" ] + echo 'setting2=$TEST_VARIABLE2' >> "${tmp_source_test_dir}/settings_file" + sleep 1 + assert [ "$(cat "${tmp_mirror_test_dir}/settings_file")" == "$(cat "${tmp_mirror_test_dir}/settings_file")" ] + + # Test signaling + touch "${tmp_source_test_dir}/signal_test_file" + sleep 1 + assert test -f "${tmp_source_test_dir}/signal_test_file" + test_string_from_trap="$(cat "${signal_test_file}")" + assert [ "${test_string_from_trap}" == "${test_string}" ] + color_echo green "All tests successfully completed" + # Make sure all descendant processes get terminated + kill $(pgrep --pgroup "${$}" | grep -v "${0}") + exit 0 +} + +# Run tests or not +if ${run_unit_tests} ; then + unit_tests +fi + +# Call the main mirroring function +if ${daemonize} ; then + mirror_envsubst_paths "${non_argument_parameters[@]:-}" & + wait "${!}" +else + mirror_envsubst_paths "${non_argument_parameters[@]:-}" +fi diff --git a/scripts/shtdlib.sh b/scripts/shtdlib.sh new file mode 100644 index 0000000..b0f4bba --- /dev/null +++ b/scripts/shtdlib.sh @@ -0,0 +1,3050 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2034,SC2174,SC2016,SC2026,SC2206,SC2128 +# +# This is a collection of shared functions used by SD Elements products +# +# Copyright (c) 2018 SD Elements Inc. +# +# All Rights Reserved. +# +# NOTICE: All information contained herein is, and remains +# the property of SD Elements Incorporated and its suppliers, +# if any. The intellectual and technical concepts contained +# herein are proprietary to SD Elements Incorporated +# and its suppliers and may be covered by U.S., Canadian and other Patents, +# patents in process, and are protected by trade secret or copyright law. 
+# + +# Set a debug log file to be used in addition to stderr/stdout +# debug_log_file="/tmp/${0}.log" + +# If there is no TTY then it's not interactive +if ! [[ -t 1 ]]; then + interactive=false +fi +# Default is interactive mode unless already set +interactive="${interactive:-true}" + +# Create which -s alias (whichs), same as POSIX: -s +# No output, just return 0 if all of the executables are found, or 1 if some were not found. +function whichs { + # Bash 3.1 does not flush stdout so we use tee to make sure it gets done + command -v "${*}" &> /dev/null | tee /dev/null &> /dev/null + return "${PIPESTATUS}" +} + +# Unless disabled set strict mode for non-interactive mode +if ${strict_mode:-true} && ! ${interactive} ; then + set -euo pipefail +fi + +# Set Version +shtdlib_version='0.2' + +# Timestamp, the date/time we started +start_timestamp=$(date +"%Y%m%d%H%M") + +# Store original arguments/parameters +#base_arguments="${@:-}" + +# Store original tty +init_tty="$(tty || true)" + +# Check if shell supports array append syntax +array_append_supported="$(bash -c 'a=(); a+=1 &>/dev/null && echo true || echo false')" + +# Exit unless syntax supports array append +if ! 
"${array_append_supported}" ; then + echo "This library (${0}) requires bash version 3.1+ with array append support to work properly" + exit 1 +fi + +# Determine OS family and OS type +OS="${OS:-}" +os_family='Unknown' +os_name='Unknown' +os_codename='Unknown' +# Preferred methods +if [ -e '/etc/redhat-release' ] ; then + os_family='RedHat' +elif [ -e '/etc/lsb-release' ] ; then + os_family='Debian' +else + # Educated guesses + yum help help > /dev/null 2>&1 && os_family='RedHat' + apt-get help > /dev/null 2>&1 && os_family='Debian' + echo "${OSTYPE}" | grep -q 'darwin' && os_family='MacOSX' + if [ "${OS}" == 'SunOS' ]; then os_family='Solaris'; fi + if [ "${OSTYPE}" == 'cygwin' ]; then os_family='Cygwin'; fi + if [ -f '/etc/alpine-release' ] ; then os_family='Alpine'; fi +fi +os_type="$(uname)" + +# Determine virtualization platform in a way that ignores SIGPIPE, requires root +if [ "${EUID}" == 0 ] && command -v virt-what &> /dev/null ; then + if [ -f '/.dockerenv' ] ; then + virt_platform='Docker' + else + virt_platform="$(virt-what | head -1 || if [[ ${?} -eq 141 ]]; then true; else exit ${?}; fi)" + fi +elif [ "${os_type}" == "Linux" ] && grep -Eq '/(lxc|docker)/[[:xdigit:]]{64}' /proc/self/cgroup; then + # A method of detecting if Docker is the virtual platform on Linux containers + virt_platform='Docker' +else + virt_platform="Unknown" +fi + +# Set major and minor version variables +if [ "${os_family}" == 'RedHat' ]; then + major_version="$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | awk -F. '{print $1}')" + minor_version="$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | awk -F. '{print $2}')" + if ! [[ ${major_version} =~ ^-?[0-9]+$ ]] ; then # If major version is not an integer + major_version="$(rpm -qa \*-release | grep -Ei 'oracle|redhat|centos' | cut -d'-' -f3)" + fi + if ! 
[[ ${minor_version} =~ ^-?[0-9]+$ ]] ; then # If minor version is not an integer + minor_version="$(rpm -qa \*-release | grep -Ei 'oracle|redhat|centos' | cut -d'-' -f4 | cut -d'.' -f1)" + fi + + # The following is a more robust way of determining the OS name than + # `rpm-qa \*release | grep -q -Ei "^(redhat|centos)"` + if grep -qEi 'centos' /etc/redhat-release; then + os_name='centos'; + elif grep -qEi 'red ?hat' /etc/redhat-release; then + os_name='redhat'; + fi + patch_version=0 +elif [ "${os_family}" == 'Debian' ]; then + if [ -e '/etc/os-release' ] ; then + # VERSION_CODENAME is the built-in optional identifier + grep -q VERSION_CODENAME /etc/os-release && os_codename="$(grep VERSION_CODENAME /etc/os-release | awk -F= '{print $2}')" + # For oses based on Ubuntu we often need the Ubuntu (parent distro) codename (e.g. repository configuration) + grep -q UBUNTU_CODENAME /etc/os-release && os_codename="$(grep UBUNTU_CODENAME /etc/os-release | awk -F= '{print $2}')" + fi + if [ -e '/etc/lsb-release' ] ; then + major_version="$(grep DISTRIB_RELEASE /etc/lsb-release | awk -F= '{print $2}' | awk -F. '{print $1}')" + minor_version="$(grep DISTRIB_RELEASE /etc/lsb-release | awk -F= '{print $2}' | awk -F. '{print $2}')" + os_name="$(grep DISTRIB_ID /etc/lsb-release | awk -F= '{print $2}')" + else + major_version="$(awk -F. '{print $1}' /etc/debian_version)" + minor_version="$(awk -F. '{print $2}' /etc/debian_version)" + os_name='debian' + fi + patch_version=0 +elif [ "${os_family}" == 'Alpine' ]; then + # A safe way to read the version regardless of bash version and buggy + # implementations + # shellcheck disable=2207 + command -v mapfile &> /dev/null | tee /dev/null &> /dev/null && mapfile -d. -t full_version < /etc/alpine-release &> /dev/null || full_version=($(awk -F. 
'{printf("%s %s %s\n", $1, $2, $3)}' /etc/alpine-release))
    major_version="${full_version[0]}"
    minor_version="${full_version[1]}"
    patch_version="${full_version[2]}"
    os_name='alpine'
fi

# Filters a stream of local addresses from inet addrs formatted lines
function filter_sort_local_ip_addresses {
    grep -v '127.' | \
    sort -Vu | \
    grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | \
    grep -Eo '([0-9]*\.){3}[0-9]*'
}

# Gets local IP addresses (excluding localhost) and prints one per line
function get_local_ip_addresses {
    local -a all_ipv4
    local -a local_iv4
    if whichs ip ; then
        ip -4 addr show | filter_sort_local_ip_addresses
    elif whichs ifconfig ; then
        ifconfig | filter_sort_local_ip_addresses
    else
        # NOTE(review): this branch appears truncated/garbled in the patch -
        # the command substitution below is unbalanced and references
        # ${debug_log_file}, which belongs to the debug-logging code. It is
        # reproduced verbatim; recover the original fallback (likely an awk
        # parse of /proc/net/fib_trie) from the upstream shtdlib source.
        $(awk '/32 host/ { print "inet " f } {f=$2}' > "${debug_log_file}"
    fi
}

# Debug function for verbose debugging
# Note debug is special because it's safe even in subshells because it bypasses
# the stdin/stdout and writes directly to the terminal
function debug {
    if [ "${verbosity:-1}" -ge "${1}" ]; then
        if [ -w "${init_tty}" ] ; then
            color_echo yellow "${*:2}" > "${init_tty}"
        else
            color_echo yellow "${*:2}" >&2
        fi
    fi
}

# Error function for verbose explicit error messages
# First argument is the priority, second is the log message
# A priority of 0 will disable writing of errors to the syslog
function error {
    if whichs logger ; then
        logger --priority "${1}" "${*:2}"
    else
        # BUGFIX: message typo - "fing" corrected to "find"
        debug 3 "Unable to find logger command to write to syslog"
    fi
    if [ -w "${init_tty}" ] ; then
        color_echo red "${*:2}" > "${init_tty}"
    else
        color_echo red "${*:2}" >&2
    fi
}

# Fails/exits if the exit code of the last command does not match the one
# specified in the first argument.
# Example use:
# touch /tmp/test_file || conditional_exit_on_fail 128 "Failed to create tmp file and touch did not return 128"
function conditional_exit_on_fail {
    valid_exit_codes=(0 "${1}")
    if !
in_array "${?}" "${valid_exit_codes[@]}" ; then + exit_on_fail "${@}" + fi +} + +# Umask decorator, changes the umask for a function +# To use this add a line like the following (without #) as the first line of a function +# umask_decorator "${FUNCNAME[0]}" "${@:-}" && return +# umask_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with umask_decorator" + +# To specify a different umask set the umask_decorator_mask variable to the +# desired umask. +function umask_decorator { + if [ "${FUNCNAME[0]}" != "${FUNCNAME[2]:-}" ] ; then + local mask="${umask_decorator_mask:-0007}" + local original_mask + original_mask="$(umask)" + umask "${mask}" + debug 10 "Set umask to ${mask}" + #shellcheck disable=2068 + ${@} + umask "${original_mask}" + debug 10 "Set umask to ${original_mask}" + return 0 + fi + return 1 +} + +# Bash behaviour option decorator +# Allows changing/setting bash options for a command/function (code block) restoring +# the original once it's been executed and it's calls are complete. +# Requires an option name (see shopt) and a truthyness value "true"/"false" or +# other command/function that returns 0/1. 
These are set using the variables +# shopt_decorator_option_name and shopt_decorator_option_value +# To use this add a line like the following (without #) as the first line of a function +# Example: +# function smarter_sort { +# # 'sort' doesn't properly handle SIGPIPE +# shopt_decorator_option_name='pipefail' +# shopt_decorator_option_value='false' +# shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" +# +# echo "Bash option pipefail is set to false for this code" +# } +function shopt_decorator { + debug 10 "${FUNCNAME} called with ${*}" + if [ -n "${shopt_decorator_option_value:-}" ] && [ -n "$(shopt -o "${shopt_decorator_option_name:-}")" ] ; then + if [ "${FUNCNAME[0]}" != "${FUNCNAME[2]:-}" ] ; then + if shopt -qo "${shopt_decorator_option_name}" ; then + # Option is set + if ! "${shopt_decorator_option_value}" ; then + # Option should not be set + debug 10 "Temporarily unsetting bash option ${shopt_decorator_option_name}" + shopt -uo "${shopt_decorator_option_name}" + else + debug 10 "No need to set ${shopt_decorator_option_name}, it's already ${shopt_decorator_option_value}" + fi + "${@}" + return_code="${?}" + debug 10 "Got return code ${return_code}" + # Set the option again in case it was unset + debug 10 "(Re)Setting ${shopt_decorator_option_name}" + shopt -so "${shopt_decorator_option_name}" + return ${return_code} + else + # Option is not set + if "${shopt_decorator_option_value}" ; then + # Option should be set + debug 10 "Temporarily setting bash option ${shopt_decorator_option_name}" + shopt -so "${shopt_decorator_option_name}" + else + debug 10 "No need to unset ${shopt_decorator_option_name}, it's already ${shopt_decorator_option_value}" + fi + "${@}" + return_code="${?}" + debug 10 "Got return code ${return_code}" + # Unset the option in case it was set + debug 10 "(Re)Unsetting ${shopt_decorator_option_name}" + shopt -uo "${shopt_decorator_option_name}" + return 
${return_code} + fi + fi + # Calling function is the decorator, skip + debug 10 "Already decorated, returning 121" + return 121 + else + color_echo red "Called ${FUNCNAME[*]} without setting required variables with valid option name/value. The variables shopt_decorator_option_name and shopt_decorator_option_value need to be set to a valid shopt option and a command/function that evaluates true/false, 'true'/'false' are valid commands" + exit 126 + fi + # We should never get here + exit 127 +} + +# Test decorator +# Forces a function to be executed in all bash variants using the bashtester +# submodule and containers. Requires docker to be installed and git submodules +# to be present and up do date. +# To use this add a line like the following (without #) as the first line of a function +# test_decorator "${FUNCNAME[0]}" "${@:-}" && return +# test_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with test_decorator" + +# To specify a different set of bash versions set supported-bash_versions to a +# space separated string of the supported versions. +function test_decorator { + # If not running in a container + if [ "${FUNCNAME[0]}" != "${FUNCNAME[2]:-}" ] && ! grep -q docker /proc/1/cgroup 2> /dev/null ; then + default_bash_versions=( '3.2.57' \ + '4.0.44' \ + '4.1.17' \ + '4.2.53' \ + '4.3.48' \ + '4.4.23' \ + '5.0-beta' ) + supported_bash_versions=( ${supported_bash_versions[@]:-"${default_bash_versions[@]}"} ) + verbosity="${verbosity:-}" bash_images="${supported_bash_versions[*]}" bashtester/run.sh ". /code/$(basename ${BASH_SOURCE[0]}) && ${*}" + return 0 + fi + return 1 +} + +# Imports/Sources an external script if it's not already been imported/sourced +# or is being imported/sourced as determined by BASH_SOURCE +# Only accepts one argument, the file to source. +# Returns 0 if file is successfully imported or has already been imported. 
+# For opertunistic usage use the following pattern: +# file_to_import='my_file_path' +# type -t import | grep -q '^function$' && import "${file_to_import}" || source "${file_to_import}" +declare -a sourced_imported_files +sourced_imported_files=() +function import { + assert test -n "${1}" + assert test -e "${1}" + local hasher + if whichs shasum; then + hasher='shasum' + elif whichs md5sum; then + hasher='md5sum' + elif whichs cksum; then + hasher='cksum' + else + debug 1 "Unable to find a valid hashing command, blindly importing/sourcing!" + # shellcheck disable=1090 + source "${1}" && return 0 + fi + # Create a hash of the target file + target_file_hash="$("${hasher}" "${1}")" + + # Add all files in source history to the list of imported files + for source_file in "${BASH_SOURCE[@]}"; do + source_file_hash="$("${hasher}" "${source_file}" | awk '{print $0}')" + if ! in_array "${source_file_hash}" "${sourced_imported_files[@]:-}" ; then + sourced_imported_files+=( "${source_file_hash}" ) + fi + done + + # Check if file has already been sourced/imported + if in_array "${target_file_hash}" "${sourced_imported_files[@]}" ; then + debug 5 "Source file ${1} has already been imported/sourced, skipping" + return 0 + fi + + # Finally import/source the file if needed + debug 7 "Sourcing file ${1}" + sourced_imported_files+=( "${target_file_hash}" ) + # shellcheck disable=1090 + source "${1}" && return 0 +} + +# A platform (readlink implementation) neutral way to follow symlinks +function readlink_m { + debug 10 "readlink_m called with: ${*}" + args=( ${@} ) + if [ "${#args[@]}" -eq 0 ] ; then + color_echo red 'readlink_m needs at least one argument, none were provided' + return 64 + elif [ "${#args[@]}" -gt 1 ] ; then + base_path="$(dirname "${args[0]}")" + new_path="${base_path}/${args[1]}" + elif whichs readlink && readlink -f "${args[0]}" &> /dev/null ; then + readlink -f "${args[0]}" + return 0 + elif whichs readlink && readlink -m "${args[0]}" &> /dev/null ; then + 
readlink -m "${args[0]}" + return 0 + elif whichs realpath && realpath -m "${args[0]}" &> /dev/null ; then + realpath -m "${args[0]}" + return 0 + elif whichs greadink ; then + greadlink -m "${args[0]}" + return 0 + elif whichs grealpath ; then + grealpath "${args[0]}" + return 0 + elif whichs realpath ; then + realpath "${args[0]}" + return 0 + elif [ -e "${args[0]}" ] ; then + if stat -f "%N %Y" "${args[0]}" &> /dev/null ; then + new_path="$(stat -f "%N %Y" "${args[0]}")" + elif stat -f "%n %N" "${args[0]}" &> /dev/null ; then + new_path="$(stat --format '%n %N' "${args[0]}" | tr -d "‘’")" + else + color_echo red "Unable to find a usable way to determine full path (readlink_m)" + exit_on_fail + fi + else + color_echo red "Unable to find a usable way to determine full path (readlink_m)" + exit_on_fail + fi + new_path=( ${new_path} ) + debug 10 "Processed path is: ${new_path[*]}" + if [ ${#new_path[@]} -gt 1 ] || [ -L "${new_path[0]}" ] ; then + readlink_m "${new_path[@]}" + elif [ -e "${new_path[0]}" ] ; then + echo "${new_path[0]}" + return 0 + elif command -v realpath ; then + realpath "${args[0]}" + return 0 + else + debug 10 "Failed to resolve path: ${new_path[*]}" + return 1 + fi +} + +# Platform independent version sort +# When input is piped it's assumed to be space and/or newline (NL) delimited +# When passed as parameters each one is processed independently +function _version_sort { + debug 12 "${FUNCNAME} called with ${*}" + # 'sort' doesn't properly handle SIGPIPE + shopt_decorator_option_name='pipefail' + shopt_decorator_option_value='false' + # shellcheck disable=2015 + shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" + + if sort --help 2>&1 | grep -q version-sort ; then + local vsorter='sort --version-sort' + else + debug 10 "Using suboptimal version sort due to old Coreutils/Platform" + local vsorter='sort -t. 
-k1,1n -k2,2n -k3,3n -k4,4n' + fi + + for arg in "${@}" ; do + echo "${arg}" + done | ${vsorter} +} +# shellcheck disable=2120 +function version_sort { + # First command needs to be read, this way any piped input goes to it + while read -rt "${read_timeout:-1}" piped_data; do + declare -a piped_versions + debug 10 "Versions piped to ${FUNCNAME}: ${piped_data}" + # shellcheck disable=2086 + piped_versions+=( ${piped_data} ) + done + shopt_decorator_option_name='nounset' + shopt_decorator_option_value='false' + # shellcheck disable=2015 + shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" + # shellcheck disable=2068 + _version_sort ${@} ${piped_versions[@]} +} + +# Increment a version number by 1 +function version_increment { + declare -a segment=( ${1//\./ } ) + declare new_version + declare -i carry=1 + + for (( n=${#segment[@]}-1; n>=0; n-=1 )); do + length=${#segment[n]} + new_version=$((segment[n]+carry)) + [ "${#new_version}" -gt "${length}" ] && carry=1 || carry=0 + [ "${n}" -gt 0 ] && segment[n]=${new_version: -length} || segment[n]=${new_version} + done + new_version="${segment[*]}" + echo -e "${new_version// /.}" +} + +# Allows clear assert syntax +function assert { + debug 10 "Assertion made: ${*}" + # shellcheck disable=SC2068 + if ! 
"${@}" ; then + color_echo red "Assertion failed: '${*}'" + exit_on_fail + fi +} + +# A bash only version of basename -s +function basename_s { + local path="${*}" + local path_no_ext="${path%.*}" + local basename="${path_no_ext##*/}" + echo "${basename}" +} + +# Converts relative paths to full paths, ignores invalid paths +# Accepts either the path or name of a variable holding the path +function finalize_path { + local setvar + assert test -n "${1}" + # Check if there is a filesystem object matching the path + if [ -e "${1}" ] || [[ "${1}" =~ '/' ]] || [[ "${1}" =~ '~' ]]; then + debug 10 "Assuming path argument: ${1} is a path" + path="${1}" + setvar=false + else + debug 5 "Assuming path argument: ${1} is a variable name" + declare path="${!1}" + setvar=true + fi + if [ -n "${path}" ] && [ -e "${path}" ] ; then + if [ "$(basename "$(readlink "$(command -v readlink)")")" == 'busybox' ] || [ "${os_family}" == 'MacOSX' ] ; then + full_path=$(readlink_m "${path}") + else + full_path="$(readlink -m "${path}")" + fi + debug 10 "Finalized path: '${path}' to full path: '${full_path}'" + if [ -n "${full_path}" ]; then + if ${setvar} ; then + export "$1"="${full_path}" + else + echo "${full_path}" + fi + fi + else + debug 5 "Unable to finalize path: ${path}" + fi +} + +# Store full path to this script +script_full_path="${0}" +if [ ! -f "${script_full_path}" ] ; then + script_full_path="$(pwd)" +fi +finalize_path script_full_path +run_dir="${run_dir:-$(dirname "${script_full_path}")}" + +# Allows checking of exit status, on error print debugging info and exit. +# Takes an optional error message in which case only it will be shown +# This is typically only used when running in non-strict mode but when errors +# should be raised and to help with debugging +function exit_on_fail { + message="${*:-}" + if [ -z "${message}" ] ; then + color_echo red "Last command did not execute successfully but is required!" 
>&2 + else + color_echo red "${*}" >&2 + fi + debug 10 "[$( caller )] ${*:-}" + debug 10 "BASH_SOURCE: ${BASH_SOURCE[*]}" + debug 10 "BASH_LINENO: ${BASH_LINENO[*]}" + debug 0 "FUNCNAME: ${FUNCNAME[*]}" + # Exit if we are running as a script, else return + if [ -f "${script_full_path}" ]; then + exit 1 + else + return 1 + fi +} + +# Returns the index number of the lowest version, in effect this means it +# returns true if the first value is the smallest but will always return +# the index of the lowest version. In the case of multiple matches, the lowest +# (the first match) index is returned. +# Example: +# compare_versions '1.1.1 1.2.2test' -> returns 0 # True +# compare_versions '1.2.2 1.1.1' -> returns 1 # False +# compare_versions '1.0.0 1.1.1 2.2.2' -> returns 0 # True +# compare_versions '4.0.0 3.0.0 2.0.0 1.1.1test 1.0.0 v5.0' -> returns 4 (the +# index number, which also evaluates to False since its a non-zero return code) +function compare_versions { + debug 10 "${FUNCNAME} called with ${*}" + # 'printf' doesn't properly handle SIGPIPE + shopt_decorator_option_name='pipefail' + shopt_decorator_option_value='false' + # shellcheck disable=2015 + shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" + + items=( ${@} ) + assert [ ${#items[@]} -gt 0 ] + # shellcheck disable=2119 + lowest_ver="$(printf "%s\\n" "${items[@]}" | version_sort | head -n1)" + for (( i=0; i<${#items[@]}; i++ )) ; do + if [ "${items[i]}" == "${lowest_ver}" ] ; then + debug 10 "${FUNCNAME} returning ${i}" + return "${i}" + fi + done + color_echo red "Failed to compare versions!" 
+ exit_on_fail +} + +# Set conveniece variable for bash v4 compat +if compare_versions "${BASH_VERSION}" "4" ; then + bash_pre_v4=true +else + bash_pre_v4=false +fi + +# Set timeout value to use for read, v3 does not support decimal seconds +if "${bash_pre_v4}" ; then + read_timeout='1' +else + read_timeout='0.1' +fi + +# Prints the version of a command, arguments include: +# 1. Full or relative path to command (required) +# 2. Text to display before version info (optional) +# 3+. Flag(s)/Argument(s) to command to get version (optional, defaults to --version) +# error_msg variable: Error message if command is not found, to ignore redirect +# stderr run this like so: print_version bash 2> /dev/null +function print_version { + local error_msg + error_msg="${error_msg:-Unable to find command ${1}}" + if command -v "${1}" > /dev/null ; then + echo -n "${2:-}" + if [ -n "${3}" ] ; then + ${1} "${@:3}" + else + ${1} --version + fi + else + (>&2 echo "${error_msg}") + fi +} + +# Store full path to this script +script_full_path="${0}" +if [ ! 
-f "${script_full_path}" ] ; then + script_full_path="$(pwd)" +fi +finalize_path script_full_path +run_dir="${run_dir:-$(dirname "${script_full_path}")}" + +# Default is to clean up after ourselves +cleanup="${cleanup:-true}" + +# Create NSS Wrapper passwd and group files +# Accepts 4 optional arguments, uid:gid, username, group and home directory +# Defaults to current uid/gid, bob, builders and a temporary directory +# Note that if a home directory is specified and it's temporary it will need to +# be removed/cleaned up by the code calling this function +function init_nss_wrapper { + umask_decorator_mask=${NSS_WRAPPED_FILE_MASK:-0002} + umask_decorator "${FUNCNAME[0]}" "${@:-}" && return + + GUID="${1:-${GUID:-${UID:-$(id -u)}:$(id -g)}}" + debug 8 "Initializing NSS Wrapper with ${GUID}" + + export TMP_USER="${2:-bob}" + export TMP_GROUP="${3:-builders}" + # The ordering of -t and -d is important so this works on both BSD/OSX an + # linux since template and -t have different meanings and syntaxes + tmp_passwd_file="$(mktemp -t "passwd.${$}.XXXXXXXXXX")" && add_on_exit "rm -f '${tmp_passwd_file}'" && chmod "${NSS_WRAPPED_FILE_PERM:-0664}" "${tmp_passwd_file}" + tmp_group_file="$(mktemp -t "group.${$}.XXXXXXXXXX")" && add_on_exit "rm -f '${tmp_group_file}'" && chmod "${NSS_WRAPPED_FILE_PERM:-0664}" "${tmp_group_file}" + tmp_hosts_file="$(mktemp -t "hosts.${$}.XXXXXXXXXX")" && add_on_exit "rm -f '${tmp_hosts_file}'" && chmod "${NSS_WRAPPED_FILE_PERM:-0664}" "${tmp_hosts_file}" + + if [ -n "${4:-}" ] ; then + TMP_HOME_PATH="${4}" + else + TMP_HOME_PATH="$(mktemp -d -t "home.${TMP_USER}.XXXXXXXXXX")" && add_on_exit "rm -Rf '${TMP_HOME_PATH}'" && chown -R "${GUID}" "${TMP_HOME_PATH}" &> /dev/null + fi + export TMP_HOME_PATH + + mkdir -p "${TMP_HOME_PATH}" + cat '/etc/passwd' > "${tmp_passwd_file}" + cat '/etc/group' > "${tmp_group_file}" + cat '/etc/hosts' > "${tmp_hosts_file}" + export BUID="${GUID%:*}" + export BGID="${GUID#*:}" + 
passwd_string="${TMP_USER}:x:${BUID}:${BGID}:Bob the builder:${TMP_HOME_PATH}:/bin/false" + group_string="${TMP_GROUP}:x:${BUID}:" + passwd_pattern=".*:x:${BUID}:.*:.*:.*:.*" + group_pattern=".*:x:${BGID}:.*" + + sed -i "s|.*:x:${BUID}:.*:.*:.*:.*|${passwd_string}|g" "${tmp_passwd_file}" || echo "${passwd_string}" >> "${tmp_passwd_file}" + sed -i "s|.*:x:${BGID}:.*|${group_string}|g" "${tmp_group_file}" || echo "${group_string}" >> "${tmp_group_file}" + sed -i "/${passwd_pattern}/!{q42}; {s|${passwd_pattern}|${passwd_string}|g}" "${tmp_passwd_file}" || echo "${passwd_string}" >> "${tmp_passwd_file}" + sed -i "/${group_pattern}/!{q42}; {s|${group_pattern}|${group_string}|g}" "${tmp_group_file}" || echo "${group_string}" >> "${tmp_group_file}" + + export LD_PRELOAD='libnss_wrapper.so' + export NSS_WRAPPER_PASSWD="${tmp_passwd_file}" + export NSS_WRAPPER_GROUP="${tmp_group_file}" + export NSS_WRAPPER_HOSTS="${tmp_hosts_file}" +} + +# Enable a Python Software Collection, SCL allows multiple versions of the same RPMs to be +# installed at the same time. 
Accepts one required argument, the version of +# python to enable, this should be in the format '3.6' +function enable_scl_python { + assert [ "${os_name}" = "redhat" ] + shopt_decorator_option_name='nounset' + shopt_decorator_option_value='false' + assert test -n "${1}" + python_version="${1}" + short_version="$(echo "${python_version}" | tr -dc '0-9')" + python_enable_path="${2:-${PYTHON_ENABLE_PATH:-/opt/rh/python${short_version}/enable}}" + # shellcheck disable=SC2015 + shopt_decorator "${FUNCNAME[0]}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" + color_echo green "Enabling SCL environment for python version: ${python_version}" + # shellcheck disable=SC1090 + source "${python_enable_path}}" +} + +#Set username not available (unattended run) if passwd record exists +if [ -z "${USER:-}" ] && whoami &> /dev/null ; then + USER="$(whoami)" + export USER +fi + +# Set home directory if not available (unattended run) +if [ -z "${HOME:-}" ]; then + HOME="$(getent passwd "${USER}" | awk -F: '{print $6}')" + export HOME +fi + +# Find the best way to escalate our privileges +function set_priv_esc_cmd { + if [ "${EUID}" != "0" ]; then + if [ -x "$(command -v sudo)" ]; then + priv_esc_cmd='sudo -E' + elif [ -x "$(command -v su)" ]; then + priv_esc_cmd='su -c' + else + color_echo red "Not running as root and unable to locate/run sudo or su for privilege escalation" + return 1 + fi + else + priv_esc_cmd='' + fi + return 0 +} +set_priv_esc_cmd + +# Magical sudo/su which preserves all ssh keys, kerb creds and def. 
ssh user +# and tty/pty +function priv_esc_with_env { + debug 10 "Calling: \"${priv_esc_cmd} ${*}\" on tty: \"${init_tty}\" with priv esc command as: \"${priv_esc_cmd}\" and user: \"${USER}\"" + debug 11 "${priv_esc_cmd} /bin/bash -c export SSH_AUTH_SOCK='${SSH_AUTH_SOCK}' && export SUDO_USER_HOME='${HOME}' && export KRB5CCNAME='${KRB5CCNAME}' && export GPG_TTY='${init_tty}' && alias ssh='ssh -l ${USER}' && ${*}" + ${priv_esc_cmd} /bin/bash -c "export SSH_AUTH_SOCK='${SSH_AUTH_SOCK}' && export SUDO_USER_HOME='${HOME}' && export KRB5CCNAME='${KRB5CCNAME}' && export GPG_TTY='${init_tty}' && alias ssh='ssh -l ${USER}' && ${*}" + return ${?} +} + +# Create and manage a custom ssh auth agent, socket and pid +# Create a special ssh-agent for docker, accepts two optional +# parameters/arguments, the location of the named socket and the pid file +# Optionally accepts any number of ssh key files to import, these can include +# wildcards. +function get_custom_ssh_auth_agent { + custom_ssh_auth_socket_path="${1:-${HOME}/custom-ssh-agent}" + custom_ssh_auth_pid_file="${2:-${HOME}/.custom-ssh-agent.pid}" + ssh_key_files=( ${@:3} ) + if [ -S "${custom_ssh_auth_socket_path}" ] && pgrep -F ${custom_ssh_auth_pid_file} &> /dev/null ; then + color_echo cyan "Found custom ssh-agent with socket: ${custom_ssh_auth_socket_path}" + export SSH_AUTH_SOCK="${custom_ssh_auth_socket_path}" + if [ -f "${custom_ssh_auth_pid_file}" ] ; then + read -r SSH_AGENT_PID < "${custom_ssh_auth_pid_file}" + export SSH_AGENT_PID + fi + else + color_echo cyan "Creating custom ssh-agent with socket: ${custom_ssh_auth_socket_path}" + assert whichs ssh-agent + if rm -f ${custom_ssh_auth_socket_path} ; then + eval $(ssh-agent -a ${custom_ssh_auth_socket_path}) + echo "${SSH_AGENT_PID}" > "${custom_ssh_auth_pid_file}" + else + color_echo red "Unable to reset/create named socket ${custom_ssh_auth_socket_path}, please verify path and permissions" + return 1 + fi + fi + + color_echo cyan "Checking ssh-agent key 
status" + assert whichs ssh-add + if [ -n "${ssh_key_files:-}" ] ; then + color_echo green "Loading key files: ${ssh_key_files[*]}" + for ssh_key_file in "${ssh_key_files[@]}" ; do + debug 10 "Processing ssh key file: ${ssh_key_file}" + if ! ssh-add -l | grep -q "${ssh_key_file}" ; then + ssh-add ${ssh_key_file:-} || exit_on_fail "Unable to load ssh key file ${ssh_key_file} into agent" + else + color_echo green "Key file: ${ssh_key_file} already loaded into custom ssh agent" + fi + done + else + if ! ssh-add -l &> /dev/null ; then + color_echo green "No ssh key specified, loading default key" + ssh-add || exit_on_fail "Unable to load ssh key into agent" + else + color_echo green "Found existing ssh key in custom ssh agent, no key specified to load, skipping" + fi + fi + assert test -n "${SSH_AUTH_SOCK}" +} + +# A subprocess which performs a command when it receives a signal +# First parameter is the signal and the rest is assumed to be the command +# Returns the PID of the subprocess +function signal_processor { + local signal="${1}" + local command="${*:2}" + bash -c "trap '${command}' ${signal} && while true; do sleep 1 ; done" &> /dev/null & + echo "${!}" +} + +# Signals a process by either exact name or pid +# Accepts name/pid as first parameter and optionally signal as second parameter +function signal_process { +debug 8 "Signaling PID: ${1} with signal: ${2:-SIGTERM}" +if [[ "${1}" =~ ^[0-9]+$ ]] ; then + if [ "${2}" != '' ] ; then + kill -s "${2}" "${1}" + else + kill "${1}" + fi +else + assert whichs pkill + if [ "${2}" != '' ] ; then + pkill --exact --signal "${2}" "${1}" + else + pkill --exact "${1}" + fi +fi +} + +# This function watches a set of files/directories and lets you run commands +# when file system events (using inotifywait) are detected on them +# - Param 1: command/function to run +# - Param 2..N: files/directories to monitor. 
Note: Absolute paths to the +# modified objects are passed to the command/function +# Custom variables: +# - on_mod_max_frequency: the frequency, in seconds, to run command/function +# (acts as a debounce). If set to 0 then multiple instances of +# the command/function can run at the same time. Default: 1s +# - on_mod_refresh: determines if command/function should run again at the end +# of the timeout if re-triggered during the previous run. +# Default: true +# - on_mod_max_queue_depth: determines event queue size. Default: 1 event +# +# File system modification events: +# - MODIFY | CLOSE_WRITE +# - MOVED_TO | CREATE +# - MOVED_FROM | DELETE | MOVE_SELF +# - DELETE_SELF | UNMOUNT +# +# Example use: Create a callback function and register it for events +# +# path_to_monitor="/tmp" +# function callback { +# modified_obj="${1}" +# modified_dir=$(dirname "${modified_obj}") +# modified_file=$(basename "${modified_obj}") +# current_dir="${PWD}" +# cd ${modified_dir} +# echo "Do something with '${modified_file}' in '${modified_dir}'" +# ls -la ${modified_file} +# cd ${current_dir} +# } +# add_on_mod callback "${path_to_monitor}" +# +function add_on_mod { + shopt_decorator_option_name='nounset' + shopt_decorator_option_value='false' + # shellcheck disable=2015 + shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" + if whichs inotifywait ; then + file_monitor_command="inotifywait --monitor --recursive --format %w%f + --event modify + --event close_write + --event moved_to + --event create + --event moved_from + --event delete + --event move_self + --event delete_self + --event unmount" + elif whichs fswatch ; then + file_monitor_command="fswatch --recursive --format %p + --event Created + --event Updated + --event Removed + --event Renamed + --event MovedFrom + --event MovedTo" + else + color_echo red "Unable to find inotifywait or fswatch, please install one or the other before trying to 
use '${FUNCNAME[0]} ${*}'"
+        return 1
+    fi
+    local arguments=("${@}")
+    on_mod_refresh="${on_mod_refresh:-true}"
+    # Honor the documented tunable name (on_mod_max_frequency), defaulting to 1s
+    on_mod_max_frequency="${on_mod_max_frequency:-1}"
+    on_mod_max_queue_depth="${on_mod_max_queue_depth:-1}"
+    for fs_object in "${arguments[@]:1}"; do
+        if ! [ -e "${fs_object}" ] ; then
+            color_echo red "Unable to find filesystem object '${fs_object}' when running ${FUNCNAME[0]}"
+            return 1
+        fi
+        ${file_monitor_command} "${fs_object}" \
+        | while read -r mod_fs_object; do
+            debug 10 "Handling event using event loop with pid: ${$}"
+            declare -a sub_processes
+            # Remove stale pids from sub process array
+            live_sub_processes=()
+            for pid in "${sub_processes[@]}" ; do
+                if kill -0 "${pid}" &> /dev/null ; then
+                    debug 10 "Contacted pid: ${pid}"
+                    live_sub_processes+=("${pid}")
+                fi
+            done
+            sub_processes=("${live_sub_processes[@]}")
+            # Fork a process to run the command
+            (
+                debug 8 "Found ${#sub_processes[@]} elements in sub process array: ${sub_processes[*]}"
+                if [ "${on_mod_max_frequency}" -gt 0 ] && [ "${#sub_processes[@]}" -gt 0 ] ; then
+                    if "${on_mod_refresh}" && [ "${#sub_processes[@]}" -le "${on_mod_max_queue_depth}" ] ; then
+                        sibling_pid="${sub_processes[$(( ${#sub_processes[@]} - 1 ))]}"
+                        # Implement a special case for busybox support
+                        # shellcheck disable=2009,2015,2230
+                        sibling_run_time="$(readlink -f "$(which ps)" | grep -q busybox && \
+                        ps -Ao pid,time | grep "^[\t ]*${sibling_pid}[\t ]" | awk '{print $2}' | awk -F: '{for(i=NF;i>=1;i--) printf "%s ", $i;print ""}' | awk '{print $1 + $2 * 60 + $3 * 3600 + $4 * 86400}' || \
+                        ps h -o etimes -p "${sibling_pid}")"
+                        delta=$(( on_mod_max_frequency - sibling_run_time))
+                        if [ "${delta}" -gt 0 ] ; then
+                            sleep "${delta}"
+                        fi
+                        # Watch for sibling and run when it is stopped
+                        while kill -0 "${sibling_pid}" &> /dev/null ; do
+                            sleep 1
+                        done
+                        debug 7 "Running ${arguments} to refresh after ${on_mod_max_frequency} sec timeout with pid ${$}"
+                        ${arguments} "${mod_fs_object}"
+                    else
+                        debug 10 
"Discarding redundant/unwanted event since refresh is disabled or max queue depth has been reached" + fi + else + debug 7 "Running command: '${arguments} ${mod_fs_object}' in subshell with PID: ${$}" + ${arguments} "${mod_fs_object}" + fi + ) & + sub_processes+=("${!}") + done + done +} + +# Traps for cleaning up on exit +# Note that trap definition needs to happen here not inside the add_on_sig as +# shown in the original since this can easily be called in a subshell in which +# case the trap will only apply to that subshell +declare -a on_exit +on_exit=() +declare -a on_break +on_break=() + +function on_exit { + # shellcheck disable=SC2181 + if [ ${?} -ne 0 ]; then + # Prints to stderr to provide an easy way to check if the script + # failed. Because the exit signal gets propagated only the first call to + # this function will know the exit code of the script. All subsequent + # calls will see $? = 0 if the previous signal handler did not fail + color_echo red "Last command did not complete successfully" >&2 + fi + + if [ -n "${on_exit:-}" ] ; then + debug 10 "Received SIGEXIT, ${#on_exit[@]} items to clean up." + if [ ${#on_exit[@]} -gt 0 ]; then + for item in "${on_exit[@]}"; do + if [ -n "${item}" ] ; then + debug 10 "Executing cleanup statement on exit: ${item}" + # shellcheck disable=SC2091 + ${item} + fi + done + fi + fi + debug 10 "Finished cleaning up, de-registering signal trap" + trap - EXIT + if ! $interactive ; then + # Be a nice Unix citizen and propagate the signal + kill -s EXIT "${$}" + fi +} + +function on_break { + if [ -n "${on_break:-}" ] ; then + color_echo red "Break signal received, unexpected exit, ${#on_break[@]} items to clean up." + if [ ${#on_break[@]} -gt 0 ]; then + for item in "${on_break[@]}"; do + if [ -n "${item}" ] ; then + color_echo red "Executing cleanup statement on break: ${item}" + ${item} + fi + done + fi + fi + # Be a nice Unix citizen and propagate the signal + trap - "${1}" + if ! 
$interactive ; then + # Be a nice Unix citizen and propagate the signal + kill -s "${1}" "${$}" + fi +} + +function add_on_exit { + debug 10 "Registering signal action on exit: \"${*}\"" + if [ -n "${on_exit:-}" ] ; then + local n="${#on_exit[@]}" + else + local n=0 + fi + on_exit[${n}]="${*}" + debug 10 "on_exit content: ${on_exit[*]}, size: ${#on_exit[*]}, keys: ${!on_exit[*]}" +} + +function add_on_break { + debug 10 "Registering signal action on break: \"${*}\"" + if [ -n "${on_break:-}" ] ; then + local n="${#on_break[@]}" + else + local n=0 + fi + on_break[${n}]="${*}" + debug 10 "on_break content: ${on_break[*]}, size: ${#on_break[*]}, keys: ${!on_break[*]}" +} + +function add_on_sig { + add_on_exit "${*}" + add_on_break "${*}" +} + +function clear_sig_registry { + debug 10 "Clearing all registered signal actions" + on_exit=() + on_break=() +} + +debug 10 "Setting up signal traps" +trap on_exit EXIT +trap "on_break INT" INT +trap "on_break QUIT" QUIT +trap "on_break TERM" TERM +debug 10 "Signal trap successfully initialized" + +# Creates a secure temporary directory or file +# First argument (REQUIRED) is the name of the caller's return variable +# Second argument (REQUIRED) is either 'dir' or 'file' +# Third argument (OPTIONAL) can either be an existing or non-existing directory +# +# If "file" is chosen and the second argument matches a dir a tmp file with a +# random filename will be created. +# If "dir" is chosen and the second argument matches a dir a tmp dir with a +# random name will be created. +# If "file" is chosen and the second argument does not match any existing +# directory a temporary file with that name will be created. +# If "dir" is chosen and the second argument does not match any existing +# directory a temporary dir with that name will be created. +# If no second argument is given a randomly named tmp file/dir will be created +# +# DO NOT call this function in a subshell, it breaks the clean up functionality. 
+# Instead, call the function with the name of the caller's return variable as the +# first argument. For example: +# local my_temp_dir="" +# create_secure_tmp my_temp_dir 'dir' +function create_secure_tmp { + # Check for the minimum number of arguments + if [ ${#@} -lt 2 ]; then + color_echo red "Called 'create_secure_tmp' with less than 2 arguments." + exit_on_fail + fi + + # Save the name of the caller's return variable + local _RETVAL=${1} + + local type_flag + if [ "${2}" == 'file' ] ; then + type_flag='' + elif [ "${2}" == 'dir' ] ; then + type_flag='-d' + else + color_echo red 'Called create_secure_tmp without specifying a required second argument "dir" or "file"!' + color_echo red "You specified: ${2}" + exit_on_fail + fi + original_umask="$(umask)" + umask 0007 + + # Should not be a local variable so the calling environment can access it + secure_tmp_object="" + dir=${3:-} + if [ -d "${dir}" ]; then + if [ "${os_type}" == 'Linux' ]; then + secure_tmp_object="$(mktemp ${type_flag} -p "${dir}" -q )" + else + TMPDIR="${3}" + secure_tmp_object="$(mktemp -t tmp -q)" + fi + elif [ -e "${dir}" ] || [ -z "${dir}" ]; then + if [ "${os_type}" == 'Linux' ]; then + secure_tmp_object="$(mktemp ${type_flag} -q)" + else + secure_tmp_object="$(mktemp ${type_flag} -q -t tmp)" + fi + else + if [ "${2}" == 'file' ] ; then + mkdir -p -m 0700 "$(dirname "${dir}")" || exit_on_fail + install -m 0600 /dev/null "${dir}" || exit_on_fail + elif [ "${2}" == 'dir' ] ; then + mkdir -p -m 0700 "${dir}" || exit_on_fail + fi + secure_tmp_object="${dir}" + fi + # shellcheck disable=SC2181 + if [ ${?} -ne 0 ]; then + exit_on_fail "${secure_tmp_object}" + fi + + umask "${original_umask}" || exit_on_fail + + # Store temp file/dir path into the caller's variable + # shellcheck disable=SC2086 + eval ${_RETVAL}="'$secure_tmp_object'" + + if ${cleanup}; then + debug 10 "Setting up signal handler to delete tmp object ${secure_tmp_object} on exit" + add_on_sig "rm -Rf ${secure_tmp_object}" + fi +} 
+ +# Extracts archives +# First argument is the archive, second is the destination folder +# Any subsequent arguments are assumed to be embedded archives to try to +# extract, these will all be normalized into the dest folder +# If no arguments are given or a simple dash it's assumed the archive is +# provided on stdin in which case we try to determine the type and extract +# using a temporary file +# Examples of usage: +# stdin/stdout: extract < cat /some/file OR cat /some/file | extract +# stdin/filename: extract - /output/path +# filename/filename: extract /input/path /output/path +# filename/stdout: extract /input/path +declare -a extract_trailing_arguments +function extract { + # Check if we have a filename or are dealing with data on stdin + if [ "${1:-}" == '-' ] || [ "${1:-}" == '' ] ; then + if [ "${2:-}" != '' ] ; then + dest_flag_place="-C ${2}" + else + dest_flag_place='' + fi + tmp_archive="$(mktemp)" + case "$(tee "${tmp_archive}" &> /dev/null && file "${tmp_archive}" --brief --mime-type)" in + application/x-tar) tar xf "${tmp_archive}" ${dest_flag_place};; + application/x-gzip) tar zxf "${tmp_archive}" ${dest_flag_place};; + application/pgp) gpg -q -o - --decrypt "${tmp_archive}" | extract "${@:1}";; + *) color_echo red "Unsupported mime type for extracting file from stdin" ;; + esac + debug 10 "Removing temporary archive: ${tmp_archive}" + rm -f "${tmp_archive}" + else + if [ "${verbosity}" -ge 10 ]; then + local tar_verb_flag="--verbose" + else + local tar_verb_flag='' + fi + if [ -f "${1}" ] && [ -d "${2}" ]; then + case "${1}" in + *.tar.bz2) ${priv_esc_cmd} tar xvjf "${1}" -C "${2}" ${tar_verb_flag};; + *.tar.gz) ${priv_esc_cmd} tar xvzf "${1}" -C "${2}" ${tar_verb_flag};; + *.bz2) ${priv_esc_cmd} bunzip2 -dc "${1}" > "${2}" ;; + *.rar) ${priv_esc_cmd} unrar x "${1}" "${2}" ;; + *.gz) ${priv_esc_cmd} gunzip -c "${1}" > "${2}" ;; + *.tar) ${priv_esc_cmd} tar xvf "${1}" -C "${2}" ${tar_verb_flag};; + *.pyball) ${priv_esc_cmd} tar xvf "${1}" -C 
"${2}" ${tar_verb_flag};;
+            *.tbz2) ${priv_esc_cmd} tar xvjf "${1}" -C "${2}" ${tar_verb_flag};;
+            *.tgz) ${priv_esc_cmd} tar xvzf "${1}" -C "${2}" ${tar_verb_flag};;
+            *.zip) ${priv_esc_cmd} unzip "${1}" -d "${2}" ;;
+            *.Z) ${priv_esc_cmd} uncompress -c "${1}" > "${2}" ;;
+            *.7z) ${priv_esc_cmd} 7za x -y "${1}" -o"${2}" ;;
+            *.tar.gpg) ${priv_esc_cmd} gpg -q -o - --decrypt "${1}" | tar xv -C "${2}" ${tar_verb_flag};;
+            *.tgz.gpg) ${priv_esc_cmd} gpg -q -o - --decrypt "${1}" | tar xvz -C "${2}" ${tar_verb_flag};;
+            *.tar.gz.gpg) ${priv_esc_cmd} gpg -q -o - --decrypt "${1}" | tar xvz -C "${2}" ${tar_verb_flag};;
+            *) color_echo red "${1} is not a known compression format" ;;
+        esac
+        # Collect any embedded-archive names passed as trailing arguments;
+        # "${@:3}" expands to nothing (safely, even under nounset) when absent
+        extract_trailing_arguments=("${@:3}")
+        if [ -n "${extract_trailing_arguments:-}" ] ; then
+            if [ -f "${2}"/"${extract_trailing_arguments}" ] ; then
+                extract "$(find "${2}/${extract_trailing_arguments}")" "${2}"
+                extract_trailing_arguments=("${extract_trailing_arguments[@]:1}")
+            fi
+        else
+            color_echo cyan "Did not find any embedded archive matching ${extract_trailing_arguments:-}"
+        fi
+    else
+        color_echo red "'${1}' is not a valid file or '${2}' is not a valid directory"
+        exit_on_fail
+    fi
+    fi
+}
+
+# If script is a part of a self extracting executable tar archive
+# Extract itself and set variable to path
+function extract_exec_archive {
+    # create_secure_tmp will store return data into the first argument
+    create_secure_tmp tmp_archive_dir 'dir'
+    export tmp_archive_dir
+    if ${interactive} ; then
+        while ! 
[[ "${REPLY:-}" =~ ^[NnYy]$ ]]; do + color_echo magenta "Detected self extracting executable archive" + read -rp "Please confirm you want to continue and extract the archive (Yy/Nn): " -n 1 + echo "" + done + else + REPLY="y" + fi + if [[ ${REPLY} =~ ^[Yy]$ ]]; then + bash_num_lines="$(awk '/^__ARCHIVE_FOLLOWS__/ { print NR + 1; exit 0; }' "${script_full_path}")" + debug 10 "Extracting embedded tar archive to ${tmp_archive_dir}" + tail -n +"${bash_num_lines}" "${script_full_path}" | extract - "${tmp_archive_dir}" || exit_on_fail + else + color_echo red "Archive extraction cancelled by user!" + exit 255 + fi +} + +# If this script is being run as a part of an executable installer archive handle correctly +if [ -f "${script_full_path}" ] && grep -qe '^__ARCHIVE_FOLLOWS__' "${script_full_path}" ; then + export running_as_exec_archive=true + debug 5 "Detected I'm an executable archive" + extract_exec_archive + if [ "$(type -t run_if_exec_archive)" == 'function' ] ; then + debug 10 "Found function named run_if_exec_archive, running it!" 
+ run_if_exec_archive + else + debug 10 "Did not find a function named run_if_exec_archive, continuing" + fi +else + debug 5 "Detected I'm running as a script or interactive" + running_as_exec_archive=false +fi + +# This is a sample print usage function, it should be overwritten by scripts +# which import this library +function print_usage { +cat << EOF +usage: ${0} options + +This is an example usage help function + +OPTIONS: + -x Create an example bundle, optionally accepts a release, defaults to acme release + -a Apply an example bundle + -s Sign a bundle being created and force validation when it's applied + -p Create a patch, the patch only includes acme updates and does not update the release + -h Show this message + -v Print ${0} version and exit + +Examples: +${0} -c # Create a bundle with "acme" version +${0} -sc 1.0.1 # Create and sign an acme bundle with version 1.0.1 +${0} -a # Apply example update, default action when run from archive + +Version: ${version:-${shtdlib_version}} +EOF +} + +# Exits with error if a required argument was not provided +# Takes two arguments, first is the argument value and the second +# is the error message if argument is not set +# This is mostly irrelevant when running in strict mode +function required_argument { + print_usage_function="${3:-print_usage}" + if [ -z "${!1}" ]; then + ${print_usage_function} + color_echo red "${2}" + exit 255 + fi +} + +# Sometimes we want to process the required arguments later +declare -a arg_var_names +declare -a arg_err_msgs +function deferred_required_argument { + arg_var_names+=("${1}") + arg_err_msgs+=("${2}") +} +function process_deferred_required_arguments { + for ((i=0;i<${#arg_var_names[@]};++i)) ; do + required_argument "${arg_var_names[$i]}" "${arg_err_msgs[$i]}" + done +} + +# Parse for optional arguments (-f vs. 
-f optional_argument) +# Takes variable name as first arg and default value as optional second +# variable will be initialized in any case for compat with -e +# You need to set or export `parameter_array` in the script that uses `parse_opt_arg`: +# +# # shellcheck disable=2034 +# parameter_array=(${@:-}) # Store all parameters as an array +# +# # Parse command line arguments +# function parse_arguments { +# debug 5 "Parse Arguments got argument: ${1}" +# case ${1} in +# ... +function parse_opt_arg { + # Pick up optional arguments + debug 10 "Parameter Array is: ${parameter_array[*]:-}" + next_arg="${parameter_array[$((OPTIND - 1))]:-}" + debug 10 "Optarg/Option index is: ${OPTIND} and next argument is: ${next_arg}" + if [ "$(echo "${next_arg}" | grep -v '^-')" != "" ]; then + debug 10 "Found optional argument and setting ${1}=\"${next_arg}\"" + eval "${1}=\"${next_arg}\"" + # Skip over the optional value so getopts does not stop processing + (( OPTIND++ )) + else + if [ "${2}" != '' ]; then + debug 10 "Optional argument not found, using default and setting ${1}=\"${2}\"" + eval "${1}=\"${2}\"" + else + debug 10 "Initializing empty variable ${1}" + eval "${1}=" + fi + fi + unset next_arg + debug 10 "Set argument: ${1} to \"${!1}\"" +} + +# Resolve DNS name, returns IP if successful, otherwise name and error code +function resolve_domain_name { + lookup_result="$( (whichs getent >/dev/null && getent ahosts "${1}" | awk '{ print $1 }'| sort -u) || (whichs dscacheutil && dscacheutil -q host -a name "${1}" | grep ip_address | awk '{ print $2 }'| sort -u ))" + if [ -z "${lookup_result}" ]; then + echo "${1}" + return 1 + else + echo "${lookup_result}" + return 0 + fi +} + +# Resolve DNS SRV name given a service and a domain, returns host name(s) +function resolve_srv_name { + service="_${1}" + domain="${2}" + proto="_${3:-TCP}" + debug 10 "${service} ${domain} ${proto}" + mapfile -t lookup_result <<< "$(host -t SRV "${service}.${proto}.${domain}" ; echo -e "${?}" )" + if 
test "${lookup_result[@]: -1}" -eq 0 ; then + for line in "${lookup_result[@]}"; do + echo "${line}" + done + else + debug 2 "Failed to resolve ${service} ${domain} ${proto}" + fi +} + +# Wait for file to exists +# - first param: filename, +# - second param: timeout (optional, default 5 sec) +# - third param: sleep interval (optional, default 1 sec) +function wait_for_file { + local file_name="${1}" + local timeout="${2:-5}" + local sleep_interval="${3:-1}" + local max_count=$((timeout/sleep_interval)) + local count=0 + while [ ! -f "${file_name}" ]; do + (( count++ )) + if [ ${count} -ge ${max_count} ]; then + break + else + sleep "${sleep_interval}" + fi + done +} + +# Wait for a command to return a 0 exit status +# - first param: command +# - second param: timeout (optional, default 10 sec) +# - third param: sleep interval (optional, default 1 sec) +function wait_for_success { + local command="${1:-false}" + local timeout="${2:-10}" + local sleep_interval="${3:-1}" + local max_count=$((timeout/sleep_interval)) + local count=0 + while ! 
${command}; do
    (( count++ ))
    if [ ${count} -ge ${max_count} ]; then
        return 1
    else
        sleep "${sleep_interval}"
    fi
done
}

# Helper function for copy_file
# Sets permission/owner on a single file and accumulates the matching rsync
# flags in the (global) rsync_base_flags variable.
# Arguments: 1: file, 2: owner[:group] (optional), 3: octal mode (optional)
function set_file_perm_owner {
    debug 10 "Called set_file_perm_owner with ${1}, ${2}, ${3}"
    if [ -z "${2}" ] ; then
        # No owner requested: let rsync preserve existing owner/group
        rsync_base_flags="${rsync_base_flags} -og"
    else
        debug 10 "Changing owner on ${1} to ${2}"
        rsync_base_flags="${rsync_base_flags} --usermap=${2}"
        # Workaround when running from setuid and no supplemental groups are
        # loaded automatically
        # shellcheck disable=SC2091
        if [ "${EUID}" -ne '0' ] && $(echo "${2}" | grep -q ':') ; then
            group="$(echo "${user_group:-}" | awk -F: '{print $2}')"
            if [[ "${group}" != '' ]]; then
                sg "${group}" -c "chown '${2}' '${1}'" || exit_on_fail
            fi
        else
            chown "${2}" "${1}" || exit_on_fail
        fi
    fi
    if [ -z "${3}" ] ; then
        # No mode requested: let rsync preserve permissions
        rsync_base_flags="${rsync_base_flags} -p"
    else
        debug 10 "Changing permissions on ${1} to ${3}"
        rsync_base_flags="${rsync_base_flags} --chmod=${3}"
        chmod "${3}" "${1}" || exit_on_fail
    fi
}

# Helper function for copy_dir
# Sets permission/owner of a directory tree and accumulates the matching rsync
# flags in the (global) rsync_base_flags variable.
# Arguments: 1: directory, 2: owner[:group] (optional), 3: octal dir mode
# (optional), 4: octal file mode (optional)
function set_dir_perm_owner {
    debug 10 "Called set_dir_perm_owner with ${1}, ${2}, ${3}, ${4}"
    if [ -z "${2}" ] ; then
        rsync_base_flags="${rsync_base_flags} -og"
    else
        debug 10 "Changing owner on ${1} to ${2}"
        rsync_base_flags="${rsync_base_flags} --usermap=${2}"
        # Workaround when running from setuid and no supplemental groups are
        # loaded automatically
        # shellcheck disable=SC2091
        if [ "${EUID}" -ne '0' ] && $(echo "${2}" | grep -q ':') ; then
            group="$(echo "${user_group:-}" | awk -F: '{print $2}')"
            if [[ "${group}" != '' ]]; then
                # Fix: quote the path and recurse, matching the non-sg branch
                # below (the original left ${1} unquoted and non-recursive)
                sg "${group}" -c "chown -R '${2}' '${1}'" || exit_on_fail
            fi
        else
            chown -R "${2}" "${1}" || exit_on_fail
        fi
    fi
    if [ -z "${3}" ] ; then
        rsync_base_flags="${rsync_base_flags} -p"
    else
        debug 10 "Changing permissions recursively on dirs in ${1} to ${3}"
        rsync_base_flags="${rsync_base_flags} --chmod=${3}"
        find "${1}" -type d -exec chmod "${3}" {} +
    fi
    if [ -n "${4}" ] ; then
        debug 10 "Changing permissions recursively on files in ${1} to ${4}"
        # Figure out how to do this with rsync
        #rsync_base_flags="${rsync_base_flags} --chmod=${4}"
        find "${1}" -type f -exec chmod "${4}" {} +
    fi
}

# Very cautiously copy files
# First parameter source, second destination, third owner:group, fourth
# permissions, until we have rsync 3.1 everywhere we are actually changing
# the permissions on the source files which is not an issue when it's a tmp dir
# but could be an issue if used in a different way. Third and fourth parameters
# are optional
function copy_file {
    local source="${1}"
    local dest="${2}"
    local owner_group="${3:-}"
    local perm="${4:-}"
    # Fix: assign locals before building the flags so ${dest} is defined when
    # the default backup dir is expanded (the original expanded an unset dest)
    rsync_base_flags="-ltDu --inplace --backup --backup-dir=\"${backup_dir:-${dest}.backup}\" --keep-dirlinks"
    # Disable globbing while splitting the source into dir/pattern components
    set -f
    find_directory="$(dirname "${source}")"
    find_pattern="$(basename "${source}")"
    set +f
    debug 10 "Called copy_file with ${source} ${dest} ${owner_group} ${perm}"
    # shellcheck disable=SC2086
    if [ -e "${source}" ] ; then
        debug 10 "Filesystem object ${source} exists"
        # Make sure permissions and owner are OK
        set_file_perm_owner "${source}" "${owner_group}" "${perm}"
        if [ -f "${source}" ] ; then
            debug 10 "Found file ${source}"
            if "${force_overwrite:-false}" ; then
                debug 10 "Copying with forced overwrite"
                rsync_flags="${rsync_base_flags} --force"
                #rsync ${rsync_flags} "${1}" "${2}"
                cp -pf "${source}" "${dest}" || exit_on_fail
            elif "${interactive}" ; then
                debug 10 "Copying in interactive mode"
                rsync_flags="${rsync_base_flags}"
                #rsync ${rsync_flags} "${1}" "${2}"
                cp -pi "${source}" "${dest}" || exit_on_fail
            else
                debug 10 "Copying in non-interactive mode"
                rsync_flags="${rsync_base_flags}"
                #rsync ${rsync_flags} "${1}" "${2}"
                cp -pn "${source}" "${dest}" || exit_on_fail
            fi
            debug 10 "Copied file ${source} to ${dest}"
        else
            color_echo red "Found filesystem object ${source} but it's not a file"
            return 1
        fi
    # Support globbing
    elif [ -n "$(find ${find_directory} -maxdepth 1 -name ${find_pattern} -type f -print -quit)" ] ; then
        debug 10 "Found globbing pattern in ${1}"
        # Make sure permissions and owner are OK
        set_file_perm_owner "${source}" "${owner_group}" "${perm}"
        # ${source} deliberately unquoted below so the glob expands
        if "${force_overwrite:-false}" ; then
            debug 10 "Copying with forced overwrite"
            cp -pf ${source} "${dest}" || exit_on_fail
        elif "${interactive}" ; then
            debug 10 "Copying in interactive mode"
            cp -pi ${source} "${dest}" || exit_on_fail
        else
            debug 10 "Copying in non-interactive mode"
            cp -pn ${source} "${dest}" || exit_on_fail
        fi
        copied_files="$(find ${source} -type f -exec basename {} \; | tr '\n' ' ')"
        debug 10 "Copied file(s) ${copied_files} to ${dest}"
    else
        color_echo cyan "Unable to find filesystem object ${source} while looking for file. Skipping..."
        return 1
    fi
    return 0
}

# Very cautiously copy directories
# First parameter source, second destination, third owner:group, fourth dir
# permissions, fifth file permissions. Last three parameters are optional
function copy_dir {
    local source="${1}"
    local dest="${2}"
    local owner_group="${3:-}"
    # Fix: argument four is the directory mode and five the file mode (see the
    # header comment and set_dir_perm_owner's signature); the original locals
    # were named the wrong way around, which made the code misleading
    local dir_perm="${4:-}"
    local file_perm="${5:-}"
    set -f
    find_directory="$(dirname "${source}")"
    find_pattern="$(basename "${source}")"
    set +f
    # shellcheck disable=SC2086
    if [ -e "${source}" ] ; then
        debug 10 "Filesystem object ${source} exists"
        set_dir_perm_owner "${source}" "${owner_group}" "${dir_perm}" "${file_perm}"
        if [ -d "${source}" ] ; then
            debug 10 "Found directory ${source}"
            if "${force_overwrite:-false}" ; then
                debug 10 "Copying with forced overwrite"
                cp -Rpf "${source}" "${dest}" || exit_on_fail
            elif "${interactive}" ; then
                debug 10 "Copying in interactive mode"
                cp -Rpi "${source}" "${dest}" || exit_on_fail
            else
                debug 10 "Copying in non-interactive mode"
                cp -Rpn "${source}" "${dest}" || exit_on_fail
            fi
            debug 10 "Copied dir ${source} to ${dest}"
        else
            color_echo red "Found filesystem object ${source} but it's not a directory"
            return 1
        fi
    # Support globbing
    elif [ -n "$(find ${find_directory} -maxdepth 1 -name ${find_pattern} -type f -print -quit)" ] ; then
        debug 10 "Found globbing pattern in ${source}"
        set_dir_perm_owner "${source}" "${owner_group}" "${dir_perm}" "${file_perm}"
        # ${source} deliberately unquoted below so the glob expands
        if "${force_overwrite:-false}" ; then
            debug 10 "Copying with forced overwrite"
            cp -Rpf ${source} "${dest}" || exit_on_fail
        elif "${interactive}" ; then
            debug 10 "Copying in interactive mode"
            cp -Rpi ${source} "${dest}" || exit_on_fail
        else
            debug 10 "Copying in non-interactive mode"
            cp -Rpn ${source} "${dest}" || exit_on_fail
        fi
        copied_dirs="$(find ${source} -type f -exec basename {} \; | tr '\n' ' ')"
        debug 10 "Copied dir(s) ${copied_dirs}"
    else
        color_echo cyan "Unable to find filesystem object ${source} while looking for dir"
        return 1
    fi
    return 0
}

# Create directories, first argument is path, second is owner, third is
# group, fourth is mode
function create_dir_or_fail {
    # Make sure directory exist, offer to create it or fail
    debug 10 "Asked to create/check directory ${1}"
    if [ ! -d "${1}" ]; then
        if [ -e "${1}" ]; then
            color_echo red "A non directory object already exists at ${1}"
            exit_on_fail
        fi
        # Offer to create the directory if it does not exist
        if ${interactive} ; then
            while ! [[ "${REPLY}" =~ ^[NnYy]$ ]]; do
                read -rp "The directory ${1} does not exist, do you want to create it (y/n):" -n 1
                echo ""
            done
        else
            REPLY="y"
        fi
        if [[ ${REPLY} =~ ^[Yy]$ ]]; then
            color_echo green "Creating directory ${1}"
            # Optional positional args use ${n:-} so set -u callers don't crash
            if [ "${4:-}" != "" ] ; then
                mode_flag="-m ${4}"
            else
                mode_flag=''
            fi
            # Create dir, use sudo/su if required
            # Fix: pass the computed mode flag; the original passed "${4}" as a
            # second directory argument, creating a bogus directory named after
            # the mode and never applying it. mode_flag is intentionally
            # unquoted so the flag and its value split into two words.
            # shellcheck disable=SC2086
            if [ -w "$(dirname "${1}")" ] ; then
                mkdir -p ${mode_flag} "${1}"
            else
                ${priv_esc_cmd} mkdir -p ${mode_flag} "${1}"
            fi
            # Change owner if specified
            if [ "${2:-}" != "" ] && [ "$(stat -c '%U' "${1}")" != "${2}" ] ; then
                debug 5 "Changing owner on ${1} to ${2}"
                ${priv_esc_cmd} chown "${2}" "${1}"
            fi
            # Change group if specified
            if [ "${3:-}" != "" ] && [ "$(stat -c '%G' "${1}")" != "${3}" ] ; then
                debug 5 "Changing group on ${1} to ${3}"
                ${priv_esc_cmd} chgrp "${3}" "${1}"
            fi
        else
            color_echo red "Target directory is required"
            exit_on_fail
        fi
    fi
}

# Takes yaml file as first parameter and key as second, e.g.
# load_from_yaml /etc/custom.yaml puppet::mykey (additional keys can be follow)
# example load_from_yaml example.yaml ':sources' ':base' "'remote'"
function load_from_yaml {
    # ruby doesn't properly handle SIGPIPE
    shopt_decorator_option_name='pipefail'
    shopt_decorator_option_value='false'
    # shellcheck disable=2015
    shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator"

    if [ -r "${1}" ]; then
        # Build a ruby one-liner that walks the nested keys, e.g.
        # data['key1']['key2'] for each extra argument
        ruby_yaml_parser="data = YAML::load(STDIN.read); puts data['${2}']"
        for key in "${@:3}" ; do
            ruby_yaml_parser+="[${key}]"
        done
        assert whichs ruby
        # NOTE(review): the ruby script reads STDIN but the file is passed as
        # an argument; confirm callers rely on ruby's ARGF/stdin behavior here
        ruby -w0 -ryaml -e "${ruby_yaml_parser}" "${1}" 2> /dev/null | awk '{print $1}' || return 1
        return 0
    else
        return 1
    fi
}

# Install gem if not already installed
# First argument is the gem name, optional second argument is extra flags
# passed verbatim to 'gem install'.
# Returns 0 if package was installed, 1 if package was already installed
function install_gem {
    original_umask="$(umask)"
    # Map the library's verbosity level onto gem's quiet/verbose flags
    if [ "${verbosity}" -le 5 ]; then
        gem_verb_flag='-q'
    elif [ "${verbosity}" -ge 10 ]; then
        gem_verb_flag='-V'
    else
        gem_verb_flag=''
    fi
    # First try gem with version code, ala ubuntu or installed with gem but
    # default to basic gem command
    gem_cmd="$(compgen -c | grep '^gem[0-9][0-9]*\.*[0-9][0-9]*' | sort | tail -n1)"
    if [ "${gem_cmd}" == '' ]; then
        gem_cmd='gem'
    fi
    debug 10 "Using gem command: '${gem_cmd}'"
    gem_version=$(${gem_cmd} list "${1}" | grep -e "^${1}")
    debug 10 "Query for gem package '${1}' version returned: '${gem_version}'"
    if [ "${gem_version}" == "" ]; then
        # Group-writable so shared tooling can use the installed gem
        umask 0002
        # Fix: ${2:-} so an omitted optional flags argument doesn't abort
        # under set -u
        ${priv_esc_cmd} bash -c "${gem_cmd} install ${gem_verb_flag} ${1} ${2:-}" || exit_on_fail
        umask "${original_umask}" || exit_on_fail
        return 0
    fi
    return 1
}

# A platform independent way to install a package, accepts any number of
# arguments all of which are assumed to be name variations of a package that
# should be tried, will only error if none of the arguments represent a valid
# package name.
function install_package {
    case "${os_family}" in
        'Debian')
            ${priv_esc_cmd} apt-get update
            exit_status=127
            for package_name in "${@}"; do
                # Fix: dropped the hard-coded 'sudo'; ${priv_esc_cmd} already
                # performs any privilege escalation required
                ${priv_esc_cmd} apt-get --assume-yes --quiet install "${package_name}" && exit_status="${?}" && break
            done
            return "${exit_status}"
            ;;
        'RedHat')
            # NOTE(review): 'yum update' with no args upgrades all packages and
            # may prompt; confirm whether a metadata refresh was intended
            ${priv_esc_cmd} yum update
            exit_status=127
            for package_name in "${@}"; do
                # Fix: '--assumeyes' was misspelled '-assumeyes' (single dash),
                # which yum rejects
                ${priv_esc_cmd} yum --assumeyes --quiet install "${package_name}" && exit_status="${?}" && break
            done
            return "${exit_status}"
            ;;
        'MacOSX')
            assert whichs brew
            brew update
            exit_status=127
            for package_name in "${@}"; do
                brew install "${package_name}" && exit_status="${?}" && break
            done
            return "${exit_status}"
            ;;
        'Alpine')
            ${priv_esc_cmd} apk update
            exit_status=127
            for package_name in "${@}"; do
                ${priv_esc_cmd} apk add "${package_name}" && exit_status="${?}" && break
            done
            return "${exit_status}"
            ;;

        *)
            color_echo red "Unsupported platform '${os_family}' for install_package function" >&2
            return 1
            ;;
    esac
}


# Verify that the local hostname resolves to an IP address that is actually
# assigned to this system; exits on failure.
function validate_hostfile {
    assigned_ip_addresses="$(ip -4 addr show | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*')"
    ip_address_in_hostfile="$(getent hosts | grep -e "\\b$(hostname)\\b" | awk '{print $1}')"

    debug 10 "Currently assigned IP addresses: ${assigned_ip_addresses}"
    debug 10 "IP address associated with hostname on hostfile: ${ip_address_in_hostfile}"

    if echo "${assigned_ip_addresses}" | grep -q "${ip_address_in_hostfile}" ; then
        debug 8 "Hostname found in hostfile and resolves to IP address on the system"
    else
        color_echo red "Unable to resolve hostname to any IP address on the system"
        exit_on_fail
    fi
}

# URI parsing function
#
# The function creates global variables with the parsed results.
# It returns 0 if parsing was successful or non-zero otherwise.
#
# [schema://][user[:password]@]host[:port][/path][?[arg1=val1]...][#fragment]
#
# Originally from: http://vpalos.com/537/uri-parsing-using-bash-built-in-features/
function uri_parser {
    # uri capture
    uri="${*}"

    # safe escaping
    uri="${uri//\`/%60}"
    uri="${uri//\"/%22}"

    # top level parsing
    pattern='^(([a-z]{3,5})://)?((([^:\/]+)(:([^@\/]*))?@)?([^:\/?]+)(:([0-9]+))?)([:\/][^?]*)?(\?[^#]*)?(#.*)?$'
    [[ "${uri}" =~ ${pattern} ]] || [[ "${uri}" =~ ssh://${pattern} ]] || return 1;

    # component extraction (globals, by capture-group index)
    uri=${BASH_REMATCH[0]}
    uri_schema=${BASH_REMATCH[2]}
    uri_address=${BASH_REMATCH[3]}
    uri_user=${BASH_REMATCH[5]}
    uri_password=${BASH_REMATCH[7]}
    uri_host=${BASH_REMATCH[8]}
    uri_port=${BASH_REMATCH[10]}
    uri_path=${BASH_REMATCH[11]}
    uri_query=${BASH_REMATCH[12]}
    uri_fragment=${BASH_REMATCH[13]}

    # path parsing: split path segments into the uri_parts array
    local count
    count=0
    path="${uri_path}"
    pattern='^/+([^/]+)'
    while [[ ${path} =~ ${pattern} ]]; do
        eval "uri_parts[${count}]=\"${BASH_REMATCH[1]}\""
        path="${path:${#BASH_REMATCH[0]}}"
        (( count++ )) && true
    done
    # query parsing: each arg name goes in uri_args, each value in uri_arg_<name>
    count=0
    query="${uri_query}"
    pattern='^[?&]+([^= ]+)(=([^&]*))?'
    while [[ ${query} =~ ${pattern} ]]; do
        eval "uri_args[${count}]=\"${BASH_REMATCH[1]}\""
        eval "uri_arg_${BASH_REMATCH[1]}=\"${BASH_REMATCH[3]}\""
        query="${query:${#BASH_REMATCH[0]}}"
        (( count++ )) && true
    done

    debug 8 "Uri parser paring summary:"
    debug 8 "uri_parser: uri -> ${uri}"
    debug 8 "uri_parser: uri_schema -> ${uri_schema}"
    debug 8 "uri_parser: uri_address -> ${uri_address}"
    debug 8 "uri_parser: uri_user -> ${uri_user}"
    debug 8 "uri_parser: uri_password -> ${uri_password}"
    debug 8 "uri_parser: uri_host -> ${uri_host}"
    debug 8 "uri_parser: uri_port -> ${uri_port}"
    debug 8 "uri_parser: uri_path -> ${uri_path}"
    debug 8 "uri_parser: uri_query -> ${uri_query}"
    debug 8 "uri_parser: uri_fragment -> ${uri_fragment}"

    # return success
    return 0
}

## Create a uri back from all the variables created by uri_parser
# [schema://][user[:password]@]host[:port][/path][?[arg1=val1]...][#fragment]
function uri_unparser {
    working_uri="${uri_schema}://"
    # Credentials are only emitted when both user and password are present
    if [ -n "${uri_user}" ] && [ -n "${uri_password}" ] ; then
        working_uri+="${uri_user}:${uri_password}@"
    fi
    working_uri+="${uri_host}"
    if [ -n "${uri_port}" ] ; then
        working_uri+=":${uri_port}"
    fi
    if [ -n "${uri_path}" ] ; then
        working_uri+="${uri_path}"
    fi
    if [ -n "${uri_query}" ] ; then
        working_uri+="?${uri_query}"
    fi
    if [ -n "${uri_fragment}" ] ; then
        working_uri+="#${uri_fragment}"
    fi
    echo "${working_uri}"
}

## Uniform Resource Identifier (URI) Hostname to Fully Qualified Domain Name (FQDN)
# Opportunistically resolves the hostname portion of a URI, and replaces it
# with a FQDN using the Name Service Switch (nsswitch) library hosts database.
# If URI hostname resolves, or if no match is found, then it uses the unresolved
# hostname of the original URI. Returns status code 1 if URI fails to parse.
## Example
# $ uri_hostname_to_fqdn http://app:8080
# http://app.example.com:8080
function uri_hostname_to_fqdn {
    uri="${*}"
    uri_parser "${uri}" || return 1

    local fqdnames
    fqdnames=$(getent hosts "${uri_host}")

    # If hostname exists in hosts library, return
    if echo "${fqdnames}" | grep -E -q "(^| )${uri_host}( |$)"; then
        echo "${uri}"
        return 0
    fi

    # If it doesn't exist, try appending the domains found under "search" in /etc/resolv.conf
    local new_uri_host
    local domain_names=($(grep -e '^search' /etc/resolv.conf))
    for domain_name in "${domain_names[@]:1}"; do # first element is "search", skip
        new_uri_host="${uri_host}.${domain_name}"
        # Fix: look up the candidate FQDN itself; the original grepped the
        # stale ${fqdnames} result (empty when the bare host didn't resolve),
        # so the search-domain fallback could never match
        if getent hosts "${new_uri_host}" | grep -E -q "(^| )${new_uri_host}( |$)"; then
            # Found a match, set it as the new URI host, and break out of the matrix
            uri_host="${new_uri_host}"
            break
        fi
    done

    # Unparse the URI and echo
    uri_unparser
}

## Strip all leading/trailing whitespaces
function strip_space {
    echo -n "${@}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//'
}

# Load ini file parameter
# Requires at least two arguments and optionally accepts a third, ini_section
# If ini_section is specified and multiple sections match, an error will be
# raised. If no ini_section is specified and multiple parameter names match
# they will all be returned.
# To strip leading/trailing whitespace simple pipe to sed -e 's/^[[:space:]]*//g'
function load_ini_file_parameter {
    local filename="${1}"
    local name="${2}"
    local ini_section="${3:-}"
    # Fix: every file reference below used the corrupted substitution
    # "$(unknown)", which executed a nonexistent 'unknown' command instead of
    # expanding the filename; all occurrences replaced with "${filename}"
    debug 10 "Loading INI file parameter: ${name} from file: ${filename}, optional section ${ini_section}"

    if [ -n "${ini_section}" ]; then
        #shellcheck disable=SC2086
        ini_section_match="$(grep -c "\[${ini_section}\]" "${filename}")"
        if [ "${ini_section_match}" -lt 1 ]; then
            color_echo red "Unable to find INI section matching ${ini_section}"
            return 1
        elif [ "${ini_section_match}" -eq 1 ]; then
            debug 9 "Found INI section ${ini_section}"
            # Print the section up to the next section header, then take the
            # first line starting with the parameter name and emit its value
            sed -n "/\[${ini_section}\]/,/\[/p" "${filename}" | grep --max-count=1 -E "^${name}" | awk -F= '{print $2}'
        else
            color_echo red "Multiple sections match the INI section specified: ${ini_section}"
            exit 1
        fi
    else
        # No section given: return value(s) for every matching parameter name
        grep -E "^${name}" "${filename}" | awk -F= '{print $2}'
    fi
}

# This function is used to safely edit ini style config files parameters.
# This function will return 0 on success or 1 if it fails to change the value
#
# OPTIONS:
# -n Filename, for example: /tmp/config_file
# -p Regex pattern, for example: ^[a-z]*
# -v Value, the value to replace with, can include variables from previous regex
# pattern, if omitted the pattern is used as the value
# -a Append, if this flag is specified and the pattern does not exist it will be
# created, takes an optional argument which is the [INI] section to add the pattern to
# -o Opportunistic, don't fail if pattern is not found, takes an optional argument
# which is the number of matches expected/required for the change to be performed
# -c Create, if file does not exist we create it, assumes append and opportunistic
function edit_ini_file_parameter {
    local n p v a o c
    local OPTIND
    local OPTARG
    local opt
    local filename
    local pattern
    local new_value
    local ini_section
    local force=false
    local opportunistic=false
    local create=false
    local append=false
    local req_matches=1

    # Handle arguments
    while getopts "n:p:v:aoc" opt; do
        case ${opt} in
            'n')
                filename="${OPTARG}"
                ;;
            'p')
                # Properly escape control characters in pattern
                pattern="$(echo "${OPTARG}" | sed -e 's/[\/&]/\\\\&/g')"
                debug 10 "Pattern set to ${pattern}"

                # If value is not set we set it to pattern for now
                if [ "${new_value}" == "" ]; then
                    new_value="${pattern}"
                fi
                ;;
            'v')
                # Properly escape control characters in new value
                new_value="$(echo "${OPTARG}" | sed -e 's/[\/&]/\\\\&/g')"
                ;;
            'a')
                append=true
                parse_opt_arg ini_section
                ;;
            'o')
                opportunistic=true
                parse_opt_arg req_matches
                ;;
            'c')
                create=true
                append=true
                opportunistic=true
                ;;
            *)
                print_usage
        esac
    done
    # Cleanup getopts variables
    unset OPTSTRING OPTIND

    # Make sure all required parameters are provided
    # Fix: braces group the pattern/append test; the original relied on raw
    # &&/|| precedence, so the filename and value checks did not combine as
    # the error message documents
    if [ -z "${filename:-}" ] || { [ -z "${pattern:-}" ] && ! ${append} ; } || [ -z "${new_value:-}" ]; then
        color_echo red "${FUNCNAME[0]} requires filename, pattern and value to be provided"
        color_echo magenta "Provided filename: ${filename:-}"
        color_echo magenta "Provided pattern: ${pattern:-}"
        color_echo magenta "Provided value: ${new_value:-}"
        exit 64
    fi

    # Check to make sure file exists and is normal file, create if needed and specified
    # Fix: all of the "$(unknown)" corruption below replaced with "${filename}"
    if [ -f "${filename}" ]; then
        debug 10 "${filename} found and is normal file"
    else
        if [ ! -e "${filename}" ] && ${create} ; then
            # Create file if nothing exists with the same name
            debug 10 "Created new file ${filename}"
            ${priv_esc_cmd} touch "${filename}"
        else
            color_echo red "File ${filename} not found or is not regular file"
            exit 74
        fi
    fi

    # Count matches
    num_matches="$(${priv_esc_cmd} grep -c "${pattern}" "${filename}")"

    # Handle replacements
    if [ -n "${pattern}" ] && [ "${num_matches}" -eq "${req_matches}" ]; then
        ${priv_esc_cmd} sed -i -e 's/'"${pattern}"'/'"${new_value}"'/g' "${filename}"
    # Handle appends
    elif ${append} ; then
        if [ "${ini_section}" != "" ]; then
            # Fix: removed over-escaped quotes that made grep search for
            # literal '"' characters around the section name
            #shellcheck disable=SC2086
            ini_section_match="$(${priv_esc_cmd} grep -c "\[${ini_section}\]" "${filename}")"
            if [ "${ini_section_match}" -lt 1 ]; then
                # NOTE(review): only the section header is appended here, not
                # ${new_value}; confirm whether the value should follow it
                echo -e '\n['"${ini_section}"']\n' | ${priv_esc_cmd} tee -a "${filename}" > /dev/null
            elif [ "${ini_section_match}" -eq 1 ]; then
                # Insert the value at the first blank line after the section header
                ${priv_esc_cmd} sed -i -e '/\['"${ini_section}"'\]/{:a;n;/^$/!ba;i'"${new_value}" -e '}' "${filename}"
            else
                color_echo red "Multiple sections match the INI file section specified: ${ini_section}"
                exit 1
            fi
        else
            echo "${new_value}" | ${priv_esc_cmd} tee -a "${filename}" > /dev/null
        fi
    # Handle opportunistic, no error if match not found
    elif ${opportunistic} ; then
        color_echo magenta "Pattern: ${pattern} not found in ${filename}, continuing"
    # Otherwise exit with error
    else
        color_echo red "Found ${num_matches} matches searching for ${pattern} in ${filename}"
        color_echo red "This indicates a problem, there should be only one match"
        exit 1
    fi
}

# A function to make the ssh environment from a user available to the root
# user when running as a superuser via the priv_esc_cmd function
function link_ssh_config {
    # If root has no ssh config but pre-sudo user does we use the users config during the run
    if ! ${priv_esc_cmd} test -e /root/.ssh/config ; then
        if [ -z "${SUDO_USER_HOME}" ] && [ "${HOME}" != "/root" ]; then
            debug 10 "Did not find SUDO_USER_HOME varible setting to ${HOME}"
            SUDO_USER_HOME="${HOME}"
        fi
        if [ -f "${SUDO_USER_HOME}/.ssh/config" ]; then
            # Make sure .ssh directory exists and has correct permissions
            # Fix: use ${priv_esc_cmd} for the chmod too; the hard-coded
            # 'sudo' broke setups where priv_esc_cmd is su or empty
            ${priv_esc_cmd} mkdir -p "/root/.ssh" && ${priv_esc_cmd} chmod 700 "/root/.ssh"
            color_echo green "Copying ${SUDO_USER_HOME}/.ssh/config to /root/.ssh/config for this session"
            color_echo green "Please note that for future/automated r10k runs you might need to make this permanent"
            ${priv_esc_cmd} cp "${SUDO_USER_HOME}/.ssh/config" '/root/.ssh/config'
            ${priv_esc_cmd} chown root "/root/.ssh/config" && ${priv_esc_cmd} chmod 700 "/root/.ssh/config"
            # Remove the temporary config again when the script exits
            add_on_sig ${priv_esc_cmd} "rm -f /root/.ssh/config"
        fi
    else
        debug 10 "Running as user: $(whoami)"
        debug 10 "Found User home: ${SUDO_USER_HOME}"
        color_echo magenta "Not running as root or root user already has an SSH config, please make sure it's correctly configured as needed for GIT access"
    fi
}

#Creates a tar archive where all paths have been made relative
function create_relative_archive {
    debug 10 "Creating relative archive ${1}"
    local archive_path="${1}"
    local arguments=("${@}")
    local source_elements=("${arguments[@]:1}")
    local transformations=()
    local archive_operation="${archive_operation:-create}"
    assert in_array "${archive_operation}" 'create' 'append' 'update'


    local verbose_flag=''
    if [ "${verbosity}" -ge 5 ]; then
        local verbose_flag=' -v'
    fi
    # Iterate this way to avoid whitespace filename
bugs + num_transformations=${#source_elements[@]} + for (( i=1; i> "${inline_dest_file}" + chmod --reference="${inline_source_file}" "${inline_dest_file}" + debug 10 "Wrote combined source to ${inline_dest_file}" + fi +} + +# Creates an executable tar archive that can extract and run itself +# Note that any script that's provided should not require any parameters +# and should source/include this library file +# Any special commands or things that should be done after extracting the +# archive should be defined in a function called run_if_exec_archive, note that +# the archive will be extracted into a tmp dir name stored in ${tmp_archive_dir} +# Note that run_if_exec_archive will need to be defined before +# importing/sourcing this file +# Note that the archive should be in .tar.gz format +function create_exec_archive { + # An executable archive is just a bash script concatenated with an archive + # but separated with a marker __ARCHIVE_FOLLOWS__ + local binary_path="${1}" + local script_path="${2}" + local archive="${3}" + debug 10 "Creating binary ${binary_path} using ${script_path} and ${archive}" + # create_secure_tmp will store return data into the first argument + create_secure_tmp tmp_script_file 'file' + # shellcheck disable=SC2154 + inline_bash_source "${script_path}" "${tmp_script_file}" + debug 10 "Created temporary inlined script file at: ${tmp_script_file}" + cat "${tmp_script_file}" > "${binary_path}" || exit_on_fail + echo '__ARCHIVE_FOLLOWS__' >> "${binary_path}" || exit_on_fail + cat "${archive}" >> "${binary_path}" || exit_on_fail + chmod +x "${binary_path}" + debug 3 "Finished writing binary: ${binary_path}" +} + +# Slugifies a string +function slugify { + echo "${*}" | sed -e 's/[^[:alnum:]._\-]/_/g' | tr -s '-' | tr '[:upper:]' '[:lower:]' +} + +# Converts a string to upper case +function _upper { + local string="${*}" + if "${bash_pre_v4}" ; then + echo "${string}" | tr '[:lower:]' '[:upper:]' + else + echo "${string^^}" + fi +} +function 
upper { + # First command needs to be read, this way any piped input goes to it + while read -rt "${read_timeout:-1}" piped_data; do + declare -a piped_string + debug 10 "String piped to ${FUNCNAME}: ${piped_data}" + # shellcheck disable=2086 + piped_string+=( ${piped_data} ) + done + _upper "${*}${piped_string[*]}" +} + +# Converts a string to lower case +function _lower { + local string="${*}" + if "${bash_pre_v4}" ; then + echo "${string}" | tr '[:upper:]' '[:lower:]' + else + echo "${string,,}" + fi +} +function lower { + # First command needs to be read, this way any piped input goes to it + while read -rt "${read_timeout:-1}" piped_data; do + declare -a piped_string + debug 10 "String piped to ${FUNCNAME}: ${piped_data}" + # shellcheck disable=2086 + piped_string+=( ${piped_data} ) + done + _lower "${*}${piped_string[*]}" +} + +# Load default login environment +function get_env { + # Load all default settings, including proxy, etc + declare -a env_files + env_files=('/etc/environment' '/etc/profile') + for env_file in "${env_files[@]}"; do + if [ -e "${env_file}" ]; then + debug 10 "Sourcing ${env_file}" + #shellcheck source=/dev/null + source "${env_file}" + else + debug 10 "Env file: ${env_file} not present" + fi + done +} + +# Pick pidfile location if it's ever needed +if [ "${EUID}" -eq "0" ]; then + pid_prefix="/var/run/" +else + pid_prefix="/tmp/.pid_" +fi + +# Check for or create a pid file for the program +# takes program/pidfile name as a first parameter, this is the unique ID +# Exits with error if a previous matching pidfile is found +function init_pid { + pidfile="${pid_prefix}${1}" + if [ -f "${pidfile}" ]; then + file_size="$(wc -c < "${pidfile}")" + file_type="$(file -b "${pidfile}")" + max_file_size=$(cat < '/proc/sys/kernel/pid_max' | wc -c) + max_pid=$(cat < /proc/sys/kernel/pid_max) + if [ "${file_size}" -le "${max_file_size}" ] && [ "${file_type}" == 'ASCII text' ]; then + pid="$(cat "${pidfile}")" + if [ "${pid}" -le "${max_pid}" ]; then 
+ if [ "$(pgrep -cF "${pidfile}")" -eq 1 ]; then + color_echo green "Process with PID: ${pid} already running" + return 129 + else + color_echo red "Pidfile ${pidfile} already exists, but no process found with PID: ${pid}" + return 130 + fi + else + color_echo red "Pidfile ${pidfile} does not contain a real PID, value ${pid} is larger than max allowed pid of ${max_pid}" + return 1 + fi + else + color_echo red "Pidfile ${pidfile} is either too large or not of type ASCII, make sure it's a real PID file" + return 1 + fi + else + echo "${$}" > "${pidfile}" && add_on_sig "rm -f ${pidfile}" + return 0 + fi +} + +# Send success signal to other process by name +function signal_success { + signal "${1}" "SIGCONT" "Success" +} + +# Send failure signal to other process by name if send_failure_signal is true +send_failure_signal="${send_failure_signal:-true}" +function signal_failure { + if ${send_failure_signal} ; then + signal "${1}" "SIGUSR2" "Failure" + fi +} + +# Send a signal to process, read pid from file or search by name +# Parameters are: filename/processname signal message +function signal { + pidfile="${pid_prefix}${1}" + # Check if first parameter is pidfile or process name/search string + if init_pid "${1}" > /dev/null || [ ${?} == 129 ]; then + other_pids="$(cat "${pidfile}")" + else + other_pids="$(pgrep -f -d ' ' "${1}")" + fi + if [ "${other_pids}" != "" ]; then + kill -s "${2}" "${other_pids}" + color_echo cyan "Signalled ${3} to PID(s): ${other_pids}" + else + debug 5 "Unable to find process '${1}' to signal" + fi +} + +# Trim whitespaces from strings +function trim { + local var="${1}" + var="${var#"${var%%[![:space:]]*}"}" # remove leading whitespace characters + var="${var%"${var##*[![:space:]]}"}" # remove trailing whitespace characters + echo -n "${var}" +} + +# Sort array elements, accepts name of array to sort, defaults to unique sort +# but can be configured by setting the sort_command +function sort_array { + declare -ga "${1}" + local 
array_name="${1}" + local array_elements=( $(eval echo '${'"${array_name}"'[@]}') ) + sort_command="${sort_command:-sort -u}" + readarray -t "${1}" < <(for element in "${array_elements[@]}"; do echo "${element}"; done | ${sort_command}) +} + +# Creates an associative array from an array of variable names setting the +# values as the variable values. +# Accepts the name of an array to expand and the name of the associative array +# to be created. +# Unset or empty variables will raise an error unless +# ignore_missing_associate_value is set to true in which the key/value will be +# skipped. +function associate_array { + local source_array_name="${1}" + local array_elements=( $(eval echo '${'"${source_array_name}"'[@]}') ) + local new_array_name="${2}" + debug 10 "Creating associative array: ${new_array_name} from: ${source_array_name} with elements: ${array_elements[*]}" + declare -gA "${new_array_name}" + + for key in "${array_elements[@]}" ; do + debug 10 "Processing associate key: ${key}" + if [ -n "${!key:-}" ] ; then + debug 10 "Setting ${new_array_name}[${key}] to ${!key}" + eval ${new_array_name}[${key}]=${!key} + elif ! 
${ignore_missing_associate_value:-false} ; then + error 0 "No variable found to be set with name ${key}" + exit_on_fail + fi + done +} + +# Safely loads config file +# First parameter is filename, all consequent parameters are assumed to be +# valid configuration parameters +function load_config { + config_file="${1}" + # Verify config file permissions are correct and warn if they aren't + # Dual stat commands to work with both linux and bsd + while read -r line; do + if [[ "${line}" =~ ^[^#]*= ]]; then + setting_name="$(echo "${line}" | awk -F '=' '{print $1}' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" + setting_value="$(echo "${line}" | cut -f 2 -d '=' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" + + for requested_setting in "${@:2}" ; do + if [ "${requested_setting}" == "${setting_name}" ] ; then + export "${setting_name}"="${setting_value}" + debug 10 "Loaded config parameter ${setting_name} with value of '${setting_value}'" + fi + done + fi + done < "${config_file}"; +} + +# Load settings from config file if they have not been set already +# First parameter is filename, all consequent parameters are assumed to be +# configuration parameters +function load_missing_config { + declare -a new_settings + new_settings=() + for setting in "${@:2}"; do + if [ -z "${!setting:-}" ] ; then + new_settings+=( "${setting}" ) + fi + done + if [ -n "${new_settings[*]:-}" ] ; then + debug 10 "Attempting to load missing settings: ${new_settings[*]} from config file: '${1}'" + load_config "${1}" "${new_settings[@]}" + else + #shellcheck disable=SC2145 + debug 5 "No missing settings to load, all specified settings already set for: ${@:2}" + fi +} + +# Make sure symlink exists and points to the correct target, will remove +# symlinks pointing to other locations or do nothing if it's correct. +function ln_sf { + # Check for the minimum number of arguments + if [ ${#@} -lt 2 ]; then + color_echo red "Called 'ln_sf' with less than 2 arguments." 
+ exit_on_fail + fi + + target_path="${1}" + link_path="${2}" + assert test -e "${target_path}" + debug 10 "Creating symlink at ${2} pointing to ${1}" + if [ -L "${link_path}" ] ; then + current_target="$(readlink "${link_path}")" + if [ "${current_target}" != "${target_path}" ] ; then + debug 6 "Removing existing symlink: ${link_path}" + rm -f "${link_path}" + else + debug 6 "Current symlink at ${link_path} already points to ${target_path}" + return 0 + fi + elif [ -e "${link_path}" ]; then + color_echo red "Found filesystem object at: ${link_path} but it's not a symlink, fatal error, exiting!" + exit_on_fail + fi + # Create symlink + ln -s "${target_path}" "${link_path}" + debug 10 "Successfully created symlink" +} + +# Create string of random characters +# - First param is length, default: 20 +# - Second param is characters, default: A-Za-z0-9_ (Note: '-' specifies range) +function gen_rand_chars { + shopt_decorator_option_name='pipefail' + shopt_decorator_option_value='false' + # shellcheck disable=2015 + shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" + + local length="${1:-20}" + local chars="${2:-A-Za-z0-9_}" + debug 10 "Creating a string of random characters of length: ${length} and chars: ${chars}" + LC_CTYPE=C tr -dc "${chars}" < '/dev/urandom' | head -c "${length}" +} + +# Checks if an environment variable is set and contains a string longer than +# 0, if not then it's set to a random value. +# If a file name/path is specified then a line containing VARIABLE=VALUE is +# written to the end of the file. Optionally the length of the random +# string/value can be specified. 
(defaults to 50) +function check_set_persist_random_variable { + local var_name="${1}" + local file_path="${csprv_file_path:-${2:-}}" + local key_length="${csprv_key_length:-${3:-50}}" + assert test -n "${var_name}" + if [ -z "${!var_name:-}" ] ; then + debug 11 "No variable named ${var_name} found, generating a random string" + export "${var_name}"="$(gen_rand_chars "${key_length}")" + + if [ -n "${file_path}" ] ; then + if [ -e "${file_path}" ] ; then + debug 10 "Writing variable key/value to file ${file_path}" + echo "${var_name}=${!var_name}" >> "${file_path}" + else + color_echo red "Unable to find/open file: ${file_path}" + exit_on_fail + fi + else + debug 10 "${FUNCNAME[0]} no file_path specified, setting ${var_name} but not persisting" + fi + else + debug 10 "Variable ${var_name} is already set" + fi +} + +function manage_service { + # Ensure all arguments are passed in + local items=( ${@} ) + assert [ "${#items[@]}" -eq 2 ] + + # Set args into meaningful names + local service="${1}" + local action="${2}" + + # Disable paging when using systemd + if command -v systemd &> /dev/null; then + export SYSTEMD_PAGER='cat' + fi + + local commands=("/etc/init.d/${service} ${action}") # init.d + commands+=("/usr/sbin/service ${service} ${action}") # Old Redhat + commands+=("/sbin/service ${service} ${action}") # Old Debian + commands+=("/bin/systemctl ${action} ${service}") # Redhat systemd + commands+=("/usr/bin/systemctl ${action} ${service}") # Debian/other systemd + commands+=("${action} ${service}") # Upstart + + # Loop though each command + local command + for command in "${commands[@]}"; do + debug 10 "Checking command, '${command}', to determine if we can run it on this system" + + # Check if the path to the command exists + local path + path="$(echo "${command}" | cut -d' ' -f1)" + if [[ -e "${path}" ]]; then + debug 10 "Path to command found: '${path}'" + + # Run command + ${command} + return "${?}" + else + debug 10 "Path to command not found: '${path}'" 
+ fi + done + + debug 10 'Exhausted init commands, try again with debug/verbosity for more information.' + return 1 +} + +tls_common_cert_attrib="${tls_common_cert_attrib:-/C=ZZ/ST=None/L=None/O=None/OU=None}" +tls_valid_days=${tls_valid_days:-3650} +tls_key_type="${tls_key_type:-rsa:4096}" + +# Creates a Certificate Authority if one does not exist in the CA cert path. +# Requires two arguments, paths to the key and certificate files. +# Optionally consumes COMMON_NAME variable and appends to CN attribute. +function tls_create_cert_authority { + assert whichs openssl + ca_key_path="${1}" + ca_cert_path="${2}" + common_name="${COMMON_NAME:-${HOSTNAME:-$(hostname --fqdn)}}" + assert test -n "${ca_cert_path}" + assert test -n "${ca_key_path}" + if ! [ -e "${ca_cert_path}" ] ; then + debug 8 "Creating CA: ${ca_cert_path} with key ${ca_key_path}" + openssl req -new -x509 -nodes -out "${ca_cert_path}" -keyout "${ca_key_path}" -subj "${tls_common_cert_attrib}/CN=${common_name}" -newkey "${tls_key_type}" -sha512 -days "${tls_valid_days}" + else + debug 8 "Certificate ${ca_cert_path} already exists, skipping!" + fi +} + +# Creates a new key/certificate pair and signs the certificate with a CA if a +# certificate does not already exist in the new cert path. +# Requires four arguments, new key path, new cert path, CA key path and CA cert +# path. Optionally consumes COMMON_NAME variable and appends to CN attribute +function tls_create_sign_cert { + assert whichs openssl + new_key_path="${1}" + new_cert_path="${2}" + ca_key_path="${3}" + ca_cert_path="${4}" + common_name="${COMMON_NAME:-${HOSTNAME:-$(hostname --fqdn)}}" + assert test -n "${new_cert_path}" + assert test -n "${new_key_path}" + assert test -n "${ca_cert_path}" && test -r "${ca_cert_path}" + assert test -n "${ca_key_path}" && test -r "${ca_key_path}" + if ! 
[ -e "${new_cert_path}" ] ; then + openssl req -new -keyout "${new_key_path}" -nodes -newkey "${tls_key_type}" -subj "${tls_common_cert_attrib}/CN=${common_name}" | \ + openssl x509 -req -CAkey "${ca_key_path}" -CA "${ca_cert_path}" -days "${tls_valid_days}" -set_serial "${RANDOM}" -sha512 -out "${new_cert_path}" + fi +} + +# Creates a self signed cert/key pair if a cert does not exist in the path. +# Requires two arguments, path to the key and cert to be created. +# Optionally consumes COMMON_NAME variable and appends to CN attribute. +function tls_create_self_signed_cert { + assert whichs openssl + new_key_path="${1}" + new_cert_path="${2}" + common_name="${COMMON_NAME:-${HOSTNAME:-$(hostname --fqdn)}}" + assert test -n "${new_cert_path}" + assert test -n "${new_key_path}" + openssl req -new -keyout "${new_key_path}" -nodes -newkey "${tls_key_type}" -subj "${tls_common_cert_attrib}/CN=${common_name}" -x509 -sha512 -days "${tls_valid_days}" -nodes -set_serial "${RANDOM}" -out "${new_cert_path}" +} + + + +alias "mantrap"='color_echo green "************,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,**********///****************************************///, .. .....**/////*,***//////////////////*/////////*** +> ,,,,,,,,,,,,,,,,,,,,,,,..,,,,,,,,,,********/////////////////////////////////////********************,,,**///////////////////,,**///////////////////////////*/// +> ,,,,,,************,,,,,,,,,,,,,...... .,*/**/*///////*//////////////////////////////******************,,,,**///////////////,,,**///////////////*//(////////// +> ,,,************************,. ...... ...,******//////////////////////**********///////********************,,*/////////////,,**///////////////////////////// +> /////////////////////////*,. ...... ....,*//////*////***************,,,,,,,,,,,,,*////*********************,,,*/////////*.,**//////////////////////////// +> //////////////////////*,,.. 
............,*********/*********************,,,,,,,,,*******************************,**/////*/////////////////////////////// +> */////////////////*,,.... ........,,,*****//*******************,**,,,**,**************************************///////*//////////////(///////////// +> ***********//////,...... .....,,,,,,,,**/////************,,,,,(#######*/%%%%%%%%%%###%%#/%%%%#//**(#&@@@@@&%(**////////////#%%%%%%%%%%(///(///// +> **********//////,...... .....,,,,,,,,,,*********,,,,,,,,,,,,,%@@@@@@&,/@@@@@@@@@@@@@@@@(@@@@&/*(@@@@@@@@@@@@@(*////*//////&@@@@@@@@@@%///((///* +> ***************,......... . ...,,,,,,,,,,,,,,,,,,.. #@@@@@@&./@@@@@@@@@@@@@@@@/@@@@&*(@@@@@@@@@@@@@@@#***////////&@@@@@@@@@@&/((((((/* +> ,,,,,..... ......... .... ...,,,,,,,,,**,. #@@@@@@&.*###%&@@@@@@%####,#@@@(,%@@@@@@#*(@@@@@@&*****/////(@@@@@@@@@@@@(((((((/* +> *,,,,.,. .. .... .. . ...,,,,,,,,,,**, #@@@@@@&. ,%@@@@@@/ .&@@# %@@@@@@#*(@@@@@@&*******///#@@@@@@@@@@@@#((((((/* +> ****,,,........ .... ..,,,,,,,,,,**, . .*,. #@@@@@@&. ,%@@@@@@/ ./. #@@@@@@@/*///////********/(%@@@@@&%@@@@@%((((((/* +> ((*,,.,*,... ...... ..,,,,,,,,,,**, . .,. #@@@@@@&. ,%@@@@@@/ ,@@@@@@@@@**************(&@@@@@##@@@@@&((((((/* +> /(. ,,*(/*,. . ....... ..,,,,,,,,,,,*, . . #@@@@@@&. ,%@@@@@@/ ,&@@@@@@@@@@&(***********%@@@@@@((@@@@@@#(((((/* +> ./ .,.. ..... ....... ..... ..,,,,,,,,,,***, #@@@@@@&. ,%@@@@@@/ /%@@@@@@@@@@%**********&@@@@@@((&@@@@@#(((((/* +> ........ ...... ...... ..,,,,,,,,,****. .,,, #@@@@@@&. ,%@@@@@@/ *&@@@@@@@@&********/@@@@@@&//&@@@@@((((/* +> . .,.... . .. ... ..........,,,,**,,**,*.. ..,,,, #@@@@@@&. *%@@@@@@/ /%%%%%%/ #@@@@@@@/*******(@@@@@@@&&&@@@@@@%((((/* +> .. ,,......... ... . ........... .,,,,,.,,****. .,,,**, #@@@@@@&. ,%@@@@@@/ (@@@@@@( &@@@@@@#*******%@@@@@@@@@@@@@@@@&((((/* +> ,,. .,........... . . .............. ..,,,,.,,,,**. ...,*,. #@@@@@@&. ,%@@@@@@/ /@@@@@@( %@@@@@@#*******&@@@@@@@@@@@@@@@@@(((((/ +> ... .. . .... ..... . . .......,,,,,,..,* ...., #@@@@@@&. 
,%@@@@@@/ ,@@@@@@&*(@@@@@@@,******/@@@@@@@&//(@@@@@@@#((((/ +> ... . ... ... ... .....,,,. .*, .. .,. #@@@@@@&. ,%@@@@@@/ /@@@@@@@@@@@@@@( ****/@@@@@@@%//(@@@@@@@%((((/ +> ..,.**, ... . .... ... ......,**/, .* .//,.. (&&&&&&%. ,#&&&&&&* *#@@@@@@@@&/ ***#@@@@@@@(//(&@@@@@@&((((( +> ..,**. .,...... .. .... .......,*//... .. .,.., ..... ***********////(((((((((((( +> . .... .. ...,. .. .... .,*.......... #@@@@@@@@@@@@@@@@# #@@@@@@@@@@@&%(. %@@@@@@@@@@@. ***#&@@@@@@@@@@@&%#((((((( +> ,(*. .,.(/*, .,,... #@@@@@@@@@@@@@@@@# #@@@@@@@@@@@@@@@%. .@@@@@@@@@@@@/ **#@@@@@@@@@@@@@@@@#((((( +> ,*////(/,. , . . #@@@@@@@@@@@@@@@@# #@@@@@@@%&@@@@@@@#, *@@@@@@@@@@@@% .*#@@@@@@@@&@@@@@@@@((((( +> .**********//,, .*,,. ...... .... ....*@@@@@@@%.... #@@@@@@@, ,@@@@@@&* (@@@@@@@@@@@@&. .#@@@@@@@@//&@@@@@@#(((( +> **************,,. ... .... ...... ,@@@@@@@% #@@@@@@@, .&@@@@@&/ %@@@@@@#@@@@@@, (&@@@@@@@//&@@@@@@#(((( +> ,,,***********,,. .........,,,,,,,, ,@@@@@@@% #@@@@@@@, ,@@@@@@%, .@@@@@@&*@@@@@@( (&@@@@@@@/*&@@@@@@#(((( +> ,,*,*****,,****,. . ...,,,***(* ,@@@@@@@# #@@@@@@@&@@@@@@@@/ ,@@@@@@%.@@@@@@% (&@@@@@@@/(@@@@@@@#(((( +> *,,,*****,,,****,. . .....,,,(((#(//***,. ,@@@@@@@# #@@@@@@@@@@@@@%* (@@@@@@( &@@@@@@. (&@@@@@@@@@@@@@@@&((((( +> *,,,*****,,,,,***,. ...,,*, .*/((((((((*. ,@@@@@@@# #@@@@@@@##&@@@@@@, %@@@@@@, %@@@@@@* (&@@@@@@@@@@@@@&%(((((( +> *,,,,,,,*,,,,,,,*,,. ..,,,, .*(##((((((((///,. ,@@@@@@@# #@@@@@@@, .@@@@@@%* .@@@@@@@, (@@@@@@# (&@@@@@@&/*******(((((( +> ,,,,,,,,**,,,,,,,***, . ..,, .*/(#((((((((((/*, ,@@@@@@@# #@@@@@@@, .@@@@@@&/ ,@@@@@@@@@@@@@@@@& (&@@@@@@@/*******/((((( +> ,,,,,,,,,,,,,,,,,,,***/*. . ,/((###(((((((((/ ,@@@@@@@% #@@@@@@@, .@@@@@@&/ (@@@@@@@@@@@@@@@@@, (&@@@@@@@/********((((( +> ,,,,,,,,,,,,,,,,,,,,,,,**/*,. .*/(((####((((((((((. ,@@@@@@@% #@@@@@@@, .@@@@@@&/ %@@@@@@@%%%@@@@@@@( (&@@@@@@@/********/(((( +> ,,,,,,,,,,,,,,,,,,,,,,,,,,,**********,,***////(((######((((((((((#, ,@@@@@@@% #@@@@@@@, .@@@@@@&/.@@@@@@@&. 
.@@@@@@@% (&@@@@@@@/*********(((( +> ,,,,,,,,,,,,,,,,,,,,,,,,,.,,,,,*******/////(((((#######((((((((((##. ,@@@@@@@# #@@@@@@@, .&@@@@@&/,@@@@@@@& .&@@@@@@&.(&@@@@@@@/**,,*****/((( +> ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,****///((((((#####((((((((((((###* .*******, ,*******. *******.,/(/****, *//*/***..******((/**,,,*****((( +> ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,******/////((((((#####(((((((((((((((( ..,,. . ,*****,,*****/(( +> ,,,,,,,,,,,,,,,,,,,,,,,,,,,**********//////(((((((#####(((((((((((((((. ..,. .,,,.. ,****,,,,****(( +> ..,,,,,,,,,,,,,,,,,,..,,,,,,***///////////////((((#####((((((((((((((#. . .... .,,,.. .****,,,,****(( +> .........,,,,,,,,,,,......,,,**////////////////((((((##(((((((((((((##. . ...,,,,,,.. ,***,,,,****/("' + + +# Unit tests +# +# Short tests should be placed in the test_shtdlib function, longer and more +# elaborated tests should be placed in their own functions and called from +# test_shtlib + +# Test function to decorate +function test_shopt_decorator { + shopt_decorator_option_name='pipefail' + shopt_decorator_option_value=true + # shellcheck disable=2015 + shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" + echo "${*}" + shopt -o pipefail + assert shopt -qo pipefail && color_echo green "Successfully decorated ${FUNCNAME[0]} with pipefail" +} + +# Test signaling +function test_signal_process { + signal_processor SIGUSR2 'exit 42' > /dev/null + local sub_pid_0="${!}" + signal_processor SIGUSR1 "sleep 2 && kill -s SIGUSR2 ${sub_pid_0} && exit 42" > /dev/null + local sub_pid_1="${!}" + debug 10 "Spawned sub processes using signal processor with pids: ${sub_pid_0} and ${sub_pid_1}" + debug 10 "Active sub processes are: $(pgrep -P ${$} | tr '\n' ' ')" + signal_process "${sub_pid_1}" SIGUSR1 > /dev/null + debug 10 "Waiting for sub processes to exit" + bash -c "sleep 10 && kill ${sub_pid_0} &> /dev/null" & + bash -c "sleep 10 && kill ${sub_pid_1} &> /dev/null" & + 
while pgrep -P ${$} > /dev/null ; do + debug 10 "Waiting for ${sub_pid_0}" + # Make sure the sub process exits with 42 + wait ${sub_pid_0} &> /dev/null || assert [ "${?}" == '42' ] + color_echo green "Sub process was signaled, responded and properly exited" + return 0 + done + color_echo red "Signaling and sub process test failed" + return 1 +} + +# Test filesystem monitoring/event triggers +# shellcheck disable=SC2120 +function test_add_on_mod { + shopt_decorator_option_name='errexit' + shopt_decorator_option_value='false' + # shellcheck disable=2015 + shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" + + if ! ( whichs inotifywait || whichs fswatch ) ; then + debug 4 "Unable to locate inotify or fswatch, trying to install them" + install_package inotify-tools fswatch + fi + + signal_processor SIGUSR1 'exit 42' > /dev/null + local signaler_pid="${!}" + local tmp_file_path + tmp_file_path="$(mktemp)" + add_on_exit "rm -f ${tmp_file_path}" + debug 10 "Using temporary file: ${tmp_file_path} to test add_on_mod" + max_frequency=5 add_on_mod "signal_process ${signaler_pid} SIGUSR1 &> /dev/null" "${tmp_file_path}" & + mod_watcher_pid="${!}" + bash -c "sleep 2 && echo 'test message' > '${tmp_file_path}'" + bash -c "sleep 10 && kill ${signaler_pid} &> /dev/null" & + while pgrep -P ${$} > /dev/null ; do + debug 10 "Waiting for PID ${signaler_pid} to exit" + wait "${signaler_pid}" &> /dev/null + return_status="${?}" + # Make sure the sub process exits with 42 + if [ "${return_status}" != '42' ] ; then + debug 1 "Got return status ${return_status} when waiting for ${signaler_pid} to exit" + exit_on_fail + fi + color_echo green "Sub process was signaled by file system monitoring thread, responded and properly exited" + debug 10 "Signaling mod_watcher ${mod_watcher_pid} to exit" + kill "${mod_watcher_pid}" + return 0 + done + color_echo red "Filesystem modification monitoring and trigger 
testing failed" + return 1 +} + +# Test function for create_secure_tmp function +function test_create_secure_tmp { + local tmp_file + local tmp_dir + + # Test 2 arguments + create_secure_tmp "tmp_file" "file" + create_secure_tmp "tmp_dir" "dir" + + assert [ -e "${tmp_file}" ] + assert [ "$(stat -c %a "${tmp_file}")" -eq 600 ] + echo 'test' > "${tmp_file}" + assert grep test "${tmp_file}" > /dev/null + + assert [ -e "${tmp_dir}" ] + assert [ "$(stat -c %a "${tmp_dir}")" -eq 700 ] + touch "${tmp_dir}/test" + assert [ -e "${tmp_dir}/test" ] + + # Test 3 arguments + create_secure_tmp "tmp_file2" "file" "${tmp_dir}" + create_secure_tmp "tmp_file3" "file" "/tmp/tmp_file3" + create_secure_tmp "tmp_dir2" "dir" "/tmp/tmp.new_dir" + + assert [ -e "${tmp_file}" ] + assert [ "$(stat -c %a "${tmp_file}")" -eq 600 ] + echo 'test' > "${tmp_file}" + assert grep test "${tmp_file}" > /dev/null + + assert [ -e "${tmp_file}" ] + assert [ "$(stat -c %a "${tmp_file}")" -eq 600 ] + echo 'test' > "${tmp_file}" + assert grep test "${tmp_file}" > /dev/null + + assert [ -e "${tmp_dir}" ] + assert [ "$(stat -c %a "${tmp_dir}")" -eq 700 ] + + color_echo green 'Temporary files and directories successfully created and tested' + return 0 +} + +# Primary Unit Test Function +# Defaults to testing all bash versions in containers, any/all arguments are +# assumed to be container image names (bash versions) to test with. +# Also supports "local" which will test without using containers. 
+function test_shtdlib { + export verbosity=11 + # Run this function inside bash containers as/if specified + if in_array 'local' "${@:-}" ; then + if [ "${#}" -ne 1 ] ; then + supported_bash_versions=( "${@/local}" ) + test_decorator "${FUNCNAME[0]}" + fi + else + supported_bash_versions=( "${@:-}" ) + test_decorator "${FUNCNAME[0]}" && return + fi + + color_echo green "Testing shtdlib functions" + + # Show some basic system stats + color_echo cyan "OS Family is: ${os_family}" + color_echo cyan "OS Type is: ${os_type}" + color_echo cyan "OS Name is: ${os_name}" + color_echo cyan "OS version is (major.minor.patch): ${major_version}.${minor_version}.${patch_version}" + color_echo cyan "Local IPs are:" + for ip in ${local_ip_addresses} ; do + color_echo cyan "${ip}" + done + + # Test color output + color_echo cyan "Testing echo colors:" + color_echo black "Black" + color_echo red "Red" + color_echo green "Green" + color_echo yellow "Yellow" + color_echo blue "Blue" + color_echo magenta "Magenta" + color_echo cyan "Cyan" + color_echo blank "Blank" + + # Test decorators + # shellcheck disable=2015 + shopt -uo pipefail && test_shopt_decorator 'Hello World' || exit_on_fail + + # Test whichs command + whichs command && color_echo green "whichs found the command 'command'" + + # Test assert command and make some basic assertions + assert true && color_echo green "asserted 'true' is true" + assert whichs ls + assert [ 0 -eq 0 ] + + # Test array inclusion, argument counting and empty check + declare -a shtdlib_test_array + shtdlib_test_array=(a b c d e f g) + # shellcheck disable=SC1117 + assert in_array 'a' "${shtdlib_test_array[@]}" && color_echo cyan "'a' is in '${shtdlib_test_array[*]}'" + assert [ "$(count_array_elements shtdlib_test_array)" == 7 ] && color_echo green "Found 7 elements in test array" + declare -a shtdlib_empty_array + assert empty_array shtdlib_empty_array + + # Test verbosity and debug logging + orig_verbosity="${verbosity:-1}" + verbosity=1 && 
color_echo green 'Verbosity set to 1 (should see debug up to 1)' + for ((i=1; i <= 11 ; i++)) ; do + debug ${i} "Debug Level ${i}" + done + verbosity=10 && color_echo green 'Verbosity set to 10 (should see debug up to 10)' + for ((i=1; i <= 11 ; i++)) ; do + debug ${i} "Debug Level ${i}" + done + verbosity="${orig_verbosity}" + + # Test finalizing paths + shtdlib_test_variable='/home/test' + finalize_path shtdlib_test_variable > /dev/null + finalize_path '~' > /dev/null + finalize_path './' > /dev/null + finalize_path '$HOME/test' > /dev/null + + # Test stripping path and exptension from a path + assert [ "$(basename_s /tmp/example.file)" == 'example' ] && color_echo green 'Tested basename_s correctly stripped path and extension from a path' + + # Test counting arguments + assert [ "$(count_arguments 1 2 3 4)" == 4 ] && color_echo green 'Tested count_arguments with 4 args' + + # Test platform neutral readlink -m/_m implementation + tmp_file_path="$(mktemp)" + tmp_symlink_dir="$(mktemp -d)" + tmp_file_name="$(basename "${tmp_file_path}")" + ln -s "${tmp_file_path}" "${tmp_symlink_dir}/${tmp_file_name}" + assert [ "$(readlink_m "${tmp_symlink_dir}/${tmp_file_name}")" == "${tmp_file_path}" ] && color_echo green "Sucessfully determined symlink target with readlink_m" + + # Test safe loading of config parameters + tmp_file="$(mktemp)" + add_on_sig "rm -f ${tmp_file}" + test_key='TEST_KEY' + test_value='test value moretest -f /somepath ./morepath \/ping ${}$() -- __' + echo "${test_key}=${test_value}" > "${tmp_file}" + load_config "${tmp_file}" 'TEST_KEY' + # shellcheck disable=SC2153 + test "'${TEST_KEY}'" == "'${test_value}'" || exit_on_fail + + # Test version sort + sorted_string="$(version_sort '1 0 2.3.2 3.3.3 1.1.1 0.0.1 2m 2.2.2m 4.4a')" + assert [ "${sorted_string//[$'\t\r\n ']/ }" == '0 0.0.1 1 1.1.1 2m 2.2.2m 2.3.2 3.3.3 4.4a' ] && color_echo green "Successfully tested version sort" + + # Test version comparison + assert compare_versions '1.1.1 1.2.2test' + 
assert [ "$(compare_versions '1.2.2 1.1.1'; echo "${?}")" == '1' ] + assert compare_versions '1.0.0 1.1.1 2.2.2' + assert [ "$(compare_versions '4.0.0 3.0.0 2.0.0 1.1.1test 1.0.0' ; echo "${?}" )" == '4' ] + + # Test process signaling + test_signal_process + + # Test filesystem object activity triggers + # shellcheck disable=SC2119 + test_add_on_mod + + # Test resolving domain names (IPv4) + assert [ "$(resolve_domain_name example.com | grep -v '.*:.*:.*:.*:.*:.*:.*:.*')" == '93.184.216.34' ] + + test_create_secure_tmp + + # Test version increment + new_version=$(version_increment 12323.3.2) + assert [ "${new_version}" == '12323.3.3' ] +} + +# Test bash version +if "${bash_pre_v4}" ; then + debug 9 "Detected bash version ${BASH_VERSION}, for optimal results we suggest using bash V4 or later" +fi -- GitLab From 79bd5b6938c1bd8fcc0886eb0cd549cb14b4adb8 Mon Sep 17 00:00:00 2001 From: Al Fontaine Date: Wed, 8 Jul 2020 21:04:35 +0000 Subject: [PATCH 07/27] Update Dockerfile --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index f386995..3dbe104 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,3 @@ - ARG BASE_REGISTRY=nexus-docker-secure.levelup-nexus.svc.cluster.local:18082 ARG BASE_IMAGE=opensource/nginx/nginx ARG BASE_TAG=1.19.0 @@ -18,6 +17,8 @@ LABEL name="SD Elements Just In Time Training (JITT) Container" \ ENV VENDOR=security-compass +USER root + RUN set -x \ && dnf -y upgrade \ && dnf -y install iproute gettext-libs procps-ng \ -- GitLab From 668157ce5ddd723d1db6a61376665319ad9e0271 Mon Sep 17 00:00:00 2001 From: Al Fontaine Date: Wed, 8 Jul 2020 22:25:46 +0000 Subject: [PATCH 08/27] Update Dockerfile --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 3dbe104..06c8670 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,9 +33,9 @@ RUN set -x \ && rm -f /var/log/nginx/access.log \ && rm -f /var/log/nginx/error.log -ADD /scripts/rtenvsub.sh 
/bin/rtenvsub.sh -ADD /scripts/shtdlib.sh /bin/shtdlib.sh -ADD /jitt-${jitt_version}.tar.gz /jitt +COPY /scripts/rtenvsub.sh /bin/rtenvsub.sh +COPY /scripts/shtdlib.sh /bin/shtdlib.sh +COPY /jitt-${jitt_version}.tar.gz /jitt COPY /scripts/run_nginx.sh /bin/run_nginx.sh USER nginx -- GitLab From 549b6ebb24a8c442ebcfc6eaaac6cbf51c941d49 Mon Sep 17 00:00:00 2001 From: Al Fontaine Date: Wed, 8 Jul 2020 22:26:24 +0000 Subject: [PATCH 09/27] Update Dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 06c8670..b09f67f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ ARG BASE_TAG=1.19.0 FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} # Default to local build context files -ARG jitt_version +ARG jitt_version=5.5.23 LABEL name="SD Elements Just In Time Training (JITT) Container" \ maintainer="devops-support@securitycompass.com" \ -- GitLab From f1a8ee713b95f6265bd6182d8d6a8843cbdb8a12 Mon Sep 17 00:00:00 2001 From: Matthew Chum Date: Thu, 9 Jul 2020 10:01:35 -0400 Subject: [PATCH 10/27] add2copy --- Dockerfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b09f67f..c3c2f38 100644 --- a/Dockerfile +++ b/Dockerfile @@ -35,9 +35,13 @@ RUN set -x \ COPY /scripts/rtenvsub.sh /bin/rtenvsub.sh COPY /scripts/shtdlib.sh /bin/shtdlib.sh -COPY /jitt-${jitt_version}.tar.gz /jitt +COPY /jitt-${jitt_version}.tar.gz /jitt-${jitt_version}.tar.gz COPY /scripts/run_nginx.sh /bin/run_nginx.sh +RUN mkdir -p /jitt \ + && tar -zxf "/jitt-${jitt_version}.tar.gz" -C /jitt \ + && rm -fv "/jitt-${jitt_version}.tar.gz" + USER nginx HEALTHCHECK --interval=15s --timeout=10s --retries=3 CMD pgrep -lf nginx || exit 1 -- GitLab From 7a9a7709be414d22bf430f3c00fa900692735ad4 Mon Sep 17 00:00:00 2001 From: mchum Date: Thu, 9 Jul 2020 18:35:05 +0000 Subject: [PATCH 11/27] Fix/container issues --- Dockerfile | 2 +- download.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 
deletions(-) diff --git a/Dockerfile b/Dockerfile index c3c2f38..b62190b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ ARG BASE_TAG=1.19.0 FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} # Default to local build context files -ARG jitt_version=5.5.23 +ARG jitt_version=5.6.33 LABEL name="SD Elements Just In Time Training (JITT) Container" \ maintainer="devops-support@securitycompass.com" \ diff --git a/download.yaml b/download.yaml index f82cc63..09998cc 100644 --- a/download.yaml +++ b/download.yaml @@ -1,6 +1,6 @@ resources: - - url: "https://tar.sdelements.com/pulp/isos/Default_Organization/Library/custom/sde/SDElements_Dependency_RPMs/jitt-5.5.23.tar.gz" - filename: "jitt-5.5.23.tar.gz" + - url: "https://tar.sdelements.com/pulp/isos/Default_Organization/Library/custom/sde/SDElements_Dependency_RPMs/jitt-5.6.33.tar.gz" + filename: "jitt-5.6.33.tar.gz" validation: type: sha256 value: "472ad942998b0a444e51637ccf8bda039c475ee4f0bccc714bd620485bb2d631" -- GitLab From 55038379f8f58a68400b1f88a7b0c86444dffb01 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Mon, 20 Jul 2020 15:26:09 -0400 Subject: [PATCH 12/27] Update local build comand --- README.md | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index be0f233..1b80451 100644 --- a/README.md +++ b/README.md @@ -7,21 +7,28 @@ This container hosts SDElements Just In Time Training (JITT) content using Nginx ## Local build -Download NGINX version 1.19.0 as a tarball from `https://dcar.dsop.io/repomap/opensource/nginx/nginx` -Follow the instructions under "Downloading and Running the image". For this example, we will use `nginx-1.19.0.tar` as the file downloaded. +1. Download NGINX version 1.19.0 as a tarball from `https://dcar.dsop.io/repomap/opensource/nginx/nginx`. For this example, we will use `nginx-1.19.0.tar` as the file downloaded. 
-Load the tarball into docker + Load the tarball into docker -```bash -docker load -i nginx-1.19.0.tar -``` + ```bash + docker load -i nginx-1.19.0.tar + ``` -Use this command to build locally: +2. Download the memcached tarball defined in `download.yaml`. The URL below is used as an example. Note +the version of memcached, in this case `5.6.33` -```bash -docker build . -t ubi_nginx-jitt \ - --build-arg BASE_REGISTRY="nexus-docker-secure.levelup-nexus.svc.cluster.local:18082" \ - --build-arg BASE_IMAGE="opensource/nginx/nginx" \ - --build-arg BASE_TAG="1.19.0" \ - --build-arg jitt_version="5.5.23" -``` + ```bash + wget --http-user=user --ask-password https://tar.sdelements.com/pulp/isos/Default_Organization/Library/custom/sde/SDElements_Dependency_RPMs/jitt-5.6.33.tar.gz + ``` + + +3. Use this command to build locally: + + ```bash + docker build . -t ubi_nginx-jitt:5.6.33 \ + --build-arg BASE_REGISTRY="localhost" \ + --build-arg BASE_IMAGE="opensource/nginx/nginx" \ + --build-arg BASE_TAG="1.19.0" \ + --build-arg jitt_version="5.6.33" + ``` -- GitLab From f21c5a8879a9269ad621c8e5dfd25012642a2705 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Wed, 22 Jul 2020 15:57:23 -0400 Subject: [PATCH 13/27] Run container as root --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b62190b..f8cecb2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -42,6 +42,6 @@ RUN mkdir -p /jitt \ && tar -zxf "/jitt-${jitt_version}.tar.gz" -C /jitt \ && rm -fv "/jitt-${jitt_version}.tar.gz" -USER nginx +# USER nginx HEALTHCHECK --interval=15s --timeout=10s --retries=3 CMD pgrep -lf nginx || exit 1 -- GitLab From 4f4f7407da7eeebe495b9f34e7a314c2657f2d62 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Wed, 22 Jul 2020 15:57:25 -0400 Subject: [PATCH 14/27] Remove import code --- scripts/rtenvsub.sh | 76 ++------------------------------------------ scripts/run_nginx.sh | 60 ++-------------------------------- 2 files changed, 4 
insertions(+), 132 deletions(-) mode change 100644 => 100755 scripts/rtenvsub.sh diff --git a/scripts/rtenvsub.sh b/scripts/rtenvsub.sh old mode 100644 new mode 100755 index e6f640f..5d65484 --- a/scripts/rtenvsub.sh +++ b/scripts/rtenvsub.sh @@ -21,82 +21,10 @@ version='0.1' umask 0077 dev_mode="${DEV_MODE:-false}" -default_library_name='shtdlib.sh' -default_base_download_url='https://raw.githubusercontent.com/sdelements/shtdlib/master' -default_install_path='/usr/local/bin' - -# Temporary debug function -type -t import | grep -q '^function$' || function debug { echo "${@:2}" ; } - -# Import or source -function import_or_source { - if type -t import | grep -q '^function$' ; then - debug 10 "Importing ${1}" - import "${1}" - else - debug 10 "Sourcing ${1}" - # shellcheck disable=1090 - source "${1}" - fi -} - -# Library download function, optionally accepts a full path/name and URL -function download_lib { - local tmp_path="${1:-$(mktemp)}" - local lib_url="${2:-${default_base_download_url}/${default_library_name}}" - curl -s -l -o "${tmp_path}" "${lib_url}" || wget --no-verbose "${lib_url}" --output-document "${tmp_path}" || return 1 -} - -# Library install function, optionally accepts a URL and a full path/name -# shellcheck disable=SC2120,SC2119 -function install_lib { - local lib_path="${1:-${default_install_path}/${default_library_name}}" - local lib_name="${2:-$(basename "${lib_path}")}" - local tmp_path="${3:-$(mktemp)}" - - echo "Installing library ${lib_name} to ${lib_path}" - download_lib "${tmp_path}" "${default_base_download_url}/${lib_name}" - mv "${tmp_path}" "${lib_path}" || sudo mv "${tmp_path}" "${lib_path}" || return 1 - chmod 755 "${lib_path}" || sudo chmod 755 "${lib_path}" || return 1 - import_or_source "${lib_path}" - color_echo green "Installed ${lib_name} to ${lib_path} successfully" -} - -# Library import function, accepts one optional parameter, name of the file to import -# shellcheck disable=SC2120,SC2119 -function import_lib { - local 
full_path - local lib_name="${1:-${default_library_name}}" - local lib_no_ext="${lib_name%.*}" - local lib_basename_s="${lib_no_ext##*/}" - full_path="$(readlink -f "${BASH_SOURCE[0]}" 2> /dev/null || realpath "${BASH_SOURCE[0]}" 2> /dev/null || greadlink -f "${BASH_SOURCE[0]}" 2> /dev/null || true)" - full_path="${full_path:-${0}}" - # Search current dir and walk down to see if we can find the library in a - # parent directory or sub directories of parent directories named lib/bin - while true; do - local pref_pattern=( "${full_path}/${lib_name}" "${full_path}/${lib_basename_s}/${lib_name}" "${full_path}/lib/${lib_name}" "${full_path}/bin/${lib_name}" ) - for pref_lib in "${pref_pattern[@]}" ; do - if [ -e "${pref_lib}" ] ; then - debug 10 "Found ${pref_lib}, attempting to import/source" - import_or_source "${pref_lib}" && return 0 - echo "Unable to import/source ${pref_lib}!" - fi - done - full_path="$(dirname "${full_path}")" - if [ "${full_path}" == '/' ] ; then - # If we haven't found the library try the PATH or install if needed - debug 10 "Attempting to import/source ${lib_name}" - import_or_source "${lib_name}" 2> /dev/null || install_lib "${default_install_path}/${lib_name}" "${lib_name}" && return 0 - # If nothing works then we fail - echo "Unable to import ${lib_name}" - return 1 - fi - done -} # Import the shell standard library -# shellcheck disable=SC2119 -import_lib +source /bin/shtdlib.sh + debug 10 "Running ${0} with PID: ${$}" diff --git a/scripts/run_nginx.sh b/scripts/run_nginx.sh index 3a09020..b2bcce2 100755 --- a/scripts/run_nginx.sh +++ b/scripts/run_nginx.sh @@ -24,65 +24,9 @@ version='0.0.1' # Set verbose logging for shell script #export verbosity=10 -default_library_name='shtdlib.sh' -default_base_download_url='https://raw.githubusercontent.com/sdelements/shtdlib/master' -default_install_path='/usr/local/bin' - -# Library download function, optionally accepts a full path/name and URL -function download_lib { - tmp_path="${1:-$(mktemp)}" - 
lib_url="${2:-${default_base_download_url}/${default_library_name}}" - curl -s -l -o "${tmp_path}" "${lib_url}" || wget --no-verbose "${lib_url}" --output-document "${tmp_path}" || return 1 -} - -# Library install function, optionally accepts a URL and a full path/name -# shellcheck disable=SC2120,SC2119 -function install_lib { - lib_path="${1:-${default_install_path}/${default_library_name}}" - lib_name="${2:-$(basename "${lib_path}")}" - tmp_path="${3:-$(mktemp)}" - - echo "Installing library ${lib_name} to ${lib_path}" - download_lib "${tmp_path}" "${default_base_download_url}/${lib_name}" - mv "${tmp_path}" "${lib_path}" || sudo mv "${tmp_path}" "${lib_path}" || return 1 - chmod 755 "${lib_path}" || sudo chmod 755 "${lib_path}" || return 1 - # shellcheck disable=SC1091,SC1090 - source "${lib_path}m" - color_echo green "Installed ${lib_name} to ${lib_path} successfully" -} - -# Library import function, accepts one optional parameter, name of the file to import -# shellcheck disable=SC2120,SC2119 -function import_lib { - lib_name="${1:-${default_library_name}}" - full_path="$(readlink -f "${BASH_SOURCE[0]}" 2> /dev/null || realpath "${BASH_SOURCE[0]}" 2> /dev/null || greadlink -f "${BASH_SOURCE[1]}" 2> /dev/null:-"${0}")" - # Search current dir and walk down to see if we can find the library in a - # parent directory or sub directories of parent directories named lib/bin - while true; do - pref_pattern=( "${full_path}/${lib_name}" "${full_path}/$(basename -s .sh "${lib_name}")/${lib_name}" "${full_path}/lib/${lib_name}" "${full_path}/bin/${lib_name}" ) - for pref_lib in "${pref_pattern[@]}" ; do - if [ -e "${pref_lib}" ] ; then - echo "Importing ${pref_lib}" - # shellcheck disable=SC1091,SC1090 - source "${pref_lib}" - return 0 - fi - done - full_path="$(dirname "${full_path}")" - if [ "${full_path}" == '/' ] ; then - # If we haven't found the library try the PATH or install if needed - # shellcheck disable=SC1091,SC1090 - source "${lib_name}" 2> /dev/null || 
install_lib "${default_install_path}/${lib_name}" "${lib_name}" && return 0 - # If nothing works then we fail - echo "Unable to import ${lib_name}" - return 1 - fi - done -} - # Import the shell standard library -# shellcheck disable=SC2119 -import_lib +source /bin/shtdlib.sh + # Dynamically figure add resolvers for nginx export NAMESERVERS="resolver $(grep nameserver /etc/resolv.conf | awk '{print $2}') 127.0.0.11 valid=10s;" -- GitLab From fef50ad0c60bd2988b127c699253f21c5863b671 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Wed, 22 Jul 2020 18:05:15 -0400 Subject: [PATCH 15/27] Reduce the size of the container, remove unused lines --- Dockerfile | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index f8cecb2..d78f3a8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,6 +2,17 @@ ARG BASE_REGISTRY=nexus-docker-secure.levelup-nexus.svc.cluster.local:18082 ARG BASE_IMAGE=opensource/nginx/nginx ARG BASE_TAG=1.19.0 + +# Down with the bloat +FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} as extractor + +COPY /jitt-*.tar.gz / + +USER root +RUN mkdir -p /jitt \ + && tar -zxf /jitt-*.tar.gz -C /jitt + + FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} # Default to local build context files @@ -33,15 +44,10 @@ RUN set -x \ && rm -f /var/log/nginx/access.log \ && rm -f /var/log/nginx/error.log +COPY --from=extractor /jitt / COPY /scripts/rtenvsub.sh /bin/rtenvsub.sh COPY /scripts/shtdlib.sh /bin/shtdlib.sh -COPY /jitt-${jitt_version}.tar.gz /jitt-${jitt_version}.tar.gz COPY /scripts/run_nginx.sh /bin/run_nginx.sh -RUN mkdir -p /jitt \ - && tar -zxf "/jitt-${jitt_version}.tar.gz" -C /jitt \ - && rm -fv "/jitt-${jitt_version}.tar.gz" - -# USER nginx HEALTHCHECK --interval=15s --timeout=10s --retries=3 CMD pgrep -lf nginx || exit 1 -- GitLab From ed67babe3fedd055aca6f551343cfae85dc9dc9e Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Thu, 23 Jul 2020 09:07:51 -0400 Subject: [PATCH 16/27] Copy files into correct 
path --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d78f3a8..72a69b0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -44,7 +44,7 @@ RUN set -x \ && rm -f /var/log/nginx/access.log \ && rm -f /var/log/nginx/error.log -COPY --from=extractor /jitt / +COPY --from=extractor /jitt /jitt/ COPY /scripts/rtenvsub.sh /bin/rtenvsub.sh COPY /scripts/shtdlib.sh /bin/shtdlib.sh COPY /scripts/run_nginx.sh /bin/run_nginx.sh -- GitLab From 0a010b949d22afaf97b5ad425fed06532db9719c Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Thu, 23 Jul 2020 10:26:11 -0400 Subject: [PATCH 17/27] Be specific about which tarball to use --- Dockerfile | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index 72a69b0..9c0581e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,18 +6,18 @@ ARG BASE_TAG=1.19.0 # Down with the bloat FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} as extractor -COPY /jitt-*.tar.gz / +ARG jitt_version=5.6.33 +COPY /jitt-${jitt_version}.tar.gz / USER root RUN mkdir -p /jitt \ - && tar -zxf /jitt-*.tar.gz -C /jitt + && tar -zxf /jitt-${jitt_version}.tar.gz -C /jitt FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} -# Default to local build context files ARG jitt_version=5.6.33 - +ENV VENDOR=security-compass LABEL name="SD Elements Just In Time Training (JITT) Container" \ maintainer="devops-support@securitycompass.com" \ vendor="Security Compass Ltd." \ @@ -26,7 +26,6 @@ LABEL name="SD Elements Just In Time Training (JITT) Container" \ summary="SD Elements Automatically Builds In And Enables Compliance Throughout The Software Lifecycle." \ description="SD Elements automatically identifies and classifies risks and translates complex requirements into actionable tasks that are assigned to your personnel to improve your security posture. It automates Risk Assessments, Threat Modeling, Secure Development, and Regulatory Compliance - at scale." 
-ENV VENDOR=security-compass USER root -- GitLab From fba3bc52e7aabaa4b8028ae2ae05a36242ef17ae Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Thu, 23 Jul 2020 16:12:10 -0400 Subject: [PATCH 18/27] Minor readme update --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1b80451..b089994 100644 --- a/README.md +++ b/README.md @@ -26,9 +26,10 @@ the version of memcached, in this case `5.6.33` 3. Use this command to build locally: ```bash - docker build . -t ubi_nginx-jitt:5.6.33 \ + export jitt_version='5.6.33' && \ + docker build . -t localhost/security-compass/jitt/nginx-jitt:"${jitt_version}_local" \ --build-arg BASE_REGISTRY="localhost" \ --build-arg BASE_IMAGE="opensource/nginx/nginx" \ --build-arg BASE_TAG="1.19.0" \ - --build-arg jitt_version="5.6.33" + --build-arg jitt_version="${jitt_version}" ``` -- GitLab From 9e8186a7b5565f147fa976beb4e4578f422eaa8c Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Mon, 27 Jul 2020 13:24:37 -0400 Subject: [PATCH 19/27] Minor readme update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b089994..2050b02 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ the version of memcached, in this case `5.6.33` ```bash export jitt_version='5.6.33' && \ - docker build . -t localhost/security-compass/jitt/nginx-jitt:"${jitt_version}_local" \ + docker build . 
-t localhost/security-compass/jitt/nginx-jitt:"local" \ --build-arg BASE_REGISTRY="localhost" \ --build-arg BASE_IMAGE="opensource/nginx/nginx" \ --build-arg BASE_TAG="1.19.0" \ -- GitLab From 2c8199e42a8ba58fef7e5ada1ef22f66fb5a3039 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Fri, 11 Sep 2020 13:33:38 -0400 Subject: [PATCH 20/27] Trim down shtdlib --- Dockerfile | 4 +- scripts/rtenvsub.sh | 179 +-- scripts/run_nginx.sh | 14 +- scripts/shtdlib.sh | 3050 ------------------------------------- scripts/shtdlib_dccscr.sh | 153 ++ 5 files changed, 199 insertions(+), 3201 deletions(-) delete mode 100644 scripts/shtdlib.sh create mode 100644 scripts/shtdlib_dccscr.sh diff --git a/Dockerfile b/Dockerfile index 9c0581e..91671fd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -31,8 +31,6 @@ USER root RUN set -x \ && dnf -y upgrade \ - && dnf -y install iproute gettext-libs procps-ng \ - && dnf -y install nss_wrapper gettext \ && dnf clean all \ && mkdir -p /var/nginx/proxy_temp \ && mkdir -p /var/nginx/client_body_temp \ @@ -45,7 +43,7 @@ RUN set -x \ COPY --from=extractor /jitt /jitt/ COPY /scripts/rtenvsub.sh /bin/rtenvsub.sh -COPY /scripts/shtdlib.sh /bin/shtdlib.sh +COPY /scripts/shtdlib_dccscr.sh /bin/shtdlib_dccscr.sh COPY /scripts/run_nginx.sh /bin/run_nginx.sh diff --git a/scripts/rtenvsub.sh b/scripts/rtenvsub.sh index 5d65484..1f1c0cd 100755 --- a/scripts/rtenvsub.sh +++ b/scripts/rtenvsub.sh @@ -15,27 +15,25 @@ # from SD Elements Inc.. # Version -version='0.1' +version='0.2' # Set a safe umask umask 0077 -dev_mode="${DEV_MODE:-false}" - # Import the shell standard library -source /bin/shtdlib.sh +source /bin/shtdlib_dccscr.sh -debug 10 "Running ${0} with PID: ${$}" +echo "Running ${0} with PID: ${$}" if ! 
whichs envsubst ; then - color_echo red "Unable to locate envsubst command, please make sure it's available" - color_echo cyan 'Perhaps this can be fixed with: apt-get -y install gettext-base' + echo "Unable to locate envsubst command, please make sure it's available" + echo 'Perhaps this can be fixed with: apt-get -y install gettext-base' exit 1 fi -if ! whichs inotifywait && ${dev_mode} ; then - color_echo red "Unable to locate the inotifywait command, please make sure it's available" - color_echo cyan 'Perhaps this can be fixed with: apt-get install inotify-tools' +if ! whichs inotifywait ; then + echo "Unable to locate the inotifywait command, please make sure it's available" + echo 'Perhaps this can be fixed with: apt-get install inotify-tools' exit 1 fi @@ -72,7 +70,6 @@ OPTIONS: -h, --help Show this message -d, --daemon Daemonize, run in the background -v, --verbose {verbosity_level} Set verbose mode (optionally accepts a integer level) - -t, --test Run unit tests Examples: ${0} /etc/nginx /usr/share/doc/nginx # Recursively map all files and directories from /usr/share/doc/nginx to /etc/nginx @@ -87,63 +84,60 @@ EOF parameter_array=( "${@}" ) # Parse command line arguments function parse_arguments { - debug 5 "Parse Arguments got argument: ${1}" + echo "Parse Arguments got argument: ${1}" case ${1} in '-') # This uses the parse_arguments logic to parse a tag and it's value # The value is passed on in the OPTARG variable which is standard # when parsing arguments with optarg. tag="${OPTARG}" - debug 10 "Found long argument/option" + echo "Found long argument/option" parse_opt_arg OPTARG '' parse_arguments "${tag}" ;; 'p'|'process') export process="${OPTARG}" - debug 5 "Set process name to signal to: ${process}" + echo "Set process name to signal to: ${process}" ;; 's'|'signal') export signal="${OPTARG}" - debug 5 "Set signal to: ${signal}" + echo "Set signal to: ${signal}" ;; 'o'|'overlay') overlay='true' - debug 5 "Overlay enabled!" + echo "Overlay enabled!" 
;; 'n'|'nofifo') nofifo='true' - debug 5 "Named pipes disabled, using files instead!" + echo "Named pipes disabled, using files instead!" ;; 'd'|'daemon') daemonize='true' - debug 5 "Daemon mode selected!" + echo "Daemon mode selected!" ;; 'v'|'verbose') parse_opt_arg verbosity '10' export verbose=true # shellcheck disable=SC2154 - debug 1 "Set verbosity to: ${verbosity}" - debug 1 "Set verbose to: ${verbose}" + echo "Set verbosity to: ${verbosity}" + echo "Set verbose to: ${verbose}" ;; 'h'|'help'|'version') # Help print_usage exit 0 ;; - 't'|'test') # Unit tests - run_unit_tests='true' - ;; '?') # Invalid option specified - color_echo red "Invalid option '${OPTARG}'" + echo "Invalid option '${OPTARG}'" print_usage exit 64 ;; ':') # Expecting an argument but none provided - color_echo red "Missing option argument for option '${OPTARG}'" + echo "Missing option argument for option '${OPTARG}'" print_usage exit 64 ;; '*') # Anything else - color_echo red "Unknown error while processing options" + echo "Unknown error while processing options" print_usage exit 64 ;; @@ -165,17 +159,16 @@ for (( index=${#@}-1 ; index>=0 ; index-- )) ; do break fi done -debug 10 "Non-argument parameters:" "${non_argument_parameters[*]:-}" +echo "Non-argument parameters:" "${non_argument_parameters[*]:-}" -export run_unit_tests="${run_unit_tests:-false}" export signal="${signal:-SIGHUP}" export process="${process:-}" export overlay="${overlay:-false}" export daemonize="${daemonize:-false}" export nofifo="${nofifo:-false}" -if [ "${#@}" -lt 2 ] && ! 
"${run_unit_tests}" ; then - color_echo red "You need to supply at least one source dir/file and a destination directory" +if [ "${#@}" -lt 2 ] ; then + echo "You need to supply at least one source dir/file and a destination directory" print_usage exit 64 fi @@ -185,7 +178,7 @@ function setup_named_pipe { local destination="${1}" local file="${2}" local path="${3}" - debug 10 "Creating named pipe: ${destination}/${file#${path}} with permissions identical to ${file}" + echo "Creating named pipe: ${destination}/${file#${path}} with permissions identical to ${file}" # Create a named pipe for each file with same permissions, then # set up an inotifywait process to monitor and trigger envsubst mkfifo -m "$(stat -c '%a' "${file}")" "${destination}/${file#${path}}" @@ -201,7 +194,7 @@ function render_file { local destination="${1}" local file="${2}" local path="${3}" - debug 10 "Rendering file: ${destination}/${file#${path}} from template: ${file}" + echo "Rendering file: ${destination}/${file#${path}} from template: ${file}" envsubst < "${file}" > "${destination}/${file#${path}}" "$(compgen -v | sed -e 's/^/\$/g' | tr '\n' ',')" } @@ -211,7 +204,7 @@ function create_directory_structure { local destination="${1}" local dir="${2}" local path="${3}" - debug 10 "Creating directory ${destination}/${dir#${path}} with permissions identical to ${dir}" + echo "Creating directory ${destination}/${dir#${path}} with permissions identical to ${dir}" # Create each directory in the mirror with same permissions mkdir -m "$(stat -c '%a' "${dir}")" -p "${destination}/${dir#${path}}" } @@ -231,11 +224,11 @@ function inotify_looper { for event in "${dir_file_events[@]:2}"; do case "${event}" in 'ACCESS'|'CLOSE_NOWRITE'|'OPEN') #Non events - color_echo red "Non mutable event on: ${dir_file_events[*]}, this should not happen since we don't subscribe to these" + echo "Non mutable event on: ${dir_file_events[*]}, this should not happen since we don't subscribe to these" exit 1 ;; 
'MODIFY'|'CLOSE_WRITE') # File modified events - debug 6 "File modification event on: ${dir_file_events[*]}" + echo "File modification event on: ${dir_file_events[*]}" if ${nofifo} ; then render_file "${destination}" "${dir_file_events[0]}/${dir_file_events[1]}" "${full_path}" fi @@ -244,7 +237,7 @@ function inotify_looper { fi ;; 'MOVED_TO'|'CREATE') # New file events - debug 6 "New file event on: ${dir_file_events[*]} ${event}" + echo "New file event on: ${dir_file_events[*]} ${event}" create_directory_structure "${destination}" "${dir_file_events[0]}" "${full_path}" if ${nofifo} ; then render_file "${destination}" "${dir_file_events[0]}/${dir_file_events[1]}" "${full_path}" @@ -258,8 +251,8 @@ function inotify_looper { 'MOVED_FROM'|'DELETE'|'MOVE_SELF') # File/Directory deletion events fs_object="${dir_file_events[0]}/${dir_file_events[1]}" mirror_object="${destination}/${fs_object#${full_path}}" - debug 5 "Filesystem object removed from source, removing from mirror" - debug 5 "Source: ${fs_object} Pipe: ${mirror_object}" + echo "Filesystem object removed from source, removing from mirror" + echo "Source: ${fs_object} Pipe: ${mirror_object}" if [ -f "${fs_object}" ] ; then rm -f "${mirror_object}" elif [ -d "${fs_object}" ] ; then @@ -270,7 +263,7 @@ function inotify_looper { fi ;; 'DELETE_SELF'|'UNMOUNT') # Stop/exit/cleanup events - color_echo red "Received fatal event: ${dir_file_events[0:1]} ${event}, exiting!" + echo "Received fatal event: ${dir_file_events[0:1]} ${event}, exiting!" if [ -n "${process}" ] ; then signal_process "${process}" "${signal}" fi @@ -290,20 +283,20 @@ function mirror_envsubst_paths { destination="$(readlink -m "${1}")" sources=("${@:2}") if ! [ -d "${destination}" ] ; then - color_echo red "Destination path: ${destination} is not a directory, exiting!" + echo "Destination path: ${destination} is not a directory, exiting!" 
exit 1 fi declare -a looper_pids # Iterate over each source file/directory, exclude root dir if specified for path in "${sources[@]}"; do if ! [ -e "${path}" ] ; then - color_echo red "Source path: ${path} does not exist, exiting!" + echo "Source path: ${path} does not exist, exiting!" exit 1 fi full_path="$(readlink -m "${path}")" if [ "${full_path#${destination}}" != "${full_path}" ] || [ "${destination#${full_path}}" != "${destination}" ] ; then - color_echo red "Source/Destination directories can't be subdirectories of each other or the same directory" + echo "Source/Destination directories can't be subdirectories of each other or the same directory" exit 64 fi @@ -313,7 +306,7 @@ function mirror_envsubst_paths { # Create directory structure, check if destination is empty if [ -n "$(ls -A "${destination}")" ] && ! ${overlay} ; then - color_echo red "Destination directory is not empty, if you still want to overlay into it please use the -o/--overlay option" + echo "Destination directory is not empty, if you still want to overlay into it please use the -o/--overlay option" print_usage exit 1 else @@ -325,12 +318,9 @@ function mirror_envsubst_paths { # Create named pipes / files and set up cleanup on signals for them if [ -z "${files[*]}" ] ; then - color_echo magenta "Destination directory does not contain any files, no pipes created for ${full_path}!" + echo "Destination directory does not contain any files, no pipes created for ${full_path}!" 
else for file in "${files[@]:-}"; do - if ${dev_mode} ; then - add_on_sig "rm -f ${destination}${file#${full_path}}" - fi if ${nofifo} ; then render_file "${destination}" "${file}" "${full_path}" else @@ -341,109 +331,18 @@ function mirror_envsubst_paths { # Create symbolic links as needed and set up cleanup for link in "${links[@]}" ; do - color_echo green "Processing symbolic link ${link}" + echo "Processing symbolic link ${link}" target="${destination}${link#${full_path}}" ln --symbolic "$(readlink ${link})" "${target}" add_on_sig "unlink ${target}" done - - if ${dev_mode} ; then - # Set up safe cleanup for directory structure (needs to be done in - # reverse order to ensure safety of operation without recursive rm - local index - for (( index=${#directories[@]}-1 ; index>=0 ; index-- )) ; do - add_on_sig "rmdir ${destination}${directories[${index}]#${full_path}}" - done - - # Run update loop and detach it - if ${daemonize} ; then - inotify_looper "${destination}" "${full_path}" & - else - inotify_looper "${destination}" "${full_path}" & - fi - looper_pids+=( "${!}" ) - fi done if ! ${daemonize} ; then - debug 8 "Waiting for looper pids: ${looper_pids[*]}" + echo "Waiting for looper pids: ${looper_pids[*]}" wait "${looper_pids[*]}" fi } -# Unit tests -# shellcheck disable=SC2046,SC2154,SC2016,SC2034,SC2064 -function unit_tests { - export verbosity=10 - debug 5 "Running unit tests!" 
- # Basic setup - export TEST_VARIABLE1='/dev/null' - export TEST_VARIABLE2='example.com' - create_secure_tmp tmp_source_test_dir 'dir' - create_secure_tmp tmp_dest_test_dir 'dir' - create_secure_tmp tmp_source_test_file 'file' "${tmp_source_test_dir}" - test_string=$(tr -dc '[:alnum:]' < /dev/urandom | fold -w 1024 | head -n 1) - export signal='SIGUSR1' - # Set up a proces to listen to signals and perform actions - signal_test_file="${tmp_source_test_dir}/signal_test_file" - process="$(signal_processor "${signal}" "test -f ${signal_test_file} && echo ${test_string} > ${signal_test_file}")" - export process - - # Test setting up a named pipe - setup_named_pipe "${tmp_dest_test_dir}" "${tmp_source_test_file}" "${tmp_source_test_dir}" & - echo "${test_string}" > "${tmp_source_test_file}" & - sleep 1 - read_test_string="$(cat "${tmp_dest_test_dir}/${tmp_source_test_file#${tmp_source_test_dir}}")" - assert [ "${test_string}" == "${read_test_string}" ] - - # Test creating directory structure - mkdir "${tmp_source_test_dir}/sub_dir" - create_directory_structure "${tmp_dest_test_dir}" "${tmp_source_test_dir}/sub_dir" "${tmp_source_test_dir}" - assert [ "$(basename $(find "${tmp_dest_test_dir}" -mindepth 1 -type d))" == "$(basename $(find "${tmp_source_test_dir}" -mindepth 1 -type d))" ] - - # Test mirroring a more complicated structure - create_secure_tmp tmp_mirror_test_dir 'dir' - mkdir "${tmp_source_test_dir}/sub_dir/sub_sub_dir" - touch "${tmp_source_test_dir}/test_file" - touch "${tmp_source_test_dir}/sub_dir/sub_file" - touch "${tmp_source_test_dir}/sub_dir/sub_sub_dir/sub_sub_file" - - mirror_envsubst_paths "${tmp_mirror_test_dir}" "${tmp_source_test_dir}" & - - sleep 1 - mapfile -t files < <(find "${tmp_source_test_dir}" -type f) - mapfile -t pipes < <(find "${tmp_mirror_test_dir}" -type p) - assert [ "${#files}" -eq "${#pipes}" ] - - # Check each file matches - for (( index=0 ; index<${#files[@]} ; index++ )) ; do - assert diff "${files[${index}]}" 
"${pipes[${index}]}" - done - - # Test dynamically adding a file with variables - echo 'setting1=${TEST_VARIABLE1}' > "${tmp_source_test_dir}/settings_file" - sleep 1 - assert [ "$(cat "${tmp_mirror_test_dir}/settings_file")" == "$(cat "${tmp_mirror_test_dir}/settings_file")" ] - echo 'setting2=$TEST_VARIABLE2' >> "${tmp_source_test_dir}/settings_file" - sleep 1 - assert [ "$(cat "${tmp_mirror_test_dir}/settings_file")" == "$(cat "${tmp_mirror_test_dir}/settings_file")" ] - - # Test signaling - touch "${tmp_source_test_dir}/signal_test_file" - sleep 1 - assert test -f "${tmp_source_test_dir}/signal_test_file" - test_string_from_trap="$(cat "${signal_test_file}")" - assert [ "${test_string_from_trap}" == "${test_string}" ] - color_echo green "All tests successfully completed" - # Make sure all descendant processes get terminated - kill $(pgrep --pgroup "${$}" | grep -v "${0}") - exit 0 -} - -# Run tests or not -if ${run_unit_tests} ; then - unit_tests -fi - # Call the main mirroring function if ${daemonize} ; then mirror_envsubst_paths "${non_argument_parameters[@]:-}" & diff --git a/scripts/run_nginx.sh b/scripts/run_nginx.sh index b2bcce2..f7d6cf1 100755 --- a/scripts/run_nginx.sh +++ b/scripts/run_nginx.sh @@ -19,17 +19,15 @@ set -eu # Version # shellcheck disable=2034 -version='0.0.1' - -# Set verbose logging for shell script -#export verbosity=10 +version='0.0.2' # Import the shell standard library -source /bin/shtdlib.sh +source /bin/shtdlib_dccscr.sh # Dynamically figure add resolvers for nginx -export NAMESERVERS="resolver $(grep nameserver /etc/resolv.conf | awk '{print $2}') 127.0.0.11 valid=10s;" +export NAMESERVERS="resolver $(grep nameserver /etc/resolv.conf | awk '{print $2}') valid=10s;" + # Create config files rtenvsub.sh --nofifo --overlay --process nginx --daemon /etc/nginx /run/nginx & @@ -41,6 +39,6 @@ until test -e '/etc/nginx/nginx.conf' && test -d '/etc/nginx/sites-enabled'; do done # Run nginx -color_echo green 'Starting nginx' +echo 
'Starting nginx' nginx -g 'daemon off;' || exit_on_fail -debug 2 "Nginx exited with return code: ${?}" +echo "Nginx exited with return code: ${?}" diff --git a/scripts/shtdlib.sh b/scripts/shtdlib.sh deleted file mode 100644 index b0f4bba..0000000 --- a/scripts/shtdlib.sh +++ /dev/null @@ -1,3050 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2034,SC2174,SC2016,SC2026,SC2206,SC2128 -# -# This is a collection of shared functions used by SD Elements products -# -# Copyright (c) 2018 SD Elements Inc. -# -# All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains -# the property of SD Elements Incorporated and its suppliers, -# if any. The intellectual and technical concepts contained -# herein are proprietary to SD Elements Incorporated -# and its suppliers and may be covered by U.S., Canadian and other Patents, -# patents in process, and are protected by trade secret or copyright law. -# - -# Set a debug log file to be used in addition to stderr/stdout -# debug_log_file="/tmp/${0}.log" - -# If there is no TTY then it's not interactive -if ! [[ -t 1 ]]; then - interactive=false -fi -# Default is interactive mode unless already set -interactive="${interactive:-true}" - -# Create which -s alias (whichs), same as POSIX: -s -# No output, just return 0 if all of the executables are found, or 1 if some were not found. -function whichs { - # Bash 3.1 does not flush stdout so we use tee to make sure it gets done - command -v "${*}" &> /dev/null | tee /dev/null &> /dev/null - return "${PIPESTATUS}" -} - -# Unless disabled set strict mode for non-interactive mode -if ${strict_mode:-true} && ! 
${interactive} ; then - set -euo pipefail -fi - -# Set Version -shtdlib_version='0.2' - -# Timestamp, the date/time we started -start_timestamp=$(date +"%Y%m%d%H%M") - -# Store original arguments/parameters -#base_arguments="${@:-}" - -# Store original tty -init_tty="$(tty || true)" - -# Check if shell supports array append syntax -array_append_supported="$(bash -c 'a=(); a+=1 &>/dev/null && echo true || echo false')" - -# Exit unless syntax supports array append -if ! "${array_append_supported}" ; then - echo "This library (${0}) requires bash version 3.1+ with array append support to work properly" - exit 1 -fi - -# Determine OS family and OS type -OS="${OS:-}" -os_family='Unknown' -os_name='Unknown' -os_codename='Unknown' -# Preferred methods -if [ -e '/etc/redhat-release' ] ; then - os_family='RedHat' -elif [ -e '/etc/lsb-release' ] ; then - os_family='Debian' -else - # Educated guesses - yum help help > /dev/null 2>&1 && os_family='RedHat' - apt-get help > /dev/null 2>&1 && os_family='Debian' - echo "${OSTYPE}" | grep -q 'darwin' && os_family='MacOSX' - if [ "${OS}" == 'SunOS' ]; then os_family='Solaris'; fi - if [ "${OSTYPE}" == 'cygwin' ]; then os_family='Cygwin'; fi - if [ -f '/etc/alpine-release' ] ; then os_family='Alpine'; fi -fi -os_type="$(uname)" - -# Determine virtualization platform in a way that ignores SIGPIPE, requires root -if [ "${EUID}" == 0 ] && command -v virt-what &> /dev/null ; then - if [ -f '/.dockerenv' ] ; then - virt_platform='Docker' - else - virt_platform="$(virt-what | head -1 || if [[ ${?} -eq 141 ]]; then true; else exit ${?}; fi)" - fi -elif [ "${os_type}" == "Linux" ] && grep -Eq '/(lxc|docker)/[[:xdigit:]]{64}' /proc/self/cgroup; then - # A method of detecting if Docker is the virtual platform on Linux containers - virt_platform='Docker' -else - virt_platform="Unknown" -fi - -# Set major and minor version variables -if [ "${os_family}" == 'RedHat' ]; then - major_version="$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | awk 
-F. '{print $1}')" - minor_version="$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | awk -F. '{print $2}')" - if ! [[ ${major_version} =~ ^-?[0-9]+$ ]] ; then # If major version is not an integer - major_version="$(rpm -qa \*-release | grep -Ei 'oracle|redhat|centos' | cut -d'-' -f3)" - fi - if ! [[ ${minor_version} =~ ^-?[0-9]+$ ]] ; then # If minor version is not an integer - minor_version="$(rpm -qa \*-release | grep -Ei 'oracle|redhat|centos' | cut -d'-' -f4 | cut -d'.' -f1)" - fi - - # The following is a more robust way of determining the OS name than - # `rpm-qa \*release | grep -q -Ei "^(redhat|centos)"` - if grep -qEi 'centos' /etc/redhat-release; then - os_name='centos'; - elif grep -qEi 'red ?hat' /etc/redhat-release; then - os_name='redhat'; - fi - patch_version=0 -elif [ "${os_family}" == 'Debian' ]; then - if [ -e '/etc/os-release' ] ; then - # VERSION_CODENAME is the built-in optional identifier - grep -q VERSION_CODENAME /etc/os-release && os_codename="$(grep VERSION_CODENAME /etc/os-release | awk -F= '{print $2}')" - # For oses based on Ubuntu we often need the Ubuntu (parent distro) codename (e.g. repository configuration) - grep -q UBUNTU_CODENAME /etc/os-release && os_codename="$(grep UBUNTU_CODENAME /etc/os-release | awk -F= '{print $2}')" - fi - if [ -e '/etc/lsb-release' ] ; then - major_version="$(grep DISTRIB_RELEASE /etc/lsb-release | awk -F= '{print $2}' | awk -F. '{print $1}')" - minor_version="$(grep DISTRIB_RELEASE /etc/lsb-release | awk -F= '{print $2}' | awk -F. '{print $2}')" - os_name="$(grep DISTRIB_ID /etc/lsb-release | awk -F= '{print $2}')" - else - major_version="$(awk -F. '{print $1}' /etc/debian_version)" - minor_version="$(awk -F. 
'{print $2}' /etc/debian_version)" - os_name='debian' - fi - patch_version=0 -elif [ "${os_family}" == 'Alpine' ]; then - # A safe way to read the version regardless of bash version and buggy - # implementations - # shellcheck disable=2207 - command -v mapfile &> /dev/null | tee /dev/null &> /dev/null && mapfile -d. -t full_version < /etc/alpine-release &> /dev/null || full_version=($(awk -F. '{printf("%s %s %s\n", $1, $2, $3)}' /etc/alpine-release)) - major_version="${full_version[0]}" - minor_version="${full_version[1]}" - patch_version="${full_version[2]}" - os_name='alpine' -fi - -# Filters a stream of local addresses from inet adders formatted lines -function filter_sort_local_ip_addresses { - grep -v '127.' | \ - sort -Vu | \ - grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | \ - grep -Eo '([0-9]*\.){3}[0-9]*' -} - -# Gets local IP addresses (excluding localhost) and prints one per line -function get_local_ip_addresses { - local -a all_ipv4 - local -a local_iv4 - if whichs ip ; then - ip -4 addr show | filter_sort_local_ip_addresses - elif whichs ifconfig ; then - ifconfig | filter_sort_local_ip_addresses - else - $(awk '/32 host/ { print "inet " f } {f=$2}' > "${debug_log_file}" - fi -} - -# Debug function for verbose debugging -# Note debug is special because it's safe even in subshells because it bypasses -# the stdin/stdout and writes directly to the terminal -function debug { - if [ "${verbosity:-1}" -ge "${1}" ]; then - if [ -w "${init_tty}" ] ; then - color_echo yellow "${*:2}" > "${init_tty}" - else - color_echo yellow "${*:2}" >&2 - fi - fi -} - -# Error function for verbose explicit error messages -# First argument is the priority, second is the log message -# A priority of 0 will disable writing of errors to the syslog -function error { - if whichs logger ; then - logger --priority "${1}" "${*:2}" - else - debug 3 "Unable to fing logger command to write to syslog" - fi - if [ -w "${init_tty}" ] ; then - color_echo red "${*:2}" > "${init_tty}" - else - 
color_echo red "${*:2}" >&2 - fi -} - -# Fails/exits if the exit code of the last command does not match the one -# specified in the first argument. -# Example use: -# touch /tmp/test_file || conditional_exit_on_fail 128 "Failed to create tmp file and touch did not return 128" -function conditional_exit_on_fail { - valid_exit_codes=(0 "${1}") - if ! in_array "${?}" "${valid_exit_codes[@]}" ; then - exit_on_fail "${@}" - fi -} - -# Umask decorator, changes the umask for a function -# To use this add a line like the following (without #) as the first line of a function -# umask_decorator "${FUNCNAME[0]}" "${@:-}" && return -# umask_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with umask_decorator" - -# To specify a different umask set the umask_decorator_mask variable to the -# desired umask. -function umask_decorator { - if [ "${FUNCNAME[0]}" != "${FUNCNAME[2]:-}" ] ; then - local mask="${umask_decorator_mask:-0007}" - local original_mask - original_mask="$(umask)" - umask "${mask}" - debug 10 "Set umask to ${mask}" - #shellcheck disable=2068 - ${@} - umask "${original_mask}" - debug 10 "Set umask to ${original_mask}" - return 0 - fi - return 1 -} - -# Bash behaviour option decorator -# Allows changing/setting bash options for a command/function (code block) restoring -# the original once it's been executed and it's calls are complete. -# Requires an option name (see shopt) and a truthyness value "true"/"false" or -# other command/function that returns 0/1. 
# These are set using the variables
# shopt_decorator_option_name and shopt_decorator_option_value
# To use this add a line like the following (without #) as the first line of a function
# Example:
# function smarter_sort {
#     # 'sort' doesn't properly handle SIGPIPE
#     shopt_decorator_option_name='pipefail'
#     shopt_decorator_option_value='false'
#     shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator"
#
#     echo "Bash option pipefail is set to false for this code"
# }
# Decorator that re-runs the calling function with a bash option ('shopt -o')
# temporarily forced to a desired true/false state, restoring the opposite
# state afterwards. Returns 121 when invoked from the already-decorated call
# so the caller's `&& return ||` pattern can detect recursion.
function shopt_decorator {
    debug 10 "${FUNCNAME} called with ${*}"
    if [ -n "${shopt_decorator_option_value:-}" ] && [ -n "$(shopt -o "${shopt_decorator_option_name:-}")" ] ; then
        # FUNCNAME[2] is the grandparent frame; if it equals this function we
        # are inside the decorated re-invocation and must not recurse again
        if [ "${FUNCNAME[0]}" != "${FUNCNAME[2]:-}" ] ; then
            if shopt -qo "${shopt_decorator_option_name}" ; then
                # Option is set
                if ! "${shopt_decorator_option_value}" ; then
                    # Option should not be set
                    debug 10 "Temporarily unsetting bash option ${shopt_decorator_option_name}"
                    shopt -uo "${shopt_decorator_option_name}"
                else
                    debug 10 "No need to set ${shopt_decorator_option_name}, it's already ${shopt_decorator_option_value}"
                fi
                "${@}"
                return_code="${?}"
                debug 10 "Got return code ${return_code}"
                # Set the option again in case it was unset
                debug 10 "(Re)Setting ${shopt_decorator_option_name}"
                shopt -so "${shopt_decorator_option_name}"
                return ${return_code}
            else
                # Option is not set
                if "${shopt_decorator_option_value}" ; then
                    # Option should be set
                    debug 10 "Temporarily setting bash option ${shopt_decorator_option_name}"
                    shopt -so "${shopt_decorator_option_name}"
                else
                    debug 10 "No need to unset ${shopt_decorator_option_name}, it's already ${shopt_decorator_option_value}"
                fi
                "${@}"
                return_code="${?}"
                debug 10 "Got return code ${return_code}"
                # Unset the option in case it was set
                debug 10 "(Re)Unsetting ${shopt_decorator_option_name}"
                shopt -uo "${shopt_decorator_option_name}"
                return ${return_code}
            fi
        fi
        # Calling function is the decorator, skip
        debug 10 "Already decorated, returning 121"
        return 121
    else
        color_echo red "Called ${FUNCNAME[*]} without setting required variables with valid option name/value. The variables shopt_decorator_option_name and shopt_decorator_option_value need to be set to a valid shopt option and a command/function that evaluates true/false, 'true'/'false' are valid commands"
        exit 126
    fi
    # We should never get here
    exit 127
}

# Test decorator
# Forces a function to be executed in all bash variants using the bashtester
# submodule and containers. Requires docker to be installed and git submodules
# to be present and up to date.
# To use this add a line like the following (without #) as the first line of a function
# test_decorator "${FUNCNAME[0]}" "${@:-}" && return
# test_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with test_decorator"

# To specify a different set of bash versions set supported_bash_versions to a
# space separated string of the supported versions.
function test_decorator {
    # If not running in a container (grep of /proc/1/cgroup is a heuristic
    # container check; FUNCNAME guard prevents recursive decoration)
    if [ "${FUNCNAME[0]}" != "${FUNCNAME[2]:-}" ] && ! grep -q docker /proc/1/cgroup 2> /dev/null ; then
        default_bash_versions=( '3.2.57' \
                                '4.0.44' \
                                '4.1.17' \
                                '4.2.53' \
                                '4.3.48' \
                                '4.4.23' \
                                '5.0-beta' )
        # Intentionally unquoted so a space separated override splits into elements
        supported_bash_versions=( ${supported_bash_versions[@]:-"${default_bash_versions[@]}"} )
        verbosity="${verbosity:-}" bash_images="${supported_bash_versions[*]}" bashtester/run.sh ". /code/$(basename ${BASH_SOURCE[0]}) && ${*}"
        return 0
    fi
    return 1
}

# Imports/Sources an external script if it's not already been imported/sourced
# or is being imported/sourced as determined by BASH_SOURCE
# Only accepts one argument, the file to source.
# Returns 0 if file is successfully imported or has already been imported.
# For opportunistic usage use the following pattern:
# file_to_import='my_file_path'
# type -t import | grep -q '^function$' && import "${file_to_import}" || source "${file_to_import}"
declare -a sourced_imported_files
sourced_imported_files=()
# Sources a file exactly once, tracking files by content hash so the same
# script reached via different paths is still only sourced a single time.
function import {
    assert test -n "${1}"
    assert test -e "${1}"
    local hasher
    if whichs shasum; then
        hasher='shasum'
    elif whichs md5sum; then
        hasher='md5sum'
    elif whichs cksum; then
        hasher='cksum'
    else
        debug 1 "Unable to find a valid hashing command, blindly importing/sourcing!"
        # shellcheck disable=1090
        source "${1}" && return 0
    fi
    # Create a hash of the target file
    target_file_hash="$("${hasher}" "${1}")"

    # Add all files in source history to the list of imported files
    for source_file in "${BASH_SOURCE[@]}"; do
        source_file_hash="$("${hasher}" "${source_file}" | awk '{print $0}')"
        if ! in_array "${source_file_hash}" "${sourced_imported_files[@]:-}" ; then
            sourced_imported_files+=( "${source_file_hash}" )
        fi
    done

    # Check if file has already been sourced/imported
    if in_array "${target_file_hash}" "${sourced_imported_files[@]}" ; then
        debug 5 "Source file ${1} has already been imported/sourced, skipping"
        return 0
    fi

    # Finally import/source the file if needed
    debug 7 "Sourcing file ${1}"
    sourced_imported_files+=( "${target_file_hash}" )
    # shellcheck disable=1090
    source "${1}" && return 0
}

# A platform (readlink implementation) neutral way to follow symlinks
# Tries GNU readlink/realpath, then the Homebrew g-prefixed variants, then
# falls back to parsing stat output and recursing until a real path is found.
function readlink_m {
    debug 10 "readlink_m called with: ${*}"
    # Intentionally unquoted: a "name target" pair from stat splits into two args
    args=( ${@} )
    if [ "${#args[@]}" -eq 0 ] ; then
        color_echo red 'readlink_m needs at least one argument, none were provided'
        return 64
    elif [ "${#args[@]}" -gt 1 ] ; then
        base_path="$(dirname "${args[0]}")"
        new_path="${base_path}/${args[1]}"
    elif whichs readlink && readlink -f "${args[0]}" &> /dev/null ; then
        readlink -f "${args[0]}"
        return 0
    elif whichs readlink && readlink -m "${args[0]}" &> /dev/null ; then
        readlink -m "${args[0]}"
        return 0
    elif whichs realpath && realpath -m "${args[0]}" &> /dev/null ; then
        realpath -m "${args[0]}"
        return 0
    # Fixed: was 'whichs greadink' (typo), which made this branch unreachable
    elif whichs greadlink ; then
        greadlink -m "${args[0]}"
        return 0
    elif whichs grealpath ; then
        grealpath "${args[0]}"
        return 0
    elif whichs realpath ; then
        realpath "${args[0]}"
        return 0
    elif [ -e "${args[0]}" ] ; then
        if stat -f "%N %Y" "${args[0]}" &> /dev/null ; then
            new_path="$(stat -f "%N %Y" "${args[0]}")"
        # Fixed: probe now matches the command actually run (was probing
        # 'stat -f "%n %N"' but executing 'stat --format')
        elif stat --format '%n %N' "${args[0]}" &> /dev/null ; then
            new_path="$(stat --format '%n %N' "${args[0]}" | tr -d "‘’")"
        else
            color_echo red "Unable to find a usable way to determine full path (readlink_m)"
            exit_on_fail
        fi
    else
        color_echo red "Unable to find a usable way to determine full path (readlink_m)"
        exit_on_fail
    fi
    # Intentionally unquoted so "name target" splits into two elements
    new_path=( ${new_path} )
    debug 10 "Processed path is: ${new_path[*]}"
    if [ ${#new_path[@]} -gt 1 ] || [ -L "${new_path[0]}" ] ; then
        readlink_m "${new_path[@]}"
    elif [ -e "${new_path[0]}" ] ; then
        echo "${new_path[0]}"
        return 0
    elif command -v realpath ; then
        realpath "${args[0]}"
        return 0
    else
        debug 10 "Failed to resolve path: ${new_path[*]}"
        return 1
    fi
}

# Platform independent version sort
# When input is piped it's assumed to be space and/or newline (NL) delimited
# When passed as parameters each one is processed independently
function _version_sort {
    debug 12 "${FUNCNAME} called with ${*}"
    # 'sort' doesn't properly handle SIGPIPE
    shopt_decorator_option_name='pipefail'
    shopt_decorator_option_value='false'
    # shellcheck disable=2015
    shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator"

    if sort --help 2>&1 | grep -q version-sort ; then
        local vsorter='sort --version-sort'
    else
        debug 10 "Using suboptimal version sort due to old Coreutils/Platform"
        local vsorter='sort -t. -k1,1n -k2,2n -k3,3n -k4,4n'
    fi

    for arg in "${@}" ; do
        echo "${arg}"
    done | ${vsorter}
}
# Public wrapper around _version_sort that also accepts piped input.
# shellcheck disable=2120
function version_sort {
    # First command needs to be read, this way any piped input goes to it
    while read -rt "${read_timeout:-1}" piped_data; do
        declare -a piped_versions
        debug 10 "Versions piped to ${FUNCNAME}: ${piped_data}"
        # shellcheck disable=2086
        piped_versions+=( ${piped_data} )
    done
    shopt_decorator_option_name='nounset'
    shopt_decorator_option_value='false'
    # shellcheck disable=2015
    shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator"
    # shellcheck disable=2068
    _version_sort ${@} ${piped_versions[@]}
}

# Increment a dotted version number by 1, carrying between segments
# (e.g. 1.9 -> 2.0 when the segment width overflows)
function version_increment {
    declare -a segment=( ${1//\./ } )
    declare new_version
    declare -i carry=1

    for (( n=${#segment[@]}-1; n>=0; n-=1 )); do
        length=${#segment[n]}
        new_version=$((segment[n]+carry))
        # Carry only when the incremented segment grew wider than the original
        [ "${#new_version}" -gt "${length}" ] && carry=1 || carry=0
        [ "${n}" -gt 0 ] && segment[n]=${new_version: -length} || segment[n]=${new_version}
    done
    new_version="${segment[*]}"
    echo -e "${new_version// /.}"
}

# Allows clear assert syntax; runs its arguments and fails hard on non-zero
function assert {
    debug 10 "Assertion made: ${*}"
    # shellcheck disable=SC2068
    if ! "${@}" ; then
        color_echo red "Assertion failed: '${*}'"
        exit_on_fail
    fi
}

# A bash only version of basename -s: strips directory and last extension
function basename_s {
    local path="${*}"
    local path_no_ext="${path%.*}"
    local basename="${path_no_ext##*/}"
    echo "${basename}"
}

# Converts relative paths to full paths, ignores invalid paths
# Accepts either the path or name of a variable holding the path
function finalize_path {
    local setvar
    assert test -n "${1}"
    # Check if there is a filesystem object matching the path
    if [ -e "${1}" ] || [[ "${1}" =~ '/' ]] || [[ "${1}" =~ '~' ]]; then
        debug 10 "Assuming path argument: ${1} is a path"
        path="${1}"
        setvar=false
    else
        debug 5 "Assuming path argument: ${1} is a variable name"
        declare path="${!1}"
        setvar=true
    fi
    if [ -n "${path}" ] && [ -e "${path}" ] ; then
        # busybox and macOS readlink lack -m, use the portable fallback
        if [ "$(basename "$(readlink "$(command -v readlink)")")" == 'busybox' ] || [ "${os_family}" == 'MacOSX' ] ; then
            full_path=$(readlink_m "${path}")
        else
            full_path="$(readlink -m "${path}")"
        fi
        debug 10 "Finalized path: '${path}' to full path: '${full_path}'"
        if [ -n "${full_path}" ]; then
            if ${setvar} ; then
                export "$1"="${full_path}"
            else
                echo "${full_path}"
            fi
        fi
    else
        debug 5 "Unable to finalize path: ${path}"
    fi
}

# Store full path to this script
script_full_path="${0}"
if [ ! -f "${script_full_path}" ] ; then
    script_full_path="$(pwd)"
fi
finalize_path script_full_path
run_dir="${run_dir:-$(dirname "${script_full_path}")}"

# Allows checking of exit status, on error print debugging info and exit.
# Takes an optional error message in which case only it will be shown
# This is typically only used when running in non-strict mode but when errors
# should be raised and to help with debugging
function exit_on_fail {
    message="${*:-}"
    if [ -z "${message}" ] ; then
        color_echo red "Last command did not execute successfully but is required!" >&2
    else
        color_echo red "${*}" >&2
    fi
    debug 10 "[$( caller )] ${*:-}"
    debug 10 "BASH_SOURCE: ${BASH_SOURCE[*]}"
    debug 10 "BASH_LINENO: ${BASH_LINENO[*]}"
    debug 0 "FUNCNAME: ${FUNCNAME[*]}"
    # Exit if we are running as a script, else return
    if [ -f "${script_full_path}" ]; then
        exit 1
    else
        return 1
    fi
}

# Returns the index number of the lowest version, in effect this means it
# returns true if the first value is the smallest but will always return
# the index of the lowest version. In the case of multiple matches, the lowest
# (the first match) index is returned.
# Example:
# compare_versions '1.1.1 1.2.2test' -> returns 0 # True
# compare_versions '1.2.2 1.1.1' -> returns 1 # False
# compare_versions '1.0.0 1.1.1 2.2.2' -> returns 0 # True
# compare_versions '4.0.0 3.0.0 2.0.0 1.1.1test 1.0.0 v5.0' -> returns 4 (the
# index number, which also evaluates to False since its a non-zero return code)
function compare_versions {
    debug 10 "${FUNCNAME} called with ${*}"
    # 'printf' doesn't properly handle SIGPIPE
    shopt_decorator_option_name='pipefail'
    shopt_decorator_option_value='false'
    # shellcheck disable=2015
    shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator"

    # Intentionally unquoted so space separated version strings split
    items=( ${@} )
    assert [ ${#items[@]} -gt 0 ]
    # shellcheck disable=2119
    lowest_ver="$(printf "%s\\n" "${items[@]}" | version_sort | head -n1)"
    for (( i=0; i<${#items[@]}; i++ )) ; do
        if [ "${items[i]}" == "${lowest_ver}" ] ; then
            debug 10 "${FUNCNAME} returning ${i}"
            return "${i}"
        fi
    done
    color_echo red "Failed to compare versions!"
    exit_on_fail
}

# Set convenience variable for bash v4 compat
if compare_versions "${BASH_VERSION}" "4" ; then
    bash_pre_v4=true
else
    bash_pre_v4=false
fi

# Set timeout value to use for read, v3 does not support decimal seconds
if "${bash_pre_v4}" ; then
    read_timeout='1'
else
    read_timeout='0.1'
fi

# Prints the version of a command, arguments include:
# 1. Full or relative path to command (required)
# 2. Text to display before version info (optional)
# 3+. Flag(s)/Argument(s) to command to get version (optional, defaults to --version)
# error_msg variable: Error message if command is not found, to ignore redirect
# stderr run this like so: print_version bash 2> /dev/null
function print_version {
    local error_msg
    error_msg="${error_msg:-Unable to find command ${1}}"
    if command -v "${1}" > /dev/null ; then
        echo -n "${2:-}"
        # Fixed: default-expand ${3} so the optional argument does not trip
        # 'set -u' / nounset when omitted
        if [ -n "${3:-}" ] ; then
            ${1} "${@:3}"
        else
            ${1} --version
        fi
    else
        (>&2 echo "${error_msg}")
    fi
}

# Store full path to this script
script_full_path="${0}"
if [ ! -f "${script_full_path}" ] ; then
    script_full_path="$(pwd)"
fi
finalize_path script_full_path
run_dir="${run_dir:-$(dirname "${script_full_path}")}"

# Default is to clean up after ourselves
cleanup="${cleanup:-true}"

# Create NSS Wrapper passwd and group files
# Accepts 4 optional arguments, uid:gid, username, group and home directory
# Defaults to current uid/gid, bob, builders and a temporary directory
# Note that if a home directory is specified and it's temporary it will need to
# be removed/cleaned up by the code calling this function
function init_nss_wrapper {
    umask_decorator_mask=${NSS_WRAPPED_FILE_MASK:-0002}
    umask_decorator "${FUNCNAME[0]}" "${@:-}" && return

    GUID="${1:-${GUID:-${UID:-$(id -u)}:$(id -g)}}"
    debug 8 "Initializing NSS Wrapper with ${GUID}"

    export TMP_USER="${2:-bob}"
    export TMP_GROUP="${3:-builders}"
    # The ordering of -t and -d is important so this works on both BSD/OSX an
    # linux since template and -t have different meanings and syntaxes
    tmp_passwd_file="$(mktemp -t "passwd.${$}.XXXXXXXXXX")" && add_on_exit "rm -f '${tmp_passwd_file}'" && chmod "${NSS_WRAPPED_FILE_PERM:-0664}" "${tmp_passwd_file}"
    tmp_group_file="$(mktemp -t "group.${$}.XXXXXXXXXX")" && add_on_exit "rm -f '${tmp_group_file}'" && chmod "${NSS_WRAPPED_FILE_PERM:-0664}" "${tmp_group_file}"
    tmp_hosts_file="$(mktemp -t "hosts.${$}.XXXXXXXXXX")" && add_on_exit "rm -f '${tmp_hosts_file}'" && chmod "${NSS_WRAPPED_FILE_PERM:-0664}" "${tmp_hosts_file}"

    if [ -n "${4:-}" ] ; then
        TMP_HOME_PATH="${4}"
    else
        TMP_HOME_PATH="$(mktemp -d -t "home.${TMP_USER}.XXXXXXXXXX")" && add_on_exit "rm -Rf '${TMP_HOME_PATH}'" && chown -R "${GUID}" "${TMP_HOME_PATH}" &> /dev/null
    fi
    export TMP_HOME_PATH

    mkdir -p "${TMP_HOME_PATH}"
    cat '/etc/passwd' > "${tmp_passwd_file}"
    cat '/etc/group' > "${tmp_group_file}"
    cat '/etc/hosts' > "${tmp_hosts_file}"
    export BUID="${GUID%:*}"
    export BGID="${GUID#*:}"
    passwd_string="${TMP_USER}:x:${BUID}:${BGID}:Bob the builder:${TMP_HOME_PATH}:/bin/false"
    group_string="${TMP_GROUP}:x:${BUID}:"
    passwd_pattern=".*:x:${BUID}:.*:.*:.*:.*"
    group_pattern=".*:x:${BGID}:.*"

    # NOTE(review): the first two sed calls look superseded by the q42-based
    # pair below (which appends when no line matched) — confirm before removing
    sed -i "s|.*:x:${BUID}:.*:.*:.*:.*|${passwd_string}|g" "${tmp_passwd_file}" || echo "${passwd_string}" >> "${tmp_passwd_file}"
    sed -i "s|.*:x:${BGID}:.*|${group_string}|g" "${tmp_group_file}" || echo "${group_string}" >> "${tmp_group_file}"
    sed -i "/${passwd_pattern}/!{q42}; {s|${passwd_pattern}|${passwd_string}|g}" "${tmp_passwd_file}" || echo "${passwd_string}" >> "${tmp_passwd_file}"
    sed -i "/${group_pattern}/!{q42}; {s|${group_pattern}|${group_string}|g}" "${tmp_group_file}" || echo "${group_string}" >> "${tmp_group_file}"

    export LD_PRELOAD='libnss_wrapper.so'
    export NSS_WRAPPER_PASSWD="${tmp_passwd_file}"
    export NSS_WRAPPER_GROUP="${tmp_group_file}"
    export NSS_WRAPPER_HOSTS="${tmp_hosts_file}"
}

# Enable a Python Software Collection, SCL allows multiple versions of the same RPMs to be
# installed at the same time.
# Accepts one required argument, the version of
# python to enable, this should be in the format '3.6'
function enable_scl_python {
    assert [ "${os_name}" = "redhat" ]
    shopt_decorator_option_name='nounset'
    shopt_decorator_option_value='false'
    assert test -n "${1}"
    python_version="${1}"
    short_version="$(echo "${python_version}" | tr -dc '0-9')"
    python_enable_path="${2:-${PYTHON_ENABLE_PATH:-/opt/rh/python${short_version}/enable}}"
    # shellcheck disable=SC2015
    shopt_decorator "${FUNCNAME[0]}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator"
    color_echo green "Enabling SCL environment for python version: ${python_version}"
    # Fixed: stray extra '}' appended a literal brace to the sourced path
    # shellcheck disable=SC1090
    source "${python_enable_path}"
}

# Set username not available (unattended run) if passwd record exists
if [ -z "${USER:-}" ] && whoami &> /dev/null ; then
    USER="$(whoami)"
    export USER
fi

# Set home directory if not available (unattended run)
if [ -z "${HOME:-}" ]; then
    HOME="$(getent passwd "${USER}" | awk -F: '{print $6}')"
    export HOME
fi

# Find the best way to escalate our privileges, setting priv_esc_cmd to
# 'sudo -E', 'su -c' or '' (already root)
function set_priv_esc_cmd {
    if [ "${EUID}" != "0" ]; then
        if [ -x "$(command -v sudo)" ]; then
            priv_esc_cmd='sudo -E'
        elif [ -x "$(command -v su)" ]; then
            priv_esc_cmd='su -c'
        else
            color_echo red "Not running as root and unable to locate/run sudo or su for privilege escalation"
            return 1
        fi
    else
        priv_esc_cmd=''
    fi
    return 0
}
set_priv_esc_cmd

# Magical sudo/su which preserves all ssh keys, kerb creds and def. ssh user
# and tty/pty
function priv_esc_with_env {
    debug 10 "Calling: \"${priv_esc_cmd} ${*}\" on tty: \"${init_tty}\" with priv esc command as: \"${priv_esc_cmd}\" and user: \"${USER}\""
    debug 11 "${priv_esc_cmd} /bin/bash -c export SSH_AUTH_SOCK='${SSH_AUTH_SOCK}' && export SUDO_USER_HOME='${HOME}' && export KRB5CCNAME='${KRB5CCNAME}' && export GPG_TTY='${init_tty}' && alias ssh='ssh -l ${USER}' && ${*}"
    ${priv_esc_cmd} /bin/bash -c "export SSH_AUTH_SOCK='${SSH_AUTH_SOCK}' && export SUDO_USER_HOME='${HOME}' && export KRB5CCNAME='${KRB5CCNAME}' && export GPG_TTY='${init_tty}' && alias ssh='ssh -l ${USER}' && ${*}"
    return ${?}
}

# Create and manage a custom ssh auth agent, socket and pid
# Create a special ssh-agent for docker, accepts two optional
# parameters/arguments, the location of the named socket and the pid file
# Optionally accepts any number of ssh key files to import, these can include
# wildcards.
function get_custom_ssh_auth_agent {
    custom_ssh_auth_socket_path="${1:-${HOME}/custom-ssh-agent}"
    custom_ssh_auth_pid_file="${2:-${HOME}/.custom-ssh-agent.pid}"
    # Intentionally unquoted so wildcard key arguments glob-expand
    ssh_key_files=( ${@:3} )
    if [ -S "${custom_ssh_auth_socket_path}" ] && pgrep -F ${custom_ssh_auth_pid_file} &> /dev/null ; then
        color_echo cyan "Found custom ssh-agent with socket: ${custom_ssh_auth_socket_path}"
        export SSH_AUTH_SOCK="${custom_ssh_auth_socket_path}"
        if [ -f "${custom_ssh_auth_pid_file}" ] ; then
            read -r SSH_AGENT_PID < "${custom_ssh_auth_pid_file}"
            export SSH_AGENT_PID
        fi
    else
        color_echo cyan "Creating custom ssh-agent with socket: ${custom_ssh_auth_socket_path}"
        assert whichs ssh-agent
        if rm -f ${custom_ssh_auth_socket_path} ; then
            eval $(ssh-agent -a ${custom_ssh_auth_socket_path})
            echo "${SSH_AGENT_PID}" > "${custom_ssh_auth_pid_file}"
        else
            color_echo red "Unable to reset/create named socket ${custom_ssh_auth_socket_path}, please verify path and permissions"
            return 1
        fi
    fi

    color_echo cyan "Checking ssh-agent key status"
    assert whichs ssh-add
    if [ -n "${ssh_key_files:-}" ] ; then
        color_echo green "Loading key files: ${ssh_key_files[*]}"
        for ssh_key_file in "${ssh_key_files[@]}" ; do
            debug 10 "Processing ssh key file: ${ssh_key_file}"
            if ! ssh-add -l | grep -q "${ssh_key_file}" ; then
                ssh-add ${ssh_key_file:-} || exit_on_fail "Unable to load ssh key file ${ssh_key_file} into agent"
            else
                color_echo green "Key file: ${ssh_key_file} already loaded into custom ssh agent"
            fi
        done
    else
        if ! ssh-add -l &> /dev/null ; then
            color_echo green "No ssh key specified, loading default key"
            ssh-add || exit_on_fail "Unable to load ssh key into agent"
        else
            color_echo green "Found existing ssh key in custom ssh agent, no key specified to load, skipping"
        fi
    fi
    assert test -n "${SSH_AUTH_SOCK}"
}

# A subprocess which performs a command when it receives a signal
# First parameter is the signal and the rest is assumed to be the command
# Returns the PID of the subprocess
function signal_processor {
    local signal="${1}"
    local command="${*:2}"
    bash -c "trap '${command}' ${signal} && while true; do sleep 1 ; done" &> /dev/null &
    echo "${!}"
}

# Signals a process by either exact name or pid
# Accepts name/pid as first parameter and optionally signal as second parameter
function signal_process {
    debug 8 "Signaling PID: ${1} with signal: ${2:-SIGTERM}"
    if [[ "${1}" =~ ^[0-9]+$ ]] ; then
        # Fixed: ${2:-} so the documented-optional signal argument does not
        # trip nounset when omitted
        if [ "${2:-}" != '' ] ; then
            kill -s "${2}" "${1}"
        else
            kill "${1}"
        fi
    else
        assert whichs pkill
        if [ "${2:-}" != '' ] ; then
            pkill --exact --signal "${2}" "${1}"
        else
            pkill --exact "${1}"
        fi
    fi
}

# This function watches a set of files/directories and lets you run commands
# when file system events (using inotifywait) are detected on them
# - Param 1: command/function to run
# - Param 2..N: files/directories to monitor.
Note: Absolute paths to the -# modified objects are passed to the command/function -# Custom variables: -# - on_mod_max_frequency: the frequency, in seconds, to run command/function -# (acts as a debounce). If set to 0 then multiple instances of -# the command/function can run at the same time. Default: 1s -# - on_mod_refresh: determines if command/function should run again at the end -# of the timeout if re-triggered during the previous run. -# Default: true -# - on_mod_max_queue_depth: determines event queue size. Default: 1 event -# -# File system modification events: -# - MODIFY | CLOSE_WRITE -# - MOVED_TO | CREATE -# - MOVED_FROM | DELETE | MOVE_SELF -# - DELETE_SELF | UNMOUNT -# -# Example use: Create a callback function and register it for events -# -# path_to_monitor="/tmp" -# function callback { -# modified_obj="${1}" -# modified_dir=$(dirname "${modified_obj}") -# modified_file=$(basename "${modified_obj}") -# current_dir="${PWD}" -# cd ${modified_dir} -# echo "Do something with '${modified_file}' in '${modified_dir}'" -# ls -la ${modified_file} -# cd ${current_dir} -# } -# add_on_mod callback "${path_to_monitor}" -# -function add_on_mod { - shopt_decorator_option_name='nounset' - shopt_decorator_option_value='false' - # shellcheck disable=2015 - shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" - if whichs inotifywait ; then - file_monitor_command="inotifywait --monitor --recursive --format %w%f - --event modify - --event close_write - --event moved_to - --event create - --event moved_from - --event delete - --event move_self - --event delete_self - --event unmount" - elif whichs fswatch ; then - file_monitor_command="fswatch --recursive --format %p - --event Created - --event Updated - --event Removed - --event Renamed - --event MovedFrom - --event MovedTo" - else - color_echo red "Unable to find inotifywait or fswatch, please install one or the other before trying to 
use '${FUNCNAME[0]} ${*}'" - return 1 - fi - local arguments=("${@}") - on_mod_refresh="${on_mod_refresh:-true}" - on_mod_max_frequency="${max_frequency:-1}" - on_mod_max_queue_depth="${on_mod_max_queue_depth:-1}" - for fs_object in "${arguments[@]:1}"; do - if ! [ -e "${fs_object}" ] ; then - color_echo red "Unable to find filesystem object '${fs_object}' when running ${FUNCNAME[0]}" - return 1 - fi - ${file_monitor_command} "${fs_object}" \ - | while read -r mod_fs_object; do - debug 10 "Handling event using event loop with pid: ${$}" - declare -a sub_processes - # Remove stale pids from sub process array - live_sub_processes=() - for pid in "${sub_processes[@]}" ; do - if kill -0 "${pid}" &> /dev/null ; then - debug 10 "Contacted pid: ${pid}" - live_sub_processes+=("${pid}") - fi - done - sub_processes=("${live_sub_processes[@]}") - # Fork a process to run the command - ( - debug 8 "Found ${#sub_processes[@]} elements in sub process array: ${sub_processes[*]}" - if [ "${on_mod_max_frequency}" -gt 0 ] && [ "${#sub_processes[@]}" -gt 0 ] ; then - if "${on_mod_refresh}" && [ "${#sub_processes[@]}" -le "${on_mod_max_queue_depth}" ] ; then - sibling_pid="${sub_processes[$(( ${#sub_processes[@]} - 1 ))]}" - # Implement a special case for busybox support - # shellcheck disable=2009,2015,2230 - sibling_run_time="$(readlink -f "$(which ps)" | grep -q busybox && \ - ps -Ao pid,time | grep '^[\t ]*${sibling_pid}[\t ]' | awk '{print $2}' | awk -F: '{for(i=NF;i>=1;i--) printf "%s ", $i;print ""}' | awk '{print $1 + $2 * 60 + $3 * 3600 + $4 * 86400}' || \ - ps h -o etimes -p "${sibling_pid}")" - delta=$(( on_mod_max_frequency - sibling_run_time)) - if [ "${delta}" -gt 0 ] ; then - sleep "${delta}" - fi - # Watch for sibling and run when it is stopped - while kill -0 "${sibling_pid}" &> /dev/null ; do - sleep 1 - done - debug 7 "Running ${arguments} to refresh after ${on_mod_max_frequency} sec timeout with pid ${$}" - ${arguments} "${mod_fs_object}" - else - debug 10 
"Discarding redundant/unwanted event since refresh is disabled or max queue depth has been reached" - fi - else - debug 7 "Running command: '${arguments} ${mod_fs_object}' in subshell with PID: ${$}" - ${arguments} "${mod_fs_object}" - fi - ) & - sub_processes+=("${!}") - done - done -} - -# Traps for cleaning up on exit -# Note that trap definition needs to happen here not inside the add_on_sig as -# shown in the original since this can easily be called in a subshell in which -# case the trap will only apply to that subshell -declare -a on_exit -on_exit=() -declare -a on_break -on_break=() - -function on_exit { - # shellcheck disable=SC2181 - if [ ${?} -ne 0 ]; then - # Prints to stderr to provide an easy way to check if the script - # failed. Because the exit signal gets propagated only the first call to - # this function will know the exit code of the script. All subsequent - # calls will see $? = 0 if the previous signal handler did not fail - color_echo red "Last command did not complete successfully" >&2 - fi - - if [ -n "${on_exit:-}" ] ; then - debug 10 "Received SIGEXIT, ${#on_exit[@]} items to clean up." - if [ ${#on_exit[@]} -gt 0 ]; then - for item in "${on_exit[@]}"; do - if [ -n "${item}" ] ; then - debug 10 "Executing cleanup statement on exit: ${item}" - # shellcheck disable=SC2091 - ${item} - fi - done - fi - fi - debug 10 "Finished cleaning up, de-registering signal trap" - trap - EXIT - if ! $interactive ; then - # Be a nice Unix citizen and propagate the signal - kill -s EXIT "${$}" - fi -} - -function on_break { - if [ -n "${on_break:-}" ] ; then - color_echo red "Break signal received, unexpected exit, ${#on_break[@]} items to clean up." - if [ ${#on_break[@]} -gt 0 ]; then - for item in "${on_break[@]}"; do - if [ -n "${item}" ] ; then - color_echo red "Executing cleanup statement on break: ${item}" - ${item} - fi - done - fi - fi - # Be a nice Unix citizen and propagate the signal - trap - "${1}" - if ! 
$interactive ; then - # Be a nice Unix citizen and propagate the signal - kill -s "${1}" "${$}" - fi -} - -function add_on_exit { - debug 10 "Registering signal action on exit: \"${*}\"" - if [ -n "${on_exit:-}" ] ; then - local n="${#on_exit[@]}" - else - local n=0 - fi - on_exit[${n}]="${*}" - debug 10 "on_exit content: ${on_exit[*]}, size: ${#on_exit[*]}, keys: ${!on_exit[*]}" -} - -function add_on_break { - debug 10 "Registering signal action on break: \"${*}\"" - if [ -n "${on_break:-}" ] ; then - local n="${#on_break[@]}" - else - local n=0 - fi - on_break[${n}]="${*}" - debug 10 "on_break content: ${on_break[*]}, size: ${#on_break[*]}, keys: ${!on_break[*]}" -} - -function add_on_sig { - add_on_exit "${*}" - add_on_break "${*}" -} - -function clear_sig_registry { - debug 10 "Clearing all registered signal actions" - on_exit=() - on_break=() -} - -debug 10 "Setting up signal traps" -trap on_exit EXIT -trap "on_break INT" INT -trap "on_break QUIT" QUIT -trap "on_break TERM" TERM -debug 10 "Signal trap successfully initialized" - -# Creates a secure temporary directory or file -# First argument (REQUIRED) is the name of the caller's return variable -# Second argument (REQUIRED) is either 'dir' or 'file' -# Third argument (OPTIONAL) can either be an existing or non-existing directory -# -# If "file" is chosen and the second argument matches a dir a tmp file with a -# random filename will be created. -# If "dir" is chosen and the second argument matches a dir a tmp dir with a -# random name will be created. -# If "file" is chosen and the second argument does not match any existing -# directory a temporary file with that name will be created. -# If "dir" is chosen and the second argument does not match any existing -# directory a temporary dir with that name will be created. -# If no second argument is given a randomly named tmp file/dir will be created -# -# DO NOT call this function in a subshell, it breaks the clean up functionality. 
# Instead, call the function with the name of the caller's return variable as the
# first argument. For example:
#     local my_temp_dir=""
#     create_secure_tmp my_temp_dir 'dir'
function create_secure_tmp {
    # Check for the minimum number of arguments
    if [ ${#@} -lt 2 ]; then
        color_echo red "Called 'create_secure_tmp' with less than 2 arguments."
        exit_on_fail
    fi

    # Save the name of the caller's return variable
    local _RETVAL=${1}

    local type_flag
    if [ "${2}" == 'file' ] ; then
        type_flag=''
    elif [ "${2}" == 'dir' ] ; then
        type_flag='-d'
    else
        color_echo red 'Called create_secure_tmp without specifying a required second argument "dir" or "file"!'
        color_echo red "You specified: ${2}"
        exit_on_fail
    fi
    # Restrict permissions on anything we create, restore umask afterwards
    original_umask="$(umask)"
    umask 0007

    # Should not be a local variable so the calling environment can access it
    secure_tmp_object=""
    dir=${3:-}
    if [ -d "${dir}" ]; then
        if [ "${os_type}" == 'Linux' ]; then
            secure_tmp_object="$(mktemp ${type_flag} -p "${dir}" -q )"
        else
            # BSD/macOS mktemp has no -p; use TMPDIR instead
            TMPDIR="${3}"
            secure_tmp_object="$(mktemp -t tmp -q)"
        fi
    elif [ -e "${dir}" ] || [ -z "${dir}" ]; then
        if [ "${os_type}" == 'Linux' ]; then
            secure_tmp_object="$(mktemp ${type_flag} -q)"
        else
            secure_tmp_object="$(mktemp ${type_flag} -q -t tmp)"
        fi
    else
        # Path does not exist yet: create it with tight permissions
        if [ "${2}" == 'file' ] ; then
            mkdir -p -m 0700 "$(dirname "${dir}")" || exit_on_fail
            install -m 0600 /dev/null "${dir}" || exit_on_fail
        elif [ "${2}" == 'dir' ] ; then
            mkdir -p -m 0700 "${dir}" || exit_on_fail
        fi
        secure_tmp_object="${dir}"
    fi
    # shellcheck disable=SC2181
    if [ ${?} -ne 0 ]; then
        exit_on_fail "${secure_tmp_object}"
    fi

    umask "${original_umask}" || exit_on_fail

    # Store temp file/dir path into the caller's variable
    # shellcheck disable=SC2086
    eval ${_RETVAL}="'$secure_tmp_object'"

    if ${cleanup}; then
        debug 10 "Setting up signal handler to delete tmp object ${secure_tmp_object} on exit"
        add_on_sig "rm -Rf ${secure_tmp_object}"
    fi
}

# Extracts archives
# First argument is the archive, second is the destination folder
# Any subsequent arguments are assumed to be embedded archives to try to
# extract, these will all be normalized into the dest folder
# If no arguments are given or a simple dash it's assumed the archive is
# provided on stdin in which case we try to determine the type and extract
# using a temporary file
# Examples of usage:
# stdin/stdout:      extract < cat /some/file OR cat /some/file | extract
# stdin/filename:    extract - /output/path
# filename/filename: extract /input/path /output/path
# filename/stdout:   extract /input/path
declare -a extract_trailing_arguments
function extract {
    # Check if we have a filename or are dealing with data on stdin
    if [ "${1:-}" == '-' ] || [ "${1:-}" == '' ] ; then
        if [ "${2:-}" != '' ] ; then
            dest_flag_place="-C ${2}"
        else
            dest_flag_place=''
        fi
        # Buffer stdin to a temp file so we can sniff the mime type
        tmp_archive="$(mktemp)"
        case "$(tee "${tmp_archive}" &> /dev/null && file "${tmp_archive}" --brief --mime-type)" in
            application/x-tar)  tar xf "${tmp_archive}" ${dest_flag_place};;
            application/x-gzip) tar zxf "${tmp_archive}" ${dest_flag_place};;
            application/pgp)    gpg -q -o - --decrypt "${tmp_archive}" | extract "${@:1}";;
            *) color_echo red "Unsupported mime type for extracting file from stdin" ;;
        esac
        debug 10 "Removing temporary archive: ${tmp_archive}"
        rm -f "${tmp_archive}"
    else
        if [ "${verbosity}" -ge 10 ]; then
            local tar_verb_flag="--verbose"
        else
            local tar_verb_flag=''
        fi
        if [ -f "${1}" ] && [ -d "${2}" ]; then
            # Dispatch on file extension
            case "${1}" in
                *.tar.bz2)    ${priv_esc_cmd} tar xvjf "${1}" -C "${2}" ${tar_verb_flag};;
                *.tar.gz)     ${priv_esc_cmd} tar xvzf "${1}" -C "${2}" ${tar_verb_flag};;
                *.bz2)        ${priv_esc_cmd} bunzip2 -dc "${1}" > "${2}" ;;
                *.rar)        ${priv_esc_cmd} unrar x "${1}" "${2}" ;;
                *.gz)         ${priv_esc_cmd} gunzip -c "${1}" > "${2}" ;;
                *.tar)        ${priv_esc_cmd} tar xvf "${1}" -C "${2}" ${tar_verb_flag};;
                *.pyball)     ${priv_esc_cmd} tar xvf "${1}" -C "${2}" ${tar_verb_flag};;
                *.tbz2)       ${priv_esc_cmd} tar xvjf "${1}" -C "${2}" ${tar_verb_flag};;
                *.tgz)        ${priv_esc_cmd} tar xvzf "${1}" -C "${2}" ${tar_verb_flag};;
                *.zip)        ${priv_esc_cmd} unzip "${1}" -d "${2}" ;;
                *.Z)          ${priv_esc_cmd} uncompress -c "${1}" > "${2}" ;;
                *.7z)         ${priv_esc_cmd} 7za x -y "${1}" -o"${2}" ;;
                *.tar.gpg)    ${priv_esc_cmd} gpg -q -o - --decrypt "${1}" | tar xv -C "${2}" ${tar_verb_flag};;
                *.tgz.gpg)    ${priv_esc_cmd} gpg -q -o - --decrypt "${1}" | tar xvz -C "${2}" ${tar_verb_flag};;
                # Bug fix: the original was missing the space after ${priv_esc_cmd}
                *.tar.gz.gpg) ${priv_esc_cmd} gpg -q -o - --decrypt "${1}" | tar xvz -C "${2}" ${tar_verb_flag};;
                *) color_echo red "${1} is not a known compression format" ;;
            esac
            # Try to extract any embedded archives given as trailing arguments.
            # Bug fix: the original built the array with a literal ':-' suffix
            # and its if/else never iterated past the first element.
            extract_trailing_arguments=("${@:3}")
            local embedded_archive
            for embedded_archive in "${extract_trailing_arguments[@]}" ; do
                if [ -f "${2}/${embedded_archive}" ] ; then
                    extract "$(find "${2}/${embedded_archive}")" "${2}"
                else
                    color_echo cyan "Did not find any embedded archive matching ${embedded_archive}"
                fi
            done
        else
            color_echo red "'${1}' is not a valid file or '${2}' is not a valid directory"
            exit_on_fail
        fi
    fi
}

# If script is a part of a self extracting executable tar archive
# Extract itself and set variable to path
function extract_exec_archive {
    # create_secure_tmp will store return data into the first argument
    create_secure_tmp tmp_archive_dir 'dir'
    export tmp_archive_dir
    if ${interactive} ; then
        while ! [[ "${REPLY:-}" =~ ^[NnYy]$ ]]; do
            color_echo magenta "Detected self extracting executable archive"
            read -rp "Please confirm you want to continue and extract the archive (Yy/Nn): " -n 1
            echo ""
        done
    else
        REPLY="y"
    fi
    if [[ ${REPLY} =~ ^[Yy]$ ]]; then
        # Everything after the __ARCHIVE_FOLLOWS__ marker is the tar payload
        bash_num_lines="$(awk '/^__ARCHIVE_FOLLOWS__/ { print NR + 1; exit 0; }' "${script_full_path}")"
        debug 10 "Extracting embedded tar archive to ${tmp_archive_dir}"
        tail -n +"${bash_num_lines}" "${script_full_path}" | extract - "${tmp_archive_dir}" || exit_on_fail
    else
        color_echo red "Archive extraction cancelled by user!"
        exit 255
    fi
}

# If this script is being run as a part of an executable installer archive handle correctly
if [ -f "${script_full_path}" ] && grep -qe '^__ARCHIVE_FOLLOWS__' "${script_full_path}" ; then
    export running_as_exec_archive=true
    debug 5 "Detected I'm an executable archive"
    extract_exec_archive
    if [ "$(type -t run_if_exec_archive)" == 'function' ] ; then
        debug 10 "Found function named run_if_exec_archive, running it!"
        run_if_exec_archive
    else
        debug 10 "Did not find a function named run_if_exec_archive, continuing"
    fi
else
    debug 5 "Detected I'm running as a script or interactive"
    running_as_exec_archive=false
fi

# This is a sample print usage function, it should be overwritten by scripts
# which import this library
function print_usage {
cat << EOF
usage: ${0} options

This is an example usage help function

OPTIONS:
   -x      Create an example bundle, optionally accepts a release, defaults to acme release
   -a      Apply an example bundle
   -s      Sign a bundle being created and force validation when it's applied
   -p      Create a patch, the patch only includes acme updates and does not update the release
   -h      Show this message
   -v      Print ${0} version and exit

Examples:
${0} -c          # Create a bundle with "acme" version
${0} -sc 1.0.1   # Create and sign an acme bundle with version 1.0.1
${0} -a          # Apply example update, default action when run from archive

Version: ${version:-${shtdlib_version}}
EOF
}

# Exits with error if a required argument was not provided
# Takes two arguments, first is the argument value and the second
# is the error message if argument is not set
# This is mostly irrelevant when running in strict mode
function required_argument {
    print_usage_function="${3:-print_usage}"
    if [ -z "${!1}" ]; then
        ${print_usage_function}
        color_echo red "${2}"
        exit 255
    fi
}

# Sometimes we want to process the required arguments later
declare -a arg_var_names
declare -a arg_err_msgs
function deferred_required_argument {
    arg_var_names+=("${1}")
    arg_err_msgs+=("${2}")
}
function process_deferred_required_arguments {
    for ((i=0;i<${#arg_var_names[@]};++i)) ; do
        required_argument "${arg_var_names[$i]}" "${arg_err_msgs[$i]}"
    done
}

# Parse for optional arguments (-f vs.
# -f optional_argument)
# Takes variable name as first arg and default value as optional second;
# the variable will be initialized in any case for compat with -e.
# You need to set or export `parameter_array` in the script that uses `parse_opt_arg`:
#
#    # shellcheck disable=2034
#    parameter_array=(${@:-}) # Store all parameters as an array
#
#    # Parse command line arguments
#    function parse_arguments {
#        debug 5 "Parse Arguments got argument: ${1}"
#        case ${1} in
#        ...
function parse_opt_arg {
    # Pick up optional arguments
    debug 10 "Parameter Array is: ${parameter_array[*]:-}"
    next_arg="${parameter_array[$((OPTIND - 1))]:-}"
    debug 10 "Optarg/Option index is: ${OPTIND} and next argument is: ${next_arg}"
    if [ "$(echo "${next_arg}" | grep -v '^-')" != "" ]; then
        debug 10 "Found optional argument and setting ${1}=\"${next_arg}\""
        eval "${1}=\"${next_arg}\""
        # Skip over the optional value so getopts does not stop processing
        (( OPTIND++ ))
    else
        if [ "${2:-}" != '' ]; then
            debug 10 "Optional argument not found, using default and setting ${1}=\"${2}\""
            eval "${1}=\"${2}\""
        else
            debug 10 "Initializing empty variable ${1}"
            eval "${1}="
        fi
    fi
    unset next_arg
    debug 10 "Set argument: ${1} to \"${!1}\""
}

# Resolve DNS name, returns IP if successful, otherwise name and error code
function resolve_domain_name {
    lookup_result="$( (whichs getent >/dev/null && getent ahosts "${1}" | awk '{ print $1 }'| sort -u) || (whichs dscacheutil && dscacheutil -q host -a name "${1}" | grep ip_address | awk '{ print $2 }'| sort -u ))"
    if [ -z "${lookup_result}" ]; then
        echo "${1}"
        return 1
    else
        echo "${lookup_result}"
        return 0
    fi
}

# Resolve DNS SRV name given a service and a domain, returns host name(s)
function resolve_srv_name {
    service="_${1}"
    domain="${2}"
    proto="_${3:-TCP}"
    debug 10 "${service} ${domain} ${proto}"
    # The exit status of host is appended as the final array element
    mapfile -t lookup_result <<< "$(host -t SRV "${service}.${proto}.${domain}" ; echo -e "${?}" )"
    if test "${lookup_result[@]: -1}" -eq 0 ; then
        # NOTE(review): this also echoes the trailing status line — confirm
        # callers rely on that before changing it
        for line in "${lookup_result[@]}"; do
            echo "${line}"
        done
    else
        debug 2 "Failed to resolve ${service} ${domain} ${proto}"
    fi
}

# Wait for file to exist
# - first param: filename
# - second param: timeout (optional, default 5 sec)
# - third param: sleep interval (optional, default 1 sec)
function wait_for_file {
    local file_name="${1}"
    local timeout="${2:-5}"
    local sleep_interval="${3:-1}"
    local max_count=$((timeout/sleep_interval))
    local count=0
    while [ ! -f "${file_name}" ]; do
        (( count++ ))
        if [ ${count} -ge ${max_count} ]; then
            break
        else
            sleep "${sleep_interval}"
        fi
    done
}

# Wait for a command to return a 0 exit status
# - first param: command
# - second param: timeout (optional, default 10 sec)
# - third param: sleep interval (optional, default 1 sec)
function wait_for_success {
    local command="${1:-false}"
    local timeout="${2:-10}"
    local sleep_interval="${3:-1}"
    local max_count=$((timeout/sleep_interval))
    local count=0
    while ! ${command}; do
        (( count++ ))
        if [ ${count} -ge ${max_count} ]; then
            return 1
        else
            sleep "${sleep_interval}"
        fi
    done
}

# Helper function for copy_file
# Sets Permission/Owner on files
# Takes params/args file, owner[:group], oct_mode (permission)
function set_file_perm_owner {
    debug 10 "Called set_file_perm_owner with ${1}, ${2}, ${3}"
    if [ -z "${2}" ] ; then
        rsync_base_flags="${rsync_base_flags} -og"
    else
        debug 10 "Changing owner on ${1} to ${2}"
        rsync_base_flags="${rsync_base_flags} --usermap=${2}"
        # Workaround when running from setuid and no supplemental groups are
        # loaded automatically
        # shellcheck disable=SC2091
        if [ "${EUID}" -ne '0' ] && $(echo "${2}" | grep -q ':') ; then
            group="$(echo "${user_group:-}" | awk -F: '{print $2}')"
            if [[ "${group}" != '' ]]; then
                sg "${group}" -c "chown '${2}' '${1}'" || exit_on_fail
            fi
        else
            chown "${2}" "${1}" || exit_on_fail
        fi
    fi
    if [ -z "${3}" ] ; then
        rsync_base_flags="${rsync_base_flags} -p"
    else
        debug 10 "Changing permissions on ${1} to ${3}"
        rsync_base_flags="${rsync_base_flags} --chmod=${3}"
        chmod "${3}" "${1}" || exit_on_fail
    fi
}

# Helper function for copy_dir
# Sets Permission/Owner of directories
# Takes params/args directory, owner[:group], oct_mode, file permission
function set_dir_perm_owner {
    debug 10 "Called set_dir_perm_owner with ${1}, ${2}, ${3}, ${4}"
    if [ -z "${2}" ] ; then
        rsync_base_flags="${rsync_base_flags} -og"
    else
        debug 10 "Changing owner on ${1} to ${2}"
        rsync_base_flags="${rsync_base_flags} --usermap=${2}"
        # Workaround when running from setuid and no supplemental groups are
        # loaded automatically
        # shellcheck disable=SC2091
        if [ "${EUID}" -ne '0' ] && $(echo "${2}" | grep -q ':') ; then
            group="$(echo "${user_group:-}" | awk -F: '{print $2}')"
            if [[ "${group}" != '' ]]; then
                sg "${group}" -c "chown '${2}' ${1}" || exit_on_fail
            fi
        else
            chown -R "${2}" "${1}" || exit_on_fail
        fi
    fi
    if [ -z "${3}" ] ; then
        rsync_base_flags="${rsync_base_flags} -p"
    else
        debug 10 "Changing permissions recursively on dirs in ${1} to ${3}"
        rsync_base_flags="${rsync_base_flags} --chmod=${3}"
        find "${1}" -type d -exec chmod "${3}" {} +
    fi
    if [ -n "${4}" ] ; then
        debug 10 "Changing permissions recursively on files in ${1} to ${4}"
        # Figure out how to do this with rsync
        #rsync_base_flags="${rsync_base_flags} --chmod=${4}"
        find "${1}" -type f -exec chmod "${4}" {} +
    fi
}

# Very cautiously copy files
# First parameter source, second destination, third owner:group, fourth
# permissions, until we have rsync 3.1 everywhere we are actually changing
# the permissions on the source files which is not an issue when it's a tmp dir
# but could be an issue if used in a different way. Third and fourth parameters
# are optional
function copy_file {
    rsync_base_flags="-ltDu --inplace --backup --backup-dir=\"${backup_dir:-${dest}.backup}\" --keep-dirlinks"
    local source="${1}"
    local dest="${2}"
    local owner_group="${3:-}"
    local perm="${4:-}"
    set -f
    find_directory="$(dirname "${source}")"
    find_pattern="$(basename "${source}")"
    set +f
    debug 10 "Called copy_file with ${source} ${dest} ${owner_group} ${perm}"
    # shellcheck disable=SC2086
    if [ -e "${source}" ] ; then
        debug 10 "Filesystem object ${source} exists"
        # Make sure permissions and owner are OK
        set_file_perm_owner "${source}" "${owner_group}" "${perm}"
        if [ -f "${source}" ] ; then
            debug 10 "Found file ${source}"
            if "${force_overwrite:-false}" ; then
                debug 10 "Copying with forced overwrite"
                rsync_flags="${rsync_base_flags} --force"
                #rsync ${rsync_flags} "${1}" "${2}"
                cp -pf "${source}" "${dest}" || exit_on_fail
            elif "${interactive}" ; then
                debug 10 "Copying in interactive mode"
                rsync_flags="${rsync_base_flags}"
                #rsync ${rsync_flags} "${1}" "${2}"
                cp -pi "${source}" "${dest}" || exit_on_fail
            else
                debug 10 "Copying in non-interactive mode"
                # Bug fix: the original assigned 'flags=' here instead of 'rsync_flags='
                rsync_flags="${rsync_base_flags}"
                #rsync ${rsync_flags} "${1}" "${2}"
                cp -pn "${source}" "${dest}" || exit_on_fail
            fi
            debug 10 "Copied file ${source} to ${dest}"
        else
            color_echo red "Found filesystem object ${source} but it's not a file"
            return 1
        fi
    # Support globbing
    elif [ -n "$(find ${find_directory} -maxdepth 1 -name ${find_pattern} -type f -print -quit)" ] ; then
        debug 10 "Found globbing pattern in ${1}"
        # Make sure permissions and owner are OK
        set_file_perm_owner "${source}" "${owner_group}" "${perm}"
        if "${force_overwrite:-false}" ; then
            debug 10 "Copying with forced overwrite"
            cp -pf ${source} "${dest}" || exit_on_fail
        elif "${interactive}" ; then
            debug 10 "Copying in interactive mode"
            cp -pi ${source} "${dest}" || exit_on_fail
        else
            debug 10 "Copying in non-interactive mode"
            cp -pn ${source} "${dest}" || exit_on_fail
        fi
        copied_files="$(find ${source} -type f -exec basename {} \; | tr '\n' ' ')"
        debug 10 "Copied file(s) ${copied_files} to ${dest}"
    else
        color_echo cyan "Unable to find filesystem object ${source} while looking for file. Skipping..."
        return 1
    fi
    return 0
}

# Very cautiously copy directories
# First parameter source, second destination, third owner:group, fourth dir
# permissions, fifth file permissions.
# Last three parameters are optional
function copy_dir {
    local source="${1}"
    local dest="${2}"
    local owner_group="${3:-}"
    local file_perm="${4:-}"
    local dir_perm="${5:-}"
    # Disable globbing while splitting the path so patterns survive intact
    set -f
    find_directory="$(dirname "${source}")"
    find_pattern="$(basename "${source}")"
    set +f
    # shellcheck disable=SC2086
    if [ -e "${source}" ] ; then
        debug 10 "Filesystem object ${source} exists"
        set_dir_perm_owner "${source}" "${owner_group}" "${file_perm}" "${dir_perm}"
        if [ -d "${source}" ] ; then
            debug 10 "Found directory ${source}"
            if "${force_overwrite:-false}" ; then
                debug 10 "Copying with forced overwrite"
                cp -Rpf "${source}" "${dest}" || exit_on_fail
            elif "${interactive}" ; then
                debug 10 "Copying in interactive mode"
                cp -Rpi "${source}" "${dest}" || exit_on_fail
            else
                debug 10 "Copying in non-interactive mode"
                cp -Rpn "${source}" "${dest}" || exit_on_fail
            fi
            debug 10 "Copied dir ${source} to ${dest}"
        else
            color_echo red "Found filesystem object ${source} but it's not a directory"
            return 1
        fi
    # Support globbing
    # Bug fix: detection used '-type f' (copied from copy_file) which can never
    # match a directory, so directory globs always fell through to the error path
    elif [ -n "$(find ${find_directory} -maxdepth 1 -name ${find_pattern} -type d -print -quit)" ] ; then
        debug 10 "Found globbing pattern in ${source}"
        set_dir_perm_owner "${source}" "${owner_group}" "${file_perm}" "${dir_perm}"
        if "${force_overwrite:-false}" ; then
            debug 10 "Copying with forced overwrite"
            cp -Rpf ${source} "${dest}" || exit_on_fail
        elif "${interactive}" ; then
            debug 10 "Copying in interactive mode"
            cp -Rpi ${source} "${dest}" || exit_on_fail
        else
            debug 10 "Copying in non-interactive mode"
            cp -Rpn ${source} "${dest}" || exit_on_fail
        fi
        # NOTE(review): this lists files rather than dirs; kept as-is since it
        # only feeds debug output — confirm before changing
        copied_dirs="$(find ${source} -type f -exec basename {} \; | tr '\n' ' ')"
        debug 10 "Copied dir(s) ${copied_dirs}"
    else
        color_echo cyan "Unable to find filesystem object ${source} while looking for dir"
        return 1
    fi
    return 0
}

# Create directories, first argument is path, second is owner, third is
# group, fourth is mode
function create_dir_or_fail {
    # Make sure directory exists; offer to create it (interactively) or fail.
    # Arguments: 1: path, 2: owner (optional), 3: group (optional), 4: octal mode (optional)
    debug 10 "Asked to create/check directory ${1}"
    if [ ! -d "${1}" ]; then
        if [ -e "${1}" ]; then
            color_echo red "A non directory object already exists at ${1}"
            exit_on_fail
        fi
        # Offer to create the directory if it does not exist
        if ${interactive} ; then
            while ! [[ "${REPLY:-}" =~ ^[NnYy]$ ]]; do
                read -rp "The directory ${1} does not exist, do you want to create it (y/n):" -n 1
                echo ""
            done
        else
            REPLY="y"
        fi
        if [[ ${REPLY} =~ ^[Yy]$ ]]; then
            color_echo green "Creating directory ${1}"
            if [ "${4:-}" != "" ] ; then
                mode_flag="-m ${4}"
            else
                mode_flag=''
            fi
            # Create dir, use sudo/su if required.
            # Bug fix: the original ran `mkdir -p "${1}" "${4}"`, passing the
            # mode as an extra directory operand (creating a dir literally
            # named after the mode) and never using the computed ${mode_flag}.
            # ${mode_flag} is intentionally unquoted: empty or "-m <mode>".
            # shellcheck disable=SC2086
            if [ -w "$(dirname "${1}")" ] ; then
                mkdir -p ${mode_flag} "${1}"
            else
                ${priv_esc_cmd} mkdir -p ${mode_flag} "${1}"
            fi
            # Change owner if specified
            if [ "${2:-}" != "" ] && [ "$(stat -c '%U' "${1}")" != "${2}" ] ; then
                debug 5 "Changing owner on ${1} to ${2}"
                ${priv_esc_cmd} chown "${2}" "${1}"
            fi
            # Change group if specified
            if [ "${3:-}" != "" ] && [ "$(stat -c '%G' "${1}")" != "${3}" ] ; then
                debug 5 "Changing group on ${1} to ${3}"
                ${priv_esc_cmd} chgrp "${3}" "${1}"
            fi
        else
            color_echo red "Target directory is required"
            exit_on_fail
        fi
    fi
}

# Takes yaml file as first parameter and key as second, e.g.
# load_from_yaml /etc/custom.yaml puppet::mykey (additional keys can follow)
# example: load_from_yaml example.yaml ':sources' ':base' "'remote'"
function load_from_yaml {
    # ruby doesn't properly handle SIGPIPE
    shopt_decorator_option_name='pipefail'
    shopt_decorator_option_value='false'
    # shellcheck disable=2015
    shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator"

    if [ -r "${1}" ]; then
        # Build a ruby one-liner that digs through the nested keys
        ruby_yaml_parser="data = YAML::load(STDIN.read); puts data['${2}']"
        for key in "${@:3}" ; do
            ruby_yaml_parser+="[${key}]"
        done
        assert whichs ruby
        ruby -w0 -ryaml -e "${ruby_yaml_parser}" "${1}" 2> /dev/null | awk '{print $1}' || return 1
        return 0
    else
        return 1
    fi
}

# Install gem if not already installed
# Returns 0 if package was installed, 1 if package was already installed
function install_gem {
    original_umask="$(umask)"
    if [ "${verbosity}" -le 5 ]; then
        gem_verb_flag='-q'
    elif [ "${verbosity}" -ge 10 ]; then
        gem_verb_flag='-V'
    else
        gem_verb_flag=''
    fi
    # First try gem with version code, ala ubuntu or installed with gem but
    # default to basic gem command
    gem_cmd="$(compgen -c | grep '^gem[0-9][0-9]*\.*[0-9][0-9]*' | sort | tail -n1)"
    if [ "${gem_cmd}" == '' ]; then
        gem_cmd='gem'
    fi
    debug 10 "Using gem command: '${gem_cmd}'"
    # Bug fix: anchor on "name " ('gem list' prints "name (version)") so that
    # querying e.g. "rails" does not false-positive on "rails-html-sanitizer"
    gem_version=$(${gem_cmd} list "${1}" | grep -e "^${1} ")
    debug 10 "Query for gem package '${1}' version returned: '${gem_version}'"
    if [ "${gem_version}" == "" ]; then
        umask 0002
        ${priv_esc_cmd} bash -c "${gem_cmd} install ${gem_verb_flag} ${1} ${2:-}" || exit_on_fail
        umask "${original_umask}" || exit_on_fail
        return 0
    fi
    return 1
}

# A platform independent way to install a package, accepts any number of
# arguments all of which are assumed to be name variations of a package that
# should be tried, will only error if none of the arguments represent a valid
# package name.
function install_package {
    # Try each candidate package name with the platform's package manager and
    # return on the first success; returns 127 if nothing could be installed.
    case "${os_family}" in
        'Debian')
            ${priv_esc_cmd} apt-get update
            exit_status=127
            for package_name in "${@}"; do
                # Bug fix: removed redundant 'sudo' after ${priv_esc_cmd}
                ${priv_esc_cmd} apt-get --assume-yes --quiet install "${package_name}" && exit_status="${?}" && break
            done
            return "${exit_status}"
            ;;
        'RedHat')
            ${priv_esc_cmd} yum update
            exit_status=127
            for package_name in "${@}"; do
                # Bug fix: '-assumeyes' was missing a dash
                ${priv_esc_cmd} yum --assumeyes --quiet install "${package_name}" && exit_status="${?}" && break
            done
            return "${exit_status}"
            ;;
        'MacOSX')
            assert whichs brew
            brew update
            exit_status=127
            for package_name in "${@}"; do
                brew install "${package_name}" && exit_status="${?}" && break
            done
            return "${exit_status}"
            ;;
        'Alpine')
            ${priv_esc_cmd} apk update
            exit_status=127
            for package_name in "${@}"; do
                ${priv_esc_cmd} apk add "${package_name}" && exit_status="${?}" && break
            done
            return "${exit_status}"
            ;;

        *)
            color_echo red "Unsupported platform '${os_family}' for install_package function" >&2
            return 1
            ;;
    esac
}


# Verify that the system hostname resolves (via the hosts database) to an IP
# address actually assigned to one of this machine's interfaces.
function validate_hostfile {
    assigned_ip_addresses="$(ip -4 addr show | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*')"
    ip_address_in_hostfile="$(getent hosts | grep -e "\\b$(hostname)\\b" | awk '{print $1}')"

    debug 10 "Currently assigned IP addresses: ${assigned_ip_addresses}"
    debug 10 "IP address associated with hostname on hostfile: ${ip_address_in_hostfile}"

    if echo "${assigned_ip_addresses}" | grep -q "${ip_address_in_hostfile}" ; then
        debug 8 "Hostname found in hostfile and resolves to IP address on the system"
    else
        color_echo red "Unable to resolve hostname to any IP address on the system"
        exit_on_fail
    fi
}

# URI parsing function
#
# The function creates global variables with the parsed results.
# It returns 0 if parsing was successful or non-zero otherwise.
#
# [schema://][user[:password]@]host[:port][/path][?[arg1=val1]...][#fragment]
#
# Originally from: http://vpalos.com/537/uri-parsing-using-bash-built-in-features/
function uri_parser {
    # uri capture
    uri="${*}"

    # safe escaping
    uri="${uri//\`/%60}"
    uri="${uri//\"/%22}"

    # top level parsing
    pattern='^(([a-z]{3,5})://)?((([^:\/]+)(:([^@\/]*))?@)?([^:\/?]+)(:([0-9]+))?)([:\/][^?]*)?(\?[^#]*)?(#.*)?$'
    [[ "${uri}" =~ ${pattern} ]] || [[ "${uri}" =~ ssh://${pattern} ]] || return 1;

    # component extraction
    uri=${BASH_REMATCH[0]}
    uri_schema=${BASH_REMATCH[2]}
    uri_address=${BASH_REMATCH[3]}
    uri_user=${BASH_REMATCH[5]}
    uri_password=${BASH_REMATCH[7]}
    uri_host=${BASH_REMATCH[8]}
    uri_port=${BASH_REMATCH[10]}
    uri_path=${BASH_REMATCH[11]}
    uri_query=${BASH_REMATCH[12]}
    uri_fragment=${BASH_REMATCH[13]}

    # path parsing: split /a/b/c into uri_parts[0..n]
    local count
    count=0
    path="${uri_path}"
    pattern='^/+([^/]+)'
    while [[ ${path} =~ ${pattern} ]]; do
        eval "uri_parts[${count}]=\"${BASH_REMATCH[1]}\""
        path="${path:${#BASH_REMATCH[0]}}"
        (( count++ )) && true
    done
    # query parsing: populate uri_args[] and per-argument uri_arg_<name> variables
    count=0
    query="${uri_query}"
    pattern='^[?&]+([^= ]+)(=([^&]*))?'
    while [[ ${query} =~ ${pattern} ]]; do
        eval "uri_args[${count}]=\"${BASH_REMATCH[1]}\""
        eval "uri_arg_${BASH_REMATCH[1]}=\"${BASH_REMATCH[3]}\""
        query="${query:${#BASH_REMATCH[0]}}"
        (( count++ )) && true
    done

    # (typo fix: "paring" -> "parsing")
    debug 8 "Uri parser parsing summary:"
    debug 8 "uri_parser: uri          -> ${uri}"
    debug 8 "uri_parser: uri_schema   -> ${uri_schema}"
    debug 8 "uri_parser: uri_address  -> ${uri_address}"
    debug 8 "uri_parser: uri_user     -> ${uri_user}"
    debug 8 "uri_parser: uri_password -> ${uri_password}"
    debug 8 "uri_parser: uri_host     -> ${uri_host}"
    debug 8 "uri_parser: uri_port     -> ${uri_port}"
    debug 8 "uri_parser: uri_path     -> ${uri_path}"
    debug 8 "uri_parser: uri_query    -> ${uri_query}"
    debug 8 "uri_parser: uri_fragment -> ${uri_fragment}"

    # return success
    return 0
}

## Create a uri back from all the variables created by uri_parser
# [schema://][user[:password]@]host[:port][/path][?[arg1=val1]...][#fragment]
function uri_unparser {
    working_uri="${uri_schema}://"
    if [ -n "${uri_user}" ] && [ -n "${uri_password}" ] ; then
        working_uri+="${uri_user}:${uri_password}@"
    fi
    working_uri+="${uri_host}"
    if [ -n "${uri_port}" ] ; then
        working_uri+=":${uri_port}"
    fi
    if [ -n "${uri_path}" ] ; then
        working_uri+="${uri_path}"
    fi
    if [ -n "${uri_query}" ] ; then
        working_uri+="?${uri_query}"
    fi
    if [ -n "${uri_fragment}" ] ; then
        working_uri+="#${uri_fragment}"
    fi
    echo "${working_uri}"
}

## Uniform Resource Identifier (URI) Hostname to Fully Qualified Domain Name (FQDN)
# Opportunistically resolves the hostname portion of a URI, and replaces it
# with a FQDN using the Name Service Switch (nsswitch) library hosts database.
# If URI hostname resolves, or if no match is found, then it uses the unresolved
# hostname of the original URI. Returns status code 1 if URI fails to parse.
## Example
# $ uri_hostname_to_fqdn http://app:8080
# http://app.example.com:8080
function uri_hostname_to_fqdn {
    uri="${*}"
    uri_parser "${uri}" || return 1

    local host_entries
    host_entries=$(getent hosts "${uri_host}")

    # Hostname is already known to the hosts library -> nothing to rewrite
    if echo "${host_entries}" | grep -E -q "(^| )${uri_host}( |$)"; then
        echo "${uri}"
        return 0
    fi

    # Otherwise try appending each domain listed under "search" in /etc/resolv.conf
    local candidate_host
    local search_line=($(grep -e '^search' /etc/resolv.conf))
    for candidate_domain in "${search_line[@]:1}"; do # first element is "search", skip
        candidate_host="${uri_host}.${candidate_domain}"
        if echo "${host_entries}" | grep -E -q "(^| )${candidate_host}( |$)"; then
            # Found a match: adopt it as the URI host and stop looking
            uri_host="${candidate_host}"
            break
        fi
    done

    # Rebuild the URI from the (possibly updated) components and print it
    uri_unparser
}

## Strip all leading/trailing whitespaces
function strip_space {
    echo -n "${@}" | sed -e 's/^[[:space:]]*//;s/[[:space:]]*$//'
}

# Load ini file parameter
# Requires at least two arguments and optionally accepts a third, ini_section
# If ini_section is specified and multiple sections match, an error will be
# raised. If no ini_section is specified and multiple parameter names match
# they will all be returned.
# To strip leading/trailing whitespace simply pipe to sed -e 's/^[[:space:]]*//g'
function load_ini_file_parameter {
    # Arguments: 1: ini file path, 2: parameter name, 3: optional [section] name
    local filename="${1}"
    local name="${2}"
    local ini_section="${3:-}"
    # Bug fix: every file reference below was a corrupted "$(unknown)" command
    # substitution; the ${filename} local was clearly intended throughout.
    debug 10 "Loading INI file parameter: ${name} from file: ${filename}, optional section ${ini_section}"

    if [ -n "${ini_section}" ]; then
        #shellcheck disable=SC2086
        ini_section_match="$(grep -c "\[${ini_section}\]" "${filename}")"
        if [ "${ini_section_match}" -lt 1 ]; then
            color_echo red "Unable to find INI section matching ${ini_section}"
            return 1
        elif [ "${ini_section_match}" -eq 1 ]; then
            debug 9 "Found INI section ${ini_section}"
            # Print from the section header to the next section, then grab the
            # first matching "name=value" line and emit the value
            sed -n "/\[${ini_section}\]/,/\[/p" "${filename}" | grep --max-count=1 -E "^${name}" | awk -F= '{print $2}'
        else
            color_echo red "Multiple sections match the INI section specified: ${ini_section}"
            exit 1
        fi
    else
        grep -E "^${name}" "${filename}" | awk -F= '{print $2}'
    fi
}

# This function is used to safely edit ini style config files parameters.
# This function will return 0 on success or 1 if it fails to change the value
#
# OPTIONS:
#   -n  Filename, for example: /tmp/config_file
#   -p  Regex pattern, for example: ^[a-z]*
#   -v  Value, the value to replace with, can include variables from previous regex
#       pattern, if omitted the pattern is used as the value
#   -a  Append, if this flag is specified and the pattern does not exist it will be
#       created, takes an optional argument which is the [INI] section to add the pattern to
#   -o  Opportunistic, don't fail if pattern is not found, takes an optional argument
#       which is the number of matches expected/required for the change to be performed
#   -c  Create, if file does not exist we create it, assumes append and opportunistic
function edit_ini_file_parameter {
    local OPTIND
    local OPTARG
    local opt
    local filename
    local pattern
    local new_value
    local ini_section
    local force=false
    local opportunistic=false
    local create=false
    local append=false
    local req_matches=1

    # Handle arguments
    while getopts "n:p:v:aoc" opt; do
        case ${opt} in
            'n')
                filename="${OPTARG}"
                ;;
            'p')
                # Properly escape control characters in pattern
                pattern="$(echo "${OPTARG}" | sed -e 's/[\/&]/\\\\&/g')"
                debug 10 "Pattern set to ${pattern}"

                # If value is not set we set it to pattern for now
                if [ "${new_value:-}" == "" ]; then
                    new_value="${pattern}"
                fi
                ;;
            'v')
                # Properly escape control characters in new value
                new_value="$(echo "${OPTARG}" | sed -e 's/[\/&]/\\\\&/g')"
                ;;
            'a')
                append=true
                parse_opt_arg ini_section
                ;;
            'o')
                opportunistic=true
                parse_opt_arg req_matches
                ;;
            'c')
                create=true
                append=true
                opportunistic=true
                ;;
            *)
                print_usage
        esac
    done
    # Cleanup getopts variables
    unset OPTSTRING OPTIND

    # Make sure all required parameters are provided
    # (bug fix: every "$(unknown)" in this function was a corrupted reference
    # to the ${filename} variable; also removed an unused 'local n p v a o c'
    # and duplicate 'local force' declarations)
    if [ -z "${filename:-}" ] || [ -z "${pattern:-}" ] && ! ${append} || [ -z "${new_value:-}" ]; then
        color_echo red "${FUNCNAME[0]} requires filename, pattern and value to be provided"
        color_echo magenta "Provided filename: ${filename:-}"
        color_echo magenta "Provided pattern: ${pattern:-}"
        color_echo magenta "Provided value: ${new_value:-}"
        exit 64
    fi

    # Check to make sure file exists and is normal file, create if needed and specified
    if [ -f "${filename}" ]; then
        debug 10 "${filename} found and is normal file"
    else
        if [ ! -e "${filename}" ] && ${create} ; then
            # Create file if nothing exists with the same name
            debug 10 "Created new file ${filename}"
            ${priv_esc_cmd} touch "${filename}"
        else
            color_echo red "File ${filename} not found or is not regular file"
            exit 74
        fi
    fi

    # Count matches
    num_matches="$(${priv_esc_cmd} grep -c "${pattern}" "${filename}")"

    # Handle replacements
    if [ -n "${pattern}" ] && [ "${num_matches}" -eq "${req_matches}" ]; then
        ${priv_esc_cmd} sed -i -e 's/'"${pattern}"'/'"${new_value}"'/g' "${filename}"
    # Handle appends
    elif ${append} ; then
        if [ "${ini_section}" != "" ]; then
            # (bug fix: over-escaped quotes here passed literal '"' characters
            # to grep, so the section count was always wrong)
            ini_section_match="$(${priv_esc_cmd} grep -c "\[${ini_section}\]" "${filename}")"
            if [ "${ini_section_match}" -lt 1 ]; then
                echo -e '\n['"${ini_section}"']\n' | ${priv_esc_cmd} tee -a "${filename}" > /dev/null
            elif [ "${ini_section_match}" -eq 1 ]; then
                # Insert the value just before the first blank line after the section header
                ${priv_esc_cmd} sed -i -e '/\['"${ini_section}"'\]/{:a;n;/^$/!ba;i'"${new_value}" -e '}' "${filename}"
            else
                color_echo red "Multiple sections match the INI file section specified: ${ini_section}"
                exit 1
            fi
        else
            echo "${new_value}" | ${priv_esc_cmd} tee -a "${filename}" > /dev/null
        fi
    # Handle opportunistic, no error if match not found
    elif ${opportunistic} ; then
        color_echo magenta "Pattern: ${pattern} not found in ${filename}, continuing"
    # Otherwise exit with error
    else
        color_echo red "Found ${num_matches} matches searching for ${pattern} in ${filename}"
        color_echo red "This indicates a problem, there should be only one match"
        exit 1
    fi
}

# A function to make the ssh environment from a user available to the root
# user when running as a superuser via the priv_esc_cmd function
function link_ssh_config {
    # If root has no ssh config but pre-sudo user does we use the users config during the run
    if ! ${priv_esc_cmd} test -e /root/.ssh/config ; then
        if [ -z "${SUDO_USER_HOME}" ] && [ "${HOME}" != "/root" ]; then
            debug 10 "Did not find SUDO_USER_HOME varible setting to ${HOME}"
            SUDO_USER_HOME="${HOME}"
        fi
        if [ -f "${SUDO_USER_HOME}/.ssh/config" ]; then
            # Make sure .ssh directory exists and has correct permissions
            ${priv_esc_cmd} mkdir -p "/root/.ssh" && sudo chmod 700 "/root/.ssh"
            color_echo green "Copying ${SUDO_USER_HOME}/.ssh/config to /root/.ssh/config for this session"
            color_echo green "Please note that for future/automated r10k runs you might need to make this permanent"
            ${priv_esc_cmd} cp "${SUDO_USER_HOME}/.ssh/config" '/root/.ssh/config'
            ${priv_esc_cmd} chown root "/root/.ssh/config" && ${priv_esc_cmd} chmod 700 "/root/.ssh/config"
            # NOTE(review): argument order looks suspect — probably meant
            # add_on_sig "${priv_esc_cmd} rm -f /root/.ssh/config"; confirm before changing
            add_on_sig ${priv_esc_cmd} "rm -f /root/.ssh/config"
        fi
    else
        debug 10 "Running as user: $(whoami)"
        debug 10 "Found User home: ${SUDO_USER_HOME}"
        color_echo magenta "Not running as root or root user already has an SSH config, please make sure it's correctly configured as needed for GIT access"
    fi
}

#Creates a tar archive where all paths have been made relative
function create_relative_archive {
    debug 10 "Creating relative archive ${1}"
    local archive_path="${1}"
    local arguments=("${@}")
    local source_elements=("${arguments[@]:1}")
    local transformations=()
    local archive_operation="${archive_operation:-create}"
    assert in_array "${archive_operation}" 'create' 'append' 'update'


    local verbose_flag=''
    if [ "${verbosity}" -ge 5 ]; then
        local verbose_flag=' -v'
    fi
    # Iterate this way to avoid whitespace filename
# NOTE(review): the following fragment is corrupted in this copy of the file —
# the tail of create_relative_archive and the head of inline_bash_source were
# lost in transit. Recover both from the canonical shtdlib.sh before use.
bugs - num_transformations=${#source_elements[@]} - for (( i=1; i> "${inline_dest_file}" - chmod --reference="${inline_source_file}" "${inline_dest_file}" - debug 10 "Wrote combined source to ${inline_dest_file}" - fi -}

# Creates an executable tar archive that can extract and run itself
# Note that any script that's provided should not require any parameters
# and should source/include this library file
# Any special commands or things that should be done after extracting the
# archive should be defined in a function called run_if_exec_archive, note that
# the archive will be extracted into a tmp dir name stored in ${tmp_archive_dir}
# Note that run_if_exec_archive will need to be defined before
# importing/sourcing this file
# Note that the archive should be in .tar.gz format
function create_exec_archive {
    # An executable archive is just a bash script concatenated with an archive
    # but separated with a marker __ARCHIVE_FOLLOWS__
    local target_binary="${1}"
    local entry_script="${2}"
    local payload_archive="${3}"
    debug 10 "Creating binary ${target_binary} using ${entry_script} and ${payload_archive}"
    # create_secure_tmp will store return data into the first argument
    create_secure_tmp tmp_script_file 'file'
    # shellcheck disable=SC2154
    inline_bash_source "${entry_script}" "${tmp_script_file}"
    debug 10 "Created temporary inlined script file at: ${tmp_script_file}"
    cat "${tmp_script_file}" > "${target_binary}" || exit_on_fail
    echo '__ARCHIVE_FOLLOWS__' >> "${target_binary}" || exit_on_fail
    cat "${payload_archive}" >> "${target_binary}" || exit_on_fail
    chmod +x "${target_binary}"
    debug 3 "Finished writing binary: ${target_binary}"
}

# Slugifies a string
function slugify {
    echo "${*}" | sed -e 's/[^[:alnum:]._\-]/_/g' | tr -s '-' | tr '[:upper:]' '[:lower:]'
}

# Converts a string to upper case
function _upper {
    local string="${*}"
    if "${bash_pre_v4}" ; then
        # bash < 4 has no ${var^^} expansion; fall back to tr
        echo "${string}" | tr '[:lower:]' '[:upper:]'
    else
        echo "${string^^}"
    fi
}
function
upper { - # First command needs to be read, this way any piped input goes to it - while read -rt "${read_timeout:-1}" piped_data; do - declare -a piped_string - debug 10 "String piped to ${FUNCNAME}: ${piped_data}" - # shellcheck disable=2086 - piped_string+=( ${piped_data} ) - done - _upper "${*}${piped_string[*]}" -} - -# Converts a string to lower case -function _lower { - local string="${*}" - if "${bash_pre_v4}" ; then - echo "${string}" | tr '[:upper:]' '[:lower:]' - else - echo "${string,,}" - fi -} -function lower { - # First command needs to be read, this way any piped input goes to it - while read -rt "${read_timeout:-1}" piped_data; do - declare -a piped_string - debug 10 "String piped to ${FUNCNAME}: ${piped_data}" - # shellcheck disable=2086 - piped_string+=( ${piped_data} ) - done - _lower "${*}${piped_string[*]}" -} - -# Load default login environment -function get_env { - # Load all default settings, including proxy, etc - declare -a env_files - env_files=('/etc/environment' '/etc/profile') - for env_file in "${env_files[@]}"; do - if [ -e "${env_file}" ]; then - debug 10 "Sourcing ${env_file}" - #shellcheck source=/dev/null - source "${env_file}" - else - debug 10 "Env file: ${env_file} not present" - fi - done -} - -# Pick pidfile location if it's ever needed -if [ "${EUID}" -eq "0" ]; then - pid_prefix="/var/run/" -else - pid_prefix="/tmp/.pid_" -fi - -# Check for or create a pid file for the program -# takes program/pidfile name as a first parameter, this is the unique ID -# Exits with error if a previous matching pidfile is found -function init_pid { - pidfile="${pid_prefix}${1}" - if [ -f "${pidfile}" ]; then - file_size="$(wc -c < "${pidfile}")" - file_type="$(file -b "${pidfile}")" - max_file_size=$(cat < '/proc/sys/kernel/pid_max' | wc -c) - max_pid=$(cat < /proc/sys/kernel/pid_max) - if [ "${file_size}" -le "${max_file_size}" ] && [ "${file_type}" == 'ASCII text' ]; then - pid="$(cat "${pidfile}")" - if [ "${pid}" -le "${max_pid}" ]; then 
- if [ "$(pgrep -cF "${pidfile}")" -eq 1 ]; then - color_echo green "Process with PID: ${pid} already running" - return 129 - else - color_echo red "Pidfile ${pidfile} already exists, but no process found with PID: ${pid}" - return 130 - fi - else - color_echo red "Pidfile ${pidfile} does not contain a real PID, value ${pid} is larger than max allowed pid of ${max_pid}" - return 1 - fi - else - color_echo red "Pidfile ${pidfile} is either too large or not of type ASCII, make sure it's a real PID file" - return 1 - fi - else - echo "${$}" > "${pidfile}" && add_on_sig "rm -f ${pidfile}" - return 0 - fi -} - -# Send success signal to other process by name -function signal_success { - signal "${1}" "SIGCONT" "Success" -} - -# Send failure signal to other process by name if send_failure_signal is true -send_failure_signal="${send_failure_signal:-true}" -function signal_failure { - if ${send_failure_signal} ; then - signal "${1}" "SIGUSR2" "Failure" - fi -} - -# Send a signal to process, read pid from file or search by name -# Parameters are: filename/processname signal message -function signal { - pidfile="${pid_prefix}${1}" - # Check if first parameter is pidfile or process name/search string - if init_pid "${1}" > /dev/null || [ ${?} == 129 ]; then - other_pids="$(cat "${pidfile}")" - else - other_pids="$(pgrep -f -d ' ' "${1}")" - fi - if [ "${other_pids}" != "" ]; then - kill -s "${2}" "${other_pids}" - color_echo cyan "Signalled ${3} to PID(s): ${other_pids}" - else - debug 5 "Unable to find process '${1}' to signal" - fi -} - -# Trim whitespaces from strings -function trim { - local var="${1}" - var="${var#"${var%%[![:space:]]*}"}" # remove leading whitespace characters - var="${var%"${var##*[![:space:]]}"}" # remove trailing whitespace characters - echo -n "${var}" -} - -# Sort array elements, accepts name of array to sort, defaults to unique sort -# but can be configured by setting the sort_command -function sort_array { - declare -ga "${1}" - local 
array_name="${1}" - local array_elements=( $(eval echo '${'"${array_name}"'[@]}') ) - sort_command="${sort_command:-sort -u}" - readarray -t "${1}" < <(for element in "${array_elements[@]}"; do echo "${element}"; done | ${sort_command}) -} - -# Creates an associative array from an array of variable names setting the -# values as the variable values. -# Accepts the name of an array to expand and the name of the associative array -# to be created. -# Unset or empty variables will raise an error unless -# ignore_missing_associate_value is set to true in which the key/value will be -# skipped. -function associate_array { - local source_array_name="${1}" - local array_elements=( $(eval echo '${'"${source_array_name}"'[@]}') ) - local new_array_name="${2}" - debug 10 "Creating associative array: ${new_array_name} from: ${source_array_name} with elements: ${array_elements[*]}" - declare -gA "${new_array_name}" - - for key in "${array_elements[@]}" ; do - debug 10 "Processing associate key: ${key}" - if [ -n "${!key:-}" ] ; then - debug 10 "Setting ${new_array_name}[${key}] to ${!key}" - eval ${new_array_name}[${key}]=${!key} - elif ! 
${ignore_missing_associate_value:-false} ; then - error 0 "No variable found to be set with name ${key}" - exit_on_fail - fi - done -} - -# Safely loads config file -# First parameter is filename, all consequent parameters are assumed to be -# valid configuration parameters -function load_config { - config_file="${1}" - # Verify config file permissions are correct and warn if they aren't - # Dual stat commands to work with both linux and bsd - while read -r line; do - if [[ "${line}" =~ ^[^#]*= ]]; then - setting_name="$(echo "${line}" | awk -F '=' '{print $1}' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" - setting_value="$(echo "${line}" | cut -f 2 -d '=' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" - - for requested_setting in "${@:2}" ; do - if [ "${requested_setting}" == "${setting_name}" ] ; then - export "${setting_name}"="${setting_value}" - debug 10 "Loaded config parameter ${setting_name} with value of '${setting_value}'" - fi - done - fi - done < "${config_file}"; -} - -# Load settings from config file if they have not been set already -# First parameter is filename, all consequent parameters are assumed to be -# configuration parameters -function load_missing_config { - declare -a new_settings - new_settings=() - for setting in "${@:2}"; do - if [ -z "${!setting:-}" ] ; then - new_settings+=( "${setting}" ) - fi - done - if [ -n "${new_settings[*]:-}" ] ; then - debug 10 "Attempting to load missing settings: ${new_settings[*]} from config file: '${1}'" - load_config "${1}" "${new_settings[@]}" - else - #shellcheck disable=SC2145 - debug 5 "No missing settings to load, all specified settings already set for: ${@:2}" - fi -} - -# Make sure symlink exists and points to the correct target, will remove -# symlinks pointing to other locations or do nothing if it's correct. -function ln_sf { - # Check for the minimum number of arguments - if [ ${#@} -lt 2 ]; then - color_echo red "Called 'ln_sf' with less than 2 arguments." 
- exit_on_fail - fi - - target_path="${1}" - link_path="${2}" - assert test -e "${target_path}" - debug 10 "Creating symlink at ${2} pointing to ${1}" - if [ -L "${link_path}" ] ; then - current_target="$(readlink "${link_path}")" - if [ "${current_target}" != "${target_path}" ] ; then - debug 6 "Removing existing symlink: ${link_path}" - rm -f "${link_path}" - else - debug 6 "Current symlink at ${link_path} already points to ${target_path}" - return 0 - fi - elif [ -e "${link_path}" ]; then - color_echo red "Found filesystem object at: ${link_path} but it's not a symlink, fatal error, exiting!" - exit_on_fail - fi - # Create symlink - ln -s "${target_path}" "${link_path}" - debug 10 "Successfully created symlink" -} - -# Create string of random characters -# - First param is length, default: 20 -# - Second param is characters, default: A-Za-z0-9_ (Note: '-' specifies range) -function gen_rand_chars { - shopt_decorator_option_name='pipefail' - shopt_decorator_option_value='false' - # shellcheck disable=2015 - shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" - - local length="${1:-20}" - local chars="${2:-A-Za-z0-9_}" - debug 10 "Creating a string of random characters of length: ${length} and chars: ${chars}" - LC_CTYPE=C tr -dc "${chars}" < '/dev/urandom' | head -c "${length}" -} - -# Checks if an environment variable is set and contains a string longer than -# 0, if not then it's set to a random value. -# If a file name/path is specified then a line containing VARIABLE=VALUE is -# written to the end of the file. Optionally the length of the random -# string/value can be specified. 
(defaults to 50) -function check_set_persist_random_variable { - local var_name="${1}" - local file_path="${csprv_file_path:-${2:-}}" - local key_length="${csprv_key_length:-${3:-50}}" - assert test -n "${var_name}" - if [ -z "${!var_name:-}" ] ; then - debug 11 "No variable named ${var_name} found, generating a random string" - export "${var_name}"="$(gen_rand_chars "${key_length}")" - - if [ -n "${file_path}" ] ; then - if [ -e "${file_path}" ] ; then - debug 10 "Writing variable key/value to file ${file_path}" - echo "${var_name}=${!var_name}" >> "${file_path}" - else - color_echo red "Unable to find/open file: ${file_path}" - exit_on_fail - fi - else - debug 10 "${FUNCNAME[0]} no file_path specified, setting ${var_name} but not persisting" - fi - else - debug 10 "Variable ${var_name} is already set" - fi -} - -function manage_service { - # Ensure all arguments are passed in - local items=( ${@} ) - assert [ "${#items[@]}" -eq 2 ] - - # Set args into meaningful names - local service="${1}" - local action="${2}" - - # Disable paging when using systemd - if command -v systemd &> /dev/null; then - export SYSTEMD_PAGER='cat' - fi - - local commands=("/etc/init.d/${service} ${action}") # init.d - commands+=("/usr/sbin/service ${service} ${action}") # Old Redhat - commands+=("/sbin/service ${service} ${action}") # Old Debian - commands+=("/bin/systemctl ${action} ${service}") # Redhat systemd - commands+=("/usr/bin/systemctl ${action} ${service}") # Debian/other systemd - commands+=("${action} ${service}") # Upstart - - # Loop though each command - local command - for command in "${commands[@]}"; do - debug 10 "Checking command, '${command}', to determine if we can run it on this system" - - # Check if the path to the command exists - local path - path="$(echo "${command}" | cut -d' ' -f1)" - if [[ -e "${path}" ]]; then - debug 10 "Path to command found: '${path}'" - - # Run command - ${command} - return "${?}" - else - debug 10 "Path to command not found: '${path}'" 
- fi - done - - debug 10 'Exhausted init commands, try again with debug/verbosity for more information.' - return 1 -} - -tls_common_cert_attrib="${tls_common_cert_attrib:-/C=ZZ/ST=None/L=None/O=None/OU=None}" -tls_valid_days=${tls_valid_days:-3650} -tls_key_type="${tls_key_type:-rsa:4096}" - -# Creates a Certificate Authority if one does not exist in the CA cert path. -# Requires two arguments, paths to the key and certificate files. -# Optionally consumes COMMON_NAME variable and appends to CN attribute. -function tls_create_cert_authority { - assert whichs openssl - ca_key_path="${1}" - ca_cert_path="${2}" - common_name="${COMMON_NAME:-${HOSTNAME:-$(hostname --fqdn)}}" - assert test -n "${ca_cert_path}" - assert test -n "${ca_key_path}" - if ! [ -e "${ca_cert_path}" ] ; then - debug 8 "Creating CA: ${ca_cert_path} with key ${ca_key_path}" - openssl req -new -x509 -nodes -out "${ca_cert_path}" -keyout "${ca_key_path}" -subj "${tls_common_cert_attrib}/CN=${common_name}" -newkey "${tls_key_type}" -sha512 -days "${tls_valid_days}" - else - debug 8 "Certificate ${ca_cert_path} already exists, skipping!" - fi -} - -# Creates a new key/certificate pair and signs the certificate with a CA if a -# certificate does not already exist in the new cert path. -# Requires four arguments, new key path, new cert path, CA key path and CA cert -# path. Optionally consumes COMMON_NAME variable and appends to CN attribute -function tls_create_sign_cert { - assert whichs openssl - new_key_path="${1}" - new_cert_path="${2}" - ca_key_path="${3}" - ca_cert_path="${4}" - common_name="${COMMON_NAME:-${HOSTNAME:-$(hostname --fqdn)}}" - assert test -n "${new_cert_path}" - assert test -n "${new_key_path}" - assert test -n "${ca_cert_path}" && test -r "${ca_cert_path}" - assert test -n "${ca_key_path}" && test -r "${ca_key_path}" - if ! 
[ -e "${new_cert_path}" ] ; then - openssl req -new -keyout "${new_key_path}" -nodes -newkey "${tls_key_type}" -subj "${tls_common_cert_attrib}/CN=${common_name}" | \ - openssl x509 -req -CAkey "${ca_key_path}" -CA "${ca_cert_path}" -days "${tls_valid_days}" -set_serial "${RANDOM}" -sha512 -out "${new_cert_path}" - fi -} - -# Creates a self signed cert/key pair if a cert does not exist in the path. -# Requires two arguments, path to the key and cert to be created. -# Optionally consumes COMMON_NAME variable and appends to CN attribute. -function tls_create_self_signed_cert { - assert whichs openssl - new_key_path="${1}" - new_cert_path="${2}" - common_name="${COMMON_NAME:-${HOSTNAME:-$(hostname --fqdn)}}" - assert test -n "${new_cert_path}" - assert test -n "${new_key_path}" - openssl req -new -keyout "${new_key_path}" -nodes -newkey "${tls_key_type}" -subj "${tls_common_cert_attrib}/CN=${common_name}" -x509 -sha512 -days "${tls_valid_days}" -nodes -set_serial "${RANDOM}" -out "${new_cert_path}" -} - - - -alias "mantrap"='color_echo green "************,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,**********///****************************************///, .. .....**/////*,***//////////////////*/////////*** -> ,,,,,,,,,,,,,,,,,,,,,,,..,,,,,,,,,,********/////////////////////////////////////********************,,,**///////////////////,,**///////////////////////////*/// -> ,,,,,,************,,,,,,,,,,,,,...... .,*/**/*///////*//////////////////////////////******************,,,,**///////////////,,,**///////////////*//(////////// -> ,,,************************,. ...... ...,******//////////////////////**********///////********************,,*/////////////,,**///////////////////////////// -> /////////////////////////*,. ...... ....,*//////*////***************,,,,,,,,,,,,,*////*********************,,,*/////////*.,**//////////////////////////// -> //////////////////////*,,.. 
............,*********/*********************,,,,,,,,,*******************************,**/////*/////////////////////////////// -> */////////////////*,,.... ........,,,*****//*******************,**,,,**,**************************************///////*//////////////(///////////// -> ***********//////,...... .....,,,,,,,,**/////************,,,,,(#######*/%%%%%%%%%%###%%#/%%%%#//**(#&@@@@@&%(**////////////#%%%%%%%%%%(///(///// -> **********//////,...... .....,,,,,,,,,,*********,,,,,,,,,,,,,%@@@@@@&,/@@@@@@@@@@@@@@@@(@@@@&/*(@@@@@@@@@@@@@(*////*//////&@@@@@@@@@@%///((///* -> ***************,......... . ...,,,,,,,,,,,,,,,,,,.. #@@@@@@&./@@@@@@@@@@@@@@@@/@@@@&*(@@@@@@@@@@@@@@@#***////////&@@@@@@@@@@&/((((((/* -> ,,,,,..... ......... .... ...,,,,,,,,,**,. #@@@@@@&.*###%&@@@@@@%####,#@@@(,%@@@@@@#*(@@@@@@&*****/////(@@@@@@@@@@@@(((((((/* -> *,,,,.,. .. .... .. . ...,,,,,,,,,,**, #@@@@@@&. ,%@@@@@@/ .&@@# %@@@@@@#*(@@@@@@&*******///#@@@@@@@@@@@@#((((((/* -> ****,,,........ .... ..,,,,,,,,,,**, . .*,. #@@@@@@&. ,%@@@@@@/ ./. #@@@@@@@/*///////********/(%@@@@@&%@@@@@%((((((/* -> ((*,,.,*,... ...... ..,,,,,,,,,,**, . .,. #@@@@@@&. ,%@@@@@@/ ,@@@@@@@@@**************(&@@@@@##@@@@@&((((((/* -> /(. ,,*(/*,. . ....... ..,,,,,,,,,,,*, . . #@@@@@@&. ,%@@@@@@/ ,&@@@@@@@@@@&(***********%@@@@@@((@@@@@@#(((((/* -> ./ .,.. ..... ....... ..... ..,,,,,,,,,,***, #@@@@@@&. ,%@@@@@@/ /%@@@@@@@@@@%**********&@@@@@@((&@@@@@#(((((/* -> ........ ...... ...... ..,,,,,,,,,****. .,,, #@@@@@@&. ,%@@@@@@/ *&@@@@@@@@&********/@@@@@@&//&@@@@@((((/* -> . .,.... . .. ... ..........,,,,**,,**,*.. ..,,,, #@@@@@@&. *%@@@@@@/ /%%%%%%/ #@@@@@@@/*******(@@@@@@@&&&@@@@@@%((((/* -> .. ,,......... ... . ........... .,,,,,.,,****. .,,,**, #@@@@@@&. ,%@@@@@@/ (@@@@@@( &@@@@@@#*******%@@@@@@@@@@@@@@@@&((((/* -> ,,. .,........... . . .............. ..,,,,.,,,,**. ...,*,. #@@@@@@&. ,%@@@@@@/ /@@@@@@( %@@@@@@#*******&@@@@@@@@@@@@@@@@@(((((/ -> ... .. . .... ..... . . .......,,,,,,..,* ...., #@@@@@@&. 
,%@@@@@@/ ,@@@@@@&*(@@@@@@@,******/@@@@@@@&//(@@@@@@@#((((/ -> ... . ... ... ... .....,,,. .*, .. .,. #@@@@@@&. ,%@@@@@@/ /@@@@@@@@@@@@@@( ****/@@@@@@@%//(@@@@@@@%((((/ -> ..,.**, ... . .... ... ......,**/, .* .//,.. (&&&&&&%. ,#&&&&&&* *#@@@@@@@@&/ ***#@@@@@@@(//(&@@@@@@&((((( -> ..,**. .,...... .. .... .......,*//... .. .,.., ..... ***********////(((((((((((( -> . .... .. ...,. .. .... .,*.......... #@@@@@@@@@@@@@@@@# #@@@@@@@@@@@&%(. %@@@@@@@@@@@. ***#&@@@@@@@@@@@&%#((((((( -> ,(*. .,.(/*, .,,... #@@@@@@@@@@@@@@@@# #@@@@@@@@@@@@@@@%. .@@@@@@@@@@@@/ **#@@@@@@@@@@@@@@@@#((((( -> ,*////(/,. , . . #@@@@@@@@@@@@@@@@# #@@@@@@@%&@@@@@@@#, *@@@@@@@@@@@@% .*#@@@@@@@@&@@@@@@@@((((( -> .**********//,, .*,,. ...... .... ....*@@@@@@@%.... #@@@@@@@, ,@@@@@@&* (@@@@@@@@@@@@&. .#@@@@@@@@//&@@@@@@#(((( -> **************,,. ... .... ...... ,@@@@@@@% #@@@@@@@, .&@@@@@&/ %@@@@@@#@@@@@@, (&@@@@@@@//&@@@@@@#(((( -> ,,,***********,,. .........,,,,,,,, ,@@@@@@@% #@@@@@@@, ,@@@@@@%, .@@@@@@&*@@@@@@( (&@@@@@@@/*&@@@@@@#(((( -> ,,*,*****,,****,. . ...,,,***(* ,@@@@@@@# #@@@@@@@&@@@@@@@@/ ,@@@@@@%.@@@@@@% (&@@@@@@@/(@@@@@@@#(((( -> *,,,*****,,,****,. . .....,,,(((#(//***,. ,@@@@@@@# #@@@@@@@@@@@@@%* (@@@@@@( &@@@@@@. (&@@@@@@@@@@@@@@@&((((( -> *,,,*****,,,,,***,. ...,,*, .*/((((((((*. ,@@@@@@@# #@@@@@@@##&@@@@@@, %@@@@@@, %@@@@@@* (&@@@@@@@@@@@@@&%(((((( -> *,,,,,,,*,,,,,,,*,,. ..,,,, .*(##((((((((///,. ,@@@@@@@# #@@@@@@@, .@@@@@@%* .@@@@@@@, (@@@@@@# (&@@@@@@&/*******(((((( -> ,,,,,,,,**,,,,,,,***, . ..,, .*/(#((((((((((/*, ,@@@@@@@# #@@@@@@@, .@@@@@@&/ ,@@@@@@@@@@@@@@@@& (&@@@@@@@/*******/((((( -> ,,,,,,,,,,,,,,,,,,,***/*. . ,/((###(((((((((/ ,@@@@@@@% #@@@@@@@, .@@@@@@&/ (@@@@@@@@@@@@@@@@@, (&@@@@@@@/********((((( -> ,,,,,,,,,,,,,,,,,,,,,,,**/*,. .*/(((####((((((((((. ,@@@@@@@% #@@@@@@@, .@@@@@@&/ %@@@@@@@%%%@@@@@@@( (&@@@@@@@/********/(((( -> ,,,,,,,,,,,,,,,,,,,,,,,,,,,**********,,***////(((######((((((((((#, ,@@@@@@@% #@@@@@@@, .@@@@@@&/.@@@@@@@&. 
.@@@@@@@% (&@@@@@@@/*********(((( -> ,,,,,,,,,,,,,,,,,,,,,,,,,.,,,,,*******/////(((((#######((((((((((##. ,@@@@@@@# #@@@@@@@, .&@@@@@&/,@@@@@@@& .&@@@@@@&.(&@@@@@@@/**,,*****/((( -> ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,****///((((((#####((((((((((((###* .*******, ,*******. *******.,/(/****, *//*/***..******((/**,,,*****((( -> ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,******/////((((((#####(((((((((((((((( ..,,. . ,*****,,*****/(( -> ,,,,,,,,,,,,,,,,,,,,,,,,,,,**********//////(((((((#####(((((((((((((((. ..,. .,,,.. ,****,,,,****(( -> ..,,,,,,,,,,,,,,,,,,..,,,,,,***///////////////((((#####((((((((((((((#. . .... .,,,.. .****,,,,****(( -> .........,,,,,,,,,,,......,,,**////////////////((((((##(((((((((((((##. . ...,,,,,,.. ,***,,,,****/("' - - -# Unit tests -# -# Short tests should be placed in the test_shtdlib function, longer and more -# elaborated tests should be placed in their own functions and called from -# test_shtlib - -# Test function to decorate -function test_shopt_decorator { - shopt_decorator_option_name='pipefail' - shopt_decorator_option_value=true - # shellcheck disable=2015 - shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" - echo "${*}" - shopt -o pipefail - assert shopt -qo pipefail && color_echo green "Successfully decorated ${FUNCNAME[0]} with pipefail" -} - -# Test signaling -function test_signal_process { - signal_processor SIGUSR2 'exit 42' > /dev/null - local sub_pid_0="${!}" - signal_processor SIGUSR1 "sleep 2 && kill -s SIGUSR2 ${sub_pid_0} && exit 42" > /dev/null - local sub_pid_1="${!}" - debug 10 "Spawned sub processes using signal processor with pids: ${sub_pid_0} and ${sub_pid_1}" - debug 10 "Active sub processes are: $(pgrep -P ${$} | tr '\n' ' ')" - signal_process "${sub_pid_1}" SIGUSR1 > /dev/null - debug 10 "Waiting for sub processes to exit" - bash -c "sleep 10 && kill ${sub_pid_0} &> /dev/null" & - bash -c "sleep 10 && kill ${sub_pid_1} &> /dev/null" & - 
while pgrep -P ${$} > /dev/null ; do - debug 10 "Waiting for ${sub_pid_0}" - # Make sure the sub process exits with 42 - wait ${sub_pid_0} &> /dev/null || assert [ "${?}" == '42' ] - color_echo green "Sub process was signaled, responded and properly exited" - return 0 - done - color_echo red "Signaling and sub process test failed" - return 1 -} - -# Test filesystem monitoring/event triggers -# shellcheck disable=SC2120 -function test_add_on_mod { - shopt_decorator_option_name='errexit' - shopt_decorator_option_value='false' - # shellcheck disable=2015 - shopt_decorator "${FUNCNAME[0]}" "${@:-}" && return || conditional_exit_on_fail 121 "Failed to run ${FUNCNAME[0]} with shopt_decorator" - - if ! ( whichs inotifywait || whichs fswatch ) ; then - debug 4 "Unable to locate inotify or fswatch, trying to install them" - install_package inotify-tools fswatch - fi - - signal_processor SIGUSR1 'exit 42' > /dev/null - local signaler_pid="${!}" - local tmp_file_path - tmp_file_path="$(mktemp)" - add_on_exit "rm -f ${tmp_file_path}" - debug 10 "Using temporary file: ${tmp_file_path} to test add_on_mod" - max_frequency=5 add_on_mod "signal_process ${signaler_pid} SIGUSR1 &> /dev/null" "${tmp_file_path}" & - mod_watcher_pid="${!}" - bash -c "sleep 2 && echo 'test message' > '${tmp_file_path}'" - bash -c "sleep 10 && kill ${signaler_pid} &> /dev/null" & - while pgrep -P ${$} > /dev/null ; do - debug 10 "Waiting for PID ${signaler_pid} to exit" - wait "${signaler_pid}" &> /dev/null - return_status="${?}" - # Make sure the sub process exits with 42 - if [ "${return_status}" != '42' ] ; then - debug 1 "Got return status ${return_status} when waiting for ${signaler_pid} to exit" - exit_on_fail - fi - color_echo green "Sub process was signaled by file system monitoring thread, responded and properly exited" - debug 10 "Signaling mod_watcher ${mod_watcher_pid} to exit" - kill "${mod_watcher_pid}" - return 0 - done - color_echo red "Filesystem modification monitoring and trigger 
testing failed" - return 1 -} - -# Test function for create_secure_tmp function -function test_create_secure_tmp { - local tmp_file - local tmp_dir - - # Test 2 arguments - create_secure_tmp "tmp_file" "file" - create_secure_tmp "tmp_dir" "dir" - - assert [ -e "${tmp_file}" ] - assert [ "$(stat -c %a "${tmp_file}")" -eq 600 ] - echo 'test' > "${tmp_file}" - assert grep test "${tmp_file}" > /dev/null - - assert [ -e "${tmp_dir}" ] - assert [ "$(stat -c %a "${tmp_dir}")" -eq 700 ] - touch "${tmp_dir}/test" - assert [ -e "${tmp_dir}/test" ] - - # Test 3 arguments - create_secure_tmp "tmp_file2" "file" "${tmp_dir}" - create_secure_tmp "tmp_file3" "file" "/tmp/tmp_file3" - create_secure_tmp "tmp_dir2" "dir" "/tmp/tmp.new_dir" - - assert [ -e "${tmp_file}" ] - assert [ "$(stat -c %a "${tmp_file}")" -eq 600 ] - echo 'test' > "${tmp_file}" - assert grep test "${tmp_file}" > /dev/null - - assert [ -e "${tmp_file}" ] - assert [ "$(stat -c %a "${tmp_file}")" -eq 600 ] - echo 'test' > "${tmp_file}" - assert grep test "${tmp_file}" > /dev/null - - assert [ -e "${tmp_dir}" ] - assert [ "$(stat -c %a "${tmp_dir}")" -eq 700 ] - - color_echo green 'Temporary files and directories successfully created and tested' - return 0 -} - -# Primary Unit Test Function -# Defaults to testing all bash versions in containers, any/all arguments are -# assumed to be container image names (bash versions) to test with. -# Also supports "local" which will test without using containers. 
-function test_shtdlib { - export verbosity=11 - # Run this function inside bash containers as/if specified - if in_array 'local' "${@:-}" ; then - if [ "${#}" -ne 1 ] ; then - supported_bash_versions=( "${@/local}" ) - test_decorator "${FUNCNAME[0]}" - fi - else - supported_bash_versions=( "${@:-}" ) - test_decorator "${FUNCNAME[0]}" && return - fi - - color_echo green "Testing shtdlib functions" - - # Show some basic system stats - color_echo cyan "OS Family is: ${os_family}" - color_echo cyan "OS Type is: ${os_type}" - color_echo cyan "OS Name is: ${os_name}" - color_echo cyan "OS version is (major.minor.patch): ${major_version}.${minor_version}.${patch_version}" - color_echo cyan "Local IPs are:" - for ip in ${local_ip_addresses} ; do - color_echo cyan "${ip}" - done - - # Test color output - color_echo cyan "Testing echo colors:" - color_echo black "Black" - color_echo red "Red" - color_echo green "Green" - color_echo yellow "Yellow" - color_echo blue "Blue" - color_echo magenta "Magenta" - color_echo cyan "Cyan" - color_echo blank "Blank" - - # Test decorators - # shellcheck disable=2015 - shopt -uo pipefail && test_shopt_decorator 'Hello World' || exit_on_fail - - # Test whichs command - whichs command && color_echo green "whichs found the command 'command'" - - # Test assert command and make some basic assertions - assert true && color_echo green "asserted 'true' is true" - assert whichs ls - assert [ 0 -eq 0 ] - - # Test array inclusion, argument counting and empty check - declare -a shtdlib_test_array - shtdlib_test_array=(a b c d e f g) - # shellcheck disable=SC1117 - assert in_array 'a' "${shtdlib_test_array[@]}" && color_echo cyan "'a' is in '${shtdlib_test_array[*]}'" - assert [ "$(count_array_elements shtdlib_test_array)" == 7 ] && color_echo green "Found 7 elements in test array" - declare -a shtdlib_empty_array - assert empty_array shtdlib_empty_array - - # Test verbosity and debug logging - orig_verbosity="${verbosity:-1}" - verbosity=1 && 
color_echo green 'Verbosity set to 1 (should see debug up to 1)' - for ((i=1; i <= 11 ; i++)) ; do - debug ${i} "Debug Level ${i}" - done - verbosity=10 && color_echo green 'Verbosity set to 10 (should see debug up to 10)' - for ((i=1; i <= 11 ; i++)) ; do - debug ${i} "Debug Level ${i}" - done - verbosity="${orig_verbosity}" - - # Test finalizing paths - shtdlib_test_variable='/home/test' - finalize_path shtdlib_test_variable > /dev/null - finalize_path '~' > /dev/null - finalize_path './' > /dev/null - finalize_path '$HOME/test' > /dev/null - - # Test stripping path and exptension from a path - assert [ "$(basename_s /tmp/example.file)" == 'example' ] && color_echo green 'Tested basename_s correctly stripped path and extension from a path' - - # Test counting arguments - assert [ "$(count_arguments 1 2 3 4)" == 4 ] && color_echo green 'Tested count_arguments with 4 args' - - # Test platform neutral readlink -m/_m implementation - tmp_file_path="$(mktemp)" - tmp_symlink_dir="$(mktemp -d)" - tmp_file_name="$(basename "${tmp_file_path}")" - ln -s "${tmp_file_path}" "${tmp_symlink_dir}/${tmp_file_name}" - assert [ "$(readlink_m "${tmp_symlink_dir}/${tmp_file_name}")" == "${tmp_file_path}" ] && color_echo green "Sucessfully determined symlink target with readlink_m" - - # Test safe loading of config parameters - tmp_file="$(mktemp)" - add_on_sig "rm -f ${tmp_file}" - test_key='TEST_KEY' - test_value='test value moretest -f /somepath ./morepath \/ping ${}$() -- __' - echo "${test_key}=${test_value}" > "${tmp_file}" - load_config "${tmp_file}" 'TEST_KEY' - # shellcheck disable=SC2153 - test "'${TEST_KEY}'" == "'${test_value}'" || exit_on_fail - - # Test version sort - sorted_string="$(version_sort '1 0 2.3.2 3.3.3 1.1.1 0.0.1 2m 2.2.2m 4.4a')" - assert [ "${sorted_string//[$'\t\r\n ']/ }" == '0 0.0.1 1 1.1.1 2m 2.2.2m 2.3.2 3.3.3 4.4a' ] && color_echo green "Successfully tested version sort" - - # Test version comparison - assert compare_versions '1.1.1 1.2.2test' - 
assert [ "$(compare_versions '1.2.2 1.1.1'; echo "${?}")" == '1' ] - assert compare_versions '1.0.0 1.1.1 2.2.2' - assert [ "$(compare_versions '4.0.0 3.0.0 2.0.0 1.1.1test 1.0.0' ; echo "${?}" )" == '4' ] - - # Test process signaling - test_signal_process - - # Test filesystem object activity triggers - # shellcheck disable=SC2119 - test_add_on_mod - - # Test resolving domain names (IPv4) - assert [ "$(resolve_domain_name example.com | grep -v '.*:.*:.*:.*:.*:.*:.*:.*')" == '93.184.216.34' ] - - test_create_secure_tmp - - # Test version increment - new_version=$(version_increment 12323.3.2) - assert [ "${new_version}" == '12323.3.3' ] -} - -# Test bash version -if "${bash_pre_v4}" ; then - debug 9 "Detected bash version ${BASH_VERSION}, for optimal results we suggest using bash V4 or later" -fi diff --git a/scripts/shtdlib_dccscr.sh b/scripts/shtdlib_dccscr.sh new file mode 100644 index 0000000..d0cbc1a --- /dev/null +++ b/scripts/shtdlib_dccscr.sh @@ -0,0 +1,153 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2034,SC2174,SC2016,SC2026,SC2206,SC2128 +# +# This is a collection of shared functions used by SD Elements products +# +# Copyright (c) 2018 SD Elements Inc. +# +# All Rights Reserved. +# +# NOTICE: All information contained herein is, and remains +# the property of SD Elements Incorporated and its suppliers, +# if any. The intellectual and technical concepts contained +# herein are proprietary to SD Elements Incorporated +# and its suppliers and may be covered by U.S., Canadian and other Patents, +# patents in process, and are protected by trade secret or copyright law. +# + +# Create which -s alias (whichs), same as POSIX: -s +# No output, just return 0 if all of the executables are found, or 1 if some were not found. 
+function whichs { + # Bash 3.1 does not flush stdout so we use tee to make sure it gets done + command -v "${*}" &> /dev/null | tee /dev/null &> /dev/null + return "${PIPESTATUS}" +} + +# Allows clear assert syntax +function assert { + echo "Assertion made: ${*}" + # shellcheck disable=SC2068 + if ! "${@}" ; then + echo "Assertion failed: '${*}'" + exit 1 + fi +} + +# Signals a process by either exact name or pid +# Accepts name/pid as first parameter and optionally signal as second parameter +function signal_process { +echo "Signaling PID: ${1} with signal: ${2:-SIGTERM}" +if [[ "${1}" =~ ^[0-9]+$ ]] ; then + if [ "${2}" != '' ] ; then + kill -s "${2}" "${1}" + else + kill "${1}" + fi +else + assert whichs pkill + if [ "${2}" != '' ] ; then + pkill --exact --signal "${2}" "${1}" + else + pkill --exact "${1}" + fi +fi +} + +# Traps for cleaning up on exit +# Note that trap definition needs to happen here not inside the add_on_sig as +# shown in the original since this can easily be called in a subshell in which +# case the trap will only apply to that subshell +declare -a on_exit +on_exit=() +declare -a on_break +on_break=() + +function on_exit { + # shellcheck disable=SC2181 + if [ ${?} -ne 0 ]; then + # Prints to stderr to provide an easy way to check if the script + # failed. Because the exit signal gets propagated only the first call to + # this function will know the exit code of the script. All subsequent + # calls will see $? = 0 if the previous signal handler did not fail + echo "Last command did not complete successfully" >&2 + fi + + if [ -n "${on_exit:-}" ] ; then + echo "Received SIGEXIT, ${#on_exit[@]} items to clean up." + if [ ${#on_exit[@]} -gt 0 ]; then + for item in "${on_exit[@]}"; do + if [ -n "${item}" ] ; then + echo "Executing cleanup statement on exit: ${item}" + # shellcheck disable=SC2091 + ${item} + fi + done + fi + fi + echo "Finished cleaning up, de-registering signal trap" + trap - EXIT + if ! 
$interactive ; then + # Be a nice Unix citizen and propagate the signal + kill -s EXIT "${$}" + fi +} + +function on_break { + if [ -n "${on_break:-}" ] ; then + echo "Break signal received, unexpected exit, ${#on_break[@]} items to clean up." + if [ ${#on_break[@]} -gt 0 ]; then + for item in "${on_break[@]}"; do + if [ -n "${item}" ] ; then + echo "Executing cleanup statement on break: ${item}" + ${item} + fi + done + fi + fi + # Be a nice Unix citizen and propagate the signal + trap - "${1}" + if ! $interactive ; then + # Be a nice Unix citizen and propagate the signal + kill -s "${1}" "${$}" + fi +} + +function add_on_exit { + echo "Registering signal action on exit: \"${*}\"" + if [ -n "${on_exit:-}" ] ; then + local n="${#on_exit[@]}" + else + local n=0 + fi + on_exit[${n}]="${*}" + echo "on_exit content: ${on_exit[*]}, size: ${#on_exit[*]}, keys: ${!on_exit[*]}" +} + +function add_on_break { + echo "Registering signal action on break: \"${*}\"" + if [ -n "${on_break:-}" ] ; then + local n="${#on_break[@]}" + else + local n=0 + fi + on_break[${n}]="${*}" + echo "on_break content: ${on_break[*]}, size: ${#on_break[*]}, keys: ${!on_break[*]}" +} + +function add_on_sig { + add_on_exit "${*}" + add_on_break "${*}" +} + +function clear_sig_registry { + echo "Clearing all registered signal actions" + on_exit=() + on_break=() +} + +echo "Setting up signal traps" +trap on_exit EXIT +trap "on_break INT" INT +trap "on_break QUIT" QUIT +trap "on_break TERM" TERM +echo "Signal trap successfully initialized" -- GitLab From c4c2667a7560c38888a23ae108ac74efff013327 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Fri, 11 Sep 2020 16:20:37 -0400 Subject: [PATCH 21/27] More changes --- scripts/run_nginx.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run_nginx.sh b/scripts/run_nginx.sh index f7d6cf1..58157ee 100755 --- a/scripts/run_nginx.sh +++ b/scripts/run_nginx.sh @@ -31,7 +31,7 @@ export NAMESERVERS="resolver $(grep nameserver 
/etc/resolv.conf | awk '{print $2 # Create config files rtenvsub.sh --nofifo --overlay --process nginx --daemon /etc/nginx /run/nginx & -color_echo green 'Waiting for config to become available ' +echo 'Waiting for config to become available ' until test -e '/etc/nginx/nginx.conf' && test -d '/etc/nginx/sites-enabled'; do add_on_break 'exit' echo -n . -- GitLab From 3a2166043c2cfab7c2c499ad364b62776a5a90f1 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Sat, 12 Sep 2020 11:54:19 -0400 Subject: [PATCH 22/27] More changes --- Dockerfile | 1 + scripts/rtenvsub.sh | 24 +++++++++++++- scripts/shtdlib_dccscr.sh | 69 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 91671fd..7394a1f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -31,6 +31,7 @@ USER root RUN set -x \ && dnf -y upgrade \ + && dnf -y install gettext \ && dnf clean all \ && mkdir -p /var/nginx/proxy_temp \ && mkdir -p /var/nginx/client_body_temp \ diff --git a/scripts/rtenvsub.sh b/scripts/rtenvsub.sh index 1f1c0cd..3377536 100755 --- a/scripts/rtenvsub.sh +++ b/scripts/rtenvsub.sh @@ -20,6 +20,8 @@ version='0.2' # Set a safe umask umask 0077 +dev_mode="${DEV_MODE:-false}" + # Import the shell standard library source /bin/shtdlib_dccscr.sh @@ -31,7 +33,7 @@ if ! whichs envsubst ; then echo 'Perhaps this can be fixed with: apt-get -y install gettext-base' exit 1 fi -if ! whichs inotifywait ; then +if ! whichs inotifywait && ${dev_mode} ; then echo "Unable to locate the inotifywait command, please make sure it's available" echo 'Perhaps this can be fixed with: apt-get install inotify-tools' exit 1 @@ -321,6 +323,9 @@ function mirror_envsubst_paths { echo "Destination directory does not contain any files, no pipes created for ${full_path}!" 
else for file in "${files[@]:-}"; do + if ${dev_mode} ; then + add_on_sig "rm -f ${destination}${file#${full_path}}" + fi if ${nofifo} ; then render_file "${destination}" "${file}" "${full_path}" else @@ -336,6 +341,23 @@ function mirror_envsubst_paths { ln --symbolic "$(readlink ${link})" "${target}" add_on_sig "unlink ${target}" done + + if ${dev_mode} ; then + # Set up safe cleanup for directory structure (needs to be done in + # reverse order to ensure safety of operation without recursive rm + local index + for (( index=${#directories[@]}-1 ; index>=0 ; index-- )) ; do + add_on_sig "rmdir ${destination}${directories[${index}]#${full_path}}" + done + + # Run update loop and detach it + if ${daemonize} ; then + inotify_looper "${destination}" "${full_path}" & + else + inotify_looper "${destination}" "${full_path}" & + fi + looper_pids+=( "${!}" ) + fi done if ! ${daemonize} ; then echo "Waiting for looper pids: ${looper_pids[*]}" diff --git a/scripts/shtdlib_dccscr.sh b/scripts/shtdlib_dccscr.sh index d0cbc1a..c9244fa 100644 --- a/scripts/shtdlib_dccscr.sh +++ b/scripts/shtdlib_dccscr.sh @@ -15,6 +15,75 @@ # patents in process, and are protected by trade secret or copyright law. # +# If there is no TTY then it's not interactive +if ! [[ -t 1 ]]; then + interactive=false +fi +# Default is interactive mode unless already set +interactive="${interactive:-true}" + +# Allows checking of exit status, on error print debugging info and exit. +# Takes an optional error message in which case only it will be shown +# This is typically only used when running in non-strict mode but when errors +# should be raised and to help with debugging +function exit_on_fail { + message="${*:-}" + if [ -z "${message}" ] ; then + echo "Last command did not execute successfully but is required!" 
>&2 + else + echo "${*}" >&2 + fi + echo "[$( caller )] ${*:-}" + echo "BASH_SOURCE: ${BASH_SOURCE[*]}" + echo "BASH_LINENO: ${BASH_LINENO[*]}" + echo "FUNCNAME: ${FUNCNAME[*]}" + exit 1 +} + +# Check if a variable is in array +# First parameter is the variable, rest is the array +function in_array { + local x + for x in "${@:2}"; do [[ "${x}" == "${1}" ]] && return 0; done + return 1 +} + +# Parse for optional arguments (-f vs. -f optional_argument) +# Takes variable name as first arg and default value as optional second +# variable will be initialized in any case for compat with -e +# You need to set or export `parameter_array` in the script that uses `parse_opt_arg`: +# +# # shellcheck disable=2034 +# parameter_array=(${@:-}) # Store all parameters as an array +# +# # Parse command line arguments +# function parse_arguments { +# echo "Parse Arguments got argument: ${1}" +# case ${1} in +# ... +function parse_opt_arg { + # Pick up optional arguments + echo "Parameter Array is: ${parameter_array[*]:-}" + next_arg="${parameter_array[$((OPTIND - 1))]:-}" + echo "Optarg/Option index is: ${OPTIND} and next argument is: ${next_arg}" + if [ "$(echo "${next_arg}" | grep -v '^-')" != "" ]; then + echo "Found optional argument and setting ${1}=\"${next_arg}\"" + eval "${1}=\"${next_arg}\"" + # Skip over the optional value so getopts does not stop processing + (( OPTIND++ )) + else + if [ "${2}" != '' ]; then + echo "Optional argument not found, using default and setting ${1}=\"${2}\"" + eval "${1}=\"${2}\"" + else + echo "Initializing empty variable ${1}" + eval "${1}=" + fi + fi + unset next_arg + echo "Set argument: ${1} to \"${!1}\"" +} + # Create which -s alias (whichs), same as POSIX: -s # No output, just return 0 if all of the executables are found, or 1 if some were not found. 
function whichs { -- GitLab From 422bef44a6f5ea15ff6adfdbf69e9c4262ecb23f Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Thu, 8 Oct 2020 10:00:31 -0400 Subject: [PATCH 23/27] Update build command with new registry --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2050b02..3b0a85c 100644 --- a/README.md +++ b/README.md @@ -28,8 +28,8 @@ the version of memcached, in this case `5.6.33` ```bash export jitt_version='5.6.33' && \ docker build . -t localhost/security-compass/jitt/nginx-jitt:"local" \ - --build-arg BASE_REGISTRY="localhost" \ - --build-arg BASE_IMAGE="opensource/nginx/nginx" \ - --build-arg BASE_TAG="1.19.0" \ + --build-arg BASE_REGISTRY="registry1.dsop.io" \ + --build-arg BASE_IMAGE="ironbank/opensource/nginx/nginx" \ + --build-arg BASE_TAG="1.19.2" --build-arg jitt_version="${jitt_version}" ``` -- GitLab From 53721d117799cb25b387d720662db1cad48b3b90 Mon Sep 17 00:00:00 2001 From: Hrdayesh Patel Date: Thu, 8 Oct 2020 10:00:34 -0400 Subject: [PATCH 24/27] Add support for running as non-root --- Dockerfile | 11 +++++++++-- scripts/run_nginx.sh | 4 ++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7394a1f..2fa1b35 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ ARG BASE_REGISTRY=nexus-docker-secure.levelup-nexus.svc.cluster.local:18082 ARG BASE_IMAGE=opensource/nginx/nginx -ARG BASE_TAG=1.19.0 +ARG BASE_TAG=1.19.2 # Down with the bloat @@ -40,12 +40,19 @@ RUN set -x \ && rm -f /etc/nginx/conf.d/* \ && rm -f /etc/nginx/nginx.conf \ && rm -f /var/log/nginx/access.log \ - && rm -f /var/log/nginx/error.log + && rm -f /var/log/nginx/error.log \ + && groupadd --gid 49 www-data \ + && usermod nginx --groups www-data \ + && mkdir --mode 2775 --parents /etc/nginx /var/log/nginx /var/cache/nginx \ + ; chown --recursive www-data:www-data /etc/nginx \ + ; find /etc/nginx/ -type d -exec chmod g+rwx {} \; \ + ; chown --recursive nginx:www-data 
/var/log/nginx /var/cache/nginx COPY --from=extractor /jitt /jitt/ COPY /scripts/rtenvsub.sh /bin/rtenvsub.sh COPY /scripts/shtdlib_dccscr.sh /bin/shtdlib_dccscr.sh COPY /scripts/run_nginx.sh /bin/run_nginx.sh +USER nginx HEALTHCHECK --interval=15s --timeout=10s --retries=3 CMD pgrep -lf nginx || exit 1 diff --git a/scripts/run_nginx.sh b/scripts/run_nginx.sh index 58157ee..9ac6af8 100755 --- a/scripts/run_nginx.sh +++ b/scripts/run_nginx.sh @@ -29,7 +29,7 @@ source /bin/shtdlib_dccscr.sh export NAMESERVERS="resolver $(grep nameserver /etc/resolv.conf | awk '{print $2}') valid=10s;" # Create config files -rtenvsub.sh --nofifo --overlay --process nginx --daemon /etc/nginx /run/nginx & +sg www-data "/bin/rtenvsub.sh --nofifo --overlay --daemon /etc/nginx /run/nginx" echo 'Waiting for config to become available ' until test -e '/etc/nginx/nginx.conf' && test -d '/etc/nginx/sites-enabled'; do @@ -40,5 +40,5 @@ done # Run nginx echo 'Starting nginx' -nginx -g 'daemon off;' || exit_on_fail +/usr/sbin/nginx -g 'daemon off;' || exit_on_fail echo "Nginx exited with return code: ${?}" -- GitLab From 4cf952b5345672df0cf40e61942946b59f296fe7 Mon Sep 17 00:00:00 2001 From: "gavin.scallon" Date: Fri, 9 Oct 2020 15:50:58 +0000 Subject: [PATCH 25/27] Patch 1 --- download.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/download.yaml b/download.yaml index 09998cc..93bc8da 100644 --- a/download.yaml +++ b/download.yaml @@ -5,5 +5,5 @@ resources: type: sha256 value: "472ad942998b0a444e51637ccf8bda039c475ee4f0bccc714bd620485bb2d631" auth: - type: basic - id: scompass + type: "basic" + id: "scompass-credential" -- GitLab From 28c71c50bedb9bcba19e34e4d8ba52678a5b876d Mon Sep 17 00:00:00 2001 From: Al Fontaine Date: Tue, 13 Oct 2020 15:44:50 +0000 Subject: [PATCH 26/27] Update Jenkinsfile --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 99963ea..1f9f45a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ 
-1,2 +1,2 @@ @Library('DCCSCR@master') _ -dccscrPipeline(version: "0.0.1") +dccscrPipeline(version: "5.6.33") -- GitLab From 2ef557be15232429963c74d82c36f6c9c44c087c Mon Sep 17 00:00:00 2001 From: "sean.melissari" Date: Tue, 13 Oct 2020 17:41:50 +0000 Subject: [PATCH 27/27] use registry1 --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 2fa1b35..e4d53bf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ -ARG BASE_REGISTRY=nexus-docker-secure.levelup-nexus.svc.cluster.local:18082 -ARG BASE_IMAGE=opensource/nginx/nginx +ARG BASE_REGISTRY=registry1.dsop.io +ARG BASE_IMAGE=ironbank/opensource/nginx/nginx ARG BASE_TAG=1.19.2 -- GitLab