
Commit 69548194 authored by karol

Merged Dockerfile

parents 680e0b47 2051e938
Pipeline #283302 passed with stages in 14 minutes and 53 seconds
Dockerfile
@@ -3,19 +3,20 @@ ARG BASE_IMAGE=ubi8
 ARG BASE_TAG=8.3
 FROM spark-operator/spark-operator:v1beta2-1.0.0-2.4.4 AS base
 FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}
-RUN dnf upgrade -y
 RUN dnf install -y openssl curl
-RUN rm -rf /var/cache/dnf
+RUN dnf upgrade -y && \
+    dnf clean all && \
+    rm -rf /var/cache/dnf
 COPY --from=base /usr/bin/spark-operator /usr/bin/
-COPY tini /usr/bin/
-RUN chmod +x /usr/bin/tini
+COPY tini /sbin/
+RUN chmod +x /sbin/tini
 COPY scripts/gencerts.sh /usr/bin/
 RUN chmod +x /usr/bin/gencerts.sh
-COPY --from=base /usr/bin/entrypoint.sh /usr/bin/
+COPY scripts/entrypoint.sh /usr/bin/
 RUN chmod +x /usr/bin/entrypoint.sh
 RUN find / -path /proc -prune -o -perm /4000 -exec chmod u-s {} \;
 RUN find / -path /proc -prune -o -perm /2000 -exec chmod g-s {} \;
 RUN groupadd -r spark-operator && useradd -r -g spark-operator spark-operator
 RUN chown -R spark-operator /usr/bin
 USER spark-operator
-ENTRYPOINT ["/usr/bin/entrypoint.sh"]
\ No newline at end of file
+ENTRYPOINT ["/usr/bin/entrypoint.sh"]
renovate.json
{
  "assignees": [
    "@cvernooy"
  ],
  "baseBranches": [
    "development"
  ],
  "packageRules": [
    {
      "datasources": ["docker"],
      "packageNames": ["gcr.io/spark-operator/spark-operator"],
      "versioning": "regex:^v1beta2-(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)(-(?<compatibility>.*))$"
    }
  ],
  "regexManagers": [
    {
      "fileMatch": [
        "^hardening_manifest\\.yaml$"
      ],
      "matchStrings": [
        "org\\.opencontainers\\.image\\.version:\\s+\"(?<currentValue>.+?)\""
      ],
      "depNameTemplate": "gcr.io/spark-operator/spark-operator",
      "datasourceTemplate": "docker"
    },
    {
      "fileMatch": [
        "^hardening_manifest\\.yaml$"
      ],
      "matchStrings": [
        "tags:\\s+-\\s+\"(?<currentValue>.+?)\""
      ],
      "depNameTemplate": "gcr.io/spark-operator/spark-operator",
      "datasourceTemplate": "docker"
    }
  ]
}
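The custom `versioning` rule above decomposes upstream tags such as `v1beta2-1.0.0-2.4.4`, where the trailing component is the Spark version the operator targets. A bash illustration of the same split (numbered capture groups stand in for the named groups, which bash regexes lack):

```bash
# Illustration of the tag scheme the Renovate rule parses.
tag="v1beta2-1.0.0-2.4.4"
if [[ "$tag" =~ ^v1beta2-([0-9]+)\.([0-9]+)\.([0-9]+)(-(.*))$ ]]; then
  echo "major=${BASH_REMATCH[1]} minor=${BASH_REMATCH[2]} patch=${BASH_REMATCH[3]}"
  echo "compatibility=${BASH_REMATCH[5]}"
fi
# Output:
#   major=1 minor=0 patch=0
#   compatibility=2.4.4
```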
scripts/entrypoint.sh
#!/bin/bash
# echo commands to the terminal output
set -ex

# Check whether there is a passwd entry for the container UID
myuid=$(id -u)
mygid=$(id -g)

# turn off -e for getent because it returns an error code in the anonymous uid case
set +e
uidentry=$(getent passwd $myuid)
set -e

echo $myuid
echo $mygid
echo $uidentry

# If there is no passwd entry for the container UID, attempt to create one
if [[ -z "$uidentry" ]] ; then
    if [[ -w /etc/passwd ]] ; then
        echo "$myuid:x:$myuid:$mygid:anonymous uid:$SPARK_HOME:/bin/false" >> /etc/passwd
    else
        echo "Container ENTRYPOINT failed to add passwd entry for anonymous UID"
    fi
fi

exec /usr/bin/tini -s -- /usr/bin/spark-operator "$@"
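The anonymous-UID branch exists for runtimes (OpenShift, for example) that start containers under an arbitrary UID with no passwd entry; adding one keeps tools that look up the current user working. A tiny sketch of the line it appends, with illustrative UID/GID and `SPARK_HOME` values:

```bash
# What the branch writes for UID 1234, GID 0, SPARK_HOME=/opt/spark
# (all three values are illustrative).
myuid=1234; mygid=0; SPARK_HOME=/opt/spark
echo "$myuid:x:$myuid:$mygid:anonymous uid:$SPARK_HOME:/bin/false"
# -> 1234:x:1234:0:anonymous uid:/opt/spark:/bin/false
```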
scripts/gencerts.sh
#!/bin/bash
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generates a CA certificate, a server key, and a server certificate signed by the CA.
set -e
SCRIPT=`basename ${BASH_SOURCE[0]}`

function usage {
  cat << EOF
Usage: $SCRIPT
Options:
  -h | --help                     Display help information.
  -n | --namespace <namespace>    The namespace where the Spark operator is installed.
  -s | --service <service>        The name of the webhook service.
  -p | --in-pod                   Whether the script is running inside a pod or not.
EOF
}
function parse_arguments {
  while [[ $# -gt 0 ]]
  do
    case "$1" in
      -n|--namespace)
        if [[ -n "$2" ]]; then
          NAMESPACE="$2"
        else
          echo "-n or --namespace requires a value."
          exit 1
        fi
        shift 2
        continue
        ;;
      -s|--service)
        if [[ -n "$2" ]]; then
          SERVICE="$2"
        else
          echo "-s or --service requires a value."
          exit 1
        fi
        shift 2
        continue
        ;;
      -p|--in-pod)
        export IN_POD=true
        shift 1
        continue
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      --) # End of all options.
        shift
        break
        ;;
      '') # End of all options.
        break
        ;;
      *)
        echo "Unrecognized option: $1"
        exit 1
        ;;
    esac
  done
}
# Default the namespace to "spark-operator" and the webhook service name
# to "spark-webhook" if not provided.
IN_POD=false
SERVICE="spark-webhook"
NAMESPACE="spark-operator"
parse_arguments "$@"
TMP_DIR="/tmp/spark-pod-webhook-certs"
echo "Generating certs for the Spark pod admission webhook in ${TMP_DIR}."
mkdir -p ${TMP_DIR}
cat > ${TMP_DIR}/server.conf << EOF
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
subjectAltName = DNS:${SERVICE}.${NAMESPACE}.svc
EOF
# Create a certificate authority.
touch ${TMP_DIR}/.rnd
export RANDFILE=${TMP_DIR}/.rnd
openssl genrsa -out ${TMP_DIR}/ca-key.pem 2048
openssl req -x509 -new -nodes -key ${TMP_DIR}/ca-key.pem -days 100000 -out ${TMP_DIR}/ca-cert.pem -subj "/CN=${SERVICE}.${NAMESPACE}.svc"
# Create a server certificate.
openssl genrsa -out ${TMP_DIR}/server-key.pem 2048
# Note the CN is the DNS name of the service of the webhook.
openssl req -new -key ${TMP_DIR}/server-key.pem -out ${TMP_DIR}/server.csr -subj "/CN=${SERVICE}.${NAMESPACE}.svc" -config ${TMP_DIR}/server.conf
openssl x509 -req -in ${TMP_DIR}/server.csr -CA ${TMP_DIR}/ca-cert.pem -CAkey ${TMP_DIR}/ca-key.pem -CAcreateserial -out ${TMP_DIR}/server-cert.pem -days 100000 -extensions v3_req -extfile ${TMP_DIR}/server.conf
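# At this point server-cert.pem chains to ca-cert.pem and carries the
# subjectAltName DNS:${SERVICE}.${NAMESPACE}.svc, the name the Kubernetes
# API server dials when invoking the admission webhook.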
if [[ "$IN_POD" == "true" ]]; then
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
# Base64 encode secrets and then remove the trailing newline to avoid issues in the curl command
ca_cert=$(cat ${TMP_DIR}/ca-cert.pem | base64 | tr -d '\n')
ca_key=$(cat ${TMP_DIR}/ca-key.pem | base64 | tr -d '\n')
server_cert=$(cat ${TMP_DIR}/server-cert.pem | base64 | tr -d '\n')
server_key=$(cat ${TMP_DIR}/server-key.pem | base64 | tr -d '\n')
# Create the secret resource
echo "Creating a secret for the certificate and keys"
STATUS=$(curl -ik \
-o ${TMP_DIR}/output \
-w "%{http_code}" \
-X POST \
-H "Authorization: Bearer $TOKEN" \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
"name": "spark-webhook-certs",
"namespace": "'"$NAMESPACE"'"
},
"data": {
"ca-cert.pem": "'"$ca_cert"'",
"ca-key.pem": "'"$ca_key"'",
"server-cert.pem": "'"$server_cert"'",
"server-key.pem": "'"$server_key"'"
}
}' \
https://kubernetes.default.svc/api/v1/namespaces/${NAMESPACE}/secrets)
cat ${TMP_DIR}/output
case "$STATUS" in
201)
printf "\nSuccess - secret created.\n"
;;
409)
printf "\nSuccess - secret already exists.\n"
;;
*)
printf "\nFailed creating secret.\n"
exit 1
;;
esac
else
kubectl create secret --namespace=${NAMESPACE} generic spark-webhook-certs --from-file=${TMP_DIR}/ca-key.pem --from-file=${TMP_DIR}/ca-cert.pem --from-file=${TMP_DIR}/server-key.pem --from-file=${TMP_DIR}/server-cert.pem
fi
# Clean up after we're done.
printf "\nDeleting ${TMP_DIR}.\n"
rm -rf ${TMP_DIR}
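Once the script has run in-pod, the generated material lives only in the `spark-webhook-certs` secret. A hedged post-run check (the `/tmp` paths are arbitrary, and the `-ext` flag needs OpenSSL 1.1.1 or newer):

```bash
# Pull the certs back out of the secret and confirm the server certificate
# chains to the CA and carries the expected service SAN.
kubectl get secret spark-webhook-certs -n spark-operator \
  -o jsonpath='{.data.ca-cert\.pem}' | base64 -d > /tmp/ca-cert.pem
kubectl get secret spark-webhook-certs -n spark-operator \
  -o jsonpath='{.data.server-cert\.pem}' | base64 -d > /tmp/server-cert.pem

openssl verify -CAfile /tmp/ca-cert.pem /tmp/server-cert.pem
openssl x509 -in /tmp/server-cert.pem -noout -ext subjectAltName
```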