UNCLASSIFIED

Commit af99b3a3 authored by alexander.klepal's avatar alexander.klepal
Browse files

Merge branch 'mlz-container' into 'development'

Mission Landing Zone (MLZ) Hardened container

See merge request !16
parents 14620d7b 605d0dc9
Pipeline #383582 canceled with stages
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Build args for the base image (supplied by the Iron Bank hardening_manifest).
ARG BASE_REGISTRY=registry1.dso.mil
ARG BASE_IMAGE=ironbank/redhat/ubi/ubi8
ARG BASE_TAG=8.4
# NOTE(review): the four version ARGs below are not referenced anywhere in this
# Dockerfile (the provider paths further down hard-code 2.55.0/3.1.0/0.7.1),
# and AZURERM_VERSION=2.67.0 does not match the 2.55.0 azurerm provider that is
# actually bundled -- kept for build-arg compatibility, but verify intent.
ARG TERRAFORM_VERSION=1.0.0
ARG AZURERM_VERSION=2.67.0
ARG RANDOM_VERSION=3.1.0
ARG TIME_VERSION=0.7.1
ARG RELTAG=2021.06.0

# Intermediate stage: used only to unpack the MLZ source archive so the final
# stage can COPY just the src tree (and not the zip or the unzip tooling).
FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} AS intermediate

# Hardening: update packages and remove the dnf cache in the same layer.
RUN dnf update -y --nodocs && \
    dnf clean all && \
    rm -rf /var/cache/dnf

# Install unzip (needed to extract the MLZ source archive) without weak
# dependencies, and clean the cache in the same layer.
RUN dnf --nodocs -y install --setopt=install_weak_deps=False \
    unzip && \
    dnf clean all

# Download MLZ source code.
# WORKDIR creates /workspaces if missing, so no separate mkdir is needed.
WORKDIR /workspaces
# The archive is downloaded ahead of time by the hardening_manifest resources.
ARG MLZ_DEPENDENCY=v2021.06.0.zip
COPY ["${MLZ_DEPENDENCY}", "/tmp"]
# Unpack and delete the archive in one layer so the zip never persists.
RUN unzip /tmp/${MLZ_DEPENDENCY} -d /workspaces/missionlz \
    && rm /tmp/${MLZ_DEPENDENCY}
WORKDIR /workspaces/missionlz
# Final stage: hardened runtime image.
FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}

# Hardening: update packages and remove the dnf cache in the same layer.
RUN dnf update -y --nodocs && \
    dnf clean all && \
    rm -rf /var/cache/dnf

# Install runtime packages without weak dependencies and clean the cache in
# the same layer (the original left a separate cache-only cleanup layer).
RUN dnf --nodocs -y install --setopt=install_weak_deps=False \
    ca-certificates \
    sudo \
    unzip && \
    dnf clean all

# Install the Microsoft package signing key.
# This file is downloaded using the hardening_manifest file.
ARG KEY_DEPENDENCY=microsoft.asc
COPY ["${KEY_DEPENDENCY}", "/tmp"]
# Keep a copy in the conventional rpm-gpg location so the yum repo definition
# below can reference it after /tmp is cleaned up.
RUN rpm --import /tmp/${KEY_DEPENDENCY} && \
    install -m 0644 /tmp/${KEY_DEPENDENCY} /etc/pki/rpm-gpg/${KEY_DEPENDENCY} && \
    rm /tmp/${KEY_DEPENDENCY}

# Install azure-cli from the locally provided RPM (offline build).
ARG AZURE_CLI_DEPENDENCY=azure-cli-2.26.1-1.el7.x86_64.rpm
COPY ["${AZURE_CLI_DEPENDENCY}", "/tmp"]
RUN dnf install /tmp/${AZURE_CLI_DEPENDENCY} -y && \
    dnf clean all && \
    rm -rf /tmp/${AZURE_CLI_DEPENDENCY}
# Register the azure-cli repo so the package can be updated later.
# BUG FIX: the original used echo $'...' (ANSI-C quoting), inside which
# ${KEY_DEPENDENCY} is NOT expanded, so the repo file contained the literal
# string '${KEY_DEPENDENCY}'. printf with a double-quoted argument expands it.
RUN printf '%s\n' \
    '[azure-cli]' \
    'name=Azure CLI' \
    'baseurl=https://packages.microsoft.com/yumrepos/azure-cli' \
    'enabled=1' \
    'gpgcheck=1' \
    "gpgkey=/etc/pki/rpm-gpg/${KEY_DEPENDENCY}" > /etc/yum.repos.d/azure-cli.repo

# Install Terraform from the locally provided archive.
ARG TERRAFORM_DEPENDENCY=terraform.zip
COPY ["${TERRAFORM_DEPENDENCY}", "/tmp"]
RUN unzip /tmp/${TERRAFORM_DEPENDENCY} -d /usr/local/bin/ \
    && rm /tmp/${TERRAFORM_DEPENDENCY}

# Pre-seed Terraform providers (plugins) for offline use.
# Setting TF_PLUGIN_CACHE_DIR instructs Terraform to search that folder for plugins first.
ENV TF_PLUGIN_CACHE_DIR=/usr/lib/tf-plugins
ARG AZURERM_LOCAL_PATH="${TF_PLUGIN_CACHE_DIR}/registry.terraform.io/hashicorp/azurerm/2.55.0/linux_amd64"
ARG RANDOM_LOCAL_PATH="${TF_PLUGIN_CACHE_DIR}/registry.terraform.io/hashicorp/random/3.1.0/linux_amd64"
ARG TIME_LOCAL_PATH="${TF_PLUGIN_CACHE_DIR}/registry.terraform.io/hashicorp/time/0.7.1/linux_amd64"
RUN mkdir -p ${AZURERM_LOCAL_PATH} ${RANDOM_LOCAL_PATH} ${TIME_LOCAL_PATH}
# Each provider archive: unpack into its cache path and delete the zip in the
# same layer so the archive never persists in the image.
ARG AZURERM_PROVIDER=terraform-provider-azurerm_2.55.0_linux_amd64.zip
COPY ["${AZURERM_PROVIDER}", "/tmp"]
RUN unzip /tmp/${AZURERM_PROVIDER} -d ${AZURERM_LOCAL_PATH} \
    && rm /tmp/${AZURERM_PROVIDER}
ARG RANDOM_PROVIDER=terraform-provider-random_3.1.0_linux_amd64.zip
COPY ["${RANDOM_PROVIDER}", "/tmp"]
RUN unzip /tmp/${RANDOM_PROVIDER} -d ${RANDOM_LOCAL_PATH} \
    && rm /tmp/${RANDOM_PROVIDER}
ARG TIME_PROVIDER=terraform-provider-time_0.7.1_linux_amd64.zip
COPY ["${TIME_PROVIDER}", "/tmp"]
RUN unzip /tmp/${TIME_PROVIDER} -d ${TIME_LOCAL_PATH} \
    && rm /tmp/${TIME_PROVIDER}

# Copy the cloud-init entrypoint script into the image.
COPY ./scripts/cloud-init.sh /usr/local/bin

# Add repo source files unpacked by the intermediate stage.
# COPY creates missing parent directories, so no mkdir steps are needed.
ARG RELTAG
COPY --from=intermediate /workspaces/missionlz/missionlz-${RELTAG}/src /workspaces/missionlz/src
WORKDIR /workspaces/missionlz/src

# Hardening: healthcheck.
# Check every five minutes or so that we're able to retrieve file system info within three seconds.
HEALTHCHECK --interval=5m --timeout=3s \
    CMD df -h || exit 1
CMD ["/bin/bash", "/usr/local/bin/cloud-init.sh"]
\ No newline at end of file
Microsoft Public License (MS-PL)
This license governs use of the accompanying software. If you use the software, you accept this license. If you do not accept the license, do not use the software.
1. Definitions
The terms "reproduce," "reproduction," "derivative works," and "distribution" have the
same meaning here as under U.S. copyright law.
A "contribution" is the original software, or any additions or changes to the software.
A "contributor" is any person that distributes its contribution under this license.
"Licensed patents" are a contributor's patent claims that read directly on its contribution.
2. Grant of Rights
(A) Copyright Grant- Subject to the terms of this license, including the license conditions and limitations in section 3, each contributor grants you a non-exclusive, worldwide, royalty-free copyright license to reproduce its contribution, prepare derivative works of its contribution, and distribute its contribution or any derivative works that you create.
(B) Patent Grant- Subject to the terms of this license, including the license conditions and limitations in section 3, each contributor grants you a non-exclusive, worldwide, royalty-free license under its licensed patents to make, have made, use, sell, offer for sale, import, and/or otherwise dispose of its contribution in the software or derivative works of the contribution in the software.
3. Conditions and Limitations
(A) No Trademark License- This license does not grant you rights to use any contributors' name, logo, or trademarks.
(B) If you bring a patent claim against any contributor over patents that you claim are infringed by the software, your patent license from such contributor to the software ends automatically.
(C) If you distribute any portion of the software, you must retain all copyright, patent, trademark, and attribution notices that are present in the software.
(D) If you distribute any portion of the software in source code form, you may do so only under this license by including a complete copy of this license with your distribution. If you distribute any portion of the software in compiled or object code form, you may only do so under a license that complies with this license.
(E) The software is licensed "as-is." You bear the risk of using it. The contributors give no express warranties, guarantees or conditions. You may have additional consumer rights under your local laws which this license cannot change. To the extent permitted under your local laws, the contributors exclude the implied warranties of merchantability, fitness for a particular purpose and non-infringement.
# <application name>
# Mission LZ
Project template for all Iron Bank container repositories.
\ No newline at end of file
Mission Landing Zone is a highly opinionated template which IT oversight organizations can use to create a cloud management system to deploy Azure environments for their teams. It addresses a narrowly scoped, specific need for an SCCA compliant hub and spoke infrastructure.
Mission LZ is:
- Designed for US Gov mission customers​
- Implements [SCCA](https://docs.microsoft.com/en-us/azure/azure-government/compliance/secure-azure-computing-architecture) requirements following Microsoft's [SACA](https://aka.ms/saca) implementation guidance
- Deployable in commercial, government, and air-gapped Azure clouds
- A narrow scope for a specific common need​
- A simple solution with low configuration​
- Written in Terraform and Linux shell scripts
Mission Landing Zone is the right solution when:
- A simple, secure, and scalable hub and spoke infrastructure is needed
- Various teams need separate, secure cloud environments administered by a central IT team
- There is a need to implement SCCA
- Hosting any workload requiring a secure environment, for example: data warehousing, AI/ML, and containerized applications
Design goals include:
- A simple, minimal set of code that is easy to configure
- Good defaults that allow experimentation and testing in a single subscription
- Deployment via command line or with a user interface
- Uses Azure PaaS products
Our intent is to enable IT Admins to use this software to:
- Test and evaluate the landing zone using a single Azure subscription
- Develop a known good configuration that can be used for production with multiple Azure subscriptions
- Optionally, customize the Terraform deployment configuration to suit specific needs
- Deploy multiple customer workloads in production
## Scope
Mission LZ has the following scope:
- Hub and spoke networking intended to comply with SCCA controls
- Remote access
- Shared services, i.e., services available to all workloads via the networking hub
- Ability to create multiple workloads or team subscriptions
- Compatibility with SCCA compliance (and other compliance frameworks)
- Security using standard Azure tools with sensible defaults
<!-- markdownlint-disable MD033 -->
<!-- allow html for images so that they can be sized -->
<img src="src/docs/images/scope.png" alt="Mission LZ Scope" width="600" />
<!-- markdownlint-enable MD033 -->
## Networking
Networking is set up in a hub and spoke design, separated by tiers: T0 (Identity and Authorization), T1 (Infrastructure Operations), T2 (DevSecOps and Shared Services), and multiple T3s (Workloads). Security can be configured to allow separation of duties between all tiers. Most customers will deploy each tier to a separate Azure subscription, but multiple subscriptions are not required.
<!-- markdownlint-disable MD033 -->
<img src="src/docs/images/networking.png" alt="Mission LZ Networking" width="600" />
<!-- markdownlint-enable MD033 -->
## Deploying the Mission LZ Container
1. On the system running Docker, enter the command below:
- export ARM_CLOUD_METADATA_URL=<metadata_url_for_target_cloud>
2. Using Docker, authenticate to registry1
3. Run the following commands:
- image_name=<copy_pull_link_from_registry1>
- container_name=<name_of_container>
- docker run -it -d --name ${container_name} ${image_name}
- docker exec -it -e ARM_CLOUD_METADATA_URL=${ARM_CLOUD_METADATA_URL} ${container_name} /bin/bash
After performing Steps 1-3, proceed with the deployment of the Mission Landing Zone architecture by following the documentation located in the folder below:
https://repo1.dso.mil/dsop/microsoft/azure/mission-landing-zone/src/docs
## Getting Started using Mission LZ
See our [Getting Started Guide](src/docs/getting-started.md) in the docs.
## Product Roadmap
See the [Projects](https://github.com/Azure/missionlz/projects) page for the release timeline and feature areas.
Here's what the repo consists of as of May 2021:
<!-- markdownlint-disable MD033 -->
<img src="src/docs/images/missionlz_as_of_may2021.png" alt="Mission LZ as of May 2021" width="600" />
<!-- markdownlint-enable MD033 -->
## Contributing
This project welcomes contributions and suggestions. See our [Contributing Guide](CONTRIBUTING.md) for details.
## Feedback, Support, and How to Contact Us
Please see the [Support and Feedback Guide](SUPPORT.md). To report a security issue please see our [security guidance](./SECURITY.md).
## Trademarks
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
trademarks or logos is subject to and must follow
[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
Any use of third-party trademarks or logos are subject to those third-party's policies.
---
apiVersion: v1
# The repository name in registry1, excluding /ironbank/
name: "redhat/ubi/ubi8"
# List of tags to push for the repository in registry1
# The most specific version should be the first tag and will be shown
# on ironbank.dsop.io
tags:
- "8.4"
- "latest"
# Build args passed to Dockerfile ARGs
args:
BASE_IMAGE: "redhat/ubi/ubi8"
BASE_TAG: "8.4"
# Docker image labels
labels:
org.opencontainers.image.title: "ubi8"
## Human-readable description of the software packaged in the image
org.opencontainers.image.description: "Red Hat Universal Base Images (UBI) are OCI-compliant container base operating system images with complementary runtime languages and packages that are freely redistributable."
## License(s) under which contained software is distributed
org.opencontainers.image.licenses: "Red Hat Universal Base Image EULA"
## URL to find more information on the image
org.opencontainers.image.url: "https://catalog.redhat.com/software/container-stacks/detail/5ec53f50ef29fd35586d9a56"
## Name of the distributing entity, organization or individual
org.opencontainers.image.vendor: "Red Hat"
org.opencontainers.image.version: "8.4"
## Keywords to help with search (ex. "cicd,gitops,golang")
mil.dso.ironbank.image.keywords: "base,os,rhel,ubi"
## This value can be "opensource" or "commercial"
mil.dso.ironbank.image.type: "commercial"
## Product the image belongs to for grouping multiple images
mil.dso.ironbank.product.name: "UBI8"
# List of resources to make available to the offline build context
resources:
- url: https://packages.microsoft.com/keys/microsoft.asc
filename: microsoft.asc
validation:
type: sha256
value: 2cfd20a306b2fa5e25522d78f2ef50a1f429d35fd30bd983e2ebffc2b80944fa
- url: https://packages.microsoft.com/yumrepos/azure-cli/azure-cli-2.26.1-1.el7.x86_64.rpm
filename: azure-cli-2.26.1-1.el7.x86_64.rpm
validation:
type: sha256
value: a42784024da7805fda8cd51f80b647ccf54f37437cc686a5d0cc7d00e81b989b
- url: https://releases.hashicorp.com/terraform/1.0.0/terraform_1.0.0_linux_amd64.zip
filename: terraform.zip
validation:
type: sha256
value: 8be33cc3be8089019d95eb8f546f35d41926e7c1e5deff15792e969dde573eb5
- url: https://github.com/Azure/missionlz/archive/refs/tags/v2021.06.0.zip
filename: v2021.06.0.zip
validation:
type: sha256
value: 28ed59538c1e45afdee5e4cbcdab17d976d04389b7700cacc103713cd6e38799
- url: https://releases.hashicorp.com/terraform-provider-azurerm/2.55.0/terraform-provider-azurerm_2.55.0_linux_amd64.zip
filename: terraform-provider-azurerm_2.55.0_linux_amd64.zip
validation:
type: sha256
value: 7e26b4b1e91a608a51169830b26fb26b039b1ec7457b445d98718a3f5eb969ee
- url: https://releases.hashicorp.com/terraform-provider-random/3.1.0/terraform-provider-random_3.1.0_linux_amd64.zip
filename: terraform-provider-random_3.1.0_linux_amd64.zip
validation:
type: sha256
value: d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7
- url: https://releases.hashicorp.com/terraform-provider-time/0.7.1/terraform-provider-time_0.7.1_linux_amd64.zip
filename: terraform-provider-time_0.7.1_linux_amd64.zip
validation:
type: sha256
value: 96c3da650bda44b31ba5513e322fd1902d3cfa9cc99129ede70929c71ca74364
# List of project maintainers
maintainers:
- email: "Byron.Boudreaux@microsoft.com"
name: "Byron Boudreaux"
username: "Phydeauxman"
- email: "jeromejansen@microsoft.com"
name: "Jerome Jansen"
username: "jjansen23"
#!/bin/bash
# Builds the Mission LZ container image from the current directory.
# Variables (fill in before running)
image_name=""
# Fail fast with a clear message instead of letting docker error on an
# empty -t argument.
if [[ -z "${image_name}" ]]; then
  echo "image_name must be set before running this script" >&2
  exit 1
fi
# Build image
docker build -t "${image_name}" .
\ No newline at end of file
{
"$schema": "https://json-schema.org/draft/2019-09/schema",
"$id": "https://repo1.dsop.io/ironbank-tools/ironbank-pipeline/schema/hardening_manifest.schema.json",
"definitions": {
"printable-characters-without-newlines": {
"type": "string",
"pattern": "^[ -~]*$",
"minLength": 1
},
"printable-characters-without-newlines-or-slashes": {
"type": "string",
"pattern": "^[A-Za-z0-9][ -.0-~]*$",
"minLength": 1
},
"docker-NameRegexp-without-domain": {
"$comment": "https://github.com/docker/distribution/blob/master/reference/regexp.go",
"type": "string",
"pattern": "^[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$"
},
"docker-TagRegexp": {
"$comment": "https://github.com/docker/distribution/blob/master/reference/regexp.go",
"type": "string",
"pattern": "^[\\w][\\w.-]{0,127}$"
},
"docker-TagRegexp-non-latest": {
"$comment": "https://github.com/docker/distribution/blob/master/reference/regexp.go",
"type": "string",
"pattern": "^(?!latest$)[\\w][\\w.-]{0,127}$"
},
"docker-ReferenceRegexp-url": {
"$comment": "https://github.com/docker/distribution/blob/master/reference/regexp.go",
"type": "string",
"pattern": "^docker://((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?/)?[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][0-9A-Fa-f]{32,}))?$"
},
"docker-name-and-tag": {
"$comment": "https://github.com/docker/distribution/blob/master/reference/regexp.go",
"type": "string",
"pattern": "^[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?:[\\w][\\w.-]{0,127}$"
},
"docker-label-name": {
"$comment": "https://docs.docker.com/config/labels-custom-metadata/",
"type": "string",
"pattern": "^[a-z0-9]([.-]?[a-z0-9]+)*$"
},
"github-ReferenceRegexp-url": {
"$comment": "https://github.com/docker/distribution/blob/master/reference/regexp.go",
"type": "string",
"pattern": "^docker.pkg.github.com/((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?/)?[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][0-9A-Fa-f]{32,}))?$"
},
"environment-variable-name": {
"type": "string",
"pattern": "^[a-zA-Z0-9][a-zA-Z0-9_.-]*$"
}
},
"title": "IronBank",
"description": "Metadata surrounding an Iron Bank Container",
"type": "object",
"properties": {
"apiVersion": {
"description": "Version of Iron Bank metadata file",
"type": "string",
"const": "v1"
},
"name": {
"description": "Name of the Iron Bank container",
"$ref": "#/definitions/docker-NameRegexp-without-domain"
},
"tags": {
"description": "Tags to tag an image with when pushed to registry1",
"type": "array",
"items": [
{
"$ref": "#/definitions/docker-TagRegexp-non-latest"
}
],
"additionalItems": {
"$ref": "#/definitions/docker-TagRegexp"
},
"minItems": 1,
"uniqueItems": true
},
"args": {
"description": "Arguments passed to image build",
"type": "object",
"properties": {
"BASE_IMAGE": {
"$comment": "May be an empty string if the Dockerfile does not use this variable",
"oneOf": [
{
"$ref": "#/definitions/docker-NameRegexp-without-domain"
},
{
"const": ""
}
]
},
"BASE_TAG": {
"$comment": "May be an empty string if the Dockerfile does not use this variable",
"oneOf": [
{
"$ref": "#/definitions/docker-TagRegexp"
},
{
"const": ""
}
]
}
},
"additionalProperties": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"propertyNames": {
"$ref": "#/definitions/environment-variable-name"
},
"required": ["BASE_IMAGE", "BASE_TAG"]
},
"labels": {
"description": "Labels added to Iron Bank containers",
"type": "object",
"properties": {
"org.opencontainers.image.title": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"org.opencontainers.image.description": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"org.opencontainers.image.licenses": {
"$comment": "See https://spdx.org/licenses/",
"$ref": "#/definitions/printable-characters-without-newlines"
},
"org.opencontainers.image.url": {
"format": "uri",
"$ref": "#/definitions/printable-characters-without-newlines"
},
"org.opencontainers.image.vendor": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"org.opencontainers.image.version": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"mil.dso.ironbank.image.keywords": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"mil.dso.ironbank.image.type": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"mil.dso.ironbank.product.name": {
"$ref": "#/definitions/printable-characters-without-newlines"
}
},
"propertyNames": {
"$ref": "#/definitions/docker-label-name"
},
"additionalProperties": false,
"required": [
"org.opencontainers.image.description",
"org.opencontainers.image.licenses",
"org.opencontainers.image.title",
"org.opencontainers.image.vendor",
"org.opencontainers.image.version"
]
},
"resources": {
"description": "Resources to download before building the image",
"type": "array",
"items": {
"oneOf": [
{
"type": "object",
"properties": {
"url": {
"type": "string",
"pattern": "^https?://.+$"
},
"filename": {
"$ref": "#/definitions/printable-characters-without-newlines-or-slashes"
},
"validation": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": ["sha256", "sha512"]
},
"value": {
"type": "string",
"pattern": "^[a-f0-9]+$"
}
},
"additionalProperties": false,
"required": ["type", "value"]
},
"auth": {
"type": "object",
"properties": {
"id": {
"$ref": "#/definitions/environment-variable-name"
},
"type": {
"type": "string",
"const": "basic"
}
},
"additionalProperties": false,
"required": ["id"]
}
},
"additionalProperties": false,
"required": ["url", "filename"]
},
{
"type": "object",
"properties": {
"url": {
"type": "string",
"pattern": "^s3://.+$"
},
"filename": {
"$ref": "#/definitions/printable-characters-without-newlines-or-slashes"
},
"validation": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": ["sha256", "sha512"]
},
"value": {
"type": "string",
"pattern": "^[a-f0-9]+$"
}
},
"additionalProperties": false,
"required": ["type", "value"]
},
"auth": {
"type": "object",
"properties": {
"id": {
"$ref": "#/definitions/environment-variable-name"
},
"region": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"type": {
"$comment": "aws is left for backwards compatibility. Please use s3 moving forward",
"type": "string",
"enum": ["aws", "s3"]
}
},
"additionalProperties": false,
"required": ["id"]
}
},
"additionalProperties": false,
"required": ["url", "filename"]
},
{
"type": "object",
"properties": {
"url": {
"$ref": "#/definitions/docker-ReferenceRegexp-url"
},
"tag": {
"$ref": "#/definitions/docker-name-and-tag"
},
"auth": {
"type": "object",
"properties": {
"id": {
"$ref": "#/definitions/environment-variable-name"
},
"type": {
"type": "string",
"const": "basic"
}
},
"additionalProperties": false,
"required": ["id"]
}
},
"additionalProperties": false,
"required": ["url", "tag"]
},
{
"type": "object",
"properties": {
"url": {
"$ref": "#/definitions/github-ReferenceRegexp-url"
},
"tag": {
"$ref": "#/definitions/docker-name-and-tag"
}
},
"additionalProperties": false,
"required": ["url"]
}
]
},
"uniqueItems": true
},
"maintainers": {
"description": "Maintainers for this specific container",
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"username": {
"$ref": "#/definitions/printable-characters-without-newlines"
},
"email": {
"$ref": "#/definitions/printable-characters-without-newlines",
"format": "email"
},
"cht_member": {
"type": "boolean"
}
},
"additionalProperties": false,
"required": ["name", "username"]
},
"minItems": 1,
"uniqueItems": true
}
},
"required": ["apiVersion", "name", "tags", "args", "labels", "maintainers"],
"additionalProperties": false
}
\ No newline at end of file
#!/bin/bash
###########################################################################################
# This script pulls the CA bundle from the host wireserver, parses it,                    #
# and installs the certificates into the system CA trust store. Recommended               #
# execution method is cloud-init or VM custom-data for execution at                       #
# provisioning time.                                                                      #
#                                                                                         #
# NOTES:                                                                                  #
# Many Linux applications use their own CA bundle instead of the system one.              #
# If you are still seeing TLS certificate validation errors, ensure that                  #
# you have also copied these certificates to the calling application's CA bundle          #
# or identify the environment setting to direct it to use the system CA bundle.           #
# Example: export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt for python/az cli #
#                                                                                         #
# This script must be run as root or else calls to the wireserver will time out.          #
#                                                                                         #
# This script will run in a container, but does require sed so that must be installed.    #
###########################################################################################
# Query the Azure Instance Metadata Service for the cloud environment name.
metadata=$(curl -s -H "Metadata:true" "http://169.254.169.254/metadata/instance?api-version=2019-08-01")
cloudEnvironment=$(echo "$metadata" | grep -oP '(?<=azEnvironment\":\")[^\"]*')
# Only air-gapped clouds need the wireserver CA certificates installed.
# [[ ]] plus quoting so an empty/unset cloudEnvironment (e.g. when the
# metadata call fails) cannot break the test with a syntax error.
if [[ "$cloudEnvironment" == "USNat" || "$cloudEnvironment" == "USSec" ]]; then
    if [[ ! -d /root/AzureCACertificates ]]; then
        mkdir -p /root/AzureCACertificates
        # http://168.63.129.16 is a constant for the host's wireserver endpoint
        certs=$(curl -s "http://168.63.129.16/machine?comp=acmspackage&type=cacertificates&ext=json")
        IFS_backup=$IFS
        IFS=$'\r\n'
        certNames=($(echo "$certs" | grep -oP '(?<=Name\": \")[^\"]*'))
        certBodies=($(echo "$certs" | grep -oP '(?<=CertBody\": \")[^\"]*'))
        # Decode each cert body (literal \r\n escapes -> newlines, strip
        # remaining backslashes) and write it out as a .crt file.
        for i in "${!certBodies[@]}"; do
            echo "${certBodies[$i]}" | sed 's/\\r\\n/\n/g' | sed 's/\\//g' > "/root/AzureCACertificates/$(echo "${certNames[$i]}" | sed 's/.cer/.crt/g')"
        done
        IFS=$IFS_backup
        # Install into the RHEL/UBI trust anchors and rebuild the bundle.
        cp /root/AzureCACertificates/*.crt /etc/pki/ca-trust/source/anchors
        update-ca-trust extract
    fi
    # NOTE(review): /etc/ssl/certs/ca-certificates.crt is the Debian/Ubuntu
    # bundle path; on RHEL/UBI the extracted bundle lives under
    # /etc/pki/tls/certs -- confirm this path exists in the target image.
    echo "export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt" >> /etc/bashrc
fi
# Keep the container's PID 1 alive.
tail -f /dev/null
\ No newline at end of file
#!/bin/bash
# Creates an ACR export pipeline backed by a storage-container SAS stored in
# Key Vault, then grants the pipeline's managed identity access to that secret.
# Variables (fill in before running)
cloud_storage_suffix=""
cloud_keyvault_suffix=""
sas_expiry_time=""
source_rg_name=""
source_acr_name=""
source_kv_name=""
source_sas_secret=""
source_sa_name=""
source_container_name=""
export_pipeline_name=""
# Create Storage SAS Token (the leading '?' makes it a ready-to-use query string)
export_sas=?$(az storage container generate-sas \
  --name "${source_container_name}" \
  --account-name "${source_sa_name}" \
  --expiry "${sas_expiry_time}" \
  --permissions rwalc \
  --https-only \
  --output tsv)
# Store Token in Key Vault
az keyvault secret set \
  --name "${source_sas_secret}" \
  --value "${export_sas}" \
  --vault-name "${source_kv_name}"
# Add transfer extension to azcli
az extension add --source "https://acrtransferext.${cloud_storage_suffix}/dist/acrtransfer-1.0.0-py2.py3-none-any.whl"
# Create export pipeline
az acr export-pipeline create \
  --resource-group "${source_rg_name}" \
  --registry "${source_acr_name}" \
  --name "${export_pipeline_name}" \
  --secret-uri "https://${source_kv_name}.${cloud_keyvault_suffix}/secrets/${source_sas_secret}" \
  --storage-container-uri "https://${source_sa_name}.${cloud_storage_suffix}/${source_container_name}"
# Get Principal ID created as part of export pipeline
principal_id=$(az acr export-pipeline show \
  --resource-group "${source_rg_name}" \
  --registry "${source_acr_name}" \
  --name "${export_pipeline_name}" \
  --query identity.principalId \
  --output tsv)
# Get Resource ID of Key Vault
# BUG FIX: the original queried ${target_kv_name}, which is never defined in
# this (source-side) script; the vault holding the SAS is ${source_kv_name}.
key_vault_id=$(az keyvault show \
  --name "${source_kv_name}" \
  --query id \
  --output tsv)
# Assign Key Vault Secrets User role to pipeline principal
az role assignment create \
  --role "Key Vault Secrets User" \
  --scope "${key_vault_id}" \
  --assignee-object-id "${principal_id}"
\ No newline at end of file
#!/bin/bash
# Creates an ACR import pipeline backed by a storage-container SAS stored in
# Key Vault, then grants the pipeline's managed identity access to that secret.
# Variables (fill in before running)
cloud_storage_suffix=""
cloud_keyvault_suffix=""
sas_expiry_time=""
target_rg_name=""
target_acr_name=""
target_kv_name=""
target_sas_secret=""
target_sa_name=""
target_container_name=""
import_pipeline_name=""
# Create Storage SAS Token (the leading '?' makes it a ready-to-use query string)
import_sas=?$(az storage container generate-sas \
  --name "${target_container_name}" \
  --account-name "${target_sa_name}" \
  --expiry "${sas_expiry_time}" \
  --permissions rwalc \
  --https-only \
  --output tsv)
# Store Token in Key Vault
az keyvault secret set \
  --name "${target_sas_secret}" \
  --value "${import_sas}" \
  --vault-name "${target_kv_name}"
# Add transfer extension to azcli
# BUG FIX: 'az extension add' takes --source (a wheel URL); the original
# passed --target, which is not a valid flag for this command (compare the
# sibling export-pipeline script, which uses --source).
az extension add --source "https://acrtransferext.${cloud_storage_suffix}/dist/acrtransfer-1.0.0-py2.py3-none-any.whl"
# Create import pipeline
az acr import-pipeline create \
  --resource-group "${target_rg_name}" \
  --registry "${target_acr_name}" \
  --name "${import_pipeline_name}" \
  --secret-uri "https://${target_kv_name}.${cloud_keyvault_suffix}/secrets/${target_sas_secret}" \
  --storage-container-uri "https://${target_sa_name}.${cloud_storage_suffix}/${target_container_name}"
# Get Principal ID created as part of import pipeline
principal_id=$(az acr import-pipeline show \
  --resource-group "${target_rg_name}" \
  --registry "${target_acr_name}" \
  --name "${import_pipeline_name}" \
  --query identity.principalId \
  --output tsv)
# Get Resource ID of Key Vault
key_vault_id=$(az keyvault show \
  --name "${target_kv_name}" \
  --query id \
  --output tsv)
# Assign Key Vault Secrets User role to pipeline principal
az role assignment create \
  --role "Key Vault Secrets User" \
  --scope "${key_vault_id}" \
  --assignee-object-id "${principal_id}"
\ No newline at end of file
#!/bin/bash
# Deploys the Mission LZ container image from ACR into an Azure Container
# Instance, pulling registry credentials from Key Vault.
# Variables (fill in before running)
acr_name=""
image_name=""
image_tag=""
kv_name=""
rg_name=""
container_name=""
container_dns_name=""
acr_sp_pwd_secret_name=""
acr_sp_appid_secret_name=""
arm_endpoint=""
cloud_metadata_api="metadata/endpoints?api-version=2020-06-01"
# Get ACR login server
acr_login_server=$(az acr show \
  --name "${acr_name}" \
  --resource-group "${rg_name}" \
  --query "loginServer" \
  --output tsv)
# Create container instance.
# The credential command substitutions are quoted so secrets containing
# whitespace or shell metacharacters are passed intact as single arguments.
az container create \
  --name "${container_name}" \
  --resource-group "${rg_name}" \
  --image "${acr_login_server}/${image_name}:${image_tag}" \
  --registry-login-server "${acr_login_server}" \
  --registry-username "$(az keyvault secret show --vault-name "${kv_name}" --name "${acr_sp_appid_secret_name}" --query value --output tsv)" \
  --registry-password "$(az keyvault secret show --vault-name "${kv_name}" --name "${acr_sp_pwd_secret_name}" --query value --output tsv)" \
  --dns-name-label "${container_dns_name}" \
  --environment-variables "ARM_CLOUD_METADATA_URL=${arm_endpoint}${cloud_metadata_api}" \
  --query ipAddress.fqdn
\ No newline at end of file
#!/bin/bash
# Runs a local Mission LZ container image and opens an interactive shell in it.
# Variables (fill in before running)
image_name=""
container_name=""
mlz_metadatahost=""
cloud_metadata_api="metadata/endpoints?api-version=2020-06-01"
# Create instance from image (expansions quoted so empty/odd values produce a
# clear docker error instead of silently dropping arguments)
docker run -it -d --env ARM_CLOUD_METADATA_URL="${mlz_metadatahost}${cloud_metadata_api}" --name "${container_name}" "${image_name}"
# Login to running instance
docker exec -it "${container_name}" /bin/bash
\ No newline at end of file
#!/bin/bash
# Pulls the Mission LZ image from ACR, runs it, and opens a shell in it.
# Variables (fill in before running)
acr_name=""
rg_name=""
image_name=""
# BUG FIX: image_tag was referenced below but never defined in the original.
image_tag=""
container_name=""
mlz_metadatahost=""
cloud_metadata_api="metadata/endpoints?api-version=2020-06-01"
# Login to Azure registry
acr_login_server=$(az acr show \
  --name "${acr_name}" \
  --resource-group "${rg_name}" \
  --query "loginServer" \
  --output tsv)
az acr login --name "${acr_name}"
# Pull down image from ACR
docker pull "${acr_login_server}/${image_name}:${image_tag}"
# Create instance from the image that was just pulled.
# BUG FIX: the original ran the bare ${image_name}, which is not the
# reference that was pulled and would fail unless a local tag happened to exist.
docker run -it -d --env ARM_CLOUD_METADATA_URL="${mlz_metadatahost}${cloud_metadata_api}" --name "${container_name}" "${acr_login_server}/${image_name}:${image_tag}"
# Login to running instance
docker exec -it "${container_name}" /bin/bash
\ No newline at end of file
#!/bin/bash
# Provisioning script for a RHEL/UBI-family system: applies minimal security
# updates, then installs the runtime tooling used by Mission LZ (azure-cli,
# python3, unzip, etc.).
# Bash "strict mode", to help catch problems and bugs in the shell
# script. Every bash script you write should include this. See
# http://redsymbol.net/articles/unofficial-bash-strict-mode/ for
# details.
set -euo pipefail
# Install security updates, bug fixes and enhancements only.
# --nodocs skips documentation, which we don't need in production
# Docker images.
dnf --nodocs -y upgrade-minimal
# Install a new package, without unnecessary recommended packages:
# NOTE(review): installing azure-cli via dnf requires the Microsoft package
# repository to be configured beforehand -- this script assumes it already is.
dnf --nodocs -y install --setopt=install_weak_deps=False \
wget \
python3 \
unzip \
ca-certificates \
sudo \
azure-cli
# Delete cached files we don't need anymore:
dnf clean all
\ No newline at end of file
#!/bin/bash
# Provisioning script for a RHEL/UBI-family system: applies minimal security
# updates, then installs git.
# Bash "strict mode", to help catch problems and bugs in the shell
# script. Every bash script you write should include this. See
# http://redsymbol.net/articles/unofficial-bash-strict-mode/ for
# details.
set -euo pipefail
# Install security updates, bug fixes and enhancements only.
# --nodocs skips documentation, which we don't need in production
# Docker images.
dnf --nodocs -y upgrade-minimal
# Install a new package, without unnecessary recommended packages:
dnf --nodocs -y install --setopt=install_weak_deps=False \
git
# Delete cached files we don't need anymore:
dnf clean all
\ No newline at end of file
#!/bin/bash
# Bash "strict mode", to help catch problems and bugs in the shell
# script. See http://redsymbol.net/articles/unofficial-bash-strict-mode/
# for details.
set -euo pipefail

# Apply security updates, bug fixes, and enhancements only.
# --nodocs skips documentation, which we do not need in production
# Docker images.
dnf --nodocs -y upgrade-minimal

# Install required packages, without weak (recommended) dependencies;
# the list is sorted alphabetically for easy diffing:
dnf --nodocs -y install --setopt=install_weak_deps=False \
    azure-cli \
    ca-certificates \
    python3 \
    sudo \
    unzip \
    wget

# Remove cached metadata and packages we no longer need:
dnf clean all
\ No newline at end of file
#!/bin/bash
# Tag a locally built MLZ image and push it to an Azure Container Registry.
#
# Fill in the variables below before running.

# Bash "strict mode", for consistency with the other scripts in this repo
# (catches failed az/docker calls instead of silently continuing).
set -euo pipefail

# Variables
acr_name=""
rg_name=""
image_name=""
image_tag="latest"

# Login to Azure registry: resolve the registry's fully-qualified login
# server, then authenticate docker against it.
acr_login_server=$(az acr show \
    --name "${acr_name}" \
    --resource-group "${rg_name}" \
    --query "loginServer" \
    --output tsv)
az acr login --name "${acr_name}"

# Tag image for Azure and push to registry
docker tag "${image_name}:${image_tag}" "${acr_login_server}/${image_name}:${image_tag}"
docker push "${acr_login_server}/${image_name}:${image_tag}"
#!/bin/bash
# Run an ACR "export" pipeline: package the given image artifact from the
# source registry into a storage blob so it can be carried across an air gap.
#
# Fill in the variables below before running. Requires the azure-cli
# 'acrtransfer' extension that provides `az acr pipeline-run`.

# Bash "strict mode", for consistency with the other scripts in this repo.
set -euo pipefail

# Variables
source_rg_name=""
source_acr_name=""
export_pipeline_name=""
pipeline_run_name=""
image_name=""
image_version=""
image_blob_name=""

# Run the export pipeline.
# --force-redeploy re-runs the pipeline even if an identical run exists.
az acr pipeline-run create \
    --resource-group "${source_rg_name}" \
    --registry "${source_acr_name}" \
    --pipeline "${export_pipeline_name}" \
    --name "${pipeline_run_name}" \
    --pipeline-type export \
    --storage-blob "${image_blob_name}" \
    --artifacts "${image_name}:${image_version}" \
    --force-redeploy
\ No newline at end of file
#!/bin/bash
# Run an ACR "import" pipeline: load an image artifact from a storage blob
# (produced by the matching export pipeline) into the target registry on the
# other side of the air gap.
#
# Fill in the variables below before running. Requires the azure-cli
# 'acrtransfer' extension that provides `az acr pipeline-run`.

# Bash "strict mode", for consistency with the other scripts in this repo.
set -euo pipefail

# Variables
target_rg_name=""
target_acr_name=""
import_pipeline_name=""
pipeline_run_name=""
image_name=""
image_version=""
image_blob_name=""

# Run the import pipeline.
# --force-redeploy re-runs the pipeline even if an identical run exists.
az acr pipeline-run create \
    --resource-group "${target_rg_name}" \
    --registry "${target_acr_name}" \
    --pipeline "${import_pipeline_name}" \
    --name "${pipeline_run_name}" \
    --pipeline-type import \
    --storage-blob "${image_blob_name}" \
    --artifacts "${image_name}:${image_version}" \
    --force-redeploy
\ No newline at end of file
# build
This folder contains scripts that would be used by some automation tool to apply/destroy terraform in the repo.
This is a work in progress. Future work will be done to integrate this into a GitHub Actions workflow.
## Why
Provide an unattended way to ensure things are deployable in the repo.
## What you need
- Terraform CLI
- Azure CLI
- Deployed MLZ Config resources (Service Principal for deployment, Key Vault)
- A MLZ Config file
- A global.tfvars
- .tfvars for saca-hub, tier-0, tier-1, tier-2
## How
See the root [README's "Configure the Terraform Backend"](../README.md#Configure-the-Terraform-Backend) on how to get the MLZ Config resources deployed and a MLZ Config file.
Today, the global.tfvars file and the .tfvars for saca-hub, tier0-2, are well known and stored elsewhere. Reach out to the team if you need them.
Then, to apply and destroy pass those files as arguments to the relevant script.
There's an [optional argument to display terraform output](#Optionally-display-Terraform-output).
```shell
usage() {
echo "apply_tf.sh: Automation that calls apply terraform given a MLZ configuration and some tfvars"
error_log "usage: apply_tf.sh <mlz config> <mlz.tfvars> <display terraform output (y/n)>"
}
```
```shell
# assuming src/scripts/config/create_required_resources.sh has been run before...
./apply_tf.sh \
./path-to/mlz.config \
./path-to/mlz.tfvars \
y
```
```shell
# assuming src/scripts/config/create_required_resources.sh has been run before...
./destroy_tf.sh \
./path-to/mlz.config \
./path-to/mlz.tfvars \
y
```
### Optionally display Terraform output
There's an optional argument at the end to specify whether or not to display terraform's output. Set it to 'y' if you want to see things as they happen.
By default, if you do not set this argument, terraform output will be sent to /dev/null (to support clean logs in a CI/CD environment) and your logs will look like:
```plaintext
Applying saca-hub (1/5)...
Finished applying saca-hub!
Applying tier-0 (1/5)...
Finished applying tier-0!
Applying tier-1 (1/5)...
Finished applying tier-1!
Applying tier-2 (1/5)...
Finished applying tier-2!
```
## Gotchas
There's wonky behavior with how Log Analytics Workspaces and Azure Monitor diagnostic log settings are deleted at the Azure Resource Manager level.
For example, if you deployed your environment with Terraform, then deleted it with Azure CLI or the Portal, you can end up with orphan/ghost resources that will be deleted at some other unknown time.
To ensure you're able to deploy on-top of existing resources over and over again, __use Terraform to apply and destroy your environment.__
#!/bin/bash
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
# shellcheck disable=SC1090,SC2154
# SC1090: Can't follow non-constant source. Use a directive to specify location.
# SC2154: "var is referenced but not assigned". These values come from an external file.
#
# Automation that calls apply terraform given a MLZ configuration
# Usage: apply_tf.sh <mlz config> <mlz.tfvars> [display terraform output (y/n), default n]
# Exit immediately if any command fails
set -e
# Print a message to stderr
error_log() {
echo "${1}" 1>&2;
}
usage() {
echo "apply_tf.sh: Automation that calls apply terraform given a MLZ configuration and some tfvars"
error_log "usage: apply_tf.sh <mlz config> <mlz.tfvars> <display terraform output (y/n)>"
}
# require the two mandatory arguments (config and tfvars)
if [[ "$#" -lt 2 ]]; then
usage
exit 1
fi
# take some valid, well known, mlz_config and tfvars as input
mlz_config=$(realpath "${1}")
mlz_tfvars=$(realpath "${2}")
display_tf_output=${3:-n}
# reference paths (this script is assumed to live one directory below src/)
this_script_path=$(realpath "${BASH_SOURCE%/*}")
src_dir=$(dirname "${this_script_path}")
terraform_dir="${src_dir}/terraform/"
scripts_dir="${src_dir}/scripts/"
# apply function
# $1: subscription ID to deploy into
# $2: path to the Terraform module directory
# $3: path to the tfvars file to apply with
apply() {
sub_id=$1
tf_dir=$2
vars=$3
# generate config.vars based on MLZ Config and Terraform module
# (sourced so any values it sets are visible in this shell)
. "${scripts_dir}/config/generate_vars.sh" \
"${mlz_config}" \
"${sub_id}" \
"${tf_dir}"
# remove any existing terraform initialization
rm -rf "${tf_dir}/.terraform"
# copy input vars to temporary file
input_vars=$(realpath "${vars}")
temp_vars="temp_vars.tfvars"
rm -f "${temp_vars}"
touch "${temp_vars}"
cp "${input_vars}" "${temp_vars}"
# remove any tfvars and substitute it with input vars
# (the module directory ends up with a fresh copy of the caller's tfvars)
tf_vars="${tf_dir}/$(basename "${vars}")"
rm -f "${tf_vars}"
touch "${tf_vars}"
cp "${temp_vars}" "${tf_vars}"
rm -f "${temp_vars}"
# set the target subscription
az account set \
--subscription "${sub_id}" \
--output none
# attempt to apply $max_attempts times before giving up
# (race conditions, transient errors etc.)
apply_success="false"
attempts=1
max_attempts=5
apply_command="${scripts_dir}/terraform/apply_terraform.sh ${tf_dir} ${tf_vars} y"
destroy_command="${scripts_dir}/terraform/destroy_terraform.sh ${tf_dir} ${tf_vars} y"
# when output display is off, silence both commands (keeps CI logs clean)
if [[ $display_tf_output == "n" ]]; then
apply_command+=" &>/dev/null"
destroy_command+=" &>/dev/null"
fi
# retry loop: apply; on failure destroy (clean up partial state) and retry
while [ $apply_success == "false" ]
do
echo "INFO: applying Terraform at ${tf_dir} (${attempts}/${max_attempts})..."
if ! eval "$apply_command";
then
# if we fail, run terraform destroy and try again
error_log "ERROR: failed to apply ${tf_dir} (${attempts}/${max_attempts}). Trying some manual clean-up and Terraform destroy..."
eval "$destroy_command"
((attempts++))
if [[ $attempts -gt $max_attempts ]]; then
error_log "ERROR: failed ${max_attempts} times to apply ${tf_dir}. Exiting."
exit 1
fi
else
# if we succeed meet the base case
apply_success="true"
echo "INFO: finished applying ${tf_dir}!"
fi
done
}
# source vars from mlz_config
# (provides mlz_saca_subid and the other mlz_* values referenced below)
. "${mlz_config}"
# call apply()
apply "${mlz_saca_subid}" "${terraform_dir}/mlz" "${mlz_tfvars}"
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment