Commit 6780e4e0 authored by Zachariah Dzielinski

BB-1078 - Integrate umbrella-templates logic into umbrella, and remove umbrella's dependency on umbrella-templates
parent baf4ee3f
Showing 631 additions and 24 deletions
@@ -9,10 +9,9 @@ workflow:
      when: never
    - when: always
# include templates
include:
  - project: 'platform-one/big-bang/pipeline-templates/umbrella-templates'
    file: '/global.gitlab-ci.yml'
    ref: 0.0.1
  - local: '/.gitlab-ci/templates.yml'
stages:
  - smoke tests
@@ -183,19 +182,10 @@ release:
#-----------------------------------------------------------------------------------------------------------------------
# Infrastructure: Management Jobs
#
# .pre job for pulling pipeline contents from umbrella-templates project
# TODO: Currently all jobs connected via "needs" must explicitly need this job; we should evaluate turning this into a global cache
fetch umbrella templates:
  extends:
    - .fetch
    - .infra create
  stage: .pre
# Abstract job for manually triggering infrastructure builds
.infra fork:
  stage: network up
  needs:
    - fetch umbrella templates
  rules:
    # Skip when branch name starts with "hotfix" or "patch"
    - if: '$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /^(hotfix|patch)/'
@@ -229,6 +219,7 @@ fetch umbrella templates:
#-----------------------------------------------------------------------------------------------------------------------
# Infrastructure: Networking
#
aws/network up:
  extends:
    - .infra fork
@@ -250,6 +241,7 @@ aws/network down:
#-----------------------------------------------------------------------------------------------------------------------
# Infrastructure: RKE2
#
# Create RKE2 cluster on AWS
aws/rke2/cluster up:
  stage: cluster up
@@ -257,8 +249,6 @@ aws/rke2/cluster up:
    - .infra create
    - .rke2 up
  needs:
    - job: fetch umbrella templates
      artifacts: true
    - job: aws/network up
# Install BigBang on RKE2 cluster on AWS
@@ -268,16 +258,13 @@ aws/rke2/bigbang up:
    - .infra create
    - .bigbang
  needs:
    - job: fetch umbrella templates
      artifacts: true
    - job: aws/rke2/cluster up
      artifacts: true
  before_script:
    - mkdir -p ~/.kube
    - cp ${CI_PROJECT_DIR}/rke2.yaml ~/.kube/config
    # Deploy a default storage class for aws
    - kubectl apply -f ${CI_PROJECT_DIR}/umbrella-templates/jobs/rke2/dependencies/k8s-resources/aws/default-ebs-sc.yaml
    - kubectl apply -f ${CI_PROJECT_DIR}/.gitlab-ci/jobs/rke2/dependencies/k8s-resources/aws/default-ebs-sc.yaml
  script:
    - *deploy_bigbang
@@ -288,8 +275,6 @@ aws/rke2/bigbang test:
    - .infra create
    - .bigbang
  needs:
    - job: fetch umbrella templates
      artifacts: true
    - job: aws/rke2/cluster up
      artifacts: true
    - job: aws/rke2/bigbang up
@@ -310,8 +295,6 @@ aws/rke2/bigbang down:
    - .infra cleanup
    - .bigbang
  needs:
    - job: fetch umbrella templates
      artifacts: true
    - job: aws/rke2/cluster up
      artifacts: true
    - job: aws/rke2/bigbang test
@@ -330,7 +313,5 @@ aws/rke2/cluster down:
    - .infra cleanup
    - .rke2 down
  needs:
    - job: fetch umbrella templates
      artifacts: true
    - job: aws/rke2/bigbang down
#-----------------------------------------------------------------------------------------------------------------------
\ No newline at end of file
.k3d_before_script: &k3d_before_script
  # Start dnsmasq for cluster dns resolution
  - docker run -d -p 53:53/udp -p 53:53 registry.dsop.io/platform-one/big-bang/pipeline-templates/pipeline-templates/go-dnsmasq:0eddd476
  - echo "nameserver 127.0.0.1" >> /etc/resolv.conf
  # Stand up the cluster, exposing 80/443 through the k3d loadbalancer
  - k3d cluster create ${CLUSTER_NAME} --k3s-server-arg "--disable=traefik" --k3s-server-arg "--disable=metrics-server" -p 80:80@loadbalancer -p 443:443@loadbalancer --wait --agents $N_AGENTS --servers $N_SERVERS
  # Poll until the server node has registered with the API server
  - while ! (kubectl get node | grep "server" > /dev/null); do sleep 3; done
  - kubectl get nodes
  - k3d node list

.k3d_after_script: &k3d_after_script
  - k3d cluster delete ${CLUSTER_NAME}

.k3d:
  image: registry.dsop.io/platform-one/big-bang/pipeline-templates/pipeline-templates/k3d-builder:045fb1c2
  services:
    - registry.dsop.io/platform-one/big-bang/pipeline-templates/pipeline-templates/docker:dind
  tags:
    - bigbang
    - privileged
    - public
  variables:
    # Talk to the docker:dind service over plain TCP (TLS disabled below)
    DOCKER_HOST: tcp://localhost:2375/
    DOCKER_DRIVER: overlay2
    DOCKER_TLS_CERTDIR: ""
    CLUSTER_NAME: ${CI_COMMIT_REF_SLUG}
    N_SERVERS: 1
    N_AGENTS: 0
  before_script:
    - *k3d_before_script
  after_script:
    - *k3d_after_script
\ No newline at end of file
.calc_unique_cidr: &calc_unique_cidr
  - apk add python3 py3-boto3
  - echo "Calculating unique cidr range for vpc"
  - chmod 755 ../../../get-vpc.py
  # Reuse the CIDR already recorded in this workspace's state if present; otherwise compute a fresh, non-overlapping one with get-vpc.py
  - TF_VAR_vpc_cidr=$(terraform output vpc_cidr | tr -d '\n' | tr -d '\r' | grep 10) || TF_VAR_vpc_cidr=$(python3 ../../../get-vpc.py | tr -d '\n' | tr -d '\r')
  - echo "Using VPC CIDR $TF_VAR_vpc_cidr for $CLUSTER_NAME cluster"
  - export TF_VAR_vpc_cidr=$TF_VAR_vpc_cidr

.network:
  extends: .terraformer
  variables:
    TF_ROOT: ".gitlab-ci/jobs/networking/aws/dependencies/terraform/env/ci"

.network up:
  extends: .network
  script:
    - *calc_unique_cidr
    - echo "Creating network with cidr range ${TF_VAR_vpc_cidr}"
    - terraform apply -auto-approve

.network down:
  extends:
    - .network
    - .terraform destroy workspace
  script:
    - *calc_unique_cidr
    - echo "Destroying network"
    - terraform destroy -auto-approve
\ No newline at end of file
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true

[dev-packages]

[packages]
boto3 = "*"

[requires]
python_version = "3.8"
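
The lock file that follows pins this Pipfile's dependency resolution; regenerating it (assuming pipenv is installed) is a single command:

pipenv lock   # re-resolves boto3 and its transitive dependencies, rewriting Pipfile.lock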
{
    "_meta": {
        "hash": {
            "sha256": "0ba145c19353da73840755ed85984b6653241c800c6ad2c772805a6089dfb424"
        },
        "pipfile-spec": 6,
        "requires": {
            "python_version": "3.8"
        },
        "sources": [
            {
                "name": "pypi",
                "url": "https://pypi.org/simple",
                "verify_ssl": true
            }
        ]
    },
    "default": {
        "boto3": {
            "hashes": [
                "sha256:b091cf6581dc137f100789240d628a105c989cf8f559b863fd15e18c1a29b714",
                "sha256:bd4c26d304abba8d96817bb83917bb2e19123f5ce1a5dd26255f866daeff61c7"
            ],
            "index": "pypi",
            "version": "==1.16.17"
        },
        "botocore": {
            "hashes": [
                "sha256:33f650b2d63cc1f2d5239947c9ecdadfd8ceeb4ab8bdefa0a711ac175a43bf44",
                "sha256:81184afc24d19d730c1ded84513fbfc9e88409c329de5df1151bb45ac30dfce4"
            ],
            "version": "==1.19.17"
        },
        "jmespath": {
            "hashes": [
                "sha256:b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9",
                "sha256:cdf6525904cc597730141d61b36f2e4b8ecc257c420fa2f4549bac2c2d0cb72f"
            ],
            "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
            "version": "==0.10.0"
        },
        "python-dateutil": {
            "hashes": [
                "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c",
                "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"
            ],
            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
            "version": "==2.8.1"
        },
        "s3transfer": {
            "hashes": [
                "sha256:2482b4259524933a022d59da830f51bd746db62f047d6eb213f2f8855dcb8a13",
                "sha256:921a37e2aefc64145e7b73d50c71bb4f26f46e4c9f414dc648c6245ff92cf7db"
            ],
            "version": "==0.3.3"
        },
        "six": {
            "hashes": [
                "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
                "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
            ],
            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
            "version": "==1.15.0"
        },
        "urllib3": {
            "hashes": [
                "sha256:19188f96923873c92ccb987120ec4acaa12f0461fa9ce5d3d0772bc965a39e08",
                "sha256:d8ff90d979214d7b4f8ce956e80f4028fc6860e4431f731ea4a8c08f23f99473"
            ],
            "markers": "python_version != '3.4'",
            "version": "==1.26.2"
        }
    },
    "develop": {}
}
import ipaddress
import logging
import operator
import sys

import boto3

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Start from 10.10.0.0/16 and walk upward until a free /16 is found
initial_cidr = "10.10.0.0/16"
allowed_private_cidr = ipaddress.IPv4Network("10.0.0.0/8")

client = boto3.client('ec2', region_name='us-gov-west-1')
res = client.describe_vpcs()
# CIDR blocks of every VPC already present in this account/region
vpc_cidrs = sorted(map(operator.itemgetter("CidrBlock"), res["Vpcs"]))

unique_cidr = False
while not unique_cidr:
    try:
        proposed_cidr = ipaddress.IPv4Network(initial_cidr)
    except ValueError:
        logger.error("Couldn't convert cidr of " + str(initial_cidr))
        sys.exit(2)
    found_cidr_overlap = False
    for cidr in vpc_cidrs:
        if ipaddress.IPv4Network(cidr).overlaps(proposed_cidr):
            found_cidr_overlap = True
            break
    if not found_cidr_overlap:
        if allowed_private_cidr.overlaps(proposed_cidr):
            unique_cidr = True
            final_vpc = initial_cidr
        else:
            logger.error("Proposed cidr not in private ip space: " + str(initial_cidr))
            sys.exit(2)
    else:
        try:
            # Step to the next /16 block (2**16 = 65536 addresses) and retry
            initial_cidr = str(ipaddress.ip_address(initial_cidr.split("/")[0]) + 65536) + "/16"
        except ValueError:
            logger.error("Couldn't update cidr of " + str(initial_cidr))
            sys.exit(2)

print(final_vpc)
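
A minimal sketch of how the script is consumed, mirroring the calc_unique_cidr anchor above (assumes AWS credentials and the target region are already configured in the environment):

# Pick a free /16 and hand it to terraform as TF_VAR_vpc_cidr
export TF_VAR_vpc_cidr="$(python3 get-vpc.py)"
terraform apply -auto-approve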
terraform {
  backend "s3" {
    bucket               = "umbrella-tf-states"
    key                  = "terraform.tfstate"
    region               = "us-gov-west-1"
    dynamodb_table       = "umbrella-tf-states-lock"
    workspace_key_prefix = "aws-networking"
  }
}

module "ci" {
  source = "../../main"

  # Set by CI - "${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}"
  env = var.env

  # Calculated in CI
  vpc_cidr = var.vpc_cidr
}

output "vpc_id" {
  value = module.ci.vpc_id
}

output "public_subnets" {
  value = module.ci.public_subnet_ids
}

output "private_subnets" {
  value = module.ci.private_subnet_ids
}

# Exposed for the RKE2 environment, which reads intra_subnets from this
# state via terraform_remote_state
output "intra_subnets" {
  value = module.ci.intra_subnet_ids
}
\ No newline at end of file
variable "vpc_cidr" {}
variable "env" {}
\ No newline at end of file
module "dev" {
source = "../../main"
env = "dev"
vpc_cidr = "10.255.0.0/16"
}
output "vpc_id" {
value = module.dev.vpc_id
}
output "public_subnets" {
value = module.dev.public_subnet_ids
}
output "private_subnets" {
value = module.dev.private_subnet_ids
}
\ No newline at end of file
## TODO: Revisit the terraform gitlab http backend
# terraform {
#   backend "http" {}
# }

provider "aws" {
  region = var.aws_region
}
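
# Note: ceil(log(6, 2)) = 3, so each cidrsubnet() call below carves the VPC
# CIDR into /19 blocks (8 possible on a /16; indices 0-5 are used): two
# public, two private, and two intra subnets.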
locals {
  public_subnet_cidrs = [
    cidrsubnet(var.vpc_cidr, ceil(log(6, 2)), 0),
    cidrsubnet(var.vpc_cidr, ceil(log(6, 2)), 1),
  ]
  private_subnet_cidrs = [
    cidrsubnet(var.vpc_cidr, ceil(log(6, 2)), 2),
    cidrsubnet(var.vpc_cidr, ceil(log(6, 2)), 3),
  ]
  intra_subnet_cidrs = [
    cidrsubnet(var.vpc_cidr, ceil(log(6, 2)), 4),
    cidrsubnet(var.vpc_cidr, ceil(log(6, 2)), 5),
  ]

  name = "umbrella-${var.env}"
  tags = {
    "terraform" = "true",
    "env"       = var.env,
    "project"   = "umbrella"
  }
}
#
# Network
#
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name = local.name
  cidr = var.vpc_cidr

  azs             = ["${var.aws_region}a", "${var.aws_region}b", "${var.aws_region}c"]
  public_subnets  = local.public_subnet_cidrs
  private_subnets = local.private_subnet_cidrs
  intra_subnets   = local.intra_subnet_cidrs

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true
  enable_dns_support   = true

  # Use AWS VPC private endpoints to mirror functionality on airgapped (T)C2S environments
  #   S3: for some vendors' cluster bootstrapping/artifact storage
  #   STS: for caller identity checks
  #   EC2: for cloud manager type requests (such as auto ebs provisioning)
  #   ASG: for cluster autoscaler
  #   ELB: for auto elb provisioning
  enable_s3_endpoint                   = true
  enable_sts_endpoint                  = true
  enable_ec2_endpoint                  = true
  enable_ec2_autoscaling_endpoint      = true
  enable_elasticloadbalancing_endpoint = true

  ec2_endpoint_security_group_ids                    = [aws_security_group.endpoints.id]
  ec2_endpoint_subnet_ids                            = module.vpc.intra_subnets
  ec2_endpoint_private_dns_enabled                   = true
  ec2_autoscaling_endpoint_security_group_ids        = [aws_security_group.endpoints.id]
  ec2_autoscaling_endpoint_subnet_ids                = module.vpc.intra_subnets
  ec2_autoscaling_endpoint_private_dns_enabled       = true
  elasticloadbalancing_endpoint_security_group_ids   = [aws_security_group.endpoints.id]
  elasticloadbalancing_endpoint_subnet_ids           = module.vpc.intra_subnets
  elasticloadbalancing_endpoint_private_dns_enabled  = true
  sts_endpoint_security_group_ids                    = [aws_security_group.endpoints.id]
  sts_endpoint_subnet_ids                            = module.vpc.intra_subnets
  sts_endpoint_private_dns_enabled                   = true

  # false = the module allocates fresh EIPs for the NAT gateways (rather than
  # reusing externally managed EIPs)
  reuse_nat_ips = false

  # Add in required tags for proper AWS CCM integration
  public_subnet_tags = merge({
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"              = "1"
  }, local.tags)
  private_subnet_tags = merge({
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"     = "1"
  }, local.tags)
  intra_subnet_tags = merge({
    "kubernetes.io/cluster/${local.name}" = "shared"
  }, local.tags)
  tags = merge({
    "kubernetes.io/cluster/${local.name}" = "shared"
  }, local.tags)
}
# Shared Private Endpoint Security Group
resource "aws_security_group" "endpoints" {
  name        = "${local.name}-endpoint"
  description = "${local.name} endpoint"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
#
# TGW Attachments
# Attaches the management vpc (the hub) to the created vpc (the spoke).
#
module "spoke" {
  source = "git::https://repo1.dsop.io/platform-one/big-bang/terraform-modules/spoke-tgw-attachments.git"

  name = local.name

  hub_vpc_id = var.hub_vpc_id
  hub_tgw    = var.hub_tgw
  hub_tgw_rt = var.hub_tgw_rt
  hub_tgwa   = var.hub_tgwa

  spoke_vpc_id  = module.vpc.vpc_id
  spoke_subnets = module.vpc.private_subnets
  spoke_rt_ids  = module.vpc.private_route_table_ids
}
output "vpc_id" {
value = module.vpc.vpc_id
}
output "private_subnet_ids" {
value = module.vpc.private_subnets
}
output "public_subnet_ids" {
value = module.vpc.public_subnets
}
\ No newline at end of file
variable "env" {}
variable "vpc_cidr" {
description = "The CIDR block for the VPC. Default value is a valid CIDR"
type = string
}
variable "aws_region" {
type = string
default = "us-gov-west-1"
}
#
# Spoke variables
# We can hardcode these for now... they haven't changed in 8 months
#
variable "hub_vpc_id" {
default = "vpc-5f627a3b"
}
variable "hub_tgw" {
default = "tgw-0c324b57d019790f4"
}
variable "hub_tgwa" {
default = "tgw-attach-0dce16098dd33fd2c"
}
variable "hub_tgw_rt" {
default = "tgw-rtb-04b66987e7d96a3d4"
}
\ No newline at end of file
.rke2 tf:
  extends: .terraformer
  variables:
    TF_ROOT: ".gitlab-ci/jobs/rke2/dependencies/terraform/env/ci"

.rke2 up:
  extends: .rke2 tf
  script:
    # Fetch dependencies
    - apk add bash aws-cli
    - terraform apply -input=false -auto-approve
    - mv rke2.yaml ${CI_PROJECT_DIR}/rke2.yaml
  artifacts:
    # Hand the cluster kubeconfig to downstream jobs
    paths:
      - ${CI_PROJECT_DIR}/rke2.yaml

.rke2 down:
  extends:
    - .rke2 tf
    - .terraform destroy workspace
  script:
    - terraform destroy -input=false -auto-approve
\ No newline at end of file
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ebs
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - debug
volumeBindingMode: Immediate
\ No newline at end of file
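
As wired into the pipeline above, the aws/rke2/bigbang up job applies this manifest before deploying BigBang:

kubectl apply -f ${CI_PROJECT_DIR}/.gitlab-ci/jobs/rke2/dependencies/k8s-resources/aws/default-ebs-sc.yaml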
# RKE2 Packer

An _extremely_ simple Packer script to pre-load RKE2 dependencies for airgapped deployment.

This Packer script is __not__ intended to serve as a standard for airgapped RKE2 deployments; it is simply a quick and dirty way to enable airgap deployments in the context of BigBang's CI.

## Future Work

This is currently baselined off of a vanilla RHEL 8.3 AMI; it should instead be based on a P1 gold-standard STIG'd AMI.
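
## Usage

For reference, a build might be kicked off as shown below (illustrative only; the template filename and the `subnet_id`/`kms_key_id` values are placeholders for the user variables the template consumes):

    packer build \
      -var 'subnet_id=subnet-0123456789abcdef0' \
      -var 'kms_key_id=alias/my-packer-key' \
      packer.json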
{
  "variables": {
    "aws_region": "us-gov-west-1",
    "rke2_version": "v1.18.12+rke2r1",
    "rke2_url": "https://github.com/rancher/rke2/releases/download",
    "ami_name": "rhel8",
    "ami_description": "An RKE2 base image based on RHEL 8 Build Date: {{ isotime }}",
    "source_ami_name": "RHEL-8.3*",
    "source_ami_owner": "309956199498",
    "source_ami_owner_govcloud": "219670896067",
    "source_ami_ssh_user": "ec2-user"
  },
  "builders": [
    {
      "type": "amazon-ebs",
      "region": "{{user `aws_region`}}",
      "ami_regions": "us-gov-west-1",
      "source_ami_filter": {
        "filters": {
          "name": "{{user `source_ami_name`}}",
          "root-device-type": "ebs",
          "state": "available",
          "virtualization-type": "hvm",
          "architecture": "x86_64"
        },
        "owners": [ "{{user `source_ami_owner`}}", "{{user `source_ami_owner_govcloud`}}" ],
        "most_recent": true
      },
      "instance_type": "m5.large",
      "ssh_username": "{{user `source_ami_ssh_user`}}",
      "subnet_id": "{{user `subnet_id`}}",
      "kms_key_id": "{{user `kms_key_id`}}",
      "launch_block_device_mappings": [
        {
          "device_name": "/dev/sda1",
          "volume_size": 25,
          "volume_type": "gp2",
          "delete_on_termination": true
        }
      ],
      "tags": {
        "Name": "rke2-{{user `ami_name`}}-{{ timestamp }}",
        "BuildDate": "{{ isotime }}",
        "RKE2-Version": "{{user `rke2_version`}}"
      },
      "ami_name": "rke2-{{user `ami_name`}}-{{ timestamp }}",
      "ami_description": "{{user `ami_description` }}",
      "ami_virtualization_type": "hvm",
      "run_tags": {
        "Name": "packer-builder-rke2-{{user `ami_name`}}-ami"
      }
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "environment_vars": [
        "RKE2_VERSION={{ user `rke2_version` }}",
        "RKE2_URL={{ user `rke2_url` }}"
      ],
      "script": "./setup.sh",
      "execute_command": "chmod +x {{ .Path }}; sudo {{ .Vars }} {{ .Path }}"
    }
  ]
}
\ No newline at end of file
#!/bin/bash
set -o pipefail
set -o errexit

# Bare minimum dependency collection
yum install -y unzip
yum update -y

cd /usr/local/bin

# RKE2
curl -sL https://get.rke2.io -o rke2.sh
curl -OLs "${RKE2_URL}/${RKE2_VERSION}/{rke2.linux-amd64,rke2.linux-amd64.tar.gz,rke2-images.linux-amd64.txt,rke2-images.linux-amd64.tar.gz,sha256sum-amd64.txt}"

# Verify checksums (skipping the e2e artifacts, which weren't downloaded).
# With errexit set, the failure must be caught inside the `if` condition.
if ! grep -v "e2e-*" sha256sum-amd64.txt | sha256sum -c /dev/stdin
then
  echo "[ERROR] checksum of rke2 files don't match"
  exit 1
fi
rm -f sha256sum-amd64.txt
chmod 755 rke2*

# Install rke2 components (with yum so selinux components are fetched)
INSTALL_RKE2_METHOD='yum' ./rke2.sh
INSTALL_RKE2_METHOD='yum' INSTALL_RKE2_TYPE="agent" ./rke2.sh

# Move and decompress images to pre-load dir
mkdir -p /var/lib/rancher/rke2/agent/images/ && zcat rke2-images.linux-amd64.tar.gz > /var/lib/rancher/rke2/agent/images/rke2-images.linux-amd64.tar

# AWS CLI
curl -sL https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -o /tmp/awscliv2.zip && unzip -qq -d /tmp /tmp/awscliv2.zip && /tmp/aws/install --bin-dir /usr/bin
rm -rf /tmp/aws*

# WARN: This sets the default region to the current region that packer is building from
aws configure set default.region $(curl -s http://169.254.169.254/latest/meta-data/placement/region)

cat <<EOF >> /etc/environment
HISTTIMEFORMAT="%F %T "
KUBECONFIG=/etc/rancher/rke2/rke2.yaml
EOF

cat <<EOF >> /root/.bash_aliases
alias k='rke2 kubectl'
EOF

# Clean up build instance history
rm -rf \
  /etc/hostname \
  /home/ec2-user/.ssh/authorized_keys \
  /root/.ssh/authorized_keys \
  /var/lib/cloud/data \
  /var/lib/cloud/instance \
  /var/lib/cloud/instances \
  /var/lib/cloud/sem \
  /var/log/cloud-init-output.log \
  /var/log/cloud-init.log \
  /var/log/secure \
  /var/log/wtmp \
  /var/log/apt
> /etc/machine-id
> /var/log/wtmp
> /var/log/btmp
yum clean all -y
df -h; date
history -c
terraform {
  backend "s3" {
    bucket               = "umbrella-tf-states"
    key                  = "terraform.tfstate"
    region               = "us-gov-west-1"
    dynamodb_table       = "umbrella-tf-states-lock"
    workspace_key_prefix = "rke2"
  }
}

# Pull the VPC and subnet ids out of the matching networking workspace's state
data "terraform_remote_state" "networking" {
  backend = "s3"
  config = {
    bucket               = "umbrella-tf-states"
    key                  = "terraform.tfstate"
    region               = "us-gov-west-1"
    workspace_key_prefix = "aws-networking"
  }
  workspace = var.env
}

module "ci" {
  source = "../../main"

  env = var.env

  vpc_id  = data.terraform_remote_state.networking.outputs.vpc_id
  # Requires the networking environment to export intra_subnets (see its outputs above)
  subnets = data.terraform_remote_state.networking.outputs.intra_subnets

  download = false

  server_ami = "ami-00aab2121681e4a31"
  agent_ami  = "ami-00aab2121681e4a31"
}
\ No newline at end of file
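
A rough sketch of what the `.rke2 up` job effectively runs from this directory (its TF_ROOT above); the workspace-selection step is an assumption inferred from the `.terraform destroy workspace` template name and the per-branch state layout:

cd .gitlab-ci/jobs/rke2/dependencies/terraform/env/ci
terraform init
terraform workspace select "${CLUSTER_NAME}" || terraform workspace new "${CLUSTER_NAME}"
terraform apply -input=false -auto-approve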