Skip to content

Commit

Permalink
set up CDK & jenkins & Go app
Browse files Browse the repository at this point in the history
  • Loading branch information
rico-bincentive committed Apr 19, 2020
1 parent 5a268c6 commit 0279965
Show file tree
Hide file tree
Showing 29 changed files with 1,358 additions and 23 deletions.
16 changes: 16 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Python virtualenv
.env/

# Python Byte-compiled / optimized / DLL files
__pycache__/

# Distribution / packaging
*.egg-info/

# CDK Context & Staging files
cdk.context.json
.cdk.staging/
cdk.out/

# VSCode files
.vscode
12 changes: 10 additions & 2 deletions README.md → 01-install-eks-cluster/README.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

# Welcome to your CDK Python project!

This is a blank project for Python development with CDK.
Expand Down Expand Up @@ -55,4 +54,13 @@ command.
* `cdk diff` compare deployed stack with current state
* `cdk docs` open CDK documentation

Enjoy!
# Project Environment Parameters

Mandatory environment parameters:
```
$ EKS_ADMIN_IAM_USERNAME={IAM user or role ARN}
```

Optional environment parameters:
```
$ CDK_ACCOUNT={AWS account ID}
$ CDK_REGION={AWS region name}
```
26 changes: 26 additions & 0 deletions 01-install-eks-cluster/app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
#!/usr/bin/env python3
"""CDK entry point: synthesizes the VPC, EKS cluster, and ECR stacks."""

from aws_cdk import core

from eks_cluster.vpc_stack import VpcStack
from eks_cluster.eks_cluster_stack import EksClusterStack
from ecr.ecr_stack import EcrStack
from env import aws_account

app = core.App()

# Networking layer the cluster is placed in.
network = VpcStack(app, 'vpc-stack', env=aws_account)

# EKS control plane + node groups; must deploy after the VPC exists.
cluster = EksClusterStack(
    app, 'jenkins-workshop-eks-cluster',
    vpc=network.eks_vpc,
    env=aws_account
)
cluster.add_dependency(network)

# Container registry for the workshop's Go application image.
EcrStack(app, 'ecr-repository', env=aws_account)

app.synth()
File renamed without changes.
27 changes: 27 additions & 0 deletions 01-install-eks-cluster/ecr/ecr_stack.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
from aws_cdk import (
    core,
    aws_ecr as ecr,
)

class EcrStack(core.Stack):
    """Stack that provisions the ECR repositories used by the workshop."""

    def __init__(self, scope: core.Construct, name: str, **kwargs) -> None:
        super().__init__(scope, name, **kwargs)

        # Shared lifecycle rule: cap each repository at 500 images.
        lifecycle_rule = ecr.LifecycleRule(
            description='Default ECR lifecycle Rule',
            max_image_count=500,
            rule_priority=100
        )

        # One repository per application; DESTROY so `cdk destroy` removes them.
        repository_names = [
            'jenkins-workshop-go-app'
        ]
        for repository_name in repository_names:
            ecr.Repository(
                self, repository_name,
                repository_name=repository_name,
                lifecycle_rules=[lifecycle_rule],
                removal_policy=core.RemovalPolicy.DESTROY
            )
File renamed without changes.
68 changes: 68 additions & 0 deletions 01-install-eks-cluster/eks_cluster/eks_cluster_stack.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
from aws_cdk import (
    core,
    aws_iam as iam,
    aws_ec2 as ec2,
    aws_eks as eks,
)

from eks_cluster.load_config_files import read_k8s_resource, read_docker_daemon_resource
from env import get_eks_admin_iam_username

class EksClusterStack(core.Stack):
    """EKS cluster stack: control plane, a general worker-node group, a
    dedicated (tainted) Jenkins slave node group, an admin masters role,
    and the Helm Tiller RBAC manifest.
    """

    def __init__(self, scope: core.Construct, name: str, vpc: ec2.IVpc, **kwargs) -> None:
        """
        :param scope: parent construct (the CDK app).
        :param name: construct id of this stack.
        :param vpc: VPC in which the cluster and its node groups are created.
        """
        super().__init__(scope, name, **kwargs)

        # default_capacity=0 suppresses the construct's implicit node group;
        # capacity is added explicitly below.
        cluster = eks.Cluster(
            self, 'jenkins-workshop-eks-control-plane',
            vpc=vpc,
            default_capacity=0
        )

        # General-purpose worker nodes.
        asg_worker_nodes = cluster.add_capacity(
            'worker-node',
            instance_type=ec2.InstanceType('t3.medium'),
            desired_capacity=2,
        )

        # Jenkins build nodes: labeled and tainted so only pods tolerating
        # jenkins=slave:NoSchedule are scheduled here; a custom docker daemon
        # config is injected via the node bootstrap options.
        asg_jenkins_slave = cluster.add_capacity(
            'worker-node-jenkins-slave',
            instance_type=ec2.InstanceType('t3.medium'),
            desired_capacity=1,
            bootstrap_options=eks.BootstrapOptions(
                kubelet_extra_args='--node-labels jenkins=slave --register-with-taints jenkins=slave:NoSchedule',
                docker_config_json=read_docker_daemon_resource('kubernetes_resources/docker-daemon.json')
            )
        )
        # Let the Jenkins nodes push built images to ECR.
        # NOTE(review): resources=["*"] allows pushing to every repository in
        # the account — consider scoping this to the workshop repository ARN.
        asg_jenkins_slave.add_to_role_policy(iam.PolicyStatement(
            actions=[
                'ecr:CompleteLayerUpload',
                'ecr:InitiateLayerUpload',
                'ecr:PutImage',
                'ecr:UploadLayerPart'
            ],
            resources=["*"]
        )
        )

        # Open all traffic between the two node groups so pods can talk to
        # each other regardless of which group they land on.
        asg_worker_nodes.connections.allow_from(
            asg_jenkins_slave,
            ec2.Port.all_traffic()
        )
        asg_jenkins_slave.connections.allow_from(
            asg_worker_nodes,
            ec2.Port.all_traffic()
        )

        # Role assumable by the configured IAM principal; mapped into the
        # cluster's aws-auth ConfigMap with masters (cluster-admin) access.
        eks_master_role = iam.Role(
            self, 'AdminRole',
            assumed_by=iam.ArnPrincipal(get_eks_admin_iam_username())
        )

        cluster.aws_auth.add_masters_role(eks_master_role)

        # Apply the ServiceAccount/ClusterRoleBinding required by Helm Tiller.
        # (Fix: the construct is registered by instantiation; the previous
        # unused local binding was removed.)
        eks.KubernetesResource(
            self, 'helm-tiller-rbac',
            cluster=cluster,
            manifest=read_k8s_resource('kubernetes_resources/helm-tiller-rbac.yaml')
        )
10 changes: 10 additions & 0 deletions 01-install-eks-cluster/eks_cluster/load_config_files.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
import yaml
import json

def read_k8s_resource(filename):
    """Load a (possibly multi-document) Kubernetes YAML manifest.

    :param filename: path to the YAML manifest file.
    :return: list with one parsed object per YAML document in the file.
    """
    # Explicit UTF-8 so parsing does not depend on the platform default encoding.
    with open(filename, 'r', encoding='utf-8') as stream:
        return list(yaml.safe_load_all(stream))

def read_docker_daemon_resource(filename):
    """Read a JSON file and return its content re-serialized as a JSON string.

    Round-tripping through ``json.load``/``json.dumps`` validates the file and
    normalizes whitespace, yielding a single string suitable for passing as a
    bootstrap option value.

    :param filename: path to the JSON file.
    :raises json.JSONDecodeError: if the file is not valid JSON.
    :return: the JSON content as a compact string.
    """
    # Explicit UTF-8 so parsing does not depend on the platform default encoding.
    with open(filename, 'r', encoding='utf-8') as stream:
        return json.dumps(json.load(stream))
28 changes: 28 additions & 0 deletions 01-install-eks-cluster/eks_cluster/vpc_stack.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
from aws_cdk import (
    core,
    aws_ec2 as ec2,
)

class VpcStack(core.Stack):
    """VPC for the EKS cluster: 2 AZs, one public and one private subnet group."""

    def __init__(self, scope: core.Construct, name: str, **kwargs) -> None:
        super().__init__(scope, name, **kwargs)

        # /20 subnets per AZ: 'Ingress' (public) and 'Application' (private).
        subnets = [
            ec2.SubnetConfiguration(
                cidr_mask=20,
                name='Ingress',
                subnet_type=ec2.SubnetType.PUBLIC
            ),
            ec2.SubnetConfiguration(
                cidr_mask=20,
                name='Application',
                subnet_type=ec2.SubnetType.PRIVATE
            ),
        ]

        # Exposed as an attribute so dependent stacks (e.g. the EKS cluster)
        # can be placed inside this VPC.
        self.eks_vpc = ec2.Vpc(
            self, 'eks-vpc',
            cidr='10.1.0.0/16',
            max_azs=2,
            subnet_configuration=subnets
        )
24 changes: 24 additions & 0 deletions 01-install-eks-cluster/env/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import os
import sys


def env_or_default(name, default):
    """Return the value of environment variable *name*, or *default* if unset."""
    value = os.environ.get(name)
    return default if value is None else value

def env_or_error(name):
    """Return the value of required environment variable *name*.

    Prints a diagnostic and exits the process with status 1 when the
    variable is not set.

    :param name: environment variable name.
    :return: the variable's value (never None).
    """
    env = os.environ.get(name)
    if env is None:
        # Fix: diagnostics belong on stderr so they are not mixed into
        # stdout when the CDK app's output is captured or piped.
        print(f'ERROR: Environment variable {name} is required, but was not set.', file=sys.stderr)
        sys.exit(1)
    return env

def get_eks_admin_iam_username():
    """Return the mandatory EKS_ADMIN_IAM_USERNAME value (an IAM ARN, per the
    README); exits the process via env_or_error if it is not set."""
    return env_or_error('EKS_ADMIN_IAM_USERNAME')

# CDK-populated defaults, used when no explicit override is supplied.
cdk_default_account = os.environ.get('CDK_DEFAULT_ACCOUNT')
cdk_default_region = os.environ.get('CDK_DEFAULT_REGION')

# Target AWS environment for every stack in this app; the explicit
# CDK_ACCOUNT / CDK_REGION variables take precedence over the CDK defaults.
aws_account = {
    'account': os.environ.get('CDK_ACCOUNT', cdk_default_account),
    'region': os.environ.get('CDK_REGION', cdk_default_region),
}
12 changes: 12 additions & 0 deletions 01-install-eks-cluster/install.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
#!/bin/sh

# Bootstrap and deploy the CDK stacks (VPC, EKS cluster, ECR repository).
# Requires: a configured aws CLI, pip3, and the CDK toolkit on PATH.

set -e
set -x

# jq extracts the caller ARN from the STS response below.
sudo apt install jq -y

# Use the current caller's ARN as the cluster-admin IAM identity.
# Fixes: $(...) instead of legacy backticks; `jq -r` emits the raw string so
# the fragile `cut` de-quoting is unnecessary; assignment is separated from
# `export` so a failing substitution is not masked under `set -e`; the
# expansion is quoted against word splitting.
EKS_ADMIN_IAM_USERNAME=$(aws sts get-caller-identity | jq -r '.Arn')
export EKS_ADMIN_IAM_USERNAME
echo "$EKS_ADMIN_IAM_USERNAME"

pip3 install --no-cache-dir -r requirements.txt
cdk bootstrap
cdk list
cdk deploy --require-approval never vpc-stack jenkins-workshop-eks-cluster ecr-repository
11 changes: 11 additions & 0 deletions 01-install-eks-cluster/kubernetes_resources/docker-daemon.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
{
"bridge": "docker0",
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "10"
},
"live-restore": false,
"max-concurrent-downloads": 10,
"experimental": true
}
18 changes: 18 additions & 0 deletions 01-install-eks-cluster/kubernetes_resources/helm-tiller-rbac.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system
6 changes: 6 additions & 0 deletions 01-install-eks-cluster/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
aws-cdk.core==1.27.0
aws-cdk.aws-eks==1.27.0
aws-cdk.aws-ec2==1.27.0
aws-cdk.aws-iam==1.27.0
aws-cdk.aws-ecr==1.27.0
pyyaml==5.2
File renamed without changes.
21 changes: 21 additions & 0 deletions 02-install-istio-jenkins/get-links.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#!/bin/sh

# Print the URLs of the services exposed through the Istio ingress gateway.

ep=$(kubectl get svc istio-ingressgateway -n istio-system -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")

# show NAME SUFFIX — print a service name followed by its gateway URL.
show() {
    echo "$1"
    echo "http://${ep}$2"
}

show "kiali" ":15029"
show "grafana" ":15031"
show "prometheus" ":15030"
show "tracing" ":15032"
show "jenkins" ""
show "go-app-example" "/go-app-example"
55 changes: 55 additions & 0 deletions 02-install-istio-jenkins/install.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
#!/bin/sh

# Install Istio and Jenkins onto the EKS cluster created by 01-install-eks-cluster.
# Assumes: aws CLI configured, jq installed, and the cluster's AdminRole assumable.

set -e
set -x

# install helm & kubectl cli

# Helm 2.x client; Tiller's RBAC was applied by the CDK stack.
wget https://get.helm.sh/helm-v2.16.1-linux-amd64.tar.gz
tar -zxvf helm-v2.16.1-linux-amd64.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/helm
rm -r helm-v2.16.1-linux-amd64.tar.gz linux-amd64/

# kubectl from the Kubernetes apt repository.
sudo apt-get update && sudo apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl

# get aws eks kube-config

# Resolve the CDK-generated cluster name / admin role ARN by grepping the
# AWS API listings (the CDK suffixes generated names with a hash).
EKS_CLUSTER_NAME=`aws eks list-clusters | grep jenkinsworkshopekscontrolplane | cut -d '"' -s -f2`
EKS_ADMIN_ARN=`aws iam list-roles | grep jenkins-workshop-eks-cluster-AdminRole | grep Arn | cut -d'"' -s -f4`
EKS_CLUSTER_ARN=`aws eks describe-cluster --name $EKS_CLUSTER_NAME | jq '.cluster.arn' | cut -d '"' -s -f2`

# NOTE(review): region is hard-coded to ap-northeast-2 — confirm it matches
# the region the CDK stacks were deployed to.
aws eks update-kubeconfig --region ap-northeast-2 --name $EKS_CLUSTER_NAME --role-arn $EKS_ADMIN_ARN

kubectl config use-context $EKS_CLUSTER_ARN

# install istio

# Initialize Tiller with the service account created by the CDK stack.
helm init --service-account tiller --wait

helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.5.0/charts/

# istio-init installs Istio's CRDs; the pause gives the API server time to
# register them before the main chart is applied.
helm upgrade --install istio-init --namespace istio-system istio.io/istio-init --wait

sleep 10;

helm upgrade --install istio --namespace istio-system -f istio/istio-customized.yaml istio.io/istio --wait

# Addons (kiali, grafana, prometheus, tracing) and the shared HTTP gateway.
kubectl apply -f istio/addons

kubectl apply -f istio/http-gateway.yaml

kubectl create namespace jenkins

# install jenkins

helm upgrade --install --recreate-pods jenkins --namespace jenkins --version 1.9.21 -f jenkins/jenkins-values.yaml stable/jenkins

# Route Jenkins through the Istio ingress gateway.
kubectl apply -f jenkins/istio-jenkins.yaml

# Minimal RBAC so Jenkins can launch agent pods and port-forward to them.
kubectl create clusterrole jenkins --verb=get,list,create --resource=pods,pods/portforward

kubectl create clusterrolebinding jenkins-binding --clusterrole=jenkins --serviceaccount=jenkins:jenkins
Loading

0 comments on commit 0279965

Please sign in to comment.