Skip to content

Commit

Permalink
github workflow added
Browse files Browse the repository at this point in the history
  • Loading branch information
argonautbot[bot] authored Dec 8, 2021
1 parent 4ebf4ec commit 66e06ba
Show file tree
Hide file tree
Showing 2 changed files with 290 additions and 0 deletions.
71 changes: 71 additions & 0 deletions .github/workflows/argonaut-violet-lambda1.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@

name: "Build and Push to CR"

on:
  push:
    branches:
      - main
    tags:
      - "v*" # Push events to matching v*, i.e. v1.0, v20.15.10
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    name: Build and Push Img
    steps:
      # NOTE(review): the `::set-output` workflow command is deprecated and has
      # been disabled by GitHub Actions; steps must append "name=value" lines to
      # the $GITHUB_OUTPUT file instead. The output names (VERSION, SHA_SHORT)
      # are unchanged, so every existing ${{ steps.*.outputs.* }} reference
      # below keeps working.
      - name: Get the version
        id: get_version
        # Strips the refs/tags/ prefix; only meaningful on tag pushes — on a
        # branch push this yields the full ref string.
        run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> "$GITHUB_OUTPUT"
      - name: Fetch repo
        # NOTE(review): checkout@v2 / setup-*@v1 run on a retired Node runtime;
        # consider bumping to current majors — left pinned here to avoid a
        # behavior change in this fix.
        uses: actions/checkout@v2
      - name: Get Short SHA
        id: get_sha
        run: echo "SHA_SHORT=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Build Image
        uses: docker/build-push-action@v2
        id: build
        with:
          context: .
          file: ./Dockerfile
          # The image is exported to a local tarball and pushed by the next
          # step, so no registry push happens here.
          push: false
          tags: ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.us-east-1.amazonaws.com/argonaut/lambda1:${{ steps.get_sha.outputs.SHA_SHORT }}
          outputs: type=docker,dest=image.tar

      - name: Push to ecr
        uses: argonautdev/[email protected]
        id: push_to_ecr
        with:
          access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          account_id: ${{ secrets.AWS_ACCOUNT_ID }}
          repo: argonaut/lambda1
          region: us-east-1
          tags: ${{ steps.get_sha.outputs.SHA_SHORT }}
          create_repo: true
          image_scanning_configuration: true
          docker_image_path: image.tar

  deploy:
    runs-on: ubuntu-latest
    name: Deploy to Argonaut
    needs: build
    steps:
      - name: Fetch repo
        uses: actions/checkout@v2
      # Re-derived in this job because step outputs do not cross job
      # boundaries without an explicit `outputs:` mapping on the build job.
      - name: Get Short SHA
        id: get_sha
        run: echo "SHA_SHORT=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT"
      - name: Download art
        run: curl --silent https://github.com/argonautdev/public/releases/latest/download/art-linux -L -o art
      - name: Configure art
        run: |
          chmod +x ./art
          sudo mv ./art /usr/local/bin/art
          art configure --key ${{ secrets.ART_KEY }} --secret ${{ secrets.ART_SECRET }}
          art app deploy -f ./art-lambda1.yaml --set image="${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.us-east-1.amazonaws.com/argonaut/lambda1" --set imageTag="${{ steps.get_sha.outputs.SHA_SHORT }}" --set appName="lambda1" --set argonaut.env="violet" --set argonaut.region="us-east-1" --set argonaut.cluster="violet" --set argonaut.serviceType="stateless" --set argonaut.imageRegistry="ecr"
219 changes: 219 additions & 0 deletions art-lambda1.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,219 @@
---
version: "v1"
appName: "lambda1"
# Default image; the CI pipeline overrides image/imageTag per deploy via
# `art app deploy --set image=... --set imageTag=...`.
image: "054565121117.dkr.ecr.us-east-1.amazonaws.com/argonaut/lambda1"

imageTag: "latest"

services:
  - port: 8080
    protocol: "tls-terminated" # tls-passthrough, tls-terminated, tcp, http, grpc need to be supported
    external:
      hosts:
        - "battleship.violet.argonaut.live"
      hostPort: 443
      paths: ["/"]

argonaut:
  env: violet
  region: us-east-1
  cluster: violet
  imageRegistry: ecr # corresponding to the image that is to be deployed
  serviceType: "stateless" # One of {stateful, stateless, managed}
  persistentStorage: []

replicas: 1
minReplicas: 1
maxReplicas: 1
resources:
  requests:
    cpu: "500m"
    # NOTE(review): "M" is the decimal (megabyte) suffix; if binary mebibytes
    # were intended, use "Mi" — confirm which unit the cluster expects.
    memory: "512M"
  limits:
    cpu: "1000m"
    memory: "1500M"
#########################################################################################
#            Everything below this is optional and advanced configuration               #
#                       and irrelevant in most scenarios.                               #
#########################################################################################
# Can only do one of the httpGet and exec handler methods for livenessProbe
livenessProbe:
  httpGet:
    path: /
    port: 8080
  # exec:
  #   command:
  #     - sh
  #     - -c
  #     - |
  #       #!/usr/bin/env sh
  #       test -f /etc/
  failureThreshold: 5
  initialDelaySeconds: 10
  # NOTE(review): Kubernetes requires successThreshold to be exactly 1 for
  # liveness probes (the API server rejects other values); changed from 3.
  # Assumes art passes probe config through to Kubernetes — confirm.
  successThreshold: 1
  periodSeconds: 10
  timeoutSeconds: 5
# Can only do one of the httpGet and exec handler methods for readinessProbe
readinessProbe:
  httpGet:
    path: /
    port: 8080
  # # Handler 2
  # exec:
  #   command:
  #     - sh
  #     - -c
  #     - |
  #       #!/usr/bin/env sh
  #       test -f /etc/
  # Common fields
  failureThreshold: 5
  initialDelaySeconds: 10
  successThreshold: 3
  periodSeconds: 10
  timeoutSeconds: 5

externalServices: []
podAnnotations: {}
  # iam.amazonaws.com/role: myapp-cluster
# additionals labels
labels: {}
# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
  # - secretRef:
  #     name: env-secret
  # - configMapRef:
  #     name: config-map
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security
secretMounts: []
  # - name: beamd-cert
  #   secretName: beamd-cert
  #   path: /usr/share/myapp/config/certs
sidecarResources: {}
  # limits:
  #   cpu: "25m"
  #   # memory: "128Mi"
  # requests:
  #   cpu: "25m"
  #   memory: "128Mi"
# networkHost: "0.0.0.0"
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
# maxUnavailable: 25%
updateStrategy: RollingUpdate
# How long to wait for myapp to stop gracefully
terminationGracePeriod: 30
lifecycle: {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
# This doesn't apply if antiAffinity is not set
antiAffinityTopologyKey: "kubernetes.io/hostname"
# "hard" means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to "soft" will do this best effort
antiAffinity: ""
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"
podSecurityContext: {}
  # fsGroup: 1000
  # runAsUser: 1000
securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
nodeSelector: {}
tolerations: []
initContainer:
  enabled: false
  # command: ["echo", "I am an initContainer"]
  # image: nginx
initResources: {}
  # limits:
  #   cpu: "25m"
  #   # memory: "128Mi"
  # requests:
  #   cpu: "25m"
  #   memory: "128Mi"
extraInitContainers: []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']
extraVolumes: []
  # - name: extras
  #   emptyDir: {}
extraVolumeMounts: []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true
extraContainers: []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']
# Allows you to add any config files in /usr/share/myapp/config/
# as a ConfigMap
extraConfig: []
  # - name: configName
  #   path: "/path/to/config/folder/"
  #   readOnly: true
  #   data:
  #     pokedex.yaml: |
  #       pokemonName: Pikachu
  #       pokemonType: Lightning
  #     battle.yaml: |
  #       pokemon1: Pikachu
  #       pokemon2: MewTwo
  # - name: configName2
  #   path: "/path/to/config/anotherfolder/"
  #   readOnly: true
  #   data:
  #     pokedex.yaml: |
  #       pokemonName: Pikachu
  #       pokemonType: Lightning
  #     battle.yaml: |
  #       pokemon1: Pikachu
  #       pokemon2: MewTwo
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
  # - name: MY_ENVIRONMENT_VAR
  #   value: the_value_goes_here

0 comments on commit 66e06ba

Please sign in to comment.