# backstage.yml
name: Install-run-backstage
on:
  workflow_dispatch:
  push:
    branches:
      - main
env:
  IDPBUILDER_VERSION: v0.5.0
  ## TODO: To be passed to idpbuilder package
  TEKTON_VERSION: v0.60.1
  TEKTON_CLIENT_VERSION: 0.37.0
  # From idpbuilder project
  ARGOCD_VERSION: 2.10.7
  GITEA_VERSION: 1.22
  KUBEVIRT_VERSION: v1.2.1
  KUBEVIRT_CDI_VERSION: v1.59.0
  # The git repo name is also used as the value of the template's project name parameter
  REPO_NAME: my-quarkus-app-job
  REPO_ORG: ch007m
  QUAY_ORG: ch007m
  KUBE_NAMESPACE: dummy
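  # KUBE_NAMESPACE is the target namespace used further down for the scaffolded
  # application, its Argo CD child Applications and its Tekton PipelineRuns.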
jobs:
  setup-idp:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    if: ${{ github.event_name == 'push' && contains( github.event.head_commit.message, 'qshift') }}
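    # Note: this condition only matches push events whose commit message contains 'qshift';
    # a manual workflow_dispatch run will therefore skip the job as written.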
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install idpbuilder
        run: |
          version=${IDPBUILDER_VERSION}
          curl -L -o ./idpbuilder.tar.gz "https://github.com/cnoe-io/idpbuilder/releases/download/${version}/idpbuilder-$(uname | awk '{print tolower($0)}')-$(uname -m | sed 's/x86_64/amd64/').tar.gz"
          tar xzf idpbuilder.tar.gz
          sudo mv ./idpbuilder /usr/local/bin/
          idpbuilder version
      - name: Install Argocd client
        run: |
          curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64
          sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
          rm argocd-linux-amd64
      - name: Install tekton client
        run: |
          curl -sSL "https://github.com/tektoncd/cli/releases/download/v${TEKTON_CLIENT_VERSION}/tkn_${TEKTON_CLIENT_VERSION}_Linux_x86_64.tar.gz" -o tkn.tar.gz
          sudo tar xvzf tkn.tar.gz -C /usr/local/bin/ tkn
          tkn version
      - name: Create an IDP cluster and install the packages
        run: |
          PACKAGES_DIR=.github/resources/idp/packages
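          # idpbuilder spins up a local kind cluster (node container: localdev-control-plane,
          # inspected later for the API address) with its core packages such as Argo CD and Gitea.
          # Assumption about flag semantics of the pinned release: each -p directory is installed
          # as an extra package (Tekton, Backstage here) and -c overrides the core argocd
          # package configuration with cm.yaml.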
          idpbuilder create \
            -p $PACKAGES_DIR/tekton \
            -p $PACKAGES_DIR/backstage \
            -c argocd:$PACKAGES_DIR/argocd/cm.yaml
      - name: Create the namespace where applications and resources should be deployed
        run: |
          kubectl create ns ${KUBE_NAMESPACE}
      - name: Create the secret containing the creds to write an image on Quay
        run: |
          QUAY_CREDS=$(echo -n "${{ secrets.QUAY_USERNAME }}:${{ secrets.QUAY_ROBOT_TOKEN }}" | base64 -w 0)
          cat <<EOF > config.json
          {
            "auths": {
              "quay.io/${QUAY_ORG}": {
                "auth": "$QUAY_CREDS"
              }
            }
          }
          EOF
          kubectl create secret generic dockerconfig-secret -n ${KUBE_NAMESPACE} --from-file=config.json
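          # Assumption: dockerconfig-secret is mounted by the Tekton build/push task so the
          # pipeline can authenticate against quay.io/${QUAY_ORG} when pushing the built image.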
      - name: Install Kubevirt
        run: |
          function is_nested_virt_enabled() {
            kvm_nested="unknown"
            if [ -f "/sys/module/kvm_intel/parameters/nested" ]; then
              kvm_nested=$( cat /sys/module/kvm_intel/parameters/nested )
            elif [ -f "/sys/module/kvm_amd/parameters/nested" ]; then
              kvm_nested=$( cat /sys/module/kvm_amd/parameters/nested )
            fi
            [ "$kvm_nested" == "1" ] || [ "$kvm_nested" == "Y" ] || [ "$kvm_nested" == "y" ]
          }
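          # is_nested_virt_enabled succeeds only when the kvm_intel/kvm_amd nested parameter is
          # enabled; without nested virtualization, KubeVirt is patched below to use software
          # emulation (useEmulation) so VMs can still boot, only more slowly.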
echo "Deploying KubeVirt"
kubectl apply -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml"
kubectl apply -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml"
echo "Configuring Kubevirt to use emulation if needed"
if ! is_nested_virt_enabled; then
kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'
fi
echo "Deploying KubeVirt containerized-data-importer"
kubectl apply -f "https://github.com/kubevirt/containerized-data-importer/releases/download/${KUBEVIRT_CDI_VERSION}/cdi-operator.yaml"
kubectl apply -f "https://github.com/kubevirt/containerized-data-importer/releases/download/${KUBEVIRT_CDI_VERSION}/cdi-cr.yaml"
echo "Waiting for KubeVirt to be ready"
kubectl wait --for=condition=Available kubevirt kubevirt --namespace=kubevirt --timeout=5m
      - name: Grant additional RBAC for KubeVirt resources and RW access for the PVC
        run: |
          echo "Patch the StorageProfile to use the standard storageclass and give ReadWrite access"
          kubectl get StorageProfile
          kubectl patch --type merge -p '{"spec": {"claimPropertySets": [{"accessModes": ["ReadWriteOnce"], "volumeMode": "Filesystem"}]}}' StorageProfile standard
          kubectl create clusterrolebinding pod-kubevirt-viewer --clusterrole=kubevirt.io:view --serviceaccount=${KUBE_NAMESPACE}:default
          kubectl create clusterrolebinding cdi-kubevirt-viewer --clusterrole=cdi.kubevirt.io:view --serviceaccount=${KUBE_NAMESPACE}:default
          kubectl create clusterrolebinding quarkus-dev --clusterrole=admin --serviceaccount=${KUBE_NAMESPACE}:default
          # Give RBAC to the SA argocd-server of the namespace argocd to access Applications running in other namespaces
          kubectl create clusterrolebinding argocd-server-applications --clusterrole=argocd-applicationset-controller --serviceaccount=argocd:argocd-server
      - name: Install our Fedora podman image
        run: |
          kubectl create ns vm-images
          kubectl apply -n vm-images -f https://raw.githubusercontent.com/ch007m/test-github-action/main/.github/resources/quay-to-pvc-datavolume.yml
          kubectl wait datavolume -n vm-images podman-remote --for condition=Ready=True --timeout=360s
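          # Assumption: quay-to-pvc-datavolume.yml defines a CDI DataVolume named podman-remote
          # that imports the Fedora/podman VM disk image from Quay into a PVC for later VM boots.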
      - name: Create ssh key, secret and VM
        run: |
          ssh-keygen -N "" -f id_rsa
          kubectl create secret generic quarkus-dev-ssh-key -n ${KUBE_NAMESPACE} --from-file=key=id_rsa.pub
          kubectl apply -f https://raw.githubusercontent.com/ch007m/test-github-action/main/.github/resources/quarkus-dev-vm.yml -n ${KUBE_NAMESPACE}
          kubectl wait --for=condition=Ready vm/quarkus-dev -n ${KUBE_NAMESPACE} --timeout=360s
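          # Assumption: quarkus-dev-vm.yml injects the quarkus-dev-ssh-key public key into the
          # quarkus-dev VM (e.g. via cloud-init) so later steps can reach it over SSH.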
      - name: Set idp env variables
        run: |
          #GITEA_URL=$(kubectl get ingress/my-gitea -n gitea -ojson | jq -r '.spec.rules[0].host')
          #GITEA_USERNAME=$(idpbuilder get secrets -p gitea -ojson | jq -r '.[].data.username')
          #GITEA_PASSWORD=$(idpbuilder get secrets -p gitea -ojson | jq -r '.[].data.password')
          #
          #echo "GITEA_URL=$GITEA_URL" >> "$GITHUB_ENV"
          #echo "GITEA_USERNAME=$GITEA_USERNAME" >> "$GITHUB_ENV"
          #echo "GITEA_PASSWORD=$GITEA_PASSWORD" >> "$GITHUB_ENV"
          ARGO_USERNAME=$(idpbuilder get secrets -p argocd -ojson | jq -r '.[].data.username')
          ARGO_PASSWORD=$(idpbuilder get secrets -p argocd -ojson | jq -r '.[].data.password')
          ARGO_SERVER_HOST=$(kubectl get ingress/argocd-server-ingress -n argocd -ojson | jq -r '.spec.rules[0].host')
          ARGO_SERVER_PORT=8443
          ARGO_SERVER_URL="https://$ARGO_SERVER_HOST:$ARGO_SERVER_PORT"
          echo "ARGO_PASSWORD=$ARGO_PASSWORD" >> "$GITHUB_ENV"
          echo "ARGO_SERVER_HOST=$ARGO_SERVER_HOST" >> "$GITHUB_ENV"
          echo "ARGO_SERVER_PORT=$ARGO_SERVER_PORT" >> "$GITHUB_ENV"
          echo "ARGO_SERVER=$ARGO_SERVER_URL" >> "$GITHUB_ENV"
          echo "ARGO_USERNAME=$ARGO_USERNAME" >> "$GITHUB_ENV"
          SERVICE_ACCOUNT_TOKEN=$(kubectl get secret backstage-secret -n backstage -o json | jq -r '.data.token' | base64 -d)
          echo "SERVICE_ACCOUNT_TOKEN=$SERVICE_ACCOUNT_TOKEN" >> "$GITHUB_ENV"
          BACKSTAGE_AUTH_SECRET=$(node -p 'require("crypto").randomBytes(24).toString("base64")')
          echo "BACKSTAGE_AUTH_SECRET=$BACKSTAGE_AUTH_SECRET" >> "$GITHUB_ENV"
          echo "Get node IP and port to access it"
          CLUSTER_IP=$(docker inspect localdev-control-plane | jq -r '.[].NetworkSettings.Ports."6443/tcp"[0].HostIp')
          CLUSTER_PORT=$(docker inspect localdev-control-plane | jq -r '.[].NetworkSettings.Ports."6443/tcp"[0].HostPort')
          API_URL="https://$CLUSTER_IP:$CLUSTER_PORT"
          echo "API_URL=$API_URL" >> "$GITHUB_ENV"
      - name: Log on and configure the ArgoCD server
        run: |
          argocd --insecure login $ARGO_SERVER_HOST:$ARGO_SERVER_PORT --username $ARGO_USERNAME --password $ARGO_PASSWORD
          echo "Allow argocd to handle applications in all the namespaces"
          argocd proj add-source-namespace default '*'
      - name: Rollout ArgoCD as we created the namespace where Applications will be deployed
        run: |
          echo "Restart Argocd so it takes the newly created namespace into account ..."
          kubectl rollout restart -n argocd deployment argocd-server
          kubectl rollout restart -n argocd statefulset argocd-application-controller
      - name: Checkout QShift backstage playground
        uses: actions/checkout@v4
        with:
          repository: q-shift/backstage-playground
          path: backstage-playground
      - uses: actions/setup-node@v4
        with:
          node-version: 20.x
      - run: corepack enable
      - name: Install & build QShift
        working-directory: backstage-playground
        run: |
          yarn --immutable
          yarn tsc
          yarn build:all
      - name: Configure app-config.local.yaml file
        env:
          GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.BACKSTAGE_GITHUB_TOKEN }}
        run: |
          cat <<EOF > backstage_env_secret.env
          APP_BASE_URL=http://localhost:3000
          BACKEND_BASE_URL=http://localhost:7007
          BACKSTAGE_AUTH_SECRET=$BACKSTAGE_AUTH_SECRET
          TEMPLATE_URL=https://github.com/q-shift/backstage-playground/blob/main/locations/root.yaml
          ARGOCD_SERVER=$ARGO_SERVER
          ARGOCD_ADMIN_USER=$ARGO_USERNAME
          ARGOCD_ADMIN_PASSWORD=$ARGO_PASSWORD
          GITHUB_PERSONAL_ACCESS_TOKEN=$GITHUB_PERSONAL_ACCESS_TOKEN
          #GITEA_URL=$GITEA_URL
          #GITEA_USERNAME=$GITEA_USERNAME
          #GITEA_PASSWORD='$GITEA_PASSWORD'
          KUBERNETES_API_URL=$API_URL
          SERVICE_ACCOUNT_TOKEN=$SERVICE_ACCOUNT_TOKEN
          EOF
          export $(grep -v '^#' backstage_env_secret.env | xargs)
          envsubst < .github/resources/app-config.qshift.tmpl > backstage-playground/app-config.local.yaml
          cat backstage-playground/app-config.local.yaml
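          # app-config.qshift.tmpl is rendered with the exported variables; the resulting
          # app-config.local.yaml wires Backstage to the local Argo CD server, the kind
          # Kubernetes API and GitHub.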
      - name: Start backstage
        # https://github.com/JarvusInnovations/background-action
        uses: JarvusInnovations/background-action@v1
        with:
          run: |
            export NODE_OPTIONS=--no-node-snapshot
            export NODE_TLS_REJECT_UNAUTHORIZED=0
            yarn dev
          working-directory: backstage-playground
          tail: true
          log-output: true
          wait-on: http://localhost:7007/settings
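      # background-action keeps `yarn dev` running in the background for the remaining steps,
      # streams its logs, and only lets the job continue once http://localhost:7007/settings answers.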
      - name: Check backstage resources
        run: |
          get_entities_length() {
            curl -s -H "Authorization: Bearer $BACKSTAGE_AUTH_SECRET" http://localhost:7007/api/catalog/entities?filter=kind=location | jq '. | length'
          }
          until [[ $(get_entities_length) -gt 2 ]]; do
            echo "Wait till locations are generated ..."
            length=$(get_entities_length)
            echo "Current length: $length"
            sleep 5
          done
          echo "Show the locations ..."
          curl -s "http://localhost:7007/api/catalog/entities?filter=kind=location" \
            -H 'Content-Type: application/json' \
            -H "Authorization: Bearer $BACKSTAGE_AUTH_SECRET" \
            --compressed \
            | jq -r
      - name: Delete application repository
        run: |
          if gh repo view $REPO_ORG/$REPO_NAME > /dev/null 2>&1; then
            echo "Repository '$REPO_ORG/$REPO_NAME' exists."
            gh repo delete $REPO_ORG/$REPO_NAME --yes
          fi
        env:
          GH_TOKEN: ${{ secrets.WORKFLOW_GITHUB_TOKEN }}
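      # A leftover repository from a previous run is removed first, since the scaffolder task
      # below is expected to publish a fresh $REPO_ORG/$REPO_NAME repository on GitHub
      # (assumption about the template's publish step).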
      - name: Scaffold a project
        run: |
          curl -s 'http://localhost:7007/api/scaffolder/v2/tasks' \
            -X POST \
            -H 'Content-Type: application/json' \
            -H "Authorization: Bearer $BACKSTAGE_AUTH_SECRET" \
            -d @.github/resources/qshift/quarkus-app.json
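          # Assumption: quarkus-app.json carries the template reference and parameter values a
          # user would normally enter in the Backstage UI; POSTing it to the scaffolder tasks
          # API starts the Quarkus template run.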
      - name: Check backstage components
        run: |
          echo "Show the components ..."
          get_entities_length() {
            curl -s -H "Authorization: Bearer $BACKSTAGE_AUTH_SECRET" http://localhost:7007/api/catalog/entities?filter=kind=component | jq '. | length'
          }
          until [[ $(get_entities_length) -gt 0 ]]; do
            echo "Wait till component is created ..."
            echo "Component(s) length: $(get_entities_length)"
            sleep 10
          done
          curl -s "http://localhost:7007/api/catalog/entities?filter=kind=component" \
            -H 'Content-Type: application/json' \
            -H "Authorization: Bearer $BACKSTAGE_AUTH_SECRET" \
            --compressed \
            | jq -r
      - name: Wait till the status of the parent Application is healthy
        run: |
          SCRIPTS=$(pwd)/.github/scripts
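          # $REPO_NAME-bootstrap is the parent (app-of-apps) Application living in the argocd
          # namespace; it generates the -build-test-push, -db and -deploy child Applications
          # whose Synced status is checked just below.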
          if ! $SCRIPTS/waitFor.sh application $REPO_NAME-bootstrap argocd Healthy; then
            echo "Failed to watch: application $REPO_NAME-bootstrap"
            exit 1;
          fi
          echo "#########################################"
          echo "Detail Application(s) information"
          echo "#########################################"
          kubectl get application -A
          if ! $SCRIPTS/waitFor.sh application $REPO_NAME-build-test-push ${KUBE_NAMESPACE} Synced ; then
            echo "Failed to get: application $REPO_NAME-build-test-push Synced"
            kubectl describe application $REPO_NAME-build-test-push -n ${KUBE_NAMESPACE}
            exit 1;
          fi
          if ! $SCRIPTS/waitFor.sh application $REPO_NAME-db ${KUBE_NAMESPACE} Synced ; then
            echo "Failed to get: application $REPO_NAME-db Synced"
            kubectl describe application $REPO_NAME-db -n ${KUBE_NAMESPACE}
            exit 1;
          fi
          if ! $SCRIPTS/waitFor.sh application $REPO_NAME-deploy ${KUBE_NAMESPACE} Synced ; then
            echo "Failed to get: application $REPO_NAME-deploy Synced"
            kubectl describe application $REPO_NAME-deploy -n ${KUBE_NAMESPACE}
            exit 1;
          fi
          echo "#########################################"
          argocd app get ${KUBE_NAMESPACE}/${REPO_NAME}-build-test-push
          echo "#########################################"
          argocd app get ${KUBE_NAMESPACE}/${REPO_NAME}-deploy
          echo "#########################################"
          argocd app get ${KUBE_NAMESPACE}/${REPO_NAME}-db
      - name: Watch tekton resources ...
        run: |
          get_pipelineruns_length() {
            tkn pipelinerun list -n ${KUBE_NAMESPACE} -o json | jq -r '.items | length'
          }
          echo "#########################################"
          echo "Pipelineruns starting ..."
          echo "#########################################"
          for i in `seq 30`; do
            if [[ $(get_pipelineruns_length) -gt 0 ]]; then
              tkn pipelinerun logs -n ${KUBE_NAMESPACE} $REPO_NAME-build-test-push -f
            else
              echo "#########################################"
              echo "Wait till the pipelinerun is running ..."
              echo "#########################################"
              kubectl get pods -n ${KUBE_NAMESPACE}
              sleep 30
            fi
          done
      - name: Wait till the application is running and log it
        run: |
          SCRIPTS=$(pwd)/.github/scripts
          if ! $SCRIPTS/waitFor.sh deployment $REPO_NAME -n ${KUBE_NAMESPACE} READY; then
            echo "Failed to get deployment of $REPO_NAME"
            exit 1;
          else
            kubectl logs -n ${KUBE_NAMESPACE} -lapp.kubernetes.io/name=${REPO_NAME}
          fi
      - name: Execute if the job fails
        if: ${{ failure() }}
        run: |
          echo "#########################################"
          echo "Get pods ..."
          echo "#########################################"
          kubectl get pods -A
          echo "#########################################"
          echo "Get Tekton pipeline ..."
          echo "#########################################"
          kubectl get pipeline -A --ignore-not-found
          echo "#########################################"
          echo "Get Tekton pipelineruns ..."
          echo "#########################################"
          kubectl get pipelineruns -A --ignore-not-found
          echo "#########################################"
          echo "Get Tekton taskruns ..."
          echo "#########################################"
          kubectl get taskruns -A --ignore-not-found
          echo "#########################################"
          echo "List Argo Applications ..."
          echo "#########################################"
          argocd app list
          echo "#########################################"
          echo "List the Argocd projects ..."
          echo "#########################################"
          kubectl get appprojects -A
          echo "#########################################"
          echo "List the Argocd applications: parent and children ..."
          echo "#########################################"
          argocd app list -p ${REPO_NAME} -N argocd
          argocd app list -p default -N ${KUBE_NAMESPACE}
          echo "#########################################"
          echo "Detail the applications and projects ..."
          echo "#########################################"
          kubectl get appprojects -A -oyaml
          kubectl describe applications -n ${KUBE_NAMESPACE}