# The VG tests are going to use toil-vg.
# toil-vg needs to be able to mount paths it can see into Docker containers.
# There's no good way to do that when running Docker containers as siblings of a container containing toil-vg.
# So we either have to genuinely nest Docker inside another Docker, or we have to run the build on the real host.
# Pull in an image that has our apt packages pre-installed, to save time installing them for every test.
# Make sure to use :latest so we re-check and re-pull if needed on every run.
image: quay.io/vgteam/vg_ci_prebake:latest
before_script:
  - sudo apt-get -q -y update
  # Make sure we have the libcurl headers that pycurl needs for some of our Python tooling,
  # and uuidgen (from uuid-runtime), which the CI report upload needs
  - sudo apt-get -q -y install --no-upgrade docker.io python3-pip python3-virtualenv libcurl4-gnutls-dev python-dev npm nodejs node-gyp uuid-runtime libgnutls28-dev doxygen libzstd-dev
  - which junit-merge || sudo npm install -g junit-merge
  # Configure Docker to use a mirror for Docker Hub, if one is set; the daemon picks this up when it is started below
  - |
    if [[ ! -z "${DOCKER_HUB_MIRROR}" ]] ; then
      if [[ "${DOCKER_HUB_MIRROR}" == https* ]] ; then
        # Set up a secure mirror
        echo "{\"registry-mirrors\": [\"${DOCKER_HUB_MIRROR}\"]}" | sudo tee /etc/docker/daemon.json
      else
        # Set up an insecure mirror
        echo "{\"registry-mirrors\": [\"${DOCKER_HUB_MIRROR}\"], \"insecure-registries\": [\"${DOCKER_HUB_MIRROR##*://}\"]}" | sudo tee /etc/docker/daemon.json
      fi
    fi
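    # For example, a (hypothetical) insecure mirror setting like DOCKER_HUB_MIRROR=http://mirror.example
    # would write: {"registry-mirrors": ["http://mirror.example"], "insecure-registries": ["mirror.example"]}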
  - startdocker || true
  - docker info
  - mkdir -p ~/.aws && cp "$GITLAB_SECRET_FILE_AWS_CREDENTIALS" ~/.aws/credentials
after_script:
  - stopdocker || true
# We have three pipeline stages: build to make a Docker, test to run the tests,
# and report to compose the test report.
# TODO: make test stage parallel
stages:
  - build
  - test
  - report
# We define one job to do the out-of-container (re)build and run the Bash
# tests. It uses a Gitlab-managed cache to prevent a full rebuild every time. It
# still takes longer than the Docker build, so we put it in the test stage
# alongside other longer jobs.
local-build-test-job:
  stage: test
  cache:
    # Gitlab isn't clever enough to fill PR caches from the main branch, so we
    # just use one megacache and hope the Makefile is smart enough to recover
    # from a cache that may come from a newer or unrelated commit ("the future")
    key: local-build-test-cache
    paths:
      - deps/
      - include/
      - lib/
      - bin/
      - obj/
  before_script:
    - sudo apt-get -q -y update
    # We need to make sure we get the right submodule files for this version
    # and don't clobber sources with the cache. We want to have a dirty state
    # with the correct source files.
    - scripts/restore-deps.sh
    # We need to make sure we have nvm for testing the tube map
    - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash
    - export NVM_DIR="$HOME/.nvm" && . "$NVM_DIR/nvm.sh"
  script:
    - nvm version
    - python3 ./configure.py
    - source ./source_me.sh
    - make get-deps
    - make -j8
    - echo Testing
    - bin/vg test "Target to alignment extraction"
    - echo Full Testing
    - make test
    - make static -j8
    # Also test as a backend for the tube map
    - git clone https://github.com/vgteam/sequenceTubeMap.git
    - cd sequenceTubeMap
    # The tube map expects to bind to local IPv6 by default, but Kubernetes won't
    # let us have it, so bind the server to 127.0.0.1 instead
    - 'sed -i "s/^}$/,\"serverBindAddress\": \"127.0.0.1\"}/" src/config.json'
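    # The sed above replaces the closing "}" line of src/config.json with
    # ,"serverBindAddress": "127.0.0.1"} so the bind address becomes the last config key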
    # Tube map expects to have its specified version of Node
    - nvm install
    - nvm use
    - npm ci
    - CI=true npm run test
  variables:
    VG_FULL_TRACEBACK: "1"
    GIT_SUBMODULE_STRATEGY: none
# We define one job to do the Docker container build
build-job:
  stage: build
  before_script:
    # Don't bother starting the Docker daemon or installing much
    - which docker || (sudo apt-get -q -y update && sudo apt-get -q -y install --no-upgrade docker.io)
    # Get buildx
    - mkdir -p ~/.docker/cli-plugins/ ; curl -L https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-amd64 > ~/.docker/cli-plugins/docker-buildx ; chmod u+x ~/.docker/cli-plugins/docker-buildx
    # Connect to the Kubernetes-based builder "buildkit"
    # See vgci/buildkit-deployment.yml
    - docker buildx create --use --name=buildkit --platform=linux/amd64,linux/arm64 --node=buildkit-amd64 --driver=kubernetes --driver-opt="nodeselector=kubernetes.io/arch=amd64"
    # Report on the builders, and make sure they exist.
    - docker buildx inspect --bootstrap || (echo "Docker builder deployment can't be found in our Kubernetes namespace! Are we on the right Gitlab runner?" && exit 1)
    # Prune down build cache to make space. This will hang if the builder isn't findable.
    - (echo "y" | docker buildx prune --keep-storage 80G) || true
  script:
    - make include/vg_git_version.hpp
    - cat include/vg_git_version.hpp
    # Build but don't push, just fill the cache
    - docker buildx build --platform=linux/amd64 --build-arg THREADS=8 --target run -f Dockerfile .
    # Run the tests
    - docker buildx build --platform=linux/amd64 --build-arg THREADS=8 --target test -f Dockerfile .
    # Connect so we can upload our images
    - docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
    # Keep trying to push until it works or we time out or run out of tries
    - COUNT=0
    - while ! docker buildx build --platform=linux/amd64 --build-arg THREADS=8 --target run --push -t "quay.io/vgteam/vg:ci-${CI_PIPELINE_IID}-${CI_COMMIT_SHA}" -f Dockerfile . ; do docker logout "${CI_REGISTRY}" ; sleep 30; docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}" ; sleep 30; ((COUNT+=1)); if [[ ${COUNT} == 10 ]] ; then exit 1; fi; done
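    # The loop above makes up to 10 push attempts, logging out and back in to the
    # registry and sleeping about a minute between attempts; if none succeed it
    # exits nonzero and fails the job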
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
# The ARM container build takes about 90 minutes, so we don't want to run it
# before the main test phase where the other long tests live.
# To ship a final production Docker tag, we need the ARM and x86 builds
# happening in the same command so we can push one multiarch manifest.
production-build-job:
  stage: test
  only:
    - /^arm/
    - master
    - tags
  before_script:
    # Don't bother starting the Docker daemon or installing much
    - which docker || (sudo apt-get -q -y update && sudo apt-get -q -y install --no-upgrade docker.io)
    # Get buildx
    - mkdir -p ~/.docker/cli-plugins/ ; curl -L https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-amd64 > ~/.docker/cli-plugins/docker-buildx ; chmod u+x ~/.docker/cli-plugins/docker-buildx
    # Connect to the Kubernetes-based builder "buildkit"
    # See vgci/buildkit-deployment.yml
    - docker buildx create --use --name=buildkit --platform=linux/amd64,linux/arm64 --node=buildkit-amd64 --driver=kubernetes --driver-opt="nodeselector=kubernetes.io/arch=amd64"
    # Report on the builders, and make sure they exist.
    - docker buildx inspect --bootstrap || (echo "Docker builder deployment can't be found in our Kubernetes namespace! Are we on the right Gitlab runner?" && exit 1)
    # Prune down build cache to make space. This will hang if the builder isn't findable.
    - docker buildx prune --keep-storage 80G
    # Connect so we can upload our images
    - docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
  script:
    - make include/vg_git_version.hpp
    # Determine what we should be tagging vg Dockers as. If we're running on a Git tag we want to use that. Otherwise push over the tag we made already.
    - if [[ ! -z "${CI_COMMIT_TAG}" ]]; then VG_DOCKER_TAG="${CI_COMMIT_TAG}" ; else VG_DOCKER_TAG="ci-${CI_PIPELINE_IID}-${CI_COMMIT_SHA}"; fi
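    # For example, a (hypothetical) release tag v1.0.0 would publish quay.io/vgteam/vg:v1.0.0,
    # while an untagged branch build pushes over the ci-<pipeline>-<commit> tag already made by build-job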
    # Build the container for all architectures.
    - docker buildx build --platform=linux/amd64,linux/arm64 --build-arg THREADS=8 --target run --push -t "quay.io/vgteam/vg:${VG_DOCKER_TAG}" -f Dockerfile .
    # Tag it latest if we pushed a real release tag
    - if [[ ! -z "${CI_COMMIT_TAG}" ]]; then docker buildx build --platform=linux/amd64,linux/arm64 --build-arg THREADS=8 --target run --push -t "quay.io/vgteam/vg:latest" -f Dockerfile .; fi
    # Also run the ARM tests (emulated!), but don't fail the job if they fail,
    # because they don't actually work yet.
    - docker buildx build --platform=linux/arm64 --build-arg THREADS=8 --target test -f Dockerfile . || true
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
# We also run the toil-vg/pytest-based tests.
# Note that WE ONLY RUN TESTS LISTED IN vgci/test-list.txt
test-job:
  stage: test
  # Run in parallel, setting CI_NODE_INDEX and CI_NODE_TOTAL.
  # Each job will find its share of tests from vgci/test-list.txt and run them.
  # We ought to run one job per test, but we can wrap around.
  parallel: 6
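  # With parallel: 6, GitLab runs six copies of this job with CI_NODE_INDEX set to 1..6 and
  # CI_NODE_TOTAL set to 6; vgci-parallel-wrapper.sh presumably uses those to pick its slice
  # of the test list, wrapping around when there are more tests than jobs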
  script:
    - docker pull "quay.io/vgteam/vg:ci-${CI_PIPELINE_IID}-${CI_COMMIT_SHA}"
    - docker tag "quay.io/vgteam/vg:ci-${CI_PIPELINE_IID}-${CI_COMMIT_SHA}" vgci-docker-vg-local
    - mkdir -p junit
    # Drop secrets before we do any Toil; it might want to log the environment
    - export GITLAB_SECRET_FILE_AWS_CREDENTIALS=""
    - export GITLAB_SECRET_FILE_DOCS_SSH_KEY=""
    - export CI_REGISTRY_PASSWORD=""
    - export GH_TOKEN=""
    # Make sure IO to Gitlab is in blocking mode so we can't swamp it and crash
    - vgci/blockify.py bash vgci/vgci-parallel-wrapper.sh vgci/test-list.txt vgci-docker-vg-local ${CI_NODE_INDEX} ${CI_NODE_TOTAL} ./junit ./test_output
  artifacts:
    # Let Gitlab see the junit report
    reports:
      junit: junit/*.xml
    paths:
      - junit/*.xml
      - test_output/*
    # Make sure they get artifact'd even if (especially when) the tests fail
    when: always
    expire_in: 3 days
# We have a final job in the last stage to compose an HTML report
report-job:
  stage: report
  # Run this even when the tests fail, because otherwise we won't hear about it.
  # If the build itself failed, the docker pull below should hopefully fail too,
  # so we don't upload a report for a version that doesn't exist.
  when: always
  # All artifacts from previous stages are available
  script:
    # Get the Docker for version detection
    - docker pull "quay.io/vgteam/vg:ci-${CI_PIPELINE_IID}-${CI_COMMIT_SHA}"
    - docker tag "quay.io/vgteam/vg:ci-${CI_PIPELINE_IID}-${CI_COMMIT_SHA}" vgci-docker-vg-local
    # Collect all the junit files from all the test jobs into one
    - junit-merge -o junit.all.xml junit/*.xml
    # All the test output folder artifacts should automatically merge.
    # Make the report and post it.
    # We still need the Docker for version detection.
    # Make sure IO to Gitlab is in blocking mode so we can't swamp it and crash
    - vgci/blockify.py bash vgci/vgci.sh -J junit.all.xml -T vgci-docker-vg-local -W test_output
# We need a separate job to build the Doxygen docs
docs-job:
  stage: build
  script:
    - doc/publish-docs.sh