diff --git a/.github/workflows/site_deploy.yml b/.github/workflows/site_deploy.yml
index aa4008e455..cab7c38de7 100644
--- a/.github/workflows/site_deploy.yml
+++ b/.github/workflows/site_deploy.yml
@@ -9,13 +9,13 @@ on:
       vcell_version:
         description: 'version.major.minor'
         required: true
-        default: '7.5.0'
+        default: '7.6.0'
       vcell_build:
         description: 'build number'
         required: true
-        default: '148'
+        default: '18'
       vcell_site:
-        description: 'rel or alpha or test (note rel is Swarm, alpha and test are Kubernetes)'
+        description: 'rel or alpha or test'
         required: true
         default: 'alpha'
       server_only:
@@ -208,7 +208,6 @@ jobs:
           set -ux
           mvn clean install -DskipTests
       - name: deploy installers and singularity to kubernetes site and web help to vcell.org
-        if: ${{ github.event.inputs.vcell_site != 'rel' }}
         run: |
           set -ux
           cd docker/swarm
@@ -251,7 +250,6 @@ jobs:
           echo "KUSTOMIZE_OVERLAY=${KUSTOMIZE_OVERLAY}" >> $GITHUB_ENV
           echo "CONTAINER_IMAGE_TAG=${CONTAINER_IMAGE_TAG}" >> $GITHUB_ENV
       - name: Call webhook to deploy to kubernetes cluster (overlay 'prod', 'dev' or 'stage')
-        if: ${{ github.event.inputs.vcell_site != 'rel' }}
         run: |
           git_sha=$(git rev-parse --short "$GITHUB_SHA")
           echo '{"ref": "main","inputs":{"overlay": "'${KUSTOMIZE_OVERLAY}'","tag":"'${CONTAINER_IMAGE_TAG}'","swversion": "'${VCELL_SWVERSION}'"}}' >body
@@ -259,41 +257,6 @@ jobs:
             -H 'Authorization: Bearer ${{ secrets.ACTION_TOKEN }}' \
             -H 'Content-Type: application/json' \
             --data "@body"
-      - name: deploy to swarm site
-        if: ${{ github.event.inputs.vcell_site == 'rel' }}
-        run: |
-          set -ux
-          cd docker/swarm
-          ssh -t ${{ secrets.CD_FULL_USER }}@${VCELL_MANAGER_NODE} sudo docker login -u ${{ secrets.ACTION_USER }} -p ${{ secrets.ACTION_TOKEN }} ghcr.io
-          if ${{ github.event.inputs.server_only != 'true' }}; then
-            # build and install the client installers, singularity images, and docker swarm configuration
-            ./deploy-action-swarm.sh \
-              --ssh-user ${{ secrets.CD_FULL_USER }} \
-              --install-singularity \
-              --build-installers \
-              --installer-deploy-dir $VCELL_INSTALLER_REMOTE_DIR \
-              ${VCELL_MANAGER_NODE} \
-              ./${VCELL_CONFIG_FILE_NAME} \
-              ${VCELL_DEPLOY_REMOTE_DIR}/config/${VCELL_CONFIG_FILE_NAME} \
-              ./docker-compose.yml \
-              ${VCELL_DEPLOY_REMOTE_DIR}/config/docker-compose_${VCELL_TAG}.yml \
-              vcell${VCELL_SITE}
-            export VCELL_SITE_CAMEL=`cat $VCELL_CONFIG_FILE_NAME | grep VCELL_SITE_CAMEL | cut -d"=" -f2`
-            ssh ${{ secrets.CD_FULL_USER }}@${VCELL_MANAGER_NODE} \
-              installer_deploy_dir=$VCELL_INSTALLER_REMOTE_DIR vcell_siteCamel=$VCELL_SITE_CAMEL vcell_version=$VCELL_VERSION vcell_build=$VCELL_BUILD \
-              'bash -s' < link-installers.sh
-          else
-            # build and install only the singularity images and docker swarm configuration
-            ./deploy-action-swarm.sh \
-              --ssh-user ${{ secrets.CD_FULL_USER }} \
-              --install-singularity \
-              ${VCELL_MANAGER_NODE} \
-              ./${VCELL_CONFIG_FILE_NAME} \
-              ${VCELL_DEPLOY_REMOTE_DIR}/config/${VCELL_CONFIG_FILE_NAME} \
-              ./docker-compose.yml \
-              ${VCELL_DEPLOY_REMOTE_DIR}/config/docker-compose_${VCELL_TAG}.yml \
-              vcell${VCELL_SITE}
-          fi
       - name: Setup tmate session 3
         uses: mxschmitt/action-tmate@v3
         if: ${{ failure() }}
diff --git a/docker/swarm/deploy-action-swarm.sh b/docker/swarm/deploy-action-swarm.sh
deleted file mode 100755
index 9764af92b9..0000000000
--- a/docker/swarm/deploy-action-swarm.sh
+++ /dev/null
@@ -1,237 +0,0 @@
-#!/usr/bin/env bash
-
-set -ux
-
-show_help() {
-  echo "Deploys or updates a deployment of VCell on remote Docker swarm cluster"
-  echo ""
-  echo "usage: deploy-action-swarm.sh [OPTIONS] REQUIRED-ARGUMENTS"
-  echo ""
-  echo "  REQUIRED-ARGUMENTS"
-  echo "    manager-node          swarm node with manager role ( vcellapi.cam.uchc.edu or vcellapi-beta.cam.uchc.edu )"
-  echo ""
-  echo "    local-config-file     local config file for deployment, copied to manager-node including:"
-  echo "                          VCELL_REPO_NAMESPACE=(repo/namespace | namespace)"
-  echo "                          (e.g. schaff or vcell-docker.cam.uchc.edu:5000/schaff )"
-  echo "                          (must be reachable from swarm nodes and include namespace)"
-  echo "                          VCELL_TAG=tag (e.g. dev | 7.0.0-alpha.4 | f98dfe3)"
-  echo ""
-  echo "    remote-config-file    absolute path of target config file on remote manager-node"
-  echo "                          WARNING: will overwrite remote file"
-  echo ""
-  echo "    local-compose-file    local docker-compose.yml file for deployment, copied to manager-node"
-  echo ""
-  echo "    remote-compose-file   absolute path of target docker-compose.yml file on remote manager-node"
-  echo "                          WARNING: will overwrite remote file"
-  echo ""
-  echo "    stack-name            name of Docker swarm stack"
-  echo ""
-  echo "  [OPTIONS]"
-  echo "    -h | --help           show this message"
-  echo ""
-  echo "    --ssh-user user       user for ssh to node [defaults to current user id using whoami]"
-  echo "                          (user must have passwordless sudo for docker commands on manager-node)"
-  echo ""
-  echo "    --ssh-key keyfile     ssh key for passwordless ssh to node"
-  echo ""
-  echo "    --build-installers    optionally build client installers and place in ./generated_installers dir"
-  echo ""
-  echo "    --installer-deploy-dir /path/to/installer/dir"
-  echo "                          directory for installers accessible to users"
-  echo "                          typically a web-accessible location to download the client installers for each platform"
-  echo ""
-  echo "    --install-singularity optionally install batch and opt singularity images on each compute node in 'vcell' SLURM partition"
-  echo ""
-  echo ""
-  echo "example:"
-  echo ""
-  echo "deploy-action-swarm.sh \\"
-  echo "  --ssh-user vcell \\"
-  echo "  --ssh-key ~/.ssh/schaff_rsa \\"
-  echo "  --install_singularity \\"
-  echo "  --build_installers --installer_deploy_dir /share/apps/vcell3/apache_webroot/htdocs/webstart/Rel \\"
-  echo "  vcellapi.cam.uchc.edu \\"
-  echo "  ./server.config \\"
-  echo "  /usr/local/deploy/Test/server_01.config \\"
-  echo "  ./docker-compose.yml \\"
-  echo "  /usr/local/deploy/Test/docker-compose_01.yml \\"
-  echo "  vcellrel"
-  exit 1
-}
-
-if [[ $# -lt 6 ]]; then
-  show_help
-fi
-
-ssh_user=$(whoami)
-ssh_key=
-installer_deploy_dir=
-build_installers=false
-#link_installers=false
-install_singularity=false
-while :; do
-  case $1 in
-    -h|--help)
-      show_help
-      exit
-      ;;
-    --ssh-user)
-      shift
-      ssh_user=$1
-      ;;
-    --ssh-key)
-      shift
-      ssh_key="-i $1"
-      ;;
-    --installer-deploy-dir)
-      shift
-      installer_deploy_dir=$1
-      ;;
-    --install-singularity)
-      install_singularity=true
-      ;;
-    --build-installers)
-      build_installers=true
-      ;;
-    -?*)
-      printf 'ERROR: Unknown option: %s\n' "$1" >&2
-      echo ""
-      show_help
-      ;;
-    *) # Default case: No more options, so break out of the loop.
-      break
-  esac
-  shift
-done
-
-if [[ $# -ne 6 ]] ; then
-  show_help
-fi
-
-manager_node=$1
-local_config_file=$2
-remote_config_file=$3
-local_compose_file=$4
-remote_compose_file=$5
-stack_name=$6
-
-# get settings from config file
-vcell_siteCamel=$(grep VCELL_SITE_CAMEL "$local_config_file" | cut -d"=" -f2)
-vcell_version=$(grep VCELL_VERSION_NUMBER "$local_config_file" | cut -d"=" -f2)
-vcell_build=$(grep VCELL_BUILD_NUMBER "$local_config_file" | cut -d"=" -f2)
-batch_singularity_filename=$(grep VCELL_BATCH_SINGULARITY_FILENAME "$local_config_file" | cut -d"=" -f2)
-batch_singularity_image_external=$(grep VCELL_BATCH_SINGULARITY_IMAGE_EXTERNAL "$local_config_file" | cut -d"=" -f2)
-opt_singularity_filename=$(grep VCELL_OPT_SINGULARITY_FILENAME "$local_config_file" | cut -d"=" -f2)
-opt_singularity_image_external=$(grep VCELL_OPT_SINGULARITY_IMAGE_EXTERNAL "$local_config_file" | cut -d"=" -f2)
-#partitionName=$(grep VCELL_SLURM_PARTITION "$local_config_file" | cut -d"=" -f2)
-batchHost=$(grep VCELL_BATCH_HOST "$local_config_file" | cut -d"=" -f2)
-slurm_singularity_central_dir=$(grep VCELL_SLURM_CENTRAL_SINGULARITY_DIR "$local_config_file" | cut -d"=" -f2)
-
-
-echo ""
-echo "coping $local_config_file to $manager_node:$remote_config_file as user $ssh_user"
-cmd="scp $ssh_key $local_config_file $ssh_user@$manager_node:$remote_config_file"
-($cmd) || (echo "failed to upload config file" && exit 1)
-
-echo ""
-echo "coping $local_compose_file to $manager_node:$remote_compose_file as user $ssh_user"
-cmd="scp $ssh_key $local_compose_file $ssh_user@$manager_node:$remote_compose_file"
-($cmd) || (echo "failed to upload docker-compose file" && exit 1)
-
-
-#
-# install the singularity images on the cluster nodes
-#
-if [ "$install_singularity" == "true" ]; then
-
-  echo ""
-  pushd ../build/singularity-vm || (echo "pushd ../build/singularity-vm failed"; exit 1)
-  echo ""
-  echo "CURRENT DIRECTORY IS $PWD"
-
-  #
-  # get configuration from config file and load into current bash environment
-  #
-  echo ""
-
-  if [ ! -e "./${batch_singularity_filename}" ]; then
-    echo "failed to find local batch singularity image file $batch_singularity_filename in ./singularity-vm directory"
-    exit 1
-  fi
-
-  if ! scp "./${batch_singularity_filename}" "$ssh_user@$manager_node:${slurm_singularity_central_dir}"; then
-    echo "failed to copy batch singularity image to server"
-    exit 1
-  fi
-
-  if [ ! -e "./${opt_singularity_filename}" ]; then
-    echo "failed to find local opt singularity image file $opt_singularity_filename in ./singularity-vm directory"
-    exit 1
-  fi
-
-  if ! scp "./${opt_singularity_filename}" "$ssh_user@$manager_node:${slurm_singularity_central_dir}"; then
-    echo "failed to copy opt singularity image to server"
-    exit 1
-  fi
-
-  echo "popd"
-  popd || (echo "popd failed"; exit 1)
-fi
-
-
-#
-# deploy the stack on remote cluster
-#
-echo ""
-echo "deploying stack $stack_name to $manager_node using config in $manager_node:$remote_config_file"
-localmachine=$(hostname)
-if [ "$localmachine" == "$manager_node" ]; then
-  if ! env $(xargs < "$remote_config_file") docker stack deploy -c "$remote_compose_file" "$stack_name";
-  then echo "failed to deploy stack" && exit 1; fi
-else
-  cmd="ssh $ssh_key -t $ssh_user@$manager_node sudo env \$(cat $remote_config_file | xargs) docker stack deploy -c $remote_compose_file $stack_name --with-registry-auth"
-  ($cmd) || (echo "failed to deploy stack" && exit 1)
-fi
-
-#
-# if --build-installers, then generate client installers, placing then in ./generated_installers
-# if --installer-deploy-dir, then also copy installers to $installer_deploy_dir
-# *** unimplemented *** (if --link-installers, then also link installers to version independent installer names for each platform)
-#
-if [ "$build_installers" == "true" ]; then
-  #
-  # if --installer-deploy-dir, then copy the installers from ./generated_installers directory to the installer deploy directory
-  #
-  if [ ! -z "$installer_deploy_dir" ]; then
-    # vcell_siteCamel=Alpha
-    # vcell_version=7.0.0
-    # vcell_build=19
-    # version=7_0_0_19
-    version=$(echo "${vcell_version}_${vcell_build}" | tr '.' _)
-    if ! scp "./generated_installers/VCell_${vcell_siteCamel}_windows-x64_${version}_64bit.exe" \
-      "./generated_installers/VCell_${vcell_siteCamel}_unix_${version}_32bit.sh" \
-      "./generated_installers/VCell_${vcell_siteCamel}_macos_${version}_64bit.dmg" \
-      "./generated_installers/VCell_${vcell_siteCamel}_windows-x32_${version}_32bit.exe" \
-      "./generated_installers/VCell_${vcell_siteCamel}_unix_${version}_64bit.sh" \
-      "./generated_installers/updates.xml" \
-      "./generated_installers/updates_linux32.xml" \
-      "./generated_installers/updates_linux64.xml" \
-      "./generated_installers/updates_mac64.xml" \
-      "./generated_installers/updates_win32.xml" \
-      "./generated_installers/updates_win64.xml" \
-      "./generated_installers/output.txt" \
-      "./generated_installers/md5sums" \
-      "./generated_installers/sha256sums" \
-      "$ssh_user@$manager_node:${installer_deploy_dir}";
-    then
-      echo "failed to copy installers";
-      exit 1;
-    fi
-
-  fi
-fi
-
-
-echo "exited normally"
-
-exit 0
diff --git a/docker/swarm/serverconfig-uch.sh b/docker/swarm/serverconfig-uch.sh
index ea8139ff82..f15e7f4612 100755
--- a/docker/swarm/serverconfig-uch.sh
+++ b/docker/swarm/serverconfig-uch.sh
@@ -38,11 +38,11 @@ vcell_slurm_qos_pu=vcellpu
 case $VCELL_SITE in
   REL)
     _site_port_offset=0
-    VCELL_API_HOST_EXTERNAL=vcellapi.cam.uchc.edu
-    VCELL_S3_EXPORT_BASEURL=https://vcellapi.cam.uchc.edu
+    VCELL_API_HOST_EXTERNAL=vcell.cam.uchc.edu
+    VCELL_S3_EXPORT_BASEURL=https://vcell.cam.uchc.edu
     VCELL_API_PORT_EXTERNAL=443
-    VCELL_API_PREFIX_V0=""
-    VCELL_API_PREFIX_V1=""
+    VCELL_API_PREFIX_V0="/api/v0"
+    VCELL_API_PREFIX_V1="/api/v1"
     _applicationId="1471-8022-1038-5553"
     ;;
   ALPHA)
@@ -115,8 +115,8 @@ VCELL_SSH_CMD_RESTORE_TIMEOUT=5
 #
 # write out the environment file to be for:
-# 1. deployment actions (e.g. deploy-action-swarm.sh or deploy-action-kubernetes.sh)
-# 2. runtime environment for the docker stack run command (Docker swarm mode only, kubernetes uses ConfigMaps in vcell-fluxcd repo)
+# 1. deployment actions (e.g. deploy-action-kubernetes.sh)
+# 2. runtime environment for the docker stack run command (kubernetes uses ConfigMaps in vcell-fluxcd repo; TODO: determine how VCell installer settings are delivered)
 #
 cat <<EOF >"$_outputfile"
 VCELL_API_HOST_EXTERNAL=$VCELL_API_HOST_EXTERNAL
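
With the `if` guards removed, the "Call webhook" step above now runs for every site, posting a `workflow_dispatch` payload to the vcell-fluxcd repository. Below is a minimal sketch of an equivalent manual trigger; the repository/workflow path and the sample input values are assumptions for illustration, since the real dispatch URL is not shown in this diff — only the payload shape comes from the workflow.

```bash
#!/usr/bin/env bash
# Hedged sketch: manually fire the same kubernetes deployment webhook.
# The repo/workflow path below is hypothetical; only the payload shape
# ({"ref": ..., "inputs": {overlay, tag, swversion}}) comes from the diff.
KUSTOMIZE_OVERLAY=stage          # 'prod', 'dev' or 'stage'
CONTAINER_IMAGE_TAG=7.6.0.18     # hypothetical tag value
VCELL_SWVERSION=7.6.0_18         # hypothetical swversion value

cat >body <<EOF
{"ref": "main","inputs":{"overlay": "${KUSTOMIZE_OVERLAY}","tag":"${CONTAINER_IMAGE_TAG}","swversion": "${VCELL_SWVERSION}"}}
EOF

curl -X POST \
  -H "Authorization: Bearer ${GITHUB_TOKEN}" \
  -H 'Content-Type: application/json' \
  --data "@body" \
  https://api.github.com/repos/virtualcell/vcell-fluxcd/actions/workflows/deploy.yml/dispatches
```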
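The deleted deploy-action-swarm.sh relied on the `env $(xargs < file) command` idiom to load plain KEY=value pairs from the uploaded config file into the environment of a single `docker stack deploy` invocation. A self-contained sketch of that idiom, using a throwaway config file in place of the real server config:

```bash
#!/usr/bin/env bash
# Hedged sketch of the env/xargs idiom from the deleted script.
# demo.config is a hypothetical stand-in for the uploaded server config file.
cat > demo.config <<'EOF'
VCELL_TAG=7.6.0.18
VCELL_REPO_NAMESPACE=ghcr.io/virtualcell
EOF

# xargs flattens the file into KEY=value words; env prepends them to the
# environment of just this one child command.
env $(xargs < demo.config) sh -c 'echo "deploying tag $VCELL_TAG from $VCELL_REPO_NAMESPACE"'
```

Note the idiom only works when values contain no spaces or quotes, which the simple tokens written by serverconfig-uch.sh respect.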
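The removed "deploy to swarm site" step also used the `ssh host var=value ... 'bash -s' < script` pattern: link-installers.sh was streamed over ssh while variable assignments were prefixed onto the remote command line. A sketch of the pattern with a hypothetical host and demo script:

```bash
#!/usr/bin/env bash
# Hedged sketch of streaming a script over ssh with inline env assignments.
# remote.example.org and link-demo.sh are placeholders, not the real host/script.
cat > link-demo.sh <<'EOF'
#!/usr/bin/env bash
# Runs on the remote host; the variables arrive via the ssh command line.
echo "linking installers for ${vcell_siteCamel} ${vcell_version} build ${vcell_build}"
echo "target directory: ${installer_deploy_dir}"
EOF

ssh user@remote.example.org \
  installer_deploy_dir=/path/to/installers vcell_siteCamel=Rel vcell_version=7.6.0 vcell_build=18 \
  'bash -s' < link-demo.sh
```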
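Finally, the serverconfig-uch.sh change moves the REL site from bare URLs on vcellapi.cam.uchc.edu to path-prefixed routes on vcell.cam.uchc.edu, which suggests both API generations are now routed by path behind a single host. A sketch of how a client might compose endpoint URLs from the new values; how the variables are actually consumed is an assumption here, and the `/biomodel` resource path is illustrative only:

```bash
#!/usr/bin/env bash
# Hedged sketch: composing REL endpoint URLs from the new serverconfig values.
VCELL_API_HOST_EXTERNAL=vcell.cam.uchc.edu
VCELL_API_PORT_EXTERNAL=443
VCELL_API_PREFIX_V0="/api/v0"
VCELL_API_PREFIX_V1="/api/v1"

v0_url="https://${VCELL_API_HOST_EXTERNAL}:${VCELL_API_PORT_EXTERNAL}${VCELL_API_PREFIX_V0}/biomodel"
v1_url="https://${VCELL_API_HOST_EXTERNAL}:${VCELL_API_PORT_EXTERNAL}${VCELL_API_PREFIX_V1}/biomodel"
echo "$v0_url"   # https://vcell.cam.uchc.edu:443/api/v0/biomodel
echo "$v1_url"   # https://vcell.cam.uchc.edu:443/api/v1/biomodel
```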