diff --git a/docker/packer/template-jammy1.pkr.hcl.earlier b/docker/packer/template-jammy1.pkr.hcl.earlier new file mode 100644 index 0000000..9185be1 --- /dev/null +++ b/docker/packer/template-jammy1.pkr.hcl.earlier @@ -0,0 +1,105 @@ +# +# Packer template for jenkins instances +# The purpose of this step is to prepopulate the jenkins agents with the docker images, so they don't have +# to download multigigabyte images every time they launch. Jenkins will operate without packer, but there is +# a much longer delay while the docker images are pulled. +# +# Instructions: +# +# Whenever the docker images mentioned in this repo are modified and updated, packer should be re-run. +# +# export AWS_ACCESS_KEY_ID=_ +# export AWS_SECRET_ACCESS_KEY=_ +# packer build template-jammy1.pkr.hcl +# +# Use the AWS credentials for the "packer" IAM account, which has permissions in us-west-2, for isolation, and then copies +# the AMI to us-east for usage. A copy of the installed IAM policy can be found in this directory. +# +# : ' +# curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - +# sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" +# sudo apt-get update && sudo apt-get install packer +# ' + +variable "ami_name" { + type = string + default = "my-custom-ami" +} + +locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } + +# source blocks configure your builder plugins; your source is then used inside +# build blocks to create resources. A build block runs provisioners and +# post-processors on an instance created by the source. 
+source "amazon-ebs" "example" { + # access_key = "${var.aws_access_key}" + ami_name = "jenkins-jammy-ami ${local.timestamp}" + instance_type = "t2.xlarge" + region = "us-west-2" + # region = "eu-west-1" + ami_regions = ["us-east-1"] + # secret_key = "${var.aws_secret_key}" + launch_block_device_mappings { + device_name = "/dev/sda1" + volume_size = 60 + volume_type = "gp2" + delete_on_termination = true + } + # source_ami = "ami-03d5c68bab01f3496" + # either specify an exact ami, or use the ami_filter below. Both methods work. The filter is likely better. + # source_ami = "ami-0ddf424f81ddb0720" + source_ami_filter { + filters = { + virtualization-type = "hvm" + architecture = "x86_64" + name = "ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*" + # block-device-mapping.volume-type = "gp2" + root-device-type = "ebs" + } + most_recent = true + owners = ["099720109477"] + } + ssh_username = "ubuntu" +} + +# a build block invokes sources and runs provisioning steps on them. +build { + sources = ["source.amazon-ebs.example"] + + #"sudo bash -c \"echo deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable > /etc/apt/sources.list.d/docker.list\"", + + provisioner "shell" { + inline = [ + "set -xe", + "PACKERUSERNAME=nodejenkins", + "PACKERUSERID=1001", + "PUBLICKEY=\"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgG1Vr4/8tKjae03NChazvoqoDPghZfXrtchQdqcUhFyxO9r+5kZGG6BMYEfGL37a1slhSkwlIlept2DClf/j8T4KCO8ZR6r7oyPdj4Dx3PwquxALCBEOGR4FgzdzioxF56DwQtBbSX7JSB9caMxh3HQ12EsEecSN+er8m77TzD8977lBu2oI8jQUtYfVVLyfuASD0v799zPl+IpS2/EPDYCbcMPHV3BJvRUuc5nmKgEcdxrTrnQhG13LB98it6jxSUgeVrRwg5LL8GDd0yugPkPS3/DmJ3i9Ugf/Ca9C/1kX+FbXdmyoHxbyKWqvCpK0g4vFnDkgs2QLSgxuI7bbB nodejenkins\"", + "echo \"$PACKERUSERNAME ALL=(ALL) NOPASSWD:ALL\" | sudo tee /etc/sudoers.d/90-$PACKERUSERNAME", + "sudo useradd -s /bin/bash -u $PACKERUSERID $PACKERUSERNAME", + "sudo mkdir -p /home/$PACKERUSERNAME/.ssh", + "echo $PUBLICKEY | sudo tee /home/$PACKERUSERNAME/.ssh/authorized_keys", + "sudo chmod 
600 /home/$PACKERUSERNAME/.ssh/authorized_keys", + "sudo chown -R $PACKERUSERNAME:$PACKERUSERNAME /home/$PACKERUSERNAME", + "sudo mkdir -p /jenkins", + "sudo chown $PACKERUSERNAME:$PACKERUSERNAME /jenkins", + "sleep 90", + "sudo apt-get update", + "sleep 15", + "sudo apt-get install -y openjdk-11-jre-headless", + "sudo apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release build-essential python3-pip", + "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg", + "echo \"deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null", + "sudo apt-get update", + "sudo apt-get install -y docker-ce docker-ce-cli docker-ce-rootless-extras", + "sudo systemctl stop unattended-upgrades", + "sudo systemctl disable unattended-upgrades", + "sudo apt-get purge -y unattended-upgrades", + "sudo usermod -a -G docker $PACKERUSERNAME", + "sudo docker pull cppalliance/tracing:nj5", + "sudo docker pull sdarwin/jsonbenchmarks:latest", + "sudo docker pull cppalliance/boost_superproject_build:20.04-v1", + "sudo docker pull cppalliance/boost_superproject_build:22.04-v1", + "pip3 install gcovr" + ] + } +} diff --git a/docker/packer/template-noble.pkr.hcl b/docker/packer/template-noble.pkr.hcl new file mode 100644 index 0000000..453f86d --- /dev/null +++ b/docker/packer/template-noble.pkr.hcl @@ -0,0 +1,128 @@ +# +# Packer template for jenkins instances +# The purpose of this step is to prepopulate the jenkins agents with the docker images, so they don't have +# to download multigigabyte images every time they launch. Jenkins will operate without packer, but there is +# a much longer delay while the docker images are pulled. +# +# Instructions: +# +# Whenever the docker images mentioned in this repo are modified and updated, packer should be re-run. 
+# +# export AWS_ACCESS_KEY_ID=_ +# export AWS_SECRET_ACCESS_KEY=_ +# packer build template-noble.pkr.hcl +# +# Use the AWS credentials for the "packer" IAM account, which has permissions in us-west-2, for isolation, and then copies +# the AMI to us-east for usage. A copy of the installed IAM policy can be found in this directory. +# +# : ' +# curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - +# sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" +# sudo apt-get update && sudo apt-get install packer +# ' + +variable "ami_name" { + type = string + default = "my-custom-ami" +} + +locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } + +# source blocks configure your builder plugins; your source is then used inside +# build blocks to create resources. A build block runs provisioners and +# post-processors on an instance created by the source. +source "amazon-ebs" "example" { + # access_key = "${var.aws_access_key}" + ami_name = "jenkins-noble-ami ${local.timestamp}" + instance_type = "t2.xlarge" + region = "us-west-2" + # region = "eu-west-1" + ami_regions = ["us-east-1"] + # secret_key = "${var.aws_secret_key}" + launch_block_device_mappings { + device_name = "/dev/sda1" + volume_size = 60 + volume_type = "gp2" + delete_on_termination = true + } + # source_ami = "ami-03d5c68bab01f3496" + # either specify an exact ami, or use the ami_filter below. Both methods work. The filter is likely better. 
+ # source_ami = "ami-0ddf424f81ddb0720" + + + # from terraform + # source_ami_filter { + # filters = { + # name = "*/ubuntu-noble-24.04-amd64-server-*" + # root-device-type = "ebs" + # virtualization-type = "hvm" + # } + # most_recent = true + # owners = ["099720109477"] + # } + + source_ami_filter { + filters = { + virtualization-type = "hvm" + architecture = "x86_64" + name = "*/ubuntu-noble-24.04-amd64-server-*" + # block-device-mapping.volume-type = "gp2" + root-device-type = "ebs" + } + most_recent = true + owners = ["099720109477"] + } + ssh_username = "ubuntu" +} + +# a build block invokes sources and runs provisioning steps on them. +build { + sources = ["source.amazon-ebs.example"] + + #"sudo bash -c \"echo deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable > /etc/apt/sources.list.d/docker.list\"", + + provisioner "shell" { + inline = [ + "set -xe", + "whoami", + "PACKERUSERNAME=nodejenkins", + "PACKERUSERID=1001", + "PUBLICKEY=\"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgG1Vr4/8tKjae03NChazvoqoDPghZfXrtchQdqcUhFyxO9r+5kZGG6BMYEfGL37a1slhSkwlIlept2DClf/j8T4KCO8ZR6r7oyPdj4Dx3PwquxALCBEOGR4FgzdzioxF56DwQtBbSX7JSB9caMxh3HQ12EsEecSN+er8m77TzD8977lBu2oI8jQUtYfVVLyfuASD0v799zPl+IpS2/EPDYCbcMPHV3BJvRUuc5nmKgEcdxrTrnQhG13LB98it6jxSUgeVrRwg5LL8GDd0yugPkPS3/DmJ3i9Ugf/Ca9C/1kX+FbXdmyoHxbyKWqvCpK0g4vFnDkgs2QLSgxuI7bbB nodejenkins\"", + "echo \"$PACKERUSERNAME ALL=(ALL) NOPASSWD:ALL\" | sudo tee /etc/sudoers.d/90-$PACKERUSERNAME", + "sudo useradd -s /bin/bash -u $PACKERUSERID $PACKERUSERNAME", + "sudo mkdir -p /home/$PACKERUSERNAME/.ssh", + "echo $PUBLICKEY | sudo tee /home/$PACKERUSERNAME/.ssh/authorized_keys", + "sudo chmod 600 /home/$PACKERUSERNAME/.ssh/authorized_keys", + "sudo chown -R $PACKERUSERNAME:$PACKERUSERNAME /home/$PACKERUSERNAME", + "sudo mkdir -p /jenkins", + "sudo chown $PACKERUSERNAME:$PACKERUSERNAME /jenkins", + "sleep 90", + "sudo apt-get update", + "sleep 15", + "sudo apt-get install -y openjdk-21-jre-headless", + "sudo apt-get 
install -y apt-transport-https ca-certificates curl gnupg lsb-release build-essential python3-pip python3-venv", + "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg", + "echo \"deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null", + "sudo apt-get update", + "sudo apt-get install -y docker-ce docker-ce-cli docker-ce-rootless-extras", + "# considering moving gcovr to a docker container. Not running anything in the plain VM.", + "# but for the moment, allow both methods", + "sudo python3 -m venv /opt/venv", + "sudo chmod -R 777 /opt/venv", + "export PATH=/opt/venv/bin:$PATH", + "pip3 install gcovr", + "sudo systemctl stop unattended-upgrades", + "sudo systemctl disable unattended-upgrades", + "sudo apt-get purge -y unattended-upgrades", + "sudo systemctl disable apt-daily-upgrade.timer", + "sudo systemctl stop apt-daily-upgrade.timer", + "sudo systemctl disable apt-daily.timer", + "sudo systemctl stop apt-daily.timer", + "sudo usermod -a -G docker $PACKERUSERNAME", + "# sudo docker pull cppalliance/tracing:nj5", + "# sudo docker pull sdarwin/jsonbenchmarks:latest", + "sudo docker pull cppalliance/boost_superproject_build:22.04-v1", + "sudo docker pull cppalliance/boost_superproject_build:24.04-v1" + ] + } +} diff --git a/docs/jenkins-details.md b/docs/jenkins-details.md index 78ae3a9..c16b750 100644 --- a/docs/jenkins-details.md +++ b/docs/jenkins-details.md @@ -99,10 +99,54 @@ Dashboard -> Manage Jenkins -> Plugins - AWS Credentials Plugin - Pipeline: GitHub - Remote Jenkinsfile Provider +- Amazon EC2 plugin -Add a credential: github-cppalliance-bot . It's a "username with password". In reality, username with a token. This is a github account. -Another credential: cppalliance-bot-aws-user, AWS credential access to S3.
+Add each of these credentials. + +While it may be convenient to have access to the same credentials already in use, and faster to set up, in fact all credentials can be recreated/regenerated/reassigned. The same exact credentials are not needed. Issue new tokens. Create new users. With permissions in the AWS and github accounts. + +github-cppalliance-bot . It's a "username with password". In reality, username with a token. This is a github account , cppalliance-bot + +cppalliance-bot-aws-user, AWS credential access to S3. Permissions to S3. + +jenkinsec2plugin , AKIAQWC... , this is an "aws credential", with key/secret to launch instances in the cloud. While this could be + the same as the previous cppalliance-bot-aws-user, it happens to be a separate user. + +nodejenkins-private-key - an ssh key, where you enter a private key, that will be used to ssh into the auto-scaled cloud nodes that + jenkinsec2plugin is launching. + +Cloud: + +Manage Jenkins->Clouds->New Cloud +name: cloud-jenkinspool1 +ec2 creds: AKIAQWC... (jenkinsec2plugin) +region: us-east-1 +ec2-keypair: nodejenkins-private-key +description: jenkinspool1 +ami: ami-0b1cd4177a6d9ee12 (will vary) +size: t3xlarge +security group names: jenkinsnode +remote filesystem: /jenkins +remote user: nodejenkins +ssh port: 22 +labels: jenkinspool1 +usage: only when labels match +idle termination: 6 +Advanced: +executor: 1 +min: 0 +min: 0 +connection strategy: public dns +host verification: off +max uses: -1 + +See docker/packer folder to run packer and generate the AMI. The AMI id will be entered in the above Cloud configuration. + +The pool is referenced via the label jenkinspool1. + +Not all jobs use the cloud pool. At the moment many doc previews are built locally on the jenkins host itself, using docker but not remote cloud agents. Install Docker. Add Jenkins to docker group. Restart jenkins. 
@@ -274,7 +318,3 @@ SSH into jenkins.cppalliance.org, go to root/scripts, and run: Certbot + Cloudflare seem to only support 15 domains per cert. Create certbot3.sh, certbot4.sh, etc. as needed. Copies of these scripts are found in the nginx directory here in this repo. - - - - diff --git a/jenkinsfiles/lcov_1 b/jenkinsfiles/lcov_1 new file mode 100644 index 0000000..8ccaeb7 --- /dev/null +++ b/jenkinsfiles/lcov_1 @@ -0,0 +1,306 @@ +pipeline { + + agent { + node { + label 'jenkinspool1' + } + } + + //agent { + // docker { + // image 'cppalliance/boost_superproject_build:24.04-v1' + // // label 'jenkinspool1' + // } + //} + + stages { + stage('Preclean Workspace') { + steps { + sh '''#!/bin/bash + set -xe + rm -rf * .* + + # lcov is using ../boost-root. It could re-used but what if + # multiple jobs are sharing that directory over time. For now, clear it. + rm -rf ../boost-root || true + + # The lcov tests are also using the following directory. Remove that. + TMPREPONAME=$(basename -s .git "$(git config --get remote.origin.url)") + TMPDIRNAME=${TMPREPONAME}-target-branch-iteration + rm -rf ../${TMPDIRNAME} || true + ''' + checkout scm + } + } + + stage('Set Variables') { + steps { + sh '''#!/bin/bash -xe + echo "" > jenkinsjobinfo.sh + chmod 777 jenkinsjobinfo.sh + REPONAME=$(basename -s .git "$(git config --get remote.origin.url)") + # REPONAME=$(basename `git rev-parse --show-toplevel`) + DNSREPONAME=$(echo $REPONAME | tr '_' '-') + ORGANIZATION=$(basename $(dirname "${GIT_URL}")) + echo "export JENKINS_CI_REPO=cppalliance/jenkins-ci" >> jenkinsjobinfo.sh + echo "export JENKINS_CI_REPO_BRANCH=master" >> jenkinsjobinfo.sh + # echo "export JENKINS_CI_REPO=sdarwin/jenkins-ci" >> jenkinsjobinfo.sh + # echo "export JENKINS_CI_REPO_BRANCH=testing" >> jenkinsjobinfo.sh + echo "export PRTEST=prtest" >> jenkinsjobinfo.sh + echo "export REPONAME=${REPONAME}" >> jenkinsjobinfo.sh + echo "export DNSREPONAME=${DNSREPONAME}" >> jenkinsjobinfo.sh + echo "export 
ORGANIZATION=${ORGANIZATION}" >> jenkinsjobinfo.sh + echo "export ONLY_BUILD_ON_DOCS_MODIFICATION=false" >> jenkinsjobinfo.sh + echo "export PATH_TO_DOCS=libs/${REPONAME}/doc" >> jenkinsjobinfo.sh + ''' + } + } + + stage('Diagnostics') { + steps { + sh '''#!/bin/bash + set -x + # not set -e. errors may occur in diagnostics + cat jenkinsjobinfo.sh + . jenkinsjobinfo.sh + ls -al + cat /etc/os-release + pwd + env + whoami + touch $(date "+%A-%B-%d-%T-%y") + mount | grep ^/dev/ | grep -v /etc | awk '{print \$3}' + git branch + git branch -avv + true + ''' + } + } + + stage('Prebuild script') { + when { + anyOf{ + branch 'develop' + branch 'master' + expression { env.CHANGE_ID != null } + } + } + steps { + sh '''#!/bin/bash + set -xe + . jenkinsjobinfo.sh + curl -f -o jenkins_prebuild_script.sh https://raw.githubusercontent.com/${JENKINS_CI_REPO}/${JENKINS_CI_REPO_BRANCH}/scripts/${ORGANIZATION}_${REPONAME}_lcov_prebuild.sh || true + if [ -f jenkins_prebuild_script.sh ]; then + chmod 755 jenkins_prebuild_script.sh + ./jenkins_prebuild_script.sh + fi + ''' + } + } + + stage('Check if docs were modified') { + when { + anyOf{ + expression { env.CHANGE_ID != null } + } + } + steps { + sh '''#!/bin/bash + set -xe + . jenkinsjobinfo.sh + + if [ "$ONLY_BUILD_ON_DOCS_MODIFICATION" == "true" ]; then + echo "Starting check to see if docs have been updated." + git config remote.origin.fetch '+refs/heads/*:refs/remotes/origin/*' + git fetch origin develop + mergebase=$(git merge-base HEAD remotes/origin/develop) + counter=0 + for i in $(git diff --name-only HEAD $mergebase) + do + echo "file is $i" + if [[ $i =~ ^doc/ ]]; then + counter=$((counter+1)) + fi + done + + if [ "$counter" -eq "0" ]; then + echo "No docs found. Exiting." + # exit 1 + echo "export AUTOCANCEL=true" >> jenkinsjobinfo.sh + else + echo "Found $counter docs. Proceeding." + fi + else + echo "Not checking if docs were updated. Always build." 
+ fi + ''' + } + } + + stage('Determine if the job should exit') { + when { + anyOf{ + expression { env.CHANGE_ID != null } + } + } + environment { + // See https://www.jenkins.io/doc/book/pipeline/jenkinsfile/#using-environment-variables + AUTOCANCEL = """${sh( + returnStdout: true, + script: '#!/bin/bash \n' + 'source jenkinsjobinfo.sh; echo -n "${AUTOCANCEL}"' + )}""" + } + steps { + script { + if (env.AUTOCANCEL == "true") { + currentBuild.result = 'ABORTED' + error("Aborting the build.") + } + } + } + } + + // To skip this step, and actually all steps, adjust the job's Filter regex in the Jenkins UI + // (develop|master|PR-.*) will build all branches + // (PR-.*) will build pull requests. Etc. + stage('Build docs') { + when { + anyOf{ + branch 'develop' + branch 'master' + expression { env.CHANGE_ID != null } + } + } + + steps { + sh '''#!/bin/bash + set -xe + . jenkinsjobinfo.sh + export pythonvirtenvpath=/opt/venvboostdocs + if [ -f ${pythonvirtenvpath}/bin/activate ]; then + source ${pythonvirtenvpath}/bin/activate + fi + + # Is there a custom build script? Often not. But let's check: + + curl -f -o jenkins_build_script.sh https://raw.githubusercontent.com/${JENKINS_CI_REPO}/${JENKINS_CI_REPO_BRANCH}/scripts/${ORGANIZATION}_${REPONAME}_lcov_build.sh || true + if [ -f jenkins_build_script.sh ]; then + chmod 755 jenkins_build_script.sh + ./jenkins_build_script.sh + exit 0 + fi + + # Otherwise, proceed using standard build steps: + + export pythonvirtenvpath=/opt/venv + if [ -f ${pythonvirtenvpath}/bin/activate ]; then + source ${pythonvirtenvpath}/bin/activate + fi + mkdir -p ~/.local/bin + GITHUB_REPO_URL="https://github.com/cppalliance/ci-automation/raw/master" + DIR="scripts" + FILENAME="lcov-jenkins-gcc-13.sh" + URL="${GITHUB_REPO_URL}/$DIR/$FILENAME" + FILE=~/.local/bin/$FILENAME + if [ ! 
-f "$FILE" ]; then + curl -s -S --retry 10 -L -o $FILE $URL && chmod 755 $FILE + fi + + $FILE + ''' + } + } + + stage('Postbuild script') { + when { + anyOf{ + branch 'develop' + branch 'master' + expression { env.CHANGE_ID != null } + } + } + steps { + sh '''#!/bin/bash + set -xe + + . jenkinsjobinfo.sh + curl -f -o jenkins_postbuild_script.sh https://raw.githubusercontent.com/${JENKINS_CI_REPO}/${JENKINS_CI_REPO_BRANCH}/scripts/${ORGANIZATION}_${REPONAME}_lcov_postbuild.sh || true + if [ -f jenkins_postbuild_script.sh ]; then + chmod 755 jenkins_postbuild_script.sh + ./jenkins_postbuild_script.sh + fi + ''' + } + } + + stage('Pull requests: Upload to S3') { + when { + anyOf{ + expression { env.CHANGE_ID != null } + } + } + + environment { + // See https://www.jenkins.io/doc/book/pipeline/jenkinsfile/#using-environment-variables + REPONAME = """${sh( + returnStdout: true, + script: '#!/bin/bash \n' + 'source jenkinsjobinfo.sh; echo -n "${REPONAME}"' + )}""" + DNSREPONAME = """${sh( + returnStdout: true, + script: '#!/bin/bash \n' + 'source jenkinsjobinfo.sh; echo -n "${DNSREPONAME}"' + )}""" + PRTEST = """${sh( + returnStdout: true, + script: '#!/bin/bash \n' + 'source jenkinsjobinfo.sh; echo -n "${PRTEST}"' + )}""" + PATH_TO_DOCS = """${sh( + returnStdout: true, + script: '#!/bin/bash \n' + 'source jenkinsjobinfo.sh; echo -n "${PATH_TO_DOCS}"' + )}""" + DIFF2HTML = """${sh( + returnStdout: true, + script: '#!/bin/bash \n' + 'source jenkinsjobinfo.sh; echo -n "${DIFF2HTML}"' + )}""" + + } + + steps { + withAWS(region:'us-east-1', credentials: 'cppalliance-bot-aws-user') { + + s3Upload(bucket:"cppalliance-previews", path:"${DNSREPONAME}/${CHANGE_ID}/genhtml/", workingDir: "genhtml" , includePathPattern:"**") + s3Upload(bucket:"cppalliance-previews", path:"${DNSREPONAME}/${CHANGE_ID}/gcovr/", workingDir: "gcovr" , includePathPattern:"**") + + } + script { + + commenttext = "GCOVR code coverage report 
[https://${env.CHANGE_ID}.${env.DNSREPONAME}.${env.PRTEST}.cppalliance.org/gcovr/index.html](https://${env.CHANGE_ID}.${env.DNSREPONAME}.${env.PRTEST}.cppalliance.org/gcovr/index.html)\nLCOV code coverage report [https://${env.CHANGE_ID}.${env.DNSREPONAME}.${env.PRTEST}.cppalliance.org/genhtml/index.html](https://${env.CHANGE_ID}.${env.DNSREPONAME}.${env.PRTEST}.cppalliance.org/genhtml/index.html)\nCoverage Diff [https://${env.CHANGE_ID}.${env.DNSREPONAME}.${env.PRTEST}.cppalliance.org/gcovr/coverage_diff.txt](https://${env.CHANGE_ID}.${env.DNSREPONAME}.${env.PRTEST}.cppalliance.org/gcovr/coverage_diff.txt)" + + pullRequest.comment(commenttext) + + } + } + } + + stage('Post Diagnostics') { + steps { + sh '''#!/bin/bash + set -x + # not set -e. errors may occur in diagnostics + cat jenkinsjobinfo.sh + . jenkinsjobinfo.sh + ls -al + cat /etc/os-release + pwd + env + whoami + touch $(date "+%A-%B-%d-%T-%y") + mount | grep ^/dev/ | grep -v /etc | awk '{print \$3}' + git branch + git branch -avv + true + ''' + } + } + } +} diff --git a/scripts/boostorg_buffers_prebuild.sh b/scripts/boostorg_buffers_prebuild.sh index 4f66ca7..a431d92 100755 --- a/scripts/boostorg_buffers_prebuild.sh +++ b/scripts/boostorg_buffers_prebuild.sh @@ -1,4 +1,4 @@ #!/bin/bash set -xe -echo "export PRTEST=prtest2" >> jenkinsjobinfo.sh +echo "export PRTEST=prtest" >> jenkinsjobinfo.sh diff --git a/scripts/boostorg_url_lcov_prebuild.sh b/scripts/boostorg_url_lcov_prebuild.sh new file mode 100755 index 0000000..4f66ca7 --- /dev/null +++ b/scripts/boostorg_url_lcov_prebuild.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +set -xe +echo "export PRTEST=prtest2" >> jenkinsjobinfo.sh diff --git a/scripts/cppalliance_buffers_lcov_prebuild.sh b/scripts/cppalliance_buffers_lcov_prebuild.sh new file mode 100755 index 0000000..a431d92 --- /dev/null +++ b/scripts/cppalliance_buffers_lcov_prebuild.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +set -xe +echo "export PRTEST=prtest" >> jenkinsjobinfo.sh diff --git 
a/scripts/cppalliance_http_proto_lcov_prebuild.sh b/scripts/cppalliance_http_proto_lcov_prebuild.sh new file mode 100755 index 0000000..1446266 --- /dev/null +++ b/scripts/cppalliance_http_proto_lcov_prebuild.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +set -xe +echo "export PRTEST=prtest" >> jenkinsjobinfo.sh +echo "export EXTRA_BOOST_LIBRARIES=cppalliance/buffers" >> jenkinsjobinfo.sh