Skip to content

Commit

Permalink
Merge pull request #121 from RobertKrawitz/kata-perf-ci-force-timeout
Browse the repository at this point in the history
Restore --force-cleanup-i-know-this-is-dangerous
  • Loading branch information
RobertKrawitz authored Feb 27, 2023
2 parents 0baec70 + 4b93a1a commit 0c6da1e
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 8 deletions.
9 changes: 6 additions & 3 deletions CI/run-kata-perf-suite
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ declare -i use_python_venv=1
declare python_venv=
declare analyze_results=
declare -i debugonly=0
declare force_cleanup_timeout=
declare -i take_prometheus_snapshot=0
declare unique_job_prefix=
declare snapshot_date_format='%Y_%m_%dT%H_%M_%S%z'
Expand Down Expand Up @@ -100,7 +101,7 @@ function report_results() {
function retrieve_prometheus_timestamp() {
"${OC}" exec -n openshift-monitoring prometheus-k8s-0 -- /bin/sh -c "date -u '+$snapshot_date_format'"
}

function start_prometheus_snapshot() {
echo "Starting Prometheus snapshot" 1>&2
"${OC}" delete pod -n openshift-monitoring prometheus-k8s-0
Expand Down Expand Up @@ -418,7 +419,7 @@ function monitor() {
function help() {
${PAGER:-less} <<EOF
Usage: $0 [options | clusterbuster_options] [workloads]
Here is a brief description of all available workloads. If not provided,
Here is a brief description of all available workloads. If not provided,
all workloads are run:
$(_document_workloads)
Expand All @@ -439,7 +440,7 @@ $(_document_workloads)
--sync-pin-node=node Pin the sync pod to the specified node.
By default, the third worker node is used.
--pin-node[=class]=node
Pin pods of the specified class to the
Pin pods of the specified class to the
specified node. Class is optional; if
specified, it should be either client,
server, or pin.
Expand Down Expand Up @@ -547,6 +548,7 @@ function process_option() {
forcepull*) force_pull_image="$(bool "$optvalue")" ;;
usepythonvenv*) use_python_venv="$(bool "$optvalue")" ;;
uuid) uuid=$optvalue ;;
forcecleanupiknowthisisdangerous) force_cleanup_timeout=$optvalue ;;
prometheussnapshot) take_prometheus_snapshot=$(bool "$optvalue") ;;
uniqueprefix) unique_job_prefix=$(bool -Y "$optvalue") ;;
jobdelay) job_delay=$optvalue ;;
Expand Down Expand Up @@ -715,6 +717,7 @@ function run_clusterbuster_1() {
${jobname:+"--jobname=$jobname"} \
${tmp_jobdir:+"--artifactdir=$tmp_jobdir"} \
${runtimeclass:+"--runtimeclass=$runtimeclass"} \
${force_cleanup_timeout:+"--force-cleanup-i-know-this-is-dangerous=$force_cleanup_timeout"} \
${unique_job_prefix:+"--pod-prefix=$job_prefix"} \
"${extra_clusterbuster_args[@]}" \
"$@" 2>&1 || status=$?
Expand Down
10 changes: 5 additions & 5 deletions clusterbuster
Original file line number Diff line number Diff line change
Expand Up @@ -381,16 +381,16 @@ $(print_workloads_supporting_reporting ' - ')
Do not attempt anything that would use metrics
or the prometheus pod.
--failure-status=<status>
Failures should be reported as specified rather
Failures should be reported as specified rather
than "Fail"
--pod-start-timeout=<seconds>
Wait specified time for pods to come on line.
Default $pod_start_timeout
--retrieve-successful-logs=<0|1>
If retrieving artifacts, retrieve logs for all
pods, not just failing pods. Default $retrieve_successful_logs.
If retrieving artifacts, retrieve logs for all
pods, not just failing pods. Default $retrieve_successful_logs.
--parallel-logs=n
If retrieving artifacts, parallelize log retrieval.
If retrieving artifacts, parallelize log retrieval.
Workload sizing options:
--containers_per_pod=N
Expand Down Expand Up @@ -495,7 +495,7 @@ $(print_workloads_supporting_reporting ' - ')
Arrange for the liveness probe to sleep for specified
time.
--privileged-pods=[0,1]
Create pods as privileged (default $create_pods_privileged)
Create pods as privileged (default $create_pods_privileged)
Kata Virtualization Tuning:
--virtiofsd-writeback=[0,1]
Expand Down

0 comments on commit 0c6da1e

Please sign in to comment.