From b736ea2153f56c33349cacaa827f8c3fcee0458a Mon Sep 17 00:00:00 2001
From: Paul Spooren
Date: Tue, 19 Jul 2022 12:22:26 +0200
Subject: [PATCH] add support for OpenMetrics message format

This commit adds support for the OpenMetrics message format[1], which is
equivalent to the Prometheus text exposition format. Its advantages are
that it is easily human-readable and directly consumable by time-series
databases. It also allows results to be displayed directly within GitLab
CI.

It is now possible to pass 'openmetrics' as the message format; example
output is shown below:

criterion_benchmark_result_ns{id="address_scan",confidence="estimate",input_size="4",aggregation="slope"} 33811.02693018697
criterion_benchmark_result_ns{id="address_scan",confidence="upper_bound",input_size="4",aggregation="slope"} 35170.884432780265
criterion_benchmark_result_ns{id="address_scan",confidence="lower_bound",input_size="4",aggregation="slope"} 32402.287415202973
criterion_benchmark_info{id="address_scan",input_size="4",report_directory="/path/criterion/reports/address_scan/4"} 1

[1]: https://openmetrics.io

Signed-off-by: Kevin Becker
Signed-off-by: Paul Spooren
---
 CHANGELOG.md                       |   4 +-
 src/config.rs                      |   6 +-
 src/message_formats/json.rs        | 454 ++++++++++++++---------------
 src/message_formats/mod.rs         | 101 ++++++-
 src/message_formats/openmetrics.rs | 107 +++++++
 5 files changed, 414 insertions(+), 258 deletions(-)
 create mode 100644 src/message_formats/openmetrics.rs

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dc95f43..e205fad 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
 # [Unreleased]
+### Added
+- New message output format for [OpenMetrics](https://openmetrics.io).
 
 ## [1.1.0] - 2021-07-28
 ### Fixed
@@ -66,4 +68,4 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 [1.0.0]: https://github.com/bheisler/cargo-criterion/compare/1.0.0-alpha3...1.0.0
 [1.0.1]: https://github.com/bheisler/cargo-criterion/compare/1.0.0-alpha3...1.0.1
 [1.0.1]: https://github.com/bheisler/cargo-criterion/compare/1.0.1...1.1.0
-[Unreleased]: https://github.com/bheisler/cargo-criterion/compare/1.1.0...HEAD
\ No newline at end of file
+[Unreleased]: https://github.com/bheisler/cargo-criterion/compare/1.1.0...HEAD
diff --git a/src/config.rs b/src/config.rs
index a7b198b..d07da45 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -146,11 +146,13 @@ impl PlottingBackend {
 #[derive(Debug)]
 pub enum MessageFormat {
     Json,
+    OpenMetrics,
 }
 impl MessageFormat {
     fn from_str(s: &str) -> MessageFormat {
         match s {
             "json" => MessageFormat::Json,
+            "openmetrics" => MessageFormat::OpenMetrics,
             other => panic!("Unknown message format: {}", other),
         }
     }
@@ -422,10 +424,10 @@ bencher: Emulates the output format of the bencher crate and nightly-only libtes
         .arg(Arg::with_name("message-format")
             .long("message-format")
             .takes_value(true)
-            .possible_values(&["json"])
+            .possible_values(&["json", "openmetrics"])
             .help("If set, machine-readable output of the requested format will be printed to stdout.")
             .long_help(
-"Change the machine-readable output format. Possible values are [json].
+"Change the machine-readable output format. Possible values are [json, openmetrics].
 
 Machine-readable information on the benchmarks will be printed in the requested format to stdout.
 All of cargo-criterion's other output will be printed to stderr.
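Since machine-readable output goes to stdout and everything else to stderr, the new format can be captured with e.g. `cargo criterion --message-format openmetrics > metrics.txt`. As a quick sanity check of the flag wiring above, a unit test against MessageFormat::from_str could look like the following; this is an illustrative sketch placed alongside the enum in src/config.rs, not a test this patch adds:

    #[cfg(test)]
    mod tests {
        use super::MessageFormat;

        // "openmetrics" must map to the new enum variant; any other value
        // panics, which clap's possible_values() guards against in practice.
        #[test]
        fn openmetrics_is_recognized() {
            assert!(matches!(
                MessageFormat::from_str("openmetrics"),
                MessageFormat::OpenMetrics
            ));
        }
    }
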
diff --git a/src/message_formats/json.rs b/src/message_formats/json.rs
index 8947c53..a5418ee 100644
--- a/src/message_formats/json.rs
+++ b/src/message_formats/json.rs
@@ -1,243 +1,211 @@
-use crate::connection::Throughput as ThroughputEnum;
-use crate::estimate::Estimate;
-use crate::model::BenchmarkGroup;
-use crate::report::{
-    compare_to_threshold, BenchmarkId, ComparisonResult, MeasurementData, Report, ReportContext,
-};
-use crate::value_formatter::ValueFormatter;
-use anyhow::Result;
-use serde_derive::Serialize;
-use serde_json::json;
-use std::io::{stdout, Write};
-
-trait Message: serde::ser::Serialize {
-    fn reason() -> &'static str;
-}
-
-#[derive(Serialize)]
-struct ConfidenceInterval {
-    estimate: f64,
-    lower_bound: f64,
-    upper_bound: f64,
-    unit: String,
-}
-impl ConfidenceInterval {
-    fn from_estimate(estimate: &Estimate, value_formatter: &ValueFormatter) -> ConfidenceInterval {
-        let mut array = [
-            estimate.point_estimate,
-            estimate.confidence_interval.lower_bound,
-            estimate.confidence_interval.upper_bound,
-        ];
-        let unit = value_formatter.scale_for_machines(&mut array);
-        let [estimate, lower_bound, upper_bound] = array;
-        ConfidenceInterval {
-            estimate,
-            lower_bound,
-            upper_bound,
-            unit,
-        }
-    }
-    fn from_percent(estimate: &Estimate) -> ConfidenceInterval {
-        ConfidenceInterval {
-            estimate: estimate.point_estimate,
-            lower_bound: estimate.confidence_interval.lower_bound,
-            upper_bound: estimate.confidence_interval.upper_bound,
-            unit: "%".to_owned(),
-        }
-    }
-}
-
-#[derive(Serialize)]
-struct Throughput {
-    per_iteration: u64,
-    unit: String,
-}
-impl From<&ThroughputEnum> for Throughput {
-    fn from(other: &ThroughputEnum) -> Self {
-        match other {
-            ThroughputEnum::Bytes(bytes) => Throughput {
-                per_iteration: *bytes,
-                unit: "bytes".to_owned(),
-            },
-            ThroughputEnum::Elements(elements) => Throughput {
-                per_iteration: *elements,
-                unit: "elements".to_owned(),
-            },
-        }
-    }
-}
-
-#[derive(Serialize)]
-enum ChangeType {
-    NoChange,
-    Improved,
-    Regressed,
-}
-
-#[derive(Serialize)]
-struct ChangeDetails {
-    mean: ConfidenceInterval,
-    median: ConfidenceInterval,
-
-    change: ChangeType,
-}
-
-#[derive(Serialize)]
-struct BenchmarkComplete {
-    id: String,
-    report_directory: String,
-    iteration_count: Vec<u64>,
-    measured_values: Vec<f64>,
-    unit: String,
-
-    throughput: Vec<Throughput>,
-
-    typical: ConfidenceInterval,
-    mean: ConfidenceInterval,
-    median: ConfidenceInterval,
-    median_abs_dev: ConfidenceInterval,
-    slope: Option<ConfidenceInterval>,
-
-    change: Option<ChangeDetails>,
-}
-impl Message for BenchmarkComplete {
-    fn reason() -> &'static str {
-        "benchmark-complete"
-    }
-}
-
-#[derive(Serialize)]
-struct BenchmarkGroupComplete {
-    group_name: String,
-    benchmarks: Vec<String>,
-    report_directory: String,
-}
-impl Message for BenchmarkGroupComplete {
-    fn reason() -> &'static str {
-        "group-complete"
-    }
-}
-
-pub struct JsonMessageReport;
-impl JsonMessageReport {
-    fn send_message<M: Message>(&self, message: M) {
-        fn do_send<M: Message>(message: M) -> Result<()> {
-            // Format the message to string
-            let message_text = serde_json::to_string(&message)?;
-            assert!(message_text.starts_with('{'));
-
-            let reason = json!(M::reason());
-
-            // Concatenate that into the message
-            writeln!(stdout(), "{{\"reason\":{},{}", reason, &message_text[1..])?;
-            Ok(())
-        }
-        if let Err(e) = do_send(message) {
-            error!("Unexpected error writing JSON message: {:?}", e)
-        }
-    }
-}
-impl Report for JsonMessageReport {
-    fn measurement_complete(
-        &self,
-        id: &BenchmarkId,
-        context: &ReportContext,
-        measurements: &MeasurementData<'_>,
-        formatter: &ValueFormatter,
-    ) {
-        let mut measured_values = measurements.sample_times().to_vec();
-        let unit = formatter.scale_for_machines(&mut measured_values);
-
-        let iteration_count: Vec<u64> = measurements
-            .iter_counts()
-            .iter()
-            .map(|count| *count as u64)
-            .collect();
-
-        let message = BenchmarkComplete {
-            id: id.as_title().to_owned(),
-            report_directory: path!(&context.output_directory, id.as_directory_name())
-                .display()
-                .to_string(),
-            iteration_count,
-            measured_values,
-            unit,
-
-            throughput: measurements
-                .throughput
-                .iter()
-                .map(Throughput::from)
-                .collect(),
-
-            typical: ConfidenceInterval::from_estimate(
-                measurements.absolute_estimates.typical(),
-                formatter,
-            ),
-            mean: ConfidenceInterval::from_estimate(
-                &measurements.absolute_estimates.mean,
-                formatter,
-            ),
-            median: ConfidenceInterval::from_estimate(
-                &measurements.absolute_estimates.median,
-                formatter,
-            ),
-            median_abs_dev: ConfidenceInterval::from_estimate(
-                &measurements.absolute_estimates.median_abs_dev,
-                formatter,
-            ),
-            slope: measurements
-                .absolute_estimates
-                .slope
-                .as_ref()
-                .map(|slope| ConfidenceInterval::from_estimate(slope, formatter)),
-            change: measurements.comparison.as_ref().map(|comparison| {
-                let different_mean = comparison.p_value < comparison.significance_threshold;
-                let mean_est = &comparison.relative_estimates.mean;
-
-                let change = if !different_mean {
-                    ChangeType::NoChange
-                } else {
-                    let comparison = compare_to_threshold(mean_est, comparison.noise_threshold);
-                    match comparison {
-                        ComparisonResult::Improved => ChangeType::Improved,
-                        ComparisonResult::Regressed => ChangeType::Regressed,
-                        ComparisonResult::NonSignificant => ChangeType::NoChange,
-                    }
-                };
-
-                ChangeDetails {
-                    mean: ConfidenceInterval::from_percent(&comparison.relative_estimates.mean),
-                    median: ConfidenceInterval::from_percent(&comparison.relative_estimates.median),
-                    change,
-                }
-            }),
-        };
-
-        self.send_message(message);
-    }
-
-    fn summarize(
-        &self,
-        context: &ReportContext,
-        group_id: &str,
-        benchmark_group: &BenchmarkGroup,
-        _formatter: &ValueFormatter,
-    ) {
-        let message = BenchmarkGroupComplete {
-            group_name: group_id.to_owned(),
-            benchmarks: benchmark_group
-                .benchmarks
-                .keys()
-                .map(|id| id.as_title().to_owned())
-                .collect(),
-            report_directory: path!(
-                &context.output_directory,
-                BenchmarkId::new(group_id.to_owned(), None, None, None).as_directory_name()
-            )
-            .display()
-            .to_string(),
-        };
-
-        self.send_message(message);
-    }
-}
+use crate::connection::Throughput as ThroughputEnum;
+use crate::model::BenchmarkGroup;
+use crate::report::{
+    compare_to_threshold, BenchmarkId, ComparisonResult, MeasurementData, Report, ReportContext,
+};
+use crate::value_formatter::ValueFormatter;
+use anyhow::Result;
+use serde_derive::Serialize;
+use serde_json::json;
+use std::io::{stdout, Write};
+
+use super::ConfidenceInterval;
+
+trait Message: serde::ser::Serialize {
+    fn reason() -> &'static str;
+}
+
+#[derive(Serialize)]
+struct Throughput {
+    per_iteration: u64,
+    unit: String,
+}
+impl From<&ThroughputEnum> for Throughput {
+    fn from(other: &ThroughputEnum) -> Self {
+        match other {
+            ThroughputEnum::Bytes(bytes) => Throughput {
+                per_iteration: *bytes,
+                unit: "bytes".to_owned(),
+            },
+            ThroughputEnum::Elements(elements) => Throughput {
+                per_iteration: *elements,
+                unit: "elements".to_owned(),
+            },
+        }
+    }
+}
+
+#[derive(Serialize)]
+enum ChangeType {
+    NoChange,
+    Improved,
+    Regressed,
+}
+
+#[derive(Serialize)]
+struct ChangeDetails {
+    mean: ConfidenceInterval,
+    median: ConfidenceInterval,
+
+    change: ChangeType,
+}
+
+#[derive(Serialize)]
+struct BenchmarkComplete {
+    id: String,
+    report_directory: String,
+    iteration_count: Vec<u64>,
+    measured_values: Vec<f64>,
+    unit: String,
+
+    throughput: Vec<Throughput>,
+
+    typical: ConfidenceInterval,
+    mean: ConfidenceInterval,
+    median: ConfidenceInterval,
+    median_abs_dev: ConfidenceInterval,
+    slope: Option<ConfidenceInterval>,
+
+    change: Option<ChangeDetails>,
+}
+impl Message for BenchmarkComplete {
+    fn reason() -> &'static str {
+        "benchmark-complete"
+    }
+}
+
+#[derive(Serialize)]
+struct BenchmarkGroupComplete {
+    group_name: String,
+    benchmarks: Vec<String>,
+    report_directory: String,
+}
+impl Message for BenchmarkGroupComplete {
+    fn reason() -> &'static str {
+        "group-complete"
+    }
+}
+
+pub struct JsonMessageReport;
+impl JsonMessageReport {
+    fn send_message<M: Message>(&self, message: M) {
+        fn do_send<M: Message>(message: M) -> Result<()> {
+            // Format the message to string
+            let message_text = serde_json::to_string(&message)?;
+            assert!(message_text.starts_with('{'));
+
+            let reason = json!(M::reason());
+
+            // Concatenate that into the message
+            writeln!(stdout(), "{{\"reason\":{},{}", reason, &message_text[1..])?;
+            Ok(())
+        }
+        if let Err(e) = do_send(message) {
+            error!("Unexpected error writing JSON message: {:?}", e)
+        }
+    }
+}
+impl Report for JsonMessageReport {
+    fn measurement_complete(
+        &self,
+        id: &BenchmarkId,
+        context: &ReportContext,
+        measurements: &MeasurementData<'_>,
+        formatter: &ValueFormatter,
+    ) {
+        let mut measured_values = measurements.sample_times().to_vec();
+        let unit = formatter.scale_for_machines(&mut measured_values);
+
+        let iteration_count: Vec<u64> = measurements
+            .iter_counts()
+            .iter()
+            .map(|count| *count as u64)
+            .collect();
+
+        let message = BenchmarkComplete {
+            id: id.as_title().to_owned(),
+            report_directory: path!(&context.output_directory, id.as_directory_name())
+                .display()
+                .to_string(),
+            iteration_count,
+            measured_values,
+            unit,
+
+            throughput: measurements
+                .throughput
+                .iter()
+                .map(Throughput::from)
+                .collect(),
+
+            typical: ConfidenceInterval::from_estimate(
+                measurements.absolute_estimates.typical(),
+                formatter,
+            ),
+            mean: ConfidenceInterval::from_estimate(
+                &measurements.absolute_estimates.mean,
+                formatter,
+            ),
+            median: ConfidenceInterval::from_estimate(
+                &measurements.absolute_estimates.median,
+                formatter,
+            ),
+            median_abs_dev: ConfidenceInterval::from_estimate(
+                &measurements.absolute_estimates.median_abs_dev,
+                formatter,
+            ),
+            slope: measurements
+                .absolute_estimates
+                .slope
+                .as_ref()
+                .map(|slope| ConfidenceInterval::from_estimate(slope, formatter)),
+            change: measurements.comparison.as_ref().map(|comparison| {
+                let different_mean = comparison.p_value < comparison.significance_threshold;
+                let mean_est = &comparison.relative_estimates.mean;
+
+                let change = if !different_mean {
+                    ChangeType::NoChange
+                } else {
+                    let comparison = compare_to_threshold(mean_est, comparison.noise_threshold);
+                    match comparison {
+                        ComparisonResult::Improved => ChangeType::Improved,
+                        ComparisonResult::Regressed => ChangeType::Regressed,
+                        ComparisonResult::NonSignificant => ChangeType::NoChange,
+                    }
+                };
+
+                ChangeDetails {
+                    mean: ConfidenceInterval::from_percent(&comparison.relative_estimates.mean),
+                    median: ConfidenceInterval::from_percent(&comparison.relative_estimates.median),
+                    change,
+                }
+            }),
+        };
+
+        self.send_message(message);
+    }
+
+    fn summarize(
+        &self,
+        context: &ReportContext,
+        group_id: &str,
+        benchmark_group: &BenchmarkGroup,
+        _formatter: &ValueFormatter,
+    ) {
+        let message = BenchmarkGroupComplete {
+            group_name: group_id.to_owned(),
+            benchmarks: benchmark_group
+                .benchmarks
+                .keys()
+                .map(|id| id.as_title().to_owned())
+                .collect(),
+            report_directory: path!(
+                &context.output_directory,
+                BenchmarkId::new(group_id.to_owned(), None, None, None).as_directory_name()
+            )
+            .display()
+            .to_string(),
+        };
+
+        self.send_message(message);
+    }
+}
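For reference, each line the (otherwise unchanged) JSON reporter writes to stdout is one self-contained object tagged by its "reason" field. A hypothetical consumer is sketched below; the field names ("reason", "id", "typical", "group_name") come from the structs above, but the program itself is only an illustration and assumes a serde_json dependency:

    use serde_json::Value;
    use std::io::{self, BufRead};

    fn main() -> io::Result<()> {
        // Each stdin line is one message emitted by JsonMessageReport.
        for line in io::stdin().lock().lines() {
            let msg: Value = serde_json::from_str(&line?).expect("valid JSON message");
            match msg["reason"].as_str() {
                // Sent once per finished benchmark.
                Some("benchmark-complete") => println!(
                    "{} -> {} {}",
                    msg["id"], msg["typical"]["estimate"], msg["typical"]["unit"]
                ),
                // Sent once per finished benchmark group.
                Some("group-complete") => println!("group {} done", msg["group_name"]),
                _ => {}
            }
        }
        Ok(())
    }
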
diff --git a/src/message_formats/mod.rs b/src/message_formats/mod.rs
index fc2426a..397d10f 100644
--- a/src/message_formats/mod.rs
+++ b/src/message_formats/mod.rs
@@ -1,12 +1,89 @@
-mod json;
-use crate::config::{MessageFormat, SelfConfig};
-
-use self::json::JsonMessageReport;
-
-pub fn create_machine_report(self_config: &SelfConfig) -> Option<JsonMessageReport> {
-    if let Some(MessageFormat::Json) = self_config.message_format {
-        Some(JsonMessageReport)
-    } else {
-        None
-    }
-}
+mod json;
+mod openmetrics;
+
+use crate::config::{MessageFormat, SelfConfig};
+use crate::estimate::Estimate;
+use crate::report::Report;
+use crate::value_formatter::ValueFormatter;
+use serde_derive::Serialize;
+
+use self::json::JsonMessageReport;
+use self::openmetrics::OpenMetricsMessageReport;
+
+#[derive(Serialize)]
+struct ConfidenceInterval {
+    estimate: f64,
+    lower_bound: f64,
+    upper_bound: f64,
+    unit: String,
+}
+impl ConfidenceInterval {
+    fn from_estimate(estimate: &Estimate, value_formatter: &ValueFormatter) -> ConfidenceInterval {
+        let mut array = [
+            estimate.point_estimate,
+            estimate.confidence_interval.lower_bound,
+            estimate.confidence_interval.upper_bound,
+        ];
+        let unit = value_formatter.scale_for_machines(&mut array);
+        let [estimate, lower_bound, upper_bound] = array;
+        ConfidenceInterval {
+            estimate,
+            lower_bound,
+            upper_bound,
+            unit,
+        }
+    }
+    fn from_percent(estimate: &Estimate) -> ConfidenceInterval {
+        ConfidenceInterval {
+            estimate: estimate.point_estimate,
+            lower_bound: estimate.confidence_interval.lower_bound,
+            upper_bound: estimate.confidence_interval.upper_bound,
+            unit: "%".to_owned(),
+        }
+    }
+}
+
+pub enum MessageReport {
+    Json(JsonMessageReport),
+    OpenMetrics(OpenMetricsMessageReport),
+}
+impl Report for MessageReport {
+    fn measurement_complete(
+        &self,
+        id: &crate::report::BenchmarkId,
+        context: &crate::report::ReportContext,
+        measurements: &crate::report::MeasurementData<'_>,
+        formatter: &crate::value_formatter::ValueFormatter,
+    ) {
+        match self {
+            Self::Json(report) => report.measurement_complete(id, context, measurements, formatter),
+            Self::OpenMetrics(report) => {
+                report.measurement_complete(id, context, measurements, formatter)
+            }
+        }
+    }
+
+    fn summarize(
+        &self,
+        context: &crate::report::ReportContext,
+        group_id: &str,
+        benchmark_group: &crate::model::BenchmarkGroup,
+        formatter: &crate::value_formatter::ValueFormatter,
+    ) {
+        match self {
+            Self::Json(report) => report.summarize(context, group_id, benchmark_group, formatter),
+            Self::OpenMetrics(report) => {
+                report.summarize(context, group_id, benchmark_group, formatter)
+            }
+        }
+    }
+}
+
+pub fn create_machine_report(self_config: &SelfConfig) -> Option<MessageReport> {
+    match self_config.message_format {
+        Some(MessageFormat::Json) => Some(MessageReport::Json(JsonMessageReport)),
+        Some(MessageFormat::OpenMetrics) => {
+            Some(MessageReport::OpenMetrics(OpenMetricsMessageReport))
+        }
+        None => None,
+    }
+}
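The MessageReport enum above lets callers hold one concrete type while the compiler statically dispatches to whichever reporter was requested, avoiding a Box<dyn Report> trait object. A self-contained toy version of the same pattern (illustrative names only, not the crate's real types):

    trait Emit {
        fn emit(&self, value: f64);
    }

    struct Json;
    impl Emit for Json {
        fn emit(&self, value: f64) {
            println!("{{\"value\":{}}}", value);
        }
    }

    struct OpenMetrics;
    impl Emit for OpenMetrics {
        fn emit(&self, value: f64) {
            println!("demo_metric {}", value);
        }
    }

    // The enum implements the trait by matching on itself and forwarding,
    // the same shape as MessageReport::measurement_complete above.
    enum AnyEmit {
        Json(Json),
        OpenMetrics(OpenMetrics),
    }
    impl Emit for AnyEmit {
        fn emit(&self, value: f64) {
            match self {
                AnyEmit::Json(e) => e.emit(value),
                AnyEmit::OpenMetrics(e) => e.emit(value),
            }
        }
    }

    fn main() {
        AnyEmit::OpenMetrics(OpenMetrics).emit(42.0);
    }
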
diff --git a/src/message_formats/openmetrics.rs b/src/message_formats/openmetrics.rs
new file mode 100644
index 0000000..8bd6fa7
--- /dev/null
+++ b/src/message_formats/openmetrics.rs
@@ -0,0 +1,107 @@
+use crate::report::{BenchmarkId, MeasurementData, Report, ReportContext};
+use crate::value_formatter::ValueFormatter;
+
+use super::ConfidenceInterval;
+
+pub struct OpenMetricsMessageReport;
+
+impl OpenMetricsMessageReport {
+    fn print_confidence_interval(id: &BenchmarkId, metric: &ConfidenceInterval, name: &str) {
+        let mut labels = vec![];
+
+        if let Some(func) = &id.function_id {
+            labels.push(("function", func.clone()));
+        }
+
+        if let Some(value) = &id.value_str {
+            labels.push(("input_size", value.clone()));
+        }
+
+        labels.push(("aggregation", name.to_owned()));
+
+        let labels = labels
+            .into_iter()
+            .map(|(key, value)| format!("{}=\"{}\"", key, value))
+            .collect::<Vec<_>>()
+            .join(",");
+
+        println!(
+            "criterion_benchmark_result_{}{{id=\"{}\",confidence=\"estimate\",{}}} {}",
+            metric.unit, id.group_id, labels, metric.estimate
+        );
+        println!(
+            "criterion_benchmark_result_{}{{id=\"{}\",confidence=\"upper_bound\",{}}} {}",
+            metric.unit, id.group_id, labels, metric.upper_bound
+        );
+        println!(
+            "criterion_benchmark_result_{}{{id=\"{}\",confidence=\"lower_bound\",{}}} {}",
+            metric.unit, id.group_id, labels, metric.lower_bound
+        );
+    }
+}
+
+impl Report for OpenMetricsMessageReport {
+    fn measurement_complete(
+        &self,
+        id: &BenchmarkId,
+        context: &ReportContext,
+        measurements: &MeasurementData<'_>,
+        formatter: &ValueFormatter,
+    ) {
+        Self::print_confidence_interval(
+            id,
+            &ConfidenceInterval::from_estimate(
+                measurements.absolute_estimates.typical(),
+                formatter,
+            ),
+            "typical",
+        );
+        Self::print_confidence_interval(
+            id,
+            &ConfidenceInterval::from_estimate(&measurements.absolute_estimates.mean, formatter),
+            "mean",
+        );
+        Self::print_confidence_interval(
+            id,
+            &ConfidenceInterval::from_estimate(&measurements.absolute_estimates.median, formatter),
+            "median",
+        );
+        Self::print_confidence_interval(
+            id,
+            &ConfidenceInterval::from_estimate(
+                &measurements.absolute_estimates.median_abs_dev,
+                formatter,
+            ),
+            "median_abs_dev",
+        );
+
+        if let Some(slope) = measurements
+            .absolute_estimates
+            .slope
+            .as_ref()
+            .map(|slope| ConfidenceInterval::from_estimate(slope, formatter))
+        {
+            Self::print_confidence_interval(id, &slope, "slope");
+        }
+
+        let input_size = if let Some(input_size) = &id.value_str {
+            format!("input_size=\"{}\",", input_size)
+        } else {
+            "".into()
+        };
+
+        let function = if let Some(function) = &id.function_id {
+            format!("function=\"{}\",", function)
+        } else {
+            "".into()
+        };
+
+        println!(
+            "criterion_benchmark_info{{id=\"{}\",{}{}report_directory=\"{}\"}} 1",
+            id.group_id,
+            input_size,
+            function,
+            path!(&context.output_directory, id.as_directory_name()).display()
+        );
+    }
+}
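One caveat on the exposition above: print_confidence_interval interpolates benchmark IDs and label values verbatim. The Prometheus/OpenMetrics text format requires backslash, double quote, and line feed to be escaped inside quoted label values, and a strict OpenMetrics parser additionally expects # TYPE metadata and a terminating # EOF line, which this reporter does not emit (plain Prometheus scrapers are more lenient). A hedged sketch of the escaping a follow-up could add; the helper name is hypothetical, not part of this patch:

    // Hypothetical helper: escape a string for use inside a double-quoted
    // OpenMetrics/Prometheus label value.
    fn escape_label_value(raw: &str) -> String {
        raw.replace('\\', "\\\\") // escape backslashes first to avoid double-escaping
            .replace('"', "\\\"")
            .replace('\n', "\\n")
    }

    fn main() {
        let id = "scan \"fast\" path";
        println!(
            "criterion_benchmark_info{{id=\"{}\"}} 1",
            escape_label_value(id)
        );
        // -> criterion_benchmark_info{id="scan \"fast\" path"} 1
    }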