From 55617fa7831204620b7ae3c49ba34488c9d5153f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Mon, 21 Oct 2024 13:05:12 +0200 Subject: [PATCH 01/42] Work in progress: new end-of-test summary Co-authored-by: oleiade --- js/runner.go | 172 ++++++++- js/summary-wrapper.js | 5 +- js/summary.js | 787 +++++++++++++++++++++++------------------- 3 files changed, 609 insertions(+), 355 deletions(-) diff --git a/js/runner.go b/js/runner.go index 2a21eab6ef6..e2f4fd2039a 100644 --- a/js/runner.go +++ b/js/runner.go @@ -348,8 +348,175 @@ func (r *Runner) IsExecutable(name string) bool { return exists } +type MetricData struct { + Type string + Contains string + Values map[string]float64 +} + +func NewMetricsDataFrom(summary *lib.Summary, m *metrics.Metric, summaryTrendStats []string) MetricData { + // TODO: we obtain this from [options.SummaryTrendStats] which is a string slice + getMetricValues := metricValueGetter(summaryTrendStats) + + return MetricData{ + Type: m.Type.String(), + Contains: m.Contains.String(), + Values: getMetricValues(m.Sink, summary.TestRunDuration), + } +} + +type ReportMetrics struct { + // HTTP contains report data specific to HTTP metrics and is used + // to produce the summary HTTP subsection's content. + HTTP map[string]MetricData + // Execution contains report data specific to Execution metrics and is used + // to produce the summary Execution subsection's content. + Execution map[string]MetricData + // Network contains report data specific to Network metrics and is used + // to produce the summary Network subsection's content. + Network map[string]MetricData + + Browser map[string]MetricData + + // FIXME: WebVitals are a browser metrics too, and we might want to move them in a browser subsection + // of the report + WebVitals map[string]MetricData + + // Miscellaneous contains user-defined metric results as well as extensions metrics + Miscellaneous map[string]MetricData +} + +type ReportChecksMetrics struct { + Total MetricData `js:"checks_total"` + Success MetricData `js:"checks_succeeded"` + Fail MetricData `js:"checks_failed"` +} + +type ReportChecks struct { + Metrics ReportChecksMetrics + OrderedChecks []*lib.Check +} + +type Report struct { + Metrics ReportMetrics + Checks ReportChecks + Groups []Report + Scenarios []Report +} + +func NewReport() Report { + initMetricData := func(t metrics.MetricType) MetricData { + return MetricData{ + Type: t.String(), + Contains: metrics.Default.String(), + Values: make(map[string]float64), + } + } + + return Report{ + Metrics: ReportMetrics{ + HTTP: make(map[string]MetricData), + Execution: make(map[string]MetricData), + Network: make(map[string]MetricData), + Browser: make(map[string]MetricData), + WebVitals: make(map[string]MetricData), + Miscellaneous: make(map[string]MetricData), + }, + Checks: ReportChecks{ + Metrics: ReportChecksMetrics{ + Total: initMetricData(metrics.Counter), + Success: initMetricData(metrics.Rate), + Fail: initMetricData(metrics.Rate), + }, + }, + } +} + +func isHTTPMetric(m *metrics.Metric) bool { + return oneOfMetrics(m, + metrics.HTTPReqsName, + metrics.HTTPReqFailedName, + metrics.HTTPReqDurationName, + metrics.HTTPReqBlockedName, + metrics.HTTPReqConnectingName, + metrics.HTTPReqTLSHandshakingName, + metrics.HTTPReqSendingName, + metrics.HTTPReqWaitingName, + metrics.HTTPReqReceivingName, + ) +} + +func isExecutionMetric(m *metrics.Metric) bool { + return oneOfMetrics(m, metrics.VUsName, + metrics.VUsMaxName, + metrics.IterationsName, + 
metrics.IterationDurationName, + metrics.DroppedIterationsName, + ) +} + +func isNetworkMetric(m *metrics.Metric) bool { + return oneOfMetrics(m, metrics.DataSentName, metrics.DataReceivedName) +} + +func isBrowserMetric(m *metrics.Metric) bool { + return strings.HasPrefix(m.Name, "browser_") && !isWebVitalsMetric(m) +} + +func isWebVitalsMetric(m *metrics.Metric) bool { + return strings.HasPrefix(m.Name, "browser_web_vital_") +} + +func oneOfMetrics(m *metrics.Metric, values ...string) bool { + for _, v := range values { + if m.Name == v { + return true + } + } + return false +} + +// TODO: it would be nicer to only receive summary-specific options here, as opposed to the whole [lib.Options] struct +func NewReportFrom(summary *lib.Summary, options lib.Options) Report { + report := NewReport() + + for _, m := range summary.Metrics { + switch { + case isHTTPMetric(m): + report.Metrics.HTTP[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) + case isExecutionMetric(m): + report.Metrics.Execution[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) + case isNetworkMetric(m): + report.Metrics.Network[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) + case isBrowserMetric(m): + report.Metrics.Browser[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) + case isWebVitalsMetric(m): + report.Metrics.WebVitals[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) + default: + report.Metrics.Miscellaneous[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) + } + } + + totalChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Total) + successChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Trues) + + report.Checks.Metrics.Total.Values["count"] = totalChecks // Counter metric with total checks + report.Checks.Metrics.Total.Values["rate"] = 0.0 // TODO: Calculate based on summary.TotalTestDuration + report.Checks.Metrics.Success = NewMetricsDataFrom(summary, summary.Metrics[metrics.ChecksName], options.SummaryTrendStats) // Rate metric with successes (equivalent to the 'checks' metric) + report.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks + report.Checks.Metrics.Fail.Values["fails"] = successChecks + report.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks + + report.Checks.OrderedChecks = summary.RootGroup.OrderedChecks + + return report +} + // HandleSummary calls the specified summary callback, if supplied. func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[string]io.Reader, error) { + reportData := NewReportFrom(summary, r.Bundle.Options) + fmt.Println(reportData) + summaryDataForJS := summarizeMetricsToObject(summary, r.Bundle.Options, r.setupData) out := make(chan metrics.SampleContainer, 100) @@ -360,7 +527,7 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s } }() - summaryCtx, cancel := context.WithTimeout(ctx, r.getTimeoutFor(consts.HandleSummaryFn)) + summaryCtx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() vu, err := r.newVU(summaryCtx, 0, 0, out) @@ -375,7 +542,7 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s vu.moduleVUImpl.ctx = summaryCtx callbackResult := sobek.Undefined() - fn := vu.getExported(consts.HandleSummaryFn) + fn := vu.getExported(consts.HandleSummaryFn) // TODO: rename to UserDefinedHandleSummaryFn? 
if fn != nil { handleSummaryFn, ok := sobek.AssertFunction(fn) if !ok { @@ -403,6 +570,7 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s callbackResult, vu.Runtime.ToValue(r.Bundle.preInitState.RuntimeOptions.SummaryExport.String), vu.Runtime.ToValue(summaryDataForJS), + vu.Runtime.ToValue(reportData), } rawResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryWrapper, nil, wrapperArgs...) diff --git a/js/summary-wrapper.js b/js/summary-wrapper.js index 2ec8f65a5a5..7f876bf6c94 100644 --- a/js/summary-wrapper.js +++ b/js/summary-wrapper.js @@ -1,4 +1,5 @@ (function () { + // TODO: Find a better name, more descriptive more this variable. var jslib = {}; (function (module, exports) { /*JSLIB_SUMMARY_CODE*/; @@ -59,12 +60,12 @@ return JSON.stringify(results, null, 4); }; - return function (summaryCallbackResult, jsonSummaryPath, data) { + return function (summaryCallbackResult, jsonSummaryPath, data, report) { var result = summaryCallbackResult; if (!result) { var enableColors = (!data.options.noColor && data.state.isStdOutTTY); result = { - 'stdout': '\n' + jslib.textSummary(data, {indent: ' ', enableColors: enableColors}) + '\n\n', + 'stdout': '\n' + jslib.textSummary(data, {indent: ' ', enableColors: enableColors}, report) + '\n\n', }; } diff --git a/js/summary.js b/js/summary.js index 93b2a7a95dc..e7e5c0670ab 100644 --- a/js/summary.js +++ b/js/summary.js @@ -1,20 +1,20 @@ var forEach = function (obj, callback) { - for (var key in obj) { - if (obj.hasOwnProperty(key)) { - if (callback(key, obj[key])) { - break - } - } - } + for (var key in obj) { + if (obj.hasOwnProperty(key)) { + if (callback(key, obj[key])) { + break + } + } + } } var palette = { - bold: 1, - faint: 2, - red: 31, - green: 32, - cyan: 36, - //TODO: add others? + bold: 1, + faint: 2, + red: 31, + green: 32, + cyan: 36, + //TODO: add others? } var groupPrefix = '█' @@ -22,391 +22,476 @@ var detailsPrefix = '↳' var succMark = '✓' var failMark = '✗' var defaultOptions = { - indent: ' ', - enableColors: true, - summaryTimeUnit: null, - summaryTrendStats: null, + indent: ' ', + enableColors: true, + summaryTimeUnit: null, + summaryTrendStats: null, + sortByName: true, } // strWidth tries to return the actual width the string will take up on the // screen, without any terminal formatting, unicode ligatures, etc. function strWidth(s) { - // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ - var data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better - var inEscSeq = false - var inLongEscSeq = false - var width = 0 - for (var char of data) { - if (char.done) { - break - } - - // Skip over ANSI escape codes. - if (char == '\x1b') { - inEscSeq = true - continue - } - if (inEscSeq && char == '[') { - inLongEscSeq = true - continue - } - if (inEscSeq && inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x7e) { - inEscSeq = false - inLongEscSeq = false - continue - } - if (inEscSeq && !inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x5f) { - inEscSeq = false - continue - } - - if (!inEscSeq && !inLongEscSeq) { - width++ - } - } - return width + // TODO: determine if NFC or NFKD are not more appropriate? or just give up? 
https://hsivonen.fi/string-length/ + var data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better + var inEscSeq = false + var inLongEscSeq = false + var width = 0 + for (var char of data) { + if (char.done) { + break + } + + // Skip over ANSI escape codes. + if (char == '\x1b') { + inEscSeq = true + continue + } + if (inEscSeq && char == '[') { + inLongEscSeq = true + continue + } + if (inEscSeq && inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x7e) { + inEscSeq = false + inLongEscSeq = false + continue + } + if (inEscSeq && !inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x5f) { + inEscSeq = false + continue + } + + if (!inEscSeq && !inLongEscSeq) { + width++ + } + } + return width } function summarizeCheck(indent, check, decorate) { - if (check.fails == 0) { - return decorate(indent + succMark + ' ' + check.name, palette.green) - } - - var succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) - return decorate( - indent + - failMark + - ' ' + - check.name + - '\n' + - indent + - ' ' + - detailsPrefix + - ' ' + - succPercent + - '% — ' + - succMark + - ' ' + - check.passes + - ' / ' + - failMark + - ' ' + - check.fails, - palette.red - ) + if (check.fails == 0) { + return decorate(indent + succMark + ' ' + check.name, palette.green) + } + + var succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) + return decorate( + indent + + failMark + + ' ' + + check.name + + '\n' + + indent + + ' ' + + detailsPrefix + + ' ' + + succPercent + + '% — ' + + succMark + + ' ' + + check.passes + + ' / ' + + failMark + + ' ' + + check.fails, + palette.red + ) } function summarizeGroup(indent, group, decorate) { - var result = [] - if (group.name != '') { - result.push(indent + groupPrefix + ' ' + group.name + '\n') - indent = indent + ' ' - } - - for (var i = 0; i < group.checks.length; i++) { - result.push(summarizeCheck(indent, group.checks[i], decorate)) - } - if (group.checks.length > 0) { - result.push('') - } - for (var i = 0; i < group.groups.length; i++) { - Array.prototype.push.apply(result, summarizeGroup(indent, group.groups[i], decorate)) - } - - return result + var result = [] + if (group.name != '') { + result.push(indent + groupPrefix + ' ' + group.name + '\n') + indent = indent + ' ' + } + + for (var i = 0; i < group.checks.length; i++) { + result.push(summarizeCheck(indent, group.checks[i], decorate)) + } + if (group.checks.length > 0) { + result.push('') + } + for (var i = 0; i < group.groups.length; i++) { + Array.prototype.push.apply(result, summarizeGroup(indent, group.groups[i], decorate)) + } + + return result } function displayNameForMetric(name) { - var subMetricPos = name.indexOf('{') - if (subMetricPos >= 0) { - return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }' - } - return name + var subMetricPos = name.indexOf('{') + if (subMetricPos >= 0) { + return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }' + } + return name } function indentForMetric(name) { - if (name.indexOf('{') >= 0) { - return ' ' - } - return '' + if (name.indexOf('{') >= 0) { + return '' + } + return '' } function humanizeBytes(bytes) { - var units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] - var base = 1000 - if (bytes < 10) { - return bytes + ' B' - } - - var e = Math.floor(Math.log(bytes) / Math.log(base)) - var suffix = units[e | 0] - var val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10 - return val.toFixed(val < 10 ? 
1 : 0) + ' ' + suffix + var units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] + var base = 1000 + if (bytes < 10) { + return bytes + ' B' + } + + var e = Math.floor(Math.log(bytes) / Math.log(base)) + var suffix = units[e | 0] + var val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10 + return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix } var unitMap = { - s: { unit: 's', coef: 0.001 }, - ms: { unit: 'ms', coef: 1 }, - us: { unit: 'µs', coef: 1000 }, + s: {unit: 's', coef: 0.001}, + ms: {unit: 'ms', coef: 1}, + us: {unit: 'µs', coef: 1000}, } function toFixedNoTrailingZeros(val, prec) { - // TODO: figure out something better? - return parseFloat(val.toFixed(prec)).toString() + // TODO: figure out something better? + return parseFloat(val.toFixed(prec)).toString() } function toFixedNoTrailingZerosTrunc(val, prec) { - var mult = Math.pow(10, prec) - return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec) + var mult = Math.pow(10, prec) + return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec) } function humanizeGenericDuration(dur) { - if (dur === 0) { - return '0s' - } - - if (dur < 0.001) { - // smaller than a microsecond, print nanoseconds - return Math.trunc(dur * 1000000) + 'ns' - } - if (dur < 1) { - // smaller than a millisecond, print microseconds - return toFixedNoTrailingZerosTrunc(dur * 1000, 2) + 'µs' - } - if (dur < 1000) { - // duration is smaller than a second - return toFixedNoTrailingZerosTrunc(dur, 2) + 'ms' - } - - var result = toFixedNoTrailingZerosTrunc((dur % 60000) / 1000, dur > 60000 ? 0 : 2) + 's' - var rem = Math.trunc(dur / 60000) - if (rem < 1) { - // less than a minute - return result - } - result = (rem % 60) + 'm' + result - rem = Math.trunc(rem / 60) - if (rem < 1) { - // less than an hour - return result - } - return rem + 'h' + result + if (dur === 0) { + return '0s' + } + + if (dur < 0.001) { + // smaller than a microsecond, print nanoseconds + return Math.trunc(dur * 1000000) + 'ns' + } + if (dur < 1) { + // smaller than a millisecond, print microseconds + return toFixedNoTrailingZerosTrunc(dur * 1000, 2) + 'µs' + } + if (dur < 1000) { + // duration is smaller than a second + return toFixedNoTrailingZerosTrunc(dur, 2) + 'ms' + } + + var result = toFixedNoTrailingZerosTrunc((dur % 60000) / 1000, dur > 60000 ? 
0 : 2) + 's' + var rem = Math.trunc(dur / 60000) + if (rem < 1) { + // less than a minute + return result + } + result = (rem % 60) + 'm' + result + rem = Math.trunc(rem / 60) + if (rem < 1) { + // less than an hour + return result + } + return rem + 'h' + result } function humanizeDuration(dur, timeUnit) { - if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { - return (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit - } + if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { + return (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit + } - return humanizeGenericDuration(dur) + return humanizeGenericDuration(dur) } function humanizeValue(val, metric, timeUnit) { - if (metric.type == 'rate') { - // Truncate instead of round when decreasing precision to 2 decimal places - return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%' - } - - switch (metric.contains) { - case 'data': - return humanizeBytes(val) - case 'time': - return humanizeDuration(val, timeUnit) - default: - return toFixedNoTrailingZeros(val, 6) - } + if (metric.type == 'rate') { + // Truncate instead of round when decreasing precision to 2 decimal places + return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%' + } + + switch (metric.contains) { + case 'data': + return humanizeBytes(val) + case 'time': + return humanizeDuration(val, timeUnit) + default: + return toFixedNoTrailingZeros(val, 6) + } } function nonTrendMetricValueForSum(metric, timeUnit) { - switch (metric.type) { - case 'counter': - return [ - humanizeValue(metric.values.count, metric, timeUnit), - humanizeValue(metric.values.rate, metric, timeUnit) + '/s', - ] - case 'gauge': - return [ - humanizeValue(metric.values.value, metric, timeUnit), - 'min=' + humanizeValue(metric.values.min, metric, timeUnit), - 'max=' + humanizeValue(metric.values.max, metric, timeUnit), - ] - case 'rate': - return [ - humanizeValue(metric.values.rate, metric, timeUnit), - `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, - ] - default: - return ['[no data]'] - } + switch (metric.type) { + case 'counter': + return [ + humanizeValue(metric.values.count, metric, timeUnit), + humanizeValue(metric.values.rate, metric, timeUnit) + '/s', + ] + case 'gauge': + return [ + humanizeValue(metric.values.value, metric, timeUnit), + 'min=' + humanizeValue(metric.values.min, metric, timeUnit), + 'max=' + humanizeValue(metric.values.max, metric, timeUnit), + ] + case 'rate': + return [ + humanizeValue(metric.values.rate, metric, timeUnit), + `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, + ] + default: + return ['[no data]'] + } } function summarizeMetrics(options, data, decorate) { - var indent = options.indent + ' ' - var result = [] - - var names = [] - var nameLenMax = 0 - - var nonTrendValues = {} - var nonTrendValueMaxLen = 0 - var nonTrendExtras = {} - var nonTrendExtraMaxLens = [0, 0] - - var trendCols = {} - var numTrendColumns = options.summaryTrendStats.length - var trendColMaxLens = new Array(numTrendColumns).fill(0) - forEach(data.metrics, function (name, metric) { - names.push(name) - // When calculating widths for metrics, account for the indentation on submetrics. 
- var displayName = indentForMetric(name) + displayNameForMetric(name) - var displayNameWidth = strWidth(displayName) - if (displayNameWidth > nameLenMax) { - nameLenMax = displayNameWidth - } - - if (metric.type == 'trend') { - var cols = [] - for (var i = 0; i < numTrendColumns; i++) { - var tc = options.summaryTrendStats[i] - var value = metric.values[tc] - if (tc === 'count') { - value = value.toString() - } else { - value = humanizeValue(value, metric, options.summaryTimeUnit) - } - var valLen = strWidth(value) - if (valLen > trendColMaxLens[i]) { - trendColMaxLens[i] = valLen - } - cols[i] = value - } - trendCols[name] = cols - return - } - var values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) - nonTrendValues[name] = values[0] - var valueLen = strWidth(values[0]) - if (valueLen > nonTrendValueMaxLen) { - nonTrendValueMaxLen = valueLen - } - nonTrendExtras[name] = values.slice(1) - for (var i = 1; i < values.length; i++) { - var extraLen = strWidth(values[i]) - if (extraLen > nonTrendExtraMaxLens[i - 1]) { - nonTrendExtraMaxLens[i - 1] = extraLen - } - } - }) - - // sort all metrics but keep sub metrics grouped with their parent metrics - names.sort(function (metric1, metric2) { - var parent1 = metric1.split('{', 1)[0] - var parent2 = metric2.split('{', 1)[0] - var result = parent1.localeCompare(parent2) - if (result !== 0) { - return result - } - var sub1 = metric1.substring(parent1.length) - var sub2 = metric2.substring(parent2.length) - return sub1.localeCompare(sub2) - }) - - var getData = function (name) { - if (trendCols.hasOwnProperty(name)) { - var cols = trendCols[name] - var tmpCols = new Array(numTrendColumns) - for (var i = 0; i < cols.length; i++) { - tmpCols[i] = - options.summaryTrendStats[i] + - '=' + - decorate(cols[i], palette.cyan) + - ' '.repeat(trendColMaxLens[i] - strWidth(cols[i])) - } - return tmpCols.join(' ') - } - - var value = nonTrendValues[name] - var fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) - - var extras = nonTrendExtras[name] - if (extras.length == 1) { - fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) - } else if (extras.length > 1) { - var parts = new Array(extras.length) - for (var i = 0; i < extras.length; i++) { - parts[i] = - decorate(extras[i], palette.cyan, palette.faint) + - ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) - } - fmtData = fmtData + ' ' + parts.join(' ') - } - - return fmtData - } - - for (var name of names) { - var metric = data.metrics[name] - var mark = ' ' - var markColor = function (text) { - return text - } // noop - - if (metric.thresholds) { - mark = succMark - markColor = function (text) { - return decorate(text, palette.green) - } - forEach(metric.thresholds, function (name, threshold) { - if (!threshold.ok) { - mark = failMark - markColor = function (text) { - return decorate(text, palette.red) - } - return true // break - } - }) - } - var fmtIndent = indentForMetric(name) - var fmtName = displayNameForMetric(name) - fmtName = - fmtName + - decorate( - '.'.repeat(nameLenMax - strWidth(fmtName) - strWidth(fmtIndent) + 3) + ':', - palette.faint - ) - - result.push(indent + fmtIndent + markColor(mark) + ' ' + fmtName + ' ' + getData(name)) - } - - return result + var indent = options.indent + ' ' + var result = [] + + var names = [] + var nameLenMax = 0 + + var nonTrendValues = {} + var nonTrendValueMaxLen = 0 + var nonTrendExtras = {} + var nonTrendExtraMaxLens = [0, 0] + + var trendCols = {} + var numTrendColumns 
= options.summaryTrendStats.length + var trendColMaxLens = new Array(numTrendColumns).fill(0) + forEach(data.metrics, function (name, metric) { + names.push(name) + // When calculating widths for metrics, account for the indentation on submetrics. + var displayName = indentForMetric(name) + displayNameForMetric(name) + var displayNameWidth = strWidth(displayName) + if (displayNameWidth > nameLenMax) { + nameLenMax = displayNameWidth + } + + if (metric.type == 'trend') { + var cols = [] + for (var i = 0; i < numTrendColumns; i++) { + var tc = options.summaryTrendStats[i] + var value = metric.values[tc] + if (tc === 'count') { + value = value.toString() + } else { + value = humanizeValue(value, metric, options.summaryTimeUnit) + } + var valLen = strWidth(value) + if (valLen > trendColMaxLens[i]) { + trendColMaxLens[i] = valLen + } + cols[i] = value + } + trendCols[name] = cols + return + } + var values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) + nonTrendValues[name] = values[0] + var valueLen = strWidth(values[0]) + if (valueLen > nonTrendValueMaxLen) { + nonTrendValueMaxLen = valueLen + } + nonTrendExtras[name] = values.slice(1) + for (var i = 1; i < values.length; i++) { + var extraLen = strWidth(values[i]) + if (extraLen > nonTrendExtraMaxLens[i - 1]) { + nonTrendExtraMaxLens[i - 1] = extraLen + } + } + }) + + // sort all metrics but keep sub metrics grouped with their parent metrics + if (options.sortByName) { + names.sort(function (metric1, metric2) { + var parent1 = metric1.split('{', 1)[0] + var parent2 = metric2.split('{', 1)[0] + var result = parent1.localeCompare(parent2) + if (result !== 0) { + return result + } + var sub1 = metric1.substring(parent1.length) + var sub2 = metric2.substring(parent2.length) + return sub1.localeCompare(sub2) + }) + } + + var getData = function (name) { + if (trendCols.hasOwnProperty(name)) { + var cols = trendCols[name] + var tmpCols = new Array(numTrendColumns) + for (var i = 0; i < cols.length; i++) { + tmpCols[i] = + options.summaryTrendStats[i] + + '=' + + decorate(cols[i], palette.cyan) + + ' '.repeat(trendColMaxLens[i] - strWidth(cols[i])) + } + return tmpCols.join(' ') + } + + var value = nonTrendValues[name] + var fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) + + var extras = nonTrendExtras[name] + if (extras.length == 1) { + fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) + } else if (extras.length > 1) { + var parts = new Array(extras.length) + for (var i = 0; i < extras.length; i++) { + parts[i] = + decorate(extras[i], palette.cyan, palette.faint) + + ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) + } + fmtData = fmtData + ' ' + parts.join(' ') + } + + return fmtData + } + + for (var name of names) { + var metric = data.metrics[name] + var mark = ' ' + var markColor = function (text) { + return text + } // noop + + if (metric.thresholds) { + mark = succMark + markColor = function (text) { + return decorate(text, palette.green) + } + forEach(metric.thresholds, function (name, threshold) { + if (!threshold.ok) { + mark = failMark + markColor = function (text) { + return decorate(text, palette.red) + } + return true // break + } + }) + } + var fmtIndent = indentForMetric(name) + var fmtName = displayNameForMetric(name) + fmtName = + fmtName + + decorate( + '.'.repeat(nameLenMax - strWidth(fmtName) - strWidth(fmtIndent) + 3) + ':', + palette.faint + ) + + result.push(indent + fmtIndent + markColor(mark) + ' ' + fmtName + ' ' + getData(name)) + 
} + + return result } -function generateTextSummary(data, options) { - var mergedOpts = Object.assign({}, defaultOptions, data.options, options) - var lines = [] - - // TODO: move all of these functions into an object with methods? - var decorate = function (text) { - return text - } - if (mergedOpts.enableColors) { - decorate = function (text, color /*, ...rest*/) { - var result = '\x1b[' + color - for (var i = 2; i < arguments.length; i++) { - result += ';' + arguments[i] - } - return result + 'm' + text + '\x1b[0m' - } - } - - Array.prototype.push.apply( - lines, - summarizeGroup(mergedOpts.indent + ' ', data.root_group, decorate) - ) - - Array.prototype.push.apply(lines, summarizeMetrics(mergedOpts, data, decorate)) - - return lines.join('\n') +function generateTextSummary(data, options, report) { + var mergedOpts = Object.assign({}, defaultOptions, data.options, options) + var lines = [] + + // TODO: move all of these functions into an object with methods? + var decorate = function (text) { + return text + } + if (mergedOpts.enableColors) { + decorate = function (text, color /*, ...rest*/) { + var result = '\x1b[' + color + for (var i = 2; i < arguments.length; i++) { + result += ';' + arguments[i] + } + return result + 'm' + text + '\x1b[0m' + } + } + + const ANSI_CODES = { + reset: "\x1b[0m", + + // Standard Colors + black: "\x1b[30m", + red: "\x1b[31m", + green: "\x1b[32m", + yellow: "\x1b[33m", + blue: "\x1b[34m", + magenta: "\x1b[35m", + cyan: "\x1b[36m", + white: "\x1b[37m", + + // Bright Colors + brightBlack: "\x1b[90m", + brightRed: "\x1b[91m", + brightGreen: "\x1b[92m", + brightYellow: "\x1b[93m", + brightBlue: "\x1b[94m", + brightMagenta: "\x1b[95m", + brightCyan: "\x1b[96m", + brightWhite: "\x1b[97m", + + // Dark Colors + darkGrey: "\x1b[90m", + }; + + const BOLD = '\u001b[1m' + const RESET = ANSI_CODES.reset; + const boldify = (text) => BOLD + text + RESET + + const metricGroupIndent = ' ' + + /** + * + * @typedef {{bold: boolean}} DisplayMetricsSectionNameOptions + * @param sectionName + * @param options [DisplayMetricsSectionNameOptions={bold: true}] + */ + const displayMetricsSectionName = (sectionName, options) => { + let bold = true; + if (options && options.bold === false) { + bold = false + } + + let normalizedSectionName = sectionName.toUpperCase() + + if (bold) { + normalizedSectionName = boldify(normalizedSectionName) + } + + lines.push(metricGroupIndent + metricGroupIndent + normalizedSectionName) + } + + const displayMetricsSectionBlock = (sectionMetrics, opts) => { + const summarizeOpts = Object.assign({}, mergedOpts, opts) + Array.prototype.push.apply(lines, summarizeMetrics(summarizeOpts, {metrics: sectionMetrics}, decorate)) + lines.push('') + } + + // START OF GROUP + // TITLE + lines.push(metricGroupIndent + groupPrefix + ' ' + boldify('GLOBAL RESULTS') + '\n') + + // CHECKS + displayMetricsSectionBlock(report.checks.metrics, {sortByName: false}) + + displayMetricsSectionName('CHECKS', { bold: false }) + for (var i = 0; i < report.checks.ordered_checks.length; i++) { + lines.push(summarizeCheck(metricGroupIndent + metricGroupIndent, report.checks.ordered_checks[i], decorate)) + } + if (report.checks.ordered_checks.length > 0) { + lines.push('') + } + + // METRICS + forEach(report.metrics, (sectionName, sectionMetrics) => { + displayMetricsSectionName(sectionName) + displayMetricsSectionBlock(sectionMetrics) + }) + // END OF GROUP + + Array.prototype.push.apply( + lines, + summarizeGroup(mergedOpts.indent + ' ', data.root_group, decorate) + ) + + 
Array.prototype.push.apply(lines, summarizeMetrics(mergedOpts, data, decorate)) + + return lines.join('\n') } exports.humanizeValue = humanizeValue From 0d6eab2f4adaf2b95ea2b5f2140333b70beeb7b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Tue, 22 Oct 2024 13:03:50 +0200 Subject: [PATCH 02/42] More improvements for the new end-of-test summary Co-authored-by: oleiade --- js/runner.go | 34 +++++++++++-- js/summary.go | 13 +++-- js/summary.js | 7 ++- playground/full-summary/api.js | 35 +++++++++++++ playground/full-summary/browser.js | 12 +++++ playground/full-summary/grpc.js | 23 +++++++++ playground/full-summary/ws.js | 79 ++++++++++++++++++++++++++++++ 7 files changed, 192 insertions(+), 11 deletions(-) create mode 100644 playground/full-summary/api.js create mode 100644 playground/full-summary/browser.js create mode 100644 playground/full-summary/grpc.js create mode 100644 playground/full-summary/ws.js diff --git a/js/runner.go b/js/runner.go index e2f4fd2039a..fdcd3a8dc02 100644 --- a/js/runner.go +++ b/js/runner.go @@ -378,10 +378,12 @@ type ReportMetrics struct { Browser map[string]MetricData - // FIXME: WebVitals are a browser metrics too, and we might want to move them in a browser subsection - // of the report WebVitals map[string]MetricData + Grpc map[string]MetricData + + WebSocket map[string]MetricData `js:"websocket"` + // Miscellaneous contains user-defined metric results as well as extensions metrics Miscellaneous map[string]MetricData } @@ -420,6 +422,8 @@ func NewReport() Report { Network: make(map[string]MetricData), Browser: make(map[string]MetricData), WebVitals: make(map[string]MetricData), + Grpc: make(map[string]MetricData), + WebSocket: make(map[string]MetricData), Miscellaneous: make(map[string]MetricData), }, Checks: ReportChecks{ @@ -467,9 +471,21 @@ func isWebVitalsMetric(m *metrics.Metric) bool { return strings.HasPrefix(m.Name, "browser_web_vital_") } +func isGrpcMetric(m *metrics.Metric) bool { + return strings.HasPrefix(m.Name, "grpc_") +} + +func isWebSocketsMetric(m *metrics.Metric) bool { + return strings.HasPrefix(m.Name, "ws_") +} + +func isSkippedMetric(m *metrics.Metric) bool { + return oneOfMetrics(m, metrics.ChecksName, metrics.GroupDurationName) +} + func oneOfMetrics(m *metrics.Metric, values ...string) bool { for _, v := range values { - if m.Name == v { + if strings.HasPrefix(m.Name, v) { return true } } @@ -482,6 +498,8 @@ func NewReportFrom(summary *lib.Summary, options lib.Options) Report { for _, m := range summary.Metrics { switch { + case isSkippedMetric(m): + // Do nothing, just skip. 
case isHTTPMetric(m): report.Metrics.HTTP[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) case isExecutionMetric(m): @@ -490,6 +508,10 @@ func NewReportFrom(summary *lib.Summary, options lib.Options) Report { report.Metrics.Network[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) case isBrowserMetric(m): report.Metrics.Browser[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) + case isGrpcMetric(m): + report.Metrics.Grpc[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) + case isWebSocketsMetric(m): + report.Metrics.WebSocket[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) case isWebVitalsMetric(m): report.Metrics.WebVitals[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) default: @@ -500,9 +522,11 @@ func NewReportFrom(summary *lib.Summary, options lib.Options) Report { totalChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Total) successChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Trues) - report.Checks.Metrics.Total.Values["count"] = totalChecks // Counter metric with total checks - report.Checks.Metrics.Total.Values["rate"] = 0.0 // TODO: Calculate based on summary.TotalTestDuration + report.Checks.Metrics.Total.Values["count"] = totalChecks // Counter metric with total checks + report.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, summary.TestRunDuration) + report.Checks.Metrics.Success = NewMetricsDataFrom(summary, summary.Metrics[metrics.ChecksName], options.SummaryTrendStats) // Rate metric with successes (equivalent to the 'checks' metric) + report.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks report.Checks.Metrics.Fail.Values["fails"] = successChecks report.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks diff --git a/js/summary.go b/js/summary.go index 86dc34d18bc..d7452244b0e 100644 --- a/js/summary.go +++ b/js/summary.go @@ -33,11 +33,7 @@ func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Durat switch sink := sink.(type) { case *metrics.CounterSink: result = sink.Format(t) - rate := 0.0 - if t > 0 { - rate = sink.Value / (float64(t) / float64(time.Second)) - } - result["rate"] = rate + result["rate"] = calculateCounterRate(sink.Value, t) case *metrics.GaugeSink: result = sink.Format(t) result["min"] = sink.Min @@ -159,3 +155,10 @@ func getSummaryResult(rawResult sobek.Value) (map[string]io.Reader, error) { return results, nil } + +func calculateCounterRate(count float64, duration time.Duration) float64 { + if duration == 0 { + return 0 + } + return count / (float64(duration) / float64(time.Second)) +} diff --git a/js/summary.js b/js/summary.js index e7e5c0670ab..d6e5e471fcf 100644 --- a/js/summary.js +++ b/js/summary.js @@ -127,7 +127,7 @@ function displayNameForMetric(name) { function indentForMetric(name) { if (name.indexOf('{') >= 0) { - return '' + return ' ' } return '' } @@ -479,6 +479,11 @@ function generateTextSummary(data, options, report) { // METRICS forEach(report.metrics, (sectionName, sectionMetrics) => { + // If there are no metrics in this section, skip it + if (Object.keys(sectionMetrics).length === 0) { + return + } + displayMetricsSectionName(sectionName) displayMetricsSectionBlock(sectionMetrics) }) diff --git a/playground/full-summary/api.js b/playground/full-summary/api.js new file mode 100644 index 00000000000..f8390a9626f --- /dev/null +++ b/playground/full-summary/api.js @@ 
-0,0 +1,35 @@ +import http from 'k6/http' +import {check, group} from 'k6' + +export function apiTest() { + const res = http.get('https://httpbin.org/get') + check(res, { + 'test api is up': (r) => r.status === 200, + 'test api is 500': (r) => r.status === 500, + }) + + group('auth', () => { + const res = http.post( + 'https://httpbin.org/auth', + JSON.stringify({ + username: 'sakai', + first_name: 'jin', + last_name: 'sakai', + email: 'jin.sakai@suckerpunch.com', + password: 'onegaishimasu', + }) + ) + + check(res, { + 'status is 201 CREATED': (r) => r.status === 201, + }) + }) + + group('my crocodiles', () => { + const res = http.get('https://httpbin.org/get') + + check(res, { + 'status is 200 OK': (r) => r.status === 200, + }) + }) +} \ No newline at end of file diff --git a/playground/full-summary/browser.js b/playground/full-summary/browser.js new file mode 100644 index 00000000000..6a04b04724f --- /dev/null +++ b/playground/full-summary/browser.js @@ -0,0 +1,12 @@ +import {browser} from 'k6/browser' + +export async function browserTest() { + const page = await browser.newPage() + + try { + await page.goto('https://test.k6.io/') + await page.screenshot({path: 'screenshots/screenshot.png'}) + } finally { + await page.close() + } +} diff --git a/playground/full-summary/grpc.js b/playground/full-summary/grpc.js new file mode 100644 index 00000000000..6b22388fbd9 --- /dev/null +++ b/playground/full-summary/grpc.js @@ -0,0 +1,23 @@ +import grpc from 'k6/net/grpc'; +import {check} from 'k6' + +const GRPC_ADDR = __ENV.GRPC_ADDR || '127.0.0.1:10000'; +const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../../lib/testutils/grpcservice/route_guide.proto'; + +let client = new grpc.Client(); + +client.load([], GRPC_PROTO_PATH); + +export function grpcTest() { + client.connect(GRPC_ADDR, {plaintext: true}); + + const response = client.invoke("main.FeatureExplorer/GetFeature", { + latitude: 410248224, + longitude: -747127767 + }) + + check(response, {"status is OK": (r) => r && r.status === grpc.StatusOK}); + console.log(JSON.stringify(response.message)) + + client.close() +} \ No newline at end of file diff --git a/playground/full-summary/ws.js b/playground/full-summary/ws.js new file mode 100644 index 00000000000..7b021405860 --- /dev/null +++ b/playground/full-summary/ws.js @@ -0,0 +1,79 @@ +import { + randomString, + randomIntBetween, +} from "https://jslib.k6.io/k6-utils/1.1.0/index.js"; +import {WebSocket} from "k6/experimental/websockets"; +import { + setTimeout, + clearTimeout, + setInterval, + clearInterval, +} from "k6/timers"; + +let chatRoomName = "publicRoom"; // choose your chat room name +let sessionDuration = randomIntBetween(1000, 5000); // user session between 5s and 1m + +export function wsTest() { + for (let i = 0; i < 4; i++) { + startWSWorker(i); + } +} + +function startWSWorker(id) { + let url = `wss://test-api.k6.io/ws/crocochat/${chatRoomName}/`; + let ws = new WebSocket(url); + ws.binaryType = "arraybuffer"; + ws.addEventListener("open", () => { + ws.send( + JSON.stringify({ + event: "SET_NAME", + new_name: `Croc ${__VU}:${id}`, + }) + ); + + ws.addEventListener("message", (e) => { + let msg = JSON.parse(e.data); + if (msg.event === "CHAT_MSG") { + console.log( + `VU ${__VU}:${id} received: ${msg.user} says: ${msg.message}` + ); + } else if (msg.event === "ERROR") { + console.error(`VU ${__VU}:${id} received:: ${msg.message}`); + } else { + console.log( + `VU ${__VU}:${id} received unhandled message: ${msg.message}` + ); + } + }); + + let intervalId = setInterval(() => { + 
ws.send( + JSON.stringify({ + event: "SAY", + message: `I'm saying ${randomString(5)}`, + }) + ); + }, randomIntBetween(2000, 8000)); // say something every 2-8seconds + + let timeout1id = setTimeout(function () { + clearInterval(intervalId); + console.log( + `VU ${__VU}:${id}: ${sessionDuration}ms passed, leaving the chat` + ); + ws.send(JSON.stringify({event: "LEAVE"})); + }, sessionDuration); + + let timeout2id = setTimeout(function () { + console.log( + `Closing the socket forcefully 3s after graceful LEAVE` + ); + ws.close(); + }, sessionDuration + 3000); + + ws.addEventListener("close", () => { + clearTimeout(timeout1id); + clearTimeout(timeout2id); + console.log(`VU ${__VU}:${id}: disconnected`); + }); + }); +} \ No newline at end of file From a6d0c68e1bcd17a93874ed0a74c535e8072dfd18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 23 Oct 2024 11:02:39 +0200 Subject: [PATCH 03/42] Dummy summary output Co-authored-by: oleiade --- cmd/builtin_output_gen.go | 12 ++++++++---- cmd/outputs.go | 3 +++ output/summary/summary.go | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 4 deletions(-) create mode 100644 output/summary/summary.go diff --git a/cmd/builtin_output_gen.go b/cmd/builtin_output_gen.go index 35fe34505b8..8983063e8f5 100644 --- a/cmd/builtin_output_gen.go +++ b/cmd/builtin_output_gen.go @@ -7,11 +7,11 @@ import ( "strings" ) -const _builtinOutputName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetry" +const _builtinOutputName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetrysummary" -var _builtinOutputIndex = [...]uint8{0, 5, 8, 15, 41, 49, 53, 58, 64, 90} +var _builtinOutputIndex = [...]uint8{0, 5, 8, 15, 41, 49, 53, 58, 64, 90, 97} -const _builtinOutputLowerName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetry" +const _builtinOutputLowerName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetrysummary" func (i builtinOutput) String() string { if i >= builtinOutput(len(_builtinOutputIndex)-1) { @@ -33,9 +33,10 @@ func _builtinOutputNoOp() { _ = x[builtinOutputKafka-(6)] _ = x[builtinOutputStatsd-(7)] _ = x[builtinOutputExperimentalOpentelemetry-(8)] + _ = x[builtinOutputSummary-(9)] } -var _builtinOutputValues = []builtinOutput{builtinOutputCloud, builtinOutputCSV, builtinOutputDatadog, builtinOutputExperimentalPrometheusRW, builtinOutputInfluxdb, builtinOutputJSON, builtinOutputKafka, builtinOutputStatsd, builtinOutputExperimentalOpentelemetry} +var _builtinOutputValues = []builtinOutput{builtinOutputCloud, builtinOutputCSV, builtinOutputDatadog, builtinOutputExperimentalPrometheusRW, builtinOutputInfluxdb, builtinOutputJSON, builtinOutputKafka, builtinOutputStatsd, builtinOutputExperimentalOpentelemetry, builtinOutputSummary} var _builtinOutputNameToValueMap = map[string]builtinOutput{ _builtinOutputName[0:5]: builtinOutputCloud, @@ -56,6 +57,8 @@ var _builtinOutputNameToValueMap = map[string]builtinOutput{ _builtinOutputLowerName[58:64]: builtinOutputStatsd, _builtinOutputName[64:90]: builtinOutputExperimentalOpentelemetry, _builtinOutputLowerName[64:90]: builtinOutputExperimentalOpentelemetry, + _builtinOutputName[90:97]: builtinOutputSummary, + _builtinOutputLowerName[90:97]: builtinOutputSummary, } var _builtinOutputNames = []string{ @@ -68,6 +71,7 @@ var _builtinOutputNames = []string{ _builtinOutputName[53:58], 
_builtinOutputName[58:64], _builtinOutputName[64:90], + _builtinOutputName[90:97], } // builtinOutputString retrieves an enum value from the enum constants string name. diff --git a/cmd/outputs.go b/cmd/outputs.go index 4aa428f4c60..a68f8e141fc 100644 --- a/cmd/outputs.go +++ b/cmd/outputs.go @@ -3,6 +3,7 @@ package cmd import ( "errors" "fmt" + "go.k6.io/k6/output/summary" "sort" "strings" @@ -38,12 +39,14 @@ const ( builtinOutputKafka builtinOutputStatsd builtinOutputExperimentalOpentelemetry + builtinOutputSummary ) // TODO: move this to an output sub-module after we get rid of the old collectors? func getAllOutputConstructors() (map[string]output.Constructor, error) { // Start with the built-in outputs result := map[string]output.Constructor{ + builtinOutputSummary.String(): summary.New, builtinOutputJSON.String(): json.New, builtinOutputCloud.String(): cloud.New, builtinOutputCSV.String(): csv.New, diff --git a/output/summary/summary.go b/output/summary/summary.go new file mode 100644 index 00000000000..43463b2cceb --- /dev/null +++ b/output/summary/summary.go @@ -0,0 +1,33 @@ +package summary + +import ( + "go.k6.io/k6/metrics" + "go.k6.io/k6/output" +) + +var _ output.Output = &Output{} + +// Output ... +type Output struct { +} + +// New returns a new JSON output. +func New(params output.Params) (output.Output, error) { + return &Output{}, nil +} + +func (o Output) Description() string { + return "" +} + +func (o Output) Start() error { + return nil +} + +func (o Output) AddMetricSamples(samples []metrics.SampleContainer) { + +} + +func (o Output) Stop() error { + return nil +} From f324c2c3d863d17f7e33723ee7a74117d4dc7628 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Sat, 26 Oct 2024 13:49:47 +0200 Subject: [PATCH 04/42] Start collecting metrics on summary output Co-authored-by: oleiade --- output/summary/summary.go | 129 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 123 insertions(+), 6 deletions(-) diff --git a/output/summary/summary.go b/output/summary/summary.go index 43463b2cceb..4630495d019 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -1,33 +1,150 @@ package summary import ( + "fmt" "go.k6.io/k6/metrics" + "strings" + "time" + "go.k6.io/k6/output" + + "github.com/sirupsen/logrus" ) +const flushPeriod = 200 * time.Millisecond // TODO: make this configurable + var _ output.Output = &Output{} // Output ... type Output struct { + output.SampleBuffer + + periodicFlusher *output.PeriodicFlusher + logger logrus.FieldLogger + + dataModel DataModel + + // FIXME: drop me + startTime time.Time } // New returns a new JSON output. 
func New(params output.Params) (output.Output, error) { - return &Output{}, nil + return &Output{ + logger: params.Logger.WithFields(logrus.Fields{ + "output": "summary", + "filename": params.ConfigArgument, + }), + dataModel: NewDataModel(), + }, nil } -func (o Output) Description() string { +func (o *Output) Description() string { return "" } -func (o Output) Start() error { +func (o *Output) Start() error { + pf, err := output.NewPeriodicFlusher(flushPeriod, o.flushMetrics) + if err != nil { + return err + } + o.logger.Debug("Started!") + o.periodicFlusher = pf + + //FIXME: drop me + o.startTime = time.Now() + return nil } -func (o Output) AddMetricSamples(samples []metrics.SampleContainer) { +func (o *Output) Stop() error { + o.periodicFlusher.Stop() -} + for groupName, aggregatedData := range o.dataModel.Groups { + o.logger.Warning(groupName) -func (o Output) Stop() error { + for metricName, sink := range aggregatedData { + o.logger.Warning(fmt.Sprintf(" %s: %+v", metricName, sink)) + } + } return nil } + +type MetricData struct { + container map[string]*metrics.Metric +} + +type ScenarioData struct { + MetricData + + Groups map[string]AggregatedMetricData +} + +type DataModel struct { + ScenarioData + + Scenarios map[string]AggregatedMetricData +} + +type AggregatedMetricData map[string]metrics.Sink + +func (a AggregatedMetricData) AddSample(sample metrics.Sample) { + if _, exists := a[sample.Metric.Name]; !exists { + a[sample.Metric.Name] = metrics.NewSink(sample.Metric.Type) + } + + a[sample.Metric.Name].Add(sample) +} + +func NewDataModel() DataModel { + return DataModel{ + ScenarioData: ScenarioData{ + MetricData: MetricData{ + container: make(map[string]*metrics.Metric), + }, + Groups: make(map[string]AggregatedMetricData), + }, + Scenarios: make(map[string]AggregatedMetricData), + } +} + +func (d DataModel) GroupStored(groupName string) bool { + _, exists := d.Groups[groupName] + return exists +} + +func (d DataModel) ScenarioStored(scenarioName string) bool { + _, exists := d.Scenarios[scenarioName] + return exists +} + +func (o *Output) flushMetrics() { + samples := o.GetBufferedSamples() + for _, sc := range samples { + samples := sc.GetSamples() + for _, sample := range samples { + if _, ok := o.dataModel.container[sample.Metric.Name]; !ok { + o.dataModel.container[sample.Metric.Name] = sample.Metric + } + + if groupName, exists := sample.Tags.Get("group"); exists { + normalizedGroupName := strings.TrimPrefix(groupName, "::") + + if !o.dataModel.GroupStored(normalizedGroupName) { + o.dataModel.Groups[normalizedGroupName] = make(AggregatedMetricData) + } + + o.dataModel.Groups[normalizedGroupName].AddSample(sample) + } + + if scenarioName, exists := sample.Tags.Get("scenario"); exists { + if !o.dataModel.ScenarioStored(scenarioName) { + o.dataModel.Scenarios[scenarioName] = make(AggregatedMetricData) + } + + o.dataModel.Scenarios[scenarioName].AddSample(sample) + } + + } + } +} From a6b496b7e00ed1f0775f5bad617a3ff04d91ccb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Mon, 28 Oct 2024 16:37:51 +0100 Subject: [PATCH 05/42] Include groups & scenarios metrics on summary Co-authored-by: oleiade --- cmd/outputs.go | 2 - cmd/run.go | 20 ++- js/runner.go | 195 +------------------------ js/summary.js | 33 ++++- lib/report.go | 142 ++++++++++++++++++ lib/runner.go | 2 +- lib/testutils/minirunner/minirunner.go | 2 +- output/summary/report.go | 70 +++++++++ output/summary/summary.go | 105 ++++++++++++- 9 files changed, 363 
insertions(+), 208 deletions(-) create mode 100644 lib/report.go create mode 100644 output/summary/report.go diff --git a/cmd/outputs.go b/cmd/outputs.go index a68f8e141fc..17c7d4c432b 100644 --- a/cmd/outputs.go +++ b/cmd/outputs.go @@ -3,7 +3,6 @@ package cmd import ( "errors" "fmt" - "go.k6.io/k6/output/summary" "sort" "strings" @@ -46,7 +45,6 @@ const ( func getAllOutputConstructors() (map[string]output.Constructor, error) { // Start with the built-in outputs result := map[string]output.Constructor{ - builtinOutputSummary.String(): summary.New, builtinOutputJSON.String(): json.New, builtinOutputCloud.String(): cloud.New, builtinOutputCSV.String(): csv.New, diff --git a/cmd/run.go b/cmd/run.go index 1a0e04c8ffc..6715f41a2a6 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -30,6 +30,7 @@ import ( "go.k6.io/k6/metrics" "go.k6.io/k6/metrics/engine" "go.k6.io/k6/output" + "go.k6.io/k6/output/summary" "go.k6.io/k6/ui/pb" ) @@ -190,9 +191,20 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { executionState := execScheduler.GetState() if !testRunState.RuntimeOptions.NoSummary.Bool { + // Instantiates the summary output + summaryOutput, err := summary.New(output.Params{ + Logger: c.gs.Logger, + }) + if err != nil { + logger.WithError(err).Error("failed to initialize the end-of-test summary output") + } + outputs = append(outputs, summaryOutput) + + // At the end of the test run defer func() { logger.Debug("Generating the end-of-test summary...") - summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, &lib.Summary{ + + testSummary := &lib.Summary{ Metrics: metricsEngine.ObservedMetrics, RootGroup: testRunState.GroupSummary.Group(), TestRunDuration: executionState.GetCurrentTestRunDuration(), @@ -201,7 +213,11 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { IsStdOutTTY: c.gs.Stdout.IsTTY, IsStdErrTTY: c.gs.Stderr.IsTTY, }, - }) + } + + report := summaryOutput.MetricsReport(testSummary, test.initRunner.GetOptions()) + + summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, testSummary, report) if hsErr == nil { hsErr = handleSummaryResult(c.gs.FS, c.gs.Stdout, c.gs.Stderr, summaryResult) } diff --git a/js/runner.go b/js/runner.go index fdcd3a8dc02..05a4808e2bf 100644 --- a/js/runner.go +++ b/js/runner.go @@ -348,198 +348,9 @@ func (r *Runner) IsExecutable(name string) bool { return exists } -type MetricData struct { - Type string - Contains string - Values map[string]float64 -} - -func NewMetricsDataFrom(summary *lib.Summary, m *metrics.Metric, summaryTrendStats []string) MetricData { - // TODO: we obtain this from [options.SummaryTrendStats] which is a string slice - getMetricValues := metricValueGetter(summaryTrendStats) - - return MetricData{ - Type: m.Type.String(), - Contains: m.Contains.String(), - Values: getMetricValues(m.Sink, summary.TestRunDuration), - } -} - -type ReportMetrics struct { - // HTTP contains report data specific to HTTP metrics and is used - // to produce the summary HTTP subsection's content. - HTTP map[string]MetricData - // Execution contains report data specific to Execution metrics and is used - // to produce the summary Execution subsection's content. - Execution map[string]MetricData - // Network contains report data specific to Network metrics and is used - // to produce the summary Network subsection's content. 
- Network map[string]MetricData - - Browser map[string]MetricData - - WebVitals map[string]MetricData - - Grpc map[string]MetricData - - WebSocket map[string]MetricData `js:"websocket"` - - // Miscellaneous contains user-defined metric results as well as extensions metrics - Miscellaneous map[string]MetricData -} - -type ReportChecksMetrics struct { - Total MetricData `js:"checks_total"` - Success MetricData `js:"checks_succeeded"` - Fail MetricData `js:"checks_failed"` -} - -type ReportChecks struct { - Metrics ReportChecksMetrics - OrderedChecks []*lib.Check -} - -type Report struct { - Metrics ReportMetrics - Checks ReportChecks - Groups []Report - Scenarios []Report -} - -func NewReport() Report { - initMetricData := func(t metrics.MetricType) MetricData { - return MetricData{ - Type: t.String(), - Contains: metrics.Default.String(), - Values: make(map[string]float64), - } - } - - return Report{ - Metrics: ReportMetrics{ - HTTP: make(map[string]MetricData), - Execution: make(map[string]MetricData), - Network: make(map[string]MetricData), - Browser: make(map[string]MetricData), - WebVitals: make(map[string]MetricData), - Grpc: make(map[string]MetricData), - WebSocket: make(map[string]MetricData), - Miscellaneous: make(map[string]MetricData), - }, - Checks: ReportChecks{ - Metrics: ReportChecksMetrics{ - Total: initMetricData(metrics.Counter), - Success: initMetricData(metrics.Rate), - Fail: initMetricData(metrics.Rate), - }, - }, - } -} - -func isHTTPMetric(m *metrics.Metric) bool { - return oneOfMetrics(m, - metrics.HTTPReqsName, - metrics.HTTPReqFailedName, - metrics.HTTPReqDurationName, - metrics.HTTPReqBlockedName, - metrics.HTTPReqConnectingName, - metrics.HTTPReqTLSHandshakingName, - metrics.HTTPReqSendingName, - metrics.HTTPReqWaitingName, - metrics.HTTPReqReceivingName, - ) -} - -func isExecutionMetric(m *metrics.Metric) bool { - return oneOfMetrics(m, metrics.VUsName, - metrics.VUsMaxName, - metrics.IterationsName, - metrics.IterationDurationName, - metrics.DroppedIterationsName, - ) -} - -func isNetworkMetric(m *metrics.Metric) bool { - return oneOfMetrics(m, metrics.DataSentName, metrics.DataReceivedName) -} - -func isBrowserMetric(m *metrics.Metric) bool { - return strings.HasPrefix(m.Name, "browser_") && !isWebVitalsMetric(m) -} - -func isWebVitalsMetric(m *metrics.Metric) bool { - return strings.HasPrefix(m.Name, "browser_web_vital_") -} - -func isGrpcMetric(m *metrics.Metric) bool { - return strings.HasPrefix(m.Name, "grpc_") -} - -func isWebSocketsMetric(m *metrics.Metric) bool { - return strings.HasPrefix(m.Name, "ws_") -} - -func isSkippedMetric(m *metrics.Metric) bool { - return oneOfMetrics(m, metrics.ChecksName, metrics.GroupDurationName) -} - -func oneOfMetrics(m *metrics.Metric, values ...string) bool { - for _, v := range values { - if strings.HasPrefix(m.Name, v) { - return true - } - } - return false -} - -// TODO: it would be nicer to only receive summary-specific options here, as opposed to the whole [lib.Options] struct -func NewReportFrom(summary *lib.Summary, options lib.Options) Report { - report := NewReport() - - for _, m := range summary.Metrics { - switch { - case isSkippedMetric(m): - // Do nothing, just skip. 
- case isHTTPMetric(m): - report.Metrics.HTTP[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) - case isExecutionMetric(m): - report.Metrics.Execution[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) - case isNetworkMetric(m): - report.Metrics.Network[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) - case isBrowserMetric(m): - report.Metrics.Browser[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) - case isGrpcMetric(m): - report.Metrics.Grpc[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) - case isWebSocketsMetric(m): - report.Metrics.WebSocket[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) - case isWebVitalsMetric(m): - report.Metrics.WebVitals[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) - default: - report.Metrics.Miscellaneous[m.Name] = NewMetricsDataFrom(summary, m, options.SummaryTrendStats) - } - } - - totalChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Total) - successChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Trues) - - report.Checks.Metrics.Total.Values["count"] = totalChecks // Counter metric with total checks - report.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, summary.TestRunDuration) - - report.Checks.Metrics.Success = NewMetricsDataFrom(summary, summary.Metrics[metrics.ChecksName], options.SummaryTrendStats) // Rate metric with successes (equivalent to the 'checks' metric) - - report.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks - report.Checks.Metrics.Fail.Values["fails"] = successChecks - report.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks - - report.Checks.OrderedChecks = summary.RootGroup.OrderedChecks - - return report -} - // HandleSummary calls the specified summary callback, if supplied. -func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[string]io.Reader, error) { - reportData := NewReportFrom(summary, r.Bundle.Options) - fmt.Println(reportData) +func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary, report lib.Report) (map[string]io.Reader, error) { + fmt.Println(report) summaryDataForJS := summarizeMetricsToObject(summary, r.Bundle.Options, r.setupData) @@ -594,7 +405,7 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s callbackResult, vu.Runtime.ToValue(r.Bundle.preInitState.RuntimeOptions.SummaryExport.String), vu.Runtime.ToValue(summaryDataForJS), - vu.Runtime.ToValue(reportData), + vu.Runtime.ToValue(report), } rawResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryWrapper, nil, wrapperArgs...) 
diff --git a/js/summary.js b/js/summary.js index d6e5e471fcf..822602763d3 100644 --- a/js/summary.js +++ b/js/summary.js @@ -462,7 +462,7 @@ function generateTextSummary(data, options, report) { lines.push('') } - // START OF GROUP + // START OF GLOBAL RESULTS // TITLE lines.push(metricGroupIndent + groupPrefix + ' ' + boldify('GLOBAL RESULTS') + '\n') @@ -487,7 +487,36 @@ function generateTextSummary(data, options, report) { displayMetricsSectionName(sectionName) displayMetricsSectionBlock(sectionMetrics) }) - // END OF GROUP + // END OF GLOBAL RESULTS + + // GROUPS + forEach(report.groups, (groupName, groupMetrics) => { + lines.push(metricGroupIndent + groupPrefix + ' ' + boldify(`GROUP: ${groupName}`) + '\n') + forEach(groupMetrics, (sectionName, sectionMetrics) => { + // If there are no metrics in this section, skip it + if (Object.keys(sectionMetrics).length === 0) { + return + } + + displayMetricsSectionName(sectionName) + displayMetricsSectionBlock(sectionMetrics) + }) + }) + + // SCENARIOS + forEach(report.scenarios, (scenarioName, scenarioMetrics) => { + lines.push(metricGroupIndent + groupPrefix + ' ' + boldify(`SCENARIO: ${scenarioName}`) + '\n') + forEach(scenarioMetrics, (sectionName, sectionMetrics) => { + // If there are no metrics in this section, skip it + if (Object.keys(sectionMetrics).length === 0) { + return + } + + displayMetricsSectionName(sectionName) + displayMetricsSectionBlock(sectionMetrics) + }) + }) + Array.prototype.push.apply( lines, diff --git a/lib/report.go b/lib/report.go new file mode 100644 index 00000000000..5dc9da5aecd --- /dev/null +++ b/lib/report.go @@ -0,0 +1,142 @@ +package lib + +import ( + "go.k6.io/k6/metrics" + + "time" +) + +type ReportMetricData struct { + Type string + Contains string + Values map[string]float64 +} + +func NewReportMetricsDataFrom( + mType metrics.MetricType, vType metrics.ValueType, sink metrics.Sink, + testDuration time.Duration, summaryTrendStats []string, +) ReportMetricData { + // TODO: we obtain this from [options.SummaryTrendStats] which is a string slice + getMetricValues := metricValueGetter(summaryTrendStats) + + return ReportMetricData{ + Type: mType.String(), + Contains: vType.String(), + Values: getMetricValues(sink, testDuration), + } +} + +type ReportMetrics struct { + // HTTP contains report data specific to HTTP metrics and is used + // to produce the summary HTTP subsection's content. + HTTP map[string]ReportMetricData + // Execution contains report data specific to Execution metrics and is used + // to produce the summary Execution subsection's content. + Execution map[string]ReportMetricData + // Network contains report data specific to Network metrics and is used + // to produce the summary Network subsection's content. 
+ Network map[string]ReportMetricData + + Browser map[string]ReportMetricData + + WebVitals map[string]ReportMetricData + + Grpc map[string]ReportMetricData + + WebSocket map[string]ReportMetricData `js:"websocket"` + + // Miscellaneous contains user-defined metric results as well as extensions metrics + Miscellaneous map[string]ReportMetricData +} + +func NewReportMetrics() ReportMetrics { + return ReportMetrics{ + HTTP: make(map[string]ReportMetricData), + Execution: make(map[string]ReportMetricData), + Network: make(map[string]ReportMetricData), + Browser: make(map[string]ReportMetricData), + WebVitals: make(map[string]ReportMetricData), + Grpc: make(map[string]ReportMetricData), + WebSocket: make(map[string]ReportMetricData), + Miscellaneous: make(map[string]ReportMetricData), + } +} + +type ReportChecksMetrics struct { + Total ReportMetricData `js:"checks_total"` + Success ReportMetricData `js:"checks_succeeded"` + Fail ReportMetricData `js:"checks_failed"` +} + +type ReportChecks struct { + Metrics ReportChecksMetrics + OrderedChecks []*Check +} + +type Report struct { + Metrics ReportMetrics + Checks ReportChecks + // FIXME: Groups could have groups + Groups map[string]ReportMetrics + Scenarios map[string]ReportMetrics +} + +func NewReport() Report { + initMetricData := func(t metrics.MetricType) ReportMetricData { + return ReportMetricData{ + Type: t.String(), + Contains: metrics.Default.String(), + Values: make(map[string]float64), + } + } + + return Report{ + Metrics: NewReportMetrics(), + Checks: ReportChecks{ + Metrics: ReportChecksMetrics{ + Total: initMetricData(metrics.Counter), + Success: initMetricData(metrics.Rate), + Fail: initMetricData(metrics.Rate), + }, + }, + Groups: make(map[string]ReportMetrics), + Scenarios: make(map[string]ReportMetrics), + } +} + +func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Duration) map[string]float64 { + trendResolvers, err := metrics.GetResolversForTrendColumns(summaryTrendStats) + if err != nil { + panic(err.Error()) // this should have been validated already + } + + return func(sink metrics.Sink, t time.Duration) (result map[string]float64) { + switch sink := sink.(type) { + case *metrics.CounterSink: + result = sink.Format(t) + result["rate"] = calculateCounterRate(sink.Value, t) + case *metrics.GaugeSink: + result = sink.Format(t) + result["min"] = sink.Min + result["max"] = sink.Max + case *metrics.RateSink: + result = sink.Format(t) + result["passes"] = float64(sink.Trues) + result["fails"] = float64(sink.Total - sink.Trues) + case *metrics.TrendSink: + result = make(map[string]float64, len(summaryTrendStats)) + for _, col := range summaryTrendStats { + result[col] = trendResolvers[col](sink) + } + } + + return result + } +} + +func calculateCounterRate(count float64, duration time.Duration) float64 { + if duration == 0 { + return 0 + } + return count / (float64(duration) / float64(time.Second)) +} diff --git a/lib/runner.go b/lib/runner.go index 3d840f22b79..8b46b7c5155 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -81,7 +81,7 @@ type Runner interface { // function in the script. 
IsExecutable(string) bool - HandleSummary(context.Context, *Summary) (map[string]io.Reader, error) + HandleSummary(context.Context, *Summary, Report) (map[string]io.Reader, error) } // UIState describes the state of the UI, which might influence what diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go index d5fc66fe050..451750a0b31 100644 --- a/lib/testutils/minirunner/minirunner.go +++ b/lib/testutils/minirunner/minirunner.go @@ -108,7 +108,7 @@ func (r *MiniRunner) SetOptions(opts lib.Options) error { } // HandleSummary calls the specified summary callback, if supplied. -func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.Summary) (map[string]io.Reader, error) { +func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.Summary, _ lib.Report) (map[string]io.Reader, error) { if r.HandleSummaryFn != nil { return r.HandleSummaryFn(ctx, s) } diff --git a/output/summary/report.go b/output/summary/report.go new file mode 100644 index 00000000000..0a1a57349d2 --- /dev/null +++ b/output/summary/report.go @@ -0,0 +1,70 @@ +package summary + +import ( + "go.k6.io/k6/metrics" + "strings" + "time" +) + +func isHTTPMetric(metricName string) bool { + return oneOfMetrics(metricName, + metrics.HTTPReqsName, + metrics.HTTPReqFailedName, + metrics.HTTPReqDurationName, + metrics.HTTPReqBlockedName, + metrics.HTTPReqConnectingName, + metrics.HTTPReqTLSHandshakingName, + metrics.HTTPReqSendingName, + metrics.HTTPReqWaitingName, + metrics.HTTPReqReceivingName, + ) +} + +func isExecutionMetric(metricName string) bool { + return oneOfMetrics(metricName, metrics.VUsName, + metrics.VUsMaxName, + metrics.IterationsName, + metrics.IterationDurationName, + metrics.DroppedIterationsName, + ) +} + +func isNetworkMetric(metricName string) bool { + return oneOfMetrics(metricName, metrics.DataSentName, metrics.DataReceivedName) +} + +func isBrowserMetric(metricName string) bool { + return strings.HasPrefix(metricName, "browser_") && !isWebVitalsMetric(metricName) +} + +func isWebVitalsMetric(metricName string) bool { + return strings.HasPrefix(metricName, "browser_web_vital_") +} + +func isGrpcMetric(metricName string) bool { + return strings.HasPrefix(metricName, "grpc_") +} + +func isWebSocketsMetric(metricName string) bool { + return strings.HasPrefix(metricName, "ws_") +} + +func isSkippedMetric(metricName string) bool { + return oneOfMetrics(metricName, metrics.ChecksName, metrics.GroupDurationName) +} + +func oneOfMetrics(metricName string, values ...string) bool { + for _, v := range values { + if strings.HasPrefix(metricName, v) { + return true + } + } + return false +} + +func calculateCounterRate(count float64, duration time.Duration) float64 { + if duration == 0 { + return 0 + } + return count / (float64(duration) / float64(time.Second)) +} diff --git a/output/summary/summary.go b/output/summary/summary.go index 4630495d019..f57dbe98feb 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -2,10 +2,11 @@ package summary import ( "fmt" - "go.k6.io/k6/metrics" "strings" "time" + "go.k6.io/k6/lib" + "go.k6.io/k6/metrics" "go.k6.io/k6/output" "github.com/sirupsen/logrus" @@ -29,11 +30,10 @@ type Output struct { } // New returns a new JSON output. 
-func New(params output.Params) (output.Output, error) { +func New(params output.Params) (*Output, error) { return &Output{ logger: params.Logger.WithFields(logrus.Fields{ - "output": "summary", - "filename": params.ConfigArgument, + "output": "summary", }), dataModel: NewDataModel(), }, nil @@ -77,6 +77,7 @@ type MetricData struct { type ScenarioData struct { MetricData + // FIXME: Groups could have groups Groups map[string]AggregatedMetricData } @@ -86,14 +87,26 @@ type DataModel struct { Scenarios map[string]AggregatedMetricData } -type AggregatedMetricData map[string]metrics.Sink +type AggregatedMetric struct { + Metric *metrics.Metric + Sink metrics.Sink +} + +func NewAggregatedMetric(metric *metrics.Metric) AggregatedMetric { + return AggregatedMetric{ + Metric: metric, + Sink: metrics.NewSink(metric.Type), + } +} + +type AggregatedMetricData map[string]AggregatedMetric func (a AggregatedMetricData) AddSample(sample metrics.Sample) { if _, exists := a[sample.Metric.Name]; !exists { - a[sample.Metric.Name] = metrics.NewSink(sample.Metric.Type) + a[sample.Metric.Name] = NewAggregatedMetric(sample.Metric) } - a[sample.Metric.Name].Add(sample) + a[sample.Metric.Name].Sink.Add(sample) } func NewDataModel() DataModel { @@ -127,7 +140,7 @@ func (o *Output) flushMetrics() { o.dataModel.container[sample.Metric.Name] = sample.Metric } - if groupName, exists := sample.Tags.Get("group"); exists { + if groupName, exists := sample.Tags.Get("group"); exists && len(groupName) > 0 { normalizedGroupName := strings.TrimPrefix(groupName, "::") if !o.dataModel.GroupStored(normalizedGroupName) { @@ -148,3 +161,79 @@ func (o *Output) flushMetrics() { } } } + +func (o *Output) MetricsReport(summary *lib.Summary, options lib.Options) lib.Report { + report := lib.NewReport() + + storeMetric := func(dest lib.ReportMetrics, m *metrics.Metric, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { + switch { + case isSkippedMetric(m.Name): + // Do nothing, just skip. 
+ case isHTTPMetric(m.Name): + dest.HTTP[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isExecutionMetric(m.Name): + dest.Execution[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isNetworkMetric(m.Name): + dest.Network[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isBrowserMetric(m.Name): + dest.Browser[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isGrpcMetric(m.Name): + dest.Grpc[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isWebSocketsMetric(m.Name): + dest.WebSocket[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isWebVitalsMetric(m.Name): + dest.WebVitals[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + default: + dest.Miscellaneous[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + } + } + + for _, m := range summary.Metrics { + storeMetric(report.Metrics, m, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + } + + totalChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Total) + successChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Trues) + + report.Checks.Metrics.Total.Values["count"] = totalChecks // Counter metric with total checks + report.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, summary.TestRunDuration) + + checksMetric := summary.Metrics[metrics.ChecksName] + report.Checks.Metrics.Success = lib.NewReportMetricsDataFrom(checksMetric.Type, checksMetric.Contains, checksMetric.Sink, summary.TestRunDuration, options.SummaryTrendStats) // Rate metric with successes (equivalent to the 'checks' metric) + + report.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks + report.Checks.Metrics.Fail.Values["fails"] = successChecks + report.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks + + report.Checks.OrderedChecks = summary.RootGroup.OrderedChecks + + for groupName, aggregatedData := range o.dataModel.Groups { + report.Groups[groupName] = lib.NewReportMetrics() + + for _, metricData := range aggregatedData { + storeMetric( + report.Groups[groupName], + metricData.Metric, + metricData.Sink, + summary.TestRunDuration, + options.SummaryTrendStats, + ) + } + } + + for scenarioName, aggregatedData := range o.dataModel.Scenarios { + report.Scenarios[scenarioName] = lib.NewReportMetrics() + + for _, metricData := range aggregatedData { + storeMetric( + report.Scenarios[scenarioName], + metricData.Metric, + metricData.Sink, + summary.TestRunDuration, + options.SummaryTrendStats, + ) + } + } + + return report +} From 89379a7d68143f04a264acba830273f07b739e14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 30 Oct 2024 11:08:42 +0100 Subject: [PATCH 06/42] Display nested groups in the summary Co-authored-by: oleiade --- cmd/run.go | 2 +- js/summary.js | 92 ++++++++++++---- lib/report.go | 29 +++-- output/summary/report.go | 133 ++++++++++++++++++++++- output/summary/summary.go | 189 
+++++++-------------------------- playground/full-summary/api.js | 8 ++ 6 files changed, 267 insertions(+), 186 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 6715f41a2a6..5318996a8ea 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -421,7 +421,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { backgroundProcesses.Add(1) go func() { defer backgroundProcesses.Done() - reportCtx, reportCancel := context.WithTimeout(globalCtx, 3*time.Second) + reportCtx, reportCancel := context.WithTimeout(globalCtx, 60*time.Second) defer reportCancel() logger.Debug("Sending usage report...") diff --git a/js/summary.js b/js/summary.js index 822602763d3..ca8c5eec1d5 100644 --- a/js/summary.js +++ b/js/summary.js @@ -433,6 +433,7 @@ function generateTextSummary(data, options, report) { const RESET = ANSI_CODES.reset; const boldify = (text) => BOLD + text + RESET + const defaultIndent = ' ' const metricGroupIndent = ' ' /** @@ -441,7 +442,7 @@ function generateTextSummary(data, options, report) { * @param sectionName * @param options [DisplayMetricsSectionNameOptions={bold: true}] */ - const displayMetricsSectionName = (sectionName, options) => { + const displayMetricsBlockName = (sectionName, options) => { let bold = true; if (options && options.bold === false) { bold = false @@ -453,10 +454,14 @@ function generateTextSummary(data, options, report) { normalizedSectionName = boldify(normalizedSectionName) } - lines.push(metricGroupIndent + metricGroupIndent + normalizedSectionName) + let indent = ' ' + if (options && options.metricsBlockIndent) { + indent += options.metricsBlockIndent + } + lines.push(indent + normalizedSectionName) } - const displayMetricsSectionBlock = (sectionMetrics, opts) => { + const displayMetricsBlock = (sectionMetrics, opts) => { const summarizeOpts = Object.assign({}, mergedOpts, opts) Array.prototype.push.apply(lines, summarizeMetrics(summarizeOpts, {metrics: sectionMetrics}, decorate)) lines.push('') @@ -464,12 +469,12 @@ function generateTextSummary(data, options, report) { // START OF GLOBAL RESULTS // TITLE - lines.push(metricGroupIndent + groupPrefix + ' ' + boldify('GLOBAL RESULTS') + '\n') + lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('GLOBAL RESULTS') + '\n') // CHECKS - displayMetricsSectionBlock(report.checks.metrics, {sortByName: false}) + displayMetricsBlock(report.checks.metrics, {sortByName: false}) - displayMetricsSectionName('CHECKS', { bold: false }) + displayMetricsBlockName('CHECKS', {bold: false}) for (var i = 0; i < report.checks.ordered_checks.length; i++) { lines.push(summarizeCheck(metricGroupIndent + metricGroupIndent, report.checks.ordered_checks[i], decorate)) } @@ -484,38 +489,79 @@ function generateTextSummary(data, options, report) { return } - displayMetricsSectionName(sectionName) - displayMetricsSectionBlock(sectionMetrics) + displayMetricsBlockName(sectionName) + displayMetricsBlock(sectionMetrics) }) // END OF GLOBAL RESULTS // GROUPS - forEach(report.groups, (groupName, groupMetrics) => { + /** + * + * @typedef {Object} GroupData + * @param groupName string + * @param groupData + */ + + const summarize = (prefix, indent) => { + return (groupName, groupData) => { + console.log('summarizeNestedGroups', groupName, JSON.stringify(groupData)) + + lines.push(metricGroupIndent + indent + prefix + defaultIndent + boldify(`GROUP: ${groupName}`) + '\n') + forEach(groupData.metrics, (sectionName, sectionMetrics) => { + // If there are no metrics in this section, skip it + if 
(Object.keys(sectionMetrics).length === 0) { + return + } + + displayMetricsBlockName(sectionName, {metricsBlockIndent: indent}) + displayMetricsBlock(sectionMetrics, {indent: indent + defaultIndent}) + }) + if (groupData.groups !== undefined) { + forEach(groupData.groups, summarize(detailsPrefix, indent + metricGroupIndent)); + } + } + } + + const summarizeNestedGroups = (groupName, groupData) => { + console.log('summarizeNestedGroups', groupName, JSON.stringify(groupData)) + lines.push(metricGroupIndent + groupPrefix + ' ' + boldify(`GROUP: ${groupName}`) + '\n') - forEach(groupMetrics, (sectionName, sectionMetrics) => { + forEach(groupData.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it if (Object.keys(sectionMetrics).length === 0) { return } - displayMetricsSectionName(sectionName) - displayMetricsSectionBlock(sectionMetrics) + displayMetricsBlockName(sectionName) + displayMetricsBlock(sectionMetrics) }) - }) + if (groupData.groups !== undefined) { + forEach(groupData.groups, summarizeNestedGroups); + } + } + + if (report.groups !== undefined) { + forEach(report.groups, summarize(groupPrefix, defaultIndent)); + } // SCENARIOS - forEach(report.scenarios, (scenarioName, scenarioMetrics) => { - lines.push(metricGroupIndent + groupPrefix + ' ' + boldify(`SCENARIO: ${scenarioName}`) + '\n') - forEach(scenarioMetrics, (sectionName, sectionMetrics) => { - // If there are no metrics in this section, skip it - if (Object.keys(sectionMetrics).length === 0) { - return - } + if (report.scenarios !== undefined) { + forEach(report.scenarios, (scenarioName, scenarioData) => { + lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify(`SCENARIO: ${scenarioName}`) + '\n') + forEach(scenarioData.metrics, (sectionName, sectionMetrics) => { + // If there are no metrics in this section, skip it + if (Object.keys(sectionMetrics).length === 0) { + return + } - displayMetricsSectionName(sectionName) - displayMetricsSectionBlock(sectionMetrics) + displayMetricsBlockName(sectionName) + displayMetricsBlock(sectionMetrics) + }) + if (scenarioData.groups !== undefined) { + forEach(scenarioData.groups, summarize(detailsPrefix, metricGroupIndent)); + } }) - }) + } Array.prototype.push.apply( diff --git a/lib/report.go b/lib/report.go index 5dc9da5aecd..04b11055f09 100644 --- a/lib/report.go +++ b/lib/report.go @@ -73,12 +73,23 @@ type ReportChecks struct { OrderedChecks []*Check } -type Report struct { +type ReportGroup struct { Metrics ReportMetrics - Checks ReportChecks - // FIXME: Groups could have groups - Groups map[string]ReportMetrics - Scenarios map[string]ReportMetrics + Groups map[string]ReportGroup +} + +func NewReportGroup() ReportGroup { + return ReportGroup{ + Metrics: NewReportMetrics(), + Groups: make(map[string]ReportGroup), + } +} + +type Report struct { + Checks ReportChecks + + ReportGroup + Scenarios map[string]ReportGroup } func NewReport() Report { @@ -91,7 +102,10 @@ func NewReport() Report { } return Report{ - Metrics: NewReportMetrics(), + ReportGroup: ReportGroup{ + Metrics: NewReportMetrics(), + Groups: make(map[string]ReportGroup), + }, Checks: ReportChecks{ Metrics: ReportChecksMetrics{ Total: initMetricData(metrics.Counter), @@ -99,8 +113,7 @@ func NewReport() Report { Fail: initMetricData(metrics.Rate), }, }, - Groups: make(map[string]ReportMetrics), - Scenarios: make(map[string]ReportMetrics), + Scenarios: make(map[string]ReportGroup), } } diff --git a/output/summary/report.go b/output/summary/report.go index 
0a1a57349d2..464060bd21c 100644 --- a/output/summary/report.go +++ b/output/summary/report.go @@ -1,11 +1,142 @@ package summary import ( - "go.k6.io/k6/metrics" "strings" "time" + + "go.k6.io/k6/lib" + "go.k6.io/k6/metrics" ) +type dataModel struct { + aggregatedGroupData + scenarios map[string]aggregatedGroupData +} + +func newDataModel() dataModel { + return dataModel{ + aggregatedGroupData: newAggregatedGroupData(), + scenarios: make(map[string]aggregatedGroupData), + } +} + +func (d dataModel) groupDataFor(scenario string) aggregatedGroupData { + if groupData, exists := d.scenarios[scenario]; exists { + return groupData + } + d.scenarios[scenario] = newAggregatedGroupData() + return d.scenarios[scenario] +} + +type aggregatedGroupData struct { + Metrics aggregatedMetricData + Groups map[string]aggregatedGroupData +} + +func newAggregatedGroupData() aggregatedGroupData { + return aggregatedGroupData{ + Metrics: make(map[string]aggregatedMetric), + Groups: make(map[string]aggregatedGroupData), + } +} + +func (d aggregatedGroupData) groupDataFor(group string) aggregatedGroupData { + if groupData, exists := d.Groups[group]; exists { + return groupData + } + d.Groups[group] = newAggregatedGroupData() + return d.Groups[group] +} + +type aggregatedMetricData map[string]aggregatedMetric + +func (a aggregatedMetricData) addSample(sample metrics.Sample) { + if _, exists := a[sample.Metric.Name]; !exists { + a[sample.Metric.Name] = newAggregatedMetric(sample.Metric) + } + + a[sample.Metric.Name].Sink.Add(sample) +} + +func (a aggregatedMetricData) storeSample(sample metrics.Sample) { + if _, exists := a[sample.Metric.Name]; !exists { + a[sample.Metric.Name] = aggregatedMetric{ + Metric: sample.Metric, + Sink: sample.Metric.Sink, + } + } +} + +type aggregatedMetric struct { + Metric *metrics.Metric + Sink metrics.Sink +} + +func newAggregatedMetric(metric *metrics.Metric) aggregatedMetric { + return aggregatedMetric{ + Metric: metric, + Sink: metrics.NewSink(metric.Type), + } +} + +func populateReportChecks(report *lib.Report, summary *lib.Summary, options lib.Options) { + totalChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Total) + successChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Trues) + + report.Checks.Metrics.Total.Values["count"] = totalChecks + report.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, summary.TestRunDuration) + + checksMetric := summary.Metrics[metrics.ChecksName] + report.Checks.Metrics.Success = lib.NewReportMetricsDataFrom(checksMetric.Type, checksMetric.Contains, checksMetric.Sink, summary.TestRunDuration, options.SummaryTrendStats) + + report.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks + report.Checks.Metrics.Fail.Values["fails"] = successChecks + report.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks + + report.Checks.OrderedChecks = summary.RootGroup.OrderedChecks +} + +func populateReportGroup(reportGroup *lib.ReportGroup, groupData aggregatedGroupData, summary *lib.Summary, options lib.Options) { + storeMetric := func(dest lib.ReportMetrics, m *metrics.Metric, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { + switch { + case isSkippedMetric(m.Name): + // Do nothing, just skip. 
+ case isHTTPMetric(m.Name): + dest.HTTP[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isExecutionMetric(m.Name): + dest.Execution[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isNetworkMetric(m.Name): + dest.Network[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isBrowserMetric(m.Name): + dest.Browser[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isGrpcMetric(m.Name): + dest.Grpc[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isWebSocketsMetric(m.Name): + dest.WebSocket[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isWebVitalsMetric(m.Name): + dest.WebVitals[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + default: + dest.Miscellaneous[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + } + } + + for _, metricData := range groupData.Metrics { + storeMetric( + reportGroup.Metrics, + metricData.Metric, + metricData.Sink, + summary.TestRunDuration, + options.SummaryTrendStats, + ) + } + + for groupName, subGroupData := range groupData.Groups { + subReportGroup := lib.NewReportGroup() + populateReportGroup(&subReportGroup, subGroupData, summary, options) + reportGroup.Groups[groupName] = subReportGroup + } +} + func isHTTPMetric(metricName string) bool { return oneOfMetrics(metricName, metrics.HTTPReqsName, diff --git a/output/summary/summary.go b/output/summary/summary.go index f57dbe98feb..2d9b0208115 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -23,10 +23,7 @@ type Output struct { periodicFlusher *output.PeriodicFlusher logger logrus.FieldLogger - dataModel DataModel - - // FIXME: drop me - startTime time.Time + dataModel dataModel } // New returns a new JSON output. 
@@ -35,7 +32,7 @@ func New(params output.Params) (*Output, error) { logger: params.Logger.WithFields(logrus.Fields{ "output": "summary", }), - dataModel: NewDataModel(), + dataModel: newDataModel(), }, nil } @@ -50,10 +47,6 @@ func (o *Output) Start() error { } o.logger.Debug("Started!") o.periodicFlusher = pf - - //FIXME: drop me - o.startTime = time.Now() - return nil } @@ -63,72 +56,12 @@ func (o *Output) Stop() error { for groupName, aggregatedData := range o.dataModel.Groups { o.logger.Warning(groupName) - for metricName, sink := range aggregatedData { + for metricName, sink := range aggregatedData.Metrics { o.logger.Warning(fmt.Sprintf(" %s: %+v", metricName, sink)) } } - return nil -} - -type MetricData struct { - container map[string]*metrics.Metric -} - -type ScenarioData struct { - MetricData - - // FIXME: Groups could have groups - Groups map[string]AggregatedMetricData -} - -type DataModel struct { - ScenarioData - - Scenarios map[string]AggregatedMetricData -} -type AggregatedMetric struct { - Metric *metrics.Metric - Sink metrics.Sink -} - -func NewAggregatedMetric(metric *metrics.Metric) AggregatedMetric { - return AggregatedMetric{ - Metric: metric, - Sink: metrics.NewSink(metric.Type), - } -} - -type AggregatedMetricData map[string]AggregatedMetric - -func (a AggregatedMetricData) AddSample(sample metrics.Sample) { - if _, exists := a[sample.Metric.Name]; !exists { - a[sample.Metric.Name] = NewAggregatedMetric(sample.Metric) - } - - a[sample.Metric.Name].Sink.Add(sample) -} - -func NewDataModel() DataModel { - return DataModel{ - ScenarioData: ScenarioData{ - MetricData: MetricData{ - container: make(map[string]*metrics.Metric), - }, - Groups: make(map[string]AggregatedMetricData), - }, - Scenarios: make(map[string]AggregatedMetricData), - } -} - -func (d DataModel) GroupStored(groupName string) bool { - _, exists := d.Groups[groupName] - return exists -} - -func (d DataModel) ScenarioStored(scenarioName string) bool { - _, exists := d.Scenarios[scenarioName] - return exists + return nil } func (o *Output) flushMetrics() { @@ -136,28 +69,35 @@ func (o *Output) flushMetrics() { for _, sc := range samples { samples := sc.GetSamples() for _, sample := range samples { - if _, ok := o.dataModel.container[sample.Metric.Name]; !ok { - o.dataModel.container[sample.Metric.Name] = sample.Metric - } + o.storeSample(sample) + } + } +} - if groupName, exists := sample.Tags.Get("group"); exists && len(groupName) > 0 { - normalizedGroupName := strings.TrimPrefix(groupName, "::") +func (o *Output) storeSample(sample metrics.Sample) { + // First, we store the sample data into the global metrics. + o.dataModel.Metrics.storeSample(sample) - if !o.dataModel.GroupStored(normalizedGroupName) { - o.dataModel.Groups[normalizedGroupName] = make(AggregatedMetricData) - } + // Then, we'll proceed to store the sample data into each group + // metrics. However, we need to determine whether the groups tree + // is within a scenario or not. 
+ groupData := o.dataModel.aggregatedGroupData + if scenarioName, hasScenario := sample.Tags.Get("scenario"); hasScenario { + groupData = o.dataModel.groupDataFor(scenarioName) + groupData.Metrics.addSample(sample) + } - o.dataModel.Groups[normalizedGroupName].AddSample(sample) - } + if groupTag, exists := sample.Tags.Get("group"); exists && len(groupTag) > 0 { + normalizedGroupName := strings.TrimPrefix(groupTag, "::") + groupNames := strings.Split(normalizedGroupName, "::") - if scenarioName, exists := sample.Tags.Get("scenario"); exists { - if !o.dataModel.ScenarioStored(scenarioName) { - o.dataModel.Scenarios[scenarioName] = make(AggregatedMetricData) - } + for i, groupName := range groupNames { + groupData.groupDataFor(groupName) + groupData.Groups[groupName].Metrics.addSample(sample) - o.dataModel.Scenarios[scenarioName].AddSample(sample) + if i < len(groupNames)-1 { + groupData = groupData.Groups[groupName] } - } } } @@ -165,74 +105,17 @@ func (o *Output) flushMetrics() { func (o *Output) MetricsReport(summary *lib.Summary, options lib.Options) lib.Report { report := lib.NewReport() - storeMetric := func(dest lib.ReportMetrics, m *metrics.Metric, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { - switch { - case isSkippedMetric(m.Name): - // Do nothing, just skip. - case isHTTPMetric(m.Name): - dest.HTTP[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isExecutionMetric(m.Name): - dest.Execution[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isNetworkMetric(m.Name): - dest.Network[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isBrowserMetric(m.Name): - dest.Browser[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isGrpcMetric(m.Name): - dest.Grpc[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isWebSocketsMetric(m.Name): - dest.WebSocket[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isWebVitalsMetric(m.Name): - dest.WebVitals[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - default: - dest.Miscellaneous[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - } - } - - for _, m := range summary.Metrics { - storeMetric(report.Metrics, m, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - } - - totalChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Total) - successChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Trues) - - report.Checks.Metrics.Total.Values["count"] = totalChecks // Counter metric with total checks - report.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, summary.TestRunDuration) - - checksMetric := summary.Metrics[metrics.ChecksName] - report.Checks.Metrics.Success = lib.NewReportMetricsDataFrom(checksMetric.Type, checksMetric.Contains, checksMetric.Sink, summary.TestRunDuration, options.SummaryTrendStats) // Rate metric with successes (equivalent to the 'checks' metric) + // Populate report checks. 
+ populateReportChecks(&report, summary, options) - report.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks - report.Checks.Metrics.Fail.Values["fails"] = successChecks - report.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks + // Populate root group and nested groups recursively. + populateReportGroup(&report.ReportGroup, o.dataModel.aggregatedGroupData, summary, options) - report.Checks.OrderedChecks = summary.RootGroup.OrderedChecks - - for groupName, aggregatedData := range o.dataModel.Groups { - report.Groups[groupName] = lib.NewReportMetrics() - - for _, metricData := range aggregatedData { - storeMetric( - report.Groups[groupName], - metricData.Metric, - metricData.Sink, - summary.TestRunDuration, - options.SummaryTrendStats, - ) - } - } - - for scenarioName, aggregatedData := range o.dataModel.Scenarios { - report.Scenarios[scenarioName] = lib.NewReportMetrics() - - for _, metricData := range aggregatedData { - storeMetric( - report.Scenarios[scenarioName], - metricData.Metric, - metricData.Sink, - summary.TestRunDuration, - options.SummaryTrendStats, - ) - } + // Populate scenario groups and nested groups recursively. + for scenarioName, scenarioData := range o.dataModel.scenarios { + scenarioReportGroup := lib.NewReportGroup() + populateReportGroup(&scenarioReportGroup, scenarioData, summary, options) + report.Scenarios[scenarioName] = scenarioReportGroup } return report diff --git a/playground/full-summary/api.js b/playground/full-summary/api.js index f8390a9626f..dae480dd8cf 100644 --- a/playground/full-summary/api.js +++ b/playground/full-summary/api.js @@ -20,6 +20,14 @@ export function apiTest() { }) ) + group('authorized crocodiles', () => { + const res = http.get('https://httpbin.org/get') + + check(res, { + 'status is 200 OK': (r) => r.status === 200, + }) + }) + check(res, { 'status is 201 CREATED': (r) => r.status === 201, }) From 40f3e1c52c03e07627b204c8bb1ad5ec45b918bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 30 Oct 2024 11:32:28 +0100 Subject: [PATCH 07/42] Fix summary report metric values Co-authored-by: oleiade --- lib/report.go | 79 ++++++++++++++++++---------------- output/summary/report.go | 51 ++++++++++++++-------- playground/full-summary/api.js | 7 ++- 3 files changed, 78 insertions(+), 59 deletions(-) diff --git a/lib/report.go b/lib/report.go index 04b11055f09..f0ef9943088 100644 --- a/lib/report.go +++ b/lib/report.go @@ -6,66 +6,70 @@ import ( "time" ) -type ReportMetricData struct { +type ReportMetricInfo struct { + Name string Type string Contains string - Values map[string]float64 } -func NewReportMetricsDataFrom( - mType metrics.MetricType, vType metrics.ValueType, sink metrics.Sink, +type ReportMetric struct { + ReportMetricInfo + Values map[string]float64 +} + +func NewReportMetricFrom( + info ReportMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string, -) ReportMetricData { +) ReportMetric { // TODO: we obtain this from [options.SummaryTrendStats] which is a string slice getMetricValues := metricValueGetter(summaryTrendStats) - return ReportMetricData{ - Type: mType.String(), - Contains: vType.String(), - Values: getMetricValues(sink, testDuration), + return ReportMetric{ + ReportMetricInfo: info, + Values: getMetricValues(sink, testDuration), } } type ReportMetrics struct { // HTTP contains report data specific to HTTP metrics and is used // to produce the summary HTTP subsection's content. 
- HTTP map[string]ReportMetricData + HTTP map[string]ReportMetric // Execution contains report data specific to Execution metrics and is used // to produce the summary Execution subsection's content. - Execution map[string]ReportMetricData + Execution map[string]ReportMetric // Network contains report data specific to Network metrics and is used // to produce the summary Network subsection's content. - Network map[string]ReportMetricData + Network map[string]ReportMetric - Browser map[string]ReportMetricData + Browser map[string]ReportMetric - WebVitals map[string]ReportMetricData + WebVitals map[string]ReportMetric - Grpc map[string]ReportMetricData + Grpc map[string]ReportMetric - WebSocket map[string]ReportMetricData `js:"websocket"` + WebSocket map[string]ReportMetric `js:"websocket"` // Miscellaneous contains user-defined metric results as well as extensions metrics - Miscellaneous map[string]ReportMetricData + Miscellaneous map[string]ReportMetric } func NewReportMetrics() ReportMetrics { return ReportMetrics{ - HTTP: make(map[string]ReportMetricData), - Execution: make(map[string]ReportMetricData), - Network: make(map[string]ReportMetricData), - Browser: make(map[string]ReportMetricData), - WebVitals: make(map[string]ReportMetricData), - Grpc: make(map[string]ReportMetricData), - WebSocket: make(map[string]ReportMetricData), - Miscellaneous: make(map[string]ReportMetricData), + HTTP: make(map[string]ReportMetric), + Execution: make(map[string]ReportMetric), + Network: make(map[string]ReportMetric), + Browser: make(map[string]ReportMetric), + WebVitals: make(map[string]ReportMetric), + Grpc: make(map[string]ReportMetric), + WebSocket: make(map[string]ReportMetric), + Miscellaneous: make(map[string]ReportMetric), } } type ReportChecksMetrics struct { - Total ReportMetricData `js:"checks_total"` - Success ReportMetricData `js:"checks_succeeded"` - Fail ReportMetricData `js:"checks_failed"` + Total ReportMetric `js:"checks_total"` + Success ReportMetric `js:"checks_succeeded"` + Fail ReportMetric `js:"checks_failed"` } type ReportChecks struct { @@ -93,11 +97,14 @@ type Report struct { } func NewReport() Report { - initMetricData := func(t metrics.MetricType) ReportMetricData { - return ReportMetricData{ - Type: t.String(), - Contains: metrics.Default.String(), - Values: make(map[string]float64), + initMetricData := func(name string, t metrics.MetricType) ReportMetric { + return ReportMetric{ + ReportMetricInfo: ReportMetricInfo{ + Name: name, + Type: t.String(), + Contains: metrics.Default.String(), + }, + Values: make(map[string]float64), } } @@ -108,9 +115,9 @@ func NewReport() Report { }, Checks: ReportChecks{ Metrics: ReportChecksMetrics{ - Total: initMetricData(metrics.Counter), - Success: initMetricData(metrics.Rate), - Fail: initMetricData(metrics.Rate), + Total: initMetricData("checks_total", metrics.Counter), + Success: initMetricData("checks_succeeded", metrics.Rate), + Fail: initMetricData("checks_failed", metrics.Rate), }, }, Scenarios: make(map[string]ReportGroup), diff --git a/output/summary/report.go b/output/summary/report.go index 464060bd21c..725a159272d 100644 --- a/output/summary/report.go +++ b/output/summary/report.go @@ -87,7 +87,16 @@ func populateReportChecks(report *lib.Report, summary *lib.Summary, options lib. 
report.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, summary.TestRunDuration) checksMetric := summary.Metrics[metrics.ChecksName] - report.Checks.Metrics.Success = lib.NewReportMetricsDataFrom(checksMetric.Type, checksMetric.Contains, checksMetric.Sink, summary.TestRunDuration, options.SummaryTrendStats) + report.Checks.Metrics.Success = lib.NewReportMetricFrom( + lib.ReportMetricInfo{ + Name: "checks_succeeded", + Type: checksMetric.Type.String(), + Contains: checksMetric.Contains.String(), + }, + checksMetric.Sink, + summary.TestRunDuration, + options.SummaryTrendStats, + ) report.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks report.Checks.Metrics.Fail.Values["fails"] = successChecks @@ -97,33 +106,37 @@ func populateReportChecks(report *lib.Report, summary *lib.Summary, options lib. } func populateReportGroup(reportGroup *lib.ReportGroup, groupData aggregatedGroupData, summary *lib.Summary, options lib.Options) { - storeMetric := func(dest lib.ReportMetrics, m *metrics.Metric, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { + storeMetric := func(dest lib.ReportMetrics, info lib.ReportMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { switch { - case isSkippedMetric(m.Name): + case isSkippedMetric(info.Name): // Do nothing, just skip. - case isHTTPMetric(m.Name): - dest.HTTP[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isExecutionMetric(m.Name): - dest.Execution[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isNetworkMetric(m.Name): - dest.Network[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isBrowserMetric(m.Name): - dest.Browser[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isGrpcMetric(m.Name): - dest.Grpc[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isWebSocketsMetric(m.Name): - dest.WebSocket[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) - case isWebVitalsMetric(m.Name): - dest.WebVitals[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + case isHTTPMetric(info.Name): + dest.HTTP[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + case isExecutionMetric(info.Name): + dest.Execution[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + case isNetworkMetric(info.Name): + dest.Network[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + case isBrowserMetric(info.Name): + dest.Browser[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + case isGrpcMetric(info.Name): + dest.Grpc[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + case isWebSocketsMetric(info.Name): + dest.WebSocket[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + case isWebVitalsMetric(info.Name): + dest.WebVitals[info.Name] = lib.NewReportMetricFrom(info, sink, 
summary.TestRunDuration, options.SummaryTrendStats) default: - dest.Miscellaneous[m.Name] = lib.NewReportMetricsDataFrom(m.Type, m.Contains, m.Sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.Miscellaneous[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) } } for _, metricData := range groupData.Metrics { storeMetric( reportGroup.Metrics, - metricData.Metric, + lib.ReportMetricInfo{ + Name: metricData.Metric.Name, + Type: metricData.Metric.Type.String(), + Contains: metricData.Metric.Contains.String(), + }, metricData.Sink, summary.TestRunDuration, options.SummaryTrendStats, diff --git a/playground/full-summary/api.js b/playground/full-summary/api.js index dae480dd8cf..f78f9bafbef 100644 --- a/playground/full-summary/api.js +++ b/playground/full-summary/api.js @@ -19,6 +19,9 @@ export function apiTest() { password: 'onegaishimasu', }) ) + check(res, { + 'status is 201 CREATED': (r) => r.status === 201, + }) group('authorized crocodiles', () => { const res = http.get('https://httpbin.org/get') @@ -27,10 +30,6 @@ export function apiTest() { 'status is 200 OK': (r) => r.status === 200, }) }) - - check(res, { - 'status is 201 CREATED': (r) => r.status === 201, - }) }) group('my crocodiles', () => { From e088fabb88efd14ec80c0677e2765cf3bd442130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 30 Oct 2024 11:53:06 +0100 Subject: [PATCH 08/42] Push multi-scenario script example Co-authored-by: oleiade --- playground/full-summary/script.js | 36 +++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 playground/full-summary/script.js diff --git a/playground/full-summary/script.js b/playground/full-summary/script.js new file mode 100644 index 00000000000..47c268d985c --- /dev/null +++ b/playground/full-summary/script.js @@ -0,0 +1,36 @@ +export {apiTest} from './api.js'; +export {browserTest} from './browser.js'; +export {grpcTest} from './grpc.js'; +export {wsTest} from './ws.js'; + +export const options = { + thresholds: { + 'http_reqs{group: ::auth}': ['count>1'], + 'http_reqs{scenario: api}': ['count>1'], + }, + scenarios: { + api: { + executor: 'per-vu-iterations', + vus: 1, + iterations: 1, + exec: 'apiTest', + }, + browser: { + executor: 'shared-iterations', + options: { + browser: { + type: 'chromium', + }, + }, + exec: 'browserTest', + }, + grpc: { + executor: 'shared-iterations', + exec: 'grpcTest', + }, + ws: { + executor: 'shared-iterations', + exec: 'wsTest', + }, + }, +} From 589b0e61a05565d734eac69ca3b4e74835ef1d89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Mon, 4 Nov 2024 12:00:15 +0100 Subject: [PATCH 09/42] Fix end-of-test summary when there are no checks Co-authored-by: oleiade --- js/summary.js | 16 ++++++++------- lib/report.go | 43 ++++++++++++++++++++++------------------ output/summary/report.go | 12 ++++++++--- 3 files changed, 42 insertions(+), 29 deletions(-) diff --git a/js/summary.js b/js/summary.js index ca8c5eec1d5..3ba567b5184 100644 --- a/js/summary.js +++ b/js/summary.js @@ -472,14 +472,16 @@ function generateTextSummary(data, options, report) { lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('GLOBAL RESULTS') + '\n') // CHECKS - displayMetricsBlock(report.checks.metrics, {sortByName: false}) + if (report.checks !== undefined && report.checks !== null) { + displayMetricsBlock(report.checks.metrics, {sortByName: false}) - 
displayMetricsBlockName('CHECKS', {bold: false}) - for (var i = 0; i < report.checks.ordered_checks.length; i++) { - lines.push(summarizeCheck(metricGroupIndent + metricGroupIndent, report.checks.ordered_checks[i], decorate)) - } - if (report.checks.ordered_checks.length > 0) { - lines.push('') + displayMetricsBlockName('CHECKS', {bold: false}) + for (var i = 0; i < report.checks.ordered_checks.length; i++) { + lines.push(summarizeCheck(metricGroupIndent + metricGroupIndent, report.checks.ordered_checks[i], decorate)) + } + if (report.checks.ordered_checks.length > 0) { + lines.push('') + } } // METRICS diff --git a/lib/report.go b/lib/report.go index f0ef9943088..fa2489c5c89 100644 --- a/lib/report.go +++ b/lib/report.go @@ -77,6 +77,27 @@ type ReportChecks struct { OrderedChecks []*Check } +func NewReportChecks() *ReportChecks { + initChecksMetricData := func(name string, t metrics.MetricType) ReportMetric { + return ReportMetric{ + ReportMetricInfo: ReportMetricInfo{ + Name: name, + Type: t.String(), + Contains: metrics.Default.String(), + }, + Values: make(map[string]float64), + } + } + + return &ReportChecks{ + Metrics: ReportChecksMetrics{ + Total: initChecksMetricData("checks_total", metrics.Counter), + Success: initChecksMetricData("checks_succeeded", metrics.Rate), + Fail: initChecksMetricData("checks_failed", metrics.Rate), + }, + } +} + type ReportGroup struct { Metrics ReportMetrics Groups map[string]ReportGroup @@ -90,36 +111,20 @@ func NewReportGroup() ReportGroup { } type Report struct { - Checks ReportChecks + // Checks is a pointer to ReportChecks because checks metrics + // are not always present (only when checks are used). + Checks *ReportChecks ReportGroup Scenarios map[string]ReportGroup } func NewReport() Report { - initMetricData := func(name string, t metrics.MetricType) ReportMetric { - return ReportMetric{ - ReportMetricInfo: ReportMetricInfo{ - Name: name, - Type: t.String(), - Contains: metrics.Default.String(), - }, - Values: make(map[string]float64), - } - } - return Report{ ReportGroup: ReportGroup{ Metrics: NewReportMetrics(), Groups: make(map[string]ReportGroup), }, - Checks: ReportChecks{ - Metrics: ReportChecksMetrics{ - Total: initMetricData("checks_total", metrics.Counter), - Success: initMetricData("checks_succeeded", metrics.Rate), - Fail: initMetricData("checks_failed", metrics.Rate), - }, - }, Scenarios: make(map[string]ReportGroup), } } diff --git a/output/summary/report.go b/output/summary/report.go index 725a159272d..029123c9e72 100644 --- a/output/summary/report.go +++ b/output/summary/report.go @@ -80,13 +80,19 @@ func newAggregatedMetric(metric *metrics.Metric) aggregatedMetric { } func populateReportChecks(report *lib.Report, summary *lib.Summary, options lib.Options) { - totalChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Total) - successChecks := float64(summary.Metrics[metrics.ChecksName].Sink.(*metrics.RateSink).Trues) + checksMetric, exists := summary.Metrics[metrics.ChecksName] + if !exists { + return + } + + report.Checks = lib.NewReportChecks() + + totalChecks := float64(checksMetric.Sink.(*metrics.RateSink).Total) + successChecks := float64(checksMetric.Sink.(*metrics.RateSink).Trues) report.Checks.Metrics.Total.Values["count"] = totalChecks report.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, summary.TestRunDuration) - checksMetric := summary.Metrics[metrics.ChecksName] report.Checks.Metrics.Success = lib.NewReportMetricFrom( lib.ReportMetricInfo{ Name: "checks_succeeded", 
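The populateReportChecks changes above derive the checks_total, checks_succeeded and checks_failed report metrics from the checks RateSink (its Total and Trues counters) and the test run duration. The standalone sketch below is not part of the patch series and uses made-up numbers; rateSink and counterRate are stand-ins for metrics.RateSink and calculateCounterRate.

package main

import (
	"fmt"
	"time"
)

// rateSink is an illustrative stand-in for metrics.RateSink: Total counts all
// check evaluations, Trues counts the ones that passed.
type rateSink struct {
	Total int64
	Trues int64
}

// counterRate mirrors calculateCounterRate: occurrences per second, guarding
// against a zero duration.
func counterRate(count float64, d time.Duration) float64 {
	if d == 0 {
		return 0
	}
	return count / d.Seconds()
}

func main() {
	sink := rateSink{Total: 120, Trues: 90}
	testRunDuration := 30 * time.Second

	total := float64(sink.Total)
	passes := float64(sink.Trues)
	fails := total - passes

	// checks_total: a counter-style metric with a count and a per-second rate.
	fmt.Printf("checks_total:     count=%.0f rate=%.2f/s\n", total, counterRate(total, testRunDuration))

	// checks_succeeded: the plain checks rate (passes over total).
	fmt.Printf("checks_succeeded: passes=%.0f fails=%.0f rate=%.2f\n", passes, fails, passes/total)

	// checks_failed: note the inverted passes/fails assignment, mirroring how the
	// patch fills Fail.Values so the failure ratio renders as the metric's rate.
	fmt.Printf("checks_failed:    passes=%.0f fails=%.0f rate=%.2f\n", fails, passes, fails/total)
}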
From 099d0c4a8d9d3f0ed7796010318f9a801e22db4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Tue, 5 Nov 2024 14:30:06 +0100 Subject: [PATCH 10/42] Include nested checks to the end-of-test summary Co-authored-by: oleiade --- js/summary.js | 53 ++++++---- lib/models.go | 2 +- lib/report.go | 5 +- output/summary/report.go | 178 +++++++++++++++++++++++--------- output/summary/summary.go | 30 ++++-- playground/full-summary/api.js | 8 +- playground/full-summary/grpc.js | 2 +- 7 files changed, 189 insertions(+), 89 deletions(-) diff --git a/js/summary.js b/js/summary.js index 3ba567b5184..397c706ac8b 100644 --- a/js/summary.js +++ b/js/summary.js @@ -1,3 +1,6 @@ +/** + * @typedef {{sortByName: boolean, bold: boolean, indent: string, metricsBlockIndent: string}} DisplayOptions + */ var forEach = function (obj, callback) { for (var key in obj) { if (obj.hasOwnProperty(key)) { @@ -438,13 +441,12 @@ function generateTextSummary(data, options, report) { /** * - * @typedef {{bold: boolean}} DisplayMetricsSectionNameOptions * @param sectionName - * @param options [DisplayMetricsSectionNameOptions={bold: true}] + * @param {DisplayOptions} opts */ - const displayMetricsBlockName = (sectionName, options) => { + const displayMetricsBlockName = (sectionName, opts) => { let bold = true; - if (options && options.bold === false) { + if (opts && opts.bold === false) { bold = false } @@ -455,35 +457,48 @@ function generateTextSummary(data, options, report) { } let indent = ' ' - if (options && options.metricsBlockIndent) { - indent += options.metricsBlockIndent + if (opts && opts.metricsBlockIndent) { + indent += opts.metricsBlockIndent } lines.push(indent + normalizedSectionName) } + /** + * + * @param {Object[]} sectionMetrics + * @param {DisplayOptions} opts + */ const displayMetricsBlock = (sectionMetrics, opts) => { const summarizeOpts = Object.assign({}, mergedOpts, opts) Array.prototype.push.apply(lines, summarizeMetrics(summarizeOpts, {metrics: sectionMetrics}, decorate)) lines.push('') } - // START OF GLOBAL RESULTS - // TITLE - lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('GLOBAL RESULTS') + '\n') - - // CHECKS - if (report.checks !== undefined && report.checks !== null) { - displayMetricsBlock(report.checks.metrics, {sortByName: false}) - displayMetricsBlockName('CHECKS', {bold: false}) - for (var i = 0; i < report.checks.ordered_checks.length; i++) { - lines.push(summarizeCheck(metricGroupIndent + metricGroupIndent, report.checks.ordered_checks[i], decorate)) + /** + * + * @param {Object[]} checks + * @param {Partial} opts + */ + const displayChecks = (checks, opts = { indent: '' }) => { + if (checks === undefined || checks === null) { + return + } + displayMetricsBlock(checks.metrics, {...opts, indent: opts.indent + defaultIndent, sortByName: false}) + for (var i = 0; i < checks.ordered_checks.length; i++) { + lines.push(summarizeCheck(metricGroupIndent + metricGroupIndent + opts.indent, checks.ordered_checks[i], decorate)) } - if (report.checks.ordered_checks.length > 0) { + if (checks.ordered_checks.length > 0) { lines.push('') } } + // START OF GLOBAL RESULTS + // TITLE + lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('GLOBAL RESULTS') + '\n') + + // CHECKS + displayChecks(report.checks) // METRICS forEach(report.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it @@ -506,9 +521,8 @@ function generateTextSummary(data, options, report) { const summarize = 
(prefix, indent) => { return (groupName, groupData) => { - console.log('summarizeNestedGroups', groupName, JSON.stringify(groupData)) - lines.push(metricGroupIndent + indent + prefix + defaultIndent + boldify(`GROUP: ${groupName}`) + '\n') + displayChecks(groupData.checks, {indent: indent}) forEach(groupData.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it if (Object.keys(sectionMetrics).length === 0) { @@ -550,6 +564,7 @@ function generateTextSummary(data, options, report) { if (report.scenarios !== undefined) { forEach(report.scenarios, (scenarioName, scenarioData) => { lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify(`SCENARIO: ${scenarioName}`) + '\n') + displayChecks(scenarioData.checks) forEach(scenarioData.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it if (Object.keys(sectionMetrics).length === 0) { diff --git a/lib/models.go b/lib/models.go index 30cb6b13b72..7b10d44fecd 100644 --- a/lib/models.go +++ b/lib/models.go @@ -207,7 +207,7 @@ type Check struct { Fails int64 `json:"fails"` } -// NewCheck creates a new check with the given name and parent group. The group may not be nil. +// NewCheck creates a new check with the given name and parent group. The group must not be nil. func NewCheck(name string, group *Group) (*Check, error) { if strings.Contains(name, GroupSeparator) { return nil, ErrNameContainsGroupSeparator diff --git a/lib/report.go b/lib/report.go index fa2489c5c89..9fa934d77f5 100644 --- a/lib/report.go +++ b/lib/report.go @@ -99,6 +99,7 @@ func NewReportChecks() *ReportChecks { } type ReportGroup struct { + Checks *ReportChecks // Not always present, thus we use a pointer. Metrics ReportMetrics Groups map[string]ReportGroup } @@ -111,10 +112,6 @@ func NewReportGroup() ReportGroup { } type Report struct { - // Checks is a pointer to ReportChecks because checks metrics - // are not always present (only when checks are used). - Checks *ReportChecks - ReportGroup Scenarios map[string]ReportGroup } diff --git a/output/summary/report.go b/output/summary/report.go index 029123c9e72..e52ef4fa9ef 100644 --- a/output/summary/report.go +++ b/output/summary/report.go @@ -2,6 +2,7 @@ package summary import ( "strings" + "sync/atomic" "time" "go.k6.io/k6/lib" @@ -13,6 +14,22 @@ type dataModel struct { scenarios map[string]aggregatedGroupData } +// storeSample differs from addSample in that it stores the metric and the metric sink from the sample, +// while addSample updates the internally stored metric sink with the sample, which differs from the +// original metric sink. 
+func (d dataModel) storeSample(sample metrics.Sample) { + d.metrics.storeSample(sample) + + if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { + check := d.checks.checkFor(checkName) + if sample.Value == 0 { + atomic.AddInt64(&check.Fails, 1) + } else { + atomic.AddInt64(&check.Passes, 1) + } + } +} + func newDataModel() dataModel { return dataModel{ aggregatedGroupData: newAggregatedGroupData(), @@ -29,23 +46,41 @@ func (d dataModel) groupDataFor(scenario string) aggregatedGroupData { } type aggregatedGroupData struct { - Metrics aggregatedMetricData - Groups map[string]aggregatedGroupData + checks *aggregatedChecksData + metrics aggregatedMetricData + groupsData map[string]aggregatedGroupData } func newAggregatedGroupData() aggregatedGroupData { return aggregatedGroupData{ - Metrics: make(map[string]aggregatedMetric), - Groups: make(map[string]aggregatedGroupData), + checks: newAggregatedChecksData(), + metrics: make(map[string]aggregatedMetric), + groupsData: make(map[string]aggregatedGroupData), } } -func (d aggregatedGroupData) groupDataFor(group string) aggregatedGroupData { - if groupData, exists := d.Groups[group]; exists { +func (a aggregatedGroupData) groupDataFor(group string) aggregatedGroupData { + if groupData, exists := a.groupsData[group]; exists { return groupData } - d.Groups[group] = newAggregatedGroupData() - return d.Groups[group] + a.groupsData[group] = newAggregatedGroupData() + return a.groupsData[group] +} + +// addSample differs from storeSample in that it updates the internally stored metric sink with the sample, +// which differs from the original metric sink, while storeSample stores the metric and the metric sink from +// the sample. +func (a aggregatedGroupData) addSample(sample metrics.Sample) { + a.metrics.addSample(sample) + + if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { + check := a.checks.checkFor(checkName) + if sample.Value == 0 { + atomic.AddInt64(&check.Fails, 1) + } else { + atomic.AddInt64(&check.Passes, 1) + } + } } type aggregatedMetricData map[string]aggregatedMetric @@ -79,63 +114,67 @@ func newAggregatedMetric(metric *metrics.Metric) aggregatedMetric { } } -func populateReportChecks(report *lib.Report, summary *lib.Summary, options lib.Options) { - checksMetric, exists := summary.Metrics[metrics.ChecksName] - if !exists { - return - } - - report.Checks = lib.NewReportChecks() - - totalChecks := float64(checksMetric.Sink.(*metrics.RateSink).Total) - successChecks := float64(checksMetric.Sink.(*metrics.RateSink).Trues) - - report.Checks.Metrics.Total.Values["count"] = totalChecks - report.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, summary.TestRunDuration) - - report.Checks.Metrics.Success = lib.NewReportMetricFrom( - lib.ReportMetricInfo{ - Name: "checks_succeeded", - Type: checksMetric.Type.String(), - Contains: checksMetric.Contains.String(), - }, - checksMetric.Sink, - summary.TestRunDuration, - options.SummaryTrendStats, - ) +type aggregatedChecksData struct { + checks map[string]*lib.Check + orderedChecks []*lib.Check +} - report.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks - report.Checks.Metrics.Fail.Values["fails"] = successChecks - report.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks +func newAggregatedChecksData() *aggregatedChecksData { + return &aggregatedChecksData{ + checks: 
make(map[string]*lib.Check), + orderedChecks: make([]*lib.Check, 0), + } +} - report.Checks.OrderedChecks = summary.RootGroup.OrderedChecks +func (a *aggregatedChecksData) checkFor(name string) *lib.Check { + check, ok := a.checks[name] + if !ok { + var err error + check, err = lib.NewCheck(name, &lib.Group{}) // FIXME: Do we really need the group? + if err != nil { + panic(err) // This should never happen + } + a.checks[name] = check + a.orderedChecks = append(a.orderedChecks, check) + } + return check } -func populateReportGroup(reportGroup *lib.ReportGroup, groupData aggregatedGroupData, summary *lib.Summary, options lib.Options) { +func populateReportGroup( + reportGroup *lib.ReportGroup, + groupData aggregatedGroupData, + testRunDuration time.Duration, + summaryTrendStats []string, +) { + // First, we populate the checks metrics, which are treated independently. + populateReportChecks(reportGroup, groupData, testRunDuration, summaryTrendStats) + storeMetric := func(dest lib.ReportMetrics, info lib.ReportMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { + reportMetric := lib.NewReportMetricFrom(info, sink, testDuration, summaryTrendStats) + switch { case isSkippedMetric(info.Name): // Do nothing, just skip. case isHTTPMetric(info.Name): - dest.HTTP[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.HTTP[info.Name] = reportMetric case isExecutionMetric(info.Name): - dest.Execution[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.Execution[info.Name] = reportMetric case isNetworkMetric(info.Name): - dest.Network[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.Network[info.Name] = reportMetric case isBrowserMetric(info.Name): - dest.Browser[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.Browser[info.Name] = reportMetric case isGrpcMetric(info.Name): - dest.Grpc[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.Grpc[info.Name] = reportMetric case isWebSocketsMetric(info.Name): - dest.WebSocket[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.WebSocket[info.Name] = reportMetric case isWebVitalsMetric(info.Name): - dest.WebVitals[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.WebVitals[info.Name] = reportMetric default: - dest.Miscellaneous[info.Name] = lib.NewReportMetricFrom(info, sink, summary.TestRunDuration, options.SummaryTrendStats) + dest.Miscellaneous[info.Name] = reportMetric } } - for _, metricData := range groupData.Metrics { + for _, metricData := range groupData.metrics { storeMetric( reportGroup.Metrics, lib.ReportMetricInfo{ @@ -144,18 +183,57 @@ func populateReportGroup(reportGroup *lib.ReportGroup, groupData aggregatedGroup Contains: metricData.Metric.Contains.String(), }, metricData.Sink, - summary.TestRunDuration, - options.SummaryTrendStats, + testRunDuration, + summaryTrendStats, ) } - for groupName, subGroupData := range groupData.Groups { + for groupName, subGroupData := range groupData.groupsData { subReportGroup := lib.NewReportGroup() - populateReportGroup(&subReportGroup, subGroupData, summary, options) + populateReportGroup(&subReportGroup, subGroupData, testRunDuration, summaryTrendStats) 
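+		// The recursive call above has populated the subgroup report; attach it to its parent group below.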
reportGroup.Groups[groupName] = subReportGroup } } +// FIXME: This function is a bit flurry, we should consider refactoring it. +// For instance, it would be possible to directly construct these metrics on-the-fly. +func populateReportChecks( + reportGroup *lib.ReportGroup, + groupData aggregatedGroupData, + testRunDuration time.Duration, + summaryTrendStats []string, +) { + checksMetric, exists := groupData.metrics[metrics.ChecksName] + if !exists { + return + } + + reportGroup.Checks = lib.NewReportChecks() + + totalChecks := float64(checksMetric.Sink.(*metrics.RateSink).Total) + successChecks := float64(checksMetric.Sink.(*metrics.RateSink).Trues) + + reportGroup.Checks.Metrics.Total.Values["count"] = totalChecks + reportGroup.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, testRunDuration) + + reportGroup.Checks.Metrics.Success = lib.NewReportMetricFrom( + lib.ReportMetricInfo{ + Name: "checks_succeeded", + Type: checksMetric.Metric.Type.String(), + Contains: checksMetric.Metric.Contains.String(), + }, + checksMetric.Sink, + testRunDuration, + summaryTrendStats, + ) + + reportGroup.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks + reportGroup.Checks.Metrics.Fail.Values["fails"] = successChecks + reportGroup.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks + + reportGroup.Checks.OrderedChecks = groupData.checks.orderedChecks +} + func isHTTPMetric(metricName string) bool { return oneOfMetrics(metricName, metrics.HTTPReqsName, diff --git a/output/summary/summary.go b/output/summary/summary.go index 2d9b0208115..10a33f0a7b1 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -53,10 +53,10 @@ func (o *Output) Start() error { func (o *Output) Stop() error { o.periodicFlusher.Stop() - for groupName, aggregatedData := range o.dataModel.Groups { + for groupName, aggregatedData := range o.dataModel.groupsData { o.logger.Warning(groupName) - for metricName, sink := range aggregatedData.Metrics { + for metricName, sink := range aggregatedData.metrics { o.logger.Warning(fmt.Sprintf(" %s: %+v", metricName, sink)) } } @@ -76,7 +76,7 @@ func (o *Output) flushMetrics() { func (o *Output) storeSample(sample metrics.Sample) { // First, we store the sample data into the global metrics. - o.dataModel.Metrics.storeSample(sample) + o.dataModel.storeSample(sample) // Then, we'll proceed to store the sample data into each group // metrics. However, we need to determine whether the groups tree @@ -84,7 +84,7 @@ func (o *Output) storeSample(sample metrics.Sample) { groupData := o.dataModel.aggregatedGroupData if scenarioName, hasScenario := sample.Tags.Get("scenario"); hasScenario { groupData = o.dataModel.groupDataFor(scenarioName) - groupData.Metrics.addSample(sample) + groupData.addSample(sample) } if groupTag, exists := sample.Tags.Get("group"); exists && len(groupTag) > 0 { @@ -93,10 +93,10 @@ func (o *Output) storeSample(sample metrics.Sample) { for i, groupName := range groupNames { groupData.groupDataFor(groupName) - groupData.Groups[groupName].Metrics.addSample(sample) + groupData.groupsData[groupName].addSample(sample) if i < len(groupNames)-1 { - groupData = groupData.Groups[groupName] + groupData = groupData.groupsData[groupName] } } } @@ -105,16 +105,26 @@ func (o *Output) storeSample(sample metrics.Sample) { func (o *Output) MetricsReport(summary *lib.Summary, options lib.Options) lib.Report { report := lib.NewReport() - // Populate report checks. 
- populateReportChecks(&report, summary, options) + testRunDuration := summary.TestRunDuration + summaryTrendStats := options.SummaryTrendStats // Populate root group and nested groups recursively. - populateReportGroup(&report.ReportGroup, o.dataModel.aggregatedGroupData, summary, options) + populateReportGroup( + &report.ReportGroup, + o.dataModel.aggregatedGroupData, + testRunDuration, + summaryTrendStats, + ) // Populate scenario groups and nested groups recursively. for scenarioName, scenarioData := range o.dataModel.scenarios { scenarioReportGroup := lib.NewReportGroup() - populateReportGroup(&scenarioReportGroup, scenarioData, summary, options) + populateReportGroup( + &scenarioReportGroup, + scenarioData, + testRunDuration, + summaryTrendStats, + ) report.Scenarios[scenarioName] = scenarioReportGroup } diff --git a/playground/full-summary/api.js b/playground/full-summary/api.js index f78f9bafbef..5d900b5277b 100644 --- a/playground/full-summary/api.js +++ b/playground/full-summary/api.js @@ -4,8 +4,8 @@ import {check, group} from 'k6' export function apiTest() { const res = http.get('https://httpbin.org/get') check(res, { - 'test api is up': (r) => r.status === 200, - 'test api is 500': (r) => r.status === 500, + 'httpbin.org is up': (r) => r.status === 200, + 'httpbin.org is down': (r) => r.status === 500, }) group('auth', () => { @@ -27,7 +27,7 @@ export function apiTest() { const res = http.get('https://httpbin.org/get') check(res, { - 'status is 200 OK': (r) => r.status === 200, + 'authorized crocodiles are 200 OK': (r) => r.status === 200, }) }) }) @@ -36,7 +36,7 @@ export function apiTest() { const res = http.get('https://httpbin.org/get') check(res, { - 'status is 200 OK': (r) => r.status === 200, + 'my crocodiles are 200 OK': (r) => r.status === 200, }) }) } \ No newline at end of file diff --git a/playground/full-summary/grpc.js b/playground/full-summary/grpc.js index 6b22388fbd9..f218cf5b3cc 100644 --- a/playground/full-summary/grpc.js +++ b/playground/full-summary/grpc.js @@ -16,7 +16,7 @@ export function grpcTest() { longitude: -747127767 }) - check(response, {"status is OK": (r) => r && r.status === grpc.StatusOK}); + check(response, {"gRPCC status is OK": (r) => r && r.status === grpc.StatusOK}); console.log(JSON.stringify(response.message)) client.close() From 9beea6987a1521eced9d21aac13f759e2d961671 Mon Sep 17 00:00:00 2001 From: oleiade Date: Tue, 5 Nov 2024 15:48:52 +0100 Subject: [PATCH 11/42] Rename storeSample and addSample methods for clarity --- output/summary/report.go | 75 ++++++++++++++++++++------------------- output/summary/summary.go | 27 +++++++++++--- 2 files changed, 60 insertions(+), 42 deletions(-) diff --git a/output/summary/report.go b/output/summary/report.go index e52ef4fa9ef..1a9c80ba3c6 100644 --- a/output/summary/report.go +++ b/output/summary/report.go @@ -14,22 +14,6 @@ type dataModel struct { scenarios map[string]aggregatedGroupData } -// storeSample differs from addSample in that it stores the metric and the metric sink from the sample, -// while addSample updates the internally stored metric sink with the sample, which differs from the -// original metric sink. 
-func (d dataModel) storeSample(sample metrics.Sample) { - d.metrics.storeSample(sample) - - if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { - check := d.checks.checkFor(checkName) - if sample.Value == 0 { - atomic.AddInt64(&check.Fails, 1) - } else { - atomic.AddInt64(&check.Passes, 1) - } - } -} - func newDataModel() dataModel { return dataModel{ aggregatedGroupData: newAggregatedGroupData(), @@ -46,16 +30,16 @@ func (d dataModel) groupDataFor(scenario string) aggregatedGroupData { } type aggregatedGroupData struct { - checks *aggregatedChecksData - metrics aggregatedMetricData - groupsData map[string]aggregatedGroupData + checks *aggregatedChecksData + aggregatedMetrics aggregatedMetricData + groupsData map[string]aggregatedGroupData } func newAggregatedGroupData() aggregatedGroupData { return aggregatedGroupData{ - checks: newAggregatedChecksData(), - metrics: make(map[string]aggregatedMetric), - groupsData: make(map[string]aggregatedGroupData), + checks: newAggregatedChecksData(), + aggregatedMetrics: make(map[string]aggregatedMetric), + groupsData: make(map[string]aggregatedGroupData), } } @@ -67,11 +51,11 @@ func (a aggregatedGroupData) groupDataFor(group string) aggregatedGroupData { return a.groupsData[group] } -// addSample differs from storeSample in that it updates the internally stored metric sink with the sample, -// which differs from the original metric sink, while storeSample stores the metric and the metric sink from +// addSample differs from relayMetricFrom in that it updates the internally stored metric sink with the sample, +// which differs from the original metric sink, while relayMetricFrom stores the metric and the metric sink from // the sample. func (a aggregatedGroupData) addSample(sample metrics.Sample) { - a.metrics.addSample(sample) + a.aggregatedMetrics.addSample(sample) if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { check := a.checks.checkFor(checkName) @@ -83,17 +67,15 @@ func (a aggregatedGroupData) addSample(sample metrics.Sample) { } } +// aggregatedMetricData is a container that can either hold a reference to a k6 metric stored in the registry, or +// hold a pointer to such metric but keeping a separated Sink of values in order to keep an aggregated view of the +// metric values. The latter is useful for tracking aggregated metric values specific to a group or scenario. type aggregatedMetricData map[string]aggregatedMetric -func (a aggregatedMetricData) addSample(sample metrics.Sample) { - if _, exists := a[sample.Metric.Name]; !exists { - a[sample.Metric.Name] = newAggregatedMetric(sample.Metric) - } - - a[sample.Metric.Name].Sink.Add(sample) -} - -func (a aggregatedMetricData) storeSample(sample metrics.Sample) { +// relayMetricFrom stores the metric and the metric sink from the sample. It makes the underlying metric of our +// report's aggregatedMetricData point directly to a metric in the k6 registry, and relies on that specific pointed +// at metrics internal state for its computations. +func (a aggregatedMetricData) relayMetricFrom(sample metrics.Sample) { if _, exists := a[sample.Metric.Name]; !exists { a[sample.Metric.Name] = aggregatedMetric{ Metric: sample.Metric, @@ -102,9 +84,28 @@ func (a aggregatedMetricData) storeSample(sample metrics.Sample) { } } +// addSample stores the value of the sample in a separate internal sink completely detached from the underlying metrics. 
+// This allows to keep an aggregated view of the values specific to a group or scenario. +func (a aggregatedMetricData) addSample(sample metrics.Sample) { + if _, exists := a[sample.Metric.Name]; !exists { + a[sample.Metric.Name] = newAggregatedMetric(sample.Metric) + } + + a[sample.Metric.Name].Sink.Add(sample) +} + +// FIXME (@joan): rename this to make it explicit this is different from an actual k6 metric, and this is used +// only to keep an aggregated view of specific metric-check-group-scenario-thresholds set of values. type aggregatedMetric struct { + // FIXME (@joan): Drop this and replace it with a concrete copy of the metric data we want to track + // to avoid any potential confusion. Metric *metrics.Metric - Sink metrics.Sink + + // FIXME (@joan): Introduce our own way of tracking thresholds, and whether they're crossed or not. + // Without relying on the internal submetrics the engine maintains specifically for thresholds. + // Thresholds []OurThreshold // { crossed: boolean } + + Sink metrics.Sink } func newAggregatedMetric(metric *metrics.Metric) aggregatedMetric { @@ -174,7 +175,7 @@ func populateReportGroup( } } - for _, metricData := range groupData.metrics { + for _, metricData := range groupData.aggregatedMetrics { storeMetric( reportGroup.Metrics, lib.ReportMetricInfo{ @@ -203,7 +204,7 @@ func populateReportChecks( testRunDuration time.Duration, summaryTrendStats []string, ) { - checksMetric, exists := groupData.metrics[metrics.ChecksName] + checksMetric, exists := groupData.aggregatedMetrics[metrics.ChecksName] if !exists { return } diff --git a/output/summary/summary.go b/output/summary/summary.go index 10a33f0a7b1..c7f3c76b3d5 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -3,6 +3,7 @@ package summary import ( "fmt" "strings" + "sync/atomic" "time" "go.k6.io/k6/lib" @@ -56,7 +57,7 @@ func (o *Output) Stop() error { for groupName, aggregatedData := range o.dataModel.groupsData { o.logger.Warning(groupName) - for metricName, sink := range aggregatedData.metrics { + for metricName, sink := range aggregatedData.aggregatedMetrics { o.logger.Warning(fmt.Sprintf(" %s: %+v", metricName, sink)) } } @@ -69,14 +70,14 @@ func (o *Output) flushMetrics() { for _, sc := range samples { samples := sc.GetSamples() for _, sample := range samples { - o.storeSample(sample) + o.flushSample(sample) } } } -func (o *Output) storeSample(sample metrics.Sample) { - // First, we store the sample data into the global metrics. - o.dataModel.storeSample(sample) +func (o *Output) flushSample(sample metrics.Sample) { + // First, we store the sample data into the metrics stored at the k6 metrics registry level. + o.storeSample(sample) // Then, we'll proceed to store the sample data into each group // metrics. However, we need to determine whether the groups tree @@ -130,3 +131,19 @@ func (o *Output) MetricsReport(summary *lib.Summary, options lib.Options) lib.Re return report } + +// storeSample relays the sample to the k6 metrics registry relevant metric. +// +// If it's a check-specific metric, it will also update the check's pass/fail counters. 
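+//
+// Unlike aggregatedGroupData.addSample, it does not keep a separate sink: the stored entry points directly at the
+// metric (and its sink) held in the k6 metrics registry.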
+func (o *Output) storeSample(sample metrics.Sample) { + o.dataModel.aggregatedMetrics.relayMetricFrom(sample) + + if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { + check := o.dataModel.checks.checkFor(checkName) + if sample.Value == 0 { + atomic.AddInt64(&check.Fails, 1) + } else { + atomic.AddInt64(&check.Passes, 1) + } + } +} From ce95d3610d33b23c75804c118f26bc3a61b1f20f Mon Sep 17 00:00:00 2001 From: oleiade Date: Tue, 3 Dec 2024 14:06:28 +0100 Subject: [PATCH 12/42] WIP --- output/summary/summary.go | 20 ++++++++++++++++++++ playground/full-summary/script.js | 2 +- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/output/summary/summary.go b/output/summary/summary.go index c7f3c76b3d5..291dcd5461d 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -79,6 +79,26 @@ func (o *Output) flushSample(sample metrics.Sample) { // First, we store the sample data into the metrics stored at the k6 metrics registry level. o.storeSample(sample) + hasThresholds := func(metric *metrics.Metric) bool { + return metric.Thresholds.Thresholds != nil && len(metric.Thresholds.Thresholds) > 0 + } + + printThresholds := func(metric *metrics.Metric) { + for _, threshold := range metric.Thresholds.Thresholds { + fmt.Printf("Metric=%s, Threshold=%+v\n", metric.Name, threshold) + } + } + + if hasThresholds(sample.Metric) { + printThresholds(sample.Metric) + } + + for _, submetric := range sample.Metric.Submetrics { + if hasThresholds(submetric.Metric) { + printThresholds(submetric.Metric) + } + } + // Then, we'll proceed to store the sample data into each group // metrics. However, we need to determine whether the groups tree // is within a scenario or not. diff --git a/playground/full-summary/script.js b/playground/full-summary/script.js index 47c268d985c..21fd939bf52 100644 --- a/playground/full-summary/script.js +++ b/playground/full-summary/script.js @@ -5,7 +5,7 @@ export {wsTest} from './ws.js'; export const options = { thresholds: { - 'http_reqs{group: ::auth}': ['count>1'], + 'http_reqs{group: ::auth}': ['count>1', 'count<5'], 'http_reqs{scenario: api}': ['count>1'], }, scenarios: { From 6bceb9c985f45008e27226694dec3331b9a4c67b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 4 Dec 2024 12:13:01 +0100 Subject: [PATCH 13/42] Store Threholds to Summary output and its report --- js/runner.go | 4 +-- js/summary.js | 2 -- lib/report.go | 15 ++++++++ output/summary/report.go | 56 +++++++++++++++++++++++++---- output/summary/summary.go | 60 +++++++++++-------------------- playground/full-summary/script.js | 5 ++- 6 files changed, 91 insertions(+), 51 deletions(-) diff --git a/js/runner.go b/js/runner.go index 05a4808e2bf..04b39520cdf 100644 --- a/js/runner.go +++ b/js/runner.go @@ -350,14 +350,12 @@ func (r *Runner) IsExecutable(name string) bool { // HandleSummary calls the specified summary callback, if supplied. 
func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary, report lib.Report) (map[string]io.Reader, error) { - fmt.Println(report) - summaryDataForJS := summarizeMetricsToObject(summary, r.Bundle.Options, r.setupData) out := make(chan metrics.SampleContainer, 100) defer close(out) - go func() { // discard all metrics + go func() { // discard all metrics for range out { //nolint:revive } }() diff --git a/js/summary.js b/js/summary.js index 397c706ac8b..3feb52b74c7 100644 --- a/js/summary.js +++ b/js/summary.js @@ -539,8 +539,6 @@ function generateTextSummary(data, options, report) { } const summarizeNestedGroups = (groupName, groupData) => { - console.log('summarizeNestedGroups', groupName, JSON.stringify(groupData)) - lines.push(metricGroupIndent + groupPrefix + ' ' + boldify(`GROUP: ${groupName}`) + '\n') forEach(groupData.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it diff --git a/lib/report.go b/lib/report.go index 9fa934d77f5..d719b4c08e4 100644 --- a/lib/report.go +++ b/lib/report.go @@ -98,6 +98,19 @@ func NewReportChecks() *ReportChecks { } } +type ReportThreshold struct { + Source string `js:"source"` + Metric ReportMetric `js:"metric"` + Ok bool `js:"ok"` +} + +type ReportThresholds map[string][]*ReportThreshold + +func NewReportThresholds() ReportThresholds { + thresholds := make(ReportThresholds) + return thresholds +} + type ReportGroup struct { Checks *ReportChecks // Not always present, thus we use a pointer. Metrics ReportMetrics @@ -112,12 +125,14 @@ func NewReportGroup() ReportGroup { } type Report struct { + ReportThresholds ReportGroup Scenarios map[string]ReportGroup } func NewReport() Report { return Report{ + ReportThresholds: NewReportThresholds(), ReportGroup: ReportGroup{ Metrics: NewReportMetrics(), Groups: make(map[string]ReportGroup), diff --git a/output/summary/report.go b/output/summary/report.go index 1a9c80ba3c6..117f6e8d082 100644 --- a/output/summary/report.go +++ b/output/summary/report.go @@ -10,6 +10,7 @@ import ( ) type dataModel struct { + thresholds aggregatedGroupData scenarios map[string]aggregatedGroupData } @@ -21,7 +22,7 @@ func newDataModel() dataModel { } } -func (d dataModel) groupDataFor(scenario string) aggregatedGroupData { +func (d *dataModel) groupDataFor(scenario string) aggregatedGroupData { if groupData, exists := d.scenarios[scenario]; exists { return groupData } @@ -29,6 +30,20 @@ func (d dataModel) groupDataFor(scenario string) aggregatedGroupData { return d.scenarios[scenario] } +func (d *dataModel) storeThresholdsFor(m *metrics.Metric) { + for _, threshold := range m.Thresholds.Thresholds { + d.thresholds = append(d.thresholds, struct { + *metrics.Threshold + Metric *metrics.Metric + }{Metric: m, Threshold: threshold}) + } +} + +type thresholds []struct { + *metrics.Threshold + Metric *metrics.Metric +} + type aggregatedGroupData struct { checks *aggregatedChecksData aggregatedMetrics aggregatedMetricData @@ -76,11 +91,9 @@ type aggregatedMetricData map[string]aggregatedMetric // report's aggregatedMetricData point directly to a metric in the k6 registry, and relies on that specific pointed // at metrics internal state for its computations. 
func (a aggregatedMetricData) relayMetricFrom(sample metrics.Sample) { - if _, exists := a[sample.Metric.Name]; !exists { - a[sample.Metric.Name] = aggregatedMetric{ - Metric: sample.Metric, - Sink: sample.Metric.Sink, - } + a[sample.Metric.Name] = aggregatedMetric{ + Metric: sample.Metric, + Sink: sample.Metric.Sink, } } @@ -150,6 +163,7 @@ func populateReportGroup( // First, we populate the checks metrics, which are treated independently. populateReportChecks(reportGroup, groupData, testRunDuration, summaryTrendStats) + // Then, we store the metrics. storeMetric := func(dest lib.ReportMetrics, info lib.ReportMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { reportMetric := lib.NewReportMetricFrom(info, sink, testDuration, summaryTrendStats) @@ -189,6 +203,7 @@ func populateReportGroup( ) } + // Finally, we keep moving down the hierarchy and populate the nested groups. for groupName, subGroupData := range groupData.groupsData { subReportGroup := lib.NewReportGroup() populateReportGroup(&subReportGroup, subGroupData, testRunDuration, summaryTrendStats) @@ -196,6 +211,35 @@ func populateReportGroup( } } +func reportThresholds( + thresholds thresholds, + testRunDuration time.Duration, + summaryTrendStats []string, +) lib.ReportThresholds { + rts := make(map[string][]*lib.ReportThreshold, len(thresholds)) + for _, threshold := range thresholds { + metric := threshold.Metric + if _, exists := rts[metric.Name]; !exists { + rts[metric.Name] = make([]*lib.ReportThreshold, 0) + } + rts[metric.Name] = append(rts[metric.Name], &lib.ReportThreshold{ + Source: threshold.Source, + Metric: lib.NewReportMetricFrom( + lib.ReportMetricInfo{ + Name: metric.Name, + Type: metric.Type.String(), + Contains: metric.Contains.String(), + }, + metric.Sink, + testRunDuration, + summaryTrendStats, + ), + Ok: !threshold.LastFailed, + }) + } + return rts +} + // FIXME: This function is a bit flurry, we should consider refactoring it. // For instance, it would be possible to directly construct these metrics on-the-fly. func populateReportChecks( diff --git a/output/summary/summary.go b/output/summary/summary.go index 291dcd5461d..91e7773894b 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -1,7 +1,6 @@ package summary import ( - "fmt" "strings" "sync/atomic" "time" @@ -53,15 +52,6 @@ func (o *Output) Start() error { func (o *Output) Stop() error { o.periodicFlusher.Stop() - - for groupName, aggregatedData := range o.dataModel.groupsData { - o.logger.Warning(groupName) - - for metricName, sink := range aggregatedData.aggregatedMetrics { - o.logger.Warning(fmt.Sprintf(" %s: %+v", metricName, sink)) - } - } - return nil } @@ -79,26 +69,6 @@ func (o *Output) flushSample(sample metrics.Sample) { // First, we store the sample data into the metrics stored at the k6 metrics registry level. o.storeSample(sample) - hasThresholds := func(metric *metrics.Metric) bool { - return metric.Thresholds.Thresholds != nil && len(metric.Thresholds.Thresholds) > 0 - } - - printThresholds := func(metric *metrics.Metric) { - for _, threshold := range metric.Thresholds.Thresholds { - fmt.Printf("Metric=%s, Threshold=%+v\n", metric.Name, threshold) - } - } - - if hasThresholds(sample.Metric) { - printThresholds(sample.Metric) - } - - for _, submetric := range sample.Metric.Submetrics { - if hasThresholds(submetric.Metric) { - printThresholds(submetric.Metric) - } - } - // Then, we'll proceed to store the sample data into each group // metrics. 
However, we need to determine whether the groups tree // is within a scenario or not. @@ -109,17 +79,17 @@ func (o *Output) flushSample(sample metrics.Sample) { } if groupTag, exists := sample.Tags.Get("group"); exists && len(groupTag) > 0 { - normalizedGroupName := strings.TrimPrefix(groupTag, "::") - groupNames := strings.Split(normalizedGroupName, "::") + normalizedGroupName := strings.TrimPrefix(groupTag, lib.GroupSeparator) + groupNames := strings.Split(normalizedGroupName, lib.GroupSeparator) - for i, groupName := range groupNames { + // We traverse over all the groups to create a nested structure, + // but we only add the sample to the group the sample belongs to, + // cause by definition every group is independent. + for _, groupName := range groupNames { groupData.groupDataFor(groupName) - groupData.groupsData[groupName].addSample(sample) - - if i < len(groupNames)-1 { - groupData = groupData.groupsData[groupName] - } + groupData = groupData.groupsData[groupName] } + groupData.addSample(sample) } } @@ -129,6 +99,9 @@ func (o *Output) MetricsReport(summary *lib.Summary, options lib.Options) lib.Re testRunDuration := summary.TestRunDuration summaryTrendStats := options.SummaryTrendStats + // Populate the thresholds. + report.ReportThresholds = reportThresholds(o.dataModel.thresholds, testRunDuration, summaryTrendStats) + // Populate root group and nested groups recursively. populateReportGroup( &report.ReportGroup, @@ -156,7 +129,16 @@ func (o *Output) MetricsReport(summary *lib.Summary, options lib.Options) lib.Re // // If it's a check-specific metric, it will also update the check's pass/fail counters. func (o *Output) storeSample(sample metrics.Sample) { - o.dataModel.aggregatedMetrics.relayMetricFrom(sample) + // If it's the first time we see this metric, we relay the metric from the sample + // and, we store the thresholds for that particular metric, and its sub-metrics. 
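+	// Thresholds are defined on the metric (and its submetrics) rather than on individual samples,
+	// so capturing them the first time the metric is seen is enough.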
+ if _, exists := o.dataModel.aggregatedMetrics[sample.Metric.Name]; !exists { + o.dataModel.aggregatedMetrics.relayMetricFrom(sample) + + o.dataModel.storeThresholdsFor(sample.Metric) + for _, sub := range sample.Metric.Submetrics { + o.dataModel.storeThresholdsFor(sub.Metric) + } + } if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { check := o.dataModel.checks.checkFor(checkName) diff --git a/playground/full-summary/script.js b/playground/full-summary/script.js index 21fd939bf52..6429b1f0beb 100644 --- a/playground/full-summary/script.js +++ b/playground/full-summary/script.js @@ -5,8 +5,11 @@ export {wsTest} from './ws.js'; export const options = { thresholds: { - 'http_reqs{group: ::auth}': ['count>1', 'count<5'], + 'http_reqs': ['count<10', 'rate>2'], + 'http_reqs{group: ::auth}': ['count>1'], 'http_reqs{scenario: api}': ['count>1'], + 'http_reqs{scenario: api, group: ::auth}': ['count<5'], + 'http_req_duration{group: ::auth}': ['p(95)<200', 'avg<100'], }, scenarios: { api: { From b0215a7db8c449a761210a26f5fc5022e2d54d2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 4 Dec 2024 17:56:09 +0100 Subject: [PATCH 14/42] Print Threholds as part of the summary output --- js/summary.js | 201 +++++++++++++++++++++++++++++++++++---- lib/report.go | 14 ++- output/summary/report.go | 35 ++++--- 3 files changed, 209 insertions(+), 41 deletions(-) diff --git a/js/summary.js b/js/summary.js index 3feb52b74c7..2708bd6d2d2 100644 --- a/js/summary.js +++ b/js/summary.js @@ -1,4 +1,6 @@ /** + * @typedef {{source: string, ok: boolean}} Threshold + * @typedef {{name: string, type: string, contains: string, values: Record, thresholds?: Threshold[]}} ReportMetric * @typedef {{sortByName: boolean, bold: boolean, indent: string, metricsBlockIndent: string}} DisplayOptions */ var forEach = function (obj, callback) { @@ -244,6 +246,7 @@ function nonTrendMetricValueForSum(metric, timeUnit) { } } +// FIXME (@oleiade) split this code up for reusability (for instance in the summarizeThreshold function below) function summarizeMetrics(options, data, decorate) { var indent = options.indent + ' ' var result = [] @@ -387,6 +390,146 @@ function summarizeMetrics(options, data, decorate) { return result } +/** + * @typedef {{metrics: Record}} ReportData + * + * @param options + * @param {ReportData} data + * @param decorate + * @returns {string[]} + */ +function summarizeMetricsWithThresholds(options, data, decorate) { + var indent = options.indent + ' ' + var result = [] + + var names = [] + var nameLenMax = 0 + + var nonTrendValues = {} + var nonTrendValueMaxLen = 0 + var nonTrendExtras = {} + var nonTrendExtraMaxLens = [0, 0] + + var trendCols = {} + var numTrendColumns = options.summaryTrendStats.length + var trendColMaxLens = new Array(numTrendColumns).fill(0) + forEach(data.metrics, function (name, metric) { + names.push(name) + // When calculating widths for metrics, account for the indentation on submetrics. 
+ var displayNameWidth = strWidth(name) + if (displayNameWidth > nameLenMax) { + nameLenMax = displayNameWidth + } + + if (metric.type == 'trend') { + var cols = [] + for (var i = 0; i < numTrendColumns; i++) { + var tc = options.summaryTrendStats[i] + var value = metric.values[tc] + if (tc === 'count') { + value = value.toString() + } else { + value = humanizeValue(value, metric, options.summaryTimeUnit) + } + var valLen = strWidth(value) + if (valLen > trendColMaxLens[i]) { + trendColMaxLens[i] = valLen + } + cols[i] = value + } + trendCols[name] = cols + return + } + var values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) + nonTrendValues[name] = values[0] + var valueLen = strWidth(values[0]) + if (valueLen > nonTrendValueMaxLen) { + nonTrendValueMaxLen = valueLen + } + nonTrendExtras[name] = values.slice(1) + for (var i = 1; i < values.length; i++) { + var extraLen = strWidth(values[i]) + if (extraLen > nonTrendExtraMaxLens[i - 1]) { + nonTrendExtraMaxLens[i - 1] = extraLen + } + } + }) + + // sort all metrics but keep sub metrics grouped with their parent metrics + if (options.sortByName) { + names.sort(function (metric1, metric2) { + var parent1 = metric1.split('{', 1)[0] + var parent2 = metric2.split('{', 1)[0] + var result = parent1.localeCompare(parent2) + if (result !== 0) { + return result + } + var sub1 = metric1.substring(parent1.length) + var sub2 = metric2.substring(parent2.length) + return sub1.localeCompare(sub2) + }) + } + + var getData = function (name) { + if (trendCols.hasOwnProperty(name)) { + var cols = trendCols[name] + var tmpCols = new Array(numTrendColumns) + for (var i = 0; i < cols.length; i++) { + tmpCols[i] = + options.summaryTrendStats[i] + + '=' + + decorate(cols[i], palette.cyan) + + ' '.repeat(trendColMaxLens[i] - strWidth(cols[i])) + } + return tmpCols.join(' ') + } + + var value = nonTrendValues[name] + var fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) + + var extras = nonTrendExtras[name] + if (extras.length == 1) { + fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) + } else if (extras.length > 1) { + var parts = new Array(extras.length) + for (var i = 0; i < extras.length; i++) { + parts[i] = + decorate(extras[i], palette.cyan, palette.faint) + + ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) + } + fmtData = fmtData + ' ' + parts.join(' ') + } + + return fmtData + } + + for (var name of names) { + var metric = data.metrics[name] + var mark = ' ' + var markColor = function (text) { + return text + } // noop + + var fmtName = + name + + decorate( + '.'.repeat(nameLenMax - strWidth(name) + 3) + ':', + palette.faint + ) + + result.push(indent + markColor(mark) + ' ' + fmtName + ' ' + getData(name)) + if (metric.thresholds) { + forEach(metric.thresholds, function (name, threshold) { + const resultIndent = threshold.ok ? ' ' : ' '; + const thresholdResult = threshold.ok ? 
decorate('SATISFIED', palette.green) : decorate('UNSATISFIED', palette.red); + result.push(indent + indent + ' ' + thresholdResult + resultIndent + decorate(`'${threshold.source}'`, palette.faint)) + }) + } + } + + return result +} + function generateTextSummary(data, options, report) { var mergedOpts = Object.assign({}, defaultOptions, data.options, options) var lines = [] @@ -474,13 +617,12 @@ function generateTextSummary(data, options, report) { lines.push('') } - /** * * @param {Object[]} checks * @param {Partial} opts */ - const displayChecks = (checks, opts = { indent: '' }) => { + const displayChecks = (checks, opts = {indent: ''}) => { if (checks === undefined || checks === null) { return } @@ -493,12 +635,44 @@ function generateTextSummary(data, options, report) { } } - // START OF GLOBAL RESULTS - // TITLE - lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('GLOBAL RESULTS') + '\n') + /** + * @typedef {{name: string, type: string, contains: string, values: Record}} Metric + * @typedef {{metric: Metric, thresholds: Threshold[]}} ReportThreshold + * + * @param {Record} thresholds + * @param {Partial} opts + */ + const displayThresholds = (thresholds, opts = {indent: ''}) => { + if (thresholds === undefined || thresholds === null) { + return + } + + lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('THRESHOLDS') + '\n') + + const mergedOpts = Object.assign({}, defaultOptions, data.options, options) + + let metrics = {}; + forEach(thresholds, (_, threshold) => { + metrics[threshold.metric.name] = {...threshold.metric, thresholds: threshold.thresholds} + }); + + Array.prototype.push.apply(lines, summarizeMetricsWithThresholds( + {...mergedOpts, indent: mergedOpts.indent + defaultIndent}, + {metrics}, + decorate), + ) + lines.push('') + }; + + // THRESHOLDS + displayThresholds(report.thresholds) + + // TOTAL RESULTS + lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('TOTAL RESULTS') + '\n') // CHECKS displayChecks(report.checks) + // METRICS forEach(report.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it @@ -509,16 +683,9 @@ function generateTextSummary(data, options, report) { displayMetricsBlockName(sectionName) displayMetricsBlock(sectionMetrics) }) - // END OF GLOBAL RESULTS + // END OF TOTAL RESULTS // GROUPS - /** - * - * @typedef {Object} GroupData - * @param groupName string - * @param groupData - */ - const summarize = (prefix, indent) => { return (groupName, groupData) => { lines.push(metricGroupIndent + indent + prefix + defaultIndent + boldify(`GROUP: ${groupName}`) + '\n') @@ -578,14 +745,6 @@ function generateTextSummary(data, options, report) { }) } - - Array.prototype.push.apply( - lines, - summarizeGroup(mergedOpts.indent + ' ', data.root_group, decorate) - ) - - Array.prototype.push.apply(lines, summarizeMetrics(mergedOpts, data, decorate)) - return lines.join('\n') } diff --git a/lib/report.go b/lib/report.go index d719b4c08e4..ba40a0a8c12 100644 --- a/lib/report.go +++ b/lib/report.go @@ -99,12 +99,16 @@ func NewReportChecks() *ReportChecks { } type ReportThreshold struct { - Source string `js:"source"` - Metric ReportMetric `js:"metric"` - Ok bool `js:"ok"` + Source string `js:"source"` + Ok bool `js:"ok"` } -type ReportThresholds map[string][]*ReportThreshold +type MetricThresholds struct { + Metric ReportMetric `js:"metric"` + Thresholds []ReportThreshold `js:"thresholds"` +} + +type ReportThresholds map[string]MetricThresholds func NewReportThresholds() 
ReportThresholds { thresholds := make(ReportThresholds) @@ -125,7 +129,7 @@ func NewReportGroup() ReportGroup { } type Report struct { - ReportThresholds + ReportThresholds `js:"thresholds"` ReportGroup Scenarios map[string]ReportGroup } diff --git a/output/summary/report.go b/output/summary/report.go index 117f6e8d082..3e56f02b90d 100644 --- a/output/summary/report.go +++ b/output/summary/report.go @@ -216,26 +216,31 @@ func reportThresholds( testRunDuration time.Duration, summaryTrendStats []string, ) lib.ReportThresholds { - rts := make(map[string][]*lib.ReportThreshold, len(thresholds)) + rts := make(map[string]lib.MetricThresholds, len(thresholds)) for _, threshold := range thresholds { metric := threshold.Metric - if _, exists := rts[metric.Name]; !exists { - rts[metric.Name] = make([]*lib.ReportThreshold, 0) + + mt, exists := rts[metric.Name] + if !exists { + mt = lib.MetricThresholds{ + Metric: lib.NewReportMetricFrom( + lib.ReportMetricInfo{ + Name: metric.Name, + Type: metric.Type.String(), + Contains: metric.Contains.String(), + }, + metric.Sink, + testRunDuration, + summaryTrendStats, + ), + } } - rts[metric.Name] = append(rts[metric.Name], &lib.ReportThreshold{ + + mt.Thresholds = append(rts[metric.Name].Thresholds, lib.ReportThreshold{ Source: threshold.Source, - Metric: lib.NewReportMetricFrom( - lib.ReportMetricInfo{ - Name: metric.Name, - Type: metric.Type.String(), - Contains: metric.Contains.String(), - }, - metric.Sink, - testRunDuration, - summaryTrendStats, - ), - Ok: !threshold.LastFailed, + Ok: !threshold.LastFailed, }) + rts[metric.Name] = mt } return rts } From 5d3008897fa5f446a7e043394b7d6edaaed1b611 Mon Sep 17 00:00:00 2001 From: oleiade Date: Tue, 10 Dec 2024 11:02:47 +0100 Subject: [PATCH 15/42] Add JSDoc documentation to summary.js --- js/summary.js | 215 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 193 insertions(+), 22 deletions(-) diff --git a/js/summary.js b/js/summary.js index 2708bd6d2d2..1ac1d9e970a 100644 --- a/js/summary.js +++ b/js/summary.js @@ -1,7 +1,70 @@ /** - * @typedef {{source: string, ok: boolean}} Threshold - * @typedef {{name: string, type: string, contains: string, values: Record, thresholds?: Threshold[]}} ReportMetric - * @typedef {{sortByName: boolean, bold: boolean, indent: string, metricsBlockIndent: string}} DisplayOptions + * @typedef {Object} Threshold + * @property {string} source - The threshold expression source. + * @property {boolean} ok - Whether the threshold was satisfied or not. + */ + +/** + * @typedef {Object} Check + * @property {string} id - The check ID. + * @property {string} name - The check name. + * @property {string} path - The check path. + * @property {number} passes - The number of successful checks. + * @property {number} fails - The number of failed checks. + */ + +/** + * @typedef {Object} ReportMetric + * @property {string} name - The metric name. + * @property {string} type - The type of the metric (e.g., "counter", "gauge", "rate", "trend"). + * @property {string} contains - The type of data contained in the metric (e.g., "time", "data", "default"). + * @property {Record} values - Key-value pairs of metric statistics (e.g. min, max, avg). + * @property {Threshold[]} [thresholds] - Optional array of thresholds associated with this metric. + */ + +/** + * @typedef {Object} ReportThreshold + * @property {string} source - The threshold expression source. + * @property {boolean} ok - Whether the threshold was satisfied or not. 
+ */ + +/** + * @typedef {Object} ReportChecksMetrics + * @property {ReportMetric[]} total - The total metrics. + * @property {ReportMetric} success - The successful metrics. + * @property {ReportMetric} fail - The failed metrics. + */ + +/** + * @typedef {Object} MetricThresholds + * @property {ReportMetric} metric - The metric object. + * @property {ReportThreshold[]} thresholds - The thresholds for the metric. + */ + +/** + * @typedef {Object} ReportChecks + * @property {ReportChecksMetrics} metrics - The metrics for checks. + * @property {Check[]} ordered_checks - The ordered checks. + */ + +/** + * @typedef {Object} DisplayOptions + * @property {boolean} sortByName - Whether metrics should be sorted by name. + * @property {boolean} bold - Whether to display section names in bold. + * @property {string} indent - Indentation string for the output. + * @property {string} metricsBlockIndent - Additional indentation for metrics blocks. + */ + +/** + * @typedef {Object} ReportData + * @property {Record} metrics - Collection of metrics keyed by their names. + */ + +/** + * A simple iteration utility function for objects. + * + * @param {Object} obj - the object to iterate over + * @param {(key: string, value: any) => (boolean|void)} callback - Callback invoked with (key, value) */ var forEach = function (obj, callback) { for (var key in obj) { @@ -13,6 +76,7 @@ var forEach = function (obj, callback) { } } +/** A palette of ANSI color codes for terminal output. */ var palette = { bold: 1, faint: 2, @@ -34,8 +98,13 @@ var defaultOptions = { sortByName: true, } -// strWidth tries to return the actual width the string will take up on the -// screen, without any terminal formatting, unicode ligatures, etc. +/** + * Compute the width of a string as displayed in a terminal, excluding ANSI codes, terminal + * formatting, Unicode ligatures, etc. + * + * @param {string} s - The string to measure + * @returns {number} The display width of the string + */ function strWidth(s) { // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ var data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better @@ -73,6 +142,14 @@ function strWidth(s) { return width } +/** + * Summarizes single check result. + * + * @param {string} indent + * @param {{name: string, passes: number, fails: number}} check - The check object with name, passes and fails + * @param {(text: string, ...colors: number[]) => string} decorate - A function to apply ANSI colors. + * @returns {string} - A formatted line summarizing the check. + */ function summarizeCheck(indent, check, decorate) { if (check.fails == 0) { return decorate(indent + succMark + ' ' + check.name, palette.green) @@ -102,6 +179,14 @@ function summarizeCheck(indent, check, decorate) { ) } +/** + * Summarizes a group of checks, recursively handling nested groups. + * + * @param {string} indent -The indentation for the group. + * @param {{name: string, checks: Object[], groups: Object[]}} group - Group object with name, checks, and subgroups. + * @param {(text: string, ...colors: number[]) => string} decorate = Function to decorate text with ANSI colors. + * @returns {string[]} - An array of formatted lines summarizing the group and its checks + */ function summarizeGroup(indent, group, decorate) { var result = [] if (group.name != '') { @@ -122,6 +207,12 @@ function summarizeGroup(indent, group, decorate) { return result } +/** + * Extracts a display name for a metric, handling sub-metrics (e.g. 
"metric{sub}" -> "{ sub }"). + * + * @param {string} name - The metric name. + * @returns {string} - The display name + */ function displayNameForMetric(name) { var subMetricPos = name.indexOf('{') if (subMetricPos >= 0) { @@ -130,6 +221,12 @@ function displayNameForMetric(name) { return name } +/** + * Determines the indentation for a metric line based on whether it has submetrics. + * + * @param {string} name - The metric name. + * @returns {string} - Indentation string. + */ function indentForMetric(name) { if (name.indexOf('{') >= 0) { return ' ' @@ -137,6 +234,12 @@ function indentForMetric(name) { return '' } +/** + * Converts a number of bytes into a human-readable string with units. + * + * @param {number} bytes - The number of bytes. + * @returns {string} A human-readable string (e.g. "10 kB"). + */ function humanizeBytes(bytes) { var units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] var base = 1000 @@ -156,16 +259,37 @@ var unitMap = { us: {unit: 'µs', coef: 1000}, } +/** + * Converts a number to a fixed decimal string, removing trailing zeros. + * + * @param {number} val - The number to convert. + * @param {number} prec - Decimal precision. + * @returns {string} A string representation of the number without trailing zeros. + */ function toFixedNoTrailingZeros(val, prec) { // TODO: figure out something better? return parseFloat(val.toFixed(prec)).toString() } +/** + * Truncates a number to a certain precision without rounding, then removes trailing zeros. + * + * @param {number} val - The number to truncate. + * @param {number} prec - Decimal precision. + * @returns {string} A truncated, not rounded string representation. + */ function toFixedNoTrailingZerosTrunc(val, prec) { var mult = Math.pow(10, prec) return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec) } +/** + * Humanizes a duration (in milliseconds) to a human-readable string, + * choosing appropriate units (ns, µs, ms, s, m, h). + * + * @param {number} dur - The duration in milliseconds. + * @returns {string} Human-readable duration (e.g. "2.5ms", "3s", "1m30s"). + */ function humanizeGenericDuration(dur) { if (dur === 0) { return '0s' @@ -199,6 +323,13 @@ function humanizeGenericDuration(dur) { return rem + 'h' + result } +/** + * Humanizes a duration according to a specified time unit or uses a generic formatting. + * + * @param {number} dur - The duration in milliseconds. + * @param {string|null} timeUnit - Optional time unit (e.g. "ms", "s"). + * @returns {string} A human-readable duration string. + */ function humanizeDuration(dur, timeUnit) { if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { return (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit @@ -207,6 +338,14 @@ function humanizeDuration(dur, timeUnit) { return humanizeGenericDuration(dur) } +/** + * Formats a metric value into a human-readable form, depending on the metric type and content. + * + * @param {number} val - The metric value. + * @param {ReportMetric} metric - The metric object. + * @param {string|null} timeUnit - The time unit for duration metrics. + * @returns {string} The humanized metric value. + */ function humanizeValue(val, metric, timeUnit) { if (metric.type == 'rate') { // Truncate instead of round when decreasing precision to 2 decimal places @@ -223,6 +362,13 @@ function humanizeValue(val, metric, timeUnit) { } } +/** + * Returns the summary values for non-trend metrics (counter, gauge, rate). + * + * @param {ReportMetric} metric - The metric to summarize. 
+ * @param {string|null} timeUnit - The time unit for durations. + * @returns {string[]} - An array of summary values. + */ function nonTrendMetricValueForSum(metric, timeUnit) { switch (metric.type) { case 'counter': @@ -247,6 +393,14 @@ function nonTrendMetricValueForSum(metric, timeUnit) { } // FIXME (@oleiade) split this code up for reusability (for instance in the summarizeThreshold function below) +/** + * Summarizes given metrics into an array of formatted lines. + * + * @param {Object} options - Display options merged with defaultOptions. + * @param {ReportData} data - The data object containing metrics. + * @param {(text: string, ...colors: number[]) => string} decorate - A decoration function for ANSI colors. + * @returns {string[]} Array of formatted lines. + */ function summarizeMetrics(options, data, decorate) { var indent = options.indent + ' ' var result = [] @@ -391,12 +545,12 @@ function summarizeMetrics(options, data, decorate) { } /** - * @typedef {{metrics: Record}} ReportData + * Summarizes metrics and their thresholds into formatted lines. * - * @param options - * @param {ReportData} data - * @param decorate - * @returns {string[]} + * @param {Object} options - Options merged with defaults. + * @param {ReportData} data - The data containing metrics. + * @param {(text: string, ...colors: number[]) => string} decorate - Decoration function. + * @returns {string[]} - Array of formatted lines including threshold statuses. */ function summarizeMetricsWithThresholds(options, data, decorate) { var indent = options.indent + ' ' @@ -530,12 +684,27 @@ function summarizeMetricsWithThresholds(options, data, decorate) { return result } +/** + * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. + * + * @param {Object} data - The data input for the summary (includes options, metrics, etc.). + * @param {Object} options - Additional options that override defaults. + * @param {Object} report - The report object containing thresholds, checks, metrics, groups, and scenarios. + * @returns {string} A formatted summary of the test results. + */ function generateTextSummary(data, options, report) { var mergedOpts = Object.assign({}, defaultOptions, data.options, options) var lines = [] // TODO: move all of these functions into an object with methods? - var decorate = function (text) { + /** + * Decorates text with ANSI color codes. + * + * @param text + * @param _ + * @returns {*} + */ + let decorate = function (text, _) { return text } if (mergedOpts.enableColors) { @@ -583,9 +752,10 @@ function generateTextSummary(data, options, report) { const metricGroupIndent = ' ' /** + * Displays a metrics block name (section heading). * - * @param sectionName - * @param {DisplayOptions} opts + * @param {string} sectionName - The section name (e.g., "checks", "http_req_duration"). + * @param {Partial} opts - Display options. */ const displayMetricsBlockName = (sectionName, opts) => { let bold = true; @@ -607,9 +777,10 @@ function generateTextSummary(data, options, report) { } /** + * Displays a block of metrics with the given options. * - * @param {Object[]} sectionMetrics - * @param {DisplayOptions} opts + * @param {Object[]} sectionMetrics - The metrics to display. + * @param {Partial} opts - Display options. 
*/ const displayMetricsBlock = (sectionMetrics, opts) => { const summarizeOpts = Object.assign({}, mergedOpts, opts) @@ -618,9 +789,10 @@ function generateTextSummary(data, options, report) { } /** + * Displays checks within a certain context (indentation, etc.). * - * @param {Object[]} checks - * @param {Partial} opts + * @param {Object} checks - Checks data, containing `metrics` and `ordered_checks`. + * @param {Partial} [opts={indent: ''}] - Options including indentation. */ const displayChecks = (checks, opts = {indent: ''}) => { if (checks === undefined || checks === null) { @@ -636,11 +808,10 @@ function generateTextSummary(data, options, report) { } /** - * @typedef {{name: string, type: string, contains: string, values: Record}} Metric - * @typedef {{metric: Metric, thresholds: Threshold[]}} ReportThreshold + * Displays thresholds and their satisfaction status. * - * @param {Record} thresholds - * @param {Partial} opts + * @param {Record} thresholds - Threshold data. + * @param {Partial} [opts={indent: ''}] - Display options. */ const displayThresholds = (thresholds, opts = {indent: ''}) => { if (thresholds === undefined || thresholds === null) { @@ -749,4 +920,4 @@ function generateTextSummary(data, options, report) { } exports.humanizeValue = humanizeValue -exports.textSummary = generateTextSummary \ No newline at end of file +exports.textSummary = generateTextSummary From b80a09cd1b52988788c996deff509cd67c4b0634 Mon Sep 17 00:00:00 2001 From: oleiade Date: Tue, 10 Dec 2024 14:26:36 +0100 Subject: [PATCH 16/42] Apply JS linter recommendations to summary.js --- js/summary.js | 305 +++++++++++++++++++++++--------------------------- 1 file changed, 138 insertions(+), 167 deletions(-) diff --git a/js/summary.js b/js/summary.js index 1ac1d9e970a..ae48600321d 100644 --- a/js/summary.js +++ b/js/summary.js @@ -66,8 +66,8 @@ * @param {Object} obj - the object to iterate over * @param {(key: string, value: any) => (boolean|void)} callback - Callback invoked with (key, value) */ -var forEach = function (obj, callback) { - for (var key in obj) { +function forEach(obj, callback) { + for (const key in obj) { if (obj.hasOwnProperty(key)) { if (callback(key, obj[key])) { break @@ -77,7 +77,7 @@ var forEach = function (obj, callback) { } /** A palette of ANSI color codes for terminal output. */ -var palette = { +const palette = { bold: 1, faint: 2, red: 31, @@ -86,11 +86,11 @@ var palette = { //TODO: add others? } -var groupPrefix = '█' -var detailsPrefix = '↳' -var succMark = '✓' -var failMark = '✗' -var defaultOptions = { +const groupPrefix = '█' +const detailsPrefix = '↳' +const succMark = '✓' +const failMark = '✗' +const defaultOptions = { indent: ' ', enableColors: true, summaryTimeUnit: null, @@ -107,21 +107,21 @@ var defaultOptions = { */ function strWidth(s) { // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ - var data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better - var inEscSeq = false - var inLongEscSeq = false - var width = 0 - for (var char of data) { + const data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better + let inEscSeq = false + let inLongEscSeq = false + let width = 0 + for (const char of data) { if (char.done) { break } // Skip over ANSI escape codes. 
- if (char == '\x1b') { + if (char === '\x1b') { inEscSeq = true continue } - if (inEscSeq && char == '[') { + if (inEscSeq && char === '[') { inLongEscSeq = true continue } @@ -151,11 +151,11 @@ function strWidth(s) { * @returns {string} - A formatted line summarizing the check. */ function summarizeCheck(indent, check, decorate) { - if (check.fails == 0) { + if (check.fails === 0) { return decorate(indent + succMark + ' ' + check.name, palette.green) } - var succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) + const succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) return decorate( indent + failMark + @@ -179,34 +179,6 @@ function summarizeCheck(indent, check, decorate) { ) } -/** - * Summarizes a group of checks, recursively handling nested groups. - * - * @param {string} indent -The indentation for the group. - * @param {{name: string, checks: Object[], groups: Object[]}} group - Group object with name, checks, and subgroups. - * @param {(text: string, ...colors: number[]) => string} decorate = Function to decorate text with ANSI colors. - * @returns {string[]} - An array of formatted lines summarizing the group and its checks - */ -function summarizeGroup(indent, group, decorate) { - var result = [] - if (group.name != '') { - result.push(indent + groupPrefix + ' ' + group.name + '\n') - indent = indent + ' ' - } - - for (var i = 0; i < group.checks.length; i++) { - result.push(summarizeCheck(indent, group.checks[i], decorate)) - } - if (group.checks.length > 0) { - result.push('') - } - for (var i = 0; i < group.groups.length; i++) { - Array.prototype.push.apply(result, summarizeGroup(indent, group.groups[i], decorate)) - } - - return result -} - /** * Extracts a display name for a metric, handling sub-metrics (e.g. "metric{sub}" -> "{ sub }"). * @@ -214,7 +186,7 @@ function summarizeGroup(indent, group, decorate) { * @returns {string} - The display name */ function displayNameForMetric(name) { - var subMetricPos = name.indexOf('{') + const subMetricPos = name.indexOf('{') if (subMetricPos >= 0) { return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }' } @@ -241,19 +213,19 @@ function indentForMetric(name) { * @returns {string} A human-readable string (e.g. "10 kB"). */ function humanizeBytes(bytes) { - var units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] - var base = 1000 + const units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] + const base = 1000 if (bytes < 10) { return bytes + ' B' } - var e = Math.floor(Math.log(bytes) / Math.log(base)) - var suffix = units[e | 0] - var val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10 + const e = Math.floor(Math.log(bytes) / Math.log(base)) + const suffix = units[e | 0] + const val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10 return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix } -var unitMap = { +const unitMap = { s: {unit: 's', coef: 0.001}, ms: {unit: 'ms', coef: 1}, us: {unit: 'µs', coef: 1000}, @@ -279,7 +251,7 @@ function toFixedNoTrailingZeros(val, prec) { * @returns {string} A truncated, not rounded string representation. */ function toFixedNoTrailingZerosTrunc(val, prec) { - var mult = Math.pow(10, prec) + const mult = Math.pow(10, prec) return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec) } @@ -287,40 +259,40 @@ function toFixedNoTrailingZerosTrunc(val, prec) { * Humanizes a duration (in milliseconds) to a human-readable string, * choosing appropriate units (ns, µs, ms, s, m, h). 
* - * @param {number} dur - The duration in milliseconds. + * @param {number} duration - The duration in milliseconds. * @returns {string} Human-readable duration (e.g. "2.5ms", "3s", "1m30s"). */ -function humanizeGenericDuration(dur) { - if (dur === 0) { +function humanizeGenericDuration(duration) { + if (duration === 0) { return '0s' } - if (dur < 0.001) { + if (duration < 0.001) { // smaller than a microsecond, print nanoseconds - return Math.trunc(dur * 1000000) + 'ns' + return Math.trunc(duration * 1000000) + 'ns' } - if (dur < 1) { + if (duration < 1) { // smaller than a millisecond, print microseconds - return toFixedNoTrailingZerosTrunc(dur * 1000, 2) + 'µs' + return toFixedNoTrailingZerosTrunc(duration * 1000, 2) + 'µs' } - if (dur < 1000) { + if (duration < 1000) { // duration is smaller than a second - return toFixedNoTrailingZerosTrunc(dur, 2) + 'ms' + return toFixedNoTrailingZerosTrunc(duration, 2) + 'ms' } - var result = toFixedNoTrailingZerosTrunc((dur % 60000) / 1000, dur > 60000 ? 0 : 2) + 's' - var rem = Math.trunc(dur / 60000) + let fixedDuration= toFixedNoTrailingZerosTrunc((duration % 60000) / 1000, duration > 60000 ? 0 : 2) + 's' + let rem = Math.trunc(duration / 60000) if (rem < 1) { // less than a minute - return result + return fixedDuration } - result = (rem % 60) + 'm' + result + fixedDuration = (rem % 60) + 'm' + fixedDuration rem = Math.trunc(rem / 60) if (rem < 1) { // less than an hour - return result + return fixedDuration } - return rem + 'h' + result + return rem + 'h' + fixedDuration } /** @@ -347,7 +319,7 @@ function humanizeDuration(dur, timeUnit) { * @returns {string} The humanized metric value. */ function humanizeValue(val, metric, timeUnit) { - if (metric.type == 'rate') { + if (metric.type === 'rate') { // Truncate instead of round when decreasing precision to 2 decimal places return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%' } @@ -397,45 +369,45 @@ function nonTrendMetricValueForSum(metric, timeUnit) { * Summarizes given metrics into an array of formatted lines. * * @param {Object} options - Display options merged with defaultOptions. - * @param {ReportData} data - The data object containing metrics. + * @param {{metrics: Object[]}} data - The data object containing metrics. * @param {(text: string, ...colors: number[]) => string} decorate - A decoration function for ANSI colors. * @returns {string[]} Array of formatted lines. */ function summarizeMetrics(options, data, decorate) { - var indent = options.indent + ' ' - var result = [] + const indent = options.indent + ' ' + let result = [] - var names = [] - var nameLenMax = 0 + const names = [] + let nameLenMax = 0 - var nonTrendValues = {} - var nonTrendValueMaxLen = 0 - var nonTrendExtras = {} - var nonTrendExtraMaxLens = [0, 0] + const nonTrendValues = {} + let nonTrendValueMaxLen = 0 + const nonTrendExtras = {} + const nonTrendExtraMaxLens = [0, 0] - var trendCols = {} - var numTrendColumns = options.summaryTrendStats.length - var trendColMaxLens = new Array(numTrendColumns).fill(0) + const trendCols = {} + const numTrendColumns = options.summaryTrendStats.length + const trendColMaxLens = new Array(numTrendColumns).fill(0) forEach(data.metrics, function (name, metric) { names.push(name) // When calculating widths for metrics, account for the indentation on submetrics. 
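    // For example, a hypothetical submetric 'http_req_duration{expected_response:true}'
    // is rendered as an indented '{ expected_response:true }', so both the two-space
    // indent and the rewritten braces contribute to the column width computed here.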
- var displayName = indentForMetric(name) + displayNameForMetric(name) - var displayNameWidth = strWidth(displayName) + const displayName = indentForMetric(name) + displayNameForMetric(name) + const displayNameWidth = strWidth(displayName) if (displayNameWidth > nameLenMax) { nameLenMax = displayNameWidth } - if (metric.type == 'trend') { - var cols = [] - for (var i = 0; i < numTrendColumns; i++) { - var tc = options.summaryTrendStats[i] - var value = metric.values[tc] + if (metric.type === 'trend') { + const cols = [] + for (let i = 0; i < numTrendColumns; i++) { + const tc = options.summaryTrendStats[i] + let value = metric.values[tc] if (tc === 'count') { value = value.toString() } else { value = humanizeValue(value, metric, options.summaryTimeUnit) } - var valLen = strWidth(value) + const valLen = strWidth(value) if (valLen > trendColMaxLens[i]) { trendColMaxLens[i] = valLen } @@ -444,15 +416,15 @@ function summarizeMetrics(options, data, decorate) { trendCols[name] = cols return } - var values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) + const values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) nonTrendValues[name] = values[0] - var valueLen = strWidth(values[0]) + const valueLen = strWidth(values[0]) if (valueLen > nonTrendValueMaxLen) { nonTrendValueMaxLen = valueLen } nonTrendExtras[name] = values.slice(1) - for (var i = 1; i < values.length; i++) { - var extraLen = strWidth(values[i]) + for (let i = 1; i < values.length; i++) { + const extraLen = strWidth(values[i]) if (extraLen > nonTrendExtraMaxLens[i - 1]) { nonTrendExtraMaxLens[i - 1] = extraLen } @@ -462,23 +434,23 @@ function summarizeMetrics(options, data, decorate) { // sort all metrics but keep sub metrics grouped with their parent metrics if (options.sortByName) { names.sort(function (metric1, metric2) { - var parent1 = metric1.split('{', 1)[0] - var parent2 = metric2.split('{', 1)[0] - var result = parent1.localeCompare(parent2) + const parent1 = metric1.split('{', 1)[0] + const parent2 = metric2.split('{', 1)[0] + const result = parent1.localeCompare(parent2) if (result !== 0) { return result } - var sub1 = metric1.substring(parent1.length) - var sub2 = metric2.substring(parent2.length) + const sub1 = metric1.substring(parent1.length) + const sub2 = metric2.substring(parent2.length) return sub1.localeCompare(sub2) }) } - var getData = function (name) { + const getData = function (name) { if (trendCols.hasOwnProperty(name)) { - var cols = trendCols[name] - var tmpCols = new Array(numTrendColumns) - for (var i = 0; i < cols.length; i++) { + const cols = trendCols[name] + const tmpCols = new Array(numTrendColumns) + for (let i = 0; i < cols.length; i++) { tmpCols[i] = options.summaryTrendStats[i] + '=' + @@ -488,15 +460,15 @@ function summarizeMetrics(options, data, decorate) { return tmpCols.join(' ') } - var value = nonTrendValues[name] - var fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) + const value = nonTrendValues[name] + let fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) - var extras = nonTrendExtras[name] - if (extras.length == 1) { + const extras = nonTrendExtras[name] + if (extras.length === 1) { fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) } else if (extras.length > 1) { - var parts = new Array(extras.length) - for (var i = 0; i < extras.length; i++) { + const parts = new Array(extras.length) + for (let i = 0; i < extras.length; i++) { parts[i] = 
decorate(extras[i], palette.cyan, palette.faint) + ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) @@ -507,10 +479,10 @@ function summarizeMetrics(options, data, decorate) { return fmtData } - for (var name of names) { - var metric = data.metrics[name] - var mark = ' ' - var markColor = function (text) { + for (const name of names) { + const metric = data.metrics[name] + let mark = ' ' + let markColor = function (text) { return text } // noop @@ -529,8 +501,8 @@ function summarizeMetrics(options, data, decorate) { } }) } - var fmtIndent = indentForMetric(name) - var fmtName = displayNameForMetric(name) + const fmtIndent = indentForMetric(name) + let fmtName = displayNameForMetric(name) fmtName = fmtName + decorate( @@ -553,39 +525,39 @@ function summarizeMetrics(options, data, decorate) { * @returns {string[]} - Array of formatted lines including threshold statuses. */ function summarizeMetricsWithThresholds(options, data, decorate) { - var indent = options.indent + ' ' - var result = [] + const indent = options.indent + ' ' + const result = [] - var names = [] - var nameLenMax = 0 + const names = [] + let nameLenMax = 0 - var nonTrendValues = {} - var nonTrendValueMaxLen = 0 - var nonTrendExtras = {} - var nonTrendExtraMaxLens = [0, 0] + const nonTrendValues = {} + let nonTrendValueMaxLen = 0 + const nonTrendExtras = {} + let nonTrendExtraMaxLens = [0, 0] - var trendCols = {} - var numTrendColumns = options.summaryTrendStats.length - var trendColMaxLens = new Array(numTrendColumns).fill(0) + const trendCols = {} + const numTrendColumns = options.summaryTrendStats.length + const trendColMaxLens = new Array(numTrendColumns).fill(0) forEach(data.metrics, function (name, metric) { names.push(name) // When calculating widths for metrics, account for the indentation on submetrics. 
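    // Unlike the metrics table above, the thresholds view prints the full metric
    // name (tags included) without submetric indentation, so only the raw
    // strWidth(name) is measured here.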
- var displayNameWidth = strWidth(name) + const displayNameWidth = strWidth(name) if (displayNameWidth > nameLenMax) { nameLenMax = displayNameWidth } - if (metric.type == 'trend') { - var cols = [] - for (var i = 0; i < numTrendColumns; i++) { - var tc = options.summaryTrendStats[i] - var value = metric.values[tc] + if (metric.type === 'trend') { + const cols = [] + for (let i = 0; i < numTrendColumns; i++) { + const tc = options.summaryTrendStats[i] + let value = metric.values[tc] if (tc === 'count') { value = value.toString() } else { value = humanizeValue(value, metric, options.summaryTimeUnit) } - var valLen = strWidth(value) + const valLen = strWidth(value) if (valLen > trendColMaxLens[i]) { trendColMaxLens[i] = valLen } @@ -594,15 +566,15 @@ function summarizeMetricsWithThresholds(options, data, decorate) { trendCols[name] = cols return } - var values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) + let values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) nonTrendValues[name] = values[0] - var valueLen = strWidth(values[0]) + const valueLen = strWidth(values[0]) if (valueLen > nonTrendValueMaxLen) { nonTrendValueMaxLen = valueLen } nonTrendExtras[name] = values.slice(1) - for (var i = 1; i < values.length; i++) { - var extraLen = strWidth(values[i]) + for (let i = 1; i < values.length; i++) { + const extraLen = strWidth(values[i]) if (extraLen > nonTrendExtraMaxLens[i - 1]) { nonTrendExtraMaxLens[i - 1] = extraLen } @@ -612,23 +584,23 @@ function summarizeMetricsWithThresholds(options, data, decorate) { // sort all metrics but keep sub metrics grouped with their parent metrics if (options.sortByName) { names.sort(function (metric1, metric2) { - var parent1 = metric1.split('{', 1)[0] - var parent2 = metric2.split('{', 1)[0] - var result = parent1.localeCompare(parent2) + const parent1 = metric1.split('{', 1)[0] + const parent2 = metric2.split('{', 1)[0] + const result = parent1.localeCompare(parent2) if (result !== 0) { return result } - var sub1 = metric1.substring(parent1.length) - var sub2 = metric2.substring(parent2.length) + const sub1 = metric1.substring(parent1.length) + const sub2 = metric2.substring(parent2.length) return sub1.localeCompare(sub2) }) } - var getData = function (name) { + const getData = function (name) { if (trendCols.hasOwnProperty(name)) { - var cols = trendCols[name] - var tmpCols = new Array(numTrendColumns) - for (var i = 0; i < cols.length; i++) { + const cols = trendCols[name] + const tmpCols = new Array(numTrendColumns) + for (let i = 0; i < cols.length; i++) { tmpCols[i] = options.summaryTrendStats[i] + '=' + @@ -638,15 +610,15 @@ function summarizeMetricsWithThresholds(options, data, decorate) { return tmpCols.join(' ') } - var value = nonTrendValues[name] - var fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) + const value = nonTrendValues[name] + let fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) - var extras = nonTrendExtras[name] - if (extras.length == 1) { + const extras = nonTrendExtras[name] + if (extras.length === 1) { fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) } else if (extras.length > 1) { - var parts = new Array(extras.length) - for (var i = 0; i < extras.length; i++) { + const parts = new Array(extras.length) + for (let i = 0; i < extras.length; i++) { parts[i] = decorate(extras[i], palette.cyan, palette.faint) + ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) @@ -657,14 +629,14 
@@ function summarizeMetricsWithThresholds(options, data, decorate) { return fmtData } - for (var name of names) { - var metric = data.metrics[name] - var mark = ' ' - var markColor = function (text) { + for (const name of names) { + const metric = data.metrics[name] + const mark = ' ' + const markColor = function (text) { return text } // noop - var fmtName = + const fmtName = name + decorate( '.'.repeat(nameLenMax - strWidth(name) + 3) + ':', @@ -693,8 +665,8 @@ function summarizeMetricsWithThresholds(options, data, decorate) { * @returns {string} A formatted summary of the test results. */ function generateTextSummary(data, options, report) { - var mergedOpts = Object.assign({}, defaultOptions, data.options, options) - var lines = [] + const mergedOpts = Object.assign({}, defaultOptions, data.options, options) + const lines = [] // TODO: move all of these functions into an object with methods? /** @@ -709,15 +681,15 @@ function generateTextSummary(data, options, report) { } if (mergedOpts.enableColors) { decorate = function (text, color /*, ...rest*/) { - var result = '\x1b[' + color - for (var i = 2; i < arguments.length; i++) { + let result = '\x1b[' + color + for (let i = 2; i < arguments.length; i++) { result += ';' + arguments[i] } return result + 'm' + text + '\x1b[0m' } } - const ANSI_CODES = { + const ANSI= { reset: "\x1b[0m", // Standard Colors @@ -745,7 +717,7 @@ function generateTextSummary(data, options, report) { }; const BOLD = '\u001b[1m' - const RESET = ANSI_CODES.reset; + const RESET = ANSI.reset; const boldify = (text) => BOLD + text + RESET const defaultIndent = ' ' @@ -755,7 +727,7 @@ function generateTextSummary(data, options, report) { * Displays a metrics block name (section heading). * * @param {string} sectionName - The section name (e.g., "checks", "http_req_duration"). - * @param {Partial} opts - Display options. + * @param {Partial} [opts] - Display options. */ const displayMetricsBlockName = (sectionName, opts) => { let bold = true; @@ -780,7 +752,7 @@ function generateTextSummary(data, options, report) { * Displays a block of metrics with the given options. * * @param {Object[]} sectionMetrics - The metrics to display. - * @param {Partial} opts - Display options. + * @param {Partial} [opts] - Display options. */ const displayMetricsBlock = (sectionMetrics, opts) => { const summarizeOpts = Object.assign({}, mergedOpts, opts) @@ -799,7 +771,7 @@ function generateTextSummary(data, options, report) { return } displayMetricsBlock(checks.metrics, {...opts, indent: opts.indent + defaultIndent, sortByName: false}) - for (var i = 0; i < checks.ordered_checks.length; i++) { + for (let i = 0; i < checks.ordered_checks.length; i++) { lines.push(summarizeCheck(metricGroupIndent + metricGroupIndent + opts.indent, checks.ordered_checks[i], decorate)) } if (checks.ordered_checks.length > 0) { @@ -811,9 +783,8 @@ function generateTextSummary(data, options, report) { * Displays thresholds and their satisfaction status. * * @param {Record} thresholds - Threshold data. - * @param {Partial} [opts={indent: ''}] - Display options. 
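 * @example
 * // Illustrative output only; metric names, values and exact alignment are made up:
 * //   http_req_duration..............: avg=120ms p(95)=250ms
 * //     SATISFIED 'p(95)<300'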
*/ - const displayThresholds = (thresholds, opts = {indent: ''}) => { + const displayThresholds = (thresholds) => { if (thresholds === undefined || thresholds === null) { return } From b2b5823f46d2127c549e8db8f843137b03b2cd3f Mon Sep 17 00:00:00 2001 From: oleiade Date: Wed, 11 Dec 2024 14:02:33 +0100 Subject: [PATCH 17/42] Refactor metrics and thresholds rendering in summary.js --- js/summary.js | 635 +++++++++++++++++++++++++++----------------------- 1 file changed, 346 insertions(+), 289 deletions(-) diff --git a/js/summary.js b/js/summary.js index ae48600321d..d2ced5c831f 100644 --- a/js/summary.js +++ b/js/summary.js @@ -1,3 +1,6 @@ +// FIXME (@oleiade): We need a more consistent and central way to manage indentations +// FIXME (@oleiade): We call them "options" everywhere but they're actually configuration I would argue + /** * @typedef {Object} Threshold * @property {string} source - The threshold expression source. @@ -142,42 +145,6 @@ function strWidth(s) { return width } -/** - * Summarizes single check result. - * - * @param {string} indent - * @param {{name: string, passes: number, fails: number}} check - The check object with name, passes and fails - * @param {(text: string, ...colors: number[]) => string} decorate - A function to apply ANSI colors. - * @returns {string} - A formatted line summarizing the check. - */ -function summarizeCheck(indent, check, decorate) { - if (check.fails === 0) { - return decorate(indent + succMark + ' ' + check.name, palette.green) - } - - const succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) - return decorate( - indent + - failMark + - ' ' + - check.name + - '\n' + - indent + - ' ' + - detailsPrefix + - ' ' + - succPercent + - '% — ' + - succMark + - ' ' + - check.passes + - ' / ' + - failMark + - ' ' + - check.fails, - palette.red - ) -} /** * Extracts a display name for a metric, handling sub-metrics (e.g. "metric{sub}" -> "{ sub }"). @@ -364,298 +331,387 @@ function nonTrendMetricValueForSum(metric, timeUnit) { } } -// FIXME (@oleiade) split this code up for reusability (for instance in the summarizeThreshold function below) + +/** + * Sorts metrics by name, keeping submetrics grouped with their parent metrics. + * + * @param {string[]} metricNames - The metric names to sort. + * @returns {string[]} - The sorted metric names. + */ +function sortMetricsByName(metricNames) { + metricNames.sort(function (lhsMetricName, rhsMetricName) { + const lhsParent = lhsMetricName.split('{', 1)[0] + const rhsParent = rhsMetricName.split('{', 1)[0] + const result = lhsParent.localeCompare(rhsParent) + if (result !== 0) { + return result + } + const lhsSub = lhsMetricName.substring(lhsParent.length) + const rhsSub = rhsMetricName.substring(rhsParent.length) + return lhsSub.localeCompare(rhsSub) + }) + + return metricNames +} + /** - * Summarizes given metrics into an array of formatted lines. + * Renders a single check into a formatted line ready for output. + * + * @param {string} indent + * @param {{name: string, passes: number, fails: number}} check - The check object with name, passes and fails + * @param {(text: string, ...colors: number[]) => string} decorate - A function to apply ANSI colors. + * @returns {string} - A formatted line summarizing the check. 
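 * @example
 * // Illustrative values; a failing check renders on two lines:
 * // ✗ status is 200
 * //   ↳ 90% — ✓ 90 / ✗ 10
 * renderCheck('', {name: 'status is 200', passes: 90, fails: 10}, decorate)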
+ */ +function renderCheck(indent, check, decorate) { + if (check.fails === 0) { + return decorate(indent + succMark + ' ' + check.name, palette.green) + } + + const succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) + return decorate( + indent + + failMark + + ' ' + + check.name + + '\n' + + indent + + ' ' + + detailsPrefix + + ' ' + + succPercent + + '% — ' + + succMark + + ' ' + + check.passes + + ' / ' + + failMark + + ' ' + + check.fails, + palette.red + ) +} + +/** + * @typedef {Object} summarizeMetricsOptions + * @property {string} indent - The indentation string. + * @property {boolean} enableColors - Whether to enable ANSI colors. + * @property {string} summaryTimeUnit - The time unit for duration metrics. + * @property {string[]} summaryTrendStats - The trend statistics to summarize. + * @property {boolean} sortByName - Whether to sort metrics by name. + * @property {boolean} noColor - Whether to disable ANSI colors. + */ + +/** + * Summarizes metrics into an array of formatted lines ready to be printed to stdout. * - * @param {Object} options - Display options merged with defaultOptions. * @param {{metrics: Object[]}} data - The data object containing metrics. + * @param {summarizeMetricsOptions} options - Display options merged with defaultOptions. * @param {(text: string, ...colors: number[]) => string} decorate - A decoration function for ANSI colors. - * @returns {string[]} Array of formatted lines. + * @returns {string[]} */ -function summarizeMetrics(options, data, decorate) { - const indent = options.indent + ' ' - let result = [] +function renderMetrics(data, decorate, options) { + const indent = options.indent + ' ' // FIXME @oleiade shouldn't we provide this at the caller? + + // Extract all metric names + let metricNames = Object.keys(data.metrics) + + // If sorting by name is required, do it now + if (options.sortByName) { + metricNames = sortMetricsByName(metricNames) + } + + // Precompute all formatting information + const summaryInfo = computeSummaryInfo(metricNames, data, options) + + // Format each metric line + return metricNames.map((name) => { + const metric = data.metrics[name] + return renderMetricLine( + name, + metric, + summaryInfo, + options, + decorate, + indent, + ) + }) +} - const names = [] - let nameLenMax = 0 +/** + * @typedef {Object} SummaryInfo + * @property {number} maxNameWidth - The maximum width of the metric names. + * @property {Object} nonTrendValues - The non-trend metric values. + * @property {Object} nonTrendExtras - The non-trend metric extras. + * @property {Object} trendCols - The trend columns. + * @property {number[]} trendColMaxLens - The trend column maximum lengths. + * @property {number} numTrendColumns - The number of trend columns. + * @property {string[]} trendStats - The trend statistics. + * @property {number} maxNonTrendValueLen - The maximum non-trend value length. + * @property {number[]} nonTrendExtraMaxLens - The non-trend extra maximum lengths. + */ + +/** + * Compute all necessary formatting information such as maximum lengths, trend columns and non-trend values for each + * metric. + * + * @param {string[]} metricNames + * @param {{metrics: Object[]}} data - The data object containing metrics. 
+ * @param {summarizeMetricsOptions} options + * @returns {SummaryInfo} +*/ +function computeSummaryInfo(metricNames, data, options) { + const trendStats = options.summaryTrendStats + const numTrendColumns = trendStats.length const nonTrendValues = {} - let nonTrendValueMaxLen = 0 const nonTrendExtras = {} - const nonTrendExtraMaxLens = [0, 0] - const trendCols = {} - const numTrendColumns = options.summaryTrendStats.length + + let maxNameWidth = 0 + let maxNonTrendValueLen = 0 + let nonTrendExtraMaxLens = [] // FIXME: "lens"? + + // Initialize tracking arrays for trend widths const trendColMaxLens = new Array(numTrendColumns).fill(0) - forEach(data.metrics, function (name, metric) { - names.push(name) - // When calculating widths for metrics, account for the indentation on submetrics. - const displayName = indentForMetric(name) + displayNameForMetric(name) - const displayNameWidth = strWidth(displayName) - if (displayNameWidth > nameLenMax) { - nameLenMax = displayNameWidth - } + + for (const name of metricNames) { + const metric = data.metrics[name] + const displayName = indentForMetric(name) + displayNameForMetric(name); + maxNameWidth = Math.max(maxNameWidth, strWidth(displayName)) if (metric.type === 'trend') { - const cols = [] - for (let i = 0; i < numTrendColumns; i++) { - const tc = options.summaryTrendStats[i] - let value = metric.values[tc] - if (tc === 'count') { - value = value.toString() - } else { - value = humanizeValue(value, metric, options.summaryTimeUnit) - } - const valLen = strWidth(value) - if (valLen > trendColMaxLens[i]) { - trendColMaxLens[i] = valLen - } - cols[i] = value - } + const cols = trendStats.map(stat => formatTrendValue(metric.values[stat], stat, metric, options)) + + // Compute max column widths + cols.forEach((col, index) => { + trendColMaxLens[index] = Math.max(trendColMaxLens[index], strWidth(col)) + }) trendCols[name] = cols - return - } - const values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) - nonTrendValues[name] = values[0] - const valueLen = strWidth(values[0]) - if (valueLen > nonTrendValueMaxLen) { - nonTrendValueMaxLen = valueLen - } - nonTrendExtras[name] = values.slice(1) - for (let i = 1; i < values.length; i++) { - const extraLen = strWidth(values[i]) - if (extraLen > nonTrendExtraMaxLens[i - 1]) { - nonTrendExtraMaxLens[i - 1] = extraLen - } + } else { + const values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) + const mainValue = values[0] // FIXME (@oleiade) we should assert that the index exists here + nonTrendValues[name] = mainValue + maxNonTrendValueLen = Math.max(maxNonTrendValueLen, strWidth(mainValue)) + + // FIXME (@oleiade): what the fuck is an extra, really? 
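      // In practice, the "extras" are the secondary humanized values returned by
      // nonTrendMetricValueForSum after the main one: the per-second rate for
      // counters, the min=/max= pair for gauges, and the "x out of y" breakdown
      // for rates.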
+ const extras = values.slice(1) + nonTrendExtras[name] = extras + extras.forEach((value, index) => { + const width = strWidth(value) + if (nonTrendExtraMaxLens[index] === undefined || width > nonTrendExtraMaxLens[index]) { + nonTrendExtraMaxLens[index] = width + } + }) } - }) + } - // sort all metrics but keep sub metrics grouped with their parent metrics - if (options.sortByName) { - names.sort(function (metric1, metric2) { - const parent1 = metric1.split('{', 1)[0] - const parent2 = metric2.split('{', 1)[0] - const result = parent1.localeCompare(parent2) - if (result !== 0) { - return result - } - const sub1 = metric1.substring(parent1.length) - const sub2 = metric2.substring(parent2.length) - return sub1.localeCompare(sub2) - }) + return { + maxNameWidth, + nonTrendValues, + nonTrendExtras, + trendCols, + trendColMaxLens, + numTrendColumns, + trendStats, + maxNonTrendValueLen, + nonTrendExtraMaxLens } +} - const getData = function (name) { - if (trendCols.hasOwnProperty(name)) { - const cols = trendCols[name] - const tmpCols = new Array(numTrendColumns) - for (let i = 0; i < cols.length; i++) { - tmpCols[i] = - options.summaryTrendStats[i] + - '=' + - decorate(cols[i], palette.cyan) + - ' '.repeat(trendColMaxLens[i] - strWidth(cols[i])) - } - return tmpCols.join(' ') - } +/** + * + * @param value + * @param stat + * @param metric + * @param options + * @returns {string} + */ +function formatTrendValue(value, stat, metric, options) { + if (stat === 'count') { + return value.toString(); + } + return humanizeValue(value, metric, options.summaryTimeUnit); +} - const value = nonTrendValues[name] - let fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) - - const extras = nonTrendExtras[name] - if (extras.length === 1) { - fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) - } else if (extras.length > 1) { - const parts = new Array(extras.length) - for (let i = 0; i < extras.length; i++) { - parts[i] = - decorate(extras[i], palette.cyan, palette.faint) + - ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) - } - fmtData = fmtData + ' ' + parts.join(' ') - } +/** + * Renders a metric line into a formatted string for display. + * + * @param {string} name - The name of the metric. + * @param {ReportMetric} metric - The metric object containing details about the metric. + * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. + * @param {summarizeMetricsOptions} options - Configuration options for summarizing metrics. + * @param {(text: string, ...colors: number[]) => string} decorate - A function to apply ANSI colors to text. + * @param {string} indent - The indentation string to use for the output. + * @returns {string} - The formatted metric line. + */ +function renderMetricLine(name, metric, info, options, decorate, indent) { + const { maxNameWidth } = info; - return fmtData - } + const displayedName = displayNameForMetric(name); + const fmtIndent = indentForMetric(name); - for (const name of names) { - const metric = data.metrics[name] - let mark = ' ' - let markColor = function (text) { - return text - } // noop + // Compute the trailing dots: + // Use `3` as a spacing offset as per original code. 
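  // e.g. with maxNameWidth = 18 and no submetric indent, 'http_req_duration'
  // (17 columns wide) gets 18 - 17 - 0 + 3 = 4 dots:
  // 'http_req_duration....: avg=120.5ms p(95)=250ms' (illustrative values).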
+ const dotsCount = maxNameWidth - strWidth(displayedName) - strWidth(fmtIndent) + 3; + const dottedName = displayedName + decorate('.'.repeat(dotsCount) + ':', palette.faint); - if (metric.thresholds) { - mark = succMark - markColor = function (text) { - return decorate(text, palette.green) - } - forEach(metric.thresholds, function (name, threshold) { - if (!threshold.ok) { - mark = failMark - markColor = function (text) { - return decorate(text, palette.red) - } - return true // break - } - }) - } - const fmtIndent = indentForMetric(name) - let fmtName = displayNameForMetric(name) - fmtName = - fmtName + - decorate( - '.'.repeat(nameLenMax - strWidth(fmtName) - strWidth(fmtIndent) + 3) + ':', - palette.faint - ) + const dataPart = (metric.type === 'trend') + ? formatTrendData(name, info, decorate) + : formatNonTrendData(name, info, decorate); + + // FIXME (@oleiade): We need a more consistent and central way to manage indentations + // FIXME (@oleiade): We call them "options" everywhere but they're actually configuration I would argue + return indent + fmtIndent + ' ' + dottedName + ' ' + dataPart; +} + +// FIXME (@oleiade): summarizeMetricsOptions needs a better name "DisplayConfig"? +// FIXME (@oleiade): decorate function should have a dedicated typedef +/** + * Formats a submetric (metric+tags key/value pairs) line for output. + * + * @param {string} name - name of the submetric + * @param {ReportMetric} metric - submetric object (submetric really are just a specialized metric with a tags set and a pointer to their parent) + * @param {SummaryInfo} info - summary information object + * @param {summarizeMetricsOptions} options - display options + * @param {(text: string, ...colors: number[]) => string} decorate - decoration function + * @param indent indentation string + * @returns {string} submetric report line in the form: `{submetric name}...: {value} {extra}` + */ +function formatSubmetricLine(name, metric, info, options, decorate, indent) { + const { maxNameWidth } = info; + + // Compute the trailing dots: + // Use `3` as a spacing offset as per original code. + let dotsCount = maxNameWidth - strWidth(name) - strWidth(indent) + 3; + dotsCount = Math.max(1, dotsCount) + const dottedName = name + decorate('.'.repeat(dotsCount) + ':', palette.faint); + + const dataPart = (metric.type === 'trend') + ? formatTrendData(name, info, decorate) + : formatNonTrendData(name, info, decorate); - result.push(indent + fmtIndent + markColor(mark) + ' ' + fmtName + ' ' + getData(name)) + return indent + ' ' + dottedName + ' ' + dataPart; +} + +/** + * Format data for trend metrics. + */ +function formatTrendData(name, info, decorate) { + const { trendStats, trendCols, trendColMaxLens } = info; + const cols = trendCols[name]; + + return cols.map((col, i) => { + const statName = trendStats[i]; + const padding = ' '.repeat(trendColMaxLens[i] - strWidth(col)); + return statName + '=' + decorate(col, palette.cyan) + padding; + }).join(' '); +} + +/** + * Format data for non-trend metrics. + * + * @param {string} name - The metric name. + * @param {Object} info - The summary information object. + * @param {(text: string, ...colors: number[]) => string} decorate - A decoration function for ANSI colors. 
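 * @returns {string} - The decorated main value, padded to the widest non-trend value, followed by any decorated extras.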
+ */ +function formatNonTrendData(name, info, decorate) { + const { nonTrendValues, nonTrendExtras, maxNonTrendValueLen, nonTrendExtraMaxLens } = info; + + const value = nonTrendValues[name]; + const extras = nonTrendExtras[name] || []; + + let result = decorate(value, palette.cyan); + result += ' '.repeat(maxNonTrendValueLen - strWidth(value)); + + if (extras.length === 1) { + // Single extra value + result += ' ' + decorate(extras[0], palette.cyan, palette.faint); + } else if (extras.length > 1) { + // Multiple extras need their own spacing + const parts = extras.map((val, i) => { + const extraSpace = ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(val)); + return decorate(val, palette.cyan, palette.faint) + extraSpace; + }); + result += ' ' + parts.join(' '); } - return result + return result; } /** - * Summarizes metrics and their thresholds into formatted lines. + * Renders each thresholds results into a formatted set of lines ready for display in the terminal. + * + * Thresholds are rendered in the format: + * {metric/submetric}...: {value} {extra} + * {SATISFIED|UNSATISFIED} {source} + * //... additional threshold lines * * @param {Object} options - Options merged with defaults. * @param {ReportData} data - The data containing metrics. * @param {(text: string, ...colors: number[]) => string} decorate - Decoration function. * @returns {string[]} - Array of formatted lines including threshold statuses. */ -function summarizeMetricsWithThresholds(options, data, decorate) { +function renderThresholds(data, decorate, options) { const indent = options.indent + ' ' - const result = [] - - const names = [] - let nameLenMax = 0 - - const nonTrendValues = {} - let nonTrendValueMaxLen = 0 - const nonTrendExtras = {} - let nonTrendExtraMaxLens = [0, 0] - - const trendCols = {} - const numTrendColumns = options.summaryTrendStats.length - const trendColMaxLens = new Array(numTrendColumns).fill(0) - forEach(data.metrics, function (name, metric) { - names.push(name) - // When calculating widths for metrics, account for the indentation on submetrics. 
- const displayNameWidth = strWidth(name) - if (displayNameWidth > nameLenMax) { - nameLenMax = displayNameWidth - } - - if (metric.type === 'trend') { - const cols = [] - for (let i = 0; i < numTrendColumns; i++) { - const tc = options.summaryTrendStats[i] - let value = metric.values[tc] - if (tc === 'count') { - value = value.toString() - } else { - value = humanizeValue(value, metric, options.summaryTimeUnit) - } - const valLen = strWidth(value) - if (valLen > trendColMaxLens[i]) { - trendColMaxLens[i] = valLen - } - cols[i] = value - } - trendCols[name] = cols - return - } - let values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) - nonTrendValues[name] = values[0] - const valueLen = strWidth(values[0]) - if (valueLen > nonTrendValueMaxLen) { - nonTrendValueMaxLen = valueLen - } - nonTrendExtras[name] = values.slice(1) - for (let i = 1; i < values.length; i++) { - const extraLen = strWidth(values[i]) - if (extraLen > nonTrendExtraMaxLens[i - 1]) { - nonTrendExtraMaxLens[i - 1] = extraLen - } - } - }) - // sort all metrics but keep sub metrics grouped with their parent metrics + // Extract and optionally sort metric names + let metricNames = Object.keys(data.metrics) if (options.sortByName) { - names.sort(function (metric1, metric2) { - const parent1 = metric1.split('{', 1)[0] - const parent2 = metric2.split('{', 1)[0] - const result = parent1.localeCompare(parent2) - if (result !== 0) { - return result - } - const sub1 = metric1.substring(parent1.length) - const sub2 = metric2.substring(parent2.length) - return sub1.localeCompare(sub2) - }) + metricNames = sortMetricsByName(metricNames) } - const getData = function (name) { - if (trendCols.hasOwnProperty(name)) { - const cols = trendCols[name] - const tmpCols = new Array(numTrendColumns) - for (let i = 0; i < cols.length; i++) { - tmpCols[i] = - options.summaryTrendStats[i] + - '=' + - decorate(cols[i], palette.cyan) + - ' '.repeat(trendColMaxLens[i] - strWidth(cols[i])) - } - return tmpCols.join(' ') - } - - const value = nonTrendValues[name] - let fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) - - const extras = nonTrendExtras[name] - if (extras.length === 1) { - fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) - } else if (extras.length > 1) { - const parts = new Array(extras.length) - for (let i = 0; i < extras.length; i++) { - parts[i] = - decorate(extras[i], palette.cyan, palette.faint) + - ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) - } - fmtData = fmtData + ' ' + parts.join(' ') - } + // Precompute all formatting information + const summaryInfo = computeSummaryInfo(metricNames, data, options) - return fmtData - } - - for (const name of names) { + // Format each threshold line by preparing each metric affected by a threshold, as + // well as the thresholds results for each expression. + const result = [] + for (const name of metricNames) { const metric = data.metrics[name] - const mark = ' ' - const markColor = function (text) { - return text - } // noop - - const fmtName = - name + - decorate( - '.'.repeat(nameLenMax - strWidth(name) + 3) + ':', - palette.faint - ) - - result.push(indent + markColor(mark) + ' ' + fmtName + ' ' + getData(name)) + const line = formatSubmetricLine(name, metric, summaryInfo, options, decorate, '') + result.push(line) + if (metric.thresholds) { - forEach(metric.thresholds, function (name, threshold) { - const resultIndent = threshold.ok ? ' ' : ' '; - const thresholdResult = threshold.ok ? 
decorate('SATISFIED', palette.green) : decorate('UNSATISFIED', palette.red); - result.push(indent + indent + ' ' + thresholdResult + resultIndent + decorate(`'${threshold.source}'`, palette.faint)) - }) + // TODO (@oleiade): make sure the arguments are always ordered consistently across functions (indent, decorate, etc.) + const thresholdLines = renderThresholdResults(metric.thresholds, indent, decorate) + result.push(...thresholdLines) } } return result } +/** + * Renders each threshold result into a formatted set of lines ready for display in the terminal. + * + * @param {Object} thresholds - The thresholds to render. + * @param {string} indent - The indentation string to use for the output. + * @param {(text: string, ...colors: number[]) => string} decorate - A function to apply ANSI colors to text. + * @returns {string[]} - An array of formatted lines including threshold statuses. + */ +function renderThresholdResults(thresholds, indent, decorate) { + const lines = [] + + forEach(thresholds, (_, threshold) => { + const isSatisfied = threshold.ok + const statusText = isSatisfied + ? decorate('SATISFIED', palette.green) + : decorate('UNSATISFIED', palette.red) + + // Extra indentation for threshold lines + // Adjusting spacing so that it aligns nicely under the metric line + const additionalIndent = isSatisfied ? ' ' : ' ' + const sourceText = decorate(`'${threshold.source}'`, palette.faint) + + // Here we push a line describing the threshold's result + lines.push( + indent + indent + ' ' + statusText + additionalIndent + sourceText + ) + }) + + return lines +} + /** * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. * @@ -689,7 +745,7 @@ function generateTextSummary(data, options, report) { } } - const ANSI= { + const ANSI = { reset: "\x1b[0m", // Standard Colors @@ -715,7 +771,6 @@ function generateTextSummary(data, options, report) { // Dark Colors darkGrey: "\x1b[90m", }; - const BOLD = '\u001b[1m' const RESET = ANSI.reset; const boldify = (text) => BOLD + text + RESET @@ -754,9 +809,10 @@ function generateTextSummary(data, options, report) { * @param {Object[]} sectionMetrics - The metrics to display. * @param {Partial} [opts] - Display options. 
*/ + // FIXME const displayMetricsBlock = (sectionMetrics, opts) => { const summarizeOpts = Object.assign({}, mergedOpts, opts) - Array.prototype.push.apply(lines, summarizeMetrics(summarizeOpts, {metrics: sectionMetrics}, decorate)) + Array.prototype.push.apply(lines, renderMetrics({metrics: sectionMetrics}, decorate, summarizeOpts)) lines.push('') } @@ -772,7 +828,7 @@ function generateTextSummary(data, options, report) { } displayMetricsBlock(checks.metrics, {...opts, indent: opts.indent + defaultIndent, sortByName: false}) for (let i = 0; i < checks.ordered_checks.length; i++) { - lines.push(summarizeCheck(metricGroupIndent + metricGroupIndent + opts.indent, checks.ordered_checks[i], decorate)) + lines.push(renderCheck(metricGroupIndent + metricGroupIndent + opts.indent, checks.ordered_checks[i], decorate)) } if (checks.ordered_checks.length > 0) { lines.push('') @@ -798,11 +854,12 @@ function generateTextSummary(data, options, report) { metrics[threshold.metric.name] = {...threshold.metric, thresholds: threshold.thresholds} }); - Array.prototype.push.apply(lines, summarizeMetricsWithThresholds( - {...mergedOpts, indent: mergedOpts.indent + defaultIndent}, - {metrics}, - decorate), - ) + // Array.prototype.push.apply(lines, summarizeMetricsWithThresholds( + // {...mergedOpts, indent: mergedOpts.indent + defaultIndent}, + // {metrics}, + // decorate), + // ) + Array.prototype.push.apply(lines, renderThresholds({metrics}, decorate, {...mergedOpts, indent: mergedOpts.indent + defaultIndent})) lines.push('') }; From 4153ba7f800987f74f63bca00579239fa34774a0 Mon Sep 17 00:00:00 2001 From: oleiade Date: Mon, 16 Dec 2024 09:33:56 +0100 Subject: [PATCH 18/42] Import prettier+eslint configurations from docs and format summary.js --- js/.eslintignore | 5 + js/.eslintrc | 90 +++++++ js/.prettierignore | 5 + js/.prettierrc | 4 + js/summary.js | 635 +++++++++++++++++++++++++++------------------ 5 files changed, 483 insertions(+), 256 deletions(-) create mode 100644 js/.eslintignore create mode 100644 js/.eslintrc create mode 100644 js/.prettierignore create mode 100644 js/.prettierrc diff --git a/js/.eslintignore b/js/.eslintignore new file mode 100644 index 00000000000..d5d55e77cbc --- /dev/null +++ b/js/.eslintignore @@ -0,0 +1,5 @@ +# Ignore everything in this folder +* + +# But the end-of-test summary file +!summary.js diff --git a/js/.eslintrc b/js/.eslintrc new file mode 100644 index 00000000000..81c27781453 --- /dev/null +++ b/js/.eslintrc @@ -0,0 +1,90 @@ +{ + "parserOptions": { + "ecmaFeatures": { + "jsx": true + }, + "ecmaVersion": 2020, + "sourceType": "module" + }, + "extends": ["airbnb", "prettier"], + "plugins": ["react", "prettier"], + "env": { + "browser": true, + "node": true + }, + "rules": { + "prettier/prettier": ["error"], + "arrow-body-style": "warn", + "camelcase": 0, + "object-curly-newline": 0, + "operator-linebreak": 0, + "no-shadow": 0, + "max-len": [2, 120], + "no-underscore-dangle": "off", + "react/prop-types": 0, + "react/function-component-definition": 0, + "react/no-unstable-nested-components": 0, + "react/jsx-curly-brace-presence": [ + 2, + { + "props": "ignore", + "children": "never" + } + ], + "react/jsx-tag-spacing": [ + 2, + { + "closingSlash": "never", + "beforeSelfClosing": "always", + "afterOpening": "allow-multiline", + "beforeClosing": "never" + } + ], + "react/jsx-filename-extension": [ + 2, + { + "extensions": [".js"] + } + ], + "react/no-array-index-key": 0, + "react/jsx-one-expression-per-line": 0, + "react/jsx-props-no-spreading": 0, + 
"react/jsx-wrap-multilines": 0, + "import/no-extraneous-dependencies": [ + "warn", + { "devDependencies": false, "peerDependencies": true } + ], + "import/order": [ + "warn", + { + "alphabetize": { + "order": "asc" /* sort in ascending order. Options: ['ignore', 'asc', 'desc'] */, + "caseInsensitive": true /* ignore case. Options: [true, false] */ + }, + "newlines-between": "always" + } + ], + "import/no-unresolved": [ + 2, + { + "ignore": [ + "components", + "hooks", + "images", + "layouts", + "pages", + "styles", + "svg", + "templates", + "utils", + "contexts", + "i18n", + "data" + ] + } + ], + "import/prefer-default-export": 0, + "jsx-a11y/html-has-lang": 0, + "jsx-a11y/control-has-associated-label": 0 + } +} diff --git a/js/.prettierignore b/js/.prettierignore new file mode 100644 index 00000000000..d5d55e77cbc --- /dev/null +++ b/js/.prettierignore @@ -0,0 +1,5 @@ +# Ignore everything in this folder +* + +# But the end-of-test summary file +!summary.js diff --git a/js/.prettierrc b/js/.prettierrc new file mode 100644 index 00000000000..9b25e806cb8 --- /dev/null +++ b/js/.prettierrc @@ -0,0 +1,4 @@ +{ + "singleQuote": true, + "trailingComma": "all" +} diff --git a/js/summary.js b/js/summary.js index d2ced5c831f..242dd6da009 100644 --- a/js/summary.js +++ b/js/summary.js @@ -73,7 +73,7 @@ function forEach(obj, callback) { for (const key in obj) { if (obj.hasOwnProperty(key)) { if (callback(key, obj[key])) { - break + break; } } } @@ -87,19 +87,19 @@ const palette = { green: 32, cyan: 36, //TODO: add others? -} +}; -const groupPrefix = '█' -const detailsPrefix = '↳' -const succMark = '✓' -const failMark = '✗' +const groupPrefix = '█'; +const detailsPrefix = '↳'; +const succMark = '✓'; +const failMark = '✗'; const defaultOptions = { indent: ' ', enableColors: true, summaryTimeUnit: null, summaryTrendStats: null, sortByName: true, -} +}; /** * Compute the width of a string as displayed in a terminal, excluding ANSI codes, terminal @@ -110,42 +110,51 @@ const defaultOptions = { */ function strWidth(s) { // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ - const data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better - let inEscSeq = false - let inLongEscSeq = false - let width = 0 + const data = s.normalize('NFKC'); // This used to be NFKD in Go, but this should be better + let inEscSeq = false; + let inLongEscSeq = false; + let width = 0; for (const char of data) { if (char.done) { - break + break; } // Skip over ANSI escape codes. if (char === '\x1b') { - inEscSeq = true - continue + inEscSeq = true; + continue; } if (inEscSeq && char === '[') { - inLongEscSeq = true - continue + inLongEscSeq = true; + continue; } - if (inEscSeq && inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x7e) { - inEscSeq = false - inLongEscSeq = false - continue + if ( + inEscSeq && + inLongEscSeq && + char.charCodeAt(0) >= 0x40 && + char.charCodeAt(0) <= 0x7e + ) { + inEscSeq = false; + inLongEscSeq = false; + continue; } - if (inEscSeq && !inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x5f) { - inEscSeq = false - continue + if ( + inEscSeq && + !inLongEscSeq && + char.charCodeAt(0) >= 0x40 && + char.charCodeAt(0) <= 0x5f + ) { + inEscSeq = false; + continue; } if (!inEscSeq && !inLongEscSeq) { - width++ + width++; } } - return width + return width; } - /** * Extracts a display name for a metric, handling sub-metrics (e.g. "metric{sub}" -> "{ sub }"). 
* @@ -153,11 +162,11 @@ function strWidth(s) { * @returns {string} - The display name */ function displayNameForMetric(name) { - const subMetricPos = name.indexOf('{') + const subMetricPos = name.indexOf('{'); if (subMetricPos >= 0) { - return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }' + return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }'; } - return name + return name; } /** @@ -168,9 +177,9 @@ function displayNameForMetric(name) { */ function indentForMetric(name) { if (name.indexOf('{') >= 0) { - return ' ' + return ' '; } - return '' + return ''; } /** @@ -180,23 +189,23 @@ function indentForMetric(name) { * @returns {string} A human-readable string (e.g. "10 kB"). */ function humanizeBytes(bytes) { - const units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] - const base = 1000 + const units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + const base = 1000; if (bytes < 10) { - return bytes + ' B' + return bytes + ' B'; } - const e = Math.floor(Math.log(bytes) / Math.log(base)) - const suffix = units[e | 0] - const val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10 - return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix + const e = Math.floor(Math.log(bytes) / Math.log(base)); + const suffix = units[e | 0]; + const val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10; + return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix; } const unitMap = { - s: {unit: 's', coef: 0.001}, - ms: {unit: 'ms', coef: 1}, - us: {unit: 'µs', coef: 1000}, -} + s: { unit: 's', coef: 0.001 }, + ms: { unit: 'ms', coef: 1 }, + us: { unit: 'µs', coef: 1000 }, +}; /** * Converts a number to a fixed decimal string, removing trailing zeros. @@ -207,7 +216,7 @@ const unitMap = { */ function toFixedNoTrailingZeros(val, prec) { // TODO: figure out something better? - return parseFloat(val.toFixed(prec)).toString() + return parseFloat(val.toFixed(prec)).toString(); } /** @@ -218,8 +227,8 @@ function toFixedNoTrailingZeros(val, prec) { * @returns {string} A truncated, not rounded string representation. */ function toFixedNoTrailingZerosTrunc(val, prec) { - const mult = Math.pow(10, prec) - return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec) + const mult = Math.pow(10, prec); + return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec); } /** @@ -231,35 +240,39 @@ function toFixedNoTrailingZerosTrunc(val, prec) { */ function humanizeGenericDuration(duration) { if (duration === 0) { - return '0s' + return '0s'; } if (duration < 0.001) { // smaller than a microsecond, print nanoseconds - return Math.trunc(duration * 1000000) + 'ns' + return Math.trunc(duration * 1000000) + 'ns'; } if (duration < 1) { // smaller than a millisecond, print microseconds - return toFixedNoTrailingZerosTrunc(duration * 1000, 2) + 'µs' + return toFixedNoTrailingZerosTrunc(duration * 1000, 2) + 'µs'; } if (duration < 1000) { // duration is smaller than a second - return toFixedNoTrailingZerosTrunc(duration, 2) + 'ms' + return toFixedNoTrailingZerosTrunc(duration, 2) + 'ms'; } - let fixedDuration= toFixedNoTrailingZerosTrunc((duration % 60000) / 1000, duration > 60000 ? 0 : 2) + 's' - let rem = Math.trunc(duration / 60000) + let fixedDuration = + toFixedNoTrailingZerosTrunc( + (duration % 60000) / 1000, + duration > 60000 ? 
0 : 2, + ) + 's'; + let rem = Math.trunc(duration / 60000); if (rem < 1) { // less than a minute - return fixedDuration + return fixedDuration; } - fixedDuration = (rem % 60) + 'm' + fixedDuration - rem = Math.trunc(rem / 60) + fixedDuration = (rem % 60) + 'm' + fixedDuration; + rem = Math.trunc(rem / 60); if (rem < 1) { // less than an hour - return fixedDuration + return fixedDuration; } - return rem + 'h' + fixedDuration + return rem + 'h' + fixedDuration; } /** @@ -271,10 +284,12 @@ function humanizeGenericDuration(duration) { */ function humanizeDuration(dur, timeUnit) { if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { - return (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit + return ( + (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit + ); } - return humanizeGenericDuration(dur) + return humanizeGenericDuration(dur); } /** @@ -288,16 +303,16 @@ function humanizeDuration(dur, timeUnit) { function humanizeValue(val, metric, timeUnit) { if (metric.type === 'rate') { // Truncate instead of round when decreasing precision to 2 decimal places - return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%' + return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%'; } switch (metric.contains) { case 'data': - return humanizeBytes(val) + return humanizeBytes(val); case 'time': - return humanizeDuration(val, timeUnit) + return humanizeDuration(val, timeUnit); default: - return toFixedNoTrailingZeros(val, 6) + return toFixedNoTrailingZeros(val, 6); } } @@ -314,24 +329,23 @@ function nonTrendMetricValueForSum(metric, timeUnit) { return [ humanizeValue(metric.values.count, metric, timeUnit), humanizeValue(metric.values.rate, metric, timeUnit) + '/s', - ] + ]; case 'gauge': return [ humanizeValue(metric.values.value, metric, timeUnit), 'min=' + humanizeValue(metric.values.min, metric, timeUnit), 'max=' + humanizeValue(metric.values.max, metric, timeUnit), - ] + ]; case 'rate': return [ humanizeValue(metric.values.rate, metric, timeUnit), `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, - ] + ]; default: - return ['[no data]'] + return ['[no data]']; } } - /** * Sorts metrics by name, keeping submetrics grouped with their parent metrics. 
* @@ -340,18 +354,18 @@ function nonTrendMetricValueForSum(metric, timeUnit) { */ function sortMetricsByName(metricNames) { metricNames.sort(function (lhsMetricName, rhsMetricName) { - const lhsParent = lhsMetricName.split('{', 1)[0] - const rhsParent = rhsMetricName.split('{', 1)[0] - const result = lhsParent.localeCompare(rhsParent) + const lhsParent = lhsMetricName.split('{', 1)[0]; + const rhsParent = rhsMetricName.split('{', 1)[0]; + const result = lhsParent.localeCompare(rhsParent); if (result !== 0) { - return result + return result; } - const lhsSub = lhsMetricName.substring(lhsParent.length) - const rhsSub = rhsMetricName.substring(rhsParent.length) - return lhsSub.localeCompare(rhsSub) - }) + const lhsSub = lhsMetricName.substring(lhsParent.length); + const rhsSub = rhsMetricName.substring(rhsParent.length); + return lhsSub.localeCompare(rhsSub); + }); - return metricNames + return metricNames; } /** @@ -364,31 +378,33 @@ function sortMetricsByName(metricNames) { */ function renderCheck(indent, check, decorate) { if (check.fails === 0) { - return decorate(indent + succMark + ' ' + check.name, palette.green) + return decorate(indent + succMark + ' ' + check.name, palette.green); } - const succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) + const succPercent = Math.floor( + (100 * check.passes) / (check.passes + check.fails), + ); return decorate( indent + - failMark + - ' ' + - check.name + - '\n' + - indent + - ' ' + - detailsPrefix + - ' ' + - succPercent + - '% — ' + - succMark + - ' ' + - check.passes + - ' / ' + - failMark + - ' ' + - check.fails, - palette.red - ) + failMark + + ' ' + + check.name + + '\n' + + indent + + ' ' + + detailsPrefix + + ' ' + + succPercent + + '% — ' + + succMark + + ' ' + + check.passes + + ' / ' + + failMark + + ' ' + + check.fails, + palette.red, + ); } /** @@ -410,22 +426,22 @@ function renderCheck(indent, check, decorate) { * @returns {string[]} */ function renderMetrics(data, decorate, options) { - const indent = options.indent + ' ' // FIXME @oleiade shouldn't we provide this at the caller? + const indent = options.indent + ' '; // FIXME @oleiade shouldn't we provide this at the caller? // Extract all metric names - let metricNames = Object.keys(data.metrics) + let metricNames = Object.keys(data.metrics); // If sorting by name is required, do it now if (options.sortByName) { - metricNames = sortMetricsByName(metricNames) + metricNames = sortMetricsByName(metricNames); } // Precompute all formatting information - const summaryInfo = computeSummaryInfo(metricNames, data, options) + const summaryInfo = computeSummaryInfo(metricNames, data, options); // Format each metric line return metricNames.map((name) => { - const metric = data.metrics[name] + const metric = data.metrics[name]; return renderMetricLine( name, metric, @@ -433,8 +449,8 @@ function renderMetrics(data, decorate, options) { options, decorate, indent, - ) - }) + ); + }); } /** @@ -458,50 +474,64 @@ function renderMetrics(data, decorate, options) { * @param {{metrics: Object[]}} data - The data object containing metrics. 
* @param {summarizeMetricsOptions} options * @returns {SummaryInfo} -*/ + */ function computeSummaryInfo(metricNames, data, options) { - const trendStats = options.summaryTrendStats - const numTrendColumns = trendStats.length + const trendStats = options.summaryTrendStats; + const numTrendColumns = trendStats.length; - const nonTrendValues = {} - const nonTrendExtras = {} - const trendCols = {} + const nonTrendValues = {}; + const nonTrendExtras = {}; + const trendCols = {}; - let maxNameWidth = 0 - let maxNonTrendValueLen = 0 - let nonTrendExtraMaxLens = [] // FIXME: "lens"? + let maxNameWidth = 0; + let maxNonTrendValueLen = 0; + let nonTrendExtraMaxLens = []; // FIXME: "lens"? // Initialize tracking arrays for trend widths - const trendColMaxLens = new Array(numTrendColumns).fill(0) + const trendColMaxLens = new Array(numTrendColumns).fill(0); for (const name of metricNames) { - const metric = data.metrics[name] + const metric = data.metrics[name]; const displayName = indentForMetric(name) + displayNameForMetric(name); - maxNameWidth = Math.max(maxNameWidth, strWidth(displayName)) + maxNameWidth = Math.max(maxNameWidth, strWidth(displayName)); if (metric.type === 'trend') { - const cols = trendStats.map(stat => formatTrendValue(metric.values[stat], stat, metric, options)) + const cols = trendStats.map((stat) => + formatTrendValue(metric.values[stat], stat, metric, options), + ); // Compute max column widths cols.forEach((col, index) => { - trendColMaxLens[index] = Math.max(trendColMaxLens[index], strWidth(col)) - }) - trendCols[name] = cols + trendColMaxLens[index] = Math.max( + trendColMaxLens[index], + strWidth(col), + ); + }); + trendCols[name] = cols; } else { - const values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) - const mainValue = values[0] // FIXME (@oleiade) we should assert that the index exists here - nonTrendValues[name] = mainValue - maxNonTrendValueLen = Math.max(maxNonTrendValueLen, strWidth(mainValue)) + const values = nonTrendMetricValueForSum( + metric, + options.summaryTimeUnit, + ); + const mainValue = values[0]; // FIXME (@oleiade) we should assert that the index exists here + nonTrendValues[name] = mainValue; + maxNonTrendValueLen = Math.max( + maxNonTrendValueLen, + strWidth(mainValue), + ); // FIXME (@oleiade): what the fuck is an extra, really? - const extras = values.slice(1) - nonTrendExtras[name] = extras + const extras = values.slice(1); + nonTrendExtras[name] = extras; extras.forEach((value, index) => { - const width = strWidth(value) - if (nonTrendExtraMaxLens[index] === undefined || width > nonTrendExtraMaxLens[index]) { - nonTrendExtraMaxLens[index] = width + const width = strWidth(value); + if ( + nonTrendExtraMaxLens[index] === undefined || + width > nonTrendExtraMaxLens[index] + ) { + nonTrendExtraMaxLens[index] = width; } - }) + }); } } @@ -514,8 +544,8 @@ function computeSummaryInfo(metricNames, data, options) { numTrendColumns, trendStats, maxNonTrendValueLen, - nonTrendExtraMaxLens - } + nonTrendExtraMaxLens, + }; } /** @@ -552,12 +582,15 @@ function renderMetricLine(name, metric, info, options, decorate, indent) { // Compute the trailing dots: // Use `3` as a spacing offset as per original code. 
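  // (Illustrative note, not part of the original patch: the dot padding computed
  // below aligns every metric line to the same column, so the rendered summary
  // reads roughly like
  //   http_req_duration..............: avg=120ms min=80ms med=115ms max=310ms p(90)=190ms p(95)=220ms
  //   http_reqs......................: 100     9.8/s
  // where the metric values shown here are made up for the example.)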
- const dotsCount = maxNameWidth - strWidth(displayedName) - strWidth(fmtIndent) + 3; - const dottedName = displayedName + decorate('.'.repeat(dotsCount) + ':', palette.faint); + const dotsCount = + maxNameWidth - strWidth(displayedName) - strWidth(fmtIndent) + 3; + const dottedName = + displayedName + decorate('.'.repeat(dotsCount) + ':', palette.faint); - const dataPart = (metric.type === 'trend') - ? formatTrendData(name, info, decorate) - : formatNonTrendData(name, info, decorate); + const dataPart = + metric.type === 'trend' + ? formatTrendData(name, info, decorate) + : formatNonTrendData(name, info, decorate); // FIXME (@oleiade): We need a more consistent and central way to manage indentations // FIXME (@oleiade): We call them "options" everywhere but they're actually configuration I would argue @@ -583,12 +616,14 @@ function formatSubmetricLine(name, metric, info, options, decorate, indent) { // Compute the trailing dots: // Use `3` as a spacing offset as per original code. let dotsCount = maxNameWidth - strWidth(name) - strWidth(indent) + 3; - dotsCount = Math.max(1, dotsCount) - const dottedName = name + decorate('.'.repeat(dotsCount) + ':', palette.faint); + dotsCount = Math.max(1, dotsCount); + const dottedName = + name + decorate('.'.repeat(dotsCount) + ':', palette.faint); - const dataPart = (metric.type === 'trend') - ? formatTrendData(name, info, decorate) - : formatNonTrendData(name, info, decorate); + const dataPart = + metric.type === 'trend' + ? formatTrendData(name, info, decorate) + : formatNonTrendData(name, info, decorate); return indent + ' ' + dottedName + ' ' + dataPart; } @@ -600,11 +635,13 @@ function formatTrendData(name, info, decorate) { const { trendStats, trendCols, trendColMaxLens } = info; const cols = trendCols[name]; - return cols.map((col, i) => { - const statName = trendStats[i]; - const padding = ' '.repeat(trendColMaxLens[i] - strWidth(col)); - return statName + '=' + decorate(col, palette.cyan) + padding; - }).join(' '); + return cols + .map((col, i) => { + const statName = trendStats[i]; + const padding = ' '.repeat(trendColMaxLens[i] - strWidth(col)); + return statName + '=' + decorate(col, palette.cyan) + padding; + }) + .join(' '); } /** @@ -615,7 +652,12 @@ function formatTrendData(name, info, decorate) { * @param {(text: string, ...colors: number[]) => string} decorate - A decoration function for ANSI colors. */ function formatNonTrendData(name, info, decorate) { - const { nonTrendValues, nonTrendExtras, maxNonTrendValueLen, nonTrendExtraMaxLens } = info; + const { + nonTrendValues, + nonTrendExtras, + maxNonTrendValueLen, + nonTrendExtraMaxLens, + } = info; const value = nonTrendValues[name]; const extras = nonTrendExtras[name] || []; @@ -629,7 +671,9 @@ function formatNonTrendData(name, info, decorate) { } else if (extras.length > 1) { // Multiple extras need their own spacing const parts = extras.map((val, i) => { - const extraSpace = ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(val)); + const extraSpace = ' '.repeat( + nonTrendExtraMaxLens[i] - strWidth(val), + ); return decorate(val, palette.cyan, palette.faint) + extraSpace; }); result += ' ' + parts.join(' '); @@ -652,33 +696,44 @@ function formatNonTrendData(name, info, decorate) { * @returns {string[]} - Array of formatted lines including threshold statuses. 
*/ function renderThresholds(data, decorate, options) { - const indent = options.indent + ' ' + const indent = options.indent + ' '; // Extract and optionally sort metric names - let metricNames = Object.keys(data.metrics) + let metricNames = Object.keys(data.metrics); if (options.sortByName) { - metricNames = sortMetricsByName(metricNames) + metricNames = sortMetricsByName(metricNames); } // Precompute all formatting information - const summaryInfo = computeSummaryInfo(metricNames, data, options) + const summaryInfo = computeSummaryInfo(metricNames, data, options); // Format each threshold line by preparing each metric affected by a threshold, as // well as the thresholds results for each expression. - const result = [] + const result = []; for (const name of metricNames) { - const metric = data.metrics[name] - const line = formatSubmetricLine(name, metric, summaryInfo, options, decorate, '') - result.push(line) + const metric = data.metrics[name]; + const line = formatSubmetricLine( + name, + metric, + summaryInfo, + options, + decorate, + '', + ); + result.push(line); if (metric.thresholds) { // TODO (@oleiade): make sure the arguments are always ordered consistently across functions (indent, decorate, etc.) - const thresholdLines = renderThresholdResults(metric.thresholds, indent, decorate) - result.push(...thresholdLines) + const thresholdLines = renderThresholdResults( + metric.thresholds, + indent, + decorate, + ); + result.push(...thresholdLines); } } - return result + return result; } /** @@ -690,26 +745,26 @@ function renderThresholds(data, decorate, options) { * @returns {string[]} - An array of formatted lines including threshold statuses. */ function renderThresholdResults(thresholds, indent, decorate) { - const lines = [] + const lines = []; forEach(thresholds, (_, threshold) => { - const isSatisfied = threshold.ok + const isSatisfied = threshold.ok; const statusText = isSatisfied ? decorate('SATISFIED', palette.green) - : decorate('UNSATISFIED', palette.red) + : decorate('UNSATISFIED', palette.red); // Extra indentation for threshold lines // Adjusting spacing so that it aligns nicely under the metric line - const additionalIndent = isSatisfied ? ' ' : ' ' - const sourceText = decorate(`'${threshold.source}'`, palette.faint) + const additionalIndent = isSatisfied ? ' ' : ' '; + const sourceText = decorate(`'${threshold.source}'`, palette.faint); // Here we push a line describing the threshold's result lines.push( - indent + indent + ' ' + statusText + additionalIndent + sourceText - ) - }) + indent + indent + ' ' + statusText + additionalIndent + sourceText, + ); + }); - return lines + return lines; } /** @@ -721,8 +776,8 @@ function renderThresholdResults(thresholds, indent, decorate) { * @returns {string} A formatted summary of the test results. */ function generateTextSummary(data, options, report) { - const mergedOpts = Object.assign({}, defaultOptions, data.options, options) - const lines = [] + const mergedOpts = Object.assign({}, defaultOptions, data.options, options); + const lines = []; // TODO: move all of these functions into an object with methods? 
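  // (Illustrative, not part of the original patch: the `decorate` helper defined
  // just below wraps text in ANSI escape sequences when colors are enabled, e.g.
  //   decorate('passed', 32)    // -> '\x1b[32mpassed\x1b[0m'   (green)
  //   decorate('failed', 31, 1) // -> '\x1b[31;1mfailed\x1b[0m' (bold red)
  // and is a no-op that returns the text unchanged when colors are disabled.)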
/** @@ -733,50 +788,50 @@ function generateTextSummary(data, options, report) { * @returns {*} */ let decorate = function (text, _) { - return text - } + return text; + }; if (mergedOpts.enableColors) { decorate = function (text, color /*, ...rest*/) { - let result = '\x1b[' + color + let result = '\x1b[' + color; for (let i = 2; i < arguments.length; i++) { - result += ';' + arguments[i] + result += ';' + arguments[i]; } - return result + 'm' + text + '\x1b[0m' - } + return result + 'm' + text + '\x1b[0m'; + }; } const ANSI = { - reset: "\x1b[0m", + reset: '\x1b[0m', // Standard Colors - black: "\x1b[30m", - red: "\x1b[31m", - green: "\x1b[32m", - yellow: "\x1b[33m", - blue: "\x1b[34m", - magenta: "\x1b[35m", - cyan: "\x1b[36m", - white: "\x1b[37m", + black: '\x1b[30m', + red: '\x1b[31m', + green: '\x1b[32m', + yellow: '\x1b[33m', + blue: '\x1b[34m', + magenta: '\x1b[35m', + cyan: '\x1b[36m', + white: '\x1b[37m', // Bright Colors - brightBlack: "\x1b[90m", - brightRed: "\x1b[91m", - brightGreen: "\x1b[92m", - brightYellow: "\x1b[93m", - brightBlue: "\x1b[94m", - brightMagenta: "\x1b[95m", - brightCyan: "\x1b[96m", - brightWhite: "\x1b[97m", + brightBlack: '\x1b[90m', + brightRed: '\x1b[91m', + brightGreen: '\x1b[92m', + brightYellow: '\x1b[93m', + brightBlue: '\x1b[94m', + brightMagenta: '\x1b[95m', + brightCyan: '\x1b[96m', + brightWhite: '\x1b[97m', // Dark Colors - darkGrey: "\x1b[90m", + darkGrey: '\x1b[90m', }; - const BOLD = '\u001b[1m' + const BOLD = '\u001b[1m'; const RESET = ANSI.reset; - const boldify = (text) => BOLD + text + RESET + const boldify = (text) => BOLD + text + RESET; - const defaultIndent = ' ' - const metricGroupIndent = ' ' + const defaultIndent = ' '; + const metricGroupIndent = ' '; /** * Displays a metrics block name (section heading). @@ -787,21 +842,21 @@ function generateTextSummary(data, options, report) { const displayMetricsBlockName = (sectionName, opts) => { let bold = true; if (opts && opts.bold === false) { - bold = false + bold = false; } - let normalizedSectionName = sectionName.toUpperCase() + let normalizedSectionName = sectionName.toUpperCase(); if (bold) { - normalizedSectionName = boldify(normalizedSectionName) + normalizedSectionName = boldify(normalizedSectionName); } - let indent = ' ' + let indent = ' '; if (opts && opts.metricsBlockIndent) { - indent += opts.metricsBlockIndent + indent += opts.metricsBlockIndent; } - lines.push(indent + normalizedSectionName) - } + lines.push(indent + normalizedSectionName); + }; /** * Displays a block of metrics with the given options. @@ -811,10 +866,13 @@ function generateTextSummary(data, options, report) { */ // FIXME const displayMetricsBlock = (sectionMetrics, opts) => { - const summarizeOpts = Object.assign({}, mergedOpts, opts) - Array.prototype.push.apply(lines, renderMetrics({metrics: sectionMetrics}, decorate, summarizeOpts)) - lines.push('') - } + const summarizeOpts = Object.assign({}, mergedOpts, opts); + Array.prototype.push.apply( + lines, + renderMetrics({ metrics: sectionMetrics }, decorate, summarizeOpts), + ); + lines.push(''); + }; /** * Displays checks within a certain context (indentation, etc.). @@ -822,18 +880,28 @@ function generateTextSummary(data, options, report) { * @param {Object} checks - Checks data, containing `metrics` and `ordered_checks`. * @param {Partial} [opts={indent: ''}] - Options including indentation. 
*/ - const displayChecks = (checks, opts = {indent: ''}) => { + const displayChecks = (checks, opts = { indent: '' }) => { if (checks === undefined || checks === null) { - return + return; } - displayMetricsBlock(checks.metrics, {...opts, indent: opts.indent + defaultIndent, sortByName: false}) + displayMetricsBlock(checks.metrics, { + ...opts, + indent: opts.indent + defaultIndent, + sortByName: false, + }); for (let i = 0; i < checks.ordered_checks.length; i++) { - lines.push(renderCheck(metricGroupIndent + metricGroupIndent + opts.indent, checks.ordered_checks[i], decorate)) + lines.push( + renderCheck( + metricGroupIndent + metricGroupIndent + opts.indent, + checks.ordered_checks[i], + decorate, + ), + ); } if (checks.ordered_checks.length > 0) { - lines.push('') + lines.push(''); } - } + }; /** * Displays thresholds and their satisfaction status. @@ -842,16 +910,30 @@ function generateTextSummary(data, options, report) { */ const displayThresholds = (thresholds) => { if (thresholds === undefined || thresholds === null) { - return + return; } - lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('THRESHOLDS') + '\n') - - const mergedOpts = Object.assign({}, defaultOptions, data.options, options) + lines.push( + metricGroupIndent + + groupPrefix + + defaultIndent + + boldify('THRESHOLDS') + + '\n', + ); + + const mergedOpts = Object.assign( + {}, + defaultOptions, + data.options, + options, + ); let metrics = {}; forEach(thresholds, (_, threshold) => { - metrics[threshold.metric.name] = {...threshold.metric, thresholds: threshold.thresholds} + metrics[threshold.metric.name] = { + ...threshold.metric, + thresholds: threshold.thresholds, + }; }); // Array.prototype.push.apply(lines, summarizeMetricsWithThresholds( @@ -859,66 +941,98 @@ function generateTextSummary(data, options, report) { // {metrics}, // decorate), // ) - Array.prototype.push.apply(lines, renderThresholds({metrics}, decorate, {...mergedOpts, indent: mergedOpts.indent + defaultIndent})) - lines.push('') + Array.prototype.push.apply( + lines, + renderThresholds({ metrics }, decorate, { + ...mergedOpts, + indent: mergedOpts.indent + defaultIndent, + }), + ); + lines.push(''); }; // THRESHOLDS - displayThresholds(report.thresholds) + displayThresholds(report.thresholds); // TOTAL RESULTS - lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify('TOTAL RESULTS') + '\n') + lines.push( + metricGroupIndent + + groupPrefix + + defaultIndent + + boldify('TOTAL RESULTS') + + '\n', + ); // CHECKS - displayChecks(report.checks) + displayChecks(report.checks); // METRICS forEach(report.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it if (Object.keys(sectionMetrics).length === 0) { - return + return; } - displayMetricsBlockName(sectionName) - displayMetricsBlock(sectionMetrics) - }) + displayMetricsBlockName(sectionName); + displayMetricsBlock(sectionMetrics); + }); // END OF TOTAL RESULTS // GROUPS const summarize = (prefix, indent) => { return (groupName, groupData) => { - lines.push(metricGroupIndent + indent + prefix + defaultIndent + boldify(`GROUP: ${groupName}`) + '\n') - displayChecks(groupData.checks, {indent: indent}) + lines.push( + metricGroupIndent + + indent + + prefix + + defaultIndent + + boldify(`GROUP: ${groupName}`) + + '\n', + ); + displayChecks(groupData.checks, { indent: indent }); forEach(groupData.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it if (Object.keys(sectionMetrics).length === 0) 
{ - return + return; } - displayMetricsBlockName(sectionName, {metricsBlockIndent: indent}) - displayMetricsBlock(sectionMetrics, {indent: indent + defaultIndent}) - }) + displayMetricsBlockName(sectionName, { + metricsBlockIndent: indent, + }); + displayMetricsBlock(sectionMetrics, { + indent: indent + defaultIndent, + }); + }); if (groupData.groups !== undefined) { - forEach(groupData.groups, summarize(detailsPrefix, indent + metricGroupIndent)); + forEach( + groupData.groups, + summarize(detailsPrefix, indent + metricGroupIndent), + ); } - } - } + }; + }; const summarizeNestedGroups = (groupName, groupData) => { - lines.push(metricGroupIndent + groupPrefix + ' ' + boldify(`GROUP: ${groupName}`) + '\n') + lines.push( + metricGroupIndent + + groupPrefix + + ' ' + + boldify(`GROUP: ${groupName}`) + + '\n', + ); forEach(groupData.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it if (Object.keys(sectionMetrics).length === 0) { - return + return; } - displayMetricsBlockName(sectionName) - displayMetricsBlock(sectionMetrics) - }) + displayMetricsBlockName(sectionName); + displayMetricsBlock(sectionMetrics); + }); if (groupData.groups !== undefined) { forEach(groupData.groups, summarizeNestedGroups); } - } + }; if (report.groups !== undefined) { forEach(report.groups, summarize(groupPrefix, defaultIndent)); @@ -927,25 +1041,34 @@ function generateTextSummary(data, options, report) { // SCENARIOS if (report.scenarios !== undefined) { forEach(report.scenarios, (scenarioName, scenarioData) => { - lines.push(metricGroupIndent + groupPrefix + defaultIndent + boldify(`SCENARIO: ${scenarioName}`) + '\n') - displayChecks(scenarioData.checks) + lines.push( + metricGroupIndent + + groupPrefix + + defaultIndent + + boldify(`SCENARIO: ${scenarioName}`) + + '\n', + ); + displayChecks(scenarioData.checks); forEach(scenarioData.metrics, (sectionName, sectionMetrics) => { // If there are no metrics in this section, skip it if (Object.keys(sectionMetrics).length === 0) { - return + return; } - displayMetricsBlockName(sectionName) - displayMetricsBlock(sectionMetrics) - }) + displayMetricsBlockName(sectionName); + displayMetricsBlock(sectionMetrics); + }); if (scenarioData.groups !== undefined) { - forEach(scenarioData.groups, summarize(detailsPrefix, metricGroupIndent)); + forEach( + scenarioData.groups, + summarize(detailsPrefix, metricGroupIndent), + ); } - }) + }); } - return lines.join('\n') + return lines.join('\n'); } -exports.humanizeValue = humanizeValue -exports.textSummary = generateTextSummary +exports.humanizeValue = humanizeValue; +exports.textSummary = generateTextSummary; From af4d6f4b678768bf2b2bcf2551b8c05cedfe820d Mon Sep 17 00:00:00 2001 From: oleiade Date: Mon, 16 Dec 2024 15:32:14 +0100 Subject: [PATCH 19/42] Factor decoration in a ANSIFormatter class --- js/summary.js | 274 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 163 insertions(+), 111 deletions(-) diff --git a/js/summary.js b/js/summary.js index 242dd6da009..6d220758823 100644 --- a/js/summary.js +++ b/js/summary.js @@ -79,16 +79,6 @@ function forEach(obj, callback) { } } -/** A palette of ANSI color codes for terminal output. */ -const palette = { - bold: 1, - faint: 2, - red: 31, - green: 32, - cyan: 36, - //TODO: add others? 
-}; - const groupPrefix = '█'; const detailsPrefix = '↳'; const succMark = '✓'; @@ -373,18 +363,21 @@ function sortMetricsByName(metricNames) { * * @param {string} indent * @param {{name: string, passes: number, fails: number}} check - The check object with name, passes and fails - * @param {(text: string, ...colors: number[]) => string} decorate - A function to apply ANSI colors. + * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. * @returns {string} - A formatted line summarizing the check. */ -function renderCheck(indent, check, decorate) { +function renderCheck(indent, check, formatter) { if (check.fails === 0) { - return decorate(indent + succMark + ' ' + check.name, palette.green); + return formatter.decorate( + indent + succMark + ' ' + check.name, + 'green', + ); } const succPercent = Math.floor( (100 * check.passes) / (check.passes + check.fails), ); - return decorate( + return formatter.decorate( indent + failMark + ' ' + @@ -403,7 +396,7 @@ function renderCheck(indent, check, decorate) { failMark + ' ' + check.fails, - palette.red, + 'red', ); } @@ -422,11 +415,11 @@ function renderCheck(indent, check, decorate) { * * @param {{metrics: Object[]}} data - The data object containing metrics. * @param {summarizeMetricsOptions} options - Display options merged with defaultOptions. - * @param {(text: string, ...colors: number[]) => string} decorate - A decoration function for ANSI colors. + * @param {ANSIFormatter} formatter - An ANSIFormatter function for ANSI colors. * @returns {string[]} */ -function renderMetrics(data, decorate, options) { - const indent = options.indent + ' '; // FIXME @oleiade shouldn't we provide this at the caller? +function renderMetrics(data, formatter, options) { + const indent = options.indent + ' '; // Extract all metric names let metricNames = Object.keys(data.metrics); @@ -447,7 +440,7 @@ function renderMetrics(data, decorate, options) { metric, summaryInfo, options, - decorate, + formatter, indent, ); }); @@ -570,11 +563,11 @@ function formatTrendValue(value, stat, metric, options) { * @param {ReportMetric} metric - The metric object containing details about the metric. * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. * @param {summarizeMetricsOptions} options - Configuration options for summarizing metrics. - * @param {(text: string, ...colors: number[]) => string} decorate - A function to apply ANSI colors to text. + * @param {ANSIFormatter} formatter - A function to apply ANSI colors to text. * @param {string} indent - The indentation string to use for the output. * @returns {string} - The formatted metric line. */ -function renderMetricLine(name, metric, info, options, decorate, indent) { +function renderMetricLine(name, metric, info, options, formatter, indent) { const { maxNameWidth } = info; const displayedName = displayNameForMetric(name); @@ -585,12 +578,13 @@ function renderMetricLine(name, metric, info, options, decorate, indent) { const dotsCount = maxNameWidth - strWidth(displayedName) - strWidth(fmtIndent) + 3; const dottedName = - displayedName + decorate('.'.repeat(dotsCount) + ':', palette.faint); + displayedName + + formatter.decorate('.'.repeat(dotsCount) + ':', 'white', 'faint'); const dataPart = metric.type === 'trend' - ? formatTrendData(name, info, decorate) - : formatNonTrendData(name, info, decorate); + ? 
renderTrendData(name, info, formatter) + : renderNonTrendData(name, info, formatter); // FIXME (@oleiade): We need a more consistent and central way to manage indentations // FIXME (@oleiade): We call them "options" everywhere but they're actually configuration I would argue @@ -598,7 +592,6 @@ function renderMetricLine(name, metric, info, options, decorate, indent) { } // FIXME (@oleiade): summarizeMetricsOptions needs a better name "DisplayConfig"? -// FIXME (@oleiade): decorate function should have a dedicated typedef /** * Formats a submetric (metric+tags key/value pairs) line for output. * @@ -606,11 +599,11 @@ function renderMetricLine(name, metric, info, options, decorate, indent) { * @param {ReportMetric} metric - submetric object (submetric really are just a specialized metric with a tags set and a pointer to their parent) * @param {SummaryInfo} info - summary information object * @param {summarizeMetricsOptions} options - display options - * @param {(text: string, ...colors: number[]) => string} decorate - decoration function + * @param {ANSIFormatter} formatter - ANSI formatter * @param indent indentation string * @returns {string} submetric report line in the form: `{submetric name}...: {value} {extra}` */ -function formatSubmetricLine(name, metric, info, options, decorate, indent) { +function formatSubmetricLine(name, metric, info, options, formatter, indent) { const { maxNameWidth } = info; // Compute the trailing dots: @@ -618,12 +611,13 @@ function formatSubmetricLine(name, metric, info, options, decorate, indent) { let dotsCount = maxNameWidth - strWidth(name) - strWidth(indent) + 3; dotsCount = Math.max(1, dotsCount); const dottedName = - name + decorate('.'.repeat(dotsCount) + ':', palette.faint); + name + + formatter.decorate('.'.repeat(dotsCount) + ':', 'white', 'faint'); const dataPart = metric.type === 'trend' - ? formatTrendData(name, info, decorate) - : formatNonTrendData(name, info, decorate); + ? renderTrendData(name, info, formatter) + : renderNonTrendData(name, info, formatter); return indent + ' ' + dottedName + ' ' + dataPart; } @@ -631,7 +625,8 @@ function formatSubmetricLine(name, metric, info, options, decorate, indent) { /** * Format data for trend metrics. */ -function formatTrendData(name, info, decorate) { +// FIXME (@oleiade): rename +function renderTrendData(name, info, formatter) { const { trendStats, trendCols, trendColMaxLens } = info; const cols = trendCols[name]; @@ -639,7 +634,7 @@ function formatTrendData(name, info, decorate) { .map((col, i) => { const statName = trendStats[i]; const padding = ' '.repeat(trendColMaxLens[i] - strWidth(col)); - return statName + '=' + decorate(col, palette.cyan) + padding; + return statName + '=' + formatter.decorate(col, 'cyan') + padding; }) .join(' '); } @@ -649,9 +644,9 @@ function formatTrendData(name, info, decorate) { * * @param {string} name - The metric name. * @param {Object} info - The summary information object. - * @param {(text: string, ...colors: number[]) => string} decorate - A decoration function for ANSI colors. + * @param {ANSIFormatter} formatter - A decoration function for ANSI colors. 
*/ -function formatNonTrendData(name, info, decorate) { +function renderNonTrendData(name, info, formatter) { const { nonTrendValues, nonTrendExtras, @@ -662,19 +657,19 @@ function formatNonTrendData(name, info, decorate) { const value = nonTrendValues[name]; const extras = nonTrendExtras[name] || []; - let result = decorate(value, palette.cyan); + let result = formatter.decorate(value, 'cyan'); result += ' '.repeat(maxNonTrendValueLen - strWidth(value)); if (extras.length === 1) { // Single extra value - result += ' ' + decorate(extras[0], palette.cyan, palette.faint); + result += ' ' + formatter.decorate(extras[0], 'cyan', 'faint'); } else if (extras.length > 1) { // Multiple extras need their own spacing const parts = extras.map((val, i) => { const extraSpace = ' '.repeat( nonTrendExtraMaxLens[i] - strWidth(val), ); - return decorate(val, palette.cyan, palette.faint) + extraSpace; + return formatter.decorate(val, 'cyan', 'faint') + extraSpace; }); result += ' ' + parts.join(' '); } @@ -692,10 +687,10 @@ function formatNonTrendData(name, info, decorate) { * * @param {Object} options - Options merged with defaults. * @param {ReportData} data - The data containing metrics. - * @param {(text: string, ...colors: number[]) => string} decorate - Decoration function. + * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. * @returns {string[]} - Array of formatted lines including threshold statuses. */ -function renderThresholds(data, decorate, options) { +function renderThresholds(data, formatter, options) { const indent = options.indent + ' '; // Extract and optionally sort metric names @@ -717,7 +712,7 @@ function renderThresholds(data, decorate, options) { metric, summaryInfo, options, - decorate, + formatter, '', ); result.push(line); @@ -727,7 +722,7 @@ function renderThresholds(data, decorate, options) { const thresholdLines = renderThresholdResults( metric.thresholds, indent, - decorate, + formatter, ); result.push(...thresholdLines); } @@ -741,22 +736,26 @@ function renderThresholds(data, decorate, options) { * * @param {Object} thresholds - The thresholds to render. * @param {string} indent - The indentation string to use for the output. - * @param {(text: string, ...colors: number[]) => string} decorate - A function to apply ANSI colors to text. + * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. * @returns {string[]} - An array of formatted lines including threshold statuses. */ -function renderThresholdResults(thresholds, indent, decorate) { +function renderThresholdResults(thresholds, indent, formatter) { const lines = []; forEach(thresholds, (_, threshold) => { const isSatisfied = threshold.ok; const statusText = isSatisfied - ? decorate('SATISFIED', palette.green) - : decorate('UNSATISFIED', palette.red); + ? formatter.decorate('SATISFIED', 'green') + : formatter.decorate('UNSATISFIED', 'red'); // Extra indentation for threshold lines // Adjusting spacing so that it aligns nicely under the metric line const additionalIndent = isSatisfied ? ' ' : ' '; - const sourceText = decorate(`'${threshold.source}'`, palette.faint); + const sourceText = formatter.decorate( + `'${threshold.source}'`, + 'white', + 'faint', + ); // Here we push a line describing the threshold's result lines.push( @@ -767,6 +766,109 @@ function renderThresholdResults(thresholds, indent, decorate) { return lines; } +/** + */ +/** + * ANSIColor maps ANSI color names to their respective escape codes. 
+ * + * @typedef {'reset'|'black'|'red'|'green'|'yellow'|'blue'|'magenta'|'cyan'| + * 'white'|'brightRed'|'brightGreen'|'brightYellow'} ANSIColor + * + * @typedef {Record} ANSIColors + */ +const ANSIColors = { + reset: '\x1b[0m', + + // Standard Colors + black: '30', + red: '31', + green: '32', + yellow: '33', + blue: '34', + magenta: '35', + cyan: '36', + white: '37', + + // Bright Colors + brightRed: '91', + brightGreen: '92', + brightYellow: '93', +}; + +/** + * ANSIStyle maps ANSI style names to their respective escape codes. + * + * @typedef {'bold' | 'faint' | 'underline' | 'reversed'} ANSIStyle + * + * @typedef {Record} ANSIStyles + */ +const ANSIStyles = { + bold: '1', + faint: '2', + underline: '4', + reversed: '7', +}; + +class ANSIFormatter { + /** + * Constructs an ANSIFormatter with configurable color and styling options + * @param {Object} options - Configuration options for formatting + * @param {boolean} [options.enableColors=true] - Whether to enable color output + */ + constructor(options = {}) { + this.options = { + enableColors: true, + ...options, + }; + } + + /** + * Decorates text with ANSI color and style. + * @param {string} text - The text to decorate. + * @param {ANSIColor} color - The ANSI color to apply. + * @param {...ANSIStyle} styles - optional additional styles to apply. + * @returns {string} - Decorated text, or plain text if colors are disabled. + */ + decorate(text, color, ...styles) { + if (!this.options.enableColors) { + return text; + } + + const colorCode = ANSIColors[color] || ANSIColors.white; + const styleCodes = styles + .map((style) => ANSIStyles[style]) + .filter(Boolean); + + const fullCodes = styleCodes.length + ? [...styleCodes, colorCode].join(';') + : colorCode; + + const fullSequence = `\x1b[${fullCodes}m`; + + return `${fullSequence}${text}\x1b[0m`; + } + + /** + * Applies bold styling to text + * @param {string} text - Text to make bold + * @returns {string} Bold text + */ + boldify(text) { + return this.decorate(text, 'white', 'bold'); + } + + /** + * Colorizes text with optional styling. + * @param {string} text - The text to colorize. + * @param {ANSIColor} [color=ANSIColors.white] - Color to apply. + * @param {...ANSIStyle} styles - Additional styles. + * @returns {string} - Colorized text. + */ + colorize(text, color = ANSIColors.white, ...styles) { + return this.decorate(text, color, ...styles); + } +} + /** * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. * @@ -779,56 +881,8 @@ function generateTextSummary(data, options, report) { const mergedOpts = Object.assign({}, defaultOptions, data.options, options); const lines = []; - // TODO: move all of these functions into an object with methods? - /** - * Decorates text with ANSI color codes. 
- * - * @param text - * @param _ - * @returns {*} - */ - let decorate = function (text, _) { - return text; - }; - if (mergedOpts.enableColors) { - decorate = function (text, color /*, ...rest*/) { - let result = '\x1b[' + color; - for (let i = 2; i < arguments.length; i++) { - result += ';' + arguments[i]; - } - return result + 'm' + text + '\x1b[0m'; - }; - } - - const ANSI = { - reset: '\x1b[0m', - - // Standard Colors - black: '\x1b[30m', - red: '\x1b[31m', - green: '\x1b[32m', - yellow: '\x1b[33m', - blue: '\x1b[34m', - magenta: '\x1b[35m', - cyan: '\x1b[36m', - white: '\x1b[37m', - - // Bright Colors - brightBlack: '\x1b[90m', - brightRed: '\x1b[91m', - brightGreen: '\x1b[92m', - brightYellow: '\x1b[93m', - brightBlue: '\x1b[94m', - brightMagenta: '\x1b[95m', - brightCyan: '\x1b[96m', - brightWhite: '\x1b[97m', - - // Dark Colors - darkGrey: '\x1b[90m', - }; - const BOLD = '\u001b[1m'; - const RESET = ANSI.reset; - const boldify = (text) => BOLD + text + RESET; + // Create a formatter with default settings (colors enabled) + const formatter = new ANSIFormatter(); const defaultIndent = ' '; const metricGroupIndent = ' '; @@ -848,7 +902,7 @@ function generateTextSummary(data, options, report) { let normalizedSectionName = sectionName.toUpperCase(); if (bold) { - normalizedSectionName = boldify(normalizedSectionName); + normalizedSectionName = formatter.boldify(normalizedSectionName); } let indent = ' '; @@ -864,12 +918,15 @@ function generateTextSummary(data, options, report) { * @param {Object[]} sectionMetrics - The metrics to display. * @param {Partial} [opts] - Display options. */ - // FIXME const displayMetricsBlock = (sectionMetrics, opts) => { const summarizeOpts = Object.assign({}, mergedOpts, opts); Array.prototype.push.apply( lines, - renderMetrics({ metrics: sectionMetrics }, decorate, summarizeOpts), + renderMetrics( + { metrics: sectionMetrics }, + formatter, + summarizeOpts, + ), ); lines.push(''); }; @@ -894,7 +951,7 @@ function generateTextSummary(data, options, report) { renderCheck( metricGroupIndent + metricGroupIndent + opts.indent, checks.ordered_checks[i], - decorate, + formatter, ), ); } @@ -917,7 +974,7 @@ function generateTextSummary(data, options, report) { metricGroupIndent + groupPrefix + defaultIndent + - boldify('THRESHOLDS') + + formatter.boldify('THRESHOLDS') + '\n', ); @@ -936,14 +993,9 @@ function generateTextSummary(data, options, report) { }; }); - // Array.prototype.push.apply(lines, summarizeMetricsWithThresholds( - // {...mergedOpts, indent: mergedOpts.indent + defaultIndent}, - // {metrics}, - // decorate), - // ) Array.prototype.push.apply( lines, - renderThresholds({ metrics }, decorate, { + renderThresholds({ metrics }, formatter, { ...mergedOpts, indent: mergedOpts.indent + defaultIndent, }), @@ -959,7 +1011,7 @@ function generateTextSummary(data, options, report) { metricGroupIndent + groupPrefix + defaultIndent + - boldify('TOTAL RESULTS') + + formatter.boldify('TOTAL RESULTS') + '\n', ); @@ -986,7 +1038,7 @@ function generateTextSummary(data, options, report) { indent + prefix + defaultIndent + - boldify(`GROUP: ${groupName}`) + + formatter.boldify(`GROUP: ${groupName}`) + '\n', ); displayChecks(groupData.checks, { indent: indent }); @@ -1017,7 +1069,7 @@ function generateTextSummary(data, options, report) { metricGroupIndent + groupPrefix + ' ' + - boldify(`GROUP: ${groupName}`) + + formatter.boldify(`GROUP: ${groupName}`) + '\n', ); forEach(groupData.metrics, (sectionName, sectionMetrics) => { @@ -1045,7 +1097,7 @@ function 
generateTextSummary(data, options, report) { metricGroupIndent + groupPrefix + defaultIndent + - boldify(`SCENARIO: ${scenarioName}`) + + formatter.boldify(`SCENARIO: ${scenarioName}`) + '\n', ); displayChecks(scenarioData.checks); From 8b1da1db59024d14fbfde85ea006a3994abc96d6 Mon Sep 17 00:00:00 2001 From: oleiade Date: Tue, 17 Dec 2024 16:08:29 +0100 Subject: [PATCH 20/42] Refactor text summary generation for simplicity and maintainability --- js/summary.js | 720 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 433 insertions(+), 287 deletions(-) diff --git a/js/summary.js b/js/summary.js index 6d220758823..29f02c933ef 100644 --- a/js/summary.js +++ b/js/summary.js @@ -1,6 +1,3 @@ -// FIXME (@oleiade): We need a more consistent and central way to manage indentations -// FIXME (@oleiade): We call them "options" everywhere but they're actually configuration I would argue - /** * @typedef {Object} Threshold * @property {string} source - The threshold expression source. @@ -63,6 +60,14 @@ * @property {Record} metrics - Collection of metrics keyed by their names. */ +/** + * @typedef {Object} Options + * @property {boolean} [enableColors = true] - Whether to enable ANSI colors. + * @property {string | null} [summaryTimeUnit = null] - The time unit for duration metrics. + * @property {string[] | null} [summaryTrendStats = null] - The trend statistics to summarize. + * @property {boolean} [sortByName = true] - Whether to sort metrics by name. + */ + /** * A simple iteration utility function for objects. * @@ -79,13 +84,12 @@ function forEach(obj, callback) { } } -const groupPrefix = '█'; -const detailsPrefix = '↳'; -const succMark = '✓'; +const titlePrefix = '█'; +const subtitlePrefix = '↳'; +const successMark = '✓'; const failMark = '✗'; const defaultOptions = { - indent: ' ', - enableColors: true, + enableColors: true, // FIXME (@oleiade): we should ensure we respect this flag summaryTimeUnit: null, summaryTrendStats: null, sortByName: true, @@ -159,19 +163,6 @@ function displayNameForMetric(name) { return name; } -/** - * Determines the indentation for a metric line based on whether it has submetrics. - * - * @param {string} name - The metric name. - * @returns {string} - Indentation string. - */ -function indentForMetric(name) { - if (name.indexOf('{') >= 0) { - return ' '; - } - return ''; -} - /** * Converts a number of bytes into a human-readable string with units. * @@ -361,35 +352,32 @@ function sortMetricsByName(metricNames) { /** * Renders a single check into a formatted line ready for output. * - * @param {string} indent * @param {{name: string, passes: number, fails: number}} check - The check object with name, passes and fails * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. + * @param {RenderContext} renderContext - The render context to use for text rendering. * @returns {string} - A formatted line summarizing the check. */ -function renderCheck(indent, check, formatter) { +function renderCheck(check, formatter, renderContext) { + // If the check was successful, immediately render a green line indicating success if (check.fails === 0) { - return formatter.decorate( - indent + succMark + ' ' + check.name, - 'green', + return renderContext.indent( + formatter.decorate(successMark + ' ' + check.name, 'green'), ); } - const succPercent = Math.floor( + // Other we want to display both the check name and the percentage of successful checks + // in red, along with the number of passes and fails. 
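  // (Illustrative, not part of the original patch: a hypothetical check named
  // "status is 200" with 2 passes and 1 failure renders as two red lines:
  //   ✗ status is 200
  //    ↳  66% — ✓ 2 / ✗ 1
  // where 66 is Math.floor(100 * passes / (passes + fails)).)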
+ const successfulPct = Math.floor( (100 * check.passes) / (check.passes + check.fails), ); - return formatter.decorate( - indent + - failMark + - ' ' + - check.name + - '\n' + - indent + - ' ' + - detailsPrefix + + + const checkName = formatter.decorate(failMark + ' ' + check.name, 'red'); + const results = formatter.decorate( + subtitlePrefix + ' ' + - succPercent + + successfulPct + '% — ' + - succMark + + successMark + ' ' + check.passes + ' / ' + @@ -398,29 +386,65 @@ function renderCheck(indent, check, formatter) { check.fails, 'red', ); + + return ( + renderContext.indent(checkName) + + '\n' + + renderContext.indent(results, renderContext.baseIndentationLevel + 1) + ); } /** - * @typedef {Object} summarizeMetricsOptions - * @property {string} indent - The indentation string. - * @property {boolean} enableColors - Whether to enable ANSI colors. - * @property {string} summaryTimeUnit - The time unit for duration metrics. - * @property {string[]} summaryTrendStats - The trend statistics to summarize. - * @property {boolean} sortByName - Whether to sort metrics by name. - * @property {boolean} noColor - Whether to disable ANSI colors. + * Renders checks into a formatted set of lines ready for display in the terminal. + * + * @param checks + * @param formatter + * @param {RenderContext} renderContext + * @param options + * @returns {*[]} */ +function renderChecks(checks, formatter, renderContext, options = {}) { + // If no checks exist, return empty array + if (!checks || !checks.ordered_checks) { + return []; + } + + // Add indentation to the render context for checks + renderContext = renderContext.indentedContext(1); + + const { showPassedChecks = true, showFailedChecks = true } = options; + + // Process each check and filter based on options + const renderedChecks = checks.ordered_checks + .filter((check) => { + // Filter logic for passed/failed checks + if (check.fails === 0 && !showPassedChecks) return false; + return !(check.fails > 0 && !showFailedChecks); + }) + .map((check) => renderCheck(check, formatter, renderContext)); + + // Render metrics for checks if they exist + const checkMetrics = checks.metrics + ? renderMetrics({ metrics: checks.metrics }, formatter, renderContext, { + ...options, + sortByName: false, + }) + : []; + + // Combine metrics and checks + return [...checkMetrics, ...renderedChecks]; +} /** * Summarizes metrics into an array of formatted lines ready to be printed to stdout. * * @param {{metrics: Object[]}} data - The data object containing metrics. - * @param {summarizeMetricsOptions} options - Display options merged with defaultOptions. * @param {ANSIFormatter} formatter - An ANSIFormatter function for ANSI colors. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param {summarizeMetricsOptions} options - Display options merged with defaultOptions. 
* @returns {string[]} */ -function renderMetrics(data, formatter, options) { - const indent = options.indent + ' '; - +function renderMetrics(data, formatter, renderContext, options) { // Extract all metric names let metricNames = Object.keys(data.metrics); @@ -430,7 +454,12 @@ function renderMetrics(data, formatter, options) { } // Precompute all formatting information - const summaryInfo = computeSummaryInfo(metricNames, data, options); + const summaryInfo = computeSummaryInfo( + metricNames, + data, + renderContext, + options, + ); // Format each metric line return metricNames.map((name) => { @@ -441,7 +470,7 @@ function renderMetrics(data, formatter, options) { summaryInfo, options, formatter, - indent, + renderContext, ); }); } @@ -464,11 +493,12 @@ function renderMetrics(data, formatter, options) { * metric. * * @param {string[]} metricNames - * @param {{metrics: Object[]}} data - The data object containing metrics. + * @param {ReportData} data - The data object containing metrics. + * @param {RenderContext} renderContext - The render context to use for text rendering. * @param {summarizeMetricsOptions} options * @returns {SummaryInfo} */ -function computeSummaryInfo(metricNames, data, options) { +function computeSummaryInfo(metricNames, data, renderContext, options) { const trendStats = options.summaryTrendStats; const numTrendColumns = trendStats.length; @@ -485,7 +515,9 @@ function computeSummaryInfo(metricNames, data, options) { for (const name of metricNames) { const metric = data.metrics[name]; - const displayName = indentForMetric(name) + displayNameForMetric(name); + const displayName = renderContext.indent( + name + displayNameForMetric(name), + ); maxNameWidth = Math.max(maxNameWidth, strWidth(displayName)); if (metric.type === 'trend') { @@ -564,14 +596,21 @@ function formatTrendValue(value, stat, metric, options) { * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. * @param {summarizeMetricsOptions} options - Configuration options for summarizing metrics. * @param {ANSIFormatter} formatter - A function to apply ANSI colors to text. - * @param {string} indent - The indentation string to use for the output. + * @param {RenderContext} renderContext - The render context to use for text rendering. * @returns {string} - The formatted metric line. */ -function renderMetricLine(name, metric, info, options, formatter, indent) { +function renderMetricLine( + name, + metric, + info, + options, + formatter, + renderContext, +) { const { maxNameWidth } = info; const displayedName = displayNameForMetric(name); - const fmtIndent = indentForMetric(name); + const fmtIndent = renderContext.indentLevel(); // Compute the trailing dots: // Use `3` as a spacing offset as per original code. @@ -586,9 +625,7 @@ function renderMetricLine(name, metric, info, options, formatter, indent) { ? renderTrendData(name, info, formatter) : renderNonTrendData(name, info, formatter); - // FIXME (@oleiade): We need a more consistent and central way to manage indentations - // FIXME (@oleiade): We call them "options" everywhere but they're actually configuration I would argue - return indent + fmtIndent + ' ' + dottedName + ' ' + dataPart; + return renderContext.indent(dottedName + ' ' + dataPart); } // FIXME (@oleiade): summarizeMetricsOptions needs a better name "DisplayConfig"? 
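// (Illustrative sketch, not part of the original patch: `RenderContext`, defined
// further down in this file, centralizes the indentation that renderMetricLine
// used to receive as a raw `indent` string. Assuming that definition:
//
//   const ctx = new RenderContext(0);
//   ctx.indent('http_reqs..........: 100');      // '  http_reqs..........: 100'
//   ctx.indentedContext(1).indent('✓ my check'); // '    ✓ my check'
//
// so callers only pass a context and each renderer indents its own output.)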
@@ -600,15 +637,26 @@ function renderMetricLine(name, metric, info, options, formatter, indent) { * @param {SummaryInfo} info - summary information object * @param {summarizeMetricsOptions} options - display options * @param {ANSIFormatter} formatter - ANSI formatter - * @param indent indentation string + * @param {RenderContext} renderContext - render context * @returns {string} submetric report line in the form: `{submetric name}...: {value} {extra}` */ -function formatSubmetricLine(name, metric, info, options, formatter, indent) { +function formatSubmetricLine( + name, + metric, + info, + options, + formatter, + renderContext, +) { const { maxNameWidth } = info; // Compute the trailing dots: // Use `3` as a spacing offset as per original code. - let dotsCount = maxNameWidth - strWidth(name) - strWidth(indent) + 3; + let dotsCount = + maxNameWidth - + strWidth(name) - + strWidth(renderContext.indentLevel()) + + 3; dotsCount = Math.max(1, dotsCount); const dottedName = name + @@ -619,7 +667,7 @@ function formatSubmetricLine(name, metric, info, options, formatter, indent) { ? renderTrendData(name, info, formatter) : renderNonTrendData(name, info, formatter); - return indent + ' ' + dottedName + ' ' + dataPart; + return renderContext.indent(dottedName + ' ' + dataPart); } /** @@ -685,14 +733,13 @@ function renderNonTrendData(name, info, formatter) { * {SATISFIED|UNSATISFIED} {source} * //... additional threshold lines * - * @param {Object} options - Options merged with defaults. * @param {ReportData} data - The data containing metrics. * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param {Object} options - Options merged with defaults. * @returns {string[]} - Array of formatted lines including threshold statuses. */ -function renderThresholds(data, formatter, options) { - const indent = options.indent + ' '; - +function renderThresholds(data, formatter, renderContext, options) { // Extract and optionally sort metric names let metricNames = Object.keys(data.metrics); if (options.sortByName) { @@ -700,7 +747,12 @@ function renderThresholds(data, formatter, options) { } // Precompute all formatting information - const summaryInfo = computeSummaryInfo(metricNames, data, options); + const summaryInfo = computeSummaryInfo( + metricNames, + data, + renderContext, + options, + ); // Format each threshold line by preparing each metric affected by a threshold, as // well as the thresholds results for each expression. @@ -713,7 +765,7 @@ function renderThresholds(data, formatter, options) { summaryInfo, options, formatter, - '', + renderContext, ); result.push(line); @@ -721,8 +773,8 @@ function renderThresholds(data, formatter, options) { // TODO (@oleiade): make sure the arguments are always ordered consistently across functions (indent, decorate, etc.) const thresholdLines = renderThresholdResults( metric.thresholds, - indent, formatter, + renderContext.indentedContext(1), ); result.push(...thresholdLines); } @@ -735,11 +787,11 @@ function renderThresholds(data, formatter, options) { * Renders each threshold result into a formatted set of lines ready for display in the terminal. * * @param {Object} thresholds - The thresholds to render. - * @param {string} indent - The indentation string to use for the output. * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. + * @param {RenderContext} renderContext - The render context to use for text rendering. 
* @returns {string[]} - An array of formatted lines including threshold statuses. */ -function renderThresholdResults(thresholds, indent, formatter) { +function renderThresholdResults(thresholds, formatter, renderContext) { const lines = []; forEach(thresholds, (_, threshold) => { @@ -759,13 +811,38 @@ function renderThresholdResults(thresholds, indent, formatter) { // Here we push a line describing the threshold's result lines.push( - indent + indent + ' ' + statusText + additionalIndent + sourceText, + renderContext.indent(statusText + additionalIndent + sourceText), ); }); return lines; } +/** + * Renders a section title with a specified formatter, indentation level, and options. + * + * For example, a bold section title at first indentation level with a block prefix and newline suffix: + * █ THRESHOLDS + * + * @param {string} title - The section title to render. + * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param {Object} options - Additional options for rendering the section title. + * @param {string} [options.prefix=titlePrefix] - The prefix to use for the section title. + * @param {string} [options.suffix='\n'] - The suffix to use for the section title. + * @returns {string} - The formatted section title. + */ +function renderTitle( + title, + formatter, + renderContext, + options = { prefix: titlePrefix, suffix: '\n' }, +) { + return renderContext.indent( + `${options.prefix} ${formatter.boldify(title)} ${options.suffix}`, + ); +} + /** */ /** @@ -809,6 +886,9 @@ const ANSIStyles = { reversed: '7', }; +/** + * ANSIFormatter provides methods for decorating text with ANSI color and style codes. + */ class ANSIFormatter { /** * Constructs an ANSIFormatter with configurable color and styling options @@ -869,257 +949,323 @@ class ANSIFormatter { } } -/** - * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. - * - * @param {Object} data - The data input for the summary (includes options, metrics, etc.). - * @param {Object} options - Additional options that override defaults. - * @param {Object} report - The report object containing thresholds, checks, metrics, groups, and scenarios. - * @returns {string} A formatted summary of the test results. - */ -function generateTextSummary(data, options, report) { - const mergedOpts = Object.assign({}, defaultOptions, data.options, options); - const lines = []; - - // Create a formatter with default settings (colors enabled) - const formatter = new ANSIFormatter(); - - const defaultIndent = ' '; - const metricGroupIndent = ' '; +class RenderContext { + constructor(baseIndentationLevel = 0) { + this.baseIndentationLevel = baseIndentationLevel; + } /** - * Displays a metrics block name (section heading). + * Returns a string of spaces for a given indentation level. * - * @param {string} sectionName - The section name (e.g., "checks", "http_req_duration"). - * @param {Partial} [opts] - Display options. 
+ * @param {number} [level] + * @returns {string} */ - const displayMetricsBlockName = (sectionName, opts) => { - let bold = true; - if (opts && opts.bold === false) { - bold = false; - } - - let normalizedSectionName = sectionName.toUpperCase(); - - if (bold) { - normalizedSectionName = formatter.boldify(normalizedSectionName); - } + indentLevel(level = 1) { + return ' '.repeat((this.baseIndentationLevel + level) * 2); + } - let indent = ' '; - if (opts && opts.metricsBlockIndent) { - indent += opts.metricsBlockIndent; - } - lines.push(indent + normalizedSectionName); - }; + /** + * @param {string} text - The text to indent. + * @param {number} [level] + * @returns {string} + */ + indent(text, level = 1) { + return this.indentLevel(level) + text; + } /** - * Displays a block of metrics with the given options. + * indentedContext returns a new RenderContext with an incremented base indentation level. + * + * This allows to easily obtain a new RenderContext from a parent one with an + * increased indentation level. * - * @param {Object[]} sectionMetrics - The metrics to display. - * @param {Partial} [opts] - Display options. + * @param {number} increment - The increment to apply to the base indentation level. + * @returns {RenderContext} */ - const displayMetricsBlock = (sectionMetrics, opts) => { - const summarizeOpts = Object.assign({}, mergedOpts, opts); - Array.prototype.push.apply( - lines, - renderMetrics( - { metrics: sectionMetrics }, - formatter, - summarizeOpts, - ), - ); - lines.push(''); - }; + indentedContext(increment = 1) { + return new RenderContext(this.baseIndentationLevel + increment); + } +} +/** + * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. + */ +class TestReportGenerator { /** - * Displays checks within a certain context (indentation, etc.). + * Constructs a TestReportGenerator with a specified formatter * - * @param {Object} checks - Checks data, containing `metrics` and `ordered_checks`. - * @param {Partial} [opts={indent: ''}] - Options including indentation. + * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * // FIXME (@oleiade): needs JSDoc + * @param options */ - const displayChecks = (checks, opts = { indent: '' }) => { - if (checks === undefined || checks === null) { - return; - } - displayMetricsBlock(checks.metrics, { - ...opts, - indent: opts.indent + defaultIndent, - sortByName: false, - }); - for (let i = 0; i < checks.ordered_checks.length; i++) { - lines.push( - renderCheck( - metricGroupIndent + metricGroupIndent + opts.indent, - checks.ordered_checks[i], - formatter, - ), - ); - } - if (checks.ordered_checks.length > 0) { - lines.push(''); - } - }; + constructor(formatter, renderContext, options = {}) { + this.formatter = formatter; + this.renderContext = renderContext; + this.options = { + defaultIndent: ' ', + metricGroupIndent: ' ', + ...options, + }; + } + // FIXME (@oleiade): needs JSDoc /** - * Displays thresholds and their satisfaction status. + * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. * - * @param {Record} thresholds - Threshold data. 
+ * @param data + * @param report + * @returns {*} */ - const displayThresholds = (thresholds) => { - if (thresholds === undefined || thresholds === null) { - return; - } - - lines.push( - metricGroupIndent + - groupPrefix + - defaultIndent + - formatter.boldify('THRESHOLDS') + - '\n', - ); - - const mergedOpts = Object.assign( - {}, - defaultOptions, - data.options, - options, + generate(data, report) { + const reportBuilder = new ReportBuilder( + this.formatter, + this.renderContext, + this.options, ); + return reportBuilder + .addThresholds(report.thresholds) + .addTotalResults(report) + .addGroups(report.groups) + .addScenarios(report.scenarios) + .build(); + } +} - let metrics = {}; - forEach(thresholds, (_, threshold) => { - metrics[threshold.metric.name] = { - ...threshold.metric, - thresholds: threshold.thresholds, - }; - }); +/** + * Exposes methods for generating a textual summary of test results. + */ +class ReportBuilder { + /** + * Creates a new ReportBuilder with a specified formatter and options. + * + * // FIXME: ANSIFormatter could be an attribute of the render context + * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param options + */ + constructor(formatter, renderContext, options) { + this.formatter = formatter; + this.renderContext = renderContext; + this.options = options; + this.sections = []; + } - Array.prototype.push.apply( - lines, - renderThresholds({ metrics }, formatter, { - ...mergedOpts, - indent: mergedOpts.indent + defaultIndent, - }), - ); - lines.push(''); - }; + addThresholds(thresholds) { + if (!thresholds) return this; - // THRESHOLDS - displayThresholds(report.thresholds); + this.sections.push({ + title: 'THRESHOLDS', + content: this._renderThresholds(thresholds), + }); + return this; + } - // TOTAL RESULTS - lines.push( - metricGroupIndent + - groupPrefix + - defaultIndent + - formatter.boldify('TOTAL RESULTS') + - '\n', - ); + addTotalResults(report) { + this.sections.push({ + title: 'TOTAL RESULTS', + content: [ + ...this._renderChecks(report.checks), + ...'\n', + ...this._renderMetrics(report.metrics), + ], + }); + return this; + } - // CHECKS - displayChecks(report.checks); + addGroups(groups) { + if (!groups) return this; - // METRICS - forEach(report.metrics, (sectionName, sectionMetrics) => { - // If there are no metrics in this section, skip it - if (Object.keys(sectionMetrics).length === 0) { - return; - } + Object.entries(groups).forEach(([groupName, groupData]) => { + this.sections.push({ + title: `GROUP: ${groupName}`, + content: this._renderGroupContent(groupData), + }); + }); + return this; + } - displayMetricsBlockName(sectionName); - displayMetricsBlock(sectionMetrics); - }); - // END OF TOTAL RESULTS - - // GROUPS - const summarize = (prefix, indent) => { - return (groupName, groupData) => { - lines.push( - metricGroupIndent + - indent + - prefix + - defaultIndent + - formatter.boldify(`GROUP: ${groupName}`) + - '\n', - ); - displayChecks(groupData.checks, { indent: indent }); - forEach(groupData.metrics, (sectionName, sectionMetrics) => { - // If there are no metrics in this section, skip it - if (Object.keys(sectionMetrics).length === 0) { - return; - } + addScenarios(scenarios) { + if (!scenarios) return this; - displayMetricsBlockName(sectionName, { - metricsBlockIndent: indent, - }); - displayMetricsBlock(sectionMetrics, { - indent: indent + defaultIndent, - }); + 
Object.entries(scenarios).forEach(([scenarioName, scenarioData]) => { + this.sections.push({ + title: `SCENARIO: ${scenarioName}`, + content: this._renderScenarioContent(scenarioData), }); - if (groupData.groups !== undefined) { - forEach( - groupData.groups, - summarize(detailsPrefix, indent + metricGroupIndent), - ); - } - }; - }; + }); + return this; + } - const summarizeNestedGroups = (groupName, groupData) => { - lines.push( - metricGroupIndent + - groupPrefix + - ' ' + - formatter.boldify(`GROUP: ${groupName}`) + + build() { + return this.sections + .map((section) => [ + renderTitle(section.title, this.formatter, this.renderContext), + ...section.content, '\n', + ]) + .flat() + .join('\n'); + } + + /** + * @param {Object} thresholds + * @param {RenderContext} [renderContext] + * @returns {string[]} + * @private + */ + _renderThresholds(thresholds, renderContext) { + // The thresholds list should be indent one level higher than the title + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); + + // Implement threshold rendering logic + return renderThresholds( + { metrics: this._processThresholds(thresholds) }, + this.formatter, + renderContext, + this.options, ); - forEach(groupData.metrics, (sectionName, sectionMetrics) => { - // If there are no metrics in this section, skip it - if (Object.keys(sectionMetrics).length === 0) { - return; - } + } - displayMetricsBlockName(sectionName); - displayMetricsBlock(sectionMetrics); - }); - if (groupData.groups !== undefined) { - forEach(groupData.groups, summarizeNestedGroups); - } - }; + /** + * @param checks + * @param {RenderContext} [renderContext] - The render context to use for text rendering. + * @returns {string[]} + * @private + */ + _renderChecks(checks, renderContext) { + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); - if (report.groups !== undefined) { - forEach(report.groups, summarize(groupPrefix, defaultIndent)); + return checks + ? 
renderChecks(checks, this.formatter, renderContext, this.options) + : []; } - // SCENARIOS - if (report.scenarios !== undefined) { - forEach(report.scenarios, (scenarioName, scenarioData) => { - lines.push( - metricGroupIndent + - groupPrefix + - defaultIndent + - formatter.boldify(`SCENARIO: ${scenarioName}`) + - '\n', - ); - displayChecks(scenarioData.checks); - forEach(scenarioData.metrics, (sectionName, sectionMetrics) => { - // If there are no metrics in this section, skip it - if (Object.keys(sectionMetrics).length === 0) { - return; - } + /** + * @param metrics + * @param {RenderContext} [renderContext] + * @returns {string[]} + * @private + */ + _renderMetrics(metrics, renderContext) { + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); + + // Implement metrics rendering logic + return Object.entries(metrics) + .filter( + ([_, sectionMetrics]) => Object.keys(sectionMetrics).length > 0, + ) + .flatMap(([sectionName, sectionMetrics]) => [ + renderContext.indent( + this.formatter.boldify(sectionName.toUpperCase()), + ), + ...renderMetrics( + { metrics: sectionMetrics }, + this.formatter, + renderContext, + this.options, + ), + ]); + } - displayMetricsBlockName(sectionName); - displayMetricsBlock(sectionMetrics); - }); - if (scenarioData.groups !== undefined) { - forEach( - scenarioData.groups, - summarize(detailsPrefix, metricGroupIndent), - ); - } + /** + * @param groupData + * @param {RenderContext} [renderContext] + * @returns {*[]} + * @private + */ + _renderGroupContent(groupData, renderContext) { + renderContext = renderContext || this.renderContext; + + // Implement group content rendering + return [ + ...this._renderChecks(groupData.checks, renderContext), + ...this._renderMetrics(groupData.metrics, renderContext), + ...(groupData.groups + ? this._renderNestedGroups(groupData.groups) + : []), + ]; + } + + /** + * @param scenarioData + * @param {RenderContext} [renderContext] + * @returns {*[]} + * @private + */ + _renderScenarioContent(scenarioData, renderContext) { + renderContext = renderContext || this.renderContext; + + // Similar to group content rendering + return [ + ...this._renderChecks(scenarioData.checks, renderContext), + ...this._renderMetrics(scenarioData.metrics, renderContext), + ...(scenarioData.groups + ? this._renderNestedGroups(scenarioData.groups) + : []), + ]; + } + + /** + * @param groups + * @param {RenderContext} [renderContext] + * @returns {*[]} + * @private + */ + _renderNestedGroups(groups, renderContext) { + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); + + // Render nested groups recursively + return Object.entries(groups).flatMap(([groupName, groupData]) => [ + renderTitle(`GROUP: ${groupName}`, this.formatter, renderContext, { + prefix: subtitlePrefix, + }), + ...this._renderGroupContent(groupData), + ]); + } + + // Private rendering methods + _processThresholds(thresholds) { + // Transform thresholds into a format suitable for rendering + const metrics = {}; + Object.values(thresholds).forEach((threshold) => { + metrics[threshold.metric.name] = { + ...threshold.metric, + thresholds: threshold.thresholds, + }; }); + return metrics; } +} + +/** + * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. + * + * @param {Object} data - The data input for the summary (includes options, metrics, etc.). + * @param {Object} options - Additional options that override defaults. 
+ * @param {Object} report - The report object containing thresholds, checks, metrics, groups, and scenarios.
+ * @returns {string} A formatted summary of the test results.
+ */
+function generateTextSummary(data, options, report) {
+ const mergedOpts = Object.assign({}, defaultOptions, data.options, options);
+
+ // Create a render context holding information such as indentation level to apply
+ const context = new RenderContext(0);
+
+ // Create a formatter with default settings (colors enabled)
+ const formatter = new ANSIFormatter();
+
+ const reportGenerator = new TestReportGenerator(
+ formatter,
+ context,
+ mergedOpts,
+ );
 
- return lines.join('\n');
+ return reportGenerator.generate(data, report);
 }
 
 exports.humanizeValue = humanizeValue;

From 126a18883ac4305f2085c9191c26b8ea45ba41b6 Mon Sep 17 00:00:00 2001
From: oleiade 
Date: Tue, 17 Dec 2024 16:58:26 +0100
Subject: [PATCH 21/42] Reorganize the summary.js file for easier maintenance

---
 js/summary.js | 1630 ++++++++++++++++++++++++------------------------
 1 file changed, 812 insertions(+), 818 deletions(-)

diff --git a/js/summary.js b/js/summary.js
index 29f02c933ef..eb24f53ec84 100644
--- a/js/summary.js
+++ b/js/summary.js
@@ -1,3 +1,67 @@
+/**
+ * This file contains code used to generate a textual summary of test results, as displayed
+ * in the user's terminal at the end of a k6 test run, also known as the "end of test summary".
+ *
+ * The main entry point is the `generateTextSummary` function, which takes the test data as well as a report
+ * object containing results for checks, metrics, thresholds, groups, and scenarios, and returns a formatted
+ * string summarizing the test results, ready to be written to the terminal.
+ *
+ * For convenience, the file also exports the `humanizeValue` function.
+ */
+exports.humanizeValue = humanizeValue;
+exports.textSummary = generateTextSummary;
+
+/**
+ * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios.
+ *
+ * @param {Object} data - The data input for the summary (includes options, metrics, etc.).
+ * @param {Object} options - Additional options that override defaults.
+ * @param {Object} report - The report object containing thresholds, checks, metrics, groups, and scenarios.
+ * @returns {string} A formatted summary of the test results.
+ */
+//FIXME (@oleiade): because options is... optional, it should, if possible, be the last argument here.
+function generateTextSummary(data, options, report) {
+ const mergedOpts = Object.assign({}, defaultOptions, data.options, options);
+
+ // Create a render context holding information such as indentation level to apply
+ const context = new RenderContext(0);
+
+ // Create a formatter with default settings (colors enabled)
+ const formatter = new ANSIFormatter();
+
+ const reportGenerator = new TestReportGenerator(
+ formatter,
+ context,
+ mergedOpts,
+ );
+
+ return reportGenerator.generate(data, report);
+}
+
+/**
+ * Formats a metric value into a human-readable form, depending on the metric type and content.
+ *
+ * @param {number} val - The metric value.
+ * @param {ReportMetric} metric - The metric object.
+ * @param {string|null} timeUnit - The time unit for duration metrics.
+ * @returns {string} The humanized metric value.
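To make the humanization rules described in this JSDoc concrete, here is a minimal usage sketch. The example inputs are hypothetical, and the expected strings are indicative only, since they depend on the humanizeBytes/humanizeDuration helpers defined further down in this file:

    // Indicative examples, assuming the helpers defined later in this file.
    humanizeValue(0.4567, { type: 'rate', contains: 'default' }, null); // "45.67%" (truncated, not rounded)
    humanizeValue(1500, { type: 'counter', contains: 'data' }, null);   // "1.5 kB"
    humanizeValue(1500, { type: 'trend', contains: 'time' }, null);     // "1.5s" (generic duration formatting)
    humanizeValue(1500, { type: 'trend', contains: 'time' }, 'ms');     // "1500.00ms" (explicit time unit)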
+ */ +function humanizeValue(val, metric, timeUnit) { + if (metric.type === 'rate') { + // Truncate instead of round when decreasing precision to 2 decimal places + return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%'; + } + + switch (metric.contains) { + case 'data': + return humanizeBytes(val); + case 'time': + return humanizeDuration(val, timeUnit); + default: + return toFixedNoTrailingZeros(val, 6); + } +} + /** * @typedef {Object} Threshold * @property {string} source - The threshold expression source. @@ -69,284 +133,414 @@ */ /** - * A simple iteration utility function for objects. - * - * @param {Object} obj - the object to iterate over - * @param {(key: string, value: any) => (boolean|void)} callback - Callback invoked with (key, value) + * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. */ -function forEach(obj, callback) { - for (const key in obj) { - if (obj.hasOwnProperty(key)) { - if (callback(key, obj[key])) { - break; - } - } +class TestReportGenerator { + /** + * Constructs a TestReportGenerator with a specified formatter + * + * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * // FIXME (@oleiade): needs JSDoc + * @param options + */ + constructor(formatter, renderContext, options = {}) { + this.formatter = formatter; + this.renderContext = renderContext; + this.options = { + defaultIndent: ' ', + metricGroupIndent: ' ', + ...options, + }; } -} -const titlePrefix = '█'; -const subtitlePrefix = '↳'; -const successMark = '✓'; -const failMark = '✗'; -const defaultOptions = { - enableColors: true, // FIXME (@oleiade): we should ensure we respect this flag - summaryTimeUnit: null, - summaryTrendStats: null, - sortByName: true, -}; + // FIXME (@oleiade): needs JSDoc + /** + * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. + * + * @param data + * @param report + * @returns {*} + */ + generate(data, report) { + const reportBuilder = new ReportBuilder( + this.formatter, + this.renderContext, + this.options, + ); + return reportBuilder + .addThresholds(report.thresholds) + .addTotalResults(report) + .addGroups(report.groups) + .addScenarios(report.scenarios) + .build(); + } +} /** - * Compute the width of a string as displayed in a terminal, excluding ANSI codes, terminal - * formatting, Unicode ligatures, etc. - * - * @param {string} s - The string to measure - * @returns {number} The display width of the string + * Exposes methods for generating a textual summary of test results. */ -function strWidth(s) { - // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ - const data = s.normalize('NFKC'); // This used to be NFKD in Go, but this should be better - let inEscSeq = false; - let inLongEscSeq = false; - let width = 0; - for (const char of data) { - if (char.done) { - break; - } +class ReportBuilder { + /** + * Creates a new ReportBuilder with a specified formatter and options. + * + * // FIXME: ANSIFormatter could be an attribute of the render context + * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. + * @param {RenderContext} renderContext - The render context to use for text rendering. 
+ * @param options + */ + constructor(formatter, renderContext, options) { + this.formatter = formatter; + this.renderContext = renderContext; + this.options = options; + this.sections = []; + } - // Skip over ANSI escape codes. - if (char === '\x1b') { - inEscSeq = true; - continue; - } - if (inEscSeq && char === '[') { - inLongEscSeq = true; - continue; - } - if ( - inEscSeq && - inLongEscSeq && - char.charCodeAt(0) >= 0x40 && - char.charCodeAt(0) <= 0x7e - ) { - inEscSeq = false; - inLongEscSeq = false; - continue; - } - if ( - inEscSeq && - !inLongEscSeq && - char.charCodeAt(0) >= 0x40 && - char.charCodeAt(0) <= 0x5f - ) { - inEscSeq = false; - continue; - } + addThresholds(thresholds) { + if (!thresholds) return this; - if (!inEscSeq && !inLongEscSeq) { - width++; - } + this.sections.push({ + title: 'THRESHOLDS', + content: this._renderThresholds(thresholds), + }); + return this; } - return width; -} -/** - * Extracts a display name for a metric, handling sub-metrics (e.g. "metric{sub}" -> "{ sub }"). - * - * @param {string} name - The metric name. - * @returns {string} - The display name - */ -function displayNameForMetric(name) { - const subMetricPos = name.indexOf('{'); - if (subMetricPos >= 0) { - return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }'; + addTotalResults(report) { + this.sections.push({ + title: 'TOTAL RESULTS', + content: [ + ...this._renderChecks(report.checks), + ...'\n', + ...this._renderMetrics(report.metrics), + ], + }); + return this; } - return name; -} -/** - * Converts a number of bytes into a human-readable string with units. - * - * @param {number} bytes - The number of bytes. - * @returns {string} A human-readable string (e.g. "10 kB"). - */ -function humanizeBytes(bytes) { - const units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; - const base = 1000; - if (bytes < 10) { - return bytes + ' B'; + addGroups(groups) { + if (!groups) return this; + + Object.entries(groups).forEach(([groupName, groupData]) => { + this.sections.push({ + title: `GROUP: ${groupName}`, + content: this._renderGroupContent(groupData), + }); + }); + return this; } - const e = Math.floor(Math.log(bytes) / Math.log(base)); - const suffix = units[e | 0]; - const val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10; - return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix; -} + addScenarios(scenarios) { + if (!scenarios) return this; -const unitMap = { - s: { unit: 's', coef: 0.001 }, - ms: { unit: 'ms', coef: 1 }, - us: { unit: 'µs', coef: 1000 }, -}; + Object.entries(scenarios).forEach(([scenarioName, scenarioData]) => { + this.sections.push({ + title: `SCENARIO: ${scenarioName}`, + content: this._renderScenarioContent(scenarioData), + }); + }); + return this; + } -/** - * Converts a number to a fixed decimal string, removing trailing zeros. - * - * @param {number} val - The number to convert. - * @param {number} prec - Decimal precision. - * @returns {string} A string representation of the number without trailing zeros. - */ -function toFixedNoTrailingZeros(val, prec) { - // TODO: figure out something better? - return parseFloat(val.toFixed(prec)).toString(); -} + build() { + return this.sections + .map((section) => [ + renderTitle(section.title, this.formatter, this.renderContext), + ...section.content, + '\n', + ]) + .flat() + .join('\n'); + } -/** - * Truncates a number to a certain precision without rounding, then removes trailing zeros. - * - * @param {number} val - The number to truncate. - * @param {number} prec - Decimal precision. 
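For reference, the fluent flow that TestReportGenerator.generate drives through this builder looks roughly as follows; `formatter`, `renderContext`, `options`, and `report` are placeholders for values shaped like the ones described elsewhere in this file:

    // Sketch of the builder-style flow (mirrors TestReportGenerator.generate).
    const text = new ReportBuilder(formatter, renderContext, options)
        .addThresholds(report.thresholds)
        .addTotalResults(report)
        .addGroups(report.groups)
        .addScenarios(report.scenarios)
        .build();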
- * @returns {string} A truncated, not rounded string representation. - */ -function toFixedNoTrailingZerosTrunc(val, prec) { - const mult = Math.pow(10, prec); - return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec); -} + /** + * @param {Object} thresholds + * @param {RenderContext} [renderContext] + * @returns {string[]} + * @private + */ + _renderThresholds(thresholds, renderContext) { + // The thresholds list should be indent one level higher than the title + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); -/** - * Humanizes a duration (in milliseconds) to a human-readable string, - * choosing appropriate units (ns, µs, ms, s, m, h). - * - * @param {number} duration - The duration in milliseconds. - * @returns {string} Human-readable duration (e.g. "2.5ms", "3s", "1m30s"). - */ -function humanizeGenericDuration(duration) { - if (duration === 0) { - return '0s'; + // Implement threshold rendering logic + return renderThresholds( + { metrics: this._processThresholds(thresholds) }, + this.formatter, + renderContext, + this.options, + ); } - if (duration < 0.001) { - // smaller than a microsecond, print nanoseconds - return Math.trunc(duration * 1000000) + 'ns'; + /** + * @param checks + * @param {RenderContext} [renderContext] - The render context to use for text rendering. + * @returns {string[]} + * @private + */ + _renderChecks(checks, renderContext) { + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); + + return checks + ? renderChecks(checks, this.formatter, renderContext, this.options) + : []; } - if (duration < 1) { - // smaller than a millisecond, print microseconds - return toFixedNoTrailingZerosTrunc(duration * 1000, 2) + 'µs'; + + /** + * @param metrics + * @param {RenderContext} [renderContext] + * @returns {string[]} + * @private + */ + _renderMetrics(metrics, renderContext) { + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); + + // Implement metrics rendering logic + return Object.entries(metrics) + .filter( + ([_, sectionMetrics]) => Object.keys(sectionMetrics).length > 0, + ) + .flatMap(([sectionName, sectionMetrics]) => [ + renderContext.indent( + this.formatter.boldify(sectionName.toUpperCase()), + ), + ...renderMetrics( + { metrics: sectionMetrics }, + this.formatter, + renderContext, + this.options, + ), + ]); } - if (duration < 1000) { - // duration is smaller than a second - return toFixedNoTrailingZerosTrunc(duration, 2) + 'ms'; + + /** + * @param groupData + * @param {RenderContext} [renderContext] + * @returns {*[]} + * @private + */ + _renderGroupContent(groupData, renderContext) { + renderContext = renderContext || this.renderContext; + + // Implement group content rendering + return [ + ...this._renderChecks(groupData.checks, renderContext), + ...this._renderMetrics(groupData.metrics, renderContext), + ...(groupData.groups + ? this._renderNestedGroups(groupData.groups) + : []), + ]; } - let fixedDuration = - toFixedNoTrailingZerosTrunc( - (duration % 60000) / 1000, - duration > 60000 ? 
0 : 2, - ) + 's'; - let rem = Math.trunc(duration / 60000); - if (rem < 1) { - // less than a minute - return fixedDuration; + /** + * @param scenarioData + * @param {RenderContext} [renderContext] + * @returns {*[]} + * @private + */ + _renderScenarioContent(scenarioData, renderContext) { + renderContext = renderContext || this.renderContext; + + // Similar to group content rendering + return [ + ...this._renderChecks(scenarioData.checks, renderContext), + ...this._renderMetrics(scenarioData.metrics, renderContext), + ...(scenarioData.groups + ? this._renderNestedGroups(scenarioData.groups) + : []), + ]; } - fixedDuration = (rem % 60) + 'm' + fixedDuration; - rem = Math.trunc(rem / 60); - if (rem < 1) { - // less than an hour - return fixedDuration; + + /** + * @param groups + * @param {RenderContext} [renderContext] + * @returns {*[]} + * @private + */ + _renderNestedGroups(groups, renderContext) { + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); + + // Render nested groups recursively + return Object.entries(groups).flatMap(([groupName, groupData]) => [ + renderTitle(`GROUP: ${groupName}`, this.formatter, renderContext, { + prefix: subtitlePrefix, + }), + ...this._renderGroupContent(groupData), + ]); + } + + // Private rendering methods + _processThresholds(thresholds) { + // Transform thresholds into a format suitable for rendering + const metrics = {}; + Object.values(thresholds).forEach((threshold) => { + metrics[threshold.metric.name] = { + ...threshold.metric, + thresholds: threshold.thresholds, + }; + }); + return metrics; } - return rem + 'h' + fixedDuration; } -/** - * Humanizes a duration according to a specified time unit or uses a generic formatting. - * - * @param {number} dur - The duration in milliseconds. - * @param {string|null} timeUnit - Optional time unit (e.g. "ms", "s"). - * @returns {string} A human-readable duration string. - */ -function humanizeDuration(dur, timeUnit) { - if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { - return ( - (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit - ); +class RenderContext { + constructor(baseIndentationLevel = 0) { + this.baseIndentationLevel = baseIndentationLevel; } - return humanizeGenericDuration(dur); -} + /** + * Returns a string of spaces for a given indentation level. + * + * @param {number} [level] + * @returns {string} + */ + indentLevel(level = 1) { + return ' '.repeat((this.baseIndentationLevel + level) * 2); + } -/** - * Formats a metric value into a human-readable form, depending on the metric type and content. - * - * @param {number} val - The metric value. - * @param {ReportMetric} metric - The metric object. - * @param {string|null} timeUnit - The time unit for duration metrics. - * @returns {string} The humanized metric value. - */ -function humanizeValue(val, metric, timeUnit) { - if (metric.type === 'rate') { - // Truncate instead of round when decreasing precision to 2 decimal places - return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%'; + /** + * @param {string} text - The text to indent. + * @param {number} [level] + * @returns {string} + */ + indent(text, level = 1) { + return this.indentLevel(level) + text; } - switch (metric.contains) { - case 'data': - return humanizeBytes(val); - case 'time': - return humanizeDuration(val, timeUnit); - default: - return toFixedNoTrailingZeros(val, 6); + /** + * indentedContext returns a new RenderContext with an incremented base indentation level. 
+ * + * This allows to easily obtain a new RenderContext from a parent one with an + * increased indentation level. + * + * @param {number} increment - The increment to apply to the base indentation level. + * @returns {RenderContext} + */ + indentedContext(increment = 1) { + return new RenderContext(this.baseIndentationLevel + increment); } } /** - * Returns the summary values for non-trend metrics (counter, gauge, rate). - * - * @param {ReportMetric} metric - The metric to summarize. - * @param {string|null} timeUnit - The time unit for durations. - * @returns {string[]} - An array of summary values. + * ANSIFormatter provides methods for decorating text with ANSI color and style codes. */ -function nonTrendMetricValueForSum(metric, timeUnit) { - switch (metric.type) { - case 'counter': - return [ - humanizeValue(metric.values.count, metric, timeUnit), - humanizeValue(metric.values.rate, metric, timeUnit) + '/s', - ]; - case 'gauge': - return [ - humanizeValue(metric.values.value, metric, timeUnit), - 'min=' + humanizeValue(metric.values.min, metric, timeUnit), - 'max=' + humanizeValue(metric.values.max, metric, timeUnit), - ]; - case 'rate': - return [ - humanizeValue(metric.values.rate, metric, timeUnit), - `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, - ]; - default: - return ['[no data]']; +class ANSIFormatter { + /** + * Constructs an ANSIFormatter with configurable color and styling options + * @param {Object} options - Configuration options for formatting + * @param {boolean} [options.enableColors=true] - Whether to enable color output + */ + constructor(options = {}) { + this.options = { + enableColors: true, + ...options, + }; + } + + /** + * Decorates text with ANSI color and style. + * @param {string} text - The text to decorate. + * @param {ANSIColor} color - The ANSI color to apply. + * @param {...ANSIStyle} styles - optional additional styles to apply. + * @returns {string} - Decorated text, or plain text if colors are disabled. + */ + decorate(text, color, ...styles) { + if (!this.options.enableColors) { + return text; + } + + const colorCode = ANSIColors[color] || ANSIColors.white; + const styleCodes = styles + .map((style) => ANSIStyles[style]) + .filter(Boolean); + + const fullCodes = styleCodes.length + ? [...styleCodes, colorCode].join(';') + : colorCode; + + const fullSequence = `\x1b[${fullCodes}m`; + + return `${fullSequence}${text}\x1b[0m`; + } + + /** + * Applies bold styling to text + * @param {string} text - Text to make bold + * @returns {string} Bold text + */ + boldify(text) { + return this.decorate(text, 'white', 'bold'); } } /** - * Sorts metrics by name, keeping submetrics grouped with their parent metrics. + * ANSIColor maps ANSI color names to their respective escape codes. * - * @param {string[]} metricNames - The metric names to sort. - * @returns {string[]} - The sorted metric names. 
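As a rough illustration of how RenderContext and ANSIFormatter cooperate, a minimal sketch assuming the default two-spaces-per-level indentation and colors enabled (the exact escape sequences follow from the ANSIColors/ANSIStyles tables below):

    const formatter = new ANSIFormatter();
    const ctx = new RenderContext(0);

    ctx.indent('TOTAL RESULTS');              // "  TOTAL RESULTS" (one level = two spaces)
    ctx.indentedContext(1).indent('checks');  // "    checks" (base level bumped by one)
    formatter.decorate('SATISFIED', 'green'); // "\x1b[32mSATISFIED\x1b[0m"
    formatter.boldify('THRESHOLDS');          // "\x1b[1;37mTHRESHOLDS\x1b[0m"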
+ * @typedef {'reset'|'black'|'red'|'green'|'yellow'|'blue'|'magenta'|'cyan'| + * 'white'|'brightRed'|'brightGreen'|'brightYellow'} ANSIColor + * + * @typedef {Record} ANSIColors */ -function sortMetricsByName(metricNames) { - metricNames.sort(function (lhsMetricName, rhsMetricName) { - const lhsParent = lhsMetricName.split('{', 1)[0]; - const rhsParent = rhsMetricName.split('{', 1)[0]; - const result = lhsParent.localeCompare(rhsParent); - if (result !== 0) { - return result; - } - const lhsSub = lhsMetricName.substring(lhsParent.length); - const rhsSub = rhsMetricName.substring(rhsParent.length); - return lhsSub.localeCompare(rhsSub); - }); +const ANSIColors = { + reset: '\x1b[0m', - return metricNames; + // Standard Colors + black: '30', + red: '31', + green: '32', + yellow: '33', + blue: '34', + magenta: '35', + cyan: '36', + white: '37', + + // Bright Colors + brightRed: '91', + brightGreen: '92', + brightYellow: '93', +}; + +/** + * ANSIStyle maps ANSI style names to their respective escape codes. + * + * @typedef {'bold' | 'faint' | 'underline' | 'reversed'} ANSIStyle + * + * @typedef {Record} ANSIStyles + */ +const ANSIStyles = { + bold: '1', + faint: '2', + underline: '4', + reversed: '7', +}; + +/** + * Renders a section title with a specified formatter, indentation level, and options. + * + * For example, a bold section title at first indentation level with a block prefix and newline suffix: + * █ THRESHOLDS + * + * @param {string} title - The section title to render. + * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param {Object} options - Additional options for rendering the section title. + * @param {string} [options.prefix=titlePrefix] - The prefix to use for the section title. + * @param {string} [options.suffix='\n'] - The suffix to use for the section title. + * @returns {string} - The formatted section title. + */ +function renderTitle( + title, + formatter, + renderContext, + options = { prefix: titlePrefix, suffix: '\n' }, +) { + return renderContext.indent( + `${options.prefix} ${formatter.boldify(title)} ${options.suffix}`, + ); } /** @@ -441,7 +635,7 @@ function renderChecks(checks, formatter, renderContext, options = {}) { * @param {{metrics: Object[]}} data - The data object containing metrics. * @param {ANSIFormatter} formatter - An ANSIFormatter function for ANSI colors. * @param {RenderContext} renderContext - The render context to use for text rendering. - * @param {summarizeMetricsOptions} options - Display options merged with defaultOptions. + * @param {Options} options - Display options merged with defaultOptions. * @returns {string[]} */ function renderMetrics(data, formatter, renderContext, options) { @@ -476,116 +670,96 @@ function renderMetrics(data, formatter, renderContext, options) { } /** - * @typedef {Object} SummaryInfo - * @property {number} maxNameWidth - The maximum width of the metric names. - * @property {Object} nonTrendValues - The non-trend metric values. - * @property {Object} nonTrendExtras - The non-trend metric extras. - * @property {Object} trendCols - The trend columns. - * @property {number[]} trendColMaxLens - The trend column maximum lengths. - * @property {number} numTrendColumns - The number of trend columns. - * @property {string[]} trendStats - The trend statistics. - * @property {number} maxNonTrendValueLen - The maximum non-trend value length. 
- * @property {number[]} nonTrendExtraMaxLens - The non-trend extra maximum lengths. - */ - -/** - * Compute all necessary formatting information such as maximum lengths, trend columns and non-trend values for each - * metric. + * Renders each thresholds results into a formatted set of lines ready for display in the terminal. * - * @param {string[]} metricNames - * @param {ReportData} data - The data object containing metrics. + * Thresholds are rendered in the format: + * {metric/submetric}...: {value} {extra} + * {SATISFIED|UNSATISFIED} {source} + * //... additional threshold lines + * + * @param {ReportData} data - The data containing metrics. + * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. * @param {RenderContext} renderContext - The render context to use for text rendering. - * @param {summarizeMetricsOptions} options - * @returns {SummaryInfo} + * @param {Object} options - Options merged with defaults. + * @returns {string[]} - Array of formatted lines including threshold statuses. */ -function computeSummaryInfo(metricNames, data, renderContext, options) { - const trendStats = options.summaryTrendStats; - const numTrendColumns = trendStats.length; - - const nonTrendValues = {}; - const nonTrendExtras = {}; - const trendCols = {}; - - let maxNameWidth = 0; - let maxNonTrendValueLen = 0; - let nonTrendExtraMaxLens = []; // FIXME: "lens"? +function renderThresholds(data, formatter, renderContext, options) { + // Extract and optionally sort metric names + let metricNames = Object.keys(data.metrics); + if (options.sortByName) { + metricNames = sortMetricsByName(metricNames); + } - // Initialize tracking arrays for trend widths - const trendColMaxLens = new Array(numTrendColumns).fill(0); + // Precompute all formatting information + const summaryInfo = computeSummaryInfo( + metricNames, + data, + renderContext, + options, + ); + // Format each threshold line by preparing each metric affected by a threshold, as + // well as the thresholds results for each expression. + const result = []; for (const name of metricNames) { const metric = data.metrics[name]; - const displayName = renderContext.indent( - name + displayNameForMetric(name), + const line = renderSubmetricLine( + name, + metric, + summaryInfo, + options, + formatter, + renderContext, ); - maxNameWidth = Math.max(maxNameWidth, strWidth(displayName)); - - if (metric.type === 'trend') { - const cols = trendStats.map((stat) => - formatTrendValue(metric.values[stat], stat, metric, options), - ); + result.push(line); - // Compute max column widths - cols.forEach((col, index) => { - trendColMaxLens[index] = Math.max( - trendColMaxLens[index], - strWidth(col), - ); - }); - trendCols[name] = cols; - } else { - const values = nonTrendMetricValueForSum( - metric, - options.summaryTimeUnit, - ); - const mainValue = values[0]; // FIXME (@oleiade) we should assert that the index exists here - nonTrendValues[name] = mainValue; - maxNonTrendValueLen = Math.max( - maxNonTrendValueLen, - strWidth(mainValue), + if (metric.thresholds) { + // TODO (@oleiade): make sure the arguments are always ordered consistently across functions (indent, decorate, etc.) + const thresholdLines = renderThresholdResults( + metric.thresholds, + formatter, + renderContext.indentedContext(1), ); - - // FIXME (@oleiade): what the fuck is an extra, really? 
- const extras = values.slice(1); - nonTrendExtras[name] = extras; - extras.forEach((value, index) => { - const width = strWidth(value); - if ( - nonTrendExtraMaxLens[index] === undefined || - width > nonTrendExtraMaxLens[index] - ) { - nonTrendExtraMaxLens[index] = width; - } - }); + result.push(...thresholdLines); } } - return { - maxNameWidth, - nonTrendValues, - nonTrendExtras, - trendCols, - trendColMaxLens, - numTrendColumns, - trendStats, - maxNonTrendValueLen, - nonTrendExtraMaxLens, - }; + return result; } /** + * Renders each threshold result into a formatted set of lines ready for display in the terminal. * - * @param value - * @param stat - * @param metric - * @param options - * @returns {string} + * @param {Object} thresholds - The thresholds to render. + * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @returns {string[]} - An array of formatted lines including threshold statuses. */ -function formatTrendValue(value, stat, metric, options) { - if (stat === 'count') { - return value.toString(); - } - return humanizeValue(value, metric, options.summaryTimeUnit); +function renderThresholdResults(thresholds, formatter, renderContext) { + const lines = []; + + forEach(thresholds, (_, threshold) => { + const isSatisfied = threshold.ok; + const statusText = isSatisfied + ? formatter.decorate('SATISFIED', 'green') + : formatter.decorate('UNSATISFIED', 'red'); + + // Extra indentation for threshold lines + // Adjusting spacing so that it aligns nicely under the metric line + const additionalIndent = isSatisfied ? ' ' : ' '; + const sourceText = formatter.decorate( + `'${threshold.source}'`, + 'white', + 'faint', + ); + + // Here we push a line describing the threshold's result + lines.push( + renderContext.indent(statusText + additionalIndent + sourceText), + ); + }); + + return lines; } /** @@ -594,7 +768,7 @@ function formatTrendValue(value, stat, metric, options) { * @param {string} name - The name of the metric. * @param {ReportMetric} metric - The metric object containing details about the metric. * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. - * @param {summarizeMetricsOptions} options - Configuration options for summarizing metrics. + * @param {Options} options - Configuration options for summarizing metrics. * @param {ANSIFormatter} formatter - A function to apply ANSI colors to text. * @param {RenderContext} renderContext - The render context to use for text rendering. * @returns {string} - The formatted metric line. 
@@ -609,7 +783,7 @@ function renderMetricLine( ) { const { maxNameWidth } = info; - const displayedName = displayNameForMetric(name); + const displayedName = renderMetricDisplayName(name); const fmtIndent = renderContext.indentLevel(); // Compute the trailing dots: @@ -635,12 +809,12 @@ function renderMetricLine( * @param {string} name - name of the submetric * @param {ReportMetric} metric - submetric object (submetric really are just a specialized metric with a tags set and a pointer to their parent) * @param {SummaryInfo} info - summary information object - * @param {summarizeMetricsOptions} options - display options + * @param {Options} options - display options * @param {ANSIFormatter} formatter - ANSI formatter * @param {RenderContext} renderContext - render context * @returns {string} submetric report line in the form: `{submetric name}...: {value} {extra}` */ -function formatSubmetricLine( +function renderSubmetricLine( name, metric, info, @@ -673,7 +847,6 @@ function formatSubmetricLine( /** * Format data for trend metrics. */ -// FIXME (@oleiade): rename function renderTrendData(name, info, formatter) { const { trendStats, trendCols, trendColMaxLens } = info; const cols = trendCols[name]; @@ -726,547 +899,368 @@ function renderNonTrendData(name, info, formatter) { } /** - * Renders each thresholds results into a formatted set of lines ready for display in the terminal. * - * Thresholds are rendered in the format: - * {metric/submetric}...: {value} {extra} - * {SATISFIED|UNSATISFIED} {source} - * //... additional threshold lines + * @param value + * @param stat + * @param metric + * @param options + * @returns {string} + */ +function renderTrendValue(value, stat, metric, options) { + if (stat === 'count') { + return value.toString(); + } + return humanizeValue(value, metric, options.summaryTimeUnit); +} + +/** + * Compute all necessary formatting information such as maximum lengths, trend columns and non-trend values for each + * metric. * - * @param {ReportData} data - The data containing metrics. - * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. + * @typedef {Object} SummaryInfo + * @property {number} maxNameWidth - The maximum width of the metric names. + * @property {Object} nonTrendValues - The non-trend metric values. + * @property {Object} nonTrendExtras - The non-trend metric extras. + * @property {Object} trendCols - The trend columns. + * @property {number[]} trendColMaxLens - The trend column maximum lengths. + * @property {number} numTrendColumns - The number of trend columns. + * @property {string[]} trendStats - The trend statistics. + * @property {number} maxNonTrendValueLen - The maximum non-trend value length. + * @property {number[]} nonTrendExtraMaxLens - The non-trend extra maximum lengths. + * + * @param {string[]} metricNames + * @param {ReportData} data - The data object containing metrics. * @param {RenderContext} renderContext - The render context to use for text rendering. - * @param {Object} options - Options merged with defaults. - * @returns {string[]} - Array of formatted lines including threshold statuses. 
+ * @param {Options} options + * @returns {SummaryInfo} */ -function renderThresholds(data, formatter, renderContext, options) { - // Extract and optionally sort metric names - let metricNames = Object.keys(data.metrics); - if (options.sortByName) { - metricNames = sortMetricsByName(metricNames); - } +function computeSummaryInfo(metricNames, data, renderContext, options) { + const trendStats = options.summaryTrendStats; + const numTrendColumns = trendStats.length; - // Precompute all formatting information - const summaryInfo = computeSummaryInfo( - metricNames, - data, - renderContext, - options, - ); + const nonTrendValues = {}; + const nonTrendExtras = {}; + const trendCols = {}; + + let maxNameWidth = 0; + let maxNonTrendValueLen = 0; + let nonTrendExtraMaxLens = []; // FIXME: "lens"? + + // Initialize tracking arrays for trend widths + const trendColMaxLens = new Array(numTrendColumns).fill(0); - // Format each threshold line by preparing each metric affected by a threshold, as - // well as the thresholds results for each expression. - const result = []; for (const name of metricNames) { const metric = data.metrics[name]; - const line = formatSubmetricLine( - name, - metric, - summaryInfo, - options, - formatter, - renderContext, + const displayName = renderContext.indent( + name + renderMetricDisplayName(name), ); - result.push(line); + maxNameWidth = Math.max(maxNameWidth, strWidth(displayName)); - if (metric.thresholds) { - // TODO (@oleiade): make sure the arguments are always ordered consistently across functions (indent, decorate, etc.) - const thresholdLines = renderThresholdResults( - metric.thresholds, - formatter, - renderContext.indentedContext(1), + if (metric.type === 'trend') { + const cols = trendStats.map((stat) => + renderTrendValue(metric.values[stat], stat, metric, options), ); - result.push(...thresholdLines); + + // Compute max column widths + cols.forEach((col, index) => { + trendColMaxLens[index] = Math.max( + trendColMaxLens[index], + strWidth(col), + ); + }); + trendCols[name] = cols; + } else { + const values = nonTrendMetricValueForSum( + metric, + options.summaryTimeUnit, + ); + const mainValue = values[0]; // FIXME (@oleiade) we should assert that the index exists here + nonTrendValues[name] = mainValue; + maxNonTrendValueLen = Math.max( + maxNonTrendValueLen, + strWidth(mainValue), + ); + + // FIXME (@oleiade): what the fuck is an extra, really? + const extras = values.slice(1); + nonTrendExtras[name] = extras; + extras.forEach((value, index) => { + const width = strWidth(value); + if ( + nonTrendExtraMaxLens[index] === undefined || + width > nonTrendExtraMaxLens[index] + ) { + nonTrendExtraMaxLens[index] = width; + } + }); } } - return result; + return { + maxNameWidth, + nonTrendValues, + nonTrendExtras, + trendCols, + trendColMaxLens, + numTrendColumns, + trendStats, + maxNonTrendValueLen, + nonTrendExtraMaxLens, + }; } /** - * Renders each threshold result into a formatted set of lines ready for display in the terminal. + * Sorts metrics by name, keeping submetrics grouped with their parent metrics. * - * @param {Object} thresholds - The thresholds to render. - * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. - * @param {RenderContext} renderContext - The render context to use for text rendering. - * @returns {string[]} - An array of formatted lines including threshold statuses. + * @param {string[]} metricNames - The metric names to sort. + * @returns {string[]} - The sorted metric names. 
*/ -function renderThresholdResults(thresholds, formatter, renderContext) { - const lines = []; - - forEach(thresholds, (_, threshold) => { - const isSatisfied = threshold.ok; - const statusText = isSatisfied - ? formatter.decorate('SATISFIED', 'green') - : formatter.decorate('UNSATISFIED', 'red'); - - // Extra indentation for threshold lines - // Adjusting spacing so that it aligns nicely under the metric line - const additionalIndent = isSatisfied ? ' ' : ' '; - const sourceText = formatter.decorate( - `'${threshold.source}'`, - 'white', - 'faint', - ); - - // Here we push a line describing the threshold's result - lines.push( - renderContext.indent(statusText + additionalIndent + sourceText), - ); +function sortMetricsByName(metricNames) { + metricNames.sort(function (lhsMetricName, rhsMetricName) { + const lhsParent = lhsMetricName.split('{', 1)[0]; + const rhsParent = rhsMetricName.split('{', 1)[0]; + const result = lhsParent.localeCompare(rhsParent); + if (result !== 0) { + return result; + } + const lhsSub = lhsMetricName.substring(lhsParent.length); + const rhsSub = rhsMetricName.substring(rhsParent.length); + return lhsSub.localeCompare(rhsSub); }); - return lines; + return metricNames; } /** - * Renders a section title with a specified formatter, indentation level, and options. - * - * For example, a bold section title at first indentation level with a block prefix and newline suffix: - * █ THRESHOLDS + * A simple iteration utility function for objects. * - * @param {string} title - The section title to render. - * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. - * @param {RenderContext} renderContext - The render context to use for text rendering. - * @param {Object} options - Additional options for rendering the section title. - * @param {string} [options.prefix=titlePrefix] - The prefix to use for the section title. - * @param {string} [options.suffix='\n'] - The suffix to use for the section title. - * @returns {string} - The formatted section title. + * @param {Object} obj - the object to iterate over + * @param {(key: string, value: any) => (boolean|void)} callback - Callback invoked with (key, value) */ -function renderTitle( - title, - formatter, - renderContext, - options = { prefix: titlePrefix, suffix: '\n' }, -) { - return renderContext.indent( - `${options.prefix} ${formatter.boldify(title)} ${options.suffix}`, - ); +function forEach(obj, callback) { + for (const key in obj) { + if (obj.hasOwnProperty(key)) { + if (callback(key, obj[key])) { + break; + } + } + } } -/** - */ -/** - * ANSIColor maps ANSI color names to their respective escape codes. - * - * @typedef {'reset'|'black'|'red'|'green'|'yellow'|'blue'|'magenta'|'cyan'| - * 'white'|'brightRed'|'brightGreen'|'brightYellow'} ANSIColor - * - * @typedef {Record} ANSIColors - */ -const ANSIColors = { - reset: '\x1b[0m', - - // Standard Colors - black: '30', - red: '31', - green: '32', - yellow: '33', - blue: '34', - magenta: '35', - cyan: '36', - white: '37', - - // Bright Colors - brightRed: '91', - brightGreen: '92', - brightYellow: '93', +const titlePrefix = '█'; +const subtitlePrefix = '↳'; +const successMark = '✓'; +const failMark = '✗'; +const defaultOptions = { + enableColors: true, // FIXME (@oleiade): we should ensure we respect this flag + summaryTimeUnit: null, + summaryTrendStats: null, + sortByName: true, }; /** - * ANSIStyle maps ANSI style names to their respective escape codes. 
- * - * @typedef {'bold' | 'faint' | 'underline' | 'reversed'} ANSIStyle + * Compute the width of a string as displayed in a terminal, excluding ANSI codes, terminal + * formatting, Unicode ligatures, etc. * - * @typedef {Record} ANSIStyles - */ -const ANSIStyles = { - bold: '1', - faint: '2', - underline: '4', - reversed: '7', -}; - -/** - * ANSIFormatter provides methods for decorating text with ANSI color and style codes. + * @param {string} s - The string to measure + * @returns {number} The display width of the string */ -class ANSIFormatter { - /** - * Constructs an ANSIFormatter with configurable color and styling options - * @param {Object} options - Configuration options for formatting - * @param {boolean} [options.enableColors=true] - Whether to enable color output - */ - constructor(options = {}) { - this.options = { - enableColors: true, - ...options, - }; - } - - /** - * Decorates text with ANSI color and style. - * @param {string} text - The text to decorate. - * @param {ANSIColor} color - The ANSI color to apply. - * @param {...ANSIStyle} styles - optional additional styles to apply. - * @returns {string} - Decorated text, or plain text if colors are disabled. - */ - decorate(text, color, ...styles) { - if (!this.options.enableColors) { - return text; +function strWidth(s) { + // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ + const data = s.normalize('NFKC'); // This used to be NFKD in Go, but this should be better + let inEscSeq = false; + let inLongEscSeq = false; + let width = 0; + for (const char of data) { + if (char.done) { + break; } - const colorCode = ANSIColors[color] || ANSIColors.white; - const styleCodes = styles - .map((style) => ANSIStyles[style]) - .filter(Boolean); - - const fullCodes = styleCodes.length - ? [...styleCodes, colorCode].join(';') - : colorCode; - - const fullSequence = `\x1b[${fullCodes}m`; - - return `${fullSequence}${text}\x1b[0m`; - } - - /** - * Applies bold styling to text - * @param {string} text - Text to make bold - * @returns {string} Bold text - */ - boldify(text) { - return this.decorate(text, 'white', 'bold'); - } - - /** - * Colorizes text with optional styling. - * @param {string} text - The text to colorize. - * @param {ANSIColor} [color=ANSIColors.white] - Color to apply. - * @param {...ANSIStyle} styles - Additional styles. - * @returns {string} - Colorized text. - */ - colorize(text, color = ANSIColors.white, ...styles) { - return this.decorate(text, color, ...styles); - } -} - -class RenderContext { - constructor(baseIndentationLevel = 0) { - this.baseIndentationLevel = baseIndentationLevel; - } - - /** - * Returns a string of spaces for a given indentation level. - * - * @param {number} [level] - * @returns {string} - */ - indentLevel(level = 1) { - return ' '.repeat((this.baseIndentationLevel + level) * 2); - } - - /** - * @param {string} text - The text to indent. - * @param {number} [level] - * @returns {string} - */ - indent(text, level = 1) { - return this.indentLevel(level) + text; - } - - /** - * indentedContext returns a new RenderContext with an incremented base indentation level. - * - * This allows to easily obtain a new RenderContext from a parent one with an - * increased indentation level. - * - * @param {number} increment - The increment to apply to the base indentation level. 
- * @returns {RenderContext} - */ - indentedContext(increment = 1) { - return new RenderContext(this.baseIndentationLevel + increment); - } -} - -/** - * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. - */ -class TestReportGenerator { - /** - * Constructs a TestReportGenerator with a specified formatter - * - * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. - * @param {RenderContext} renderContext - The render context to use for text rendering. - * // FIXME (@oleiade): needs JSDoc - * @param options - */ - constructor(formatter, renderContext, options = {}) { - this.formatter = formatter; - this.renderContext = renderContext; - this.options = { - defaultIndent: ' ', - metricGroupIndent: ' ', - ...options, - }; - } - - // FIXME (@oleiade): needs JSDoc - /** - * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. - * - * @param data - * @param report - * @returns {*} - */ - generate(data, report) { - const reportBuilder = new ReportBuilder( - this.formatter, - this.renderContext, - this.options, - ); - return reportBuilder - .addThresholds(report.thresholds) - .addTotalResults(report) - .addGroups(report.groups) - .addScenarios(report.scenarios) - .build(); - } -} - -/** - * Exposes methods for generating a textual summary of test results. - */ -class ReportBuilder { - /** - * Creates a new ReportBuilder with a specified formatter and options. - * - * // FIXME: ANSIFormatter could be an attribute of the render context - * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. - * @param {RenderContext} renderContext - The render context to use for text rendering. - * @param options - */ - constructor(formatter, renderContext, options) { - this.formatter = formatter; - this.renderContext = renderContext; - this.options = options; - this.sections = []; - } - - addThresholds(thresholds) { - if (!thresholds) return this; - - this.sections.push({ - title: 'THRESHOLDS', - content: this._renderThresholds(thresholds), - }); - return this; - } - - addTotalResults(report) { - this.sections.push({ - title: 'TOTAL RESULTS', - content: [ - ...this._renderChecks(report.checks), - ...'\n', - ...this._renderMetrics(report.metrics), - ], - }); - return this; - } - - addGroups(groups) { - if (!groups) return this; - - Object.entries(groups).forEach(([groupName, groupData]) => { - this.sections.push({ - title: `GROUP: ${groupName}`, - content: this._renderGroupContent(groupData), - }); - }); - return this; - } - - addScenarios(scenarios) { - if (!scenarios) return this; - - Object.entries(scenarios).forEach(([scenarioName, scenarioData]) => { - this.sections.push({ - title: `SCENARIO: ${scenarioName}`, - content: this._renderScenarioContent(scenarioData), - }); - }); - return this; - } - - build() { - return this.sections - .map((section) => [ - renderTitle(section.title, this.formatter, this.renderContext), - ...section.content, - '\n', - ]) - .flat() - .join('\n'); - } - - /** - * @param {Object} thresholds - * @param {RenderContext} [renderContext] - * @returns {string[]} - * @private - */ - _renderThresholds(thresholds, renderContext) { - // The thresholds list should be indent one level higher than the title - renderContext = renderContext || this.renderContext; - renderContext = renderContext.indentedContext(1); - - // Implement threshold rendering logic - return renderThresholds( - { metrics: this._processThresholds(thresholds) 
}, - this.formatter, - renderContext, - this.options, - ); - } - - /** - * @param checks - * @param {RenderContext} [renderContext] - The render context to use for text rendering. - * @returns {string[]} - * @private - */ - _renderChecks(checks, renderContext) { - renderContext = renderContext || this.renderContext; - renderContext = renderContext.indentedContext(1); + // Skip over ANSI escape codes. + if (char === '\x1b') { + inEscSeq = true; + continue; + } + if (inEscSeq && char === '[') { + inLongEscSeq = true; + continue; + } + if ( + inEscSeq && + inLongEscSeq && + char.charCodeAt(0) >= 0x40 && + char.charCodeAt(0) <= 0x7e + ) { + inEscSeq = false; + inLongEscSeq = false; + continue; + } + if ( + inEscSeq && + !inLongEscSeq && + char.charCodeAt(0) >= 0x40 && + char.charCodeAt(0) <= 0x5f + ) { + inEscSeq = false; + continue; + } - return checks - ? renderChecks(checks, this.formatter, renderContext, this.options) - : []; + if (!inEscSeq && !inLongEscSeq) { + width++; + } } + return width; +} - /** - * @param metrics - * @param {RenderContext} [renderContext] - * @returns {string[]} - * @private - */ - _renderMetrics(metrics, renderContext) { - renderContext = renderContext || this.renderContext; - renderContext = renderContext.indentedContext(1); +/** + * Extracts a display name for a metric, handling sub-metrics (e.g. "metric{sub}" -> "{ sub }"). + * + * @param {string} name - The metric name. + * @returns {string} - The display name + */ +function renderMetricDisplayName(name) { + const subMetricPos = name.indexOf('{'); + if (subMetricPos >= 0) { + return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }'; + } + return name; +} - // Implement metrics rendering logic - return Object.entries(metrics) - .filter( - ([_, sectionMetrics]) => Object.keys(sectionMetrics).length > 0, - ) - .flatMap(([sectionName, sectionMetrics]) => [ - renderContext.indent( - this.formatter.boldify(sectionName.toUpperCase()), - ), - ...renderMetrics( - { metrics: sectionMetrics }, - this.formatter, - renderContext, - this.options, - ), - ]); +/** + * Converts a number of bytes into a human-readable string with units. + * + * @param {number} bytes - The number of bytes. + * @returns {string} A human-readable string (e.g. "10 kB"). + */ +function humanizeBytes(bytes) { + const units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + const base = 1000; + if (bytes < 10) { + return bytes + ' B'; } - /** - * @param groupData - * @param {RenderContext} [renderContext] - * @returns {*[]} - * @private - */ - _renderGroupContent(groupData, renderContext) { - renderContext = renderContext || this.renderContext; + const e = Math.floor(Math.log(bytes) / Math.log(base)); + const suffix = units[e | 0]; + const val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10; + return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix; +} - // Implement group content rendering - return [ - ...this._renderChecks(groupData.checks, renderContext), - ...this._renderMetrics(groupData.metrics, renderContext), - ...(groupData.groups - ? this._renderNestedGroups(groupData.groups) - : []), - ]; - } +const unitMap = { + s: { unit: 's', coef: 0.001 }, + ms: { unit: 'ms', coef: 1 }, + us: { unit: 'µs', coef: 1000 }, +}; - /** - * @param scenarioData - * @param {RenderContext} [renderContext] - * @returns {*[]} - * @private - */ - _renderScenarioContent(scenarioData, renderContext) { - renderContext = renderContext || this.renderContext; +/** + * Converts a number to a fixed decimal string, removing trailing zeros. 
+ * + * @param {number} val - The number to convert. + * @param {number} prec - Decimal precision. + * @returns {string} A string representation of the number without trailing zeros. + */ +function toFixedNoTrailingZeros(val, prec) { + return parseFloat(val.toFixed(prec)).toString(); +} - // Similar to group content rendering - return [ - ...this._renderChecks(scenarioData.checks, renderContext), - ...this._renderMetrics(scenarioData.metrics, renderContext), - ...(scenarioData.groups - ? this._renderNestedGroups(scenarioData.groups) - : []), - ]; - } +/** + * Truncates a number to a certain precision without rounding, then removes trailing zeros. + * + * @param {number} val - The number to truncate. + * @param {number} prec - Decimal precision. + * @returns {string} A truncated, not rounded string representation. + */ +function toFixedNoTrailingZerosTrunc(val, prec) { + const mult = Math.pow(10, prec); + return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec); +} - /** - * @param groups - * @param {RenderContext} [renderContext] - * @returns {*[]} - * @private - */ - _renderNestedGroups(groups, renderContext) { - renderContext = renderContext || this.renderContext; - renderContext = renderContext.indentedContext(1); +/** + * Humanizes a duration (in milliseconds) to a human-readable string, + * choosing appropriate units (ns, µs, ms, s, m, h). + * + * @param {number} duration - The duration in milliseconds. + * @returns {string} Human-readable duration (e.g. "2.5ms", "3s", "1m30s"). + */ +function humanizeGenericDuration(duration) { + if (duration === 0) { + return '0s'; + } - // Render nested groups recursively - return Object.entries(groups).flatMap(([groupName, groupData]) => [ - renderTitle(`GROUP: ${groupName}`, this.formatter, renderContext, { - prefix: subtitlePrefix, - }), - ...this._renderGroupContent(groupData), - ]); + if (duration < 0.001) { + // smaller than a microsecond, print nanoseconds + return Math.trunc(duration * 1000000) + 'ns'; + } + if (duration < 1) { + // smaller than a millisecond, print microseconds + return toFixedNoTrailingZerosTrunc(duration * 1000, 2) + 'µs'; + } + if (duration < 1000) { + // duration is smaller than a second + return toFixedNoTrailingZerosTrunc(duration, 2) + 'ms'; } - // Private rendering methods - _processThresholds(thresholds) { - // Transform thresholds into a format suitable for rendering - const metrics = {}; - Object.values(thresholds).forEach((threshold) => { - metrics[threshold.metric.name] = { - ...threshold.metric, - thresholds: threshold.thresholds, - }; - }); - return metrics; + let fixedDuration = + toFixedNoTrailingZerosTrunc( + (duration % 60000) / 1000, + duration > 60000 ? 0 : 2, + ) + 's'; + let rem = Math.trunc(duration / 60000); + if (rem < 1) { + // less than a minute + return fixedDuration; + } + fixedDuration = (rem % 60) + 'm' + fixedDuration; + rem = Math.trunc(rem / 60); + if (rem < 1) { + // less than an hour + return fixedDuration; } + return rem + 'h' + fixedDuration; } /** - * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. + * Humanizes a duration according to a specified time unit or uses a generic formatting. * - * @param {Object} data - The data input for the summary (includes options, metrics, etc.). - * @param {Object} options - Additional options that override defaults. - * @param {Object} report - The report object containing thresholds, checks, metrics, groups, and scenarios. 
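A few indicative inputs and outputs for the duration helpers above (a sketch; durations are expressed in milliseconds and the exact strings follow from the truncation behaviour of toFixedNoTrailingZerosTrunc):

    humanizeGenericDuration(0.0005);  // "500ns"
    humanizeGenericDuration(0.5);     // "500µs"
    humanizeGenericDuration(2.5);     // "2.5ms"
    humanizeGenericDuration(1500);    // "1.5s"
    humanizeGenericDuration(90000);   // "1m30s"
    humanizeGenericDuration(5400000); // "1h30m0s"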
- * @returns {string} A formatted summary of the test results. + * @param {number} dur - The duration in milliseconds. + * @param {string|null} timeUnit - Optional time unit (e.g. "ms", "s"). + * @returns {string} A human-readable duration string. */ -function generateTextSummary(data, options, report) { - const mergedOpts = Object.assign({}, defaultOptions, data.options, options); - - // Create a render context holding information such as indentation level to apply - const context = new RenderContext(0); - - // Create a formatter with default settings (colors enabled) - const formatter = new ANSIFormatter(); - - const reportGenerator = new TestReportGenerator( - formatter, - context, - mergedOpts, - ); +function humanizeDuration(dur, timeUnit) { + if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { + return ( + (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit + ); + } - return reportGenerator.generate(data, report); + return humanizeGenericDuration(dur); } -exports.humanizeValue = humanizeValue; -exports.textSummary = generateTextSummary; +/** + * Returns the summary values for non-trend metrics (counter, gauge, rate). + * + * @param {ReportMetric} metric - The metric to summarize. + * @param {string|null} timeUnit - The time unit for durations. + * @returns {string[]} - An array of summary values. + */ +function nonTrendMetricValueForSum(metric, timeUnit) { + switch (metric.type) { + case 'counter': + return [ + humanizeValue(metric.values.count, metric, timeUnit), + humanizeValue(metric.values.rate, metric, timeUnit) + '/s', + ]; + case 'gauge': + return [ + humanizeValue(metric.values.value, metric, timeUnit), + 'min=' + humanizeValue(metric.values.min, metric, timeUnit), + 'max=' + humanizeValue(metric.values.max, metric, timeUnit), + ]; + case 'rate': + return [ + humanizeValue(metric.values.rate, metric, timeUnit), + `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, + ]; + default: + return ['[no data]']; + } +} From 63d89e0eb5fc67c8944ea63db017a228eb040c31 Mon Sep 17 00:00:00 2001 From: oleiade Date: Wed, 18 Dec 2024 16:19:20 +0100 Subject: [PATCH 22/42] Fulfill JSDoc documentation of summary.js --- js/summary.js | 181 +++++++++++++++++++++++++++++++++----------------- lib/report.go | 39 +++++------ 2 files changed, 140 insertions(+), 80 deletions(-) diff --git a/js/summary.js b/js/summary.js index eb24f53ec84..b5601f1139f 100644 --- a/js/summary.js +++ b/js/summary.js @@ -63,33 +63,46 @@ function humanizeValue(val, metric, timeUnit) { } /** - * @typedef {Object} Threshold + * @typedef {Object} Report + * @property {Record} thresholds - The thresholds report. + * @property {ReportMetrics} metrics - The metrics report. + * @property {Record} groups - The groups report. + * @property {Record} scenarios - The scenarios report. + */ + +/** + * @typedef {Object} ReportThreshold * @property {string} source - The threshold expression source. * @property {boolean} ok - Whether the threshold was satisfied or not. */ +// FIXME (@oleiade): Could use a better name as it's not really a group in the k6 sense? /** - * @typedef {Object} Check - * @property {string} id - The check ID. - * @property {string} name - The check name. - * @property {string} path - The check path. - * @property {number} passes - The number of successful checks. - * @property {number} fails - The number of failed checks. + * @typedef {Object} ReportGroup + * @property {ReportChecks} checks - The checks report. 
+ * @property {ReportMetrics} metrics - The metrics report. + * @property {Record} groups - The nested groups report. */ /** * @typedef {Object} ReportMetric - * @property {string} name - The metric name. - * @property {string} type - The type of the metric (e.g., "counter", "gauge", "rate", "trend"). - * @property {string} contains - The type of data contained in the metric (e.g., "time", "data", "default"). + * @property {string} name - The name of the reported metric. + * @property {"counter"|"gauge"|"rate"|"trend"} type - The type of the metric. + * @property {"time"|"data"|"default"} contains - The type of data contained in the metric * @property {Record} values - Key-value pairs of metric statistics (e.g. min, max, avg). - * @property {Threshold[]} [thresholds] - Optional array of thresholds associated with this metric. + * @property {EngineThreshold[]} [thresholds] - Optional array of thresholds associated with this metric. */ /** - * @typedef {Object} ReportThreshold - * @property {string} source - The threshold expression source. - * @property {boolean} ok - Whether the threshold was satisfied or not. + * @typedef {Object} ReportMetrics + * @property {Record} http - The HTTP metrics. + * @property {Record} execution - The execution-related metrics. + * @property {Record} network - The network-related metrics. + * @property {Record} browser - The browser-related metrics. + * @property {Record} webvitals - The web vitals metrics. + * @property {Record} grpc - The grpc-related metrics. + * @property {Record} websocket - The websocket-related metrics. + * @property {Record} miscelaneous - The custom metrics. */ /** @@ -100,28 +113,35 @@ function humanizeValue(val, metric, timeUnit) { */ /** - * @typedef {Object} MetricThresholds + * @typedef {Object} ReportChecks + * @property {ReportChecksMetrics} metrics - The metrics for checks. + * @property {EngineCheck[]} ordered_checks - The ordered checks. + */ + +/** + * @typedef {Object} ReportMetricThresholds * @property {ReportMetric} metric - The metric object. * @property {ReportThreshold[]} thresholds - The thresholds for the metric. */ /** - * @typedef {Object} ReportChecks - * @property {ReportChecksMetrics} metrics - The metrics for checks. - * @property {Check[]} ordered_checks - The ordered checks. + * @typedef {Object} ReportData + * @property {Record} metrics - Collection of metrics keyed by their names. */ /** - * @typedef {Object} DisplayOptions - * @property {boolean} sortByName - Whether metrics should be sorted by name. - * @property {boolean} bold - Whether to display section names in bold. - * @property {string} indent - Indentation string for the output. - * @property {string} metricsBlockIndent - Additional indentation for metrics blocks. + * @typedef {Object} EngineCheck + * @property {string} id - The check ID. + * @property {string} name - The check name. + * @property {string} path - The check path. + * @property {number} passes - The number of successful checks. + * @property {number} fails - The number of failed checks. */ /** - * @typedef {Object} ReportData - * @property {Record} metrics - Collection of metrics keyed by their names. + * @typedef {Object} EngineThreshold + * @property {string} source - The threshold expression source. + * @property {boolean} ok - Whether the threshold was satisfied or not. */ /** @@ -141,28 +161,21 @@ class TestReportGenerator { * * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. 
* @param {RenderContext} renderContext - The render context to use for text rendering. - * // FIXME (@oleiade): needs JSDoc - * @param options + * @param {Options} [options = {}] */ constructor(formatter, renderContext, options = {}) { this.formatter = formatter; this.renderContext = renderContext; - this.options = { - defaultIndent: ' ', - metricGroupIndent: ' ', - ...options, - }; + this.options = options; } - // FIXME (@oleiade): needs JSDoc /** * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. * - * @param data - * @param report - * @returns {*} + * @param {Report} report - The report object containing thresholds, checks, metrics, groups, and scenarios as provided by k6. + * @returns {string} - A formatted summary of the test results. */ - generate(data, report) { + generate(report) { const reportBuilder = new ReportBuilder( this.formatter, this.renderContext, @@ -184,7 +197,6 @@ class ReportBuilder { /** * Creates a new ReportBuilder with a specified formatter and options. * - * // FIXME: ANSIFormatter could be an attribute of the render context * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. * @param {RenderContext} renderContext - The render context to use for text rendering. * @param options @@ -196,6 +208,12 @@ class ReportBuilder { this.sections = []; } + /** + * Adds a thresholds section to the report. + * + * @param {Record} thresholds - The thresholds to add to the report. + * @returns {ReportBuilder} + */ addThresholds(thresholds) { if (!thresholds) return this; @@ -206,6 +224,12 @@ class ReportBuilder { return this; } + /** + * Adds a total results section to the report. + * + * @param {Report} report - The report object containing thresholds, checks, metrics, groups, and scenarios as provided by k6. + * @returns {ReportBuilder} + */ addTotalResults(report) { this.sections.push({ title: 'TOTAL RESULTS', @@ -218,6 +242,12 @@ class ReportBuilder { return this; } + /** + * Adds groups sections to the report. + * + * @param {Record} groups + * @returns {ReportBuilder} + */ addGroups(groups) { if (!groups) return this; @@ -230,6 +260,12 @@ class ReportBuilder { return this; } + /** + * Adds scenarios sections to the report. + * + * @param {Record} scenarios - The scenarios to add to the report. + * @returns {ReportBuilder} + */ addScenarios(scenarios) { if (!scenarios) return this; @@ -242,6 +278,12 @@ class ReportBuilder { return this; } + /** + * Builds the final report by concatenating all sections together, resulting + * in a formatted string ready to be printed to the terminal. + * + * @returns {string} + */ build() { return this.sections .map((section) => [ @@ -254,7 +296,7 @@ class ReportBuilder { } /** - * @param {Object} thresholds + * @param {Record} thresholds * @param {RenderContext} [renderContext] * @returns {string[]} * @private @@ -289,7 +331,7 @@ class ReportBuilder { } /** - * @param metrics + * @param {ReportMetrics} metrics * @param {RenderContext} [renderContext] * @returns {string[]} * @private @@ -317,28 +359,26 @@ class ReportBuilder { } /** - * @param groupData + * @param {ReportGroup} group - The group data to render. 
* @param {RenderContext} [renderContext] - * @returns {*[]} + * @returns {string[]} * @private */ - _renderGroupContent(groupData, renderContext) { + _renderGroupContent(group, renderContext) { renderContext = renderContext || this.renderContext; // Implement group content rendering return [ - ...this._renderChecks(groupData.checks, renderContext), - ...this._renderMetrics(groupData.metrics, renderContext), - ...(groupData.groups - ? this._renderNestedGroups(groupData.groups) - : []), + ...this._renderChecks(group.checks, renderContext), + ...this._renderMetrics(group.metrics, renderContext), + ...(group.groups ? this._renderNestedGroups(group.groups) : []), ]; } /** - * @param scenarioData + * @param {ReportGroup} scenarioData * @param {RenderContext} [renderContext] - * @returns {*[]} + * @returns {string[]} * @private */ _renderScenarioContent(scenarioData, renderContext) { @@ -355,9 +395,9 @@ class ReportBuilder { } /** - * @param groups + * @param {Record} groups * @param {RenderContext} [renderContext] - * @returns {*[]} + * @returns {string[]} * @private */ _renderNestedGroups(groups, renderContext) { @@ -374,6 +414,12 @@ class ReportBuilder { } // Private rendering methods + /** + * + * @param {ReportMetricThresholds} thresholds + * @returns {{}} + * @private + */ _processThresholds(thresholds) { // Transform thresholds into a format suitable for rendering const metrics = {}; @@ -387,6 +433,18 @@ class ReportBuilder { } } +/** + * RenderContext is a helper class that provides methods for rendering text + * with indentation. + * + * It is used to keep track of the current indentation level and provide + * methods for rendering text with the correct indentation. + * + * It also facilitates the creation of new RenderContext instances with + * different indentation levels. That way the indentation level can be + * easily adjusted relatively to a parent indentation level without having + * to manage some dedicated state manually. + */ class RenderContext { constructor(baseIndentationLevel = 0) { this.baseIndentationLevel = baseIndentationLevel; @@ -527,7 +585,7 @@ const ANSIStyles = { * @param {string} title - The section title to render. * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. * @param {RenderContext} renderContext - The render context to use for text rendering. - * @param {Object} options - Additional options for rendering the section title. + * @param {Options & Object} options - Additional options for rendering the section title. * @param {string} [options.prefix=titlePrefix] - The prefix to use for the section title. * @param {string} [options.suffix='\n'] - The suffix to use for the section title. * @returns {string} - The formatted section title. @@ -546,7 +604,7 @@ function renderTitle( /** * Renders a single check into a formatted line ready for output. * - * @param {{name: string, passes: number, fails: number}} check - The check object with name, passes and fails + * @param {EngineCheck} check - The check object with name, passes and fails * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. * @param {RenderContext} renderContext - The render context to use for text rendering. * @returns {string} - A formatted line summarizing the check. @@ -591,7 +649,7 @@ function renderCheck(check, formatter, renderContext) { /** * Renders checks into a formatted set of lines ready for display in the terminal. 
* - * @param checks + * @param {ReportChecks} checks * @param formatter * @param {RenderContext} renderContext * @param options @@ -629,10 +687,11 @@ function renderChecks(checks, formatter, renderContext, options = {}) { return [...checkMetrics, ...renderedChecks]; } +//FIXME (@oleiade): We should clarify the data argument's type and give it a better name and typedef /** * Summarizes metrics into an array of formatted lines ready to be printed to stdout. * - * @param {{metrics: Object[]}} data - The data object containing metrics. + * @param {ReportChecks} data - The data object containing metrics. * @param {ANSIFormatter} formatter - An ANSIFormatter function for ANSI colors. * @param {RenderContext} renderContext - The render context to use for text rendering. * @param {Options} options - Display options merged with defaultOptions. @@ -864,7 +923,7 @@ function renderTrendData(name, info, formatter) { * Format data for non-trend metrics. * * @param {string} name - The metric name. - * @param {Object} info - The summary information object. + * @param {SummaryInfo} info - The summary information object. * @param {ANSIFormatter} formatter - A decoration function for ANSI colors. */ function renderNonTrendData(name, info, formatter) { @@ -900,10 +959,10 @@ function renderNonTrendData(name, info, formatter) { /** * - * @param value - * @param stat - * @param metric - * @param options + * @param {number} value + * @param {string} stat + * @param {ReportMetric} metric + * @param {Options} options * @returns {string} */ function renderTrendValue(value, stat, metric, options) { diff --git a/lib/report.go b/lib/report.go index ba40a0a8c12..4d410cec06d 100644 --- a/lib/report.go +++ b/lib/report.go @@ -1,11 +1,28 @@ package lib import ( - "go.k6.io/k6/metrics" - "time" + + "go.k6.io/k6/metrics" ) +type Report struct { + ReportThresholds `js:"thresholds"` + ReportGroup + Scenarios map[string]ReportGroup +} + +func NewReport() Report { + return Report{ + ReportThresholds: NewReportThresholds(), + ReportGroup: ReportGroup{ + Metrics: NewReportMetrics(), + Groups: make(map[string]ReportGroup), + }, + Scenarios: make(map[string]ReportGroup), + } +} + type ReportMetricInfo struct { Name string Type string @@ -115,6 +132,7 @@ func NewReportThresholds() ReportThresholds { return thresholds } +// FIXME (@oleiade): While writing JSDOC I found the name ambiguous, should we rename it? type ReportGroup struct { Checks *ReportChecks // Not always present, thus we use a pointer. 
Metrics ReportMetrics @@ -128,23 +146,6 @@ func NewReportGroup() ReportGroup { } } -type Report struct { - ReportThresholds `js:"thresholds"` - ReportGroup - Scenarios map[string]ReportGroup -} - -func NewReport() Report { - return Report{ - ReportThresholds: NewReportThresholds(), - ReportGroup: ReportGroup{ - Metrics: NewReportMetrics(), - Groups: make(map[string]ReportGroup), - }, - Scenarios: make(map[string]ReportGroup), - } -} - func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Duration) map[string]float64 { trendResolvers, err := metrics.GetResolversForTrendColumns(summaryTrendStats) if err != nil { From 644623be0fe5afea55e9dcc5e3912161dbc3fbe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 15 Jan 2025 14:21:26 +0100 Subject: [PATCH 23/42] Enable --summary-extended mode --- cmd/run.go | 7 ++++++- cmd/runtime_options.go | 2 ++ lib/runtime_options.go | 11 ++++++----- output/summary/summary.go | 20 ++++++++++++++------ 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 5318996a8ea..1f56dd9b75e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -195,7 +195,12 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { summaryOutput, err := summary.New(output.Params{ Logger: c.gs.Logger, }) - if err != nil { + if err == nil { + isSummaryExtended := testRunState.RuntimeOptions.SummaryExtended + if isSummaryExtended.Valid && isSummaryExtended.Bool { + summaryOutput.EnableExtendedMode() + } + } else { logger.WithError(err).Error("failed to initialize the end-of-test summary output") } outputs = append(outputs, summaryOutput) diff --git a/cmd/runtime_options.go b/cmd/runtime_options.go index 60c2d038935..174d4a6d8e7 100644 --- a/cmd/runtime_options.go +++ b/cmd/runtime_options.go @@ -32,6 +32,7 @@ experimental_enhanced: esbuild-based transpiling for TypeScript and ES6+ support flags.StringArrayP("env", "e", nil, "add/override environment variable with `VAR=value`") flags.Bool("no-thresholds", false, "don't run thresholds") flags.Bool("no-summary", false, "don't show the summary at the end of the test") + flags.Bool("summary-extended", false, "show an extended summary at the end of the test") flags.String( "summary-export", "", @@ -67,6 +68,7 @@ func getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib CompatibilityMode: getNullString(flags, "compatibility-mode"), NoThresholds: getNullBool(flags, "no-thresholds"), NoSummary: getNullBool(flags, "no-summary"), + SummaryExtended: getNullBool(flags, "summary-extended"), SummaryExport: getNullString(flags, "summary-export"), TracesOutput: getNullString(flags, "traces-output"), Env: make(map[string]string), diff --git a/lib/runtime_options.go b/lib/runtime_options.go index b82acc38db5..7099dc44cad 100644 --- a/lib/runtime_options.go +++ b/lib/runtime_options.go @@ -39,11 +39,12 @@ type RuntimeOptions struct { // Environment variables passed onto the runner Env map[string]string `json:"env"` - NoThresholds null.Bool `json:"noThresholds"` - NoSummary null.Bool `json:"noSummary"` - SummaryExport null.String `json:"summaryExport"` - KeyWriter null.String `json:"-"` - TracesOutput null.String `json:"tracesOutput"` + NoThresholds null.Bool `json:"noThresholds"` + NoSummary null.Bool `json:"noSummary"` + SummaryExtended null.Bool `json:"summaryExtended"` + SummaryExport null.String `json:"summaryExport"` + KeyWriter null.String `json:"-"` + TracesOutput null.String `json:"tracesOutput"` } // 
ValidateCompatibilityMode checks if the provided val is a valid compatibility mode diff --git a/output/summary/summary.go b/output/summary/summary.go index 91e7773894b..069e25a5a2b 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -23,7 +23,8 @@ type Output struct { periodicFlusher *output.PeriodicFlusher logger logrus.FieldLogger - dataModel dataModel + dataModel dataModel + extendedModeEnabled bool } // New returns a new JSON output. @@ -32,7 +33,8 @@ func New(params output.Params) (*Output, error) { logger: params.Logger.WithFields(logrus.Fields{ "output": "summary", }), - dataModel: newDataModel(), + dataModel: newDataModel(), + extendedModeEnabled: false, }, nil } @@ -55,6 +57,10 @@ func (o *Output) Stop() error { return nil } +func (o *Output) EnableExtendedMode() { + o.extendedModeEnabled = true +} + func (o *Output) flushMetrics() { samples := o.GetBufferedSamples() for _, sc := range samples { @@ -66,12 +72,14 @@ func (o *Output) flushMetrics() { } func (o *Output) flushSample(sample metrics.Sample) { - // First, we store the sample data into the metrics stored at the k6 metrics registry level. + // First, the sample data is stored into the metrics stored at the k6 metrics registry level. o.storeSample(sample) + if !o.extendedModeEnabled { + return + } - // Then, we'll proceed to store the sample data into each group - // metrics. However, we need to determine whether the groups tree - // is within a scenario or not. + // Then, if the extended mode is enabled, the sample data is stored into each group metrics. + // However, we need to determine whether the groups tree is within a scenario or not. groupData := o.dataModel.aggregatedGroupData if scenarioName, hasScenario := sample.Tags.Get("scenario"); hasScenario { groupData = o.dataModel.groupDataFor(scenarioName) From 061e60e74f41e913d96792670840434ac986f03a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 15 Jan 2025 14:39:49 +0100 Subject: [PATCH 24/42] Add a custom metric to the example script --- playground/full-summary/api.js | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/playground/full-summary/api.js b/playground/full-summary/api.js index 5d900b5277b..9b7ee0bd037 100644 --- a/playground/full-summary/api.js +++ b/playground/full-summary/api.js @@ -1,8 +1,12 @@ import http from 'k6/http' import {check, group} from 'k6' +import {Trend} from 'k6/metrics'; + +const myTrend = new Trend('waiting_time'); export function apiTest() { const res = http.get('https://httpbin.org/get') + myTrend.add(res.timings.waiting); check(res, { 'httpbin.org is up': (r) => r.status === 200, 'httpbin.org is down': (r) => r.status === 500, @@ -19,13 +23,14 @@ export function apiTest() { password: 'onegaishimasu', }) ) + myTrend.add(res.timings.waiting); check(res, { 'status is 201 CREATED': (r) => r.status === 201, }) group('authorized crocodiles', () => { const res = http.get('https://httpbin.org/get') - + myTrend.add(res.timings.waiting); check(res, { 'authorized crocodiles are 200 OK': (r) => r.status === 200, }) @@ -34,7 +39,7 @@ export function apiTest() { group('my crocodiles', () => { const res = http.get('https://httpbin.org/get') - + myTrend.add(res.timings.waiting); check(res, { 'my crocodiles are 200 OK': (r) => r.status === 200, }) From 037c16a910ec37b2decc56d83dcd71bbcce6094d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Fri, 17 Jan 2025 13:22:56 +0100 Subject: [PATCH 25/42] Clean up the 
old summary data (except for the user-defined handler) --- cmd/run.go | 2 +- js/runner.go | 16 ++++++++++++---- js/summary-wrapper.js | 7 +++---- js/summary.js | 15 +++++++-------- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 1f56dd9b75e..ecd24e2c883 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -426,7 +426,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { backgroundProcesses.Add(1) go func() { defer backgroundProcesses.Done() - reportCtx, reportCancel := context.WithTimeout(globalCtx, 60*time.Second) + reportCtx, reportCancel := context.WithTimeout(globalCtx, 3*time.Second) defer reportCancel() logger.Debug("Sending usage report...") diff --git a/js/runner.go b/js/runner.go index a0920fc678e..552bb88245f 100644 --- a/js/runner.go +++ b/js/runner.go @@ -350,12 +350,10 @@ func (r *Runner) IsExecutable(name string) bool { // HandleSummary calls the specified summary callback, if supplied. func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary, report lib.Report) (map[string]io.Reader, error) { - summaryDataForJS := summarizeMetricsToObject(summary, r.Bundle.Options, r.setupData) - out := make(chan metrics.SampleContainer, 100) defer close(out) - go func() { // discard all metrics + go func() { // discard all metrics for range out { //nolint:revive } }() @@ -381,6 +379,8 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary, report return nil, fmt.Errorf("exported identifier %s must be a function", consts.HandleSummaryFn) } + // TODO: Do we want to keep it compatible with the old format? Or do we want to break it? + summaryDataForJS := summarizeMetricsToObject(summary, r.Bundle.Options, r.setupData) callbackResult, _, _, err = vu.runFn(summaryCtx, false, handleSummaryFn, nil, vu.Runtime.ToValue(summaryDataForJS)) if err != nil { errText, fields := errext.Format(err) @@ -398,11 +398,19 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary, report return nil, fmt.Errorf("unexpected error did not get a callable summary wrapper") } + options := map[string]interface{}{ + // TODO: improve when we can easily export all option values, including defaults? + "summaryTrendStats": r.Bundle.Options.SummaryTrendStats, + "summaryTimeUnit": r.Bundle.Options.SummaryTimeUnit.String, + "noColor": summary.NoColor, // TODO: move to the (runtime) options + "enableColors": !summary.NoColor && summary.UIState.IsStdOutTTY, + } + wrapperArgs := []sobek.Value{ callbackResult, vu.Runtime.ToValue(r.Bundle.preInitState.RuntimeOptions.SummaryExport.String), - vu.Runtime.ToValue(summaryDataForJS), vu.Runtime.ToValue(report), + vu.Runtime.ToValue(options), } rawResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryWrapper, nil, wrapperArgs...) 
diff --git a/js/summary-wrapper.js b/js/summary-wrapper.js index 7f876bf6c94..7164fc27459 100644 --- a/js/summary-wrapper.js +++ b/js/summary-wrapper.js @@ -60,12 +60,11 @@ return JSON.stringify(results, null, 4); }; - return function (summaryCallbackResult, jsonSummaryPath, data, report) { - var result = summaryCallbackResult; + return function (summaryCallbackResult, jsonSummaryPath, report, options) { + let result = summaryCallbackResult; if (!result) { - var enableColors = (!data.options.noColor && data.state.isStdOutTTY); result = { - 'stdout': '\n' + jslib.textSummary(data, {indent: ' ', enableColors: enableColors}, report) + '\n\n', + 'stdout': '\n' + jslib.textSummary(report, options) + '\n\n', }; } diff --git a/js/summary.js b/js/summary.js index b5601f1139f..dea40337ce8 100644 --- a/js/summary.js +++ b/js/summary.js @@ -1,12 +1,12 @@ /** * This file contains code used to generate a textual summary of tests results, as displayed - * in the user's terminal at the end of a k6 test run, also know as "end of test summary". + * in the user's terminal at the end of a k6 test run, also known as "end of test summary". * * The main entry point is the `generateTextSummary` function, which takes the test data as well as a report * object containing results for checks, metrics, thresholds, groups, and scenarios, and returns a formatted * string summarizing the test results, ready to be written to the terminal. * - * For convinience, the file also exports the `humanizeValue` function. + * For convenience, the file also exports the `humanizeValue` function. */ exports.humanizeValue = humanizeValue; exports.textSummary = generateTextSummary; @@ -14,14 +14,12 @@ exports.textSummary = generateTextSummary; /** * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. * - * @param {Object} data - The data input for the summary (includes options, metrics, etc.). + * @param {Report} report - The report object containing thresholds, checks, metrics, groups, and scenarios. * @param {Object} options - Additional options that override defaults. - * @param {Object} report - The report object containing thresholds, checks, metrics, groups, and scenarios. * @returns {string} A formatted summary of the test results. */ -//FIXME (@oleiade): because options is... optional, it should, if possible, be the last argument here. 
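// Illustrative note (not part of the diff): after this change the report produced by k6 is the
// first argument and the display options the optional second one, so the wrapper's fallback shown
// above boils down to a call shaped roughly like
//   textSummary(report, {summaryTrendStats: [...], summaryTimeUnit: '', noColor: false, enableColors: true})
// with the options mirroring the map built in js/runner.go and merged with defaultOptions below.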
-function generateTextSummary(data, options, report) { - const mergedOpts = Object.assign({}, defaultOptions, data.options, options); +function generateTextSummary(report, options) { + const mergedOpts = Object.assign({}, defaultOptions, options); // Create a render context holding information such as indentation level to apply const context = new RenderContext(0); @@ -35,7 +33,7 @@ function generateTextSummary(data, options, report) { mergedOpts, ); - return reportGenerator.generate(data, report); + return reportGenerator.generate(report); } /** @@ -1111,6 +1109,7 @@ const subtitlePrefix = '↳'; const successMark = '✓'; const failMark = '✗'; const defaultOptions = { + indent: ' ', enableColors: true, // FIXME (@oleiade): we should ensure we respect this flag summaryTimeUnit: null, summaryTrendStats: null, From 04cd35b363486f3732dc42642ce7cc8d72b5d40c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Thu, 30 Jan 2025 11:04:34 +0100 Subject: [PATCH 26/42] Par the end-of-test summary with the design --- internal/cmd/run.go | 1 - internal/js/summary.js | 206 ++++++++++++++++++++------------ lib/report.go | 20 ++-- output/summary/report.go | 2 +- output/summary/summary.go | 2 +- playground/full-summary/grpc.js | 2 +- 6 files changed, 143 insertions(+), 90 deletions(-) diff --git a/internal/cmd/run.go b/internal/cmd/run.go index c675b020bb6..d694386647b 100644 --- a/internal/cmd/run.go +++ b/internal/cmd/run.go @@ -32,7 +32,6 @@ import ( "go.k6.io/k6/metrics" "go.k6.io/k6/output" "go.k6.io/k6/output/summary" - "go.k6.io/k6/ui/pb" ) // cmdRun handles the `k6 run` sub-command diff --git a/internal/js/summary.js b/internal/js/summary.js index dea40337ce8..38a158788e8 100644 --- a/internal/js/summary.js +++ b/internal/js/summary.js @@ -213,7 +213,7 @@ class ReportBuilder { * @returns {ReportBuilder} */ addThresholds(thresholds) { - if (!thresholds) return this; + if (!thresholds || Object.keys(thresholds).length === 0) return this; this.sections.push({ title: 'THRESHOLDS', @@ -233,7 +233,6 @@ class ReportBuilder { title: 'TOTAL RESULTS', content: [ ...this._renderChecks(report.checks), - ...'\n', ...this._renderMetrics(report.metrics), ], }); @@ -249,12 +248,14 @@ class ReportBuilder { addGroups(groups) { if (!groups) return this; - Object.entries(groups).forEach(([groupName, groupData]) => { - this.sections.push({ - title: `GROUP: ${groupName}`, - content: this._renderGroupContent(groupData), + Object.entries(groups) + .sort(([a], [b]) => a.localeCompare(b)) + .forEach(([groupName, groupData]) => { + this.sections.push({ + title: `GROUP: ${groupName}`, + content: this._renderGroupContent(groupData), + }); }); - }); return this; } @@ -267,12 +268,14 @@ class ReportBuilder { addScenarios(scenarios) { if (!scenarios) return this; - Object.entries(scenarios).forEach(([scenarioName, scenarioData]) => { - this.sections.push({ - title: `SCENARIO: ${scenarioName}`, - content: this._renderScenarioContent(scenarioData), + Object.entries(scenarios) + .sort(([a], [b]) => a.localeCompare(b)) + .forEach(([scenarioName, scenarioData]) => { + this.sections.push({ + title: `SCENARIO: ${scenarioName}`, + content: this._renderScenarioContent(scenarioData), + }); }); - }); return this; } @@ -290,7 +293,9 @@ class ReportBuilder { '\n', ]) .flat() - .join('\n'); + .reduce((acc, curr) => { + return (curr === '\n') ? 
acc + curr : acc + '\n' + curr; + }, ''); } /** @@ -321,10 +326,9 @@ class ReportBuilder { */ _renderChecks(checks, renderContext) { renderContext = renderContext || this.renderContext; - renderContext = renderContext.indentedContext(1); return checks - ? renderChecks(checks, this.formatter, renderContext, this.options) + ? [...renderChecks(checks, this.formatter, renderContext, this.options), '\n'] : []; } @@ -343,6 +347,11 @@ class ReportBuilder { .filter( ([_, sectionMetrics]) => Object.keys(sectionMetrics).length > 0, ) + .reduce( + (acc, [sectionName, sectionMetrics]) => (sectionName === "custom") + ? [[sectionName, sectionMetrics], ...acc] + : [...acc, [sectionName, sectionMetrics]] + , []) .flatMap(([sectionName, sectionMetrics]) => [ renderContext.indent( this.formatter.boldify(sectionName.toUpperCase()), @@ -353,6 +362,7 @@ class ReportBuilder { renderContext, this.options, ), + '\n', ]); } @@ -403,19 +413,21 @@ class ReportBuilder { renderContext = renderContext.indentedContext(1); // Render nested groups recursively - return Object.entries(groups).flatMap(([groupName, groupData]) => [ - renderTitle(`GROUP: ${groupName}`, this.formatter, renderContext, { - prefix: subtitlePrefix, - }), - ...this._renderGroupContent(groupData), - ]); + return Object.entries(groups) + .sort(([a], [b]) => a.localeCompare(b)) + .flatMap(([groupName, groupData]) => [ + renderTitle(`GROUP: ${groupName}`, this.formatter, renderContext, { + prefix: subtitlePrefix, + }), + ...this._renderGroupContent(groupData, renderContext), + ]); } // Private rendering methods /** * * @param {ReportMetricThresholds} thresholds - * @returns {{}} + * @returns {Record} * @private */ _processThresholds(thresholds) { @@ -529,7 +541,10 @@ class ANSIFormatter { * @returns {string} Bold text */ boldify(text) { - return this.decorate(text, 'white', 'bold'); + if (!this.options.enableColors) { + return text; + } + return `\u001b[1m${text}\x1b[0m`; } } @@ -595,7 +610,7 @@ function renderTitle( options = { prefix: titlePrefix, suffix: '\n' }, ) { return renderContext.indent( - `${options.prefix} ${formatter.boldify(title)} ${options.suffix}`, + `${options.prefix} ${formatter.boldify(title)} ${options.suffix || ''}`, ); } @@ -682,7 +697,7 @@ function renderChecks(checks, formatter, renderContext, options = {}) { : []; // Combine metrics and checks - return [...checkMetrics, ...renderedChecks]; + return [...checkMetrics, '\n', ...renderedChecks]; } //FIXME (@oleiade): We should clarify the data argument's type and give it a better name and typedef @@ -759,60 +774,76 @@ function renderThresholds(data, formatter, renderContext, options) { // well as the thresholds results for each expression. const result = []; for (const name of metricNames) { - const metric = data.metrics[name]; - const line = renderSubmetricLine( + const parentName = name.split('{', 1)[0]; + const isSubmetric = name.length > parentName.length; + const parentMetricExists = !!data.metrics[parentName]; + + const innerContext = (isSubmetric && parentMetricExists) + ? renderContext.indentedContext() + : renderContext; + + const line = renderMetricNameForThresholds( name, - metric, - summaryInfo, - options, - formatter, - renderContext, + parentName, + isSubmetric, + parentMetricExists, + innerContext ); result.push(line); + const metric = data.metrics[name]; if (metric.thresholds) { - // TODO (@oleiade): make sure the arguments are always ordered consistently across functions (indent, decorate, etc.) 
const thresholdLines = renderThresholdResults( - metric.thresholds, + metric, + summaryInfo, formatter, - renderContext.indentedContext(1), + innerContext, ); - result.push(...thresholdLines); + result.push(...thresholdLines, '\n'); } } - return result; + return result } /** * Renders each threshold result into a formatted set of lines ready for display in the terminal. * - * @param {Object} thresholds - The thresholds to render. + * @param {ReportMetric} metric - The metric with the thresholds to render. + * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. * @param {RenderContext} renderContext - The render context to use for text rendering. * @returns {string[]} - An array of formatted lines including threshold statuses. */ -function renderThresholdResults(thresholds, formatter, renderContext) { +function renderThresholdResults( + metric, + info, + formatter, + renderContext, +) { const lines = []; - forEach(thresholds, (_, threshold) => { + forEach(metric.thresholds, (_, threshold) => { const isSatisfied = threshold.ok; const statusText = isSatisfied - ? formatter.decorate('SATISFIED', 'green') - : formatter.decorate('UNSATISFIED', 'red'); + ? formatter.decorate(successMark, 'green') + : formatter.decorate(failMark, 'red'); - // Extra indentation for threshold lines - // Adjusting spacing so that it aligns nicely under the metric line - const additionalIndent = isSatisfied ? ' ' : ' '; const sourceText = formatter.decorate( `'${threshold.source}'`, 'white', - 'faint', ); + const metricValueText = renderMetricValueForThresholds( + metric, + threshold, + info, + formatter, + ) + // Here we push a line describing the threshold's result lines.push( - renderContext.indent(statusText + additionalIndent + sourceText), + renderContext.indent([statusText, sourceText, metricValueText].join(' ')), ); }); @@ -859,53 +890,76 @@ function renderMetricLine( return renderContext.indent(dottedName + ' ' + dataPart); } -// FIXME (@oleiade): summarizeMetricsOptions needs a better name "DisplayConfig"? /** - * Formats a submetric (metric+tags key/value pairs) line for output. + * Formats a metric or submetric line for the thresholds' section output. * - * @param {string} name - name of the submetric - * @param {ReportMetric} metric - submetric object (submetric really are just a specialized metric with a tags set and a pointer to their parent) - * @param {SummaryInfo} info - summary information object - * @param {Options} options - display options - * @param {ANSIFormatter} formatter - ANSI formatter + * @param {string} name - name of the metric + * @param {string} parentName - name of the parent metric + * @param {boolean} isSubmetric - whether the metric is a submetric + * @param {boolean} parentMetricExists - in case of submetric, whether the parent metric exists * @param {RenderContext} renderContext - render context * @returns {string} submetric report line in the form: `{submetric name}...: {value} {extra}` */ -function renderSubmetricLine( +function renderMetricNameForThresholds( name, + parentName, + isSubmetric, + parentMetricExists, + renderContext, +) { + // If it's a parent metric, or it's a submetric, + // which parent metric is not included in results, we just print the name. + if (!isSubmetric || !parentMetricExists) { + return renderContext.indent(name); + } + + // Otherwise, we only print the labels. 
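// Illustrative example (not part of the diff): for a submetric such as
//   'http_req_duration{expected_response:true}'
// whose parent metric 'http_req_duration' is also present in the results, only the label part
// '{expected_response:true}' is rendered here, indented under the parent metric's own line.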
+ return renderContext.indent(name.substring(parentName.length)); +} + +/** + * Formats the metric's value for the thresholds' section output. + * + * @param {ReportMetric} metric - the metric + * @param {EngineThreshold} threshold - the threshold + * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. + * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. + * @returns {string} metric's value line in the form: `{agg}={value}` + */ +function renderMetricValueForThresholds( metric, + threshold, info, - options, formatter, - renderContext, ) { - const { maxNameWidth } = info; + const { trendStats, trendCols, nonTrendValues, nonTrendExtras} = info; + const thresholdAgg = threshold.source.split(/[=><]/)[0]; - // Compute the trailing dots: - // Use `3` as a spacing offset as per original code. - let dotsCount = - maxNameWidth - - strWidth(name) - - strWidth(renderContext.indentLevel()) + - 3; - dotsCount = Math.max(1, dotsCount); - const dottedName = - name + - formatter.decorate('.'.repeat(dotsCount) + ':', 'white', 'faint'); - - const dataPart = - metric.type === 'trend' - ? renderTrendData(name, info, formatter) - : renderNonTrendData(name, info, formatter); + let value; + switch (metric.type) { + case 'trend': + value = trendCols[metric.name][trendStats.indexOf(thresholdAgg)] + break; + case 'counter': + value = (thresholdAgg === 'count') + ? nonTrendValues[metric.name] + : nonTrendExtras[metric.name][0]; + break; + default: + value = nonTrendValues[metric.name]; + } - return renderContext.indent(dottedName + ' ' + dataPart); + return [ + formatter.decorate(thresholdAgg, 'white'), + formatter.decorate(value, 'cyan') + ].join('='); } /** * Format data for trend metrics. 
*/ function renderTrendData(name, info, formatter) { - const { trendStats, trendCols, trendColMaxLens } = info; + const {trendStats, trendCols, trendColMaxLens} = info; const cols = trendCols[name]; return cols diff --git a/lib/report.go b/lib/report.go index 4d410cec06d..049d1057be8 100644 --- a/lib/report.go +++ b/lib/report.go @@ -66,20 +66,20 @@ type ReportMetrics struct { WebSocket map[string]ReportMetric `js:"websocket"` - // Miscellaneous contains user-defined metric results as well as extensions metrics - Miscellaneous map[string]ReportMetric + // Custom contains user-defined metric results as well as extensions metrics + Custom map[string]ReportMetric } func NewReportMetrics() ReportMetrics { return ReportMetrics{ - HTTP: make(map[string]ReportMetric), - Execution: make(map[string]ReportMetric), - Network: make(map[string]ReportMetric), - Browser: make(map[string]ReportMetric), - WebVitals: make(map[string]ReportMetric), - Grpc: make(map[string]ReportMetric), - WebSocket: make(map[string]ReportMetric), - Miscellaneous: make(map[string]ReportMetric), + HTTP: make(map[string]ReportMetric), + Execution: make(map[string]ReportMetric), + Network: make(map[string]ReportMetric), + Browser: make(map[string]ReportMetric), + WebVitals: make(map[string]ReportMetric), + Grpc: make(map[string]ReportMetric), + WebSocket: make(map[string]ReportMetric), + Custom: make(map[string]ReportMetric), } } diff --git a/output/summary/report.go b/output/summary/report.go index 3e56f02b90d..d2f9d80894a 100644 --- a/output/summary/report.go +++ b/output/summary/report.go @@ -185,7 +185,7 @@ func populateReportGroup( case isWebVitalsMetric(info.Name): dest.WebVitals[info.Name] = reportMetric default: - dest.Miscellaneous[info.Name] = reportMetric + dest.Custom[info.Name] = reportMetric } } diff --git a/output/summary/summary.go b/output/summary/summary.go index 069e25a5a2b..f079c72d7fe 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -81,7 +81,7 @@ func (o *Output) flushSample(sample metrics.Sample) { // Then, if the extended mode is enabled, the sample data is stored into each group metrics. // However, we need to determine whether the groups tree is within a scenario or not. 
groupData := o.dataModel.aggregatedGroupData - if scenarioName, hasScenario := sample.Tags.Get("scenario"); hasScenario { + if scenarioName, hasScenario := sample.Tags.Get("scenario"); hasScenario && scenarioName != "default" { groupData = o.dataModel.groupDataFor(scenarioName) groupData.addSample(sample) } diff --git a/playground/full-summary/grpc.js b/playground/full-summary/grpc.js index f218cf5b3cc..f5da2769dc4 100644 --- a/playground/full-summary/grpc.js +++ b/playground/full-summary/grpc.js @@ -2,7 +2,7 @@ import grpc from 'k6/net/grpc'; import {check} from 'k6' const GRPC_ADDR = __ENV.GRPC_ADDR || '127.0.0.1:10000'; -const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../../lib/testutils/grpcservice/route_guide.proto'; +const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../../internal/lib/testutils/grpcservice/route_guide.proto'; let client = new grpc.Client(); From f0aa13a7ef435237bef231e2dc3a83eb4f4ef90c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Thu, 30 Jan 2025 12:00:01 +0100 Subject: [PATCH 27/42] Align metrics sections within the same block --- internal/js/summary.js | 148 ++++++++++++++++++++++------------------- 1 file changed, 81 insertions(+), 67 deletions(-) diff --git a/internal/js/summary.js b/internal/js/summary.js index 38a158788e8..b3a218be54f 100644 --- a/internal/js/summary.js +++ b/internal/js/summary.js @@ -74,7 +74,6 @@ function humanizeValue(val, metric, timeUnit) { * @property {boolean} ok - Whether the threshold was satisfied or not. */ -// FIXME (@oleiade): Could use a better name as it's not really a group in the k6 sense? /** * @typedef {Object} ReportGroup * @property {ReportChecks} checks - The checks report. @@ -103,16 +102,9 @@ function humanizeValue(val, metric, timeUnit) { * @property {Record} miscelaneous - The custom metrics. */ -/** - * @typedef {Object} ReportChecksMetrics - * @property {ReportMetric[]} total - The total metrics. - * @property {ReportMetric} success - The successful metrics. - * @property {ReportMetric} fail - The failed metrics. - */ - /** * @typedef {Object} ReportChecks - * @property {ReportChecksMetrics} metrics - The metrics for checks. + * @property {Record} metrics - The metrics for checks. * @property {EngineCheck[]} ordered_checks - The ordered checks. */ @@ -242,7 +234,7 @@ class ReportBuilder { /** * Adds groups sections to the report. * - * @param {Record} groups + * @param {Record} groups - The groups to add to the report. * @returns {ReportBuilder} */ addGroups(groups) { @@ -311,7 +303,7 @@ class ReportBuilder { // Implement threshold rendering logic return renderThresholds( - { metrics: this._processThresholds(thresholds) }, + this._processThresholds(thresholds), this.formatter, renderContext, this.options, @@ -333,8 +325,8 @@ class ReportBuilder { } /** - * @param {ReportMetrics} metrics - * @param {RenderContext} [renderContext] + * @param {ReportMetrics} metrics - The metrics to render. + * @param {RenderContext} [renderContext] - The render context to use for text rendering. 
* @returns {string[]} * @private */ @@ -342,6 +334,19 @@ class ReportBuilder { renderContext = renderContext || this.renderContext; renderContext = renderContext.indentedContext(1); + // Collect all metrics into a single object, so we can precompute all formatting information + const allMetrics = Object.entries(metrics).reduce((acc, [_, metrics]) => { + Object.assign(acc, metrics); + return acc; + }, {}); + + // Precompute all formatting information + const summaryInfo = computeSummaryInfo( + allMetrics, + renderContext, + this.options, + ); + // Implement metrics rendering logic return Object.entries(metrics) .filter( @@ -357,7 +362,8 @@ class ReportBuilder { this.formatter.boldify(sectionName.toUpperCase()), ), ...renderMetrics( - { metrics: sectionMetrics }, + sectionMetrics, + summaryInfo, this.formatter, renderContext, this.options, @@ -368,7 +374,7 @@ class ReportBuilder { /** * @param {ReportGroup} group - The group data to render. - * @param {RenderContext} [renderContext] + * @param {RenderContext} [renderContext] - The render context to use for text rendering. * @returns {string[]} * @private */ @@ -384,8 +390,8 @@ class ReportBuilder { } /** - * @param {ReportGroup} scenarioData - * @param {RenderContext} [renderContext] + * @param {ReportGroup} scenarioData - The scenario data to render. + * @param {RenderContext} [renderContext] - The render context to use for text rendering. * @returns {string[]} * @private */ @@ -403,8 +409,8 @@ class ReportBuilder { } /** - * @param {Record} groups - * @param {RenderContext} [renderContext] + * @param {Record} groups - The nested groups data to render. + * @param {RenderContext} [renderContext] - The render context to use for text rendering. * @returns {string[]} * @private */ @@ -418,6 +424,7 @@ class ReportBuilder { .flatMap(([groupName, groupData]) => [ renderTitle(`GROUP: ${groupName}`, this.formatter, renderContext, { prefix: subtitlePrefix, + suffix: '\n', }), ...this._renderGroupContent(groupData, renderContext), ]); @@ -426,7 +433,7 @@ class ReportBuilder { // Private rendering methods /** * - * @param {ReportMetricThresholds} thresholds + * @param {ReportMetricThresholds} thresholds - The thresholds data to render. * @returns {Record} * @private */ @@ -499,6 +506,7 @@ class RenderContext { class ANSIFormatter { /** * Constructs an ANSIFormatter with configurable color and styling options + * * @param {Object} options - Configuration options for formatting * @param {boolean} [options.enableColors=true] - Whether to enable color output */ @@ -511,6 +519,7 @@ class ANSIFormatter { /** * Decorates text with ANSI color and style. + * * @param {string} text - The text to decorate. * @param {ANSIColor} color - The ANSI color to apply. * @param {...ANSIStyle} styles - optional additional styles to apply. @@ -537,6 +546,7 @@ class ANSIFormatter { /** * Applies bold styling to text + * * @param {string} text - Text to make bold * @returns {string} Bold text */ @@ -688,48 +698,53 @@ function renderChecks(checks, formatter, renderContext, options = {}) { }) .map((check) => renderCheck(check, formatter, renderContext)); + // Precompute all formatting information + const summaryInfo = computeSummaryInfo( + checks.metrics, + renderContext, + options, + ); + // Render metrics for checks if they exist const checkMetrics = checks.metrics - ? renderMetrics({ metrics: checks.metrics }, formatter, renderContext, { - ...options, - sortByName: false, - }) + ? 
renderMetrics(checks.metrics, summaryInfo, formatter, renderContext, { + ...options, + sortByName: false, + }) : []; // Combine metrics and checks return [...checkMetrics, '\n', ...renderedChecks]; } -//FIXME (@oleiade): We should clarify the data argument's type and give it a better name and typedef /** * Summarizes metrics into an array of formatted lines ready to be printed to stdout. * - * @param {ReportChecks} data - The data object containing metrics. + * @param {Record} metrics - The data object containing metrics. + * @param {SummaryInfo} summaryInfo - An object containing summary information such as maximum name width and trend columns. * @param {ANSIFormatter} formatter - An ANSIFormatter function for ANSI colors. * @param {RenderContext} renderContext - The render context to use for text rendering. * @param {Options} options - Display options merged with defaultOptions. * @returns {string[]} */ -function renderMetrics(data, formatter, renderContext, options) { +function renderMetrics( + metrics, + summaryInfo, + formatter, + renderContext, + options, +) { // Extract all metric names - let metricNames = Object.keys(data.metrics); + let metricNames = Object.keys(metrics); // If sorting by name is required, do it now if (options.sortByName) { metricNames = sortMetricsByName(metricNames); } - // Precompute all formatting information - const summaryInfo = computeSummaryInfo( - metricNames, - data, - renderContext, - options, - ); - // Format each metric line return metricNames.map((name) => { - const metric = data.metrics[name]; + const metric = metrics[name]; return renderMetricLine( name, metric, @@ -744,28 +759,22 @@ function renderMetrics(data, formatter, renderContext, options) { /** * Renders each thresholds results into a formatted set of lines ready for display in the terminal. * - * Thresholds are rendered in the format: - * {metric/submetric}...: {value} {extra} - * {SATISFIED|UNSATISFIED} {source} - * //... additional threshold lines - * - * @param {ReportData} data - The data containing metrics. + * @param {Record} metrics - The data object containing metrics. * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. * @param {RenderContext} renderContext - The render context to use for text rendering. * @param {Object} options - Options merged with defaults. * @returns {string[]} - Array of formatted lines including threshold statuses. */ -function renderThresholds(data, formatter, renderContext, options) { +function renderThresholds(metrics, formatter, renderContext, options) { // Extract and optionally sort metric names - let metricNames = Object.keys(data.metrics); + let metricNames = Object.keys(metrics); if (options.sortByName) { metricNames = sortMetricsByName(metricNames); } // Precompute all formatting information const summaryInfo = computeSummaryInfo( - metricNames, - data, + metrics, renderContext, options, ); @@ -776,7 +785,7 @@ function renderThresholds(data, formatter, renderContext, options) { for (const name of metricNames) { const parentName = name.split('{', 1)[0]; const isSubmetric = name.length > parentName.length; - const parentMetricExists = !!data.metrics[parentName]; + const parentMetricExists = !!metrics[parentName]; const innerContext = (isSubmetric && parentMetricExists) ? 
renderContext.indentedContext() @@ -791,7 +800,7 @@ function renderThresholds(data, formatter, renderContext, options) { ); result.push(line); - const metric = data.metrics[name]; + const metric = metrics[name]; if (metric.thresholds) { const thresholdLines = renderThresholdResults( metric, @@ -810,14 +819,14 @@ function renderThresholds(data, formatter, renderContext, options) { * Renders each threshold result into a formatted set of lines ready for display in the terminal. * * @param {ReportMetric} metric - The metric with the thresholds to render. - * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. + * @param {SummaryInfo} summaryInfo - An object containing summary information such as maximum name width and trend columns. * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. * @param {RenderContext} renderContext - The render context to use for text rendering. * @returns {string[]} - An array of formatted lines including threshold statuses. */ function renderThresholdResults( metric, - info, + summaryInfo, formatter, renderContext, ) { @@ -837,7 +846,7 @@ function renderThresholdResults( const metricValueText = renderMetricValueForThresholds( metric, threshold, - info, + summaryInfo, formatter, ) @@ -893,12 +902,12 @@ function renderMetricLine( /** * Formats a metric or submetric line for the thresholds' section output. * - * @param {string} name - name of the metric - * @param {string} parentName - name of the parent metric - * @param {boolean} isSubmetric - whether the metric is a submetric - * @param {boolean} parentMetricExists - in case of submetric, whether the parent metric exists - * @param {RenderContext} renderContext - render context - * @returns {string} submetric report line in the form: `{submetric name}...: {value} {extra}` + * @param {string} name - The name of the metric + * @param {string} parentName - The name of the parent metric + * @param {boolean} isSubmetric - Whether the metric is a submetric + * @param {boolean} parentMetricExists - In case of submetric, whether the parent metric exists + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @returns {string} - The metric name report line */ function renderMetricNameForThresholds( name, @@ -920,11 +929,11 @@ function renderMetricNameForThresholds( /** * Formats the metric's value for the thresholds' section output. * - * @param {ReportMetric} metric - the metric - * @param {EngineThreshold} threshold - the threshold + * @param {ReportMetric} metric - The metric for which value will be rendered. + * @param {EngineThreshold} threshold - The threshold to use for rendering. * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. - * @returns {string} metric's value line in the form: `{agg}={value}` + * @returns {string} - The metric's value line in the form: `{agg}={value}` */ function renderMetricValueForThresholds( metric, @@ -1039,13 +1048,12 @@ function renderTrendValue(value, stat, metric, options) { * @property {number} maxNonTrendValueLen - The maximum non-trend value length. * @property {number[]} nonTrendExtraMaxLens - The non-trend extra maximum lengths. * - * @param {string[]} metricNames - * @param {ReportData} data - The data object containing metrics. + * @param {Record} metrics - The data object containing metrics. 
* @param {RenderContext} renderContext - The render context to use for text rendering. - * @param {Options} options + * @param {Options} options - Display options merged with defaultOptions. * @returns {SummaryInfo} */ -function computeSummaryInfo(metricNames, data, renderContext, options) { +function computeSummaryInfo(metrics, renderContext, options) { const trendStats = options.summaryTrendStats; const numTrendColumns = trendStats.length; @@ -1060,8 +1068,14 @@ function computeSummaryInfo(metricNames, data, renderContext, options) { // Initialize tracking arrays for trend widths const trendColMaxLens = new Array(numTrendColumns).fill(0); + let metricNames = Object.keys(metrics); + // If sorting by name is required, do it now + if (options.sortByName) { + metricNames = sortMetricsByName(metricNames); + } + for (const name of metricNames) { - const metric = data.metrics[name]; + const metric = metrics[name]; const displayName = renderContext.indent( name + renderMetricDisplayName(name), ); @@ -1121,7 +1135,7 @@ function computeSummaryInfo(metricNames, data, renderContext, options) { } /** - * Sorts metrics by name, keeping submetrics grouped with their parent metrics. + * Sorts metrics by name, keeping sub-metrics grouped with their parent metrics. * * @param {string[]} metricNames - The metric names to sort. * @returns {string[]} - The sorted metric names. From 28fd762c3a1fbc36764bdc93c61e7c32785fd7b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Mon, 3 Feb 2025 11:42:15 +0100 Subject: [PATCH 28/42] Replace --summary-extended with --with-summary --- internal/cmd/run.go | 10 +--- internal/cmd/runtime_options.go | 12 +++- lib/runner.go | 12 +--- lib/runtime_options.go | 12 ++-- lib/summary.go | 100 ++++++++++++++++++++++++++++++++ output/summary/summary.go | 21 ++++--- 6 files changed, 132 insertions(+), 35 deletions(-) create mode 100644 lib/summary.go diff --git a/internal/cmd/run.go b/internal/cmd/run.go index d694386647b..9783b9cd799 100644 --- a/internal/cmd/run.go +++ b/internal/cmd/run.go @@ -193,14 +193,10 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { if !testRunState.RuntimeOptions.NoSummary.Bool { // Instantiates the summary output summaryOutput, err := summary.New(output.Params{ - Logger: c.gs.Logger, + RuntimeOptions: testRunState.RuntimeOptions, + Logger: c.gs.Logger, }) - if err == nil { - isSummaryExtended := testRunState.RuntimeOptions.SummaryExtended - if isSummaryExtended.Valid && isSummaryExtended.Bool { - summaryOutput.EnableExtendedMode() - } - } else { + if err != nil { logger.WithError(err).Error("failed to initialize the end-of-test summary output") } outputs = append(outputs, summaryOutput) diff --git a/internal/cmd/runtime_options.go b/internal/cmd/runtime_options.go index 174d4a6d8e7..12fb856dd8a 100644 --- a/internal/cmd/runtime_options.go +++ b/internal/cmd/runtime_options.go @@ -32,7 +32,7 @@ experimental_enhanced: esbuild-based transpiling for TypeScript and ES6+ support flags.StringArrayP("env", "e", nil, "add/override environment variable with `VAR=value`") flags.Bool("no-thresholds", false, "don't run thresholds") flags.Bool("no-summary", false, "don't show the summary at the end of the test") - flags.Bool("summary-extended", false, "show an extended summary at the end of the test") + flags.String("with-summary", lib.SummaryModeCompact.String(), "determine the summary mode, \"compact\", \"full\" or \"legacy\"") flags.String( "summary-export", "", @@ -68,7 +68,7 @@ func 
getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib CompatibilityMode: getNullString(flags, "compatibility-mode"), NoThresholds: getNullBool(flags, "no-thresholds"), NoSummary: getNullBool(flags, "no-summary"), - SummaryExtended: getNullBool(flags, "summary-extended"), + SummaryMode: getNullString(flags, "with-summary"), SummaryExport: getNullString(flags, "summary-export"), TracesOutput: getNullString(flags, "traces-output"), Env: make(map[string]string), @@ -87,6 +87,14 @@ func getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib return opts, err } + if envVar, ok := environment["K6_WITH_SUMMARY"]; ok && !opts.SummaryMode.Valid { + opts.SummaryMode = null.StringFrom(envVar) + } + if _, err := lib.ValidateSummaryMode(opts.SummaryMode.String); err != nil { + // some early validation + return opts, err + } + if err := saveBoolFromEnv(environment, "K6_INCLUDE_SYSTEM_ENV_VARS", &opts.IncludeSystemEnvVars); err != nil { return opts, err } diff --git a/lib/runner.go b/lib/runner.go index 8b46b7c5155..3e4b4a6850c 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -3,8 +3,7 @@ package lib import ( "context" "io" - "time" - + "go.k6.io/k6/metrics" ) @@ -90,12 +89,3 @@ type UIState struct { IsStdOutTTY bool IsStdErrTTY bool } - -// Summary contains all of the data the summary handler gets. -type Summary struct { - Metrics map[string]*metrics.Metric - RootGroup *Group - TestRunDuration time.Duration // TODO: use lib.ExecutionState-based interface instead? - NoColor bool // TODO: drop this when noColor is part of the (runtime) options - UIState UIState -} diff --git a/lib/runtime_options.go b/lib/runtime_options.go index 7099dc44cad..f4287a96f52 100644 --- a/lib/runtime_options.go +++ b/lib/runtime_options.go @@ -39,12 +39,12 @@ type RuntimeOptions struct { // Environment variables passed onto the runner Env map[string]string `json:"env"` - NoThresholds null.Bool `json:"noThresholds"` - NoSummary null.Bool `json:"noSummary"` - SummaryExtended null.Bool `json:"summaryExtended"` - SummaryExport null.String `json:"summaryExport"` - KeyWriter null.String `json:"-"` - TracesOutput null.String `json:"tracesOutput"` + NoThresholds null.Bool `json:"noThresholds"` + NoSummary null.Bool `json:"noSummary"` + SummaryMode null.String `json:"summaryMode"` + SummaryExport null.String `json:"summaryExport"` + KeyWriter null.String `json:"-"` + TracesOutput null.String `json:"tracesOutput"` } // ValidateCompatibilityMode checks if the provided val is a valid compatibility mode diff --git a/lib/summary.go b/lib/summary.go new file mode 100644 index 00000000000..d253ef2442f --- /dev/null +++ b/lib/summary.go @@ -0,0 +1,100 @@ +package lib + +import ( + "errors" + "time" + + "go.k6.io/k6/metrics" +) + +// Summary contains all of the data the summary handler gets. +type Summary struct { + Metrics map[string]*metrics.Metric + RootGroup *Group + TestRunDuration time.Duration // TODO: use lib.ExecutionState-based interface instead? + NoColor bool // TODO: drop this when noColor is part of the (runtime) options + UIState UIState +} + +// A SummaryMode specifies the mode of the Summary, +// which defines how the end-of-test summary will be rendered. +type SummaryMode int + +// Possible values for SummaryMode. +const ( + SummaryModeCompact = SummaryMode(iota) // Compact mode that only displays the total results. + SummaryModeFull // Extended mode that displays the total and also partial (per-group, etc.) results. + SummaryModeLegacy // Legacy mode, used for backwards compatibility. 
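+ // Illustrative mapping from CLI/env values to modes (values taken from the
+ // --with-summary flag and K6_WITH_SUMMARY handling above; the invocations shown
+ // here are only a sketch):
+ //   k6 run script.js --with-summary=compact   -> SummaryModeCompact (default)
+ //   k6 run script.js --with-summary=full      -> SummaryModeFull
+ //   K6_WITH_SUMMARY=legacy k6 run script.js   -> SummaryModeLegacy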
+) + +// ErrInvalidSummaryMode indicates the serialized summary mode is invalid. +var ErrInvalidSummaryMode = errors.New("invalid summary mode") + +const ( + summaryCompactString = "compact" + summaryFullString = "full" + summaryLegacyString = "legacy" +) + +// MarshalJSON serializes a MetricType as a human readable string. +func (m SummaryMode) MarshalJSON() ([]byte, error) { + txt, err := m.MarshalText() + if err != nil { + return nil, err + } + return []byte(`"` + string(txt) + `"`), nil +} + +// MarshalText serializes a MetricType as a human readable string. +func (m SummaryMode) MarshalText() ([]byte, error) { + switch m { + case SummaryModeCompact: + return []byte(summaryCompactString), nil + case SummaryModeFull: + return []byte(summaryFullString), nil + case SummaryModeLegacy: + return []byte(summaryLegacyString), nil + default: + return nil, ErrInvalidSummaryMode + } +} + +// UnmarshalText deserializes a MetricType from a string representation. +func (m *SummaryMode) UnmarshalText(data []byte) error { + switch string(data) { + case summaryCompactString: + *m = SummaryModeCompact + case summaryFullString: + *m = SummaryModeFull + case summaryLegacyString: + *m = SummaryModeLegacy + default: + return ErrInvalidSummaryMode + } + + return nil +} + +func (m SummaryMode) String() string { + switch m { + case SummaryModeCompact: + return summaryCompactString + case SummaryModeFull: + return summaryFullString + case SummaryModeLegacy: + return summaryLegacyString + default: + return "[INVALID]" + } +} + +// ValidateSummaryMode checks if the provided val is a valid summary mode +func ValidateSummaryMode(val string) (sm SummaryMode, err error) { + if val == "" { + return SummaryModeCompact, nil + } + if err = sm.UnmarshalText([]byte(val)); err != nil { + return 0, err + } + return +} diff --git a/output/summary/summary.go b/output/summary/summary.go index f079c72d7fe..8e9fdd2c59f 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -23,18 +23,23 @@ type Output struct { periodicFlusher *output.PeriodicFlusher logger logrus.FieldLogger - dataModel dataModel - extendedModeEnabled bool + dataModel dataModel + summaryMode lib.SummaryMode } // New returns a new JSON output. func New(params output.Params) (*Output, error) { + sm, err := lib.ValidateSummaryMode(params.RuntimeOptions.SummaryMode.String) + if err != nil { + return nil, err + } + return &Output{ logger: params.Logger.WithFields(logrus.Fields{ "output": "summary", }), - dataModel: newDataModel(), - extendedModeEnabled: false, + dataModel: newDataModel(), + summaryMode: sm, }, nil } @@ -57,10 +62,6 @@ func (o *Output) Stop() error { return nil } -func (o *Output) EnableExtendedMode() { - o.extendedModeEnabled = true -} - func (o *Output) flushMetrics() { samples := o.GetBufferedSamples() for _, sc := range samples { @@ -74,7 +75,9 @@ func (o *Output) flushMetrics() { func (o *Output) flushSample(sample metrics.Sample) { // First, the sample data is stored into the metrics stored at the k6 metrics registry level. 
o.storeSample(sample) - if !o.extendedModeEnabled { + + skipGroupSamples := o.summaryMode == lib.SummaryModeCompact || o.summaryMode == lib.SummaryModeLegacy + if skipGroupSamples { return } From 27769d873391af3870a4165d38a3f6f18520b01b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Mon, 3 Feb 2025 11:57:43 +0100 Subject: [PATCH 29/42] Consistent naming: report => summary --- internal/cmd/run.go | 6 +- internal/js/runner.go | 2 +- internal/js/summary.go | 2 +- internal/js/summary_test.go | 6 +- .../lib/testutils/minirunner/minirunner.go | 4 +- lib/report.go | 184 ---------------- lib/runner.go | 4 +- lib/summary.go | 196 +++++++++++++++++- output/summary/{report.go => data.go} | 70 +++---- output/summary/summary.go | 22 +- 10 files changed, 244 insertions(+), 252 deletions(-) delete mode 100644 lib/report.go rename output/summary/{report.go => data.go} (82%) diff --git a/internal/cmd/run.go b/internal/cmd/run.go index 9783b9cd799..d46a3604d90 100644 --- a/internal/cmd/run.go +++ b/internal/cmd/run.go @@ -205,7 +205,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { defer func() { logger.Debug("Generating the end-of-test summary...") - testSummary := &lib.Summary{ + legacySummary := &lib.LegacySummary{ Metrics: metricsEngine.ObservedMetrics, RootGroup: testRunState.GroupSummary.Group(), TestRunDuration: executionState.GetCurrentTestRunDuration(), @@ -216,9 +216,9 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { }, } - report := summaryOutput.MetricsReport(testSummary, test.initRunner.GetOptions()) + summary := summaryOutput.Summary(executionState, test.initRunner.GetOptions()) - summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, testSummary, report) + summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, legacySummary, summary) if hsErr == nil { hsErr = handleSummaryResult(c.gs.FS, c.gs.Stdout, c.gs.Stderr, summaryResult) } diff --git a/internal/js/runner.go b/internal/js/runner.go index d414be6f781..ae371db822a 100644 --- a/internal/js/runner.go +++ b/internal/js/runner.go @@ -349,7 +349,7 @@ func (r *Runner) IsExecutable(name string) bool { } // HandleSummary calls the specified summary callback, if supplied. -func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary, report lib.Report) (map[string]io.Reader, error) { +func (r *Runner) HandleSummary(ctx context.Context, summary *lib.LegacySummary, report lib.Summary) (map[string]io.Reader, error) { out := make(chan metrics.SampleContainer, 100) defer close(out) diff --git a/internal/js/summary.go b/internal/js/summary.go index d7452244b0e..46138bb9b3b 100644 --- a/internal/js/summary.go +++ b/internal/js/summary.go @@ -55,7 +55,7 @@ func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Durat // summarizeMetricsToObject transforms the summary objects in a way that's // suitable to pass to the JS runtime or export to JSON. 
-func summarizeMetricsToObject(data *lib.Summary, options lib.Options, setupData []byte) map[string]interface{} { +func summarizeMetricsToObject(data *lib.LegacySummary, options lib.Options, setupData []byte) map[string]interface{} { m := make(map[string]interface{}) m["root_group"] = exportGroup(data.RootGroup) m["options"] = map[string]interface{}{ diff --git a/internal/js/summary_test.go b/internal/js/summary_test.go index 2a2da6c508f..1b6a4509cdb 100644 --- a/internal/js/summary_test.go +++ b/internal/js/summary_test.go @@ -102,7 +102,7 @@ func TestTextSummaryWithSubMetrics(t *testing.T) { subMetricPost.Name: subMetricPost.Metric, } - summary := &lib.Summary{ + summary := &lib.LegacySummary{ Metrics: metrics, RootGroup: &lib.Group{}, TestRunDuration: time.Second, @@ -205,9 +205,9 @@ func createTestMetrics(t *testing.T) (map[string]*metrics.Metric, *lib.Group) { return testMetrics, rootG } -func createTestSummary(t *testing.T) *lib.Summary { +func createTestSummary(t *testing.T) *lib.LegacySummary { metrics, rootG := createTestMetrics(t) - return &lib.Summary{ + return &lib.LegacySummary{ Metrics: metrics, RootGroup: rootG, TestRunDuration: time.Second, diff --git a/internal/lib/testutils/minirunner/minirunner.go b/internal/lib/testutils/minirunner/minirunner.go index a016e2e4379..8559c0160af 100644 --- a/internal/lib/testutils/minirunner/minirunner.go +++ b/internal/lib/testutils/minirunner/minirunner.go @@ -24,7 +24,7 @@ type MiniRunner struct { Fn func(ctx context.Context, state *lib.State, out chan<- metrics.SampleContainer) error SetupFn func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) TeardownFn func(ctx context.Context, out chan<- metrics.SampleContainer) error - HandleSummaryFn func(context.Context, *lib.Summary) (map[string]io.Reader, error) + HandleSummaryFn func(context.Context, *lib.LegacySummary) (map[string]io.Reader, error) SetupData []byte @@ -108,7 +108,7 @@ func (r *MiniRunner) SetOptions(opts lib.Options) error { } // HandleSummary calls the specified summary callback, if supplied. 
-func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.Summary, _ lib.Report) (map[string]io.Reader, error) { +func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.LegacySummary, _ lib.Summary) (map[string]io.Reader, error) { if r.HandleSummaryFn != nil { return r.HandleSummaryFn(ctx, s) } diff --git a/lib/report.go b/lib/report.go deleted file mode 100644 index 049d1057be8..00000000000 --- a/lib/report.go +++ /dev/null @@ -1,184 +0,0 @@ -package lib - -import ( - "time" - - "go.k6.io/k6/metrics" -) - -type Report struct { - ReportThresholds `js:"thresholds"` - ReportGroup - Scenarios map[string]ReportGroup -} - -func NewReport() Report { - return Report{ - ReportThresholds: NewReportThresholds(), - ReportGroup: ReportGroup{ - Metrics: NewReportMetrics(), - Groups: make(map[string]ReportGroup), - }, - Scenarios: make(map[string]ReportGroup), - } -} - -type ReportMetricInfo struct { - Name string - Type string - Contains string -} - -type ReportMetric struct { - ReportMetricInfo - Values map[string]float64 -} - -func NewReportMetricFrom( - info ReportMetricInfo, sink metrics.Sink, - testDuration time.Duration, summaryTrendStats []string, -) ReportMetric { - // TODO: we obtain this from [options.SummaryTrendStats] which is a string slice - getMetricValues := metricValueGetter(summaryTrendStats) - - return ReportMetric{ - ReportMetricInfo: info, - Values: getMetricValues(sink, testDuration), - } -} - -type ReportMetrics struct { - // HTTP contains report data specific to HTTP metrics and is used - // to produce the summary HTTP subsection's content. - HTTP map[string]ReportMetric - // Execution contains report data specific to Execution metrics and is used - // to produce the summary Execution subsection's content. - Execution map[string]ReportMetric - // Network contains report data specific to Network metrics and is used - // to produce the summary Network subsection's content. 
- Network map[string]ReportMetric - - Browser map[string]ReportMetric - - WebVitals map[string]ReportMetric - - Grpc map[string]ReportMetric - - WebSocket map[string]ReportMetric `js:"websocket"` - - // Custom contains user-defined metric results as well as extensions metrics - Custom map[string]ReportMetric -} - -func NewReportMetrics() ReportMetrics { - return ReportMetrics{ - HTTP: make(map[string]ReportMetric), - Execution: make(map[string]ReportMetric), - Network: make(map[string]ReportMetric), - Browser: make(map[string]ReportMetric), - WebVitals: make(map[string]ReportMetric), - Grpc: make(map[string]ReportMetric), - WebSocket: make(map[string]ReportMetric), - Custom: make(map[string]ReportMetric), - } -} - -type ReportChecksMetrics struct { - Total ReportMetric `js:"checks_total"` - Success ReportMetric `js:"checks_succeeded"` - Fail ReportMetric `js:"checks_failed"` -} - -type ReportChecks struct { - Metrics ReportChecksMetrics - OrderedChecks []*Check -} - -func NewReportChecks() *ReportChecks { - initChecksMetricData := func(name string, t metrics.MetricType) ReportMetric { - return ReportMetric{ - ReportMetricInfo: ReportMetricInfo{ - Name: name, - Type: t.String(), - Contains: metrics.Default.String(), - }, - Values: make(map[string]float64), - } - } - - return &ReportChecks{ - Metrics: ReportChecksMetrics{ - Total: initChecksMetricData("checks_total", metrics.Counter), - Success: initChecksMetricData("checks_succeeded", metrics.Rate), - Fail: initChecksMetricData("checks_failed", metrics.Rate), - }, - } -} - -type ReportThreshold struct { - Source string `js:"source"` - Ok bool `js:"ok"` -} - -type MetricThresholds struct { - Metric ReportMetric `js:"metric"` - Thresholds []ReportThreshold `js:"thresholds"` -} - -type ReportThresholds map[string]MetricThresholds - -func NewReportThresholds() ReportThresholds { - thresholds := make(ReportThresholds) - return thresholds -} - -// FIXME (@oleiade): While writing JSDOC I found the name ambiguous, should we rename it? -type ReportGroup struct { - Checks *ReportChecks // Not always present, thus we use a pointer. 
- Metrics ReportMetrics - Groups map[string]ReportGroup -} - -func NewReportGroup() ReportGroup { - return ReportGroup{ - Metrics: NewReportMetrics(), - Groups: make(map[string]ReportGroup), - } -} - -func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Duration) map[string]float64 { - trendResolvers, err := metrics.GetResolversForTrendColumns(summaryTrendStats) - if err != nil { - panic(err.Error()) // this should have been validated already - } - - return func(sink metrics.Sink, t time.Duration) (result map[string]float64) { - switch sink := sink.(type) { - case *metrics.CounterSink: - result = sink.Format(t) - result["rate"] = calculateCounterRate(sink.Value, t) - case *metrics.GaugeSink: - result = sink.Format(t) - result["min"] = sink.Min - result["max"] = sink.Max - case *metrics.RateSink: - result = sink.Format(t) - result["passes"] = float64(sink.Trues) - result["fails"] = float64(sink.Total - sink.Trues) - case *metrics.TrendSink: - result = make(map[string]float64, len(summaryTrendStats)) - for _, col := range summaryTrendStats { - result[col] = trendResolvers[col](sink) - } - } - - return result - } -} - -func calculateCounterRate(count float64, duration time.Duration) float64 { - if duration == 0 { - return 0 - } - return count / (float64(duration) / float64(time.Second)) -} diff --git a/lib/runner.go b/lib/runner.go index 3e4b4a6850c..3bf7e033232 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -3,7 +3,7 @@ package lib import ( "context" "io" - + "go.k6.io/k6/metrics" ) @@ -80,7 +80,7 @@ type Runner interface { // function in the script. IsExecutable(string) bool - HandleSummary(context.Context, *Summary, Report) (map[string]io.Reader, error) + HandleSummary(context.Context, *LegacySummary, Summary) (map[string]io.Reader, error) } // UIState describes the state of the UI, which might influence what diff --git a/lib/summary.go b/lib/summary.go index d253ef2442f..39b67b5709d 100644 --- a/lib/summary.go +++ b/lib/summary.go @@ -3,19 +3,10 @@ package lib import ( "errors" "time" - + "go.k6.io/k6/metrics" ) -// Summary contains all of the data the summary handler gets. -type Summary struct { - Metrics map[string]*metrics.Metric - RootGroup *Group - TestRunDuration time.Duration // TODO: use lib.ExecutionState-based interface instead? - NoColor bool // TODO: drop this when noColor is part of the (runtime) options - UIState UIState -} - // A SummaryMode specifies the mode of the Summary, // which defines how the end-of-test summary will be rendered. 
type SummaryMode int @@ -98,3 +89,188 @@ func ValidateSummaryMode(val string) (sm SummaryMode, err error) { } return } + +type Summary struct { + SummaryThresholds `js:"thresholds"` + SummaryGroup + Scenarios map[string]SummaryGroup +} + +func NewSummary() Summary { + return Summary{ + SummaryThresholds: NewSummaryThresholds(), + SummaryGroup: SummaryGroup{ + Metrics: NewSummaryMetrics(), + Groups: make(map[string]SummaryGroup), + }, + Scenarios: make(map[string]SummaryGroup), + } +} + +type SummaryMetricInfo struct { + Name string + Type string + Contains string +} + +type SummaryMetric struct { + SummaryMetricInfo + Values map[string]float64 +} + +func NewSummaryMetricFrom( + info SummaryMetricInfo, sink metrics.Sink, + testDuration time.Duration, summaryTrendStats []string, +) SummaryMetric { + // TODO: we obtain this from [options.SummaryTrendStats] which is a string slice + getMetricValues := metricValueGetter(summaryTrendStats) + + return SummaryMetric{ + SummaryMetricInfo: info, + Values: getMetricValues(sink, testDuration), + } +} + +type SummaryMetrics struct { + // HTTP contains summary data specific to HTTP metrics and is used + // to produce the summary HTTP subsection's content. + HTTP map[string]SummaryMetric + // Execution contains summary data specific to Execution metrics and is used + // to produce the summary Execution subsection's content. + Execution map[string]SummaryMetric + // Network contains summary data specific to Network metrics and is used + // to produce the summary Network subsection's content. + Network map[string]SummaryMetric + + Browser map[string]SummaryMetric + + WebVitals map[string]SummaryMetric + + Grpc map[string]SummaryMetric + + WebSocket map[string]SummaryMetric `js:"websocket"` + + // Custom contains user-defined metric results as well as extensions metrics + Custom map[string]SummaryMetric +} + +func NewSummaryMetrics() SummaryMetrics { + return SummaryMetrics{ + HTTP: make(map[string]SummaryMetric), + Execution: make(map[string]SummaryMetric), + Network: make(map[string]SummaryMetric), + Browser: make(map[string]SummaryMetric), + WebVitals: make(map[string]SummaryMetric), + Grpc: make(map[string]SummaryMetric), + WebSocket: make(map[string]SummaryMetric), + Custom: make(map[string]SummaryMetric), + } +} + +type SummaryChecksMetrics struct { + Total SummaryMetric `js:"checks_total"` + Success SummaryMetric `js:"checks_succeeded"` + Fail SummaryMetric `js:"checks_failed"` +} + +type SummaryChecks struct { + Metrics SummaryChecksMetrics + OrderedChecks []*Check +} + +func NewSummaryChecks() *SummaryChecks { + initChecksMetricData := func(name string, t metrics.MetricType) SummaryMetric { + return SummaryMetric{ + SummaryMetricInfo: SummaryMetricInfo{ + Name: name, + Type: t.String(), + Contains: metrics.Default.String(), + }, + Values: make(map[string]float64), + } + } + + return &SummaryChecks{ + Metrics: SummaryChecksMetrics{ + Total: initChecksMetricData("checks_total", metrics.Counter), + Success: initChecksMetricData("checks_succeeded", metrics.Rate), + Fail: initChecksMetricData("checks_failed", metrics.Rate), + }, + } +} + +type SummaryThreshold struct { + Source string `js:"source"` + Ok bool `js:"ok"` +} + +type MetricThresholds struct { + Metric SummaryMetric `js:"metric"` + Thresholds []SummaryThreshold `js:"thresholds"` +} + +type SummaryThresholds map[string]MetricThresholds + +func NewSummaryThresholds() SummaryThresholds { + thresholds := make(SummaryThresholds) + return thresholds +} + +type SummaryGroup struct { + Checks 
*SummaryChecks // Not always present, thus we use a pointer. + Metrics SummaryMetrics + Groups map[string]SummaryGroup +} + +func NewSummaryGroup() SummaryGroup { + return SummaryGroup{ + Metrics: NewSummaryMetrics(), + Groups: make(map[string]SummaryGroup), + } +} + +func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Duration) map[string]float64 { + trendResolvers, err := metrics.GetResolversForTrendColumns(summaryTrendStats) + if err != nil { + panic(err.Error()) // this should have been validated already + } + + return func(sink metrics.Sink, t time.Duration) (result map[string]float64) { + switch sink := sink.(type) { + case *metrics.CounterSink: + result = sink.Format(t) + result["rate"] = calculateCounterRate(sink.Value, t) + case *metrics.GaugeSink: + result = sink.Format(t) + result["min"] = sink.Min + result["max"] = sink.Max + case *metrics.RateSink: + result = sink.Format(t) + result["passes"] = float64(sink.Trues) + result["fails"] = float64(sink.Total - sink.Trues) + case *metrics.TrendSink: + result = make(map[string]float64, len(summaryTrendStats)) + for _, col := range summaryTrendStats { + result[col] = trendResolvers[col](sink) + } + } + + return result + } +} + +func calculateCounterRate(count float64, duration time.Duration) float64 { + if duration == 0 { + return 0 + } + return count / (float64(duration) / float64(time.Second)) +} + +// LegacySummary contains all the data the summary handler gets. +type LegacySummary struct { + Metrics map[string]*metrics.Metric + RootGroup *Group + TestRunDuration time.Duration // TODO: use lib.ExecutionState-based interface instead? + NoColor bool // TODO: drop this when noColor is part of the (runtime) options + UIState UIState +} diff --git a/output/summary/report.go b/output/summary/data.go similarity index 82% rename from output/summary/report.go rename to output/summary/data.go index d2f9d80894a..3fb4350d0ce 100644 --- a/output/summary/report.go +++ b/output/summary/data.go @@ -88,7 +88,7 @@ func (a aggregatedGroupData) addSample(sample metrics.Sample) { type aggregatedMetricData map[string]aggregatedMetric // relayMetricFrom stores the metric and the metric sink from the sample. It makes the underlying metric of our -// report's aggregatedMetricData point directly to a metric in the k6 registry, and relies on that specific pointed +// summary's aggregatedMetricData point directly to a metric in the k6 registry, and relies on that specific pointed // at metrics internal state for its computations. func (a aggregatedMetricData) relayMetricFrom(sample metrics.Sample) { a[sample.Metric.Name] = aggregatedMetric{ @@ -154,45 +154,45 @@ func (a *aggregatedChecksData) checkFor(name string) *lib.Check { return check } -func populateReportGroup( - reportGroup *lib.ReportGroup, +func populateSummaryGroup( + summaryGroup *lib.SummaryGroup, groupData aggregatedGroupData, testRunDuration time.Duration, summaryTrendStats []string, ) { // First, we populate the checks metrics, which are treated independently. - populateReportChecks(reportGroup, groupData, testRunDuration, summaryTrendStats) + populateSummaryChecks(summaryGroup, groupData, testRunDuration, summaryTrendStats) // Then, we store the metrics. 
- storeMetric := func(dest lib.ReportMetrics, info lib.ReportMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { - reportMetric := lib.NewReportMetricFrom(info, sink, testDuration, summaryTrendStats) + storeMetric := func(dest lib.SummaryMetrics, info lib.SummaryMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { + summaryMetric := lib.NewSummaryMetricFrom(info, sink, testDuration, summaryTrendStats) switch { case isSkippedMetric(info.Name): // Do nothing, just skip. case isHTTPMetric(info.Name): - dest.HTTP[info.Name] = reportMetric + dest.HTTP[info.Name] = summaryMetric case isExecutionMetric(info.Name): - dest.Execution[info.Name] = reportMetric + dest.Execution[info.Name] = summaryMetric case isNetworkMetric(info.Name): - dest.Network[info.Name] = reportMetric + dest.Network[info.Name] = summaryMetric case isBrowserMetric(info.Name): - dest.Browser[info.Name] = reportMetric + dest.Browser[info.Name] = summaryMetric case isGrpcMetric(info.Name): - dest.Grpc[info.Name] = reportMetric + dest.Grpc[info.Name] = summaryMetric case isWebSocketsMetric(info.Name): - dest.WebSocket[info.Name] = reportMetric + dest.WebSocket[info.Name] = summaryMetric case isWebVitalsMetric(info.Name): - dest.WebVitals[info.Name] = reportMetric + dest.WebVitals[info.Name] = summaryMetric default: - dest.Custom[info.Name] = reportMetric + dest.Custom[info.Name] = summaryMetric } } for _, metricData := range groupData.aggregatedMetrics { storeMetric( - reportGroup.Metrics, - lib.ReportMetricInfo{ + summaryGroup.Metrics, + lib.SummaryMetricInfo{ Name: metricData.Metric.Name, Type: metricData.Metric.Type.String(), Contains: metricData.Metric.Contains.String(), @@ -205,17 +205,17 @@ func populateReportGroup( // Finally, we keep moving down the hierarchy and populate the nested groups. for groupName, subGroupData := range groupData.groupsData { - subReportGroup := lib.NewReportGroup() - populateReportGroup(&subReportGroup, subGroupData, testRunDuration, summaryTrendStats) - reportGroup.Groups[groupName] = subReportGroup + summarySubGroup := lib.NewSummaryGroup() + populateSummaryGroup(&summarySubGroup, subGroupData, testRunDuration, summaryTrendStats) + summaryGroup.Groups[groupName] = summarySubGroup } } -func reportThresholds( +func summaryThresholds( thresholds thresholds, testRunDuration time.Duration, summaryTrendStats []string, -) lib.ReportThresholds { +) lib.SummaryThresholds { rts := make(map[string]lib.MetricThresholds, len(thresholds)) for _, threshold := range thresholds { metric := threshold.Metric @@ -223,8 +223,8 @@ func reportThresholds( mt, exists := rts[metric.Name] if !exists { mt = lib.MetricThresholds{ - Metric: lib.NewReportMetricFrom( - lib.ReportMetricInfo{ + Metric: lib.NewSummaryMetricFrom( + lib.SummaryMetricInfo{ Name: metric.Name, Type: metric.Type.String(), Contains: metric.Contains.String(), @@ -236,7 +236,7 @@ func reportThresholds( } } - mt.Thresholds = append(rts[metric.Name].Thresholds, lib.ReportThreshold{ + mt.Thresholds = append(rts[metric.Name].Thresholds, lib.SummaryThreshold{ Source: threshold.Source, Ok: !threshold.LastFailed, }) @@ -247,8 +247,8 @@ func reportThresholds( // FIXME: This function is a bit flurry, we should consider refactoring it. // For instance, it would be possible to directly construct these metrics on-the-fly. 
-func populateReportChecks( - reportGroup *lib.ReportGroup, +func populateSummaryChecks( + summaryGroup *lib.SummaryGroup, groupData aggregatedGroupData, testRunDuration time.Duration, summaryTrendStats []string, @@ -258,16 +258,16 @@ func populateReportChecks( return } - reportGroup.Checks = lib.NewReportChecks() + summaryGroup.Checks = lib.NewSummaryChecks() totalChecks := float64(checksMetric.Sink.(*metrics.RateSink).Total) successChecks := float64(checksMetric.Sink.(*metrics.RateSink).Trues) - reportGroup.Checks.Metrics.Total.Values["count"] = totalChecks - reportGroup.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, testRunDuration) + summaryGroup.Checks.Metrics.Total.Values["count"] = totalChecks + summaryGroup.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, testRunDuration) - reportGroup.Checks.Metrics.Success = lib.NewReportMetricFrom( - lib.ReportMetricInfo{ + summaryGroup.Checks.Metrics.Success = lib.NewSummaryMetricFrom( + lib.SummaryMetricInfo{ Name: "checks_succeeded", Type: checksMetric.Metric.Type.String(), Contains: checksMetric.Metric.Contains.String(), @@ -277,11 +277,11 @@ func populateReportChecks( summaryTrendStats, ) - reportGroup.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks - reportGroup.Checks.Metrics.Fail.Values["fails"] = successChecks - reportGroup.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks + summaryGroup.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks + summaryGroup.Checks.Metrics.Fail.Values["fails"] = successChecks + summaryGroup.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks - reportGroup.Checks.OrderedChecks = groupData.checks.orderedChecks + summaryGroup.Checks.OrderedChecks = groupData.checks.orderedChecks } func isHTTPMetric(metricName string) bool { diff --git a/output/summary/summary.go b/output/summary/summary.go index 8e9fdd2c59f..b782cc38b5f 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -104,18 +104,18 @@ func (o *Output) flushSample(sample metrics.Sample) { } } -func (o *Output) MetricsReport(summary *lib.Summary, options lib.Options) lib.Report { - report := lib.NewReport() +func (o *Output) Summary(executionState *lib.ExecutionState, options lib.Options) lib.Summary { + summary := lib.NewSummary() - testRunDuration := summary.TestRunDuration + testRunDuration := executionState.GetCurrentTestRunDuration() summaryTrendStats := options.SummaryTrendStats // Populate the thresholds. - report.ReportThresholds = reportThresholds(o.dataModel.thresholds, testRunDuration, summaryTrendStats) + summary.SummaryThresholds = summaryThresholds(o.dataModel.thresholds, testRunDuration, summaryTrendStats) // Populate root group and nested groups recursively. - populateReportGroup( - &report.ReportGroup, + populateSummaryGroup( + &summary.SummaryGroup, o.dataModel.aggregatedGroupData, testRunDuration, summaryTrendStats, @@ -123,17 +123,17 @@ func (o *Output) MetricsReport(summary *lib.Summary, options lib.Options) lib.Re // Populate scenario groups and nested groups recursively. 
for scenarioName, scenarioData := range o.dataModel.scenarios { - scenarioReportGroup := lib.NewReportGroup() - populateReportGroup( - &scenarioReportGroup, + scenarioSummaryGroup := lib.NewSummaryGroup() + populateSummaryGroup( + &scenarioSummaryGroup, scenarioData, testRunDuration, summaryTrendStats, ) - report.Scenarios[scenarioName] = scenarioReportGroup + summary.Scenarios[scenarioName] = scenarioSummaryGroup } - return report + return summary } // storeSample relays the sample to the k6 metrics registry relevant metric. From ce4b92f4d2a9bd559e3889cb3ba41a55e11db592 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Tue, 4 Feb 2025 11:01:32 +0100 Subject: [PATCH 30/42] Keep support for the 'legacy' summary --- internal/cmd/run.go | 86 ++-- internal/js/runner.go | 34 +- internal/js/summary-legacy.js | 426 ++++++++++++++++++ internal/js/summary.go | 5 + .../lib/testutils/minirunner/minirunner.go | 2 +- lib/runner.go | 2 +- lib/summary.go | 8 +- output/summary/summary.go | 2 +- 8 files changed, 524 insertions(+), 41 deletions(-) create mode 100644 internal/js/summary-legacy.js diff --git a/internal/cmd/run.go b/internal/cmd/run.go index d46a3604d90..ade32549df2 100644 --- a/internal/cmd/run.go +++ b/internal/cmd/run.go @@ -191,41 +191,69 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { executionState := execScheduler.GetState() if !testRunState.RuntimeOptions.NoSummary.Bool { - // Instantiates the summary output - summaryOutput, err := summary.New(output.Params{ - RuntimeOptions: testRunState.RuntimeOptions, - Logger: c.gs.Logger, - }) + sm, err := lib.ValidateSummaryMode(testRunState.RuntimeOptions.SummaryMode.String) if err != nil { - logger.WithError(err).Error("failed to initialize the end-of-test summary output") + logger.WithError(err).Error("invalid summary mode, falling back to \"compact\" (default)") } - outputs = append(outputs, summaryOutput) - // At the end of the test run - defer func() { - logger.Debug("Generating the end-of-test summary...") - - legacySummary := &lib.LegacySummary{ - Metrics: metricsEngine.ObservedMetrics, - RootGroup: testRunState.GroupSummary.Group(), - TestRunDuration: executionState.GetCurrentTestRunDuration(), - NoColor: c.gs.Flags.NoColor, - UIState: lib.UIState{ - IsStdOutTTY: c.gs.Stdout.IsTTY, - IsStdErrTTY: c.gs.Stderr.IsTTY, - }, + switch sm { + // TODO: Remove this code block once we stop supporting the legacy summary, and just leave the default. 
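+ // Sketch of the two branches below: "legacy" keeps the old lib.LegacySummary flow
+ // and passes nil as the new summary, while "compact"/"full" register the summary
+ // output and build the new *lib.Summary from it, passing nil as the legacy one.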
+ case lib.SummaryModeLegacy: + // At the end of the test run + defer func() { + logger.Debug("Generating the end-of-test summary...") + + legacySummary := &lib.LegacySummary{ + Metrics: metricsEngine.ObservedMetrics, + RootGroup: testRunState.GroupSummary.Group(), + TestRunDuration: executionState.GetCurrentTestRunDuration(), + NoColor: c.gs.Flags.NoColor, + UIState: lib.UIState{ + IsStdOutTTY: c.gs.Stdout.IsTTY, + IsStdErrTTY: c.gs.Stderr.IsTTY, + }, + } + + summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, legacySummary, nil) + if hsErr == nil { + hsErr = handleSummaryResult(c.gs.FS, c.gs.Stdout, c.gs.Stderr, summaryResult) + } + if hsErr != nil { + logger.WithError(hsErr).Error("failed to handle the end-of-test summary") + } + }() + default: + // Instantiates the summary output + summaryOutput, err := summary.New(output.Params{ + RuntimeOptions: testRunState.RuntimeOptions, + Logger: c.gs.Logger, + }) + if err != nil { + logger.WithError(err).Error("failed to initialize the end-of-test summary output") } + outputs = append(outputs, summaryOutput) - summary := summaryOutput.Summary(executionState, test.initRunner.GetOptions()) + // At the end of the test run + defer func() { + logger.Debug("Generating the end-of-test summary...") - summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, legacySummary, summary) - if hsErr == nil { - hsErr = handleSummaryResult(c.gs.FS, c.gs.Stdout, c.gs.Stderr, summaryResult) - } - if hsErr != nil { - logger.WithError(hsErr).Error("failed to handle the end-of-test summary") - } - }() + summary := summaryOutput.Summary(executionState, test.initRunner.GetOptions()) + summary.TestRunDuration = executionState.GetCurrentTestRunDuration() + summary.NoColor = c.gs.Flags.NoColor + summary.UIState = lib.UIState{ + IsStdOutTTY: c.gs.Stdout.IsTTY, + IsStdErrTTY: c.gs.Stderr.IsTTY, + } + + summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, nil, summary) + if hsErr == nil { + hsErr = handleSummaryResult(c.gs.FS, c.gs.Stdout, c.gs.Stderr, summaryResult) + } + if hsErr != nil { + logger.WithError(hsErr).Error("failed to handle the end-of-test summary") + } + }() + } } waitInitDone := emitEvent(&event.Event{Type: event.Init}) diff --git a/internal/js/runner.go b/internal/js/runner.go index ae371db822a..76687bff475 100644 --- a/internal/js/runner.go +++ b/internal/js/runner.go @@ -349,7 +349,7 @@ func (r *Runner) IsExecutable(name string) bool { } // HandleSummary calls the specified summary callback, if supplied. -func (r *Runner) HandleSummary(ctx context.Context, summary *lib.LegacySummary, report lib.Summary) (map[string]io.Reader, error) { +func (r *Runner) HandleSummary(ctx context.Context, legacy *lib.LegacySummary, summary *lib.Summary) (map[string]io.Reader, error) { out := make(chan metrics.SampleContainer, 100) defer close(out) @@ -371,6 +371,28 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.LegacySummary, }) vu.moduleVUImpl.ctx = summaryCtx + var ( + noColor bool + enableColors bool + summaryDataForJS interface{} + summaryCode string + ) + + // TODO: Remove this code block once we stop supporting the legacy summary. 
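+ // Note (inferred from the cmd/run.go changes above): callers are expected to pass
+ // exactly one non-nil argument, either `legacy` or `summary`, depending on the
+ // configured summary mode; the branches below pick the data and JS code accordingly.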
+ if legacy != nil { + noColor = legacy.NoColor + enableColors = !legacy.NoColor && legacy.UIState.IsStdOutTTY + summaryDataForJS = summarizeMetricsToObject(legacy, r.Bundle.Options, r.setupData) + summaryCode = jslibSummaryLegacyCode + } + + if summary != nil { + noColor = summary.NoColor + enableColors = !summary.NoColor && summary.UIState.IsStdOutTTY + summaryDataForJS = summary + summaryCode = jslibSummaryCode + } + callbackResult := sobek.Undefined() fn := vu.getExported(consts.HandleSummaryFn) // TODO: rename to UserDefinedHandleSummaryFn? if fn != nil { @@ -379,8 +401,6 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.LegacySummary, return nil, fmt.Errorf("exported identifier %s must be a function", consts.HandleSummaryFn) } - // TODO: Do we want to keep it compatible with the old format? Or do we want to break it? - summaryDataForJS := summarizeMetricsToObject(summary, r.Bundle.Options, r.setupData) callbackResult, _, _, err = vu.runFn(summaryCtx, false, handleSummaryFn, nil, vu.Runtime.ToValue(summaryDataForJS)) if err != nil { errText, fields := errext.Format(err) @@ -388,7 +408,7 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.LegacySummary, } } - wrapper := strings.Replace(summaryWrapperLambdaCode, "/*JSLIB_SUMMARY_CODE*/", jslibSummaryCode, 1) + wrapper := strings.Replace(summaryWrapperLambdaCode, "/*JSLIB_SUMMARY_CODE*/", summaryCode, 1) handleSummaryWrapperRaw, err := vu.Runtime.RunString(wrapper) if err != nil { return nil, fmt.Errorf("unexpected error while getting the summary wrapper: %w", err) @@ -402,14 +422,14 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.LegacySummary, // TODO: improve when we can easily export all option values, including defaults? "summaryTrendStats": r.Bundle.Options.SummaryTrendStats, "summaryTimeUnit": r.Bundle.Options.SummaryTimeUnit.String, - "noColor": summary.NoColor, // TODO: move to the (runtime) options - "enableColors": !summary.NoColor && summary.UIState.IsStdOutTTY, + "noColor": noColor, // TODO: move to the (runtime) options + "enableColors": enableColors, } wrapperArgs := []sobek.Value{ callbackResult, vu.Runtime.ToValue(r.Bundle.preInitState.RuntimeOptions.SummaryExport.String), - vu.Runtime.ToValue(report), + vu.Runtime.ToValue(summaryDataForJS), vu.Runtime.ToValue(options), } rawResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryWrapper, nil, wrapperArgs...) diff --git a/internal/js/summary-legacy.js b/internal/js/summary-legacy.js new file mode 100644 index 00000000000..ada659d08e9 --- /dev/null +++ b/internal/js/summary-legacy.js @@ -0,0 +1,426 @@ +/** + * NOTE: This file is a legacy version of the summary generation code, and is kept around for + * backwards compatibility, until we decide to remove the support for the old summary format. + * + * This file contains code used to generate a textual summary of tests results, as displayed + * in the user's terminal at the end of a k6 test run, also known as "end of test summary". + * + * The main entry point is the `generateTextSummary` function, which takes the test data, + * and returns a formatted string summarizing the test results, ready to be written to the terminal. + * + * For convenience, the file also exports the `humanizeValue` function. 
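+ *
+ * A minimal usage sketch (hypothetical script-side hook, shown only for illustration):
+ *
+ *   export function handleSummary(data) {
+ *     return { stdout: textSummary(data, { indent: ' ', enableColors: true }) };
+ *   }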
+ */ +exports.humanizeValue = humanizeValue +exports.textSummary = generateTextSummary + +var forEach = function (obj, callback) { + for (var key in obj) { + if (obj.hasOwnProperty(key)) { + if (callback(key, obj[key])) { + break + } + } + } +} + +var palette = { + bold: 1, + faint: 2, + red: 31, + green: 32, + cyan: 36, + //TODO: add others? +} + +var groupPrefix = '█' +var detailsPrefix = '↳' +var succMark = '✓' +var failMark = '✗' +var defaultOptions = { + indent: ' ', + enableColors: true, + summaryTimeUnit: null, + summaryTrendStats: null, +} + +// strWidth tries to return the actual width the string will take up on the +// screen, without any terminal formatting, unicode ligatures, etc. +function strWidth(s) { + // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ + var data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better + var inEscSeq = false + var inLongEscSeq = false + var width = 0 + for (var char of data) { + if (char.done) { + break + } + + // Skip over ANSI escape codes. + if (char == '\x1b') { + inEscSeq = true + continue + } + if (inEscSeq && char == '[') { + inLongEscSeq = true + continue + } + if (inEscSeq && inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x7e) { + inEscSeq = false + inLongEscSeq = false + continue + } + if (inEscSeq && !inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x5f) { + inEscSeq = false + continue + } + + if (!inEscSeq && !inLongEscSeq) { + width++ + } + } + return width +} + +function summarizeCheck(indent, check, decorate) { + if (check.fails == 0) { + return decorate(indent + succMark + ' ' + check.name, palette.green) + } + + var succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) + return decorate( + indent + + failMark + + ' ' + + check.name + + '\n' + + indent + + ' ' + + detailsPrefix + + ' ' + + succPercent + + '% — ' + + succMark + + ' ' + + check.passes + + ' / ' + + failMark + + ' ' + + check.fails, + palette.red + ) +} + +function summarizeGroup(indent, group, decorate) { + var result = [] + if (group.name != '') { + result.push(indent + groupPrefix + ' ' + group.name + '\n') + indent = indent + ' ' + } + + for (var i = 0; i < group.checks.length; i++) { + result.push(summarizeCheck(indent, group.checks[i], decorate)) + } + if (group.checks.length > 0) { + result.push('') + } + for (var i = 0; i < group.groups.length; i++) { + Array.prototype.push.apply(result, summarizeGroup(indent, group.groups[i], decorate)) + } + + return result +} + +function displayNameForMetric(name) { + var subMetricPos = name.indexOf('{') + if (subMetricPos >= 0) { + return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }' + } + return name +} + +function indentForMetric(name) { + if (name.indexOf('{') >= 0) { + return ' ' + } + return '' +} + +function humanizeBytes(bytes) { + var units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] + var base = 1000 + if (bytes < 10) { + return bytes + ' B' + } + + var e = Math.floor(Math.log(bytes) / Math.log(base)) + var suffix = units[e | 0] + var val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10 + return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix +} + +var unitMap = { + s: { unit: 's', coef: 0.001 }, + ms: { unit: 'ms', coef: 1 }, + us: { unit: 'µs', coef: 1000 }, +} + +function toFixedNoTrailingZeros(val, prec) { + // TODO: figure out something better? 
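+ // e.g. toFixedNoTrailingZeros(1.2300, 4) === '1.23' (illustrative values)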
+ return parseFloat(val.toFixed(prec)).toString() +} + +function toFixedNoTrailingZerosTrunc(val, prec) { + var mult = Math.pow(10, prec) + return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec) +} + +function humanizeGenericDuration(dur) { + if (dur === 0) { + return '0s' + } + + if (dur < 0.001) { + // smaller than a microsecond, print nanoseconds + return Math.trunc(dur * 1000000) + 'ns' + } + if (dur < 1) { + // smaller than a millisecond, print microseconds + return toFixedNoTrailingZerosTrunc(dur * 1000, 2) + 'µs' + } + if (dur < 1000) { + // duration is smaller than a second + return toFixedNoTrailingZerosTrunc(dur, 2) + 'ms' + } + + var result = toFixedNoTrailingZerosTrunc((dur % 60000) / 1000, dur > 60000 ? 0 : 2) + 's' + var rem = Math.trunc(dur / 60000) + if (rem < 1) { + // less than a minute + return result + } + result = (rem % 60) + 'm' + result + rem = Math.trunc(rem / 60) + if (rem < 1) { + // less than an hour + return result + } + return rem + 'h' + result +} + +function humanizeDuration(dur, timeUnit) { + if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { + return (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit + } + + return humanizeGenericDuration(dur) +} + +function humanizeValue(val, metric, timeUnit) { + if (metric.type == 'rate') { + // Truncate instead of round when decreasing precision to 2 decimal places + return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%' + } + + switch (metric.contains) { + case 'data': + return humanizeBytes(val) + case 'time': + return humanizeDuration(val, timeUnit) + default: + return toFixedNoTrailingZeros(val, 6) + } +} + +function nonTrendMetricValueForSum(metric, timeUnit) { + switch (metric.type) { + case 'counter': + return [ + humanizeValue(metric.values.count, metric, timeUnit), + humanizeValue(metric.values.rate, metric, timeUnit) + '/s', + ] + case 'gauge': + return [ + humanizeValue(metric.values.value, metric, timeUnit), + 'min=' + humanizeValue(metric.values.min, metric, timeUnit), + 'max=' + humanizeValue(metric.values.max, metric, timeUnit), + ] + case 'rate': + return [ + humanizeValue(metric.values.rate, metric, timeUnit), + `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, + ] + default: + return ['[no data]'] + } +} + +function summarizeMetrics(options, data, decorate) { + var indent = options.indent + ' ' + var result = [] + + var names = [] + var nameLenMax = 0 + + var nonTrendValues = {} + var nonTrendValueMaxLen = 0 + var nonTrendExtras = {} + var nonTrendExtraMaxLens = [0, 0] + + var trendCols = {} + var numTrendColumns = options.summaryTrendStats.length + var trendColMaxLens = new Array(numTrendColumns).fill(0) + forEach(data.metrics, function (name, metric) { + names.push(name) + // When calculating widths for metrics, account for the indentation on submetrics. 
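+ // e.g. a (hypothetical) submetric name like 'http_req_duration{status:200}'
+ // is rendered as the indented label '{ status:200 }' by the helpers above.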
+ var displayName = indentForMetric(name) + displayNameForMetric(name) + var displayNameWidth = strWidth(displayName) + if (displayNameWidth > nameLenMax) { + nameLenMax = displayNameWidth + } + + if (metric.type == 'trend') { + var cols = [] + for (var i = 0; i < numTrendColumns; i++) { + var tc = options.summaryTrendStats[i] + var value = metric.values[tc] + if (tc === 'count') { + value = value.toString() + } else { + value = humanizeValue(value, metric, options.summaryTimeUnit) + } + var valLen = strWidth(value) + if (valLen > trendColMaxLens[i]) { + trendColMaxLens[i] = valLen + } + cols[i] = value + } + trendCols[name] = cols + return + } + var values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) + nonTrendValues[name] = values[0] + var valueLen = strWidth(values[0]) + if (valueLen > nonTrendValueMaxLen) { + nonTrendValueMaxLen = valueLen + } + nonTrendExtras[name] = values.slice(1) + for (var i = 1; i < values.length; i++) { + var extraLen = strWidth(values[i]) + if (extraLen > nonTrendExtraMaxLens[i - 1]) { + nonTrendExtraMaxLens[i - 1] = extraLen + } + } + }) + + // sort all metrics but keep sub metrics grouped with their parent metrics + names.sort(function (metric1, metric2) { + var parent1 = metric1.split('{', 1)[0] + var parent2 = metric2.split('{', 1)[0] + var result = parent1.localeCompare(parent2) + if (result !== 0) { + return result + } + var sub1 = metric1.substring(parent1.length) + var sub2 = metric2.substring(parent2.length) + return sub1.localeCompare(sub2) + }) + + var getData = function (name) { + if (trendCols.hasOwnProperty(name)) { + var cols = trendCols[name] + var tmpCols = new Array(numTrendColumns) + for (var i = 0; i < cols.length; i++) { + tmpCols[i] = + options.summaryTrendStats[i] + + '=' + + decorate(cols[i], palette.cyan) + + ' '.repeat(trendColMaxLens[i] - strWidth(cols[i])) + } + return tmpCols.join(' ') + } + + var value = nonTrendValues[name] + var fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) + + var extras = nonTrendExtras[name] + if (extras.length == 1) { + fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) + } else if (extras.length > 1) { + var parts = new Array(extras.length) + for (var i = 0; i < extras.length; i++) { + parts[i] = + decorate(extras[i], palette.cyan, palette.faint) + + ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) + } + fmtData = fmtData + ' ' + parts.join(' ') + } + + return fmtData + } + + for (var name of names) { + var metric = data.metrics[name] + var mark = ' ' + var markColor = function (text) { + return text + } // noop + + if (metric.thresholds) { + mark = succMark + markColor = function (text) { + return decorate(text, palette.green) + } + forEach(metric.thresholds, function (name, threshold) { + if (!threshold.ok) { + mark = failMark + markColor = function (text) { + return decorate(text, palette.red) + } + return true // break + } + }) + } + var fmtIndent = indentForMetric(name) + var fmtName = displayNameForMetric(name) + fmtName = + fmtName + + decorate( + '.'.repeat(nameLenMax - strWidth(fmtName) - strWidth(fmtIndent) + 3) + ':', + palette.faint + ) + + result.push(indent + fmtIndent + markColor(mark) + ' ' + fmtName + ' ' + getData(name)) + } + + return result +} + +function generateTextSummary(data, options) { + var mergedOpts = Object.assign({}, defaultOptions, data.options, options) + var lines = [] + + // TODO: move all of these functions into an object with methods? 
+ var decorate = function (text) { + return text + } + if (mergedOpts.enableColors) { + decorate = function (text, color /*, ...rest*/) { + var result = '\x1b[' + color + for (var i = 2; i < arguments.length; i++) { + result += ';' + arguments[i] + } + return result + 'm' + text + '\x1b[0m' + } + } + + Array.prototype.push.apply( + lines, + summarizeGroup(mergedOpts.indent + ' ', data.root_group, decorate) + ) + + Array.prototype.push.apply(lines, summarizeMetrics(mergedOpts, data, decorate)) + + return lines.join('\n') +} + diff --git a/internal/js/summary.go b/internal/js/summary.go index 46138bb9b3b..1047eb83d7f 100644 --- a/internal/js/summary.go +++ b/internal/js/summary.go @@ -18,6 +18,11 @@ import ( //go:embed summary.js var jslibSummaryCode string +// TODO: Remove me once we stop supporting the legacy summary. +// +//go:embed summary-legacy.js +var jslibSummaryLegacyCode string + //go:embed summary-wrapper.js var summaryWrapperLambdaCode string diff --git a/internal/lib/testutils/minirunner/minirunner.go b/internal/lib/testutils/minirunner/minirunner.go index 8559c0160af..b99e1e3bb44 100644 --- a/internal/lib/testutils/minirunner/minirunner.go +++ b/internal/lib/testutils/minirunner/minirunner.go @@ -108,7 +108,7 @@ func (r *MiniRunner) SetOptions(opts lib.Options) error { } // HandleSummary calls the specified summary callback, if supplied. -func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.LegacySummary, _ lib.Summary) (map[string]io.Reader, error) { +func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.LegacySummary, _ *lib.Summary) (map[string]io.Reader, error) { if r.HandleSummaryFn != nil { return r.HandleSummaryFn(ctx, s) } diff --git a/lib/runner.go b/lib/runner.go index 3bf7e033232..9afdff91116 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -80,7 +80,7 @@ type Runner interface { // function in the script. IsExecutable(string) bool - HandleSummary(context.Context, *LegacySummary, Summary) (map[string]io.Reader, error) + HandleSummary(context.Context, *LegacySummary, *Summary) (map[string]io.Reader, error) } // UIState describes the state of the UI, which might influence what diff --git a/lib/summary.go b/lib/summary.go index 39b67b5709d..1857ff40b13 100644 --- a/lib/summary.go +++ b/lib/summary.go @@ -94,10 +94,14 @@ type Summary struct { SummaryThresholds `js:"thresholds"` SummaryGroup Scenarios map[string]SummaryGroup + + TestRunDuration time.Duration // TODO: use lib.ExecutionState-based interface instead? 
+ NoColor bool // TODO: drop this when noColor is part of the (runtime) options + UIState UIState } -func NewSummary() Summary { - return Summary{ +func NewSummary() *Summary { + return &Summary{ SummaryThresholds: NewSummaryThresholds(), SummaryGroup: SummaryGroup{ Metrics: NewSummaryMetrics(), diff --git a/output/summary/summary.go b/output/summary/summary.go index b782cc38b5f..266b379f86e 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -104,7 +104,7 @@ func (o *Output) flushSample(sample metrics.Sample) { } } -func (o *Output) Summary(executionState *lib.ExecutionState, options lib.Options) lib.Summary { +func (o *Output) Summary(executionState *lib.ExecutionState, options lib.Options) *lib.Summary { summary := lib.NewSummary() testRunDuration := executionState.GetCurrentTestRunDuration() From cd633bc2a937311badaf9e2f35628db81eb26705 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Wed, 5 Feb 2025 10:15:14 +0100 Subject: [PATCH 31/42] (Try to) fix some tests --- internal/cmd/outputs_test.go | 1 + internal/cmd/run_test.go | 2 +- internal/cmd/runtime_options_test.go | 47 +++++++++++++++++++ internal/cmd/tests/cmd_run_grpc_test.go | 2 +- internal/cmd/tests/cmd_run_test.go | 7 +-- .../js/modules/k6/webcrypto/cmd_run_test.go | 2 +- internal/js/summary-wrapper.js | 4 +- internal/js/summary.js | 2 +- internal/js/summary_test.go | 34 +++++++------- 9 files changed, 75 insertions(+), 26 deletions(-) diff --git a/internal/cmd/outputs_test.go b/internal/cmd/outputs_test.go index 0a7d916d308..02c38759327 100644 --- a/internal/cmd/outputs_test.go +++ b/internal/cmd/outputs_test.go @@ -11,6 +11,7 @@ func TestBuiltinOutputString(t *testing.T) { exp := []string{ "cloud", "csv", "datadog", "experimental-prometheus-rw", "influxdb", "json", "kafka", "statsd", "experimental-opentelemetry", + "summary", } assert.Equal(t, exp, builtinOutputStrings()) } diff --git a/internal/cmd/run_test.go b/internal/cmd/run_test.go index 93e7a902ed2..fbfed6e8276 100644 --- a/internal/cmd/run_test.go +++ b/internal/cmd/run_test.go @@ -354,7 +354,7 @@ func TestThresholdsRuntimeBehavior(t *testing.T) { ts := tests.NewGlobalTestState(t) require.NoError(t, fsext.WriteFile(ts.FS, filepath.Join(ts.Cwd, tc.testFilename), testScript, 0o644)) - ts.CmdArgs = []string{"k6", "run", tc.testFilename} + ts.CmdArgs = []string{"k6", "run", tc.testFilename, "--with-summary", "legacy"} ts.ExpectedExitCode = int(tc.expExitCode) newRootCommand(ts.GlobalState).execute() diff --git a/internal/cmd/runtime_options_test.go b/internal/cmd/runtime_options_test.go index fe9b21eb416..6f5c9dd97a2 100644 --- a/internal/cmd/runtime_options_test.go +++ b/internal/cmd/runtime_options_test.go @@ -114,6 +114,7 @@ func TestRuntimeOptions(t *testing.T) { extendedCompatMode = null.NewString("extended", true) enhancedCompatMode = null.NewString("experimental_enhanced", true) defaultTracesOutput = null.NewString("none", false) + defaultSummaryMode = null.NewString("compact", false) ) runtimeOptionsTestCases := map[string]runtimeOptionsTestCase{ @@ -125,6 +126,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: nil, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by default": { @@ -135,6 +137,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by default with ext 
compat mode": { @@ -145,6 +148,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: extendedCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by default with experimental_enhanced compat mode": { @@ -155,6 +159,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: enhancedCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by cli 1": { @@ -166,6 +171,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: baseCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by cli 2": { @@ -177,6 +183,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: baseCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by env": { @@ -187,6 +194,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: extendedCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "enabled sys env by env": { @@ -197,6 +205,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: extendedCompatMode, Env: map[string]string{"K6_INCLUDE_SYSTEM_ENV_VARS": "true", "K6_COMPATIBILITY_MODE": "extended"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "enabled sys env by default": { @@ -208,6 +217,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "enabled sys env by cli 1": { @@ -219,6 +229,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "enabled sys env by cli 2": { @@ -230,6 +241,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "run only system env": { @@ -241,6 +253,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "mixed system and cli env": { @@ -252,6 +265,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1", "test2": "", "test3": "val3", "test4": "", "test5": ""}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "mixed system and cli env 2": { @@ -263,6 +277,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1", "test2": "", "test3": "val3", "test4": "", "test5": ""}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled system env with cli params": { @@ -274,6 +289,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test2": "val2"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "overwriting system env with cli param": { @@ -285,6 +301,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1cli"}, TracesOutput: defaultTracesOutput, + SummaryMode: 
defaultSummaryMode, }, }, "error wrong compat mode env var value": { @@ -327,6 +344,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "value 1", "test2": "value 2"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "valid env vars with special chars": { @@ -338,6 +356,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "value 1", "test2": "value,2", "test3": ` , ,,, value, ,, 2!'@#,"`}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "summary and thresholds from env": { @@ -351,6 +370,7 @@ func TestRuntimeOptions(t *testing.T) { NoSummary: null.NewBool(false, true), SummaryExport: null.NewString("foo", true), TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "summary and thresholds from env overwritten by CLI": { @@ -365,6 +385,7 @@ func TestRuntimeOptions(t *testing.T) { NoSummary: null.NewBool(true, true), SummaryExport: null.NewString("bar", true), TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "env var error detected even when CLI flags overwrite 1": { @@ -386,6 +407,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{}, TracesOutput: null.NewString("none", false), + SummaryMode: defaultSummaryMode, }, }, "traces output from env": { @@ -396,6 +418,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{}, TracesOutput: null.NewString("foo", true), + SummaryMode: defaultSummaryMode, }, }, "traces output from env overwritten by CLI": { @@ -407,6 +430,30 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{}, TracesOutput: null.NewString("bar", true), + SummaryMode: defaultSummaryMode, + }, + }, + "summary mode from env": { + useSysEnv: false, + systemEnv: map[string]string{"K6_WITH_SUMMARY": "full"}, + expRTOpts: lib.RuntimeOptions{ + IncludeSystemEnvVars: null.NewBool(false, false), + CompatibilityMode: defaultCompatMode, + Env: map[string]string{}, + TracesOutput: defaultTracesOutput, + SummaryMode: null.NewString("full", true), + }, + }, + "summary mode from env overwritten by CLI": { + useSysEnv: false, + systemEnv: map[string]string{"K6_WITH_SUMMARY": "full"}, + cliFlags: []string{"--with-summary", "legacy"}, + expRTOpts: lib.RuntimeOptions{ + IncludeSystemEnvVars: null.NewBool(false, false), + CompatibilityMode: defaultCompatMode, + Env: map[string]string{}, + TracesOutput: defaultTracesOutput, + SummaryMode: null.NewString("legacy", true), }, }, } diff --git a/internal/cmd/tests/cmd_run_grpc_test.go b/internal/cmd/tests/cmd_run_grpc_test.go index a7399db4428..e6edb90c913 100644 --- a/internal/cmd/tests/cmd_run_grpc_test.go +++ b/internal/cmd/tests/cmd_run_grpc_test.go @@ -105,7 +105,7 @@ func TestGRPCInputOutput(t *testing.T) { script, err := os.ReadFile(test.script) //nolint:forbidigo require.NoError(t, err) - ts := getSingleFileTestState(t, string(script), []string{"-v", "--log-output=stdout", "--no-usage-report"}, 0) + ts := getSingleFileTestState(t, string(script), []string{"-v", "--log-output=stdout", "--no-usage-report", "--with-summary", "legacy"}, 0) ts.Env = map[string]string{ "GRPC_ADDR": tb.Addr, "GRPC_PROTO_PATH": "./proto.proto", diff --git a/internal/cmd/tests/cmd_run_test.go b/internal/cmd/tests/cmd_run_test.go index b306a98e594..224890e5c8e 100644 --- 
a/internal/cmd/tests/cmd_run_test.go +++ b/internal/cmd/tests/cmd_run_test.go @@ -66,7 +66,7 @@ func TestSimpleTestStdin(t *testing.T) { t.Parallel() ts := NewGlobalTestState(t) - ts.CmdArgs = []string{"k6", "run", "-"} + ts.CmdArgs = []string{"k6", "run", "-", "--with-summary", "legacy"} ts.Stdin = bytes.NewBufferString(`export default function() {};`) cmd.ExecuteWithGlobalState(ts.GlobalState) @@ -241,6 +241,7 @@ func getSingleFileTestState(tb testing.TB, script string, cliFlags []string, exp if cliFlags == nil { cliFlags = []string{"-v", "--log-output=stdout"} } + cliFlags = append(cliFlags, "--with-summary=legacy") ts := NewGlobalTestState(tb) require.NoError(tb, fsext.WriteFile(ts.FS, filepath.Join(ts.Cwd, "test.js"), []byte(script), 0o644)) @@ -546,7 +547,7 @@ func getSimpleCloudOutputTestState( if cliFlags == nil { cliFlags = []string{"-v", "--log-output=stdout"} } - cliFlags = append(cliFlags, "--out", "cloud") + cliFlags = append(cliFlags, "--out", "cloud", "--with-summary=legacy") srv := getCloudTestEndChecker(tb, 111, nil, expRunStatus, expResultStatus) ts := getSingleFileTestState(tb, script, cliFlags, expExitCode) @@ -1890,7 +1891,7 @@ func TestUIRenderOutput(t *testing.T) { t.Parallel() ts := NewGlobalTestState(t) - ts.CmdArgs = []string{"k6", "run"} + ts.CmdArgs = []string{"k6", "run", "--with-summary=legacy"} for _, o := range tc.outputs { ts.CmdArgs = append(ts.CmdArgs, "-o") ts.CmdArgs = append(ts.CmdArgs, o) diff --git a/internal/js/modules/k6/webcrypto/cmd_run_test.go b/internal/js/modules/k6/webcrypto/cmd_run_test.go index 3ee0692fd2f..9cf8365788f 100644 --- a/internal/js/modules/k6/webcrypto/cmd_run_test.go +++ b/internal/js/modules/k6/webcrypto/cmd_run_test.go @@ -81,7 +81,7 @@ func TestExamplesInputOutput(t *testing.T) { script, err := os.ReadFile(filepath.Clean(file)) //nolint:forbidigo // we read an example directly require.NoError(t, err) - ts := getSingleFileTestState(t, string(script), []string{"-v", "--log-output=stdout"}, 0) + ts := getSingleFileTestState(t, string(script), []string{"-v", "--log-output=stdout", "--with-summary=legacy"}, 0) cmd.ExecuteWithGlobalState(ts.GlobalState) diff --git a/internal/js/summary-wrapper.js b/internal/js/summary-wrapper.js index 7164fc27459..dc0d9e48141 100644 --- a/internal/js/summary-wrapper.js +++ b/internal/js/summary-wrapper.js @@ -60,11 +60,11 @@ return JSON.stringify(results, null, 4); }; - return function (summaryCallbackResult, jsonSummaryPath, report, options) { + return function (summaryCallbackResult, jsonSummaryPath, data, options) { let result = summaryCallbackResult; if (!result) { result = { - 'stdout': '\n' + jslib.textSummary(report, options) + '\n\n', + 'stdout': '\n' + jslib.textSummary(data, options) + '\n\n', }; } diff --git a/internal/js/summary.js b/internal/js/summary.js index b3a218be54f..2b8a523f269 100644 --- a/internal/js/summary.js +++ b/internal/js/summary.js @@ -942,7 +942,7 @@ function renderMetricValueForThresholds( formatter, ) { const { trendStats, trendCols, nonTrendValues, nonTrendExtras} = info; - const thresholdAgg = threshold.source.split(/[=><]/)[0]; + const thresholdAgg = threshold.source.split(/[=><]/)[0].trim(); let value; switch (metric.type) { diff --git a/internal/js/summary_test.go b/internal/js/summary_test.go index 1b6a4509cdb..6ecf7027599 100644 --- a/internal/js/summary_test.go +++ b/internal/js/summary_test.go @@ -49,7 +49,7 @@ func TestTextSummary(t *testing.T) { i, tc := i, tc t.Run(fmt.Sprintf("%d_%v", i, tc.stats), func(t *testing.T) { t.Parallel() - summary := 
createTestSummary(t) + legacySummary := createTestLegacySummary(t) trendStats, err := json.Marshal(tc.stats) require.NoError(t, err) runner, err := getSimpleRunner( @@ -62,7 +62,7 @@ func TestTextSummary(t *testing.T) { ) require.NoError(t, err) - result, err := runner.HandleSummary(context.Background(), summary) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 1) @@ -102,7 +102,7 @@ func TestTextSummaryWithSubMetrics(t *testing.T) { subMetricPost.Name: subMetricPost.Metric, } - summary := &lib.LegacySummary{ + legacySummary := &lib.LegacySummary{ Metrics: metrics, RootGroup: &lib.Group{}, TestRunDuration: time.Second, @@ -116,7 +116,7 @@ func TestTextSummaryWithSubMetrics(t *testing.T) { ) require.NoError(t, err) - result, err := runner.HandleSummary(context.Background(), summary) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 1) @@ -205,7 +205,7 @@ func createTestMetrics(t *testing.T) (map[string]*metrics.Metric, *lib.Group) { return testMetrics, rootG } -func createTestSummary(t *testing.T) *lib.LegacySummary { +func createTestLegacySummary(t *testing.T) *lib.LegacySummary { metrics, rootG := createTestMetrics(t) return &lib.LegacySummary{ Metrics: metrics, @@ -306,8 +306,8 @@ func TestOldJSONExport(t *testing.T) { require.NoError(t, err) - summary := createTestSummary(t) - result, err := runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 2) @@ -576,8 +576,8 @@ func TestRawHandleSummaryData(t *testing.T) { require.NoError(t, err) - summary := createTestSummary(t) - result, err := runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 2) @@ -611,8 +611,8 @@ func TestRawHandleSummaryDataWithSetupData(t *testing.T) { require.NoError(t, err) runner.SetSetupData([]byte("5")) - summary := createTestSummary(t) - result, err := runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) dataWithSetup, err := io.ReadAll(result["dataWithSetup.json"]) require.NoError(t, err) @@ -634,8 +634,8 @@ func TestRawHandleSummaryPromise(t *testing.T) { require.NoError(t, err) runner.SetSetupData([]byte("5")) - summary := createTestSummary(t) - result, err := runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) dataWithSetup, err := io.ReadAll(result["dataWithSetup.json"]) require.NoError(t, err) @@ -659,8 +659,8 @@ func TestWrongSummaryHandlerExportTypes(t *testing.T) { ) require.NoError(t, err) - summary := createTestSummary(t) - _, err = runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + _, err = runner.HandleSummary(context.Background(), legacySummary, nil) require.Error(t, err) }) } @@ -684,8 +684,8 @@ func TestExceptionInHandleSummaryFallsBackToTextSummary(t *testing.T) { require.NoError(t, err) - summary := createTestSummary(t) - result, err := 
runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 1) From f62c2d46bf08c1de4b6595bfa3810a25db03f4dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Thu, 6 Feb 2025 13:13:35 +0100 Subject: [PATCH 32/42] Fix linter complaints --- internal/cmd/run.go | 2 +- lib/summary.go | 22 ++++++++++++++++++++-- output/summary/data.go | 6 +++--- output/summary/doc.go | 4 ++++ output/summary/summary.go | 7 ++++++- 5 files changed, 34 insertions(+), 7 deletions(-) create mode 100644 output/summary/doc.go diff --git a/internal/cmd/run.go b/internal/cmd/run.go index ade32549df2..b9946a24537 100644 --- a/internal/cmd/run.go +++ b/internal/cmd/run.go @@ -190,7 +190,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { } executionState := execScheduler.GetState() - if !testRunState.RuntimeOptions.NoSummary.Bool { + if !testRunState.RuntimeOptions.NoSummary.Bool { //nolint:nestif sm, err := lib.ValidateSummaryMode(testRunState.RuntimeOptions.SummaryMode.String) if err != nil { logger.WithError(err).Error("invalid summary mode, falling back to \"compact\" (default)") diff --git a/lib/summary.go b/lib/summary.go index 1857ff40b13..c0996637894 100644 --- a/lib/summary.go +++ b/lib/summary.go @@ -14,7 +14,7 @@ type SummaryMode int // Possible values for SummaryMode. const ( SummaryModeCompact = SummaryMode(iota) // Compact mode that only displays the total results. - SummaryModeFull // Extended mode that displays the total and also partial (per-group, etc.) results. + SummaryModeFull // Extended mode that displays total and partial results. SummaryModeLegacy // Legacy mode, used for backwards compatibility. ) @@ -90,6 +90,8 @@ func ValidateSummaryMode(val string) (sm SummaryMode, err error) { return } +// Summary is the data structure that holds all the summary data (thresholds, metrics, checks, etc) +// as well as some other information, like certain rendering options. type Summary struct { SummaryThresholds `js:"thresholds"` SummaryGroup @@ -100,6 +102,7 @@ type Summary struct { UIState UIState } +// NewSummary instantiates a new empty Summary. func NewSummary() *Summary { return &Summary{ SummaryThresholds: NewSummaryThresholds(), @@ -111,22 +114,26 @@ func NewSummary() *Summary { } } +// SummaryMetricInfo holds the definition of a metric that will be rendered in the summary, +// including the name of the metric, its type (Counter, Trend, etc.) and what contains (data amounts, times, etc.). type SummaryMetricInfo struct { Name string Type string Contains string } +// SummaryMetric holds all the information needed to display a metric in the summary, +// including its definition and its values. type SummaryMetric struct { SummaryMetricInfo Values map[string]float64 } +// NewSummaryMetricFrom instantiates a new SummaryMetric for a given metrics.Sink and the metric's info. func NewSummaryMetricFrom( info SummaryMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string, ) SummaryMetric { - // TODO: we obtain this from [options.SummaryTrendStats] which is a string slice getMetricValues := metricValueGetter(summaryTrendStats) return SummaryMetric{ @@ -135,6 +142,7 @@ func NewSummaryMetricFrom( } } +// SummaryMetrics is a collection of SummaryMetric grouped by section (http, network, etc). 
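// A minimal sketch, assuming the ValidateSummaryMode helper above: how the run
// command's fallback to the compact default (see the internal/cmd/run.go hunk in
// this patch) can be expressed. The resolveSummaryMode name and the logger
// parameter are illustrative assumptions, not names from the patch.
func resolveSummaryMode(raw string, logger logrus.FieldLogger) SummaryMode {
	mode, err := ValidateSummaryMode(raw) // accepts "compact", "full" or "legacy"
	if err != nil {
		logger.WithError(err).Error("invalid summary mode, falling back to \"compact\" (default)")
		return SummaryModeCompact // the zero value of SummaryMode, i.e. the default
	}
	return mode
}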
type SummaryMetrics struct { // HTTP contains summary data specific to HTTP metrics and is used // to produce the summary HTTP subsection's content. @@ -158,6 +166,7 @@ type SummaryMetrics struct { Custom map[string]SummaryMetric } +// NewSummaryMetrics instantiates an empty collection of SummaryMetrics. func NewSummaryMetrics() SummaryMetrics { return SummaryMetrics{ HTTP: make(map[string]SummaryMetric), @@ -171,17 +180,20 @@ func NewSummaryMetrics() SummaryMetrics { } } +// SummaryChecksMetrics is the subset of checks-specific metrics. type SummaryChecksMetrics struct { Total SummaryMetric `js:"checks_total"` Success SummaryMetric `js:"checks_succeeded"` Fail SummaryMetric `js:"checks_failed"` } +// SummaryChecks holds the checks information to be rendered in the summary. type SummaryChecks struct { Metrics SummaryChecksMetrics OrderedChecks []*Check } +// NewSummaryChecks instantiates an empty set of SummaryChecks. func NewSummaryChecks() *SummaryChecks { initChecksMetricData := func(name string, t metrics.MetricType) SummaryMetric { return SummaryMetric{ @@ -203,29 +215,35 @@ func NewSummaryChecks() *SummaryChecks { } } +// SummaryThreshold holds the information of a threshold to be rendered in the summary. type SummaryThreshold struct { Source string `js:"source"` Ok bool `js:"ok"` } +// MetricThresholds is the collection of SummaryThreshold that belongs to the same metric. type MetricThresholds struct { Metric SummaryMetric `js:"metric"` Thresholds []SummaryThreshold `js:"thresholds"` } +// SummaryThresholds is a collection of MetricThresholds that will be rendered in the summary. type SummaryThresholds map[string]MetricThresholds +// NewSummaryThresholds instantiates an empty collection of SummaryThresholds. func NewSummaryThresholds() SummaryThresholds { thresholds := make(SummaryThresholds) return thresholds } +// SummaryGroup is a group of metrics and subgroups (recursive) that will be rendered in the summary. type SummaryGroup struct { Checks *SummaryChecks // Not always present, thus we use a pointer. Metrics SummaryMetrics Groups map[string]SummaryGroup } +// NewSummaryGroup instantiates an empty SummaryGroup. 
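// A minimal sketch, assuming only the types defined above: how a single threshold
// result is represented in the new summary model. The metric name, values and
// threshold source are illustrative (shaped after the "count == 1" teardown_counter
// expectation used in the command tests later in this series); exampleThresholds
// itself is not part of the patch.
func exampleThresholds() SummaryThresholds {
	thresholds := NewSummaryThresholds()
	thresholds["teardown_counter"] = MetricThresholds{
		Metric: SummaryMetric{
			SummaryMetricInfo: SummaryMetricInfo{Name: "teardown_counter", Type: "counter", Contains: "default"},
			Values:            map[string]float64{"count": 1},
		},
		Thresholds: []SummaryThreshold{{Source: "count == 1", Ok: true}},
	}
	return thresholds
}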
func NewSummaryGroup() SummaryGroup { return SummaryGroup{ Metrics: NewSummaryMetrics(), diff --git a/output/summary/data.go b/output/summary/data.go index 3fb4350d0ce..74d42c15e24 100644 --- a/output/summary/data.go +++ b/output/summary/data.go @@ -236,7 +236,7 @@ func summaryThresholds( } } - mt.Thresholds = append(rts[metric.Name].Thresholds, lib.SummaryThreshold{ + mt.Thresholds = append(mt.Thresholds, lib.SummaryThreshold{ Source: threshold.Source, Ok: !threshold.LastFailed, }) @@ -260,8 +260,8 @@ func populateSummaryChecks( summaryGroup.Checks = lib.NewSummaryChecks() - totalChecks := float64(checksMetric.Sink.(*metrics.RateSink).Total) - successChecks := float64(checksMetric.Sink.(*metrics.RateSink).Trues) + totalChecks := float64(checksMetric.Sink.(*metrics.RateSink).Total) //nolint:forcetypeassert + successChecks := float64(checksMetric.Sink.(*metrics.RateSink).Trues) //nolint:forcetypeassert summaryGroup.Checks.Metrics.Total.Values["count"] = totalChecks summaryGroup.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, testRunDuration) diff --git a/output/summary/doc.go b/output/summary/doc.go new file mode 100644 index 00000000000..8cb8857456b --- /dev/null +++ b/output/summary/doc.go @@ -0,0 +1,4 @@ +/* +Package summary implements an output that collects metrics to be displayed in the end-of-test summary +*/ +package summary diff --git a/output/summary/summary.go b/output/summary/summary.go index 266b379f86e..8a7daebbe42 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -43,10 +43,13 @@ func New(params output.Params) (*Output, error) { }, nil } +// Description returns a human-readable description of the output. func (o *Output) Description() string { - return "" + return "summary" } +// Start starts a new output.PeriodicFlusher to collect and flush metrics that will be +// rendered in the end-of-test summary. func (o *Output) Start() error { pf, err := output.NewPeriodicFlusher(flushPeriod, o.flushMetrics) if err != nil { @@ -57,6 +60,7 @@ func (o *Output) Start() error { return nil } +// Stop flushes any remaining metrics and stops the goroutine. func (o *Output) Stop() error { o.periodicFlusher.Stop() return nil @@ -104,6 +108,7 @@ func (o *Output) flushSample(sample metrics.Sample) { } } +// Summary returns a lib.Summary of the test run. 
func (o *Output) Summary(executionState *lib.ExecutionState, options lib.Options) *lib.Summary { summary := lib.NewSummary() From eee0c992662e7a4e1bb1b95477fe992348de6d05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Thu, 6 Feb 2025 13:13:47 +0100 Subject: [PATCH 33/42] Fix xk6 test --- .github/workflows/xk6-tests/xk6-test.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/xk6-tests/xk6-test.js b/.github/workflows/xk6-tests/xk6-test.js index 093bad76cc6..a20de89a007 100644 --- a/.github/workflows/xk6-tests/xk6-test.js +++ b/.github/workflows/xk6-tests/xk6-test.js @@ -11,7 +11,7 @@ export let options = { export function handleSummary(data) { return { - 'summary-results.txt': data.metrics.foos.values.count.toString(), + 'summary-results.txt': data.metrics.custom.foos.values.count.toString(), }; } From eaf59f577e94b08fd98d4e7c009128b81161ba3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Thu, 6 Feb 2025 14:48:42 +0100 Subject: [PATCH 34/42] Fix more linter complaints --- internal/cmd/runtime_options.go | 4 +++- internal/js/runner.go | 6 +++++- internal/lib/testutils/minirunner/minirunner.go | 6 +++++- output/summary/data.go | 11 +++++++++-- output/summary/summary.go | 3 ++- 5 files changed, 24 insertions(+), 6 deletions(-) diff --git a/internal/cmd/runtime_options.go b/internal/cmd/runtime_options.go index 1f971c7dd0e..ee1306f72a0 100644 --- a/internal/cmd/runtime_options.go +++ b/internal/cmd/runtime_options.go @@ -31,7 +31,8 @@ extended: base + sets "global" as alias for "globalThis" flags.StringArrayP("env", "e", nil, "add/override environment variable with `VAR=value`") flags.Bool("no-thresholds", false, "don't run thresholds") flags.Bool("no-summary", false, "don't show the summary at the end of the test") - flags.String("with-summary", lib.SummaryModeCompact.String(), "determine the summary mode, \"compact\", \"full\" or \"legacy\"") + flags.String("with-summary", lib.SummaryModeCompact.String(), "determine the summary mode,"+ + " \"compact\", \"full\" or \"legacy\"") flags.String( "summary-export", "", @@ -58,6 +59,7 @@ func saveBoolFromEnv(env map[string]string, varName string, placeholder *null.Bo return nil } +//nolint:funlen func getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib.RuntimeOptions, error) { // TODO: refactor with composable helpers as a part of #883, to reduce copy-paste // TODO: get these options out of the JSON config file as well? diff --git a/internal/js/runner.go b/internal/js/runner.go index 76687bff475..af527978853 100644 --- a/internal/js/runner.go +++ b/internal/js/runner.go @@ -349,7 +349,11 @@ func (r *Runner) IsExecutable(name string) bool { } // HandleSummary calls the specified summary callback, if supplied. 
-func (r *Runner) HandleSummary(ctx context.Context, legacy *lib.LegacySummary, summary *lib.Summary) (map[string]io.Reader, error) { +func (r *Runner) HandleSummary( + ctx context.Context, + legacy *lib.LegacySummary, + summary *lib.Summary, +) (map[string]io.Reader, error) { out := make(chan metrics.SampleContainer, 100) defer close(out) diff --git a/internal/lib/testutils/minirunner/minirunner.go b/internal/lib/testutils/minirunner/minirunner.go index b99e1e3bb44..f8b8e985f36 100644 --- a/internal/lib/testutils/minirunner/minirunner.go +++ b/internal/lib/testutils/minirunner/minirunner.go @@ -108,7 +108,11 @@ func (r *MiniRunner) SetOptions(opts lib.Options) error { } // HandleSummary calls the specified summary callback, if supplied. -func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.LegacySummary, _ *lib.Summary) (map[string]io.Reader, error) { +func (r *MiniRunner) HandleSummary( + ctx context.Context, + s *lib.LegacySummary, + _ *lib.Summary, +) (map[string]io.Reader, error) { if r.HandleSummaryFn != nil { return r.HandleSummaryFn(ctx, s) } diff --git a/output/summary/data.go b/output/summary/data.go index 74d42c15e24..d17efcf0303 100644 --- a/output/summary/data.go +++ b/output/summary/data.go @@ -72,7 +72,8 @@ func (a aggregatedGroupData) groupDataFor(group string) aggregatedGroupData { func (a aggregatedGroupData) addSample(sample metrics.Sample) { a.aggregatedMetrics.addSample(sample) - if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { + checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()) + if hasCheckTag && sample.Metric.Name == metrics.ChecksName { check := a.checks.checkFor(checkName) if sample.Value == 0 { atomic.AddInt64(&check.Fails, 1) @@ -164,7 +165,13 @@ func populateSummaryGroup( populateSummaryChecks(summaryGroup, groupData, testRunDuration, summaryTrendStats) // Then, we store the metrics. 
- storeMetric := func(dest lib.SummaryMetrics, info lib.SummaryMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { + storeMetric := func( + dest lib.SummaryMetrics, + info lib.SummaryMetricInfo, + sink metrics.Sink, + testDuration time.Duration, + summaryTrendStats []string, + ) { summaryMetric := lib.NewSummaryMetricFrom(info, sink, testDuration, summaryTrendStats) switch { diff --git a/output/summary/summary.go b/output/summary/summary.go index 8a7daebbe42..e82c90aab6c 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -156,7 +156,8 @@ func (o *Output) storeSample(sample metrics.Sample) { } } - if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { + checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()) + if hasCheckTag && sample.Metric.Name == metrics.ChecksName { check := o.dataModel.checks.checkFor(checkName) if sample.Value == 0 { atomic.AddInt64(&check.Fails, 1) From 8cb412c77f420c782cdd0f6c836452c1173ffaca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Fri, 7 Feb 2025 14:40:00 +0100 Subject: [PATCH 35/42] Small refactor --- internal/cmd/runtime_options.go | 5 +- internal/js/runner.go | 111 ++++++++++++------ .../lib/testutils/minirunner/minirunner.go | 6 +- output/summary/data.go | 11 +- output/summary/summary.go | 3 +- 5 files changed, 96 insertions(+), 40 deletions(-) diff --git a/internal/cmd/runtime_options.go b/internal/cmd/runtime_options.go index 1f971c7dd0e..0cf9d5dfb16 100644 --- a/internal/cmd/runtime_options.go +++ b/internal/cmd/runtime_options.go @@ -31,7 +31,8 @@ extended: base + sets "global" as alias for "globalThis" flags.StringArrayP("env", "e", nil, "add/override environment variable with `VAR=value`") flags.Bool("no-thresholds", false, "don't run thresholds") flags.Bool("no-summary", false, "don't show the summary at the end of the test") - flags.String("with-summary", lib.SummaryModeCompact.String(), "determine the summary mode, \"compact\", \"full\" or \"legacy\"") + flags.String("with-summary", lib.SummaryModeCompact.String(), "determine the summary mode,"+ + " \"compact\", \"full\" or \"legacy\"") flags.String( "summary-export", "", @@ -58,7 +59,7 @@ func saveBoolFromEnv(env map[string]string, varName string, placeholder *null.Bo return nil } -func getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib.RuntimeOptions, error) { +func getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib.RuntimeOptions, error) { //nolint:funlen // TODO: refactor with composable helpers as a part of #883, to reduce copy-paste // TODO: get these options out of the JSON config file as well? opts := lib.RuntimeOptions{ diff --git a/internal/js/runner.go b/internal/js/runner.go index 76687bff475..9ea08387407 100644 --- a/internal/js/runner.go +++ b/internal/js/runner.go @@ -349,11 +349,15 @@ func (r *Runner) IsExecutable(name string) bool { } // HandleSummary calls the specified summary callback, if supplied. 
-func (r *Runner) HandleSummary(ctx context.Context, legacy *lib.LegacySummary, summary *lib.Summary) (map[string]io.Reader, error) { +func (r *Runner) HandleSummary( + ctx context.Context, + legacy *lib.LegacySummary, + summary *lib.Summary, +) (map[string]io.Reader, error) { out := make(chan metrics.SampleContainer, 100) defer close(out) - go func() { // discard all metrics + go func() { // discard all metrics for range out { //nolint:revive } }() @@ -371,6 +375,43 @@ func (r *Runner) HandleSummary(ctx context.Context, legacy *lib.LegacySummary, s }) vu.moduleVUImpl.ctx = summaryCtx + noColor, enableColors, summaryDataForJS, summaryCode := prepareHandleSummaryCall(r, legacy, summary) + + handleSummaryDataAsValue := vu.Runtime.ToValue(summaryDataForJS) + callbackResult, err := runUserProvidedHandleSummaryCallback(summaryCtx, vu, handleSummaryDataAsValue) + if err != nil { + return nil, err + } + + wrapper := strings.Replace(summaryWrapperLambdaCode, "/*JSLIB_SUMMARY_CODE*/", summaryCode, 1) + handleSummaryWrapperRaw, err := vu.Runtime.RunString(wrapper) + if err != nil { + return nil, fmt.Errorf("unexpected error while getting the summary wrapper: %w", err) + } + handleSummaryWrapper, ok := sobek.AssertFunction(handleSummaryWrapperRaw) + if !ok { + return nil, fmt.Errorf("unexpected error did not get a callable summary wrapper") + } + + wrapperArgs := prepareHandleWrapperArgs(vu, noColor, enableColors, callbackResult, handleSummaryDataAsValue) + rawResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryWrapper, nil, wrapperArgs...) + + if deadlineError := r.checkDeadline(summaryCtx, consts.HandleSummaryFn, rawResult, err); deadlineError != nil { + return nil, deadlineError + } + + if err != nil { + return nil, fmt.Errorf("unexpected error while generating the summary: %w", err) + } + + return getSummaryResult(rawResult) +} + +func prepareHandleSummaryCall( + r *Runner, + legacy *lib.LegacySummary, + summary *lib.Summary, +) (bool, bool, interface{}, string) { var ( noColor bool enableColors bool @@ -393,55 +434,57 @@ func (r *Runner) HandleSummary(ctx context.Context, legacy *lib.LegacySummary, s summaryCode = jslibSummaryCode } - callbackResult := sobek.Undefined() - fn := vu.getExported(consts.HandleSummaryFn) // TODO: rename to UserDefinedHandleSummaryFn? 
- if fn != nil { - handleSummaryFn, ok := sobek.AssertFunction(fn) - if !ok { - return nil, fmt.Errorf("exported identifier %s must be a function", consts.HandleSummaryFn) - } + return noColor, enableColors, summaryDataForJS, summaryCode +} - callbackResult, _, _, err = vu.runFn(summaryCtx, false, handleSummaryFn, nil, vu.Runtime.ToValue(summaryDataForJS)) - if err != nil { - errText, fields := errext.Format(err) - r.preInitState.Logger.WithFields(fields).Error(errText) - } +func runUserProvidedHandleSummaryCallback( + summaryCtx context.Context, + vu *VU, + summaryData sobek.Value, +) (sobek.Value, error) { + fn := vu.getExported(consts.HandleSummaryFn) + if fn == nil { + return sobek.Undefined(), nil } - wrapper := strings.Replace(summaryWrapperLambdaCode, "/*JSLIB_SUMMARY_CODE*/", summaryCode, 1) - handleSummaryWrapperRaw, err := vu.Runtime.RunString(wrapper) - if err != nil { - return nil, fmt.Errorf("unexpected error while getting the summary wrapper: %w", err) - } - handleSummaryWrapper, ok := sobek.AssertFunction(handleSummaryWrapperRaw) + handleSummaryFn, ok := sobek.AssertFunction(fn) if !ok { - return nil, fmt.Errorf("unexpected error did not get a callable summary wrapper") + return nil, fmt.Errorf("exported identifier %s must be a function", consts.HandleSummaryFn) } + callbackResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryFn, nil, summaryData) + if err != nil { + errText, fields := errext.Format(err) + vu.Runner.preInitState.Logger.WithFields(fields).Error(errText) + } + + // In case of err, we only want to log it, + // but still proceed with the built-in summary handler, so we return nil. + return callbackResult, nil +} + +func prepareHandleWrapperArgs( + vu *VU, + noColor bool, enableColors bool, + callbackResult sobek.Value, + summaryDataForJS interface{}, +) []sobek.Value { options := map[string]interface{}{ // TODO: improve when we can easily export all option values, including defaults? - "summaryTrendStats": r.Bundle.Options.SummaryTrendStats, - "summaryTimeUnit": r.Bundle.Options.SummaryTimeUnit.String, + "summaryTrendStats": vu.Runner.Bundle.Options.SummaryTrendStats, + "summaryTimeUnit": vu.Runner.Bundle.Options.SummaryTimeUnit.String, "noColor": noColor, // TODO: move to the (runtime) options "enableColors": enableColors, } wrapperArgs := []sobek.Value{ callbackResult, - vu.Runtime.ToValue(r.Bundle.preInitState.RuntimeOptions.SummaryExport.String), + vu.Runtime.ToValue(vu.Runner.Bundle.preInitState.RuntimeOptions.SummaryExport.String), vu.Runtime.ToValue(summaryDataForJS), vu.Runtime.ToValue(options), } - rawResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryWrapper, nil, wrapperArgs...) 
- - if deadlineError := r.checkDeadline(summaryCtx, consts.HandleSummaryFn, rawResult, err); deadlineError != nil { - return nil, deadlineError - } - if err != nil { - return nil, fmt.Errorf("unexpected error while generating the summary: %w", err) - } - return getSummaryResult(rawResult) + return wrapperArgs } func (r *Runner) checkDeadline(ctx context.Context, name string, result sobek.Value, err error) error { @@ -449,7 +492,7 @@ func (r *Runner) checkDeadline(ctx context.Context, name string, result sobek.Va return nil } - // deadline is reached so we have timeouted but this might've not been registered correctly + // deadline is reached so we have timed-outed but this might've not been registered correctly // we could have an error that is not context.Canceled in which case we should return it instead //nolint:errorlint if err, ok := err.(*sobek.InterruptedError); ok && result != nil && err.Value() != context.Canceled { diff --git a/internal/lib/testutils/minirunner/minirunner.go b/internal/lib/testutils/minirunner/minirunner.go index b99e1e3bb44..f8b8e985f36 100644 --- a/internal/lib/testutils/minirunner/minirunner.go +++ b/internal/lib/testutils/minirunner/minirunner.go @@ -108,7 +108,11 @@ func (r *MiniRunner) SetOptions(opts lib.Options) error { } // HandleSummary calls the specified summary callback, if supplied. -func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.LegacySummary, _ *lib.Summary) (map[string]io.Reader, error) { +func (r *MiniRunner) HandleSummary( + ctx context.Context, + s *lib.LegacySummary, + _ *lib.Summary, +) (map[string]io.Reader, error) { if r.HandleSummaryFn != nil { return r.HandleSummaryFn(ctx, s) } diff --git a/output/summary/data.go b/output/summary/data.go index 74d42c15e24..d17efcf0303 100644 --- a/output/summary/data.go +++ b/output/summary/data.go @@ -72,7 +72,8 @@ func (a aggregatedGroupData) groupDataFor(group string) aggregatedGroupData { func (a aggregatedGroupData) addSample(sample metrics.Sample) { a.aggregatedMetrics.addSample(sample) - if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { + checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()) + if hasCheckTag && sample.Metric.Name == metrics.ChecksName { check := a.checks.checkFor(checkName) if sample.Value == 0 { atomic.AddInt64(&check.Fails, 1) @@ -164,7 +165,13 @@ func populateSummaryGroup( populateSummaryChecks(summaryGroup, groupData, testRunDuration, summaryTrendStats) // Then, we store the metrics. 
- storeMetric := func(dest lib.SummaryMetrics, info lib.SummaryMetricInfo, sink metrics.Sink, testDuration time.Duration, summaryTrendStats []string) { + storeMetric := func( + dest lib.SummaryMetrics, + info lib.SummaryMetricInfo, + sink metrics.Sink, + testDuration time.Duration, + summaryTrendStats []string, + ) { summaryMetric := lib.NewSummaryMetricFrom(info, sink, testDuration, summaryTrendStats) switch { diff --git a/output/summary/summary.go b/output/summary/summary.go index 8a7daebbe42..e82c90aab6c 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -156,7 +156,8 @@ func (o *Output) storeSample(sample metrics.Sample) { } } - if checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()); hasCheckTag && sample.Metric.Name == metrics.ChecksName { + checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()) + if hasCheckTag && sample.Metric.Name == metrics.ChecksName { check := o.dataModel.checks.checkFor(checkName) if sample.Value == 0 { atomic.AddInt64(&check.Fails, 1) From cf87057277ca7c744d0cb493a061215a1ed5ed34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Fri, 7 Feb 2025 17:20:30 +0100 Subject: [PATCH 36/42] Move full-summary example to internal/cmd/testdata --- .../full-summary => internal/cmd/testdata/summary}/api.js | 0 .../full-summary => internal/cmd/testdata/summary}/browser.js | 0 .../full-summary => internal/cmd/testdata/summary}/grpc.js | 2 +- .../script.js => internal/cmd/testdata/summary/main.js | 0 .../full-summary => internal/cmd/testdata/summary}/ws.js | 0 5 files changed, 1 insertion(+), 1 deletion(-) rename {playground/full-summary => internal/cmd/testdata/summary}/api.js (100%) rename {playground/full-summary => internal/cmd/testdata/summary}/browser.js (100%) rename {playground/full-summary => internal/cmd/testdata/summary}/grpc.js (82%) rename playground/full-summary/script.js => internal/cmd/testdata/summary/main.js (100%) rename {playground/full-summary => internal/cmd/testdata/summary}/ws.js (100%) diff --git a/playground/full-summary/api.js b/internal/cmd/testdata/summary/api.js similarity index 100% rename from playground/full-summary/api.js rename to internal/cmd/testdata/summary/api.js diff --git a/playground/full-summary/browser.js b/internal/cmd/testdata/summary/browser.js similarity index 100% rename from playground/full-summary/browser.js rename to internal/cmd/testdata/summary/browser.js diff --git a/playground/full-summary/grpc.js b/internal/cmd/testdata/summary/grpc.js similarity index 82% rename from playground/full-summary/grpc.js rename to internal/cmd/testdata/summary/grpc.js index f5da2769dc4..725e948a87a 100644 --- a/playground/full-summary/grpc.js +++ b/internal/cmd/testdata/summary/grpc.js @@ -2,7 +2,7 @@ import grpc from 'k6/net/grpc'; import {check} from 'k6' const GRPC_ADDR = __ENV.GRPC_ADDR || '127.0.0.1:10000'; -const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../../internal/lib/testutils/grpcservice/route_guide.proto'; +const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../../../lib/testutils/grpcservice/route_guide.proto'; let client = new grpc.Client(); diff --git a/playground/full-summary/script.js b/internal/cmd/testdata/summary/main.js similarity index 100% rename from playground/full-summary/script.js rename to internal/cmd/testdata/summary/main.js diff --git a/playground/full-summary/ws.js b/internal/cmd/testdata/summary/ws.js similarity index 100% rename from playground/full-summary/ws.js rename to internal/cmd/testdata/summary/ws.js 
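For reference, the refactored entry point above is exercised by the updated tests with the legacy summary passed explicitly and the new summary left nil. The following is a minimal sketch assuming the getSimpleRunner and createTestLegacySummary helpers from internal/js/summary_test.go shown earlier in this series; it is not an additional hunk.

	legacySummary := createTestLegacySummary(t)
	result, err := runner.HandleSummary(context.Background(), legacySummary, nil)
	require.NoError(t, err)
	require.Len(t, result, 1) // only the "stdout" entry produced by the built-in wrapper
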
From 1bb7aaf5b33232de8945aac92eb7ca09e6ae829d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Fri, 7 Feb 2025 17:23:01 +0100 Subject: [PATCH 37/42] Pass render options missing to ANSIFormatter --- internal/js/summary.js | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/internal/js/summary.js b/internal/js/summary.js index 2b8a523f269..c3cbfda8835 100644 --- a/internal/js/summary.js +++ b/internal/js/summary.js @@ -25,7 +25,7 @@ function generateTextSummary(report, options) { const context = new RenderContext(0); // Create a formatter with default settings (colors enabled) - const formatter = new ANSIFormatter(); + const formatter = new ANSIFormatter(mergedOpts); const reportGenerator = new TestReportGenerator( formatter, @@ -617,7 +617,7 @@ function renderTitle( title, formatter, renderContext, - options = { prefix: titlePrefix, suffix: '\n' }, + options = {prefix: titlePrefix, suffix: '\n'}, ) { return renderContext.indent( `${options.prefix} ${formatter.boldify(title)} ${options.suffix || ''}`, @@ -649,16 +649,16 @@ function renderCheck(check, formatter, renderContext) { const checkName = formatter.decorate(failMark + ' ' + check.name, 'red'); const results = formatter.decorate( subtitlePrefix + - ' ' + - successfulPct + - '% — ' + - successMark + - ' ' + - check.passes + - ' / ' + - failMark + - ' ' + - check.fails, + ' ' + + successfulPct + + '% — ' + + successMark + + ' ' + + check.passes + + ' / ' + + failMark + + ' ' + + check.fails, 'red', ); @@ -687,7 +687,7 @@ function renderChecks(checks, formatter, renderContext, options = {}) { // Add indentation to the render context for checks renderContext = renderContext.indentedContext(1); - const { showPassedChecks = true, showFailedChecks = true } = options; + const {showPassedChecks = true, showFailedChecks = true} = options; // Process each check and filter based on options const renderedChecks = checks.ordered_checks @@ -878,7 +878,7 @@ function renderMetricLine( formatter, renderContext, ) { - const { maxNameWidth } = info; + const {maxNameWidth} = info; const displayedName = renderMetricDisplayName(name); const fmtIndent = renderContext.indentLevel(); @@ -941,7 +941,7 @@ function renderMetricValueForThresholds( info, formatter, ) { - const { trendStats, trendCols, nonTrendValues, nonTrendExtras} = info; + const {trendStats, trendCols, nonTrendValues, nonTrendExtras} = info; const thresholdAgg = threshold.source.split(/[=><]/)[0].trim(); let value; @@ -1247,7 +1247,7 @@ function strWidth(s) { function renderMetricDisplayName(name) { const subMetricPos = name.indexOf('{'); if (subMetricPos >= 0) { - return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }'; + return ' { ' + name.substring(subMetricPos + 1, name.length - 1) + ' }'; } return name; } @@ -1272,9 +1272,9 @@ function humanizeBytes(bytes) { } const unitMap = { - s: { unit: 's', coef: 0.001 }, - ms: { unit: 'ms', coef: 1 }, - us: { unit: 'µs', coef: 1000 }, + s: {unit: 's', coef: 0.001}, + ms: {unit: 'ms', coef: 1}, + us: {unit: 'µs', coef: 1000}, }; /** From bf19d6c861861a2e75d82b57e22bd34194544a97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Fri, 7 Feb 2025 18:01:43 +0100 Subject: [PATCH 38/42] Accommodate tests to the new summary format --- internal/cmd/run.go | 10 +- internal/cmd/run_test.go | 6 +- internal/cmd/tests/cmd_run_grpc_test.go | 4 +- internal/cmd/tests/cmd_run_test.go | 102 ++++++++++-------- 
internal/cmd/ui.go | 3 +- .../js/modules/k6/webcrypto/cmd_run_test.go | 2 +- internal/js/runner.go | 2 +- output/summary/summary.go | 31 +++++- 8 files changed, 101 insertions(+), 59 deletions(-) diff --git a/internal/cmd/run.go b/internal/cmd/run.go index b9946a24537..05476c21443 100644 --- a/internal/cmd/run.go +++ b/internal/cmd/run.go @@ -237,8 +237,14 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { defer func() { logger.Debug("Generating the end-of-test summary...") - summary := summaryOutput.Summary(executionState, test.initRunner.GetOptions()) - summary.TestRunDuration = executionState.GetCurrentTestRunDuration() + summary := summaryOutput.Summary( + executionState, + metricsEngine.ObservedMetrics, + test.initRunner.GetOptions(), + ) + + // TODO: We should probably try to move these out of the summary, + // likely as an additional argument like options. summary.NoColor = c.gs.Flags.NoColor summary.UIState = lib.UIState{ IsStdOutTTY: c.gs.Stdout.IsTTY, diff --git a/internal/cmd/run_test.go b/internal/cmd/run_test.go index fbfed6e8276..39ecd27127b 100644 --- a/internal/cmd/run_test.go +++ b/internal/cmd/run_test.go @@ -326,7 +326,7 @@ func TestThresholdsRuntimeBehavior(t *testing.T) { name: "#2518: submetrics without values should be rendered under their parent metric #2518", testFilename: "thresholds/thresholds_on_submetric_without_samples.js", expExitCode: 0, - expStdoutContains: " one..................: 0 0/s\n { tag:xyz }........: 0 0/s\n", + expStdoutContains: " one....................................: 0 0/s\n { tag:xyz }..........................: 0 0/s\n", }, { name: "#2512: parsing threshold names containing parsable tokens should be valid", @@ -337,7 +337,7 @@ func TestThresholdsRuntimeBehavior(t *testing.T) { name: "#2520: thresholds over metrics without values should avoid division by zero and displaying NaN values", testFilename: "thresholds/empty_sink_no_nan.js", expExitCode: 0, - expStdoutContains: "rate.................: 0.00%", + expStdoutContains: "rate...................................: 0.00%", expStdoutNotContains: "NaN", }, } @@ -354,7 +354,7 @@ func TestThresholdsRuntimeBehavior(t *testing.T) { ts := tests.NewGlobalTestState(t) require.NoError(t, fsext.WriteFile(ts.FS, filepath.Join(ts.Cwd, tc.testFilename), testScript, 0o644)) - ts.CmdArgs = []string{"k6", "run", tc.testFilename, "--with-summary", "legacy"} + ts.CmdArgs = []string{"k6", "run", tc.testFilename} ts.ExpectedExitCode = int(tc.expExitCode) newRootCommand(ts.GlobalState).execute() diff --git a/internal/cmd/tests/cmd_run_grpc_test.go b/internal/cmd/tests/cmd_run_grpc_test.go index e6edb90c913..81a594cdbc0 100644 --- a/internal/cmd/tests/cmd_run_grpc_test.go +++ b/internal/cmd/tests/cmd_run_grpc_test.go @@ -15,7 +15,7 @@ import ( const projectRootPath = "../../../" // TestGRPCInputOutput runs same k6's scripts that we have in example folder -// it check that output contains/not contains cetane things +// it checks that output contains/not contains cetane things func TestGRPCInputOutput(t *testing.T) { t.Parallel() @@ -105,7 +105,7 @@ func TestGRPCInputOutput(t *testing.T) { script, err := os.ReadFile(test.script) //nolint:forbidigo require.NoError(t, err) - ts := getSingleFileTestState(t, string(script), []string{"-v", "--log-output=stdout", "--no-usage-report", "--with-summary", "legacy"}, 0) + ts := getSingleFileTestState(t, string(script), []string{"-v", "--log-output=stdout", "--no-usage-report"}, 0) ts.Env = map[string]string{ "GRPC_ADDR": tb.Addr, "GRPC_PROTO_PATH": 
"./proto.proto", diff --git a/internal/cmd/tests/cmd_run_test.go b/internal/cmd/tests/cmd_run_test.go index 5f1c0970502..4fa84419aba 100644 --- a/internal/cmd/tests/cmd_run_test.go +++ b/internal/cmd/tests/cmd_run_test.go @@ -66,7 +66,7 @@ func TestSimpleTestStdin(t *testing.T) { t.Parallel() ts := NewGlobalTestState(t) - ts.CmdArgs = []string{"k6", "run", "-", "--with-summary", "legacy"} + ts.CmdArgs = []string{"k6", "run", "-"} ts.Stdin = bytes.NewBufferString(`export default function() {};`) cmd.ExecuteWithGlobalState(ts.GlobalState) @@ -241,7 +241,6 @@ func getSingleFileTestState(tb testing.TB, script string, cliFlags []string, exp if cliFlags == nil { cliFlags = []string{"-v", "--log-output=stdout"} } - cliFlags = append(cliFlags, "--with-summary=legacy") ts := NewGlobalTestState(tb) require.NoError(tb, fsext.WriteFile(ts.FS, filepath.Join(ts.Cwd, "test.js"), []byte(script), 0o644)) @@ -330,17 +329,17 @@ func TestMetricsAndThresholds(t *testing.T) { var summary map[string]interface{} require.NoError(t, json.Unmarshal(ts.Stdout.Bytes(), &summary)) - metrics, ok := summary["metrics"].(map[string]interface{}) + thresholds, ok := summary["thresholds"].(map[string]interface{}) require.True(t, ok) - teardownCounter, ok := metrics["teardown_counter"].(map[string]interface{}) + teardownCounter, ok := thresholds["teardown_counter"].(map[string]interface{}) require.True(t, ok) - teardownThresholds, ok := teardownCounter["thresholds"].(map[string]interface{}) + teardownCounterThresholds, ok := teardownCounter["thresholds"].([]interface{}) require.True(t, ok) - expected := map[string]interface{}{"count == 1": map[string]interface{}{"ok": true}} - require.Equal(t, expected, teardownThresholds) + expected := []interface{}{map[string]interface{}{"source": "count == 1", "ok": true}} + require.Equal(t, expected, teardownCounterThresholds) } func TestSSLKEYLOGFILEAbsolute(t *testing.T) { @@ -469,9 +468,9 @@ func TestSubMetricThresholdNoData(t *testing.T) { assert.Len(t, ts.LoggerHook.Drain(), 0) assert.Contains(t, ts.Stdout.String(), ` - one..................: 0 0/s - { tag:xyz }........: 0 0/s - two..................: 42`) + one....................................: 0 0/s + { tag:xyz }..........................: 0 0/s + two....................................: 42`) } func getTestServer(tb testing.TB, routes map[string]http.Handler) *httptest.Server { @@ -547,7 +546,7 @@ func getSimpleCloudOutputTestState( if cliFlags == nil { cliFlags = []string{"-v", "--log-output=stdout"} } - cliFlags = append(cliFlags, "--out", "cloud", "--with-summary=legacy") + cliFlags = append(cliFlags, "--out", "cloud") srv := getCloudTestEndChecker(tb, 111, nil, expRunStatus, expResultStatus) ts := getSingleFileTestState(tb, script, cliFlags, expExitCode) @@ -617,10 +616,10 @@ func TestSetupTeardownThresholds(t *testing.T) { stdOut := ts.Stdout.String() t.Log(stdOut) - assert.Contains(t, stdOut, `✓ checks.........................: 100.00% 8 out of 8`) - assert.Contains(t, stdOut, `✓ http_reqs......................: 8`) - assert.Contains(t, stdOut, `✓ iterations.....................: 5`) - assert.Contains(t, stdOut, `✓ setup_teardown.................: 3`) + assert.Contains(t, stdOut, "checks\n ✓ 'rate == 1' rate=100.00%") + assert.Contains(t, stdOut, "http_reqs\n ✓ 'count == 8' count=8") + assert.Contains(t, stdOut, "iterations\n ✓ 'count == 5' count=5") + assert.Contains(t, stdOut, "setup_teardown\n ✓ 'count == 3' count=3") logMsgs := ts.LoggerHook.Drain() for _, msg := range logMsgs { @@ -670,10 +669,10 @@ func 
TestThresholdsFailed(t *testing.T) { assert.True(t, testutils.LogContains(ts.LoggerHook.Drain(), logrus.ErrorLevel, expErr)) stdout := ts.Stdout.String() t.Log(stdout) - assert.Contains(t, stdout, ` ✓ iterations...........: 3`) - assert.Contains(t, stdout, ` ✗ { scenario:sc1 }...: 1`) - assert.Contains(t, stdout, ` ✗ { scenario:sc2 }...: 2`) - assert.Contains(t, stdout, ` ✓ { scenario:sc3 }...: 0 0/s`) + assert.Contains(t, stdout, " iterations\n ✓ 'count == 3' count=3") + assert.Contains(t, stdout, " {scenario:sc1}\n ✗ 'count == 2' count=1") + assert.Contains(t, stdout, " ✗ 'count == 2' count=1") + assert.Contains(t, stdout, " {scenario:sc2}\n ✗ 'count == 1' count=2") } func TestAbortedByThreshold(t *testing.T) { @@ -712,7 +711,7 @@ func TestAbortedByThreshold(t *testing.T) { assert.True(t, testutils.LogContains(ts.LoggerHook.Drain(), logrus.ErrorLevel, expErr)) stdOut := ts.Stdout.String() t.Log(stdOut) - assert.Contains(t, stdOut, `✗ iterations`) + assert.Contains(t, stdOut, "iterations\n ✗ 'count == 1' count=2") assert.Contains(t, stdOut, `teardown() called`) assert.Contains(t, stdOut, `level=debug msg="Metrics emission of VUs and VUsMax metrics stopped"`) assert.Contains(t, stdOut, `level=debug msg="Metrics and traces processing finished!"`) @@ -763,9 +762,20 @@ func TestAbortedByUserWithGoodThresholds(t *testing.T) { assert.True(t, testutils.LogContains(logs, logrus.ErrorLevel, `test run was aborted because k6 received a 'interrupt' signal`)) stdout := ts.Stdout.String() t.Log(stdout) - assert.Contains(t, stdout, `✓ iterations`) - assert.Contains(t, stdout, `✓ tc`) - assert.Contains(t, stdout, `✓ { group:::teardown }`) + assert.Contains(t, stdout, ` + iterations + ✓ 'count >= 1' count=3 + + tc + ✓ 'count == 1' count=1 + + {group:::setup} + ✓ 'count == 0' count=0 + ✓ 'count == 0' count=0 + + {group:::teardown} + ✓ 'count == 1' count=1 + ✓ 'count == 1' count=1`) assert.Contains(t, stdout, `Stopping k6 in response to signal`) assert.Contains(t, stdout, `level=debug msg="Metrics emission of VUs and VUsMax metrics stopped"`) assert.Contains(t, stdout, `level=debug msg="Metrics and traces processing finished!"`) @@ -1374,7 +1384,7 @@ func TestMetricTagAndSetupDataIsolation(t *testing.T) { t.Log(stdout) assert.NotContains(t, stdout, "execution: local") // because of --quiet assert.NotContains(t, stdout, "output: cloud") // because of --quiet - assert.Equal(t, 12, strings.Count(stdout, "✓")) + assert.Equal(t, 25, strings.Count(stdout, "✓")) } func getSampleValues(t *testing.T, jsonOutput []byte, metric string, tags map[string]string) []float64 { @@ -1541,7 +1551,7 @@ func TestMinIterationDuration(t *testing.T) { stdout := ts.Stdout.String() t.Log(stdout) - assert.Contains(t, stdout, "✓ test_counter.........: 3") + assert.Contains(t, stdout, "test_counter\n ✓ 'count == 3") } func TestMetricNameError(t *testing.T) { @@ -1709,7 +1719,7 @@ func TestRunWithCloudOutputOverrides(t *testing.T) { t.Log(stdout) assert.Contains(t, stdout, "execution: local") assert.Contains(t, stdout, "output: cloud (https://bogus.url/runs/132), json (results.json)") - assert.Contains(t, stdout, "iterations...........: 1") + assert.Contains(t, stdout, "iterations.............................: 1") } func TestRunWithCloudOutputCustomConfigAndOverridesLegacyCloudOption(t *testing.T) { @@ -1891,7 +1901,7 @@ func TestUIRenderOutput(t *testing.T) { t.Parallel() ts := NewGlobalTestState(t) - ts.CmdArgs = []string{"k6", "run", "--with-summary=legacy"} + ts.CmdArgs = []string{"k6", "run"} for _, o := range tc.outputs { 
ts.CmdArgs = append(ts.CmdArgs, "-o") ts.CmdArgs = append(ts.CmdArgs, o) @@ -2094,14 +2104,14 @@ func TestEventSystemError(t *testing.T) { test.abort('oops!'); } `, expLog: []string{ - "got event Init with data ''", - "got event TestStart with data ''", - "got event IterStart with data '{Iteration:0 VUID:1 ScenarioName:default Error:}'", - "got event IterEnd with data '{Iteration:0 VUID:1 ScenarioName:default Error:test aborted: oops! at default (file:///-:11:16(5))}'", - "got event TestEnd with data ''", - "got event Exit with data '&{Error:test aborted: oops! at default (file:///-:11:16(5))}'", - "test aborted: oops! at default (file:///-:11:16(5))", - }, + "got event Init with data ''", + "got event TestStart with data ''", + "got event IterStart with data '{Iteration:0 VUID:1 ScenarioName:default Error:}'", + "got event IterEnd with data '{Iteration:0 VUID:1 ScenarioName:default Error:test aborted: oops! at default (file:///-:11:16(5))}'", + "got event TestEnd with data ''", + "got event Exit with data '&{Error:test aborted: oops! at default (file:///-:11:16(5))}'", + "test aborted: oops! at default (file:///-:11:16(5))", + }, expExitCode: exitcodes.ScriptAborted, }, { @@ -2126,17 +2136,17 @@ func TestEventSystemError(t *testing.T) { throw new Error('oops!'); } `, expLog: []string{ - "got event Init with data ''", - "got event TestStart with data ''", - "got event IterStart with data '{Iteration:0 VUID:1 ScenarioName:default Error:}'", - "got event IterEnd with data '{Iteration:0 VUID:1 ScenarioName:default Error:Error: oops!\n\tat default (file:///-:9:12(3))\n}'", - "Error: oops!\n\tat default (file:///-:9:12(3))\n", - "got event IterStart with data '{Iteration:1 VUID:1 ScenarioName:default Error:}'", - "got event IterEnd with data '{Iteration:1 VUID:1 ScenarioName:default Error:Error: oops!\n\tat default (file:///-:9:12(3))\n}'", - "Error: oops!\n\tat default (file:///-:9:12(3))\n", - "got event TestEnd with data ''", - "got event Exit with data '&{Error:}'", - }, + "got event Init with data ''", + "got event TestStart with data ''", + "got event IterStart with data '{Iteration:0 VUID:1 ScenarioName:default Error:}'", + "got event IterEnd with data '{Iteration:0 VUID:1 ScenarioName:default Error:Error: oops!\n\tat default (file:///-:9:12(3))\n}'", + "Error: oops!\n\tat default (file:///-:9:12(3))\n", + "got event IterStart with data '{Iteration:1 VUID:1 ScenarioName:default Error:}'", + "got event IterEnd with data '{Iteration:1 VUID:1 ScenarioName:default Error:Error: oops!\n\tat default (file:///-:9:12(3))\n}'", + "Error: oops!\n\tat default (file:///-:9:12(3))\n", + "got event TestEnd with data ''", + "got event Exit with data '&{Error:}'", + }, expExitCode: 0, }, } diff --git a/internal/cmd/ui.go b/internal/cmd/ui.go index 48723f3a574..df733a502d1 100644 --- a/internal/cmd/ui.go +++ b/internal/cmd/ui.go @@ -23,6 +23,7 @@ import ( "go.k6.io/k6/lib" "go.k6.io/k6/lib/consts" "go.k6.io/k6/output" + "go.k6.io/k6/output/summary" ) const ( @@ -116,7 +117,7 @@ func printExecutionDescription( for _, out := range outputs { desc := out.Description() switch desc { - case engine.IngesterDescription, lib.GroupSummaryDescription: + case engine.IngesterDescription, lib.GroupSummaryDescription, summary.OutputName: continue } if strings.HasPrefix(desc, dashboard.OutputName) { diff --git a/internal/js/modules/k6/webcrypto/cmd_run_test.go b/internal/js/modules/k6/webcrypto/cmd_run_test.go index 9cf8365788f..3ee0692fd2f 100644 --- a/internal/js/modules/k6/webcrypto/cmd_run_test.go +++ 
b/internal/js/modules/k6/webcrypto/cmd_run_test.go @@ -81,7 +81,7 @@ func TestExamplesInputOutput(t *testing.T) { script, err := os.ReadFile(filepath.Clean(file)) //nolint:forbidigo // we read an example directly require.NoError(t, err) - ts := getSingleFileTestState(t, string(script), []string{"-v", "--log-output=stdout", "--with-summary=legacy"}, 0) + ts := getSingleFileTestState(t, string(script), []string{"-v", "--log-output=stdout"}, 0) cmd.ExecuteWithGlobalState(ts.GlobalState) diff --git a/internal/js/runner.go b/internal/js/runner.go index 9ea08387407..4db794e786b 100644 --- a/internal/js/runner.go +++ b/internal/js/runner.go @@ -357,7 +357,7 @@ func (r *Runner) HandleSummary( out := make(chan metrics.SampleContainer, 100) defer close(out) - go func() { // discard all metrics + go func() { // discard all metrics for range out { //nolint:revive } }() diff --git a/output/summary/summary.go b/output/summary/summary.go index e82c90aab6c..7de50cb7f9b 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -43,9 +43,11 @@ func New(params output.Params) (*Output, error) { }, nil } +const OutputName = "summary" + // Description returns a human-readable description of the output. func (o *Output) Description() string { - return "summary" + return OutputName } // Start starts a new output.PeriodicFlusher to collect and flush metrics that will be @@ -109,12 +111,21 @@ func (o *Output) flushSample(sample metrics.Sample) { } // Summary returns a lib.Summary of the test run. -func (o *Output) Summary(executionState *lib.ExecutionState, options lib.Options) *lib.Summary { +func (o *Output) Summary( + executionState *lib.ExecutionState, + observedMetrics map[string]*metrics.Metric, + options lib.Options) *lib.Summary { + testRunDuration := executionState.GetCurrentTestRunDuration() + summary := lib.NewSummary() + summary.TestRunDuration = testRunDuration - testRunDuration := executionState.GetCurrentTestRunDuration() summaryTrendStats := options.SummaryTrendStats + // Process the observed metrics. This is necessary to ensure that we have collected + // all metrics, even those that have no samples, so that we can render them in the summary. + o.processObservedMetrics(observedMetrics) + // Populate the thresholds. summary.SummaryThresholds = summaryThresholds(o.dataModel.thresholds, testRunDuration, summaryTrendStats) @@ -166,3 +177,17 @@ func (o *Output) storeSample(sample metrics.Sample) { } } } + +// processObservedMetrics is responsible for ensuring that we have collected +// all metrics, even those that have no samples, so that we can render them in the summary. 
+func (o *Output) processObservedMetrics(observedMetrics map[string]*metrics.Metric) { + for _, m := range observedMetrics { + if _, exists := o.dataModel.aggregatedMetrics[m.Name]; !exists { + o.dataModel.aggregatedMetrics[m.Name] = aggregatedMetric{ + Metric: m, + Sink: m.Sink, + } + o.dataModel.storeThresholdsFor(m) + } + } +} From 75af9ab81a9a855e42a8a7f996d6d60810d0bb26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Fri, 7 Feb 2025 18:37:52 +0100 Subject: [PATCH 39/42] Refine flaky test with undetermined amount of iterations --- internal/cmd/runtime_options.go | 5 ++++- internal/cmd/tests/cmd_run_test.go | 2 +- output/summary/summary.go | 4 +++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/internal/cmd/runtime_options.go b/internal/cmd/runtime_options.go index 0cf9d5dfb16..5c9f88afaed 100644 --- a/internal/cmd/runtime_options.go +++ b/internal/cmd/runtime_options.go @@ -59,7 +59,10 @@ func saveBoolFromEnv(env map[string]string, varName string, placeholder *null.Bo return nil } -func getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib.RuntimeOptions, error) { //nolint:funlen +func getRuntimeOptions( + flags *pflag.FlagSet, + environment map[string]string, +) (lib.RuntimeOptions, error) { //nolint:funlen // TODO: refactor with composable helpers as a part of #883, to reduce copy-paste // TODO: get these options out of the JSON config file as well? opts := lib.RuntimeOptions{ diff --git a/internal/cmd/tests/cmd_run_test.go b/internal/cmd/tests/cmd_run_test.go index 4fa84419aba..a173675ea6c 100644 --- a/internal/cmd/tests/cmd_run_test.go +++ b/internal/cmd/tests/cmd_run_test.go @@ -711,7 +711,7 @@ func TestAbortedByThreshold(t *testing.T) { assert.True(t, testutils.LogContains(ts.LoggerHook.Drain(), logrus.ErrorLevel, expErr)) stdOut := ts.Stdout.String() t.Log(stdOut) - assert.Contains(t, stdOut, "iterations\n ✗ 'count == 1' count=2") + assert.Contains(t, stdOut, "iterations\n ✗ 'count == 1'") assert.Contains(t, stdOut, `teardown() called`) assert.Contains(t, stdOut, `level=debug msg="Metrics emission of VUs and VUsMax metrics stopped"`) assert.Contains(t, stdOut, `level=debug msg="Metrics and traces processing finished!"`) diff --git a/output/summary/summary.go b/output/summary/summary.go index 7de50cb7f9b..17c10a8cf77 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -43,6 +43,7 @@ func New(params output.Params) (*Output, error) { }, nil } +// OutputName is the name of the output. const OutputName = "summary" // Description returns a human-readable description of the output. 
@@ -114,7 +115,8 @@ func (o *Output) flushSample(sample metrics.Sample) { func (o *Output) Summary( executionState *lib.ExecutionState, observedMetrics map[string]*metrics.Metric, - options lib.Options) *lib.Summary { + options lib.Options, +) *lib.Summary { testRunDuration := executionState.GetCurrentTestRunDuration() summary := lib.NewSummary() From 9c1ed7028419c25b3a66a501f2b46360dd8310fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Fri, 7 Feb 2025 22:14:05 +0100 Subject: [PATCH 40/42] Refactor runtime options init func --- internal/cmd/runtime_options.go | 121 ++++++++++++++++------------- internal/cmd/tests/cmd_run_test.go | 38 ++++----- 2 files changed, 86 insertions(+), 73 deletions(-) diff --git a/internal/cmd/runtime_options.go b/internal/cmd/runtime_options.go index 5c9f88afaed..dceda9f955f 100644 --- a/internal/cmd/runtime_options.go +++ b/internal/cmd/runtime_options.go @@ -43,28 +43,36 @@ extended: base + sets "global" as alias for "globalThis" return flags } -func saveBoolFromEnv(env map[string]string, varName string, placeholder *null.Bool) error { - strValue, ok := env[varName] - if !ok { - return nil +func getRuntimeOptions( + flags *pflag.FlagSet, + environment map[string]string, +) (lib.RuntimeOptions, error) { + // TODO: refactor with composable helpers as a part of #883, to reduce copy-paste + // TODO: get these options out of the JSON config file as well? + opts, err := populateRuntimeOptionsFromEnv(runtimeOptionsFromFlags(flags), environment) + if err != nil { + return opts, err } - val, err := strconv.ParseBool(strValue) + + // Set/overwrite environment variables with custom user-supplied values + envVars, err := flags.GetStringArray("env") if err != nil { - return fmt.Errorf("env var '%s' is not a valid boolean value: %w", varName, err) + return opts, err } - // Only override if not explicitly set via the CLI flag - if !placeholder.Valid { - *placeholder = null.BoolFrom(val) + + for _, kv := range envVars { + k, v := state.ParseEnvKeyValue(kv) + // Allow only alphanumeric ASCII variable names for now + if !userEnvVarName.MatchString(k) { + return opts, fmt.Errorf("invalid environment variable name '%s'", k) + } + opts.Env[k] = v } - return nil + + return opts, nil } -func getRuntimeOptions( - flags *pflag.FlagSet, - environment map[string]string, -) (lib.RuntimeOptions, error) { //nolint:funlen - // TODO: refactor with composable helpers as a part of #883, to reduce copy-paste - // TODO: get these options out of the JSON config file as well? 
+func runtimeOptionsFromFlags(flags *pflag.FlagSet) lib.RuntimeOptions { opts := lib.RuntimeOptions{ TestType: getNullString(flags, "type"), IncludeSystemEnvVars: getNullBool(flags, "include-system-env-vars"), @@ -76,73 +84,78 @@ func getRuntimeOptions( TracesOutput: getNullString(flags, "traces-output"), Env: make(map[string]string), } + return opts +} - if envVar, ok := environment["K6_TYPE"]; ok && !opts.TestType.Valid { - // Only override if not explicitly set via the CLI flag +func populateRuntimeOptionsFromEnv(opts lib.RuntimeOptions, environment map[string]string) (lib.RuntimeOptions, error) { + // Only override if not explicitly set via the CLI flag + + if envVar, ok := environment["K6_TYPE"]; !opts.TestType.Valid && ok { opts.TestType = null.StringFrom(envVar) } - if envVar, ok := environment["K6_COMPATIBILITY_MODE"]; ok && !opts.CompatibilityMode.Valid { - // Only override if not explicitly set via the CLI flag + + if envVar, ok := environment["K6_COMPATIBILITY_MODE"]; !opts.CompatibilityMode.Valid && ok { opts.CompatibilityMode = null.StringFrom(envVar) } - if _, err := lib.ValidateCompatibilityMode(opts.CompatibilityMode.String); err != nil { - // some early validation - return opts, err - } - if envVar, ok := environment["K6_WITH_SUMMARY"]; ok && !opts.SummaryMode.Valid { + if envVar, ok := environment["K6_WITH_SUMMARY"]; !opts.SummaryMode.Valid && ok { opts.SummaryMode = null.StringFrom(envVar) } - if _, err := lib.ValidateSummaryMode(opts.SummaryMode.String); err != nil { - // some early validation - return opts, err - } if err := saveBoolFromEnv(environment, "K6_INCLUDE_SYSTEM_ENV_VARS", &opts.IncludeSystemEnvVars); err != nil { return opts, err } + if err := saveBoolFromEnv(environment, "K6_NO_THRESHOLDS", &opts.NoThresholds); err != nil { return opts, err } + if err := saveBoolFromEnv(environment, "K6_NO_SUMMARY", &opts.NoSummary); err != nil { return opts, err } - if envVar, ok := environment["K6_SUMMARY_EXPORT"]; ok { - if !opts.SummaryExport.Valid { - opts.SummaryExport = null.StringFrom(envVar) - } + if _, err := lib.ValidateCompatibilityMode(opts.CompatibilityMode.String); err != nil { + // some early validation + return opts, err } - if envVar, ok := environment["SSLKEYLOGFILE"]; ok { - if !opts.KeyWriter.Valid { - opts.KeyWriter = null.StringFrom(envVar) - } + if _, err := lib.ValidateSummaryMode(opts.SummaryMode.String); err != nil { + // some early validation + return opts, err } - if envVar, ok := environment["K6_TRACES_OUTPUT"]; ok { - if !opts.TracesOutput.Valid { - opts.TracesOutput = null.StringFrom(envVar) - } + if envVar, ok := environment["K6_SUMMARY_EXPORT"]; !opts.SummaryExport.Valid && ok { + opts.SummaryExport = null.StringFrom(envVar) } - if opts.IncludeSystemEnvVars.Bool { // If enabled, gather the actual system environment variables - opts.Env = environment + if envVar, ok := environment["SSLKEYLOGFILE"]; !opts.KeyWriter.Valid && ok { + opts.KeyWriter = null.StringFrom(envVar) } - // Set/overwrite environment variables with custom user-supplied values - envVars, err := flags.GetStringArray("env") - if err != nil { - return opts, err + if envVar, ok := environment["K6_TRACES_OUTPUT"]; !opts.TracesOutput.Valid && ok { + opts.TracesOutput = null.StringFrom(envVar) } - for _, kv := range envVars { - k, v := state.ParseEnvKeyValue(kv) - // Allow only alphanumeric ASCII variable names for now - if !userEnvVarName.MatchString(k) { - return opts, fmt.Errorf("invalid environment variable name '%s'", k) - } - opts.Env[k] = v + + // If enabled, gather the 
actual system environment variables + if opts.IncludeSystemEnvVars.Bool { + opts.Env = environment } return opts, nil } + +func saveBoolFromEnv(env map[string]string, varName string, placeholder *null.Bool) error { + strValue, ok := env[varName] + if !ok { + return nil + } + val, err := strconv.ParseBool(strValue) + if err != nil { + return fmt.Errorf("env var '%s' is not a valid boolean value: %w", varName, err) + } + // Only override if not explicitly set via the CLI flag + if !placeholder.Valid { + *placeholder = null.BoolFrom(val) + } + return nil +} diff --git a/internal/cmd/tests/cmd_run_test.go b/internal/cmd/tests/cmd_run_test.go index a173675ea6c..a9dc41326ca 100644 --- a/internal/cmd/tests/cmd_run_test.go +++ b/internal/cmd/tests/cmd_run_test.go @@ -2104,14 +2104,14 @@ func TestEventSystemError(t *testing.T) { test.abort('oops!'); } `, expLog: []string{ - "got event Init with data ''", - "got event TestStart with data ''", - "got event IterStart with data '{Iteration:0 VUID:1 ScenarioName:default Error:}'", - "got event IterEnd with data '{Iteration:0 VUID:1 ScenarioName:default Error:test aborted: oops! at default (file:///-:11:16(5))}'", - "got event TestEnd with data ''", - "got event Exit with data '&{Error:test aborted: oops! at default (file:///-:11:16(5))}'", - "test aborted: oops! at default (file:///-:11:16(5))", - }, + "got event Init with data ''", + "got event TestStart with data ''", + "got event IterStart with data '{Iteration:0 VUID:1 ScenarioName:default Error:}'", + "got event IterEnd with data '{Iteration:0 VUID:1 ScenarioName:default Error:test aborted: oops! at default (file:///-:11:16(5))}'", + "got event TestEnd with data ''", + "got event Exit with data '&{Error:test aborted: oops! at default (file:///-:11:16(5))}'", + "test aborted: oops! 
at default (file:///-:11:16(5))", + }, expExitCode: exitcodes.ScriptAborted, }, { @@ -2136,17 +2136,17 @@ func TestEventSystemError(t *testing.T) { throw new Error('oops!'); } `, expLog: []string{ - "got event Init with data ''", - "got event TestStart with data ''", - "got event IterStart with data '{Iteration:0 VUID:1 ScenarioName:default Error:}'", - "got event IterEnd with data '{Iteration:0 VUID:1 ScenarioName:default Error:Error: oops!\n\tat default (file:///-:9:12(3))\n}'", - "Error: oops!\n\tat default (file:///-:9:12(3))\n", - "got event IterStart with data '{Iteration:1 VUID:1 ScenarioName:default Error:}'", - "got event IterEnd with data '{Iteration:1 VUID:1 ScenarioName:default Error:Error: oops!\n\tat default (file:///-:9:12(3))\n}'", - "Error: oops!\n\tat default (file:///-:9:12(3))\n", - "got event TestEnd with data ''", - "got event Exit with data '&{Error:}'", - }, + "got event Init with data ''", + "got event TestStart with data ''", + "got event IterStart with data '{Iteration:0 VUID:1 ScenarioName:default Error:}'", + "got event IterEnd with data '{Iteration:0 VUID:1 ScenarioName:default Error:Error: oops!\n\tat default (file:///-:9:12(3))\n}'", + "Error: oops!\n\tat default (file:///-:9:12(3))\n", + "got event IterStart with data '{Iteration:1 VUID:1 ScenarioName:default Error:}'", + "got event IterEnd with data '{Iteration:1 VUID:1 ScenarioName:default Error:Error: oops!\n\tat default (file:///-:9:12(3))\n}'", + "Error: oops!\n\tat default (file:///-:9:12(3))\n", + "got event TestEnd with data ''", + "got event Exit with data '&{Error:}'", + }, expExitCode: 0, }, } From 58e3d021ea7509bfa7aba35b0babefb7c54e04cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Tue, 18 Feb 2025 11:56:55 +0100 Subject: [PATCH 41/42] Apply suggestions from code review Co-authored-by: Oleg Bespalov --- internal/cmd/run.go | 7 +- internal/cmd/testdata/summary/browser.js | 2 +- internal/js/runner.go | 2 +- internal/js/summary.go | 9 +- .../lib/testutils/minirunner/minirunner.go | 8 +- lib/models.go | 13 +- lib/summary.go | 9 +- metrics/sink.go | 9 +- output/summary/data.go | 4 +- output/summary/summary.go | 8 +- output/summary/summary_test.go | 294 ++++++++++++++++++ 11 files changed, 331 insertions(+), 34 deletions(-) create mode 100644 output/summary/summary_test.go diff --git a/internal/cmd/run.go b/internal/cmd/run.go index 05476c21443..e34f7ef8af5 100644 --- a/internal/cmd/run.go +++ b/internal/cmd/run.go @@ -193,7 +193,10 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { if !testRunState.RuntimeOptions.NoSummary.Bool { //nolint:nestif sm, err := lib.ValidateSummaryMode(testRunState.RuntimeOptions.SummaryMode.String) if err != nil { - logger.WithError(err).Error("invalid summary mode, falling back to \"compact\" (default)") + logger.WithError(err).Warnf( + "invalid summary mode %q, falling back to \"compact\" (default)", + testRunState.RuntimeOptions.SummaryMode.String, + ) } switch sm { @@ -238,7 +241,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { logger.Debug("Generating the end-of-test summary...") summary := summaryOutput.Summary( - executionState, + executionState.GetCurrentTestRunDuration(), metricsEngine.ObservedMetrics, test.initRunner.GetOptions(), ) diff --git a/internal/cmd/testdata/summary/browser.js b/internal/cmd/testdata/summary/browser.js index 6a04b04724f..df42638b836 100644 --- a/internal/cmd/testdata/summary/browser.js +++ b/internal/cmd/testdata/summary/browser.js @@ -4,7 
+4,7 @@ export async function browserTest() { const page = await browser.newPage() try { - await page.goto('https://test.k6.io/') + await page.goto('https://quickpizza.grafana.com') await page.screenshot({path: 'screenshots/screenshot.png'}) } finally { await page.close() diff --git a/internal/js/runner.go b/internal/js/runner.go index 4db794e786b..6038482bdea 100644 --- a/internal/js/runner.go +++ b/internal/js/runner.go @@ -362,7 +362,7 @@ func (r *Runner) HandleSummary( } }() - summaryCtx, cancel := context.WithTimeout(ctx, 10*time.Minute) + summaryCtx, cancel := context.WithTimeout(ctx, r.getTimeoutFor(consts.HandleSummaryFn)) defer cancel() vu, err := r.newVU(summaryCtx, 0, 0, out) diff --git a/internal/js/summary.go b/internal/js/summary.go index 1047eb83d7f..425e9cac8e2 100644 --- a/internal/js/summary.go +++ b/internal/js/summary.go @@ -38,7 +38,7 @@ func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Durat switch sink := sink.(type) { case *metrics.CounterSink: result = sink.Format(t) - result["rate"] = calculateCounterRate(sink.Value, t) + result["rate"] = sink.Rate(t) case *metrics.GaugeSink: result = sink.Format(t) result["min"] = sink.Min @@ -160,10 +160,3 @@ func getSummaryResult(rawResult sobek.Value) (map[string]io.Reader, error) { return results, nil } - -func calculateCounterRate(count float64, duration time.Duration) float64 { - if duration == 0 { - return 0 - } - return count / (float64(duration) / float64(time.Second)) -} diff --git a/internal/lib/testutils/minirunner/minirunner.go b/internal/lib/testutils/minirunner/minirunner.go index f8b8e985f36..0469e2cf09b 100644 --- a/internal/lib/testutils/minirunner/minirunner.go +++ b/internal/lib/testutils/minirunner/minirunner.go @@ -24,7 +24,7 @@ type MiniRunner struct { Fn func(ctx context.Context, state *lib.State, out chan<- metrics.SampleContainer) error SetupFn func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) TeardownFn func(ctx context.Context, out chan<- metrics.SampleContainer) error - HandleSummaryFn func(context.Context, *lib.LegacySummary) (map[string]io.Reader, error) + HandleSummaryFn func(context.Context, *lib.LegacySummary, *lib.Summary) (map[string]io.Reader, error) SetupData []byte @@ -110,11 +110,11 @@ func (r *MiniRunner) SetOptions(opts lib.Options) error { // HandleSummary calls the specified summary callback, if supplied. func (r *MiniRunner) HandleSummary( ctx context.Context, - s *lib.LegacySummary, - _ *lib.Summary, + legacy *lib.LegacySummary, + summary *lib.Summary, ) (map[string]io.Reader, error) { if r.HandleSummaryFn != nil { - return r.HandleSummaryFn(ctx, s) + return r.HandleSummaryFn(ctx, legacy, summary) } return nil, nil //nolint:nilnil } diff --git a/lib/models.go b/lib/models.go index 41bc34e786a..fbf93273cf4 100644 --- a/lib/models.go +++ b/lib/models.go @@ -24,8 +24,13 @@ const GroupSeparator = "::" // Changing this will be a breaking change and in this way it will be more obvious. const RootGroupPath = "" -// ErrNameContainsGroupSeparator is emitted if you attempt to instantiate a Group or Check that contains the separator. -var ErrNameContainsGroupSeparator = errors.New("group and check names may not contain '" + GroupSeparator + "'") +var ( + // ErrNameContainsGroupSeparator is emitted if you attempt to instantiate a Group or Check that contains the separator. 
+ ErrNameContainsGroupSeparator = errors.New("group and check names may not contain '" + GroupSeparator + "'") + + // ErrCheckGroupIsNil is emitted if you attempt to instantiate a Check (see NewCheck) with a nil Group. + ErrCheckGroupIsNil = errors.New("check's group must not be nil") +) // StageFields defines the fields used for a Stage; this is a dumb hack to make the JSON code // cleaner. pls fix. @@ -209,6 +214,10 @@ type Check struct { // NewCheck creates a new check with the given name and parent group. The group must not be nil. func NewCheck(name string, group *Group) (*Check, error) { + if group == nil { + return nil, ErrCheckGroupIsNil + } + if strings.Contains(name, GroupSeparator) { return nil, ErrNameContainsGroupSeparator } diff --git a/lib/summary.go b/lib/summary.go index c0996637894..3a275400f5c 100644 --- a/lib/summary.go +++ b/lib/summary.go @@ -261,7 +261,7 @@ func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Durat switch sink := sink.(type) { case *metrics.CounterSink: result = sink.Format(t) - result["rate"] = calculateCounterRate(sink.Value, t) + result["rate"] = sink.Rate(t) case *metrics.GaugeSink: result = sink.Format(t) result["min"] = sink.Min @@ -281,13 +281,6 @@ func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Durat } } -func calculateCounterRate(count float64, duration time.Duration) float64 { - if duration == 0 { - return 0 - } - return count / (float64(duration) / float64(time.Second)) -} - // LegacySummary contains all the data the summary handler gets. type LegacySummary struct { Metrics map[string]*metrics.Metric diff --git a/metrics/sink.go b/metrics/sink.go index 46249a4a566..e31656622d4 100644 --- a/metrics/sink.go +++ b/metrics/sink.go @@ -64,10 +64,17 @@ func (c *CounterSink) IsEmpty() bool { return c.First.IsZero() } func (c *CounterSink) Format(t time.Duration) map[string]float64 { return map[string]float64{ "count": c.Value, - "rate": c.Value / (float64(t) / float64(time.Second)), + "rate": c.Rate(t), } } +func (c *CounterSink) Rate(t time.Duration) float64 { + if t == 0 { + return 0 + } + return c.Value / (float64(t) / float64(time.Second)) +} + // GaugeSink is a sink represents a Gauge type GaugeSink struct { Value float64 diff --git a/output/summary/data.go b/output/summary/data.go index d17efcf0303..98c6a4e7385 100644 --- a/output/summary/data.go +++ b/output/summary/data.go @@ -271,7 +271,7 @@ func populateSummaryChecks( successChecks := float64(checksMetric.Sink.(*metrics.RateSink).Trues) //nolint:forcetypeassert summaryGroup.Checks.Metrics.Total.Values["count"] = totalChecks - summaryGroup.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, testRunDuration) + summaryGroup.Checks.Metrics.Total.Values["rate"] = calculateRate(totalChecks, testRunDuration) summaryGroup.Checks.Metrics.Success = lib.NewSummaryMetricFrom( lib.SummaryMetricInfo{ @@ -347,7 +347,7 @@ func oneOfMetrics(metricName string, values ...string) bool { return false } -func calculateCounterRate(count float64, duration time.Duration) float64 { +func calculateRate(count float64, duration time.Duration) float64 { if duration == 0 { return 0 } diff --git a/output/summary/summary.go b/output/summary/summary.go index 17c10a8cf77..0a787870e0f 100644 --- a/output/summary/summary.go +++ b/output/summary/summary.go @@ -16,7 +16,7 @@ const flushPeriod = 200 * time.Millisecond // TODO: make this configurable var _ output.Output = &Output{} -// Output ... 
+// Output implements the lib.Output interface for collecting metrics' data to be displayed in the end-of-test summary. type Output struct { output.SampleBuffer @@ -27,7 +27,7 @@ type Output struct { summaryMode lib.SummaryMode } -// New returns a new JSON output. +// New returns a new summary output. func New(params output.Params) (*Output, error) { sm, err := lib.ValidateSummaryMode(params.RuntimeOptions.SummaryMode.String) if err != nil { @@ -113,12 +113,10 @@ func (o *Output) flushSample(sample metrics.Sample) { // Summary returns a lib.Summary of the test run. func (o *Output) Summary( - executionState *lib.ExecutionState, + testRunDuration time.Duration, observedMetrics map[string]*metrics.Metric, options lib.Options, ) *lib.Summary { - testRunDuration := executionState.GetCurrentTestRunDuration() - summary := lib.NewSummary() summary.TestRunDuration = testRunDuration diff --git a/output/summary/summary_test.go b/output/summary/summary_test.go new file mode 100644 index 00000000000..e993f20a4c9 --- /dev/null +++ b/output/summary/summary_test.go @@ -0,0 +1,294 @@ +package summary + +import ( + "gopkg.in/guregu/null.v3" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.k6.io/k6/internal/lib/testutils" + "go.k6.io/k6/lib" + "go.k6.io/k6/metrics" + "go.k6.io/k6/output" +) + +func TestOutput_Summary(t *testing.T) { + o, err := New(output.Params{ + Logger: testutils.NewLogger(t), + }) + require.NoError(t, err) + + // Metrics + checksMetric := &metrics.Metric{ + Name: "checks", + Type: metrics.Rate, + Contains: metrics.Default, + Sink: &metrics.RateSink{ + Trues: 3, + Total: 5, + }, + Observed: true, + } + + httpReqsMetric := &metrics.Metric{ + Name: "http_reqs", + Type: metrics.Counter, + Contains: metrics.Default, + Sink: &metrics.CounterSink{ + Value: 4, + First: time.Now(), + }, + Observed: true, + } + + authHttpReqsMetric := &metrics.Metric{ + Name: "http_reqs{group: ::auth}", + Type: metrics.Counter, + Contains: metrics.Default, + Sink: &metrics.CounterSink{ + Value: 1, + First: time.Now(), + }, + Observed: true, + } + + // Thresholds + thresholds := thresholds{ + {Threshold: &metrics.Threshold{ + Source: "count<10", + }, Metric: httpReqsMetric}, + {Threshold: &metrics.Threshold{ + Source: "rate>2", + LastFailed: true, + }, Metric: httpReqsMetric}, + {Threshold: &metrics.Threshold{ + Source: "count>1", + LastFailed: true, + }, Metric: authHttpReqsMetric}, + } + + // Checks + rootGroup, err := lib.NewGroup(lib.RootGroupPath, nil) + require.NoError(t, err) + + quickPizzaIsUp := &lib.Check{ + Name: "quickpizza.grafana.com is up", + Group: rootGroup, + Passes: 3, + Fails: 2, + } + + checks := &aggregatedChecksData{ + checks: map[string]*lib.Check{quickPizzaIsUp.Name: quickPizzaIsUp}, + orderedChecks: []*lib.Check{quickPizzaIsUp}, + } + + // Set up + o.dataModel = dataModel{ + thresholds: thresholds, + aggregatedGroupData: aggregatedGroupData{ + checks: checks, + aggregatedMetrics: map[string]aggregatedMetric{ + checksMetric.Name: { + Metric: checksMetric, + Sink: checksMetric.Sink, + }, + httpReqsMetric.Name: { + Metric: httpReqsMetric, + Sink: httpReqsMetric.Sink, + }, + authHttpReqsMetric.Name: { + Metric: authHttpReqsMetric, + Sink: authHttpReqsMetric.Sink, + }, + }, + groupsData: make(map[string]aggregatedGroupData), + }, + } + + testRunDuration := time.Second + observedMetrics := map[string]*metrics.Metric{ + httpReqsMetric.Name: httpReqsMetric, + authHttpReqsMetric.Name: authHttpReqsMetric, + } + options := lib.Options{ + 
SummaryTrendStats: []string{"avg", "min", "max"}, + } + + summary := o.Summary(testRunDuration, observedMetrics, options) + + // Assert thresholds + assert.Len(t, summary.SummaryThresholds, 2) + + httpReqsThresholds := summary.SummaryThresholds[httpReqsMetric.Name].Thresholds + assert.Len(t, httpReqsThresholds, 2) + assert.Equal(t, "count<10", httpReqsThresholds[0].Source) + assert.True(t, httpReqsThresholds[0].Ok) + assert.Equal(t, "rate>2", httpReqsThresholds[1].Source) + assert.False(t, httpReqsThresholds[1].Ok) + + httpReqsGroupThresholds := summary.SummaryThresholds[authHttpReqsMetric.Name].Thresholds + assert.Len(t, httpReqsGroupThresholds, 1) + assert.Equal(t, "count>1", httpReqsGroupThresholds[0].Source) + assert.False(t, httpReqsGroupThresholds[0].Ok) + + // Assert checks + checksTotal := summary.Checks.Metrics.Total + assert.Equal(t, "checks_total", checksTotal.Name) + assert.Equal(t, map[string]float64{ + "count": 5, + "rate": 5, + }, checksTotal.Values) + + checksSucceeded := summary.Checks.Metrics.Success + assert.Equal(t, "checks_succeeded", checksSucceeded.Name) + assert.Equal(t, map[string]float64{ + "rate": 0.6, + "passes": 3, + "fails": 2, + }, checksSucceeded.Values) + + checksFailed := summary.Checks.Metrics.Fail + assert.Equal(t, "checks_failed", checksFailed.Name) + assert.Equal(t, map[string]float64{ + "rate": 0.4, + "passes": 2, + "fails": 3, + }, checksFailed.Values) + + assert.Len(t, summary.Checks.OrderedChecks, 1) + assert.Equal(t, quickPizzaIsUp, summary.Checks.OrderedChecks[0]) + + // Assert metrics + assert.Len(t, summary.Metrics.HTTP, 2) + + httpReqsSummaryMetric := summary.Metrics.HTTP[httpReqsMetric.Name] + assert.Equal(t, "http_reqs", httpReqsSummaryMetric.Name) + assert.Equal(t, "counter", httpReqsSummaryMetric.Type) + assert.Equal(t, "default", httpReqsSummaryMetric.Contains) + assert.Equal(t, map[string]float64{ + "count": 4, + "rate": 4, + }, httpReqsSummaryMetric.Values) + + authHttpReqsSummaryMetric := summary.Metrics.HTTP[authHttpReqsMetric.Name] + assert.Equal(t, "http_reqs{group: ::auth}", authHttpReqsSummaryMetric.Name) + assert.Equal(t, "counter", authHttpReqsSummaryMetric.Type) + assert.Equal(t, "default", authHttpReqsSummaryMetric.Contains) + assert.Equal(t, map[string]float64{ + "count": 1, + "rate": 1, + }, authHttpReqsSummaryMetric.Values) + + // Other asserts + assert.Equal(t, testRunDuration, summary.TestRunDuration) +} + +func TestOutput_AddMetricSamples(t *testing.T) { + reg := metrics.NewRegistry() + + httpReqsMetric := &metrics.Metric{ + Name: "http_reqs", + Type: metrics.Counter, + Contains: metrics.Default, + Sink: &metrics.CounterSink{ + Value: 4, + First: time.Now(), + }, + Observed: true, + } + + authHttpReqsMetric := &metrics.Metric{ + Name: "http_reqs{group: ::auth}", + Type: metrics.Counter, + Contains: metrics.Default, + Sink: &metrics.CounterSink{ + Value: 1, + First: time.Now(), + }, + Observed: true, + } + + samples := []metrics.SampleContainer{ + metrics.Samples{ + {TimeSeries: metrics.TimeSeries{ + Metric: httpReqsMetric, + Tags: reg.RootTagSet().With("group", lib.RootGroupPath), + }, + Time: time.Now(), + Value: 1}, + {TimeSeries: metrics.TimeSeries{ + Metric: authHttpReqsMetric, + Tags: reg.RootTagSet().With("group", "::auth"), + }, + Time: time.Now(), + Value: 1}, + }, + metrics.Samples{ + {TimeSeries: metrics.TimeSeries{ + Metric: httpReqsMetric, + Tags: reg.RootTagSet().With("group", lib.RootGroupPath), + }, + Time: time.Now(), + Value: 3}, + }, + } + + t.Run("compact", func(t *testing.T) { + o, err := 
New(output.Params{ + RuntimeOptions: lib.RuntimeOptions{ + SummaryMode: null.StringFrom("compact"), + }, + Logger: testutils.NewLogger(t), + }) + require.NoError(t, err) + + require.NoError(t, o.Start()) + + o.AddMetricSamples(samples) + + require.NoError(t, o.Stop()) + + assert.Len(t, o.dataModel.aggregatedMetrics, 2) + + httpReqsSummaryMetric := o.dataModel.aggregatedMetrics[httpReqsMetric.Name] + assert.Equal(t, float64(4), httpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) + + authHttpReqsSummaryMetric := o.dataModel.aggregatedMetrics[authHttpReqsMetric.Name] + assert.Equal(t, float64(1), authHttpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) + + assert.Len(t, o.dataModel.groupsData, 0) + }) + + t.Run("full", func(t *testing.T) { + o, err := New(output.Params{ + RuntimeOptions: lib.RuntimeOptions{ + SummaryMode: null.StringFrom("full"), + }, + Logger: testutils.NewLogger(t), + }) + require.NoError(t, err) + + require.NoError(t, o.Start()) + + o.AddMetricSamples(samples) + + require.NoError(t, o.Stop()) + + assert.Len(t, o.dataModel.aggregatedMetrics, 2) + + httpReqsSummaryMetric := o.dataModel.aggregatedMetrics[httpReqsMetric.Name] + assert.Equal(t, float64(4), httpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) + + authHttpReqsSummaryMetric := o.dataModel.aggregatedMetrics[authHttpReqsMetric.Name] + assert.Equal(t, float64(1), authHttpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) + + assert.Len(t, o.dataModel.groupsData, 1) + assert.Len(t, o.dataModel.groupsData["auth"].aggregatedMetrics, 1) + + authHttpReqsSummaryMetric = o.dataModel.groupsData["auth"].aggregatedMetrics[authHttpReqsMetric.Name] + assert.Equal(t, float64(1), authHttpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) + }) +} From b8ed6e094b4c15ed720bdd13fe2893824aad4ed7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20L=C3=B3pez=20de=20la=20Franca=20Beltran?= Date: Tue, 18 Feb 2025 16:44:23 +0100 Subject: [PATCH 42/42] Fix linter complaints --- metrics/sink.go | 2 + output/summary/summary_test.go | 82 ++++++++++++++++++++-------------- 2 files changed, 50 insertions(+), 34 deletions(-) diff --git a/metrics/sink.go b/metrics/sink.go index e31656622d4..e399310403f 100644 --- a/metrics/sink.go +++ b/metrics/sink.go @@ -68,6 +68,8 @@ func (c *CounterSink) Format(t time.Duration) map[string]float64 { } } +// Rate calculates the rate (per second) of the counter, +// based on the given duration. 
func (c *CounterSink) Rate(t time.Duration) float64 { if t == 0 { return 0 diff --git a/output/summary/summary_test.go b/output/summary/summary_test.go index e993f20a4c9..e2ec0251abc 100644 --- a/output/summary/summary_test.go +++ b/output/summary/summary_test.go @@ -1,12 +1,12 @@ package summary import ( - "gopkg.in/guregu/null.v3" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" "go.k6.io/k6/internal/lib/testutils" "go.k6.io/k6/lib" @@ -15,6 +15,8 @@ import ( ) func TestOutput_Summary(t *testing.T) { + t.Parallel() + o, err := New(output.Params{ Logger: testutils.NewLogger(t), }) @@ -43,7 +45,7 @@ func TestOutput_Summary(t *testing.T) { Observed: true, } - authHttpReqsMetric := &metrics.Metric{ + authHTTPReqsMetric := &metrics.Metric{ Name: "http_reqs{group: ::auth}", Type: metrics.Counter, Contains: metrics.Default, @@ -66,7 +68,7 @@ func TestOutput_Summary(t *testing.T) { {Threshold: &metrics.Threshold{ Source: "count>1", LastFailed: true, - }, Metric: authHttpReqsMetric}, + }, Metric: authHTTPReqsMetric}, } // Checks @@ -99,9 +101,9 @@ func TestOutput_Summary(t *testing.T) { Metric: httpReqsMetric, Sink: httpReqsMetric.Sink, }, - authHttpReqsMetric.Name: { - Metric: authHttpReqsMetric, - Sink: authHttpReqsMetric.Sink, + authHTTPReqsMetric.Name: { + Metric: authHTTPReqsMetric, + Sink: authHTTPReqsMetric.Sink, }, }, groupsData: make(map[string]aggregatedGroupData), @@ -111,7 +113,7 @@ func TestOutput_Summary(t *testing.T) { testRunDuration := time.Second observedMetrics := map[string]*metrics.Metric{ httpReqsMetric.Name: httpReqsMetric, - authHttpReqsMetric.Name: authHttpReqsMetric, + authHTTPReqsMetric.Name: authHTTPReqsMetric, } options := lib.Options{ SummaryTrendStats: []string{"avg", "min", "max"}, @@ -129,7 +131,7 @@ func TestOutput_Summary(t *testing.T) { assert.Equal(t, "rate>2", httpReqsThresholds[1].Source) assert.False(t, httpReqsThresholds[1].Ok) - httpReqsGroupThresholds := summary.SummaryThresholds[authHttpReqsMetric.Name].Thresholds + httpReqsGroupThresholds := summary.SummaryThresholds[authHTTPReqsMetric.Name].Thresholds assert.Len(t, httpReqsGroupThresholds, 1) assert.Equal(t, "count>1", httpReqsGroupThresholds[0].Source) assert.False(t, httpReqsGroupThresholds[0].Ok) @@ -173,20 +175,22 @@ func TestOutput_Summary(t *testing.T) { "rate": 4, }, httpReqsSummaryMetric.Values) - authHttpReqsSummaryMetric := summary.Metrics.HTTP[authHttpReqsMetric.Name] - assert.Equal(t, "http_reqs{group: ::auth}", authHttpReqsSummaryMetric.Name) - assert.Equal(t, "counter", authHttpReqsSummaryMetric.Type) - assert.Equal(t, "default", authHttpReqsSummaryMetric.Contains) + authHTTPReqsSummaryMetric := summary.Metrics.HTTP[authHTTPReqsMetric.Name] + assert.Equal(t, "http_reqs{group: ::auth}", authHTTPReqsSummaryMetric.Name) + assert.Equal(t, "counter", authHTTPReqsSummaryMetric.Type) + assert.Equal(t, "default", authHTTPReqsSummaryMetric.Contains) assert.Equal(t, map[string]float64{ "count": 1, "rate": 1, - }, authHttpReqsSummaryMetric.Values) + }, authHTTPReqsSummaryMetric.Values) // Other asserts assert.Equal(t, testRunDuration, summary.TestRunDuration) } func TestOutput_AddMetricSamples(t *testing.T) { + t.Parallel() + reg := metrics.NewRegistry() httpReqsMetric := &metrics.Metric{ @@ -200,7 +204,7 @@ func TestOutput_AddMetricSamples(t *testing.T) { Observed: true, } - authHttpReqsMetric := &metrics.Metric{ + authHTTPReqsMetric := &metrics.Metric{ Name: "http_reqs{group: ::auth}", Type: metrics.Counter, Contains: 
metrics.Default, @@ -213,30 +217,38 @@ func TestOutput_AddMetricSamples(t *testing.T) { samples := []metrics.SampleContainer{ metrics.Samples{ - {TimeSeries: metrics.TimeSeries{ - Metric: httpReqsMetric, - Tags: reg.RootTagSet().With("group", lib.RootGroupPath), - }, + { + TimeSeries: metrics.TimeSeries{ + Metric: httpReqsMetric, + Tags: reg.RootTagSet().With("group", lib.RootGroupPath), + }, Time: time.Now(), - Value: 1}, - {TimeSeries: metrics.TimeSeries{ - Metric: authHttpReqsMetric, - Tags: reg.RootTagSet().With("group", "::auth"), + Value: 1, }, + { + TimeSeries: metrics.TimeSeries{ + Metric: authHTTPReqsMetric, + Tags: reg.RootTagSet().With("group", "::auth"), + }, Time: time.Now(), - Value: 1}, + Value: 1, + }, }, metrics.Samples{ - {TimeSeries: metrics.TimeSeries{ - Metric: httpReqsMetric, - Tags: reg.RootTagSet().With("group", lib.RootGroupPath), - }, + { + TimeSeries: metrics.TimeSeries{ + Metric: httpReqsMetric, + Tags: reg.RootTagSet().With("group", lib.RootGroupPath), + }, Time: time.Now(), - Value: 3}, + Value: 3, + }, }, } t.Run("compact", func(t *testing.T) { + t.Parallel() + o, err := New(output.Params{ RuntimeOptions: lib.RuntimeOptions{ SummaryMode: null.StringFrom("compact"), @@ -256,13 +268,15 @@ func TestOutput_AddMetricSamples(t *testing.T) { httpReqsSummaryMetric := o.dataModel.aggregatedMetrics[httpReqsMetric.Name] assert.Equal(t, float64(4), httpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) - authHttpReqsSummaryMetric := o.dataModel.aggregatedMetrics[authHttpReqsMetric.Name] - assert.Equal(t, float64(1), authHttpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) + authHTTPReqsSummaryMetric := o.dataModel.aggregatedMetrics[authHTTPReqsMetric.Name] + assert.Equal(t, float64(1), authHTTPReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) assert.Len(t, o.dataModel.groupsData, 0) }) t.Run("full", func(t *testing.T) { + t.Parallel() + o, err := New(output.Params{ RuntimeOptions: lib.RuntimeOptions{ SummaryMode: null.StringFrom("full"), @@ -282,13 +296,13 @@ func TestOutput_AddMetricSamples(t *testing.T) { httpReqsSummaryMetric := o.dataModel.aggregatedMetrics[httpReqsMetric.Name] assert.Equal(t, float64(4), httpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) - authHttpReqsSummaryMetric := o.dataModel.aggregatedMetrics[authHttpReqsMetric.Name] - assert.Equal(t, float64(1), authHttpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) + authHTTPReqsSummaryMetric := o.dataModel.aggregatedMetrics[authHTTPReqsMetric.Name] + assert.Equal(t, float64(1), authHTTPReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) assert.Len(t, o.dataModel.groupsData, 1) assert.Len(t, o.dataModel.groupsData["auth"].aggregatedMetrics, 1) - authHttpReqsSummaryMetric = o.dataModel.groupsData["auth"].aggregatedMetrics[authHttpReqsMetric.Name] - assert.Equal(t, float64(1), authHttpReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) + authHTTPReqsSummaryMetric = o.dataModel.groupsData["auth"].aggregatedMetrics[authHTTPReqsMetric.Name] + assert.Equal(t, float64(1), authHTTPReqsSummaryMetric.Metric.Sink.(*metrics.CounterSink).Value) }) }