From 044e2fc12044f25ce6ae229085656f2e430f84fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=ABl=20Stemmer?= Date: Thu, 31 Mar 2022 23:05:33 +0100 Subject: [PATCH] parser/gotest: Add support for skipped and failed benchmarks --- pkg/parser/gotest/gotest.go | 45 ++++++++++++++++++------ pkg/parser/gotest/gotest_test.go | 27 ++++++++++++--- pkg/parser/gotest/report_builder.go | 54 +++++++++++++++++++++++++++-- 3 files changed, 110 insertions(+), 16 deletions(-) diff --git a/pkg/parser/gotest/gotest.go b/pkg/parser/gotest/gotest.go index 1111b01..dd95f1e 100644 --- a/pkg/parser/gotest/gotest.go +++ b/pkg/parser/gotest/gotest.go @@ -14,12 +14,14 @@ import ( ) var ( - // regexBenchmark captures 3-5 groups: benchmark name, number of times ran, ns/op (with or without decimal), MB/sec (optional), B/op (optional), and allocs/op (optional). - regexBenchmark = regexp.MustCompile(`^(Benchmark[^ -]+)(?:-\d+\s+|\s+)(\d+)\s+(\d+|\d+\.\d+)\sns\/op(?:\s+(\d+|\d+\.\d+)\sMB\/s)?(?:\s+(\d+)\sB\/op)?(?:\s+(\d+)\sallocs/op)?`) - regexCoverage = regexp.MustCompile(`^coverage:\s+(\d+|\d+\.\d+)%\s+of\s+statements(?:\sin\s(.+))?$`) - regexEndTest = regexp.MustCompile(`((?: )*)--- (PASS|FAIL|SKIP): ([^ ]+) \((\d+\.\d+)(?: seconds|s)\)`) - regexStatus = regexp.MustCompile(`^(PASS|FAIL|SKIP)$`) - regexSummary = regexp.MustCompile(`` + + // regexBenchSummary captures up to 6 groups: benchmark name, number of times ran, ns/op (with or without decimal), MB/sec (optional), B/op (optional), and allocs/op (optional). 
+ regexBenchmark = regexp.MustCompile(`^(Benchmark[^ -]+)$`) + regexBenchSummary = regexp.MustCompile(`^(Benchmark[^ -]+)(?:-\d+\s+|\s+)(\d+)\s+(\d+|\d+\.\d+)\sns\/op(?:\s+(\d+|\d+\.\d+)\sMB\/s)?(?:\s+(\d+)\sB\/op)?(?:\s+(\d+)\sallocs/op)?`) + regexCoverage = regexp.MustCompile(`^coverage:\s+(\d+|\d+\.\d+)%\s+of\s+statements(?:\sin\s(.+))?$`) + regexEndBenchmark = regexp.MustCompile(`^--- (BENCH|FAIL|SKIP): (Benchmark[^ -]+)(?:-\d+)?$`) + regexEndTest = regexp.MustCompile(`((?: )*)--- (PASS|FAIL|SKIP): ([^ ]+) \((\d+\.\d+)(?: seconds|s)\)`) + regexStatus = regexp.MustCompile(`^(PASS|FAIL|SKIP)$`) + regexSummary = regexp.MustCompile(`` + // 1: result `^(\?|ok|FAIL)` + // 2: package name @@ -100,8 +102,12 @@ func (p *Parser) report(events []Event) gtr.Report { rb.ContinueTest(ev.Name) case "end_test": rb.EndTest(ev.Name, ev.Result, ev.Duration, ev.Indent) + case "run_benchmark": + rb.CreateBenchmark(ev.Name) case "benchmark": - rb.Benchmark(ev.Name, ev.Iterations, ev.NsPerOp, ev.MBPerSec, ev.BytesPerOp, ev.AllocsPerOp) + rb.BenchmarkResult(ev.Name, ev.Iterations, ev.NsPerOp, ev.MBPerSec, ev.BytesPerOp, ev.AllocsPerOp) + case "end_benchmark": + rb.EndBenchmark(ev.Name, ev.Result) case "status": rb.End() case "summary": @@ -141,8 +147,12 @@ func (p *Parser) parseLine(line string) { p.summary(matches[1], matches[2], matches[3], matches[4], matches[5], matches[6], matches[7]) } else if matches := regexCoverage.FindStringSubmatch(line); len(matches) == 3 { p.coverage(matches[1], matches[2]) - } else if matches := regexBenchmark.FindStringSubmatch(line); len(matches) == 7 { - p.benchmark(matches[1], matches[2], matches[3], matches[4], matches[5], matches[6]) + } else if matches := regexBenchmark.FindStringSubmatch(line); len(matches) == 2 { + p.runBench(matches[1]) + } else if matches := regexBenchSummary.FindStringSubmatch(line); len(matches) == 7 { + p.benchSummary(matches[1], matches[2], matches[3], matches[4], matches[5], matches[6]) + } else if matches := 
regexEndBenchmark.FindStringSubmatch(line); len(matches) == 3 { + p.endBench(matches[1], matches[2]) } else if strings.HasPrefix(line, "# ") { // TODO(jstemmer): this should just be output; we should detect build output when building report fields := strings.Fields(strings.TrimPrefix(line, "# ")) @@ -210,7 +220,14 @@ func (p *Parser) coverage(percent, packages string) { }) } -func (p *Parser) benchmark(name, iterations, nsPerOp, mbPerSec, bytesPerOp, allocsPerOp string) { +func (p *Parser) runBench(name string) { + p.add(Event{ + Type: "run_benchmark", + Name: name, + }) +} + +func (p *Parser) benchSummary(name, iterations, nsPerOp, mbPerSec, bytesPerOp, allocsPerOp string) { p.add(Event{ Type: "benchmark", Name: name, @@ -222,6 +239,14 @@ func (p *Parser) benchmark(name, iterations, nsPerOp, mbPerSec, bytesPerOp, allo }) } +func (p *Parser) endBench(result, name string) { + p.add(Event{ + Type: "end_benchmark", + Name: name, + Result: result, + }) +} + func (p *Parser) buildOutput(packageName string) { p.add(Event{ Type: "build_output", diff --git a/pkg/parser/gotest/gotest_test.go b/pkg/parser/gotest/gotest_test.go index 6b5badf..e1ee75f 100644 --- a/pkg/parser/gotest/gotest_test.go +++ b/pkg/parser/gotest/gotest_test.go @@ -119,6 +119,10 @@ var parseLineTests = []parseLineTest{ "coverage: 99.8% of statements in fmt, encoding/xml", Event{Type: "coverage", CovPct: 99.8, CovPackages: []string{"fmt", "encoding/xml"}}, }, + { + "BenchmarkOK", + Event{Type: "run_benchmark", Name: "BenchmarkOK"}, + }, { "BenchmarkOne-8 2000000 604 ns/op", Event{Type: "benchmark", Name: "BenchmarkOne", Iterations: 2_000_000, NsPerOp: 604}, @@ -135,6 +139,18 @@ var parseLineTests = []parseLineTest{ "BenchmarkFour-8 10000 104427 ns/op 95.76 MB/s 40629 B/op 5 allocs/op", Event{Type: "benchmark", Name: "BenchmarkFour", Iterations: 10_000, NsPerOp: 104_427, MBPerSec: 95.76, BytesPerOp: 40_629, AllocsPerOp: 5}, }, + { + "--- BENCH: BenchmarkOK-8", + Event{Type: "end_benchmark", Name: 
"BenchmarkOK", Result: "BENCH"}, + }, + { + "--- FAIL: BenchmarkError", + Event{Type: "end_benchmark", Name: "BenchmarkError", Result: "FAIL"}, + }, + { + "--- SKIP: BenchmarkSkip", + Event{Type: "end_benchmark", Name: "BenchmarkSkip", Result: "SKIP"}, + }, { "# package/name/failing1", Event{Type: "build_output", Name: "package/name/failing1"}, @@ -213,8 +229,12 @@ func TestReport(t *testing.T) { {Type: "status", Result: "FAIL"}, {Type: "summary", Result: "FAIL", Name: "package/name2", Duration: 1 * time.Millisecond}, {Type: "output", Data: "goarch: amd64"}, + {Type: "run_benchmark", Name: "BenchmarkOne"}, {Type: "benchmark", Name: "BenchmarkOne", NsPerOp: 100}, - {Type: "benchmark", Name: "BenchmarkOne", NsPerOp: 300}, + {Type: "end_benchmark", Name: "BenchmarkOne", Result: "BENCH"}, + {Type: "run_benchmark", Name: "BenchmarkTwo"}, + {Type: "benchmark", Name: "BenchmarkTwo"}, + {Type: "end_benchmark", Name: "BenchmarkTwo", Result: "FAIL"}, {Type: "status", Result: "PASS"}, {Type: "summary", Result: "ok", Name: "package/name3", Duration: 1234 * time.Millisecond}, {Type: "build_output", Name: "package/failing1"}, @@ -269,9 +289,8 @@ func TestReport(t *testing.T) { NsPerOp: 100, }, { - Name: "BenchmarkOne", - Result: gtr.Pass, - NsPerOp: 300, + Name: "BenchmarkTwo", + Result: gtr.Fail, }, }, Output: []string{"goarch: amd64"}, diff --git a/pkg/parser/gotest/report_builder.go b/pkg/parser/gotest/report_builder.go index ffc10f6..4ca6c42 100644 --- a/pkg/parser/gotest/report_builder.go +++ b/pkg/parser/gotest/report_builder.go @@ -113,9 +113,27 @@ func (b *reportBuilder) End() { b.lastId = 0 } -// Benchmark adds a new Benchmark to the report and marks it as active. -func (b *reportBuilder) Benchmark(name string, iterations int64, nsPerOp, mbPerSec float64, bytesPerOp, allocsPerOp int64) { +// CreateBenchmark adds a benchmark with the given name to the report, and +// marks it as active. 
A new benchmark entry is +// always created, even when benchmarks with the same name already exist; +// existing entries are left untouched. +func (b *reportBuilder) CreateBenchmark(name string) { b.benchmarks[b.newId()] = gtr.Benchmark{ + Name: name, + } +} + +// BenchmarkResult updates an existing or adds a new benchmark with the given +// results and marks it as active. If an existing benchmark with this name +// exists but without result, then that one is updated. Otherwise a new one is +// added to the report. +func (b *reportBuilder) BenchmarkResult(name string, iterations int64, nsPerOp, mbPerSec float64, bytesPerOp, allocsPerOp int64) { + b.lastId = b.findBenchmark(name) + if b.lastId < 0 || b.benchmarks[b.lastId].Result != gtr.Unknown { + b.CreateBenchmark(name) + } + + b.benchmarks[b.lastId] = gtr.Benchmark{ Name: name, Result: gtr.Pass, Iterations: iterations, @@ -126,6 +144,21 @@ func (b *reportBuilder) Benchmark(name string, iterations int64, nsPerOp, mbPerS } } +// EndBenchmark finds the benchmark with the given name, sets the result and +// marks it as active. If more than one benchmark exists with this name, the +// most recently created benchmark will be used. If no benchmark exists with +// this name, a new benchmark is created. +func (b *reportBuilder) EndBenchmark(name, result string) { + b.lastId = b.findBenchmark(name) + if b.lastId < 0 { + b.CreateBenchmark(name) + } + + bm := b.benchmarks[b.lastId] + bm.Result = parseResult(result) + b.benchmarks[b.lastId] = bm +} + // CreateBuildError creates a new build error and marks it as active. func (b *reportBuilder) CreateBuildError(packageName string) { b.buildErrors[b.newId()] = gtr.Error{Name: packageName} @@ -259,6 +292,21 @@ func (b *reportBuilder) findTest(name string) int { return -1 } +// findBenchmark returns the id of the most recently created benchmark with the +// given name, or -1 if no such benchmark exists. 
+func (b *reportBuilder) findBenchmark(name string) int { + // check if this benchmark was lastId + if bm, ok := b.benchmarks[b.lastId]; ok && bm.Name == name { + return b.lastId + } + for id := len(b.benchmarks); id >= 0; id-- { + if b.benchmarks[id].Name == name { + return id + } + } + return -1 +} + // containsFailingTest return true if the current list of tests contains at // least one failing test or an unknown result. func (b *reportBuilder) containsFailingTest() bool { @@ -279,6 +327,8 @@ func parseResult(r string) gtr.Result { return gtr.Fail case "SKIP": return gtr.Skip + case "BENCH": + return gtr.Pass default: return gtr.Unknown }