gtr,parser/gotest: Remove gtr.Benchmark and use gtr.Test for all tests

This commit is contained in:
Joël Stemmer 2022-06-15 23:33:13 +01:00
parent dc591b8731
commit da6551b8c0
5 changed files with 187 additions and 159 deletions

View File

@ -7,7 +7,7 @@ import (
"time" "time"
) )
// Result is the result of a test or benchmark. // Result is the result of a test.
type Result int type Result int
const ( const (
@ -32,8 +32,7 @@ func (r Result) String() string {
} }
} }
// Report contains the build, test and/or benchmark results of a collection of // Report contains the build and test results of a collection of packages.
// packages.
type Report struct { type Report struct {
Packages []Package Packages []Package
} }
@ -54,7 +53,7 @@ func (r *Report) IsSuccessful() bool {
return true return true
} }
// Package contains build, test and/or benchmark results for a single package. // Package contains build and test results for a single package.
type Package struct { type Package struct {
Name string Name string
Timestamp time.Time Timestamp time.Time
@ -64,7 +63,6 @@ type Package struct {
Properties map[string]string Properties map[string]string
Tests []Test Tests []Test
Benchmarks []Benchmark
BuildError Error BuildError Error
RunError Error RunError Error
@ -88,25 +86,12 @@ type Test struct {
Result Result Result Result
Level int Level int
Output []string Output []string
Data map[string]interface{}
} }
// Benchmark contains the results of a single benchmark. // NewTest creates a new Test with the given id and name.
type Benchmark struct { func NewTest(id int, name string) Test {
ID int return Test{ID: id, Name: name, Data: make(map[string]interface{})}
Name string
Result Result
Output []string
Iterations int64
NsPerOp float64
MBPerSec float64
BytesPerOp int64
AllocsPerOp int64
}
// ApproximateDuration returns the duration calculated by multiplying the
// iterations and average time per iteration (NsPerOp).
func (b Benchmark) ApproximateDuration() time.Duration {
return time.Duration(float64(b.Iterations)*b.NsPerOp) * time.Nanosecond
} }
// Error contains details of a build or runtime error. // Error contains details of a build or runtime error.

View File

@ -0,0 +1,48 @@
package gotest
import (
"time"
"github.com/jstemmer/go-junit-report/v2/gtr"
)
const (
key = "gotest.benchmark"
)
// Benchmark contains benchmark results and is intended to be used as extra
// data in a gtr.Test.
type Benchmark struct {
Iterations int64
NsPerOp float64
MBPerSec float64
BytesPerOp int64
AllocsPerOp int64
}
// ApproximateDuration returns the duration calculated by multiplying the
// iterations and average time per iteration (NsPerOp).
func (b Benchmark) ApproximateDuration() time.Duration {
return time.Duration(float64(b.Iterations)*b.NsPerOp) * time.Nanosecond
}
// GetBenchmarkData returns the benchmark stored in the Data field of the
// given gtr.Test t. If no benchmark is present, or the stored value is not of
// type Benchmark, ok is false and b is the zero Benchmark.
func GetBenchmarkData(t gtr.Test) (b Benchmark, ok bool) {
	// Indexing a nil map safely yields the zero value, and a type assertion
	// on a nil interface value fails cleanly, so no explicit nil/existence
	// checks are needed here.
	b, ok = t.Data[key].(Benchmark)
	return b, ok
}
// SetBenchmarkData stores the benchmark b in the Data field of the given
// gtr.Test t, creating the Data map first if necessary.
func SetBenchmarkData(t *gtr.Test, b Benchmark) {
	// Tests created via gtr.NewTest always carry a non-nil Data map, but
	// guard against manually constructed tests: writing to a nil map would
	// panic, and the previous behavior of skipping the write silently
	// dropped the benchmark data.
	if t.Data == nil {
		t.Data = make(map[string]interface{})
	}
	t.Data[key] = b
}

View File

@ -258,12 +258,14 @@ func TestReport(t *testing.T) {
Output: []string{ Output: []string{
"\tHello", // TODO: strip tabs? "\tHello", // TODO: strip tabs?
}, },
Data: map[string]interface{}{},
}, },
{ {
ID: 2, ID: 2,
Name: "TestSkip", Name: "TestSkip",
Duration: 1 * time.Millisecond, Duration: 1 * time.Millisecond,
Result: gtr.Skip, Result: gtr.Skip,
Data: map[string]interface{}{},
}, },
}, },
}, },
@ -280,6 +282,7 @@ func TestReport(t *testing.T) {
Output: []string{ Output: []string{
"\tfile_test.go:10: error", "\tfile_test.go:10: error",
}, },
Data: map[string]interface{}{},
}, },
}, },
}, },
@ -287,17 +290,18 @@ func TestReport(t *testing.T) {
Name: "package/name3", Name: "package/name3",
Duration: 1234 * time.Millisecond, Duration: 1234 * time.Millisecond,
Timestamp: testTimestamp, Timestamp: testTimestamp,
Benchmarks: []gtr.Benchmark{ Tests: []gtr.Test{
{ {
ID: 4, ID: 4,
Name: "BenchmarkOne", Name: "BenchmarkOne",
Result: gtr.Pass, Result: gtr.Pass,
NsPerOp: 100, Data: map[string]interface{}{key: Benchmark{NsPerOp: 100}},
}, },
{ {
ID: 5, ID: 5,
Name: "BenchmarkTwo", Name: "BenchmarkTwo",
Result: gtr.Fail, Result: gtr.Fail,
Data: map[string]interface{}{},
}, },
}, },
Output: []string{"goarch: amd64"}, Output: []string{"goarch: amd64"},
@ -360,6 +364,7 @@ func TestSubtestModes(t *testing.T) {
Duration: 1 * time.Millisecond, Duration: 1 * time.Millisecond,
Result: gtr.Pass, Result: gtr.Pass,
Output: []string{"TestParent before", "TestParent after"}, Output: []string{"TestParent before", "TestParent after"},
Data: map[string]interface{}{},
}, },
{ {
ID: 2, ID: 2,
@ -367,6 +372,7 @@ func TestSubtestModes(t *testing.T) {
Duration: 2 * time.Millisecond, Duration: 2 * time.Millisecond,
Result: gtr.Fail, Result: gtr.Fail,
Output: []string{"Subtest#1 output"}, Output: []string{"Subtest#1 output"},
Data: map[string]interface{}{},
}, },
{ {
ID: 3, ID: 3,
@ -374,6 +380,7 @@ func TestSubtestModes(t *testing.T) {
Duration: 3 * time.Millisecond, Duration: 3 * time.Millisecond,
Result: gtr.Pass, Result: gtr.Pass,
Output: []string{"Subtest#2 output"}, Output: []string{"Subtest#2 output"},
Data: map[string]interface{}{},
}, },
}, },
Output: []string{"output"}, Output: []string{"output"},
@ -397,6 +404,7 @@ func TestSubtestModes(t *testing.T) {
Duration: 2 * time.Millisecond, Duration: 2 * time.Millisecond,
Result: gtr.Fail, Result: gtr.Fail,
Output: []string{"Subtest#1 output"}, Output: []string{"Subtest#1 output"},
Data: map[string]interface{}{},
}, },
{ {
ID: 3, ID: 3,
@ -404,6 +412,7 @@ func TestSubtestModes(t *testing.T) {
Duration: 3 * time.Millisecond, Duration: 3 * time.Millisecond,
Result: gtr.Pass, Result: gtr.Pass,
Output: []string{"Subtest#2 output"}, Output: []string{"Subtest#2 output"},
Data: map[string]interface{}{},
}, },
}, },
Output: []string{"TestParent before", "TestParent after", "output"}, Output: []string{"TestParent before", "TestParent after", "output"},

View File

@ -14,16 +14,14 @@ const (
// reportBuilder helps build a test Report from a collection of events. // reportBuilder helps build a test Report from a collection of events.
// //
// The reportBuilder keeps track of the active context whenever a test, // The reportBuilder keeps track of the active context whenever a test or build
// benchmark or build error is created. This is necessary because the test // error is created. This is necessary because the test parser do not contain
// parser do not contain any state themselves and simply just emit an event for // any state themselves and simply just emit an event for every line that is
// every line that is read. By tracking the active context, any output that is // read. By tracking the active context, any output that is appended to the
// appended to the reportBuilder gets attributed to the correct test, benchmark // reportBuilder gets attributed to the correct test or build error.
// or build error.
type reportBuilder struct { type reportBuilder struct {
packages []gtr.Package packages []gtr.Package
tests map[int]gtr.Test tests map[int]gtr.Test
benchmarks map[int]gtr.Benchmark
buildErrors map[int]gtr.Error buildErrors map[int]gtr.Error
runErrors map[int]gtr.Error runErrors map[int]gtr.Error
@ -44,7 +42,6 @@ type reportBuilder struct {
func newReportBuilder() *reportBuilder { func newReportBuilder() *reportBuilder {
return &reportBuilder{ return &reportBuilder{
tests: make(map[int]gtr.Test), tests: make(map[int]gtr.Test),
benchmarks: make(map[int]gtr.Benchmark),
buildErrors: make(map[int]gtr.Error), buildErrors: make(map[int]gtr.Error),
runErrors: make(map[int]gtr.Error), runErrors: make(map[int]gtr.Error),
nextID: 1, nextID: 1,
@ -62,17 +59,16 @@ func (b *reportBuilder) newID() int {
return id return id
} }
// flush creates a new package in this report containing any tests or // flush creates a new package in this report containing any tests we've
// benchmarks we've collected so far. This is necessary when a test or // collected so far. This is necessary when a test did not end with a summary.
// benchmark did not end with a summary.
func (b *reportBuilder) flush() { func (b *reportBuilder) flush() {
if len(b.tests) > 0 || len(b.benchmarks) > 0 { if len(b.tests) > 0 {
b.CreatePackage(b.packageName, "", 0, "") b.CreatePackage(b.packageName, "", 0, "")
} }
} }
// Build returns the new Report containing all the tests, benchmarks and output // Build returns the new Report containing all the tests and output created so
// created so far. // far.
func (b *reportBuilder) Build() gtr.Report { func (b *reportBuilder) Build() gtr.Report {
b.flush() b.flush()
return gtr.Report{Packages: b.packages} return gtr.Report{Packages: b.packages}
@ -85,7 +81,7 @@ func (b *reportBuilder) CreateTest(name string) {
b.parentIDs[parentID] = struct{}{} b.parentIDs[parentID] = struct{}{}
} }
id := b.newID() id := b.newID()
b.tests[id] = gtr.Test{ID: id, Name: name} b.tests[id] = gtr.NewTest(id, name)
} }
// PauseTest marks the active context as no longer active. Any results or // PauseTest marks the active context as no longer active. Any results or
@ -134,31 +130,26 @@ func (b *reportBuilder) End() {
// most recently created benchmark will be updated. If no benchmark exists with // most recently created benchmark will be updated. If no benchmark exists with
// this name, a new benchmark is created. // this name, a new benchmark is created.
func (b *reportBuilder) CreateBenchmark(name string) { func (b *reportBuilder) CreateBenchmark(name string) {
id := b.newID() b.CreateTest(name)
b.benchmarks[id] = gtr.Benchmark{ID: id, Name: name}
} }
// BenchmarkResult updates an existing or adds a new benchmark with the given // BenchmarkResult updates an existing or adds a new test with the given
// results and marks it as active. If an existing benchmark with this name // results and marks it as active. If an existing test with this name exists
// exists but without result, then that one is updated. Otherwise a new one is // but without result, then that one is updated. Otherwise a new one is added
// added to the report. // to the report.
func (b *reportBuilder) BenchmarkResult(name string, iterations int64, nsPerOp, mbPerSec float64, bytesPerOp, allocsPerOp int64) { func (b *reportBuilder) BenchmarkResult(name string, iterations int64, nsPerOp, mbPerSec float64, bytesPerOp, allocsPerOp int64) {
id, ok := b.findBenchmark(name) id, ok := b.findTest(name)
if !ok || b.benchmarks[id].Result != gtr.Unknown { if !ok || b.tests[id].Result != gtr.Unknown {
b.CreateBenchmark(name) b.CreateTest(name)
id = b.lastID id = b.lastID
} }
b.benchmarks[id] = gtr.Benchmark{ benchmark := Benchmark{iterations, nsPerOp, mbPerSec, bytesPerOp, allocsPerOp}
ID: id, test := gtr.NewTest(id, name)
Name: name, test.Result = gtr.Pass
Result: gtr.Pass, test.Duration = benchmark.ApproximateDuration()
Iterations: iterations, SetBenchmarkData(&test, benchmark)
NsPerOp: nsPerOp, b.tests[id] = test
MBPerSec: mbPerSec,
BytesPerOp: bytesPerOp,
AllocsPerOp: allocsPerOp,
}
} }
// EndBenchmark finds the benchmark with the given name and sets the result. If // EndBenchmark finds the benchmark with the given name and sets the result. If
@ -166,16 +157,7 @@ func (b *reportBuilder) BenchmarkResult(name string, iterations int64, nsPerOp,
// benchmark will be used. If no benchmark exists with this name, a new // benchmark will be used. If no benchmark exists with this name, a new
// benchmark is created. // benchmark is created.
func (b *reportBuilder) EndBenchmark(name, result string) { func (b *reportBuilder) EndBenchmark(name, result string) {
id, ok := b.findBenchmark(name) b.EndTest(name, result, 0, 0)
if !ok {
b.CreateBenchmark(name)
id = b.lastID
}
bm := b.benchmarks[id]
bm.Result = parseResult(result)
b.benchmarks[id] = bm
b.lastID = 0
} }
// CreateBuildError creates a new build error and marks it as active. // CreateBuildError creates a new build error and marks it as active.
@ -202,8 +184,8 @@ func (b *reportBuilder) CreatePackage(name, result string, duration time.Duratio
// about the build errors for this particular package. // about the build errors for this particular package.
for id, buildErr := range b.buildErrors { for id, buildErr := range b.buildErrors {
if buildErr.Name == name { if buildErr.Name == name {
if len(b.tests) > 0 || len(b.benchmarks) > 0 { if len(b.tests) > 0 {
panic("unexpected tests and/or benchmarks found in build error package") panic("unexpected tests found in build error package")
} }
buildErr.ID = id buildErr.ID = id
buildErr.Duration = duration buildErr.Duration = duration
@ -220,9 +202,9 @@ func (b *reportBuilder) CreatePackage(name, result string, duration time.Duratio
} }
} }
// If we've collected output, but there were no tests or benchmarks then // If we've collected output, but there were no tests then either there
// either there were no tests, or there was some other non-build error. // actually were no tests, or there was some other non-build error.
if b.output.Contains(globalID) && len(b.tests) == 0 && len(b.benchmarks) == 0 { if b.output.Contains(globalID) && len(b.tests) == 0 {
if parseResult(result) == gtr.Fail { if parseResult(result) == gtr.Fail {
pkg.RunError = gtr.Error{ pkg.RunError = gtr.Error{
Name: name, Name: name,
@ -236,7 +218,7 @@ func (b *reportBuilder) CreatePackage(name, result string, duration time.Duratio
// If the summary result says we failed, but there were no failing tests // If the summary result says we failed, but there were no failing tests
// then something else must have failed. // then something else must have failed.
if parseResult(result) == gtr.Fail && (len(b.tests) > 0 || len(b.benchmarks) > 0) && !b.containsFailures() { if parseResult(result) == gtr.Fail && len(b.tests) > 0 && !b.containsFailures() {
pkg.RunError = gtr.Error{ pkg.RunError = gtr.Error{
Name: name, Name: name,
Output: b.output.Get(globalID), Output: b.output.Get(globalID),
@ -244,9 +226,8 @@ func (b *reportBuilder) CreatePackage(name, result string, duration time.Duratio
b.output.Clear(globalID) b.output.Clear(globalID)
} }
// Collect tests and benchmarks for this package, maintaining insertion order. // Collect tests for this package, maintaining insertion order.
var tests []gtr.Test var tests []gtr.Test
var benchmarks []gtr.Benchmark
for id := 1; id < b.nextID; id++ { for id := 1; id < b.nextID; id++ {
if t, ok := b.tests[id]; ok { if t, ok := b.tests[id]; ok {
if b.isParent(id) { if b.isParent(id) {
@ -261,18 +242,12 @@ func (b *reportBuilder) CreatePackage(name, result string, duration time.Duratio
tests = append(tests, t) tests = append(tests, t)
continue continue
} }
if bm, ok := b.benchmarks[id]; ok {
bm.Output = b.output.Get(id)
benchmarks = append(benchmarks, bm)
continue
}
} }
tests = b.groupBenchmarksByName(tests)
pkg.Coverage = b.coverage pkg.Coverage = b.coverage
pkg.Output = b.output.Get(globalID) pkg.Output = b.output.Get(globalID)
pkg.Tests = tests pkg.Tests = tests
pkg.Benchmarks = b.groupBenchmarksByName(benchmarks)
b.packages = append(b.packages, pkg) b.packages = append(b.packages, pkg)
// reset state, except for nextID to ensure all id's are unique. // reset state, except for nextID to ensure all id's are unique.
@ -280,7 +255,6 @@ func (b *reportBuilder) CreatePackage(name, result string, duration time.Duratio
b.output.Clear(globalID) b.output.Clear(globalID)
b.coverage = 0 b.coverage = 0
b.tests = make(map[int]gtr.Test) b.tests = make(map[int]gtr.Test)
b.benchmarks = make(map[int]gtr.Benchmark)
b.parentIDs = make(map[int]struct{}) b.parentIDs = make(map[int]struct{})
} }
@ -333,34 +307,14 @@ func dropLastSegment(name string) string {
return "" return ""
} }
// findBenchmark returns the id of the most recently created benchmark with the // containsFailures return true if the current list of tests contains at least
// given name if it exists. // one failing test or an unknown result.
func (b *reportBuilder) findBenchmark(name string) (int, bool) {
// check if this benchmark was lastID
if bm, ok := b.benchmarks[b.lastID]; ok && bm.Name == name {
return b.lastID, true
}
for id := len(b.benchmarks); id > 0; id-- {
if b.benchmarks[id].Name == name {
return id, true
}
}
return 0, false
}
// containsFailures return true if the current list of tests or benchmarks
// contains at least one failing test or an unknown result.
func (b *reportBuilder) containsFailures() bool { func (b *reportBuilder) containsFailures() bool {
for _, test := range b.tests { for _, test := range b.tests {
if test.Result == gtr.Fail || test.Result == gtr.Unknown { if test.Result == gtr.Fail || test.Result == gtr.Unknown {
return true return true
} }
} }
for _, bm := range b.benchmarks {
if bm.Result == gtr.Fail || bm.Result == gtr.Unknown {
return true
}
}
return false return false
} }
@ -380,57 +334,83 @@ func parseResult(r string) gtr.Result {
} }
} }
func (b *reportBuilder) groupBenchmarksByName(benchmarks []gtr.Benchmark) []gtr.Benchmark { func (b *reportBuilder) groupBenchmarksByName(tests []gtr.Test) []gtr.Test {
if len(benchmarks) == 0 { if len(tests) == 0 {
return nil return nil
} }
var grouped []gtr.Benchmark var grouped []gtr.Test
byName := make(map[string][]gtr.Benchmark) byName := make(map[string][]gtr.Test)
for _, bm := range benchmarks { for _, test := range tests {
if _, ok := byName[bm.Name]; !ok { if !strings.HasPrefix(test.Name, "Benchmark") {
grouped = append(grouped, gtr.Benchmark{ID: bm.ID, Name: bm.Name}) // If this test is not a benchmark, we won't group it by name but
// just add it to the final result.
grouped = append(grouped, test)
continue
} }
byName[bm.Name] = append(byName[bm.Name], bm) if _, ok := byName[test.Name]; !ok {
grouped = append(grouped, gtr.NewTest(test.ID, test.Name))
}
byName[test.Name] = append(byName[test.Name], test)
} }
for i, group := range grouped { for i, group := range grouped {
var ids []int if !strings.HasPrefix(group.Name, "Benchmark") {
count := 0
for _, bm := range byName[group.Name] {
ids = append(ids, bm.ID)
if bm.Result != gtr.Pass {
continue continue
} }
group.Iterations += bm.Iterations var (
group.NsPerOp += bm.NsPerOp ids []int
group.MBPerSec += bm.MBPerSec total Benchmark
group.BytesPerOp += bm.BytesPerOp count int
group.AllocsPerOp += bm.AllocsPerOp )
count++ for _, test := range byName[group.Name] {
ids = append(ids, test.ID)
if test.Result != gtr.Pass {
continue
} }
if bench, ok := GetBenchmarkData(test); ok {
total.Iterations += bench.Iterations
total.NsPerOp += bench.NsPerOp
total.MBPerSec += bench.MBPerSec
total.BytesPerOp += bench.BytesPerOp
total.AllocsPerOp += bench.AllocsPerOp
count++
}
}
group.Duration = combinedDuration(byName[group.Name])
group.Result = groupResults(byName[group.Name]) group.Result = groupResults(byName[group.Name])
group.Output = b.output.GetAll(ids...) group.Output = b.output.GetAll(ids...)
if count > 0 { if count > 0 {
group.NsPerOp /= float64(count) total.Iterations /= int64(count)
group.MBPerSec /= float64(count) total.NsPerOp /= float64(count)
group.BytesPerOp /= int64(count) total.MBPerSec /= float64(count)
group.AllocsPerOp /= int64(count) total.BytesPerOp /= int64(count)
total.AllocsPerOp /= int64(count)
SetBenchmarkData(&group, total)
} }
grouped[i] = group grouped[i] = group
} }
return grouped return grouped
} }
func groupResults(benchmarks []gtr.Benchmark) gtr.Result { func combinedDuration(tests []gtr.Test) time.Duration {
var total time.Duration
for _, test := range tests {
total += test.Duration
}
return total
}
func groupResults(tests []gtr.Test) gtr.Result {
var result gtr.Result var result gtr.Result
for _, bm := range benchmarks { for _, test := range tests {
if bm.Result == gtr.Fail { if test.Result == gtr.Fail {
return gtr.Fail return gtr.Fail
} }
if result != gtr.Pass { if result != gtr.Pass {
result = bm.Result result = test.Result
} }
} }
return result return result

View File

@ -10,43 +10,49 @@ import (
func TestGroupBenchmarksByName(t *testing.T) { func TestGroupBenchmarksByName(t *testing.T) {
tests := []struct { tests := []struct {
in []gtr.Benchmark name string
want []gtr.Benchmark in []gtr.Test
want []gtr.Test
}{ }{
{nil, nil}, {"nil", nil, nil},
{ {
[]gtr.Benchmark{{ID: 1, Name: "BenchmarkFailed", Result: gtr.Fail}}, "one failing benchmark",
[]gtr.Benchmark{{ID: 1, Name: "BenchmarkFailed", Result: gtr.Fail}}, []gtr.Test{{ID: 1, Name: "BenchmarkFailed", Result: gtr.Fail, Data: map[string]interface{}{}}},
[]gtr.Test{{ID: 1, Name: "BenchmarkFailed", Result: gtr.Fail, Data: map[string]interface{}{}}},
}, },
{ {
[]gtr.Benchmark{ "four passing benchmarks",
{ID: 1, Name: "BenchmarkOne", Result: gtr.Pass, NsPerOp: 10, MBPerSec: 400, BytesPerOp: 1, AllocsPerOp: 2}, []gtr.Test{
{ID: 2, Name: "BenchmarkOne", Result: gtr.Pass, NsPerOp: 20, MBPerSec: 300, BytesPerOp: 1, AllocsPerOp: 4}, {ID: 1, Name: "BenchmarkOne", Result: gtr.Pass, Data: map[string]interface{}{key: Benchmark{NsPerOp: 10, MBPerSec: 400, BytesPerOp: 1, AllocsPerOp: 2}}},
{ID: 3, Name: "BenchmarkOne", Result: gtr.Pass, NsPerOp: 30, MBPerSec: 200, BytesPerOp: 1, AllocsPerOp: 8}, {ID: 2, Name: "BenchmarkOne", Result: gtr.Pass, Data: map[string]interface{}{key: Benchmark{NsPerOp: 20, MBPerSec: 300, BytesPerOp: 1, AllocsPerOp: 4}}},
{ID: 4, Name: "BenchmarkOne", Result: gtr.Pass, NsPerOp: 40, MBPerSec: 100, BytesPerOp: 5, AllocsPerOp: 2}, {ID: 3, Name: "BenchmarkOne", Result: gtr.Pass, Data: map[string]interface{}{key: Benchmark{NsPerOp: 30, MBPerSec: 200, BytesPerOp: 1, AllocsPerOp: 8}}},
{ID: 4, Name: "BenchmarkOne", Result: gtr.Pass, Data: map[string]interface{}{key: Benchmark{NsPerOp: 40, MBPerSec: 100, BytesPerOp: 5, AllocsPerOp: 2}}},
}, },
[]gtr.Benchmark{ []gtr.Test{
{ID: 1, Name: "BenchmarkOne", Result: gtr.Pass, NsPerOp: 25, MBPerSec: 250, BytesPerOp: 2, AllocsPerOp: 4}, {ID: 1, Name: "BenchmarkOne", Result: gtr.Pass, Data: map[string]interface{}{key: Benchmark{NsPerOp: 25, MBPerSec: 250, BytesPerOp: 2, AllocsPerOp: 4}}},
}, },
}, },
{ {
[]gtr.Benchmark{ "four mixed result benchmarks",
[]gtr.Test{
{ID: 1, Name: "BenchmarkMixed", Result: gtr.Unknown}, {ID: 1, Name: "BenchmarkMixed", Result: gtr.Unknown},
{ID: 2, Name: "BenchmarkMixed", Result: gtr.Pass, NsPerOp: 10, MBPerSec: 400, BytesPerOp: 1, AllocsPerOp: 2}, {ID: 2, Name: "BenchmarkMixed", Result: gtr.Pass, Data: map[string]interface{}{key: Benchmark{NsPerOp: 10, MBPerSec: 400, BytesPerOp: 1, AllocsPerOp: 2}}},
{ID: 3, Name: "BenchmarkMixed", Result: gtr.Pass, NsPerOp: 40, MBPerSec: 100, BytesPerOp: 3, AllocsPerOp: 4}, {ID: 3, Name: "BenchmarkMixed", Result: gtr.Pass, Data: map[string]interface{}{key: Benchmark{NsPerOp: 40, MBPerSec: 100, BytesPerOp: 3, AllocsPerOp: 4}}},
{ID: 4, Name: "BenchmarkMixed", Result: gtr.Fail}, {ID: 4, Name: "BenchmarkMixed", Result: gtr.Fail},
}, },
[]gtr.Benchmark{ []gtr.Test{
{ID: 1, Name: "BenchmarkMixed", Result: gtr.Fail, NsPerOp: 25, MBPerSec: 250, BytesPerOp: 2, AllocsPerOp: 3}, {ID: 1, Name: "BenchmarkMixed", Result: gtr.Fail, Data: map[string]interface{}{key: Benchmark{NsPerOp: 25, MBPerSec: 250, BytesPerOp: 2, AllocsPerOp: 3}}},
}, },
}, },
} }
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
b := newReportBuilder() b := newReportBuilder()
got := b.groupBenchmarksByName(test.in) got := b.groupBenchmarksByName(test.in)
if diff := cmp.Diff(test.want, got); diff != "" { if diff := cmp.Diff(test.want, got); diff != "" {
t.Errorf("groupBenchmarksByName result incorrect, diff (-want, +got):\n%s\n", diff) t.Errorf("groupBenchmarksByName result incorrect, diff (-want, +got):\n%s\n", diff)
} }
})
} }
} }