metrics: Add checkmetrics for kata metrics CI

This PR adds the checkmetrics tool that will be used by the kata metrics CI.

Fixes #7160

Signed-off-by: Gabriela Cervantes <gabriela.cervantes.tellez@intel.com>
Gabriela Cervantes 2023-06-22 16:29:00 +00:00
parent 2d329125fd
commit c4ee601bf4
9 changed files with 1364 additions and 0 deletions

@@ -0,0 +1,43 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"fmt"
"os"
"github.com/BurntSushi/toml"
log "github.com/sirupsen/logrus"
)
type baseFile struct {
// Metric is the slice of metrics imported from the TOML config file
Metric []metrics
}
// newBasefile imports the TOML file at the path given in the file
// argument and returns the resulting baseFile struct if successful
func newBasefile(file string) (*baseFile, error) {
if file == "" {
log.Error("Missing basefile argument")
return nil, fmt.Errorf("missing baseline reference file")
}
configuration, err := os.ReadFile(file)
if err != nil {
return nil, err
}
var basefile baseFile
if err := toml.Unmarshal(configuration, &basefile); err != nil {
return nil, err
}
if len(basefile.Metric) == 0 {
log.Warningf("No entries found in basefile [%s]\n", file)
}
return &basefile, nil
}

@@ -0,0 +1,89 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
const badFileContents = `
this is not a valid toml file
`
func createBadFile(filename string) error {
return os.WriteFile(filename, []byte(badFileContents), os.FileMode(0640))
}
const goodFileContents = `
# This file contains baseline expectations
# for checked results by checkmetrics tool.
[[metric]]
# The name of the metrics test, must match
# that of the generated results file
name = "boot-times"
type = "json"
description = "measure container lifecycle timings"
# Min and Max values set a 'range' that
# the checked statistic (default: mean) of the
# Results data must fall within (inclusive)
checkvar = ".Results | .[] | .\"to-workload\".Result"
checktype = "mean"
minval = 1.3
maxval = 1.5
# ... repeat this for each metric ...
`
func createGoodFile(filename string) error {
return os.WriteFile(filename, []byte(goodFileContents), os.FileMode(0640))
}
func TestNewBasefile(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "cm-")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
// Should fail to load a nil filename
_, err = newBasefile("")
assert.NotNil(err, "Did not error on empty filename")
// Should fail to load a file that does not exist
_, err = newBasefile("/some/file/that/does/not/exist")
assert.NotNil(err, "Did not error on non-existent file")
// Check a badly formed toml file
badFileName := filepath.Join(tmpdir, "badFile.toml")
err = createBadFile(badFileName)
assert.NoError(err)
_, err = newBasefile(badFileName)
assert.NotNil(err, "Did not error on bad file contents")
// Check a well formed toml file
goodFileName := filepath.Join(tmpdir, "goodFile.toml")
err = createGoodFile(goodFileName)
assert.NoError(err)
bf, err := newBasefile(goodFileName)
assert.Nil(err, "Error'd on good file contents")
// Now check we did load what we expected from the toml
t.Logf("Entry.Name: %v", bf.Metric[0].Name)
m := bf.Metric[0]
assert.Equal("boot-times", m.Name, "data loaded should match")
assert.Equal("measure container lifecycle timings", m.Description, "data loaded should match")
assert.Equal("json", m.Type, "data loaded should match")
assert.Equal("mean", m.CheckType, "data loaded should match")
assert.Equal(".Results | .[] | .\"to-workload\".Result", m.CheckVar, "data loaded should match")
assert.Equal(1.3, m.MinVal, "data loaded should match")
assert.Equal(1.5, m.MaxVal, "data loaded should match")
// Gap has not been calculated yet...
assert.Equal(0.0, m.Gap, "data loaded should match")
}

@@ -0,0 +1,202 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"errors"
"strconv"
log "github.com/sirupsen/logrus"
)
// metricsCheck is a placeholder struct for us to attach the methods to and make
// it clear they belong to this grouping. Maybe there is a better way?
type metricsCheck struct {
}
// reportTitleSlice returns the report table title row as a slice of strings
func (mc *metricsCheck) reportTitleSlice() []string {
// FIXME - now we don't only check the mean, let's re-arrange the order
// to make a little more sense.
// Also, CoV is so much more useful than SD - let's stop printing out
// the SD, and instead add the % gap between the Min and Max Results
return []string{"P/F",
"Name",
// This is the check boundary, not the smallest value in Results
"Flr",
"Mean",
// This is the check boundary, not the largest value in Results
"Ceil",
"Gap",
"Min",
"Max",
"Rng",
"Cov",
"Its"}
}
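// For illustration, a row of the summary table under these titles, using the
// values asserted in the checkstats unit test (a passing check):
//
// P/F Name       Flr  Mean Ceil Gap    Min  Max  Rng    CoV   Its
// P   CheckStats 0.90 2.00 3.10 110.0% 1.00 3.00 200.0% 40.8% 3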
// genSummaryLine takes in all the relevant report arguments and returns
// a string slice formatted appropriately for the summary table generation
func (mc *metricsCheck) genSummaryLine(
passed bool,
name string,
minval string,
mean string,
maxval string,
gap string,
min string,
max string,
rnge string,
cov string,
iterations string) (summary []string) {
if passed {
summary = append(summary, "P")
} else {
summary = append(summary, "*F*")
}
summary = append(summary,
name,
minval,
mean,
maxval,
gap,
min,
max,
rnge,
cov,
iterations)
return
}
// genErrorLine takes a number of error argument strings and a pass/fail bool
// and returns a string slice formatted appropriately for the summary report.
// It exists to hide some of the inner details of just how the slice is meant
// to be formatted, such as the exact number of columns
func (mc *metricsCheck) genErrorLine(
passed bool,
error1 string,
error2 string,
error3 string) (summary []string) {
summary = mc.genSummaryLine(passed, error1, error2, error3,
"", "", "", "", "", "", "")
return
}
// checkstats takes a basefile metric record with its stats struct filled out
// and checks if the file metrics pass the metrics comparison checks.
// checkstats returns a string slice containing the results of the check.
// The err return will be non-nil if the check fails.
func (mc *metricsCheck) checkstats(m metrics) (summary []string, err error) {
var pass = true
var val float64
log.Debugf("Compare check for [%s]", m.Name)
log.Debugf("Checking value [%s]", m.CheckType)
// Pick out the value we are range checking depending on the
// config. The default, if not set, is the "mean"
switch m.CheckType {
case "min":
val = m.stats.Min
case "max":
val = m.stats.Max
case "cov":
val = m.stats.CoV
case "sd":
val = m.stats.SD
case "mean":
fallthrough
default:
val = m.stats.Mean
}
log.Debugf(" Check minval (%f < %f)", m.MinVal, val)
if val < m.MinVal {
log.Warnf("Failed Minval (%7f > %7f) for [%s]",
m.MinVal, val,
m.Name)
pass = false
} else {
log.Debug("Passed")
}
log.Debugf(" Check maxval (%f > %f)", m.MaxVal, val)
if val > m.MaxVal {
log.Warnf("Failed Maxval (%7f < %7f) for [%s]",
m.MaxVal, val,
m.Name)
pass = false
} else {
log.Debug("Passed")
}
if !pass {
err = errors.New("Failed")
}
// Note - choosing the precision for the fields is tricky without
// knowledge of the actual metrics tests results. For now set
// precision to 'probably big enough', and later we may want to
// add an annotation to the TOML baselines to give an indication of
// expected values - or, maybe we can derive it from the min/max values
// Are we presenting as a percentage-based difference?
if showPercentage {
// Work out what our midpoint baseline 'goal' is.
midpoint := (m.MinVal + m.MaxVal) / 2
// Calculate our values as a % based off the mid-point
// of the acceptable range.
floorpc := (m.MinVal / midpoint) * 100.0
ceilpc := (m.MaxVal / midpoint) * 100.0
meanpc := (m.stats.Mean / midpoint) * 100.0
minpc := (m.stats.Min / midpoint) * 100.0
maxpc := (m.stats.Max / midpoint) * 100.0
// Present as percentages relative to the baseline midpoint
summary = append(summary, mc.genSummaryLine(
pass,
m.Name,
// Note this is the check boundary, not the smallest Result seen
strconv.FormatFloat(floorpc, 'f', 1, 64)+"%",
strconv.FormatFloat(meanpc, 'f', 1, 64)+"%",
// Note this is the check boundary, not the largest Result seen
strconv.FormatFloat(ceilpc, 'f', 1, 64)+"%",
strconv.FormatFloat(m.Gap, 'f', 1, 64)+"%",
strconv.FormatFloat(minpc, 'f', 1, 64)+"%",
strconv.FormatFloat(maxpc, 'f', 1, 64)+"%",
strconv.FormatFloat(m.stats.RangeSpread, 'f', 1, 64)+"%",
strconv.FormatFloat(m.stats.CoV, 'f', 1, 64)+"%",
strconv.Itoa(m.stats.Iterations))...)
} else {
// Or present as physical values
summary = append(summary, mc.genSummaryLine(
pass,
m.Name,
// Note this is the check boundary, not the smallest Result seen
strconv.FormatFloat(m.MinVal, 'f', 2, 64),
strconv.FormatFloat(m.stats.Mean, 'f', 2, 64),
// Note this is the check boundary, not the largest Result seen
strconv.FormatFloat(m.MaxVal, 'f', 2, 64),
strconv.FormatFloat(m.Gap, 'f', 1, 64)+"%",
strconv.FormatFloat(m.stats.Min, 'f', 2, 64),
strconv.FormatFloat(m.stats.Max, 'f', 2, 64),
strconv.FormatFloat(m.stats.RangeSpread, 'f', 1, 64)+"%",
strconv.FormatFloat(m.stats.CoV, 'f', 1, 64)+"%",
strconv.Itoa(m.stats.Iterations))...)
}
return
}
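// Worked example of the percentage presentation, using the baseline from the
// unit tests: MinVal=0.9 and MaxVal=3.1 give midpoint (0.9+3.1)/2 = 2.0, so
// Flr = (0.9/2.0)*100 = 45.0%, Ceil = (3.1/2.0)*100 = 155.0%, and a Mean of
// exactly 2.0 renders as 100.0%.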

@@ -0,0 +1,312 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
)
// Pre-filled out metrics (apart from the calculated stats)
// This should **pass** the "mean" metrics checks by default
var exampleM = metrics{
Name: "name",
Description: "desc",
Type: "type",
CheckType: "json",
CheckVar: "Results",
MinVal: 0.9,
MaxVal: 3.1,
Gap: 0,
stats: statistics{
Results: []float64{1.0, 2.0, 3.0},
Iterations: 3,
Mean: 0.0,
Min: 0.0,
Max: 0.0,
Range: 0.0,
RangeSpread: 0.0,
SD: 0.0,
CoV: 0.0}}
func TestGenSummaryLine(t *testing.T) {
assert := assert.New(t)
var args = []string{
"name",
"minval",
"mean",
"maxval",
"gap",
"min",
"max",
"rnge",
"cov",
"iterations"}
// Check for the 'passed' case
s := (&metricsCheck{}).genSummaryLine(
true, //passed
args[0], //name
args[1], //minval
args[2], //mean
args[3], //maxval
args[4], //gap
args[5], //min
args[6], //max
args[7], //rnge
args[8], //cov
args[9]) //iterations
for n, i := range s {
if n == 0 {
assert.Equal("P", i, "Should be equal")
} else {
assert.Equal(args[n-1], i, "Should be equal")
}
}
// Check for the 'failed' case
s = (&metricsCheck{}).genSummaryLine(
false, //passed
args[0], //name
args[1], //minval
args[2], //mean
args[3], //maxval
args[4], //gap
args[5], //min
args[6], //max
args[7], //rnge
args[8], //cov
args[9]) //iterations
for n, i := range s {
if n == 0 {
assert.Equal("*F*", i, "Should be equal")
} else {
assert.Equal(args[n-1], i, "Should be equal")
}
}
}
func TestCheckStats(t *testing.T) {
assert := assert.New(t)
var m = exampleM
m.Name = "CheckStats"
// Check before we have done the calculations - should fail
_, err := (&metricsCheck{}).checkstats(m)
assert.Error(err)
m.calculate()
// Constants here calculated from info coded in struct above
// Funky rounding of Gap, as float imprecision actually gives us
// 110.00000000000001 - check to within 0.1% then...
roundedGap := math.Round(m.Gap/0.001) * 0.001
assert.Equal(110.0, roundedGap, "Should be equal")
assert.Equal(2.0, m.stats.Mean, "Should be equal")
assert.Equal(1.0, m.stats.Min, "Should be equal")
assert.Equal(3.0, m.stats.Max, "Should be equal")
assert.Equal(2.0, m.stats.Range, "Should be equal")
assert.Equal(200.0, m.stats.RangeSpread, "Should be equal")
assert.Equal(0.816496580927726, m.stats.SD, "Should be equal")
assert.Equal(40.8248290463863, m.stats.CoV, "Should be equal")
s, err := (&metricsCheck{}).checkstats(m)
assert.NoError(err)
assert.Equal("P", s[0], "Should be equal") // Pass
assert.Equal("CheckStats", s[1], "Should be equal") // test name
assert.Equal("0.90", s[2], "Should be equal") // Floor
assert.Equal("2.00", s[3], "Should be equal") // Mean
assert.Equal("3.10", s[4], "Should be equal") // Ceiling
assert.Equal("110.0%", s[5], "Should be equal") // Gap
assert.Equal("1.00", s[6], "Should be equal") // Min
assert.Equal("3.00", s[7], "Should be equal") // Max
assert.Equal("200.0%", s[8], "Should be equal") // Range %
assert.Equal("40.8%", s[9], "Should be equal") // CoV
assert.Equal("3", s[10], "Should be equal") // Iterations
// And check in percentage presentation mode
showPercentage = true
s, err = (&metricsCheck{}).checkstats(m)
assert.NoError(err)
assert.Equal("P", s[0], "Should be equal") // Pass
assert.Equal("CheckStats", s[1], "Should be equal") // test name
assert.Equal("45.0%", s[2], "Should be equal") // Floor
assert.Equal("100.0%", s[3], "Should be equal") // Mean
assert.Equal("155.0%", s[4], "Should be equal") // Ceiling
assert.Equal("110.0%", s[5], "Should be equal") // Gap
assert.Equal("50.0%", s[6], "Should be equal") // Min
assert.Equal("150.0%", s[7], "Should be equal") // Max
assert.Equal("200.0%", s[8], "Should be equal") // Range %
assert.Equal("40.8%", s[9], "Should be equal") // CoV
assert.Equal("3", s[10], "Should be equal") // Iterations
// And put the default back
showPercentage = false
// Funcs called with a Min that fails and a Max that fails.
// Presumption is that unmodified metrics should pass.
// FIXME - we don't test the actual < vs <= boundary type conditions
// Mean is 2.0
CheckMean(assert, 3.0, 1.0)
// Min is 1.0
CheckMin(assert, 3.0, 0.5)
// Max is 3.0
CheckMax(assert, 4.0, 1.0)
// CoV is 40.8
CheckCoV(assert, 50.0, 1.0)
// SD is 0.8165
CheckSD(assert, 1.0, 0.5)
}
func CheckMean(assert *assert.Assertions, badmin float64, badmax float64) {
m := exampleM
m.CheckType = "mean"
m.Name = "CheckMean"
// Do the stats
m.calculate()
// Defaults should pass
_, err := (&metricsCheck{}).checkstats(m)
assert.NoError(err)
// badmin should fail
old := m.MinVal
m.MinVal = badmin
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
m.MinVal = old
// badmax should fail
m.MaxVal = badmax
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
}
func CheckMin(assert *assert.Assertions, badmin float64, badmax float64) {
m := exampleM
m.CheckType = "min"
m.Name = "CheckMin"
// Do the stats
m.calculate()
// Defaults should pass
_, err := (&metricsCheck{}).checkstats(m)
assert.NoError(err)
// badmin should fail
old := m.MinVal
m.MinVal = badmin
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
m.MinVal = old
// badmax should fail
m.MaxVal = badmax
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
}
func CheckMax(assert *assert.Assertions, badmin float64, badmax float64) {
m := exampleM
m.CheckType = "max"
m.Name = "CheckMax"
// Do the stats
m.calculate()
// Defaults should pass
_, err := (&metricsCheck{}).checkstats(m)
assert.NoError(err)
// badmin should fail
old := m.MinVal
m.MinVal = badmin
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
m.MinVal = old
// badmax should fail
m.MaxVal = badmax
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
}
func CheckSD(assert *assert.Assertions, badmin float64, badmax float64) {
m := exampleM
m.CheckType = "sd"
m.Name = "CheckSD"
// Do the stats
m.calculate()
// Set it up to pass by default
m.MinVal = 0.9 * m.stats.SD
m.MaxVal = 1.1 * m.stats.SD
oldMin := m.MinVal
oldMax := m.MaxVal
// Defaults should pass
_, err := (&metricsCheck{}).checkstats(m)
assert.NoError(err)
// badmin should fail
m.MinVal = badmin
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
m.MinVal = oldMin
// badmax should fail
m.MaxVal = badmax
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
m.MaxVal = oldMax
}
func CheckCoV(assert *assert.Assertions, badmin float64, badmax float64) {
m := exampleM
m.CheckType = "cov"
m.Name = "CheckCoV"
// Do the stats
m.calculate()
// Set it up to pass by default
m.MinVal = 0.9 * m.stats.CoV
m.MaxVal = 1.1 * m.stats.CoV
oldMin := m.MinVal
oldMax := m.MaxVal
// Defaults should pass
_, err := (&metricsCheck{}).checkstats(m)
assert.NoError(err)
// badmin should fail
m.MinVal = badmin
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
m.MinVal = oldMin
// badmax should fail
m.MaxVal = badmax
_, err = (&metricsCheck{}).checkstats(m)
assert.Error(err)
m.MaxVal = oldMax
}

@@ -0,0 +1,97 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"bufio"
"bytes"
"io"
"os/exec"
"strconv"
log "github.com/sirupsen/logrus"
)
// jsonRecord has no data - the data is loaded and processed and stored
// back into the metrics structure passed in.
type jsonRecord struct {
}
// load reads in a JSON 'Metrics' results file from the file path given and
// parses out the actual results data using the 'jq' query found in the
// respective TOML entry.
func (c *jsonRecord) load(filepath string, metric *metrics) error {
var err error
log.Debugf("in json load of [%s]", filepath)
log.Debugf(" Run jq '%v' %s", metric.CheckVar, filepath)
out, err := exec.Command("jq", metric.CheckVar, filepath).Output()
if err != nil {
log.Warnf("Failed to run [jq %v %v][%v]", metric.CheckVar, filepath, err)
return err
}
log.Debugf(" Got result [%v]", out)
// Try to parse the results as floats first...
floats, err := readFloats(bytes.NewReader(out))
if err != nil {
// And if they are not floats, check if they are ints...
ints, err := readInts(bytes.NewReader(out))
if err != nil {
log.Warnf("Failed to decode [%v]", out)
return err
}
// Always store the internal data as floats
floats = []float64{}
for _, i := range ints {
floats = append(floats, float64(i))
}
}
log.Debugf(" and got output [%v]", floats)
// Store the results back 'up'
metric.stats.Results = floats
// And do the stats on them
metric.calculate()
return nil
}
// readInts parses whitespace-separated ASCII integers into a slice of ints
func readInts(r io.Reader) ([]int, error) {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanWords)
var result []int
for scanner.Scan() {
i, err := strconv.Atoi(scanner.Text())
if err != nil {
return result, err
}
result = append(result, i)
}
return result, scanner.Err()
}
// readFloats parses whitespace-separated ASCII floats into a slice of floats
func readFloats(r io.Reader) ([]float64, error) {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanWords)
var result []float64
for scanner.Scan() {
f, err := strconv.ParseFloat(scanner.Text(), 64)
if err != nil {
return result, err
}
result = append(result, f)
}
return result, scanner.Err()
}
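// As a minimal sketch of the flow (values mirrored from the unit tests): for
// a checkvar of ".Results | .[] | .average.Result", jq prints one number per
// line, which readFloats then folds into the stats input:
//
// out := []byte("10.56\n20.56\n30.56\n")
// floats, _ := readFloats(bytes.NewReader(out)) // [10.56 20.56 30.56]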

@@ -0,0 +1,200 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"bytes"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
const BadFileContents = `
this is not a valid json file
`
func CreateBadFile(filename string) error {
return os.WriteFile(filename, []byte(BadFileContents), os.FileMode(0640))
}
const GoodFileContents = `
{
"env" : {
"Runtime": "/usr/share/defaults/kata-containers/configuration.toml",
"RuntimeVersion": "0.1.0",
"Hypervisor": "/usr/bin/qemu-lite-system-x86_64",
"HypervisorVersion": " QEMU emulator version 2.7.0, Copyright (c) 2003-2016 Fabrice Bellard and the QEMU Project developers",
"Shim": "/usr/local/bin/containerd-shim-kata-v2",
"ShimVersion": " kata-shim version 2.4.0-rc0"
},
"date" : {
"ns": 1522162042326099526,
"Date": "2018-03-27 15:47:22 +0100"
},
"Config": [
{
"containers": 20,
"ksm": 0,
"auto": "",
"waittime": 5,
"image": "busybox",
"command": "sh"
}
],
"Results": [
{
"average": {
"Result": 10.56,
"Units" : "KB"
},
"qemus": {
"Result": 1.95,
"Units" : "KB"
},
"shims": {
"Result": 2.40,
"Units" : "KB"
},
"proxys": {
"Result": 3.21,
"Units" : "KB"
}
},
{
"average": {
"Result": 20.56,
"Units" : "KB"
},
"qemus": {
"Result": 4.95,
"Units" : "KB"
},
"shims": {
"Result": 5.40,
"Units" : "KB"
},
"proxys": {
"Result": 6.21,
"Units" : "KB"
}
},
{
"average": {
"Result": 30.56,
"Units" : "KB"
},
"qemus": {
"Result": 7.95,
"Units" : "KB"
},
"shims": {
"Result": 8.40,
"Units" : "KB"
},
"proxys": {
"Result": 9.21,
"Units" : "KB"
}
}
]
}
`
func CreateFile(filename string, contents string) error {
return os.WriteFile(filename, []byte(contents), os.FileMode(0640))
}
func TestLoad(t *testing.T) {
assert := assert.New(t)
// Set up and create a json results file
tmpdir, err := os.MkdirTemp("", "cm-")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
// Check a badly formed JSON file
badFileName := filepath.Join(tmpdir, "badFile.json")
err = CreateBadFile(badFileName)
assert.NoError(err)
// Set up our basic metrics struct
var m = metrics{
Name: "name",
Description: "desc",
Type: "type",
CheckType: "json",
CheckVar: ".Results | .[] | .average.Result",
MinVal: 1.9,
MaxVal: 2.1,
Gap: 0,
stats: statistics{
Results: []float64{1.0, 2.0, 3.0},
Iterations: 0,
Mean: 0.0,
Min: 0.0,
Max: 0.0,
Range: 0.0,
RangeSpread: 0.0,
SD: 0.0,
CoV: 0.0}}
err = (&jsonRecord{}).load(badFileName, &m)
assert.Error(err, "Did not error on bad file contents")
// Check the well formed file
goodFileName := filepath.Join(tmpdir, "goodFile.json")
err = CreateFile(goodFileName, GoodFileContents)
assert.NoError(err)
err = (&jsonRecord{}).load(goodFileName, &m)
assert.NoError(err, "Error'd on good file contents")
t.Logf("m now %+v", m)
// And check some of the values we get from that JSON read
assert.Equal(3, m.stats.Iterations, "Should be equal")
assert.Equal(10.56, m.stats.Min, "Should be equal")
assert.Equal(30.56, m.stats.Max, "Should be equal")
// Check we default to json type
m2 := m
m2.CheckType = ""
err = (&jsonRecord{}).load(goodFileName, &m)
assert.NoError(err, "Error'd on no type file contents")
}
func TestReadInts(t *testing.T) {
assert := assert.New(t)
good := bytes.NewReader([]byte("1 2 3"))
bad := bytes.NewReader([]byte("1 2 3.0"))
_, err := readInts(bad)
assert.Error(err, "Should fail")
ints, err := readInts(good)
assert.NoError(err, "Should not fail")
assert.Equal(1, ints[0], "Should be equal")
assert.Equal(2, ints[1], "Should be equal")
assert.Equal(3, ints[2], "Should be equal")
}
func TestReadFloats(t *testing.T) {
assert := assert.New(t)
good := bytes.NewReader([]byte("1.0 2.0 3.0"))
bad := bytes.NewReader([]byte("1.0 2.0 blah"))
_, err := readFloats(bad)
assert.Error(err, "Should fail")
floats, err := readFloats(good)
assert.NoError(err, "Should not fail")
assert.Equal(1.0, floats[0], "Should be equal")
assert.Equal(2.0, floats[1], "Should be equal")
assert.Equal(3.0, floats[2], "Should be equal")
}

@@ -0,0 +1,216 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
/*
Program checkmetrics compares a set of metrics results, stored in
JSON files, against a set of baseline metrics 'expectations',
defined in a TOML file.
It returns non-zero if any of the TOML metrics are not met.
It prints out a tabulated report summary at the end of the run.
*/
package main
import (
"errors"
"fmt"
"os"
"path"
"github.com/olekukonko/tablewriter"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// name is the name of the program.
const name = "checkmetrics"
// usage is the usage of the program.
const usage = name + ` checks JSON metrics results against a TOML baseline`
var (
// The TOML basefile
ciBasefile *baseFile
// If set then we show results as a relative percentage (to the baseline)
showPercentage = false
// System default path for baseline file
// the value will be set by Makefile
sysBaseFile string
)
// processMetricsBaseline locates the files matching each entry in the TOML
// baseline, loads and processes each one, and checks if the metrics fall in range.
// Finally, it generates a summary report.
func processMetricsBaseline(context *cli.Context) (err error) {
var report [][]string // summary report table
var passes int
var fails int
var summary []string
log.Debug("processMetricsBaseline")
// Process each Metrics TOML entry one at a time
// FIXME - this is not structured to be testable - if you need to add a unit
// test here then *please* re-structure these funcs etc.
for _, m := range ciBasefile.Metric {
log.Debugf("Processing %s", m.Name)
fullpath := path.Join(context.GlobalString("metricsdir"), m.Name)
switch m.Type {
case "":
log.Debugf("No Type, default to JSON for [%s]", m.Name)
fallthrough
case "json":
{
var thisJSON jsonRecord
log.Debug("Process a JSON")
fullpath = fullpath + ".json"
log.Debugf("Fullpath %s", fullpath)
err = thisJSON.load(fullpath, &m)
if err != nil {
log.Warnf("[%s][%v]", fullpath, err)
// Record that this one did not complete successfully
fails++
// Make some sort of note in the summary table that this failed
summary = (&metricsCheck{}).genErrorLine(false, m.Name, "Failed to load JSON", fmt.Sprintf("%s", err))
// Not a fatal error - continue to process any remaining files
break
}
summary, err = (&metricsCheck{}).checkstats(m)
if err != nil {
log.Warnf("Check for [%s] failed [%v]", m.Name, err)
log.Warnf(" with [%s]", summary)
fails++
} else {
log.Debugf("Check for [%s] passed", m.Name)
log.Debugf(" with [%s]", summary)
passes++
}
}
default:
{
log.Warnf("Unknown type [%s] for metric [%s]", m.Type, m.Name)
summary = (&metricsCheck{}).genErrorLine(false, m.Name, "Unsupported Type", fmt.Sprint(m.Type))
fails++
}
}
report = append(report, summary)
log.Debugf("Done %s", m.Name)
}
if fails != 0 {
log.Warn("Overall we failed")
}
fmt.Printf("\n")
// We need to find a better way here to report that some tests failed to even
// get into the table - such as JSON file parse failures
// Actually, now we report file failures into the report as well, we should not
// see this - but, it is nice to leave as a sanity check.
if len(report) < fails+passes {
fmt.Printf("Warning: some tests (%d) failed to report\n", (fails+passes)-len(report))
}
// Note - not logging here - the summary goes to stdout
fmt.Println("Report Summary:")
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader((&metricsCheck{}).reportTitleSlice())
for _, s := range report {
table.Append(s)
}
table.Render()
fmt.Printf("Fails: %d, Passes %d\n", fails, passes)
// Did we see any failures during the run?
if fails != 0 {
err = errors.New("Failed")
} else {
err = nil
}
return
}
// checkmetrics main entry point.
// Do the command line processing, load the TOML file, and do the processing
// against the data files
func main() {
app := cli.NewApp()
app.Name = name
app.Usage = usage
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "basefile",
Usage: "path to baseline TOML metrics file",
},
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output in the log",
},
cli.StringFlag{
Name: "log",
Usage: "set the log file path",
},
cli.StringFlag{
Name: "metricsdir",
Usage: "directory containing metrics results files",
},
cli.BoolFlag{
Name: "percentage",
Usage: "present results as percentage differences",
Destination: &showPercentage,
},
}
app.Before = func(context *cli.Context) error {
var err error
var baseFilePath string
if path := context.GlobalString("log"); path != "" {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0640)
if err != nil {
return err
}
log.SetOutput(f)
}
if context.GlobalBool("debug") {
log.SetLevel(log.DebugLevel)
}
if context.GlobalString("metricsdir") == "" {
log.Error("Must supply metricsdir argument")
return errors.New("Must supply metricsdir argument")
}
baseFilePath = context.GlobalString("basefile")
if baseFilePath == "" {
baseFilePath = sysBaseFile
}
ciBasefile, err = newBasefile(baseFilePath)
return err
}
app.Action = func(context *cli.Context) error {
return processMetricsBaseline(context)
}
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
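// A typical invocation looks like the following (the flag names come from the
// cli definitions above; the file and directory names are just placeholders):
//
// checkmetrics --basefile baseline.toml --metricsdir ./results --percentage
//
// The process exits non-zero if any metric falls outside its baseline range.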

@@ -0,0 +1,108 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"github.com/montanaflynn/stats"
log "github.com/sirupsen/logrus"
)
type statistics struct {
Results []float64 // Result array converted to floats
Iterations int // How many results did we gather
Mean float64 // The 'average'
Min float64 // Smallest value we saw
Max float64 // Largest value we saw
Range float64 // Max - Min
RangeSpread float64 // (Range/Min) * 100
SD float64 // Standard Deviation
CoV float64 // Co-efficient of Variation
}
// metrics represents a single metric baseline entry and its results
// The members are Public so the toml reflection can see them, but I quite
// like the lower case toml naming, hence we use the annotation strings to
// get the parser to look for lower case.
type metrics struct {
// Generic to JSON files
// Generally mandatory
Name string `toml:"name"` //Used to locate the results file
Description string `toml:"description"`
// Optional config entries
Type string `toml:"type"` //Default is JSON
// Processing related entries
CheckType string `toml:"checktype"` // Which stat to check: mean, min, max, sd, cov
// default: mean
CheckVar string `toml:"checkvar"` //JSON: which var to (extract and) calculate on
// is a 'jq' query
stats statistics // collection of our stats data, calculated from Results
// For setting 'bounds', you can either set a min/max value pair,
// or you can set a mid-range value and a 'percentage gap'.
// You should set one or the other. If both are set, the percentage
// pair takes precedence and overwrites the Min/Max values.
// The range we expect the processed result to fall within
// (MinVal <= Result <= MaxVal) == pass
MinVal float64 `toml:"minval"`
MaxVal float64 `toml:"maxval"`
// If we are doing a percentage range check then you need to set
// both a mid-value and a percentage range to check.
MidVal float64 `toml:"midval"`
MinPercent float64 `toml:"minpercent"`
MaxPercent float64 `toml:"maxpercent"`
// Vars that are not in the toml file, but are filled out later
// dynamically
Gap float64 // What is the % gap between the Min and Max vals
}
// Calculate the statistics from the stored Results data
// Although the calculations can fail, we don't fail the function
func (m *metrics) calculate() {
// First we check/calculate some non-stats values to fill out
// our base data.
// We should either have a Min/Max value pair or a percentage/MidVal
// set. If we find a non-0 percentage set, then calculate the Min/Max
// values from them, as the rest of the code base works off the Min/Max
// values.
if (m.MinPercent + m.MaxPercent) != 0 {
m.MinVal = m.MidVal * (1 - (m.MinPercent / 100))
m.MaxVal = m.MidVal * (1 + (m.MaxPercent / 100))
// The rest of the system works off the Min/Max value
// pair - so, if your min/max percentage values are not equal
// then **the values you see in the results table will not look
// like the ones you put in the toml file**, because they are
// based off the mid-value calculation below.
// This is unfortunate, but it keeps the code simpler overall.
}
// the gap is the % swing around the midpoint.
midpoint := (m.MinVal + m.MaxVal) / 2
m.Gap = (((m.MaxVal / midpoint) - 1) * 2) * 100
// And now we work out the actual stats
m.stats.Iterations = len(m.stats.Results)
m.stats.Mean, _ = stats.Mean(m.stats.Results)
m.stats.Min, _ = stats.Min(m.stats.Results)
m.stats.Max, _ = stats.Max(m.stats.Results)
m.stats.Range = m.stats.Max - m.stats.Min
m.stats.RangeSpread = (m.stats.Range / m.stats.Min) * 100.0
m.stats.SD, _ = stats.StandardDeviation(m.stats.Results)
m.stats.CoV = (m.stats.SD / m.stats.Mean) * 100.0
log.Debugf(" Iters is %d", m.stats.Iterations)
log.Debugf(" Min is %f", m.stats.Min)
log.Debugf(" Max is %f", m.stats.Max)
log.Debugf(" Mean is %f", m.stats.Mean)
log.Debugf(" SD is %f", m.stats.SD)
log.Debugf(" CoV is %.2f", m.stats.CoV)
}
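// Worked examples of the Gap math, using the values from the unit tests:
// with MinVal=0.9 and MaxVal=3.1, midpoint = (0.9+3.1)/2 = 2.0 and
// Gap = ((3.1/2.0)-1)*2*100 = 110%. With MidVal=2.0, MinPercent=20 and
// MaxPercent=25 the pair becomes MinVal=1.6, MaxVal=2.5, so midpoint = 2.05
// and Gap = ((2.5/2.05)-1)*2*100 ≈ 43.9% - not the naive 45%, because the
// asymmetric percentages shift the midpoint.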

@@ -0,0 +1,97 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCalculate(t *testing.T) {
assert := assert.New(t)
var m = metrics{
Name: "name",
Description: "desc",
Type: "type",
CheckType: "json",
CheckVar: "Results",
MinVal: 1.9,
MaxVal: 2.1,
Gap: 0,
stats: statistics{
Results: []float64{1.0, 2.0, 3.0},
Iterations: 3,
Mean: 0.0,
Min: 0.0,
Max: 0.0,
Range: 0.0,
RangeSpread: 0.0,
SD: 0.0,
CoV: 0.0}}
m.calculate()
// Constants here calculated from info coded in struct above
// We do a little funky math on Gap to round it to within 0.1% - as the actual
// gap math gave us 10.000000000000009 ...
roundedGap := math.Round(m.Gap/0.001) * 0.001
assert.Equal(10.0, roundedGap, "Should be equal")
assert.Equal(2.0, m.stats.Mean, "Should be equal")
assert.Equal(1.0, m.stats.Min, "Should be equal")
assert.Equal(3.0, m.stats.Max, "Should be equal")
assert.Equal(2.0, m.stats.Range, "Should be equal")
assert.Equal(200.0, m.stats.RangeSpread, "Should be equal")
assert.Equal(0.816496580927726, m.stats.SD, "Should be equal")
assert.Equal(40.8248290463863, m.stats.CoV, "Should be equal")
}
// Test that only setting a % range works
func TestCalculate2(t *testing.T) {
assert := assert.New(t)
var m = metrics{
Name: "name",
Description: "desc",
Type: "type",
CheckType: "json",
CheckVar: "Results",
//MinVal: 1.9,
//MaxVal: 2.1,
MinPercent: 20,
MaxPercent: 25,
MidVal: 2.0,
Gap: 0,
stats: statistics{
Results: []float64{1.0, 2.0, 3.0},
Iterations: 3,
Mean: 0.0,
Min: 0.0,
Max: 0.0,
Range: 0.0,
RangeSpread: 0.0,
SD: 0.0,
CoV: 0.0}}
m.calculate()
// Constants here calculated from info coded in struct above
// We do a little funky math on Gap to round it to within 0.1% - as the actual
// gap math gave us 10.000000000000009 ...
roundedGap := math.Round(m.Gap/0.001) * 0.001
// This is not a neat 45 (20+25), as the asymmetric percentages shift the midpoint.
assert.Equal(43.902, roundedGap, "Should be equal")
assert.Equal(2.0, m.stats.Mean, "Should be equal")
assert.Equal(1.0, m.stats.Min, "Should be equal")
assert.Equal(3.0, m.stats.Max, "Should be equal")
assert.Equal(2.0, m.stats.Range, "Should be equal")
assert.Equal(200.0, m.stats.RangeSpread, "Should be equal")
assert.Equal(0.816496580927726, m.stats.SD, "Should be equal")
assert.Equal(40.8248290463863, m.stats.CoV, "Should be equal")
}