metrics: Enables FIO test for kata containers

FIO benchmark is enabled to measure IO in Kata
at different latencies using containerd client,
in order to complement the CI metrics testing set.

This PR also deprecates the previous FIO benchmark
based on k8s.

Fixes: #8080

Signed-off-by: David Esparza <david.esparza.borquez@intel.com>
This commit is contained in:
David Esparza 2023-09-27 16:37:53 -06:00
parent a890ad3a16
commit a2159a6361
No known key found for this signature in database
GPG Key ID: EABE0B1A98CC3B7A
50 changed files with 316 additions and 1533 deletions

View File

@ -0,0 +1,26 @@
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Set up an Ubuntu image with the 'fio' I/O tester installed.
FROM docker.io/library/ubuntu:22.04

# Version of the Dockerfile
LABEL DOCKERFILE_VERSION="1.0"

# URL for the fio tooling (informational; not consumed during the build).
# Use the 'ENV key=value' form: the space-separated form is deprecated.
ENV FIO_TOOLING_URL="https://github.com/axboe/fio"

# Install fio plus the minimal tools the benchmark script needs, then
# remove the apt lists (glob the contents rather than deleting the
# directory itself) to keep the image small.
RUN apt-get update --quiet && \
	apt-get install --quiet --no-install-recommends -y \
	bash \
	util-linux \
	procps \
	fio && \
	apt-get clean && \
	rm -rf /var/lib/apt/lists/*

# Benchmark driver executed inside the container.
COPY workload/fio_bench.sh /

WORKDIR /

CMD ["/bin/bash"]

View File

@ -0,0 +1,129 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# Description of the test:
# This test runs the 'fio benchmark' on kata containers
# https://fio.readthedocs.io/en/latest/
set -o pipefail
# FIO variable settings
# io-types supported:
# read, write, randread, randwrite, randrw, readwrite
io_type="read"
block_size="4k"
num_jobs="2"
# FIO default settings
readonly ioengine="libaio"
readonly rate_process="linear"
readonly disable_buffered="1"
readonly iodepth="2"
readonly runtime="10s"
# ramp time
readonly rt="10s"
readonly fname="test.fio"
readonly workload_dir="/"
readonly workload_file="${workload_dir}${fname}"
readonly workload_size="10G"
readonly summary_file_local="/results.json"
# Show help about this script.
# Fixes the stale usage text: the script takes <operation> [<num-jobs>]
# (main() parses $2 as the number of jobs), not '<count>' or '<io-engine>',
# and the full list of operations is documented.
function help() {
cat << EOF
Usage: $0 <operation> [<num-jobs>]
Description:
	Runs FIO test using ctr to exercise IO in kata containers.
Operations are:
	run-read-4k
	run-write-4k
	run-randread-4k
	run-randwrite-4k
	run-read-64k
	run-write-64k
	run-randread-64k
	run-randwrite-64k
	print-latest-results
	delete-workload
<operation>: [Mandatory]
<num-jobs>:  [Optional] positive integer, number of fio jobs (default: 1).
EOF
}
# Run from the host: create the workload file once if it is not present.
# dd writes to the absolute ${workload_file} path, so no directory change
# is needed — the previous 'pushd' had no matching 'popd' and permanently
# changed the caller's working directory.
function setup_workload() {
	# create workload file:
	if [ ! -f "${workload_file}" ]; then
		dd if=/dev/urandom of="${workload_file}" bs=64M count=160 > /dev/null 2>&1
	fi
}
# Run inside container: build the fio command line from the global
# io_type/block_size/num_jobs knobs plus the fixed defaults, and append the
# JSON report to ${summary_file_local}.
function launch_workload() {
	# the parameters used in the test_name are accessible globally
	local test_name="${io_type}_${block_size}_nj-${num_jobs}_${rate_process}_iodepth-${iodepth}_io-direct-${disable_buffered}"

	# Ensure the workload file exists before fio reads/writes it.
	setup_workload
	# Drop any previous report so the file holds a single JSON document.
	rm -f "${summary_file_local}" > /dev/null 2>&1
	fio \
		--name="${test_name}" \
		--output-format="json" \
		--filename="${workload_file}" \
		--size="${workload_size}" \
		--rate_process="${rate_process}" \
		--runtime="${runtime}" \
		--ioengine="${ioengine}" \
		--rw="${io_type}" \
		--direct="${disable_buffered}" \
		--numjobs="${num_jobs}" \
		--blocksize="${block_size}" \
		--ramp_time="${rt}" \
		--iodepth="${iodepth}" \
		--gtod_reduce="1" \
		--randrepeat="1" \
		| tee -a ${summary_file_local} > /dev/null 2>&1
}
# Print the JSON results of the most recent run, or fail clearly when no
# results file exists yet.
function print_latest_results() {
	[ ! -f "${summary_file_local}" ] && echo "Error: no results to display; you must run a test before requesting results display" && exit 1
	# cat directly instead of the redundant echo "$(cat ...)" round-trip.
	cat "${summary_file_local}"
}
# Remove the generated fio workload file, silently succeeding when the
# file is already gone.
function delete_workload() {
	rm -f -- "${workload_file}" >/dev/null 2>&1
}
# Entry point: dispatch the requested operation; the optional second
# argument is the number of fio jobs (default: 1).
function main() {
	local action="${1:-}"
	num_jobs="${2:-1}"

	# Validate num_jobs inline: the previous code called an undefined
	# 'die' helper, which produced "command not found" and then kept
	# running instead of failing cleanly.
	if [[ ! ${num_jobs} =~ ^[0-9]+$ ]]; then
		echo >&2 "The number of jobs must be a positive integer"
		exit 1
	fi

	case "${action}" in
		run-read-4k) launch_workload ;;
		run-read-64k) block_size="64k" && launch_workload ;;
		run-write-4k) io_type="write" && launch_workload ;;
		run-write-64k) block_size="64k" && io_type="write" && launch_workload ;;
		run-randread-4k) io_type="randread" && launch_workload ;;
		run-randread-64k) block_size="64k" && io_type="randread" && launch_workload ;;
		run-randwrite-4k) io_type="randwrite" && launch_workload ;;
		run-randwrite-64k) block_size="64k" && io_type="randwrite" && launch_workload ;;
		print-latest-results) print_latest_results ;;
		delete-workload) delete_workload ;;
		*) >&2 echo "Invalid argument" ; help ; exit 1 ;;
	esac
}
main "$@"

View File

@ -1 +0,0 @@
./cmd/fiotest/fio-k8s

View File

@ -1,31 +0,0 @@
#
# Copyright (c) 2021-2022 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
MKFILE_DIR := $(dir $(MKFILE_PATH))

# All targets are commands, not files they produce.
.PHONY: build test-report test-report-interactive test run test-qemu test-clh

# Use $(MAKE) for recursion so -j/-n and the jobserver propagate.
build:
	$(MAKE) -C $(MKFILE_DIR)/cmd/fiotest/ gomod
	$(MAKE) -C $(MKFILE_DIR)/cmd/fiotest/ build

# Render an HTML report from the test-results directory.
test-report:
	$(MKFILE_DIR)/scripts/dax-compare-test/report/gen-html-fio-report.sh $(MKFILE_DIR)/cmd/fiotest/test-results/

# Serve the report notebook interactively (Jupyter in docker).
test-report-interactive:
	$(MKFILE_DIR)/scripts/dax-compare-test/report/run-docker-jupyter-server.sh $(MKFILE_DIR)/cmd/fiotest/test-results/

# Quick run plus report generation.
test: build
	$(MAKE) -C $(MKFILE_DIR)/cmd/fiotest/ run
	$(MAKE) test-report

# Full DAX-comparison run.
run: build
	$(MAKE) -C $(MKFILE_DIR)/scripts/dax-compare-test/ run

test-qemu: build
	$(MAKE) -C $(MKFILE_DIR)/cmd/fiotest/ run-qemu

test-clh: build
	$(MAKE) -C $(MKFILE_DIR)/cmd/fiotest/ run-clh

View File

@ -1,30 +0,0 @@
# FIO test in Kubernetes
This is an automation to run `fio` with Kubernetes.
## Requirements:
- Kubernetes cluster running.
- Kata configured as `runtimeclass`.
## Test structure:
- [fio-test]: Program wrapper to launch `fio` in a K8s pod.
- [pkg]: Library code that could be used for more `fio` automation.
- [configs]: Configuration files used by [fio-test].
- [DAX-compare-test]: Script to run [fio-test] to generate `fio` data for Kata with/without `virtio-fs DAX` and K8s bare-metal runtime(`runc`).
- [report] Jupyter Notebook to create reports for data generated by [DAX-compare-test].
## Top-level Makefile targets
- `build`: Build `fio` metrics.
- `test`: quick test, used to verify changes in [fio-test].
- `run`: Run `fio` metrics and generate reports.
- `test-report-interactive`: Run python notebook in `localhost:8888`, useful to edit the report.
- `test-report`: Generate report from data generated by `make test`.
[fio-test]:cmd/fiotest
[configs]:configs
[pkg]:pkg
[report]:scripts/dax-compare-test/report
[DAX-compare-test]:scripts/dax-compare-test/README.md

View File

@ -1,27 +0,0 @@
#
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
MKFILE_DIR := $(dir $(MKFILE_PATH))

# None of these targets produce files with their own names.
.PHONY: build run gomod run-qemu run-clh

build:
	GO111MODULE=on go build

# Quick run against both kata (default) and runc for comparison.
run: build
	$(MKFILE_DIR)/fio-k8s --debug --fio.size 10M --output-dir test-results --test-name kata $(MKFILE_DIR)/../../configs/example-config/
	$(MKFILE_DIR)/fio-k8s --debug --fio.size 10M --output-dir test-results --test-name runc --container-runtime runc $(MKFILE_DIR)/../../configs/example-config/

# Rewire module requirements to the in-tree helper packages.
gomod:
	go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/k8s=../../pkg/k8s
	go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/exec=../../pkg/exec
	go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/env=../../pkg/env
	go mod tidy

run-qemu: build
	$(MKFILE_DIR)/fio-k8s --debug --fio.size 10M --output-dir test-results --test-name kata --container-runtime kata-qemu $(MKFILE_DIR)/../../configs/example-config/

run-clh: build
	$(MKFILE_DIR)/fio-k8s --debug --fio.size 10M --output-dir test-results --test-name kata --container-runtime kata-clh $(MKFILE_DIR)/../../configs/example-config/

View File

@ -1,24 +0,0 @@
module github.com/kata-containers/kata-containers/tests/metrics/storage/fio-k8s
go 1.19
replace github.com/kata-containers/kata-containers/tests/metrics/exec => ../../pkg/exec
replace github.com/kata-containers/kata-containers/tests/metrics/k8s => ../../pkg/k8s
replace github.com/kata-containers/kata-containers/tests/metrics/env => ../../pkg/env
require (
github.com/kata-containers/kata-containers/tests/metrics/env v0.0.0-00010101000000-000000000000
github.com/kata-containers/kata-containers/tests/metrics/exec v0.0.0-00010101000000-000000000000
github.com/kata-containers/kata-containers/tests/metrics/k8s v0.0.0-00010101000000-000000000000
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.9.3
github.com/urfave/cli v1.22.14
)
require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
)

View File

@ -1,31 +0,0 @@
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,373 +0,0 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"encoding/csv"
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"time"
env "github.com/kata-containers/kata-containers/tests/metrics/env"
exec "github.com/kata-containers/kata-containers/tests/metrics/exec"
"github.com/kata-containers/kata-containers/tests/metrics/k8s"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// Shared logger for this command; helper packages receive it via SetLogger.
var log = logrus.New()

// CLI flag names.
var (
	optContainerRuntime = "container-runtime"
	optDebug            = "debug"
	optOutputDir        = "output-dir"
	optTestName         = "test-name"

	// fio options (passed through to the fio command line in the pod)
	optFioBlockSize = "fio.block-size"
	optFioDirect    = "fio.direct"
	optFioIoDepth   = "fio.iodepth"
	optFioSize      = "fio.size"
	optFioNumJobs   = "fio.numjobs"
)
// RwFioOp holds the per-direction (read or write) metrics parsed from
// fio's JSON report.
type RwFioOp struct {
	// BandwidthKb maps fio's "bw" field (bandwidth; presumably KiB/s per
	// the field name — confirm against the fio JSON format docs).
	BandwidthKb int `json:"bw"`
	// IOPS is I/O operations per second.
	IOPS float64 `json:"iops"`
}
// fioResult models the subset of fio's JSON output this test consumes.
type fioResult struct {
	GlobalOptions struct {
		IOEngine string `json:"ioengine"`
		RW       string `json:"rw"`
	} `json:"global options"`
	// Jobs: only Jobs[0] is used by the reporting code.
	Jobs []struct {
		JobName string  `json:"jobname"`
		Read    RwFioOp `json:"read"`
		Write   RwFioOp `json:"write"`
	} `json:"jobs"`
}
// Run fio in k8s metrics test in K8s.
// run creates the pod described by c.k8sYaml, installs fio inside it,
// copies the job file in, executes fio, and writes the raw JSON output
// under c.outputDir. The pod is deleted on exit.
func (c fioTestConfig) run() (result fioResult, err error) {
	log.Infof("Running fio config: %s", c.jobFile)
	pod := k8s.Pod{YamlPath: c.k8sYaml}

	// Remove any leftover pod from a previous run so creation cannot
	// collide with an existing one.
	log.Infof("Delete pod if already created")
	err = pod.Delete()
	if err != nil {
		return result, err
	}

	log.Infof("Create pod: %s", pod.YamlPath)
	err = pod.Run()
	if err != nil {
		return result, err
	}
	// Best-effort cleanup; if deletion fails while an error is already
	// being returned, attach the deletion error to it.
	// NOTE(review): when deletion fails and err is nil, the failure is
	// only logged, not returned — confirm this is intended.
	defer func() {
		log.Info("Deleting pod")
		delErr := pod.Delete()
		if delErr != nil {
			log.Error(delErr)
			if err != nil {
				err = errors.Wrapf(err, "Could not delete pod after: %s", delErr)
			}
		}
	}()

	// Stage the fio job file inside the pod.
	destDir := "/home/fio-jobs"
	_, err = pod.Exec("mkdir " + destDir)
	if err != nil {
		return result, err
	}
	dstJobFile := path.Join(destDir, "jobFile")
	err = pod.CopyFromHost(c.jobFile, dstJobFile)
	if err != nil {
		return result, err
	}

	// Install fio inside the pod at run time.
	_, err = pod.Exec("apt update")
	if err != nil {
		return result, err
	}
	_, err = pod.Exec("apt install -y fio")
	if err != nil {
		return result, err
	}

	// Start from a cold host page cache so runs are comparable.
	err = env.DropCaches()
	if err != nil {
		return result, err
	}

	// fio takes direct I/O as "1"/"0" on the command line.
	var directStr string
	if c.direct {
		directStr = "1"
	} else {
		directStr = "0"
	}

	// Build the fio command line from the test configuration.
	// NOTE(review): c.loops is never appended here — confirm whether that
	// option was meant to be passed to fio.
	cmdFio := "fio"
	cmdFio += " --append-terse "
	cmdFio += " --blocksize=" + c.blocksize
	cmdFio += " --direct=" + directStr
	cmdFio += " --directory=" + c.directory
	cmdFio += " --iodepth=" + c.iodepth
	cmdFio += " --numjobs=" + c.numjobs
	cmdFio += " --runtime=" + c.runtime
	cmdFio += " --size=" + c.size
	cmdFio += " --output-format=json"
	cmdFio += " " + dstJobFile

	log.Infof("Exec fio")
	output, err := pod.Exec(cmdFio, k8s.ExecOptShowStdOut())
	if err != nil {
		return result, err
	}

	// Parse the JSON report into the subset we care about.
	err = json.Unmarshal([]byte(output), &result)
	if err != nil {
		return result, errors.Wrapf(err, "failed to unmarshall output : %s", output)
	}
	log.Infof("ioengine:%s", result.GlobalOptions.IOEngine)
	log.Infof("rw:%s", result.GlobalOptions.RW)
	if len(result.Jobs) == 0 {
		return result, errors.New("No jobs found after parsing fio results")
	}

	// Persist the raw output under <outputDir>/<job-file-name>/output.json.
	testDir := path.Join(c.outputDir, filepath.Base(c.jobFile))
	err = os.MkdirAll(testDir, 0775)
	if err != nil {
		return result, errors.Wrapf(err, "failed to create test directory for :%s", c.jobFile)
	}
	outputFile := path.Join(testDir, "output.json")
	log.Infof("Store results output in : %s", outputFile)
	err = os.WriteFile(outputFile, []byte(output), 0644)
	if err != nil {
		return result, err
	}
	return result, nil
}
// fioTestConfig aggregates everything needed to run one fio job in a pod.
type fioTestConfig struct {
	//test options
	k8sYaml          string // pod manifest used to create the test pod
	containerRuntime string // e.g. "kata", "runc", "kata-qemu", "kata-clh"
	outputDir        string // directory receiving per-job output.json files

	//fio options (passed through on the fio command line)
	blocksize string
	directory string
	iodepth   string
	numjobs   string
	jobFile   string // path of the fio job file currently being run
	loops     string // NOTE(review): set by main() but never passed to fio
	runtime   string
	size      string
	direct    bool // true -> fio --direct=1 (bypass buffered I/O)
}
// runFioJobs runs every fio job file found in <testDirPath>/fio-jobs with
// the pod YAML matching cfg.containerRuntime, returning one fioResult per
// job. It fails fast on the first job error.
func runFioJobs(testDirPath string, cfg fioTestConfig) (results []fioResult, err error) {
	fioJobsDir, err := filepath.Abs(path.Join(testDirPath, "fio-jobs"))
	if err != nil {
		return results, err
	}
	files, err := os.ReadDir(fioJobsDir)
	if err != nil {
		// Propagate the error instead of log.Fatal(): Fatal calls
		// os.Exit, which made the return below unreachable and prevented
		// the caller from handling (and reporting) the failure.
		return results, err
	}
	if cfg.containerRuntime == "" {
		return results, errors.New("containerRuntime is empty")
	}
	// The pod manifest is named after the runtime, e.g. "kata.yaml".
	podYAMLName := cfg.containerRuntime + ".yaml"
	cfg.k8sYaml = path.Join(testDirPath, podYAMLName)
	if len(files) == 0 {
		return results, errors.New("No fio configs found")
	}
	for _, file := range files {
		cfg.jobFile = path.Join(fioJobsDir, file.Name())
		r, err := cfg.run()
		if err != nil {
			return results, err
		}
		results = append(results, r)
		// run() guarantees len(r.Jobs) > 0 on success.
		log.Infof("workload:%s", r.Jobs[0].JobName)
		log.Infof("bw_r:%d", r.Jobs[0].Read.BandwidthKb)
		log.Infof("IOPS_r:%f", r.Jobs[0].Read.IOPS)
		log.Infof("bw_w:%d", r.Jobs[0].Write.BandwidthKb)
		log.Infof("IOPS_w:%f", r.Jobs[0].Write.IOPS)
		// Creating the next pod immediately after a run sometimes times
		// out, so pause between jobs.
		waitTime := 5
		log.Debugf("Sleep %d seconds(if not wait sometimes create another pod timesout)", waitTime)
		time.Sleep(time.Duration(waitTime) * time.Second)
	}
	return results, err
}
// generateResultsView writes a CSV summary — one row per fio result, using
// each result's first job — into <outputDir>/results.csv.
func generateResultsView(testName string, results []fioResult, outputDir string) error {
	csvPath := path.Join(outputDir, "results.csv")
	file, err := os.Create(csvPath)
	if err != nil {
		return err
	}
	defer file.Close()
	log.Infof("Creating results output in %s", csvPath)

	writer := csv.NewWriter(file)
	// Header: read/write bandwidth then read/write IOPS.
	if err := writer.Write([]string{"NAME", "WORKLOAD", "bw_r", "bw_w", "IOPS_r", "IOPS_w"}); err != nil {
		return err
	}

	for _, res := range results {
		if len(res.Jobs) == 0 {
			return errors.Errorf("fio result has no jobs: %v", res)
		}
		job := res.Jobs[0]
		record := []string{
			testName,
			job.JobName,
			fmt.Sprintf("%d", job.Read.BandwidthKb),
			fmt.Sprintf("%d", job.Write.BandwidthKb),
			fmt.Sprintf("%f", job.Read.IOPS),
			fmt.Sprintf("%f", job.Write.IOPS),
		}
		if err := writer.Write(record); err != nil {
			return err
		}
	}

	writer.Flush()
	return writer.Error()
}
// main wires up the CLI: global test options plus fio pass-through options.
// The single positional argument is the test config directory; every fio
// job found there is run and a CSV summary is written.
func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:  optDebug,
				Usage: "Logs in debug level",
			},
			&cli.StringFlag{
				Name:  optTestName,
				Value: "kata-fio-test",
				Usage: "Change the fio test name for reports",
			},
			&cli.StringFlag{
				Name:  optOutputDir,
				Value: ".",
				Usage: "Use a file to store results",
			},
			&cli.StringFlag{
				Name:  optContainerRuntime,
				Value: "kata",
				Usage: "Choose the runtime to use",
			},
			//fio options
			&cli.StringFlag{
				Name:  optFioSize,
				Value: "200M",
				Usage: "File size to use for tests",
			},
			&cli.StringFlag{
				Name:  optFioBlockSize,
				Value: "4K",
				Usage: "Block size for fio tests",
			},
			&cli.BoolFlag{
				Name:  optFioDirect,
				Usage: "Use direct io",
			},
			&cli.StringFlag{
				Name:  optFioIoDepth,
				Value: "16",
				Usage: "Number of I/O units to keep in flight against the file",
			},
			&cli.StringFlag{
				Name:  optFioNumJobs,
				Value: "1",
				Usage: "Number of clones (processes/threads performing the same workload) of this job",
			},
		},
		Action: func(c *cli.Context) error {
			// Validate the positional <test-config-dir> argument.
			jobsDir := c.Args().First()
			if jobsDir == "" {
				cli.SubcommandHelpTemplate = strings.Replace(cli.SubcommandHelpTemplate, "[arguments...]", "<test-config-dir>", -1)
				cli.ShowCommandHelp(c, "")
				return errors.New("Missing <test-config-dir>")
			}
			if c.Bool(optDebug) {
				log.SetLevel(logrus.DebugLevel)
				k8s.Debug = true
				env.Debug = true
			}
			// Share one logger across the helper packages.
			exec.SetLogger(log)
			k8s.SetLogger(log)
			env.SetLogger(log)
			testName := c.String(optTestName)
			// Results land in <output-dir>/<test-name>/.
			outputDir, err := filepath.Abs(path.Join(c.String(optOutputDir), testName))
			if err != nil {
				return err
			}
			cfg := fioTestConfig{
				blocksize:        c.String(optFioBlockSize),
				direct:           c.Bool(optFioDirect),
				directory:        ".",
				iodepth:          c.String(optFioIoDepth),
				loops:            "3",
				numjobs:          c.String(optFioNumJobs),
				runtime:          "20",
				size:             c.String(optFioSize),
				containerRuntime: c.String(optContainerRuntime),
				outputDir:        outputDir,
			}
			log.Infof("Results will be created in %s", cfg.outputDir)
			err = os.MkdirAll(cfg.outputDir, 0775)
			if err != nil {
				return err
			}
			results, err := runFioJobs(jobsDir, cfg)
			if err != nil {
				return err
			}
			return generateResultsView(c.String(optTestName), results, outputDir)
		},
	}
	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2022 Intel Corporation
[global]
name=io_uring
filename=fio-file
rw=randrw
rwmixread=75
ioengine=io_uring
[randrw-io_uring]

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randrw-libaio
filename=fio-file
rw=randrw
rwmixread=75
ioengine=libaio
[randrw-libaio]

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2022 Intel Corporation
[global]
name=sync
filename=fio-file
rw=randrw
rwmixread=75
ioengine=sync
[randrw-sync]

View File

@ -1,17 +0,0 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: iometrics
spec:
runtimeClassName: kata-clh
containers:
- args:
- sleep
- infinity
image: ubuntu:latest
name: iometrics
imagePullPolicy: Always

View File

@ -1,17 +0,0 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: iometrics
spec:
runtimeClassName: kata-qemu
containers:
- args:
- sleep
- infinity
image: ubuntu:latest
name: iometrics
imagePullPolicy: Always

View File

@ -1,16 +0,0 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: iometrics
spec:
runtimeClassName: kata
containers:
- name: iometrics
image: ubuntu:latest
# Just spin & wait forever
command: [ "/bin/bash", "-c", "--" ]
args: [ "sleep infinity" ]

View File

@ -1,15 +0,0 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: iometrics
spec:
containers:
- name: iometrics
image: ubuntu:latest
# Just spin & wait forever
command: [ "/bin/bash", "-c", "--" ]
args: [ "sleep infinity" ]

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randread-libaio
filename=fio-file
rw=randread
ioengine=libaio
[randread-libaio]

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randread-mmap
rw=randread
ioengine=mmap
[randread-mmap]
filename=fio-file

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randrw-libaio
filename=fio-file
rw=randrw
rwmixread=75
ioengine=libaio
[randrw-libaio]

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randrw-mmap
rw=randrw
rwmixread=75
ioengine=mmap
[randrw-mmap]
filename=fio-file

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randwrite-libaio
filename=fio-file
rw=randwrite
ioengine=libaio
[randwrite-libaio]

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randwrite-mmap
rw=randwrite
ioengine=mmap
[randwrite-mmap]
filename=fio-file

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqread-libaio
filename=fio-file
rw=read
ioengine=libaio
[seqread-libaio]

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqread-mmap
rw=read
ioengine=mmap
[seqread-mmap]
filename=fio-file

View File

@ -1,8 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqread-psync
filename=fio-file
rw=read
[seqread-psync]

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqwrite-libaio
filename=fio-file
rw=write
ioengine=libaio
[seqwrite-libaio]

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqwrite-mmap
filename=fio-file
rw=write
ioengine=mmap
[seqwrite-mmap]
filename=fio-file

View File

@ -1,16 +0,0 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: iometrics
spec:
runtimeClassName: kata
containers:
- name: iometrics
image: ubuntu:latest
# Just spin & wait forever
command: [ "/bin/bash", "-c", "--" ]
args: [ "sleep infinity" ]

View File

@ -1,15 +0,0 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: iometrics
spec:
containers:
- name: iometrics
image: ubuntu:latest
# Just spin & wait forever
command: [ "/bin/bash", "-c", "--" ]
args: [ "sleep infinity" ]

View File

@ -1,86 +0,0 @@
#!/bin/bash
#
# Copyright (c) 2022-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
set -e

# General env
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/../../lib/common.bash"

# Checkout location of the fio-k8s test sources (driven via its Makefile).
FIO_PATH="${GOPATH}/tests/metrics/storage/fio-k8s"
TEST_NAME="${TEST_NAME:-fio}"
# main: runs the K8s fio test via the fio-k8s Makefile for the selected
# hypervisor, then extracts read/write metrics from the fio JSON output and
# emits them in the metrics JSON report format.
function main() {
	# Commands required by the metrics library helpers.
	cmds=("bc" "jq")
	check_cmds "${cmds[@]}"
	init_env
	check_processes

	pushd "${FIO_PATH}"
	# Only qemu and clh have matching Makefile targets (test-qemu/test-clh).
	[ -z "${KATA_HYPERVISOR}" ] && die "Hypervisor ID is missing."
	[ "${KATA_HYPERVISOR}" != "qemu" ] && [ "${KATA_HYPERVISOR}" != "clh" ] && die "Hypervisor not recognized: ${KATA_HYPERVISOR}"
	echo "INFO: Running K8S FIO test using ${KATA_HYPERVISOR} hypervisor"
	make "test-${KATA_HYPERVISOR}"
	popd

	test_result_file="${FIO_PATH}/cmd/fiotest/test-results/kata/randrw-sync.job/output.json"
	metrics_json_init

	# Scrape metrics out of the fio JSON: the first match of each key is
	# taken as the read stanza, the second as the write stanza.
	# NOTE(review): this text scraping is fragile against fio output-format
	# changes; jq (already a required command above) would be more robust.
	local read_io=$(cat $test_result_file | grep io_bytes | head -1 | sed 's/[[:blank:]]//g' | cut -f2 -d ':' | cut -f1 -d ',')
	local read_bw=$(cat $test_result_file | grep bw_bytes | head -1 | sed 's/[[:blank:]]//g' | cut -f2 -d ':' | cut -f1 -d ',')
	local read_90_percentile=$(cat $test_result_file | grep 90.000000 | head -1 | sed 's/[[:blank:]]//g' | cut -f2 -d ':' | cut -f1 -d ',')
	local read_95_percentile=$(cat $test_result_file | grep 95.000000 | head -1 | sed 's/[[:blank:]]//g' | cut -f2 -d ':' | cut -f1 -d ',')
	local write_io=$(cat $test_result_file | grep io_bytes | head -2 | tail -1 | sed 's/[[:blank:]]//g' | cut -f2 -d ':' | cut -f1 -d ',')
	local write_bw=$(cat $test_result_file | grep bw_bytes | head -2 | tail -1 | sed 's/[[:blank:]]//g' | cut -f2 -d ':' | cut -f1 -d ',')
	local write_90_percentile=$(cat $test_result_file | grep 90.000000 | head -2 | tail -1 | sed 's/[[:blank:]]//g' | cut -f2 -d ':' | cut -f1 -d ',')
	local write_95_percentile=$(cat $test_result_file | grep 95.000000 | head -2 | tail -1 | sed 's/[[:blank:]]//g' | cut -f2 -d ':' | cut -f1 -d ',')

	metrics_json_start_array

	local json="$(cat << EOF
	{
		"readio": {
			"Result" : $read_io,
			"Units" : "bytes"
		},
		"readbw": {
			"Result" : $read_bw,
			"Units" : "bytes/sec"
		},
		"read90percentile": {
			"Result" : $read_90_percentile,
			"Units" : "ns"
		},
		"read95percentile": {
			"Result" : $read_95_percentile,
			"Units" : "ns"
		},
		"writeio": {
			"Result" : $write_io,
			"Units" : "bytes"
		},
		"writebw": {
			"Result" : $write_bw,
			"Units" : "bytes/sec"
		},
		"write90percentile": {
			"Result" : $write_90_percentile,
			"Units" : "ns"
		},
		"write95percentile": {
			"Result" : $write_95_percentile,
			"Units" : "ns"
		}
	}
EOF
)"
	metrics_json_add_array_element "$json"
	metrics_json_end_array "Results"
	metrics_json_save
	check_processes
}
main "$@"

View File

@ -1,9 +0,0 @@
#
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# 'gomod' is a command, not a file.
.PHONY: gomod

# Point the shared exec module replacement at the local source tree, then tidy.
gomod:
	GO111MODULE=on go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/exec=../exec
	GO111MODULE=on go mod tidy

View File

@ -1,38 +0,0 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package env
import (
exec "github.com/kata-containers/kata-containers/tests/metrics/exec"
)
// logger interface for pkg
var log logger

// Debug enables command echoing in the exec helpers.
var Debug bool = false

// logger is the minimal logging surface this package needs; satisfied by
// *logrus.Logger.
type logger interface {
	Infof(string, ...interface{})
	Debugf(string, ...interface{})
	Errorf(string, ...interface{})
}

// SetLogger injects the package logger; call it before any other function
// in this package (log is nil otherwise).
func SetLogger(l logger) {
	log = l
}
// sysDropCachesPath is the kernel interface for dropping page, dentry and
// inode caches.
var sysDropCachesPath = "/proc/sys/vm/drop_caches"

// DropCaches syncs dirty pages to disk and writes 3 to
// /proc/sys/vm/drop_caches so benchmarks start from a cold cache.
// Requires password-less sudo for the tee.
func DropCaches() (err error) {
	log.Infof("drop caches")
	// Flush dirty pages first so dropping caches cannot lose data.
	_, err = exec.ExecCmd("sync", Debug)
	if err != nil {
		return err
	}
	// 'echo 3' drops page cache plus dentries and inodes.
	_, err = exec.ExecCmd("echo 3 | sudo tee "+sysDropCachesPath, Debug)
	if err != nil {
		return err
	}
	return nil
}

View File

@ -1,10 +0,0 @@
module github.com/kata-containers/kata-containers/tests/metrics/storage/fio-k8s/exec
go 1.19
require (
github.com/kata-containers/kata-containers/tests/metrics/exec v0.0.0-00010101000000-000000000000 // indirect
github.com/pkg/errors v0.9.1 // indirect
)
replace github.com/kata-containers/kata-containers/tests/metrics/exec => ../exec

View File

@ -1,2 +0,0 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=

View File

@ -1,67 +0,0 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package exec
import (
"bytes"
"io"
"os"
"os/exec"
"github.com/pkg/errors"
)
// logger interface for pkg
var log logger

// logger is the minimal logging surface this package needs; satisfied by
// *logrus.Logger.
type logger interface {
	Infof(string, ...interface{})
	Debugf(string, ...interface{})
	Errorf(string, ...interface{})
}

// SetLogger injects the package logger; call it before ExecCmd/ExecStdout
// (log is nil otherwise).
func SetLogger(l logger) {
	log = l
}
// Exec a command
// err != nil if command fails to execute
// output is a string with a combined stdout and stderr
func ExecCmd(c string, showInStdout bool) (stdout string, err error) {
	if c == "" {
		return "", errors.New("command is empty")
	}
	log.Debugf("Exec: %s", c)
	// -o pipefail so a failure anywhere in a shell pipeline is reported.
	cmd := exec.Command("bash", "-o", "pipefail", "-c", c)
	var stdBuffer bytes.Buffer
	var writers []io.Writer
	writers = append(writers, &stdBuffer)
	if showInStdout {
		writers = append(writers, os.Stdout)
	}
	// Combine stdout and stderr into one buffer (optionally mirrored to
	// the caller's stdout).
	mw := io.MultiWriter(writers...)
	cmd.Stdout = mw
	cmd.Stderr = mw
	err = cmd.Run()
	output := stdBuffer.String()
	// Reuse 'output' instead of re-reading the buffer a second time; wrap
	// the error with the combined output for context (errors.Wrap returns
	// nil when err is nil).
	return output, errors.Wrap(err, output)
}
// Exec a command
// Send output to Stdout and Stderr
// Unlike ExecCmd, the output is not captured; only the error is returned.
func ExecStdout(c string) error {
	if c == "" {
		return errors.New("command is empty")
	}
	log.Debugf("Exec: %s", c)
	// -o pipefail so a failure anywhere in a shell pipeline is reported.
	cmd := exec.Command("bash", "-o", "pipefail", "-c", c)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

View File

@ -1,5 +0,0 @@
module github.com/kata-containers/kata-containers/tests/metrics/storage/fio-k8s/exec
go 1.19
require github.com/pkg/errors v0.9.1

View File

@ -1,2 +0,0 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=

View File

@ -1,8 +0,0 @@
#
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# 'gomod' is a command, not a file.
.PHONY: gomod

# Point the shared exec module replacement at the local source tree, then tidy.
gomod:
	GO111MODULE=on go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/exec=../exec
	GO111MODULE=on go mod tidy

View File

@ -1,34 +0,0 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package k8s
import (
"fmt"
exec "github.com/kata-containers/kata-containers/tests/metrics/exec"
)
// execOpt holds per-call options for Pod.Exec.
type execOpt struct {
	// showInStdOut mirrors the command output to this process's stdout.
	showInStdOut bool
}

// ExecOption is a functional option for Pod.Exec.
type ExecOption func(e *execOpt)

// ExecOptShowStdOut makes Pod.Exec mirror the command output to stdout.
func ExecOptShowStdOut() ExecOption {
	return func(e *execOpt) {
		e.showInStdOut = true
	}
}
// Exec runs cmd inside the pod via 'kubectl exec', returning the combined
// stdout/stderr of the command.
func (p *Pod) Exec(cmd string, opts ...ExecOption) (output string, err error) {
	log.Debugf("Exec %q in %s", cmd, p.YamlPath)
	o := &execOpt{showInStdOut: false}
	for _, opt := range opts {
		opt(o)
	}
	// %q quotes cmd so it survives the inner /bin/bash -c.
	execCmd := fmt.Sprintf("kubectl exec -f %s -- /bin/bash -c %q", p.YamlPath, cmd)
	return exec.ExecCmd(execCmd, Debug || o.showInStdOut)
}

View File

@ -1,10 +0,0 @@
module github.com/kata-containers/kata-containers/tests/metrics/k8s
go 1.19
replace github.com/kata-containers/kata-containers/tests/metrics/exec => ../exec
require (
github.com/kata-containers/kata-containers/tests/metrics/exec v0.0.0-00010101000000-000000000000 // indirect
github.com/pkg/errors v0.9.1 // indirect
)

View File

@ -1,2 +0,0 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=

View File

@ -1,68 +0,0 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package k8s
import (
"fmt"
exec "github.com/kata-containers/kata-containers/tests/metrics/exec"
"github.com/pkg/errors"
)
// logger interface for pkg
var log logger

// Debug enables echoing of the kubectl commands that are run.
var Debug bool = false

type logger interface {
	Infof(string, ...interface{})
	Debugf(string, ...interface{})
	Errorf(string, ...interface{})
}

// SetLogger injects the package logger; call it before using Pod methods.
func SetLogger(l logger) {
	log = l
}

// Pod represents a K8s pod defined by a YAML manifest on disk; every
// operation shells out to kubectl with that manifest.
type Pod struct {
	YamlPath string
}
// waitForReady blocks until the pod reports condition=ready
// (kubectl's default wait timeout applies).
func (p *Pod) waitForReady() (err error) {
	log.Debugf("Wait for pod %s", p.YamlPath)
	_, err = exec.ExecCmd("kubectl wait --for=condition=ready -f "+p.YamlPath, Debug)
	return err
}
// Run applies the pod manifest and waits until the pod is ready.
func (p *Pod) Run() (err error) {
	log.Debugf("Creating K8s Pod %s", p.YamlPath)
	_, err = exec.ExecCmd("kubectl apply -f "+p.YamlPath, Debug)
	if err != nil {
		return errors.Wrapf(err, "Failed to run pod %s", p.YamlPath)
	}
	err = p.waitForReady()
	if err != nil {
		return errors.Wrapf(err, "Failed to wait for pod %s", p.YamlPath)
	}
	return err
}
func (p *Pod) Delete() (err error) {
log.Debugf("Delete pod %s", p.YamlPath)
_, err = exec.ExecCmd("kubectl delete --ignore-not-found -f "+p.YamlPath, Debug)
return errors.Wrapf(err, "Failed to delete pod %s", p.YamlPath)
}
func (p *Pod) CopyFromHost(src, dst string) (err error) {
podName, err := exec.ExecCmd("kubectl get -f "+p.YamlPath+" -o jsonpath={.metadata.name}", Debug)
if err != nil {
return nil
}
log.Debugf("Copy from host %q->%q in pod %s", src, dst, p.YamlPath)
execCmd := fmt.Sprintf("kubectl cp %s %s:%s", src, podName, dst)
_, err = exec.ExecCmd(execCmd, Debug)
return err
}

View File

@ -1,10 +0,0 @@
#
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# Absolute path and directory of this Makefile, so the helper scripts can
# be invoked regardless of the caller's working directory.
MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
MKFILE_DIR := $(dir $(MKFILE_PATH))

# 'run' is a command, not a file it produces: declare it phony so a stray
# file named "run" cannot mask the target.
.PHONY: run

run:
	"$(MKFILE_DIR)/compare-virtiofsd-dax.sh"
	"$(MKFILE_DIR)/report/gen-html-fio-report.sh" "./results"

View File

@ -1,47 +0,0 @@
# FIO in Kubernetes
This test runs `fio` jobs to measure how Kata Containers work using virtio-fs DAX. The test works using Kubernetes.
The test has to run in a single-node cluster; this is required because the test modifies the Kata configuration file.
The `virtio-fs` options that this test will use are:
* `cache mode` Only `auto`, this is the most compatible mode for most of the Kata use cases. Today this is default in Kata.
* `thread pool size` Restrict the number of worker threads per request queue, zero means no thread pool.
* `DAX`
```
File contents can be mapped into a memory window on the host, allowing the guest to directly access data from the host page cache. This has several advantages: The guest page cache is bypassed, reducing the memory footprint. No communication is necessary
to access file contents, improving I/O performance. Shared file access is coherent between virtual machines on the same host even with mmap.
```
This test by default iterates over different `virtio-fs` configurations.
| test name | DAX | thread pool size | cache mode |
|---------------------------|-----|------------------|------------|
| pool_0_cache_auto_no_DAX | no | 0 | auto |
| pool_0_cache_auto_DAX | yes | 0 | auto |
The `fio` options used are:
`ioengine`: How the IO requests are issued to the kernel.
* `libaio`: Supports async IO for both direct and buffered IO.
* `mmap`: File is memory mapped with mmap(2) and data copied to/from using memcpy(3).
`rw type`: Type of I/O pattern.
* `randread`: Random reads.
* `randrw`: Random mixed reads and writes.
* `randwrite`: Random writes.
* `read`: Sequential reads.
* `write`: Sequential writes.
Additional notes: Some jobs contain a `multi` prefix. This means that the same job runs more than once at the same time using its own file.
### Static `fio` values:
Some `fio` values are not modified over all the jobs.
* `runtime`: Tell `fio` to terminate processing after the specified period of time(seconds).
* `loops`: Run the specified number of iterations of this job. Used to repeat the same workload a given number of times.
* `iodepth`: Number of I/O units to keep in flight against the file. Note that increasing `iodepth` beyond 1 will not affect synchronous `ioengine`.
* `size`: The total size of file I/O for each thread of this job.
* `direct`: If value is true, use non-buffered I/O. This is usually O_`DIRECT`.
* `blocksize`: The block size in bytes used for I/O units.

View File

@ -1,151 +0,0 @@
#!/bin/bash
#Copyright (c) 2021-2023 Intel Corporation
#
#SPDX-License-Identifier: Apache-2.0
#
# Fail fast: abort on errors, unset variables, and failed pipeline stages.
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
script_dir=$(dirname "$(readlink -f "$0")")
# Paths for the kata-deploy style installation under /opt/kata.
runtime_path="/opt/kata/bin/kata-runtime"
kata_config_path="/opt/kata/share/defaults/kata-containers/configuration.toml"
# All per-test artifacts land under ./results relative to the caller's cwd.
results_dir="$(realpath ./)/results"
# Runtime classes: Kata (overridable) vs bare-metal baseline (runc).
KATA_RUNTIME="${KATA_RUNTIME_CLASS:-kata}"
BAREMETAL_RUNTIME="runc"
# Set by each test case before run_workload is invoked.
RUNTIME_CLASS=""
# fio workload knobs, overridable from the environment.
FIO_SIZE="${FIO_SIZE:-500M}"
FIO_BLOCKSIZE="${FIO_BLOCKSIZE:-4K}"
VIRTIOFS_DAX_SIZE=${VIRTIOFS_DAX_SIZE:-600M}
# set the base case for virtiofsd
set_base_virtiofs_config() {
# Running kata-qemu-virtiofs
# Defaults for virtiofs
sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache '"auto"'
sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache_size ${VIRTIOFS_DAX_SIZE}
}
## helper function: get name of current bash function
fn_name() {
echo "${FUNCNAME[1]}"
}
# directory where results are stored
get_results_dir() {
local test_name
local test_result_dir
test_name="${1}"
test_result_dir="${results_dir}/${test_name}"
mkdir -p "${test_result_dir}"
echo "${test_result_dir}"
}
# Collect kata env
# save kata config toml
# save output from kata-env
kata_env() {
local suffix=${1}
local config_path
local kata_env_bk
local kata_config_bk
kata_env_bk="$(get_results_dir "${suffix}")/kata-env.toml"
kata_config_bk="$(get_results_dir "${suffix}")/kata-config.toml"
${runtime_path} kata-env >"${kata_env_bk}"
config_path="$(${runtime_path} kata-env --json | jq .Runtime.Config.Path -r)"
cp "${config_path}" "${kata_config_bk}"
}
# Collect the command used by virtiofsd
collect_qemu_virtiofs_cmd() {
local rdir
local test_name
test_name="${1}"
rdir=$(get_results_dir "${test_name}")
# TODO
}
# Run metrics runner
run_workload() {
local test_name
local test_result_file
local test_result_dir
test_name="${1}"
test_result_dir="$(get_results_dir "${test_name}")"
test_result_file="${test_result_dir}/test-out.txt"
echo "Running for kata config: ${test_name}"
collect_qemu_virtiofs_cmd "$test_name"
fio_runner_dir="${script_dir}/../../cmd/fiotest/"
fio_jobs="${script_dir}/../../configs/test-config/"
make -C "${fio_runner_dir}" build
pwd
set -x
"${fio_runner_dir}fio-k8s" \
--debug \
--fio.size "${FIO_SIZE}" \
--fio.block-size "${FIO_BLOCKSIZE}" \
--container-runtime "${RUNTIME_CLASS}" \
--test-name "${test_name}" \
--output-dir "$(dirname ${test_result_dir})" \
"${fio_jobs}" |
tee \
"${test_result_file}"
set +x
}
# Test case: virtiofsd with thread-pool-size=0, cache=auto, DAX enabled
# (cache window of 1024 MiB), running under the Kata runtime class.
pool_0_cache_auto_dax() {
local suffix="$(fn_name)"
set_base_virtiofs_config
sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_extra_args '["--thread-pool-size=0","-o","no_posix_lock","-o","xattr"]'
sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache '"auto"'
sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache_size 1024
kata_env "${suffix}"
RUNTIME_CLASS="${KATA_RUNTIME}"
run_workload "${suffix}"
}
# Test case: same as above but DAX disabled (cache window size 0).
pool_0_cache_auto_no_dax() {
local suffix="$(fn_name)"
set_base_virtiofs_config
sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_extra_args '["--thread-pool-size=0","-o","no_posix_lock","-o","xattr"]'
sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache '"auto"'
sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache_size 0
kata_env "${suffix}"
RUNTIME_CLASS="${KATA_RUNTIME}"
run_workload "${suffix}"
echo "done"
}
# Baseline: same workload on the bare-metal runtime (runc) for comparison.
k8s_baremetal() {
local suffix="$(fn_name)"
RUNTIME_CLASS="${BAREMETAL_RUNTIME}"
run_workload "${suffix}"
}
# Entry point: run the baseline first, then both virtiofsd configurations.
main() {
mkdir -p "${results_dir}"
k8s_baremetal
pool_0_cache_auto_dax
pool_0_cache_auto_no_dax
}
# Bug fix: "$@" preserves argument word boundaries; the original bare $*
# re-split any argument containing whitespace.
main "$@"

View File

@ -1,51 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "tWacOPbMYPtc"
},
"source": [
"# FIO comparison"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "jXtTs6yldl_y"
},
"outputs": [],
"source": [
"import fio\n",
"fio.generate_report()"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [],
"name": "fio.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -1,102 +0,0 @@
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
import pandas as pd
import os
import re
import io
import glob
from IPython.display import display, Markdown
import matplotlib.pyplot as plt
#Compare the tests results group by fio job.
#Input:
# df: dataset from `import_data()`
# metric: string of metrics provided in `df`
def compare_tests_group_by_fio_job(df, metric):
    """Show (table + bar plot) the per-test results for one metric, grouped by fio job."""
    job_names, grouped_df = group_metrics_group_by_testname(df, metric)
    show_df(grouped_df)
    plot_df(grouped_df, job_names)
# Given a metric return results per test group by fio job.
# input:
#   df: dataset from `import_data()`
#   metric: string with the name of the metric to filter.
# output:
#   (test_names, dataset) where the dataset has format:
#   'WORKLOAD' , 'name[0]' , ... , 'name[n]'
def group_metrics_group_by_testname(df, metric):
    # names of each test seen in the results
    names = set()
    # map: fio-job name -> {test name: metric value}
    workload = {}
    for _, row in df.iterrows():
        w = row['WORKLOAD']    # name of a fio job
        tname = row['NAME']    # name of a test
        names.add(tname)
        values = workload.get(w, {})
        # e.g. values["test-name"] = row["IOPS"]
        values[tname] = row[metric]
        workload[w] = values
    names = list(names)
    cols = ['WORKLOAD'] + names
    # Bug fix: build the rows up front and construct the DataFrame once.
    # DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0,
    # so the original per-row append no longer runs on current pandas.
    rows = []
    for job, values in workload.items():
        # keep original behavior: skip jobs whose first test value is 0
        if not values[names[0]] == 0:
            entry = dict(values)
            entry['WORKLOAD'] = job
            rows.append(entry)
    rdf = pd.DataFrame(rows, columns=cols)
    # drop incomplete rows (a job missing results for some test)
    rdf = rdf.dropna()
    return names, rdf
def plot_df(df, names, sort_key=""):
    """Bar-plot the grouped results: one bar group per fio WORKLOAD,
    one series per test name in `names`.

    Bug fix: DataFrame.sort_values returns a new frame (it does not sort
    in place by default); the original discarded the sorted result, so
    `sort_key` had no effect.
    """
    if sort_key != "":
        df = df.sort_values(sort_key, ascending=False)
    df.plot(kind='bar', x="WORKLOAD", y=names, figsize=(30, 10))
    plt.show()
def import_data():
    """Load every per-test results.csv under ./results into one DataFrame."""
    frames = []
    for path in glob.glob('./results/*/results.csv'):
        print("reading:" + path)
        frames.append(pd.read_csv(path))
    return pd.concat(frames)
def show_df(df):
    """Print the full DataFrame without pandas truncating any rows."""
    row_limit = df.shape[0] + 1
    pd.set_option('display.max_rows', row_limit)
    print(df)
def print_md(s):
    """Render the given string as Markdown in the notebook output."""
    rendered = Markdown(s)
    display(rendered)
# notebook entrypoint
def generate_report():
    """Load all results and emit the comparison tables/plots for each metric."""
    # Load the all test results in a single dataset
    df_results = import_data()
    print_md("Show all data from results")
    show_df(df_results)
    # (human-readable metric description, results column) pairs
    comparisons = (
        ("write bandwidth", 'bw_w'),
        ("read bandwidth", 'bw_r'),
        ("write IOPS(Input/Output Operations Per Second)", 'IOPS_w'),
        ("read IOPS(Input/Output Operations Per Second)", 'IOPS_r'),
    )
    for description, metric in comparisons:
        print_md("### Compare the tests results group by fio job. The metric used to compare is " + description)
        compare_tests_group_by_fio_job(df_results, metric)

View File

@ -1,48 +0,0 @@
#!/bin/bash
#Copyright (c) 2021-2023 Intel Corporation
#
#SPDX-License-Identifier: Apache-2.0
#
# Fail fast: abort on errors, unset variables, and failed pipeline stages.
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
script_dir=$(dirname "$(readlink -f "$0")")
# First (and only) argument: directory containing the fio results.
results_dir=${1:-}
usage(){
echo "$0 <results_dir>"
}
# Validate the argument: must be provided and must be a directory.
if [ "${results_dir}" == "" ];then
echo "missing results directory"
usage
exit 1
fi
if [ ! -d "${results_dir}" ];then
echo "${results_dir} is not a directory"
usage
exit 1
fi
results_dir=$(realpath "${results_dir}")
# Render the Jupyter notebook (fio.ipynb) to HTML inside a scipy-notebook
# container, leaving fio.html in the results directory.
generate_report(){
# Bug fix: quote ${results_dir} and the id substitution — unquoted they
# would word-split on paths containing whitespace.
sudo chown "${USER}:${USER}" -R "${results_dir}"
sudo docker run --rm -e JUPYTER_ENABLE_LAB=yes \
-v "${script_dir}:/home/jovyan" \
-v "${results_dir}:/home/jovyan/results" \
--user "$(id -u):$(id -g)" \
jupyter/scipy-notebook:399cbb986c6b \
bash -e -c '
cd results;
jupyter nbconvert --execute /home/jovyan/fio.ipynb --to html;
cp /home/jovyan/fio.html /home/jovyan/results;
'
}
generate_report

View File

@ -1,39 +0,0 @@
#!/bin/bash
#Copyright (c) 2021-2023 Intel Corporation
#
#SPDX-License-Identifier: Apache-2.0
#
# Fail fast: abort on errors, unset variables, and failed pipeline stages.
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
script_dir=$(dirname "$(readlink -f "$0")")
# Host port on which JupyterLab will be exposed.
NOTEBOOK_PORT="8888"
# First (and only) argument: directory containing the fio results.
results_dir=${1:-}
usage(){
echo "$0 <results_dir>"
}
# Validate the argument: must be provided and must be a directory.
if [ "${results_dir}" == "" ];then
echo "missing results directory"
usage
exit 1
fi
if [ ! -d "${results_dir}" ];then
echo "${results_dir} is not a directory"
usage
exit 1
fi
results_dir=$(realpath "${results_dir}")
# Launch an interactive JupyterLab with this directory mounted as the
# notebook home and the results mounted read/write under ./results.
# Token auth is disabled (--LabApp.token='') — local use only.
sudo -E docker run --rm -p "${NOTEBOOK_PORT}:${NOTEBOOK_PORT}" -e JUPYTER_ENABLE_LAB=yes \
-v "${script_dir}:/home/jovyan" \
-v "${results_dir}:/home/jovyan/results" \
jupyter/scipy-notebook:399cbb986c6b \
start.sh jupyter lab --LabApp.token=''

161
tests/metrics/storage/fio_test.sh Executable file
View File

@ -0,0 +1,161 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# Description of the test:
# This test runs the 'fio benchmark' on kata containers
# https://fio.readthedocs.io/en/latest/
# Only fail on broken pipelines; individual command failures are handled
# (or tolerated) explicitly below.
set -o pipefail
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
# Shared metrics helpers: info/die, clean_env_ctr, metrics_json_*, etc.
source "${SCRIPT_PATH}/../lib/common.bash"
# Unique container name per run to avoid clashes with leftovers.
CONTAINER_ID="fio_bench_${RANDOM}"
IMAGE="docker.io/library/fio-bench:latest"
DOCKERFILE="${SCRIPT_PATH}/fio-dockerfile/Dockerfile"
# Keep the container alive so we can exec fio_bench.sh into it repeatedly.
PAYLOAD_ARGS="${PAYLOAD_ARGS:-tail -f /dev/null}"
TEST_NAME="fio"
# Fio default number of jobs
nj=4
# Cleanup handler (runs on EXIT): delete the fio workload file inside the
# container, then tear down all ctr containers/tasks.
function release_resources() {
sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" ${CONTAINER_ID} sh -c "./fio_bench.sh delete-workload"
# brief pause so the exec finishes before the environment is torn down
sleep 0.5
clean_env_ctr
info "fio test end"
}
trap release_resources EXIT
# Prepare the host and launch the long-running fio-bench container.
function setup() {
	info "setup fio test"
	clean_env_ctr

	# Bug fix: 'cmds' was referenced but never defined, so check_cmds ran
	# with no arguments and verified nothing. 'jq' is required on the host
	# by parse_results/extract_test_params.
	local cmds=("jq")
	check_cmds "${cmds[@]}"

	check_ctr_images "$IMAGE" "$DOCKERFILE"
	init_env

	# drop caches so page-cache state does not skew IO measurements
	sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches'

	# launch container (kept alive by PAYLOAD_ARGS, default 'tail -f /dev/null')
	sudo -E "${CTR_EXE}" run -d --runtime "${CTR_RUNTIME}" "${IMAGE}" "${CONTAINER_ID}" sh -c "${PAYLOAD_ARGS}"
}
# Average bandwidth/IOPS (and their standard deviations) across all fio
# jobs in the JSON results, then emit them as a metrics JSON element.
# $1 - raw fio JSON output.
function parse_results() {
	local data="${1}"
	[ -z "${data}" ] && die "Data results are missing when trying to parsing them."

	local io_type="$(echo "${data}" | jq -r '.jobs[0]."job options".rw')"

	# fio stores read-type results under '.read' and write-type results
	# under '.write'; select the key once instead of duplicating the four
	# jq pipelines per branch (behavior unchanged).
	local op
	case "${io_type}" in
		read|randread) op="read" ;;
		write|randwrite) op="write" ;;
		*) die "io type ${io_type} is not valid when parsing results" ;;
	esac

	# Each value: mean over the jobs, rounded to 3 decimal places.
	local bw="$(echo "${data}" | num_jobs="$nj" jq "[.jobs[] | .${op}.bw] | add/(env.num_jobs|tonumber) | .*1000|round/1000")"
	local bw_stddev="$(echo "${data}" | num_jobs="$nj" jq "[.jobs[] | .${op}.bw_dev] | add/(env.num_jobs|tonumber) | .*1000|round/1000")"
	local iops="$(echo "${data}" | num_jobs="$nj" jq "[.jobs[] | .${op}.iops] | add/(env.num_jobs|tonumber) | .*1000|round/1000")"
	local iops_stddev="$(echo "${data}" | num_jobs="$nj" jq "[.jobs[] | .${op}.iops_stddev] | add/(env.num_jobs|tonumber) | .*1000|round/1000")"

	convert_results_to_json "${io_type}" "${bw}" "${bw_stddev}" "${iops}" "${iops_stddev}"
}
# Extract the fio "job options" (minus name/rw/filename) from the raw fio
# JSON and store them as a metrics JSON element labelled "Parameters".
# $1 - raw fio JSON output.
function extract_test_params() {
local data="${1}"
[ -z "${data}" ] && die "Missing fio parameters when trying to convert to json format."
local json_params="$(echo "${data}" | jq -r '.jobs[0]."job options" | del(.name) | del(.rw) | del(.filename)')"
local json="$(cat << EOF
{
"Parameters" : ${json_params}
}
EOF
)"
metrics_json_add_array_element "${json}"
}
# Wrap the parsed bandwidth/IOPS values in a JSON object keyed by io_type
# and append it to the metrics results array.
# $1 io_type, $2 bw, $3 bw_stddev, $4 iops, $5 iops_stddev (bw in KiB/s).
function convert_results_to_json() {
local io_type="${1}"
local bw="${2}"
local bw_stddev="${3}"
local iops="${4}"
local iops_stddev="${5}"
# ((any arg empty)) && die — || / && left-associativity makes this correct.
[ -z "${io_type}" ] || [ -z "${bw}" ] || [ -z "${bw_stddev}" ] || [ -z "${iops}" ] || [ -z "${iops_stddev}" ] && die "Results are missing when trying to convert to json format."
local json="$(cat << EOF
{
"${io_type}" : {
"bw" : "${bw}",
"bw_stddev" : "${bw_stddev}",
"iops" : "${iops}",
"iops_stddev" : "${iops_stddev}",
"units" : "Kb"
}
}
EOF
)"
metrics_json_add_array_element "${json}"
}
# Store one titled results section: the fio parameters (taken from the
# read pass) plus the parsed read and write results.
# $1 - raw fio JSON of the read pass
# $2 - raw fio JSON of the write pass
# $3 - title for the metrics JSON array
function store_results() {
	local read_data="${1}"
	local write_data="${2}"
	local section_title="${3}"

	[ -z "${read_data}" ] || [ -z "${write_data}" ] || [ -z "${section_title}" ] && die "Missing data and/or title when trying storing results."

	metrics_json_start_array
	extract_test_params "${read_data}"
	parse_results "${read_data}"
	parse_results "${write_data}"
	metrics_json_end_array "${section_title}"
}
# Entry point: run sequential 4K and random 64K read/write fio passes in
# the container, capture each JSON result, then store both sections.
function main() {
setup
# Collect bs=4K, num_jobs=4, io-direct, io-depth=2
info "Processing sequential type workload"
# Each pass runs silenced; the JSON is then fetched via print-latest-results.
sudo -E "${CTR_EXE}" t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh run-read-4k ${nj}" >/dev/null 2>&1
local results_read_4K="$(sudo -E "${CTR_EXE}" t exec -t --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results")"
sleep 0.5
sudo -E "${CTR_EXE}" t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh run-write-4k ${nj}" >/dev/null 2>&1
local results_write_4K="$(sudo -E "${CTR_EXE}" t exec -t --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results")"
# Collect bs=64K, num_jobs=4, io-direct, io-depth=2
info "Processing random type workload"
sleep 0.5
sudo -E "${CTR_EXE}" t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh run-randread-64k ${nj}" >/dev/null 2>&1
local results_rand_read_64K="$(sudo -E "${CTR_EXE}" t exec -t --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results")"
sleep 0.5
sudo -E "${CTR_EXE}" t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh run-randwrite-64k ${nj}" >/dev/null 2>&1
local results_rand_write_64K="$(sudo -E "${CTR_EXE}" t exec -t --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results")"
# parse results
metrics_json_init
store_results "${results_read_4K}" "${results_write_4K}" "Results sequential"
store_results "${results_rand_read_64K}" "${results_rand_write_64K}" "Results random"
metrics_json_save
}
main "$@"