Merge pull request #8542 from dborquez/metrics_fix_deployment_cleaning

metrics: cleans k8s iperf deployment when the test finishes.
David Esparza
2023-12-11 13:14:39 -06:00
committed by GitHub
3 changed files with 104 additions and 133 deletions
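The heart of the change is where cleanup gets registered. Previously the script set two file-scope EXIT traps (remove_tmp_file and iperf3_deployment_cleanup); bash keeps only one EXIT trap per shell, so the later registration silently replaces the earlier one. After this commit the temporary-file removal lives inside iperf3_deployment_cleanup, and main() installs that single handler only after argument parsing, so the iperf3 daemonset, deployment, and scratch file are removed however the script terminates. A minimal, standalone sketch of that pattern (the manifest name and scratch file below are illustrative, not the test's real resources):

#!/usr/bin/env bash
# Minimal sketch of the consolidated EXIT-trap cleanup this commit moves into main().
# "example-daemonset.yaml" and the scratch file are placeholders, not the test's real resources.
set -e

cleanup() {
    # Runs on every exit path: normal completion, an explicit exit, or a failing command under set -e.
    rm -f "${scratch_file}"
    kubectl delete -f example-daemonset.yaml --ignore-not-found=true
}

main() {
    scratch_file=$(mktemp)

    # Register a single EXIT handler once we know the workload will actually start.
    trap cleanup EXIT

    kubectl apply -f example-daemonset.yaml
    echo "collecting results..." > "${scratch_file}"
}

main "$@"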

View File

@@ -29,37 +29,36 @@ COLLECT_ALL="${COLLECT_ALL:-false}"
 IPERF_DEPLOYMENT="${SCRIPT_PATH}/runtimeclass_workloads/iperf3-deployment.yaml"
 IPERF_DAEMONSET="${SCRIPT_PATH}/runtimeclass_workloads/iperf3-daemonset.yaml"
 
-function remove_tmp_file() {
-    rm -rf "${iperf_file}"
-}
-
-trap remove_tmp_file EXIT
-
 function iperf3_all_collect_results() {
+    if [ -z "${bandwidth_result}" ] || [ -z "${jitter_result}" ] || [ -z "${cpu_result}" ] || [ -z "${parallel_result}" ]; then
+        die "iperf couldn't find any results to save."
+    fi
+
     metrics_json_init
     metrics_json_start_array
     local json="$(cat << EOF
     {
         "bandwidth": {
-            "Result" : $bandwidth_result,
-            "Units" : "$bandwidth_units"
+            "Result" : "${bandwidth_result}",
+            "Units" : "${bandwidth_units}"
         },
         "jitter": {
-            "Result" : $jitter_result,
-            "Units" : "$jitter_units"
+            "Result" : "${jitter_result}",
+            "Units" : "${jitter_units}"
         },
         "cpu": {
-            "Result" : $cpu_result,
-            "Units" : "$cpu_units"
+            "Result" : "${cpu_result}",
+            "Units" : "${cpu_units}"
         },
         "parallel": {
-            "Result" : $parallel_result,
-            "Units" : "$parallel_units"
+            "Result" : "${parallel_result}",
+            "Units" : "${parallel_units}"
         }
     }
 EOF
 )"
-    metrics_json_add_array_element "$json"
+    metrics_json_add_array_element "${json}"
     metrics_json_end_array "Results"
 }
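Each probe, and the aggregate collector above, builds its metrics entry as a here-document with shell expansions and hands the resulting string to metrics_json_add_array_element. A standalone sketch of that expansion (hypothetical values; jq is used here only to confirm the string is valid JSON):

#!/usr/bin/env bash
# Build the same style of JSON fragment the script produces and validate it with jq.
bandwidth_result="945123456"        # hypothetical value, normally read from ${iperf_file}
bandwidth_units="bits per second"

json="$(cat << EOF
{
    "bandwidth": {
        "Result" : "${bandwidth_result}",
        "Units" : "${bandwidth_units}"
    }
}
EOF
)"

echo "${json}" | jq .    # exits non-zero if the expansion produced invalid JSON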
@@ -68,27 +67,27 @@ function iperf3_bandwidth() {
     local transmit_timeout="30"
 
     kubectl exec -i "$client_pod_name" -- sh -c "iperf3 -J -c ${server_ip_add} -t ${transmit_timeout}" | jq '.end.sum_received.bits_per_second' > "${iperf_file}"
-    export bandwidth_result=$(cat "${iperf_file}")
-    export bandwidth_result=$(printf "%.3f\n" ${bandwidth_result})
+    bandwidth_result=$(cat "${iperf_file}")
     export bandwidth_units="bits per second"
 
-    if [ "$COLLECT_ALL" == "true" ]; then
-        iperf3_all_collect_results
-    else
-        metrics_json_init
-        metrics_json_start_array
-        local json="$(cat << EOF
-    {
-        "bandwidth": {
-            "Result" : $bandwidth_result,
-            "Units" : "$bandwidth_units"
-        }
-    }
-EOF
-)"
-        metrics_json_add_array_element "$json"
-        metrics_json_end_array "Results"
-    fi
+    [ -z "${bandwidth_result}" ] && die "iperf3 was unable to collect Bandwidth workload results."
+    [ "$COLLECT_ALL" == "true" ] && return
+
+    metrics_json_init
+    metrics_json_start_array
+    local json="$(cat << EOF
+    {
+        "bandwidth": {
+            "Result" : "${bandwidth_result}",
+            "Units" : "${bandwidth_units}"
+        }
+    }
+EOF
+)"
+    metrics_json_add_array_element "${json}"
+    metrics_json_end_array "Results"
 }
 
 function iperf3_jitter() {
@@ -100,79 +99,77 @@ function iperf3_jitter() {
     export jitter_result=$(printf "%0.3f\n" $result)
     export jitter_units="ms"
 
-    if [ "$COLLECT_ALL" == "true" ]; then
-        iperf3_all_collect_results
-    else
-        metrics_json_init
-        metrics_json_start_array
-        local json="$(cat << EOF
-    {
-        "jitter": {
-            "Result" : $jitter_result,
-            "Units" : "ms"
-        }
-    }
-EOF
-)"
-        metrics_json_add_array_element "$json"
-        metrics_json_end_array "Results"
-    fi
+    [ -z "${jitter_result}" ] && die "Iperf3 was unable to collect Jitter results."
+    [ "$COLLECT_ALL" == "true" ] && return
+
+    metrics_json_init
+    metrics_json_start_array
+    local json="$(cat << EOF
+    {
+        "jitter": {
+            "Result" : "${jitter_result}",
+            "Units" : "${jitter_units}"
+        }
+    }
+EOF
+)"
+    metrics_json_add_array_element "${json}"
+    metrics_json_end_array "Results"
 }
 
 function iperf3_parallel() {
     # This will measure four parallel connections with iperf3
     kubectl exec -i "$client_pod_name" -- sh -c "iperf3 -J -c ${server_ip_add} -P 4" | jq '.end.sum_received.bits_per_second' > "${iperf_file}"
-    export parallel_result=$(cat "${iperf_file}")
-    export parallel_result=$(printf "%0.3f\n" $parallel_result)
+    parallel_result=$(cat "${iperf_file}")
     export parallel_units="bits per second"
 
-    if [ "$COLLECT_ALL" == "true" ]; then
-        iperf3_all_collect_results
-    else
-        metrics_json_init
-        metrics_json_start_array
-        local json="$(cat << EOF
-    {
-        "parallel": {
-            "Result" : $parallel_result,
-            "Units" : "$parallel_units"
-        }
-    }
-EOF
-)"
-        metrics_json_add_array_element "$json"
-        metrics_json_end_array "Results"
-    fi
+    [ -z "${parallel_result}" ] && die "Iperf3 was unable to collect Parallel workload results."
+    [ "$COLLECT_ALL" == "true" ] && return
+
+    metrics_json_init
+    metrics_json_start_array
+    local json="$(cat << EOF
+    {
+        "parallel": {
+            "Result" : "${parallel_result}",
+            "Units" : "${parallel_units}"
+        }
+    }
+EOF
+)"
+    metrics_json_add_array_element "${json}"
+    metrics_json_end_array "Results"
 }
 
 function iperf3_cpu() {
-    # Start server
     local transmit_timeout="80"
 
     kubectl exec -i "$client_pod_name" -- sh -c "iperf3 -J -c ${server_ip_add} -t ${transmit_timeout}" | jq '.end.cpu_utilization_percent.host_total' > "${iperf_file}"
-    export cpu_result=$(cat "${iperf_file}")
-    export cpu_result=$(printf "%.3f\n" ${cpu_result})
+    cpu_result=$(cat "${iperf_file}")
     export cpu_units="percent"
 
-    if [ "$COLLECT_ALL" == "true" ]; then
-        iperf3_all_collect_results
-    else
-        metrics_json_init
-        metrics_json_start_array
-        local json="$(cat << EOF
-    {
-        "cpu": {
-            "Result" : $cpu_result,
-            "Units" : "$cpu_units"
-        }
-    }
-EOF
-)"
-        metrics_json_add_array_element "$json"
-        metrics_json_end_array "Results"
-    fi
+    [ -z "${cpu_result}" ] && die "Iperf3 was unable to collect CPU workload results."
+    [ "$COLLECT_ALL" == "true" ] && return
+
+    metrics_json_init
+    metrics_json_start_array
+    local json="$(cat << EOF
+    {
+        "cpu": {
+            "Result" : "${cpu_result}",
+            "Units" : "${cpu_units}"
+        }
+    }
+EOF
+)"
+    metrics_json_add_array_element "${json}"
+    metrics_json_end_array "Results"
 }
 
 function iperf3_start_deployment() {
@@ -214,20 +211,16 @@ function iperf3_start_deployment() {
 }
 
 function iperf3_deployment_cleanup() {
-    info "iperf: deleting deployments and services"
-    kubectl delete pod "${server_pod_name}" "${client_pod_name}"
+    info "Iperf: deleting deployments and services"
+    rm -rf "${iperf_file}"
     kubectl delete -f "${IPERF_DAEMONSET}"
     kubectl delete -f "${IPERF_DEPLOYMENT}"
     kill_kata_components && sleep 1
     kill_kata_components
     check_processes
-    info "End of iperf3 test"
+    info "End of Iperf3 test"
 }
 
-# The deployment must be removed in
-# any case the script terminates.
-trap iperf3_deployment_cleanup EXIT
-
 function help() {
 echo "$(cat << EOF
 Usage: $0 "[options]"
@@ -246,13 +239,10 @@ EOF
 }
 
 function main() {
-    init_env
-    iperf3_start_deployment
-
     local OPTIND
     while getopts ":abcjph:" opt
     do
-        case "$opt" in
+        case "${opt}" in
         a) # all tests
             test_all="1"
             ;;
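A side note on the option string, which is unchanged here: the leading colon in ":abcjph:" puts getopts into silent error-reporting mode, so an unknown flag arrives as "?" with the offending character in OPTARG instead of getopts printing its own message. A reduced, self-contained sketch of that dispatch (flag set invented for illustration):

#!/usr/bin/env bash
# Reduced getopts dispatch in silent mode (leading colon in the optstring).
while getopts ":ab" opt; do
    case "${opt}" in
        a) test_all="1" ;;
        b) test_bandwidth="1" ;;
        \?) echo "unknown option: -${OPTARG}" >&2; exit 1 ;;
    esac
done

[ -z "${test_all}" ] && [ -z "${test_bandwidth}" ] && { echo "must choose at least one test" >&2; exit 1; }
echo "selected: all=${test_all:-0} bandwidth=${test_bandwidth:-0}"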
@@ -290,6 +280,12 @@ function main() {
     [[ -z "$test_all" ]] && \
         help && die "Must choose at least one test"
 
+    init_env
+
+    # The deployment must be removed in
+    # any case the script terminates.
+    trap iperf3_deployment_cleanup EXIT
+    iperf3_start_deployment
+
     if [ "$test_bandwith" == "1" ]; then
         iperf3_bandwidth
     fi
@@ -307,10 +303,15 @@ function main() {
     fi
 
     if [ "$test_all" == "1" ]; then
-        export COLLECT_ALL=true && iperf3_bandwidth && iperf3_jitter && iperf3_cpu && iperf3_parallel
+        export COLLECT_ALL=true
+        iperf3_bandwidth
+        iperf3_jitter
+        iperf3_cpu
+        iperf3_parallel
+        iperf3_all_collect_results
     fi
 
-    info "iperf3: saving test results"
+    info "Iperf3: saving test results"
     metrics_json_save
 }
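All four probes share one mechanic: run iperf3 in JSON mode (-J) inside the client pod and extract a single field with jq, e.g. .end.sum_received.bits_per_second for bandwidth or .end.cpu_utilization_percent.host_total for CPU utilization. Stripped of the Kubernetes plumbing, that extraction looks roughly like this (the server host is a placeholder; iperf3 and jq are assumed to be installed locally):

#!/usr/bin/env bash
# Pull the received-bandwidth figure out of iperf3's JSON report with jq.
# "iperf3-server.example" is a placeholder host; the jq path is the one the script uses.
server="iperf3-server.example"
result_file=$(mktemp)
trap 'rm -f "${result_file}"' EXIT

iperf3 -J -c "${server}" -t 10 | jq '.end.sum_received.bits_per_second' > "${result_file}"

bandwidth=$(cat "${result_file}")
[ -z "${bandwidth}" ] && { echo "no result collected" >&2; exit 1; }
echo "bandwidth: ${bandwidth} bits per second"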

View File

@@ -42,3 +42,17 @@ spec:
           name: server
       terminationGracePeriodSeconds: 0
       runtimeClassName: kata
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: iperf3-server
+spec:
+  selector:
+    app: iperf3-server
+  ports:
+  - protocol: TCP
+    port: 5201
+    targetPort: server
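The Service added above gives the iperf3 server a stable name: it selects pods labelled app: iperf3-server and forwards TCP port 5201 to the container port named server. A quick sanity check of the wiring (assuming the resources land in the default namespace) could be:

# Confirm the Service exposes port 5201 and has picked up the iperf3 server pod(s).
kubectl get service iperf3-server -o jsonpath='{.spec.ports[0].port}{"\n"}'
kubectl get endpoints iperf3-server
# From the client pod, the server is then reachable through the Service's DNS name, e.g.:
# iperf3 -c iperf3-server.default.svc.cluster.local -t 5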

View File

@@ -1,44 +0,0 @@
-#
-# Copyright (c) 2021-2023 Intel Corporation
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: iperf3-server-deployment
-  labels:
-    app: iperf3-server
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: iperf3-server
-  template:
-    metadata:
-      labels:
-        app: iperf3-server
-    spec:
-      affinity:
-        nodeAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - weight: 1
-            preference:
-              matchExpressions:
-              - key: kubernetes.io/role
-                operator: In
-                values:
-                - master
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        operator: Exists
-        effect: NoSchedule
-      containers:
-      - name: iperf3-server
-        image: networkstatic/iperf3
-        args: ['-s']
-        ports:
-        - containerPort: 5201
-          name: server
-      terminationGracePeriodSeconds: 0
-      runtimeClassName: kata
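With teardown now centralised in iperf3_deployment_cleanup, a post-run check that nothing was left behind (label and resource kinds taken from the manifests above; namespace assumed to be default) might look like:

# Expect empty output for every resource kind once the test has finished.
kubectl get pods,daemonsets,services -l app=iperf3-server --no-headers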