Merge pull request #8339 from fidencio/topic/stable-3.2-backports-oct-31st-2023

stable-3.2 | Backport everything needed after the release until Oct 31st 2023
Fabiano Fidêncio 2023-10-31 14:36:38 +01:00 committed by GitHub
commit 220a2a0300
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 534 additions and 73 deletions

View File

@@ -187,7 +187,7 @@ jobs:
echo "/usr/local/go/bin" >> $GITHUB_PATH
- name: Install system dependencies
run: |
sudo apt-get -y install moreutils hunspell pandoc
sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc
- name: Run check
run: |
export PATH=${PATH}:${GOPATH}/bin
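The two dictionary packages supply the en_GB/en_US word lists that hunspell needs, presumably for the documentation spell check this job runs. To reproduce locally, a minimal sketch (file name illustrative):

sudo apt-get -y install hunspell hunspell-en-gb hunspell-en-us
hunspell -d en_US -l docs/README.md   # -l lists unrecognized words, one per line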

View File

@@ -1291,7 +1291,7 @@ mod tests {
#[tokio::test]
#[serial]
#[cfg(not(target_arch = "aarch64"))]
#[cfg(not(any(target_arch = "aarch64", target_arch = "s390x")))]
async fn create_tmpfs() {
skip_if_not_root!();

View File

@@ -125,14 +125,42 @@ func (endpoint *IPVlanEndpoint) Detach(ctx context.Context, netNsCreated bool, n
})
}
// HotAttach for ipvlan endpoint not supported yet
func (endpoint *IPVlanEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
return fmt.Errorf("IPVlanEndpoint does not support Hot attach")
span, ctx := ipvlanTrace(ctx, "HotAttach", endpoint)
defer span.End()
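// Wire the endpoint into the VM network first, then ask the hypervisor to hotplug the device.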
if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging ipvlan ep")
return err
}
if _, err := h.HotplugAddDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error hotplugging ipvlan ep")
return err
}
return nil
}
// HotDetach for ipvlan endpoint not supported yet
func (endpoint *IPVlanEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("IPVlanEndpoint does not support Hot detach")
if !netNsCreated {
return nil
}
span, ctx := ipvlanTrace(ctx, "HotDetach", endpoint)
defer span.End()
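// Un-bridge inside the network namespace at netNsPath; a failure here is only logged so the hotplug removal below still proceeds.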
if err := doNetNS(netNsPath, func(_ ns.NetNS) error {
return xDisconnectVMNetwork(ctx, endpoint)
}); err != nil {
networkLogger().WithError(err).Warn("Error un-bridging ipvlan ep")
}
if _, err := h.HotplugRemoveDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error detach ipvlan ep")
return err
}
return nil
}
func (endpoint *IPVlanEndpoint) save() persistapi.NetworkEndpoint {

View File

@@ -122,14 +122,42 @@ func (endpoint *MacvlanEndpoint) Detach(ctx context.Context, netNsCreated bool,
})
}
// HotAttach for bridged macvlan endpoint not supported yet
func (endpoint *MacvlanEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
return fmt.Errorf("MacvlanEndpoint does not support Hot attach")
span, ctx := macvlanTrace(ctx, "HotAttach", endpoint)
defer span.End()
if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging macvlan ep")
return err
}
if _, err := h.HotplugAddDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error hotplugging macvlan ep")
return err
}
return nil
}
// HotDetach for bridged macvlan endpoint not supported yet
func (endpoint *MacvlanEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("MacvlanEndpoint does not support Hot detach")
if !netNsCreated {
return nil
}
span, ctx := macvlanTrace(ctx, "HotDetach", endpoint)
defer span.End()
if err := doNetNS(netNsPath, func(_ ns.NetNS) error {
return xDisconnectVMNetwork(ctx, endpoint)
}); err != nil {
networkLogger().WithError(err).Warn("Error un-bridging macvlan ep")
}
if _, err := h.HotplugRemoveDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error detach macvlan ep")
return err
}
return nil
}
func (endpoint *MacvlanEndpoint) save() persistapi.NetworkEndpoint {

View File

@@ -190,14 +190,18 @@ function clean_env_ctr()
# Kills running shim and hypervisor components
function kill_kata_components() {
local ATTEMPTS=2
local TIMEOUT="30s"
local PID_NAMES=( "containerd-shim-kata-v2" "qemu-system-x86_64" "cloud-hypervisor" )
sudo systemctl stop containerd
# iterate over the list of kata components and stop them
for (( i=1; i<=ATTEMPTS; i++ )); do
for PID_NAME in "${PID_NAMES[@]}"; do
[[ ! -z "$(pidof ${PID_NAME})" ]] && sudo killall "${PID_NAME}" >/dev/null 2>&1 || true
done
sleep 1
done
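# Bound the restart so a wedged systemctl call cannot hang the teardown; SIGKILL it after ${TIMEOUT}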
sudo timeout -s SIGKILL "${TIMEOUT}" systemctl start containerd
}

View File

@@ -19,6 +19,8 @@ function add_kata_bot_info() {
function rebase_atop_of_the_latest_target_branch() {
if [ -n "${TARGET_BRANCH}" ]; then
echo "Rebasing atop of the latest ${TARGET_BRANCH}"
# Recover from any previous rebase left halfway
git rebase --abort 2> /dev/null || true
git rebase origin/${TARGET_BRANCH}
fi
}

View File

@@ -98,6 +98,58 @@ midval = 98.0
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "fio"
type = "json"
description = "measure sequential read throughput using fio"
# Min and Max values to set a 'range' that
# the mean of the JSON results data must fall
# within (inclusive)
checkvar = "[.\"fio\".\"Results sequential\"] | .[] | .[] | .read.bw | select( . != null )"
checktype = "mean"
midval = 312776
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "fio"
type = "json"
description = "measure sequential write throughput using fio"
# Min and Max values to set a 'range' that
# the mean of the JSON results data must fall
# within (inclusive)
checkvar = "[.\"fio\".\"Results sequential\"] | .[] | .[] | .write.bw | select( . != null )"
checktype = "mean"
midval = 307948
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "fio"
type = "json"
description = "measure random read throughput using fio"
# Min and Max values to set a 'range' that
# the mean of the JSON results data must fall
# within (inclusive)
checkvar = "[.\"fio\".\"Results random\"] | .[] | .[] | .randread.bw | select( . != null )"
checktype = "mean"
midval = 1351339
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "fio"
type = "json"
description = "measure random write throughput using fio"
# Min and Max values to set a 'range' that
# the mean of the JSON results data must fall
# within (inclusive)
checkvar = "[.\"fio\".\"Results random\"] | .[] | .[] | .randwrite.bw | select( . != null )"
checktype = "mean"
midval = 1440540.7
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "latency"
type = "json"
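For reference, a quick sketch of how a midval/minpercent/maxpercent triple plausibly translates into the accepted window, using the sequential-read values above:

midval=312776
min=$(echo "$midval * (1 - 20.0 / 100)" | bc -l)   # 250220.8
max=$(echo "$midval * (1 + 20.0 / 100)" | bc -l)   # 375331.2
echo "pass if the mean read bandwidth falls within [$min, $max]"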

View File

@@ -98,6 +98,58 @@ midval = 98.0
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "fio"
type = "json"
description = "measure sequential read throughput using fio"
# Min and Max values to set a 'range' that
# the mean of the JSON results data must fall
# within (inclusive)
checkvar = "[.\"fio\".\"Results sequential\"] | .[] | .[] | .read.bw | select( . != null )"
checktype = "mean"
midval = 327066.8
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "fio"
type = "json"
description = "measure sequential write throughput using fio"
# Min and Max values to set a 'range' that
# the mean of the JSON results data must fall
# within (inclusive)
checkvar = "[.\"fio\".\"Results sequential\"] | .[] | .[] | .write.bw | select( . != null )"
checktype = "mean"
midval = 309023.65
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "fio"
type = "json"
description = "measure random read throughput using fio"
# Min and Max values to set a 'range' that
# the mean of the JSON results data must fall
# within (inclusive)
checkvar = "[.\"fio\".\"Results random\"] | .[] | .[] | .randread.bw | select( . != null )"
checktype = "mean"
midval = 1301793.45
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "fio"
type = "json"
description = "measure random write throughput using fio"
# Min and Max values to set a 'range' that
# the mean of the JSON results data must fall
# within (inclusive)
checkvar = "[.\"fio\".\"Results random\"] | .[] | .[] | .randwrite.bw | select( . != null )"
checktype = "mean"
midval = 1457926.8
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "latency"
type = "json"

View File

@@ -29,7 +29,7 @@ func (c *jsonRecord) load(filepath string, metric *metrics) error {
log.Debugf(" Run jq '%v' %s", metric.CheckVar, filepath)
out, err := exec.Command("jq", metric.CheckVar, filepath).Output()
out, err := exec.Command("jq", "-r", metric.CheckVar, filepath).Output()
if err != nil {
log.Warnf("Failed to run [jq %v %v][%v]", metric.CheckVar, filepath, err)
return err
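The added -r flag makes jq emit raw values instead of JSON-encoded ones, which matters whenever a checkvar selects a string:

echo '{"name": "fio"}' | jq '.name'      # prints "fio" (quoted)
echo '{"name": "fio"}' | jq -r '.name'   # prints fio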

View File

@@ -84,9 +84,9 @@ function run_test_tensorflow() {
}
function run_test_fio() {
info "Skipping FIO test temporarily using ${KATA_HYPERVISOR} hypervisor"
info "Running FIO test using ${KATA_HYPERVISOR} hypervisor"
# bash tests/metrics/storage/fio-k8s/fio-test-ci.sh
bash tests/metrics/storage/fio_test.sh
}
function run_test_iperf() {

View File

@@ -201,8 +201,8 @@ function kill_processes_before_start()
CTR_PROCS=$(sudo "${CTR_EXE}" t list -q)
[[ -n "${CTR_PROCS}" ]] && clean_env_ctr
kill_kata_components && sleep 1
kill_kata_components
check_processes
}

View File

@@ -0,0 +1,234 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# This test measures the following UDP network essentials:
# - simplex bandwidth
# - parallel bandwidth
#
# These metrics are obtained from the interconnection between
# a client and a server using the iperf3 tool.
# The following cases are covered:
#
# case 1:
# container-server <----> container-client
#
# case 2:
# container-server <----> host-client
set -o pipefail
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/../../lib/common.bash"
iperf_file=$(mktemp iperfresults.XXXXXXXXXX)
TEST_NAME="${TEST_NAME:-network-iperf3-udp}"
COLLECT_ALL="${COLLECT_ALL:-false}"
IPERF_DEPLOYMENT="${SCRIPT_PATH}/runtimeclass_workloads/iperf3-deployment.yaml"
IPERF_DAEMONSET="${SCRIPT_PATH}/runtimeclass_workloads/iperf3-daemonset.yaml"
function remove_tmp_file() {
rm -rf "${iperf_file}"
}
trap remove_tmp_file EXIT
function iperf3_udp_all_collect_results() {
metrics_json_init
metrics_json_start_array
local json="$(cat << EOF
{
"bandwidth": {
"Result" : $bandwidth_result,
"Units" : "$bandwidth_units"
},
"parallel": {
"Result" : $parallel_result,
"Units" : "$parallel_units"
}
}
EOF
)"
metrics_json_add_array_element "$json"
metrics_json_end_array "Results"
}
function iperf3_udp_bandwidth() {
# Run the UDP bandwidth test from the client to the server
local transmit_timeout="120"
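# grep keeps iperf3's receiver summary line; cut pulls the bandwidth column from it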
kubectl exec -i "$client_pod_name" -- sh -c "iperf3 -c ${server_ip_add} -u -b 1G -t $transmit_timeout" | grep receiver | cut -d' ' -f13 > "${iperf_file}"
export bandwidth_result=$(cat "${iperf_file}")
export bandwidth_units="Mbits/sec"
if [ "$COLLECT_ALL" == "true" ]; then
iperf3_udp_all_collect_results
else
metrics_json_init
metrics_json_start_array
local json="$(cat << EOF
{
"bandwidth": {
"Result" : $bandwidth_result,
"Units" : "$bandwidth_units"
}
}
EOF
)"
metrics_json_add_array_element "$json"
metrics_json_end_array "Results"
fi
}
function iperf3_udp_parallel() {
# Run the UDP parallel-streams test from the client to the server
local transmit_timeout="120"
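# -J emits JSON; jq extracts the throughput summed across the four parallel streams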
kubectl exec -i "$client_pod_name" -- sh -c "iperf3 -c ${server_ip_add} -u -J -P 4 -t $transmit_timeout" | jq '.end.sum.bits_per_second' > "${iperf_file}"
export parallel_result=$(cat "${iperf_file}")
export parallel_units="bits/sec"
if [ "$COLLECT_ALL" == "true" ]; then
iperf3_udp_all_collect_results
else
metrics_json_init
metrics_json_start_array
local json="$(cat << EOF
{
"parallel": {
"Result" : $parallel_result,
"Units" : "$parallel_units"
}
}
EOF
)"
metrics_json_add_array_element "$json"
metrics_json_end_array "Results"
fi
}
function iperf3_udp_start_deployment() {
cmds=("bc")
check_cmds "${cmds[@]}"
# Check no processes are left behind
check_processes
wait_time=20
sleep_time=2
# Create deployment
kubectl create -f "${IPERF_DEPLOYMENT}"
# Check deployment creation
local cmd="kubectl wait --for=condition=Available deployment/iperf3-server-deployment"
waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"
# Create DaemonSet
kubectl create -f "${IPERF_DAEMONSET}"
# Get the names of the server pod
export server_pod_name=$(kubectl get pods -o name | grep server | cut -d '/' -f2)
# Verify the server pod is working
local cmd="kubectl get pod ${server_pod_name} -o yaml | grep 'phase: Running'"
waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"
# Get the names of client pod
export client_pod_name=$(kubectl get pods -o name | grep client | cut -d '/' -f2)
# Verify the client pod is working
local cmd="kubectl get pod ${client_pod_name} -o yaml | grep 'phase: Running'"
waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"
# Get the ip address of the server pod
export server_ip_add=$(kubectl get pod "${server_pod_name}" -o jsonpath='{.status.podIP}')
}
function iperf3_udp_deployment_cleanup() {
info "iperf: deleting deployments and services"
kubectl delete pod "${server_pod_name}" "${client_pod_name}"
kubectl delete -f "${IPERF_DAEMONSET}"
kubectl delete -f "${IPERF_DEPLOYMENT}"
kill_kata_components && sleep 1
kill_kata_components
check_processes
info "End of iperf3 test"
}
# The deployment must be removed no matter
# how the script terminates.
trap iperf3_udp_deployment_cleanup EXIT
function help() {
echo "$(cat << EOF
Usage: $0 [options]
Description:
This script implements a number of network metrics
using iperf3 with UDP.
Options:
-a Run all tests
-b Run bandwidth tests
-p Run parallel tests
-h Help
EOF
)"
}
function main() {
init_env
iperf3_udp_start_deployment
local OPTIND
while getopts ":abph" opt
do
case "$opt" in
a) # all tests
test_all="1"
;;
b) # bandwidth test
test_bandwidth="1"
;;
p) # parallel test
test_parallel="1"
;;
h)
help
exit 0;
;;
:)
echo "Missing argument for -$OPTARG";
help
exit 1;
;;
esac
done
shift $((OPTIND-1))
[[ -z "$test_bandwith" ]] && \
[[ -z "$test_parallel" ]] && \
[[ -z "$test_all" ]] && \
help && die "Must choose at least one test"
if [ "$test_bandwith" == "1" ]; then
iperf3_udp_bandwidth
fi
if [ "$test_parallel" == "1" ]; then
iperf3_udp_parallel
fi
if [ "$test_all" == "1" ]; then
export COLLECT_ALL=true && iperf3_udp_bandwidth && iperf3_udp_parallel
fi
info "iperf3: saving test results"
metrics_json_save
}
main "$@"
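Typical invocations, assuming the script is saved as iperf3-udp.sh (name illustrative):

bash iperf3-udp.sh -b   # bandwidth test only
bash iperf3-udp.sh -a   # run all tests and collect combined results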

View File

@@ -16,13 +16,13 @@ set -o pipefail
# read, write, randread, randwrite, randrw, readwrite
io_type="read"
block_size="4k"
num_jobs="2"
num_jobs="4"
# FIO default settings
readonly ioengine="libaio"
readonly rate_process="linear"
readonly disable_buffered="1"
readonly iodepth="2"
readonly iodepth="8"
readonly runtime="10s"
# ramp time
readonly rt="10s"
@@ -88,12 +88,12 @@ function launch_workload() {
--iodepth="${iodepth}" \
--gtod_reduce="1" \
--randrepeat="1" \
| tee -a ${summary_file_local} > /dev/null 2>&1
--output "${summary_file_local}" >/dev/null 2>&1
}
function print_latest_results() {
[ ! -f "${summary_file_local}" ] && echo "Error: no results to display; run a test first" && exit 1
echo "$(cat ${summary_file_local})"
cat "${summary_file_local}"
}
function delete_workload() {
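fio's native --output flag, used above in place of the tee pipeline, writes the report straight to a file. A standalone sketch with illustrative job parameters:

fio --name=seqread --rw=read --bs=4k --size=64M --ioengine=libaio --direct=1 --output=/tmp/fio_summary.txt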

View File

@@ -18,24 +18,32 @@ IMAGE="docker.io/library/fio-bench:latest"
DOCKERFILE="${SCRIPT_PATH}/fio-dockerfile/Dockerfile"
PAYLOAD_ARGS="${PAYLOAD_ARGS:-tail -f /dev/null}"
TEST_NAME="fio"
REQUIRED_CMDS=("jq" "script")
TMP_DIR=$(mktemp --tmpdir -d fio.XXXXXXXXXX)
results_file="${TMP_DIR}/fio_results.json"
results_read=""
results_write=""
# Fio default number of jobs
nj=4
function release_resources() {
sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" ${CONTAINER_ID} sh -c "./fio_bench.sh delete-workload"
sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${CONTAINER_ID}" sh -c "./fio_bench.sh delete-workload"
sudo -E "${CTR_EXE}" t kill -a -s SIGKILL "${CONTAINER_ID}"
sudo -E "${CTR_EXE}" c rm "${CONTAINER_ID}"
rm -rf "${TMP_DIR}"
sleep 0.5
clean_env_ctr
info "fio test end"
info "removing containers done"
}
trap release_resources EXIT
function setup() {
info "setup fio test"
clean_env_ctr
check_cmds "${cmds[@]}"
check_cmds "${REQUIRED_CMDS[@]}"
check_ctr_images "$IMAGE" "$DOCKERFILE"
clean_env_ctr
init_env
# drop caches
@@ -107,7 +115,7 @@ function convert_results_to_json() {
"bw_stddev" : "${bw_stddev}",
"iops" : "${iops}",
"iops_stddev" : "${iops_stddev}",
"units" : "Kb"
"units" : "KB/s"
}
}
EOF
@@ -116,46 +124,52 @@ EOF
}
function store_results() {
local data_r="${1}"
local data_w="${2}"
local title="${3}"
local title="${1}"
[ -z "${data_r}" ] || [ -z "${data_w}" ] || [ -z "${title}" ] && die "Missing data and/or title when trying storing results."
[ -z "${results_read}" ] || [ -z "${results_write}" ] || [ -z "${title}" ] && die "Missing data and/or title when trying storing results."
metrics_json_start_array
extract_test_params "${data_r}"
parse_results "${data_r}"
parse_results "${data_w}"
extract_test_params "${results_read}"
parse_results "${results_read}"
parse_results "${results_write}"
metrics_json_end_array "${title}"
}
function main() {
setup
# Collect bs=4K, num_jobs=4, io-direct, io-depth=2
# Collect bs=4K, num_jobs=4, io-direct, io-depth=8
info "Processing sequential type workload"
sudo -E "${CTR_EXE}" t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh run-read-4k ${nj}" >/dev/null 2>&1
local results_read_4K="$(sudo -E "${CTR_EXE}" t exec -t --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results")"
sudo -E ${CTR_EXE} t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results" >"${results_file}"
results_read=$(<"${results_file}")
sleep 0.5
sudo -E "${CTR_EXE}" t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh run-write-4k ${nj}" >/dev/null 2>&1
local results_write_4K="$(sudo -E "${CTR_EXE}" t exec -t --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results")"
sudo -E ${CTR_EXE} t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results" >"${results_file}"
results_write=$(<"${results_file}")
# Collect bs=64K, num_jobs=4, io-direct, io-depth=2
# parse results sequential
metrics_json_init
store_results "Results sequential"
# Collect bs=64K, num_jobs=4, io-direct, io-depth=8
info "Processing random type workload"
sleep 0.5
sudo -E "${CTR_EXE}" t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh run-randread-64k ${nj}" >/dev/null 2>&1
local results_rand_read_64K="$(sudo -E "${CTR_EXE}" t exec -t --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results")"
sudo -E ${CTR_EXE} t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results" >"${results_file}"
results_read=$(<"${results_file}")
sleep 0.5
sudo -E "${CTR_EXE}" t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh run-randwrite-64k ${nj}" >/dev/null 2>&1
local results_rand_write_64K="$(sudo -E "${CTR_EXE}" t exec -t --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results")"
sudo -E ${CTR_EXE} t exec --exec-id "${RANDOM}" ${CONTAINER_ID} sh -c "./fio_bench.sh print-latest-results" >"${results_file}"
results_write=$(<"${results_file}")
# parse results
metrics_json_init
store_results "${results_read_4K}" "${results_write_4K}" "Results sequential"
store_results "${results_rand_read_64K}" "${results_rand_write_64K}" "Results random"
# parse results random
store_results "Results random"
metrics_json_save
}
main "$@"
info "fio test end"

View File

@@ -34,6 +34,12 @@ readonly kata_install_dir="${kata_install_dir:-/opt/kata}"
readonly kata_runtime_name="kata"
readonly kata_runtime_type="io.containerd.${kata_runtime_name}.v2"
readonly kata_shim_v2="containerd-shim-${kata_runtime_name}-v2"
readonly kata_configuration="configuration"
readonly kata_clh_runtime_name="kata-clh"
readonly kata_clh_runtime_type="io.containerd.${kata_clh_runtime_name}.v2"
readonly kata_clh_shim_v2="containerd-shim-${kata_clh_runtime_name}-v2"
readonly kata_clh_configuration="configuration-clh"
# Systemd unit name for containerd daemon
readonly containerd_service_name="containerd.service"
@@ -96,7 +102,8 @@ github_get_latest_release()
# - The sort(1) call; none of the standard utilities support semver
# so attempt to perform a semver sort manually.
# - Pre-releases are excluded via the select() call.
local latest=$(curl -sL "$url" |\
local latest
latest=$(curl -sL "$url" |\
jq -r '.[].tag_name | select(contains("-") | not)' |\
sort -t "." -k1,1n -k2,2n -k3,3n |\
tail -1 || true)
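Many changes in this file apply the same shellcheck SC2155 fix: declare the local first and assign separately, so the command's exit status is not masked by local. A quick illustration:

f() {
    local a=$(false); echo "combined: $?"   # prints 0: 'local' swallows the failure
    local b
    b=$(false); echo "split: $?"            # prints 1: the assignment status survives
}
f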
@@ -136,18 +143,23 @@ github_get_release_file_url()
local url="${1:-}"
local version="${2:-}"
local arch=$(uname -m)
# The version, less any leading 'v'
local version_number
version_number=${version#v}
local arch
arch=$(uname -m)
[ "$arch" = "x86_64" ] && arch="amd64"
local regex=""
case "$url" in
*kata*)
regex="kata-static-.*-${arch}.tar.xz"
regex="kata-static-${version}-${arch}.tar.xz"
;;
*containerd*)
[ "$arch" = "x86_64" ] && arch="amd64"
regex="containerd-.*-linux-${arch}.tar.gz"
regex="containerd-${version_number}-linux-${arch}.tar.gz"
;;
*) die "invalid url: '$url'" ;;
@@ -157,10 +169,12 @@ github_get_release_file_url()
download_url=$(curl -sL "$url" |\
jq --arg version "$version" \
-r '.[] | select(.tag_name == $version) | .assets[].browser_download_url' |\
-r '.[] |
select( (.tag_name == $version) or (.tag_name == "v" + $version) ) |
.assets[].browser_download_url' |\
grep "/${regex}$")
download_url=$(echo $download_url | awk '{print $1}')
download_url=$(echo "$download_url" | awk '{print $1}')
[ -z "$download_url" ] && die "Cannot determine download URL for version $version ($url)"
@@ -181,7 +195,8 @@ github_download_release()
pushd "$tmpdir" >/dev/null
local download_url=$(github_get_release_file_url \
local download_url
download_url=$(github_get_release_file_url \
"$url" \
"$version" || true)
@@ -192,7 +207,8 @@ github_download_release()
# progress.
curl -LO "$download_url"
local filename=$(echo "$download_url" | awk -F'/' '{print $NF}')
local filename
filename=$(echo "$download_url" | awk -F'/' '{print $NF}')
ls -d "${PWD}/${filename}"
@@ -246,7 +262,7 @@ containerd_installed()
command -v containerd &>/dev/null && return 0
systemctl list-unit-files --type service |\
egrep -q "^${containerd_service_name}\>" \
grep -Eq "^${containerd_service_name}\>" \
&& return 0
return 1
@@ -291,8 +307,11 @@ check_deps()
for elem in "${elems[@]}"
do
local cmd=$(echo "$elem"|cut -d: -f1)
local pkg=$(echo "$elem"|cut -d: -f2-)
local cmd
cmd=$(echo "$elem"|cut -d: -f1)
local pkg
pkg=$(echo "$elem"|cut -d: -f2-)
command -v "$cmd" &>/dev/null && continue
@@ -301,7 +320,8 @@ check_deps()
[ "${#pkgs_to_install[@]}" -eq 0 ] && return 0
local packages="${pkgs_to_install[@]}"
local packages
packages="${pkgs_to_install[@]}"
info "Installing packages '$packages'"
@@ -352,13 +372,15 @@ github_download_package()
[ -z "$releases_url" ] && die "need releases URL"
[ -z "$project" ] && die "need project URL"
local version=$(github_resolve_version_to_download \
local version
version=$(github_resolve_version_to_download \
"$releases_url" \
"$requested_version" || true)
[ -z "$version" ] && die "Unable to determine $project version to download"
local file=$(github_download_release \
local file
file=$(github_download_release \
"$releases_url" \
"$version")
@@ -376,15 +398,19 @@ install_containerd()
info "Downloading $project release ($version_desc)"
local results=$(github_download_package \
local results
results=$(github_download_package \
"$containerd_releases_url" \
"$requested_version" \
"$project")
[ -z "$results" ] && die "Cannot download $project release file"
local version=$(echo "$results"|cut -d: -f1)
local file=$(echo "$results"|cut -d: -f2-)
local version
version=$(echo "$results"|cut -d: -f1)
local file
file=$(echo "$results"|cut -d: -f2-)
[ -z "$version" ] && die "Cannot determine $project resolved version"
[ -z "$file" ] && die "Cannot determine $project release file"
@@ -423,7 +449,8 @@ configure_containerd()
then
pushd "$tmpdir" >/dev/null
local service_url=$(printf "%s/%s/%s/%s" \
local service_url
service_url=$(printf "%s/%s/%s/%s" \
"https://raw.githubusercontent.com" \
"${containerd_slug}" \
"main" \
@@ -451,7 +478,8 @@ configure_containerd()
info "Created $cfg"
}
local original="${cfg}-pre-kata-$(date -I)"
local original
original="${cfg}-pre-kata-$(date -I)"
sudo grep -q "$kata_runtime_type" "$cfg" || {
sudo cp "$cfg" "${original}"
@@ -477,6 +505,14 @@ configure_containerd()
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.${kata_runtime_name}]
runtime_type = "${kata_runtime_type}"
privileged_without_host_devices = true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.${kata_runtime_name}.options]
ConfigPath = "/opt/kata/share/defaults/kata-containers/${kata_configuration}.toml"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.${kata_clh_runtime_name}]
runtime_type = "${kata_clh_runtime_type}"
privileged_without_host_devices = true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.${kata_clh_runtime_name}.options]
ConfigPath = "/opt/kata/share/defaults/kata-containers/${kata_clh_configuration}.toml"
EOF
modified="true"
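With both runtime entries registered, a hypothetical smoke test of the new cloud-hypervisor class (image and container name illustrative):

sudo ctr image pull docker.io/library/busybox:latest
sudo ctr run --runtime io.containerd.kata-clh.v2 --rm docker.io/library/busybox:latest smoke uname -r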
@@ -520,15 +556,19 @@ install_kata()
info "Downloading $project release ($version_desc)"
local results=$(github_download_package \
local results
results=$(github_download_package \
"$kata_releases_url" \
"$requested_version" \
"$project")
[ -z "$results" ] && die "Cannot download $project release file"
local version=$(echo "$results"|cut -d: -f1)
local file=$(echo "$results"|cut -d: -f2-)
local version
version=$(echo "$results"|cut -d: -f1)
local file
file=$(echo "$results"|cut -d: -f2-)
[ -z "$version" ] && die "Cannot determine $project resolved version"
[ -z "$file" ] && die "Cannot determine $project release file"
@@ -541,12 +581,14 @@ install_kata()
create_links_for+=("kata-collect-data.sh")
create_links_for+=("kata-runtime")
local from_dir=$(printf "%s/bin" "$kata_install_dir")
local from_dir
from_dir=$(printf "%s/bin" "$kata_install_dir")
# Since we're unpacking to the root directory, perform a sanity check
# on the archive first.
local unexpected=$(tar -tf "${file}" |\
egrep -v "^(\./$|\./opt/$|\.${kata_install_dir}/)" || true)
local unexpected
unexpected=$(tar -tf "${file}" |\
grep -Ev "^(\./$|\./opt/$|\.${kata_install_dir}/)" || true)
[ -n "$unexpected" ] && die "File '$file' contains unexpected paths: '$unexpected'"
@@ -558,7 +600,8 @@ install_kata()
for file in "${create_links_for[@]}"
do
local from_path=$(printf "%s/%s" "$from_dir" "$file")
local from_path
from_path=$(printf "%s/%s" "$from_dir" "$file")
[ -e "$from_path" ] || die "File $from_path not found"
sudo ln -sf "$from_path" "$link_dir"
@@ -657,7 +700,8 @@ test_installation()
# Used to prove that the kernel in the container
# is different to the host kernel.
local container_kernel=$(sudo ctr run \
local container_kernel
container_kernel=$(sudo ctr run \
--runtime "$kata_runtime_type" \
--rm \
"$image" \
@@ -666,7 +710,8 @@ test_installation()
[ -z "$container_kernel" ] && die "Failed to test $kata_project"
local host_kernel=$(uname -r)
local host_kernel
host_kernel=$(uname -r)
info "Test successful:\n"
@@ -749,6 +794,8 @@ handle_args()
r) cleanup="false" ;;
t) disable_test="true" ;;
T) only_run_test="true" ;;
*) die "invalid option: '$opt'" ;;
esac
done