deps: bumping yq to v4.40.7

Since yq frequently updates, let's upgrade to a version from February to
bypass potential issues with versions 4.41-4.43 for now. We can always
upgrade to the newest version if necessary.

Fixes #9354
Depends-on: github.com/kata-containers/tests#5818

Signed-off-by: Beraldo Leal <bleal@redhat.com>
This commit is contained in:
Beraldo Leal
2024-05-31 13:24:10 -04:00
parent 4f6732595d
commit c99ba42d62
47 changed files with 370 additions and 470 deletions

View File

@@ -48,7 +48,7 @@ merge_yaml()
[ -n "$out" ] || die "need output file"
need_yq
yq merge "$file1" --append "$file2" > "$out"
yq eval-all '. as $item ireduce ({}; . *+ $item)' "$file1" "$file2" > "$out"
}
check_yaml()
@@ -58,7 +58,7 @@ check_yaml()
[ -n "$file" ] || die "need file to check"
need_yq
yq read "$file" >/dev/null
yq "$file" >/dev/null
[ -z "$(command -v yamllint)" ] && die "need yamllint installed"

View File

@@ -497,11 +497,19 @@ function ensure_yq() {
# dependency: What we want to get the version from the versions.yaml file
function get_from_kata_deps() {
local dependency="$1"
versions_file="${repo_root_dir}/versions.yaml"
command -v yq &>/dev/null || die 'yq command is not in your $PATH'
result=$("yq" read -X "$versions_file" "$dependency")
yq_version=$(yq --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | cut -d. -f1)
if [ "$yq_version" -eq 3 ]; then
dependency=$(echo "$1" | sed "s/^\.//g")
result=$("yq" read "$versions_file" "$dependency")
else
dependency=$1
result=$("yq" "$dependency | explode (.)" "$versions_file")
fi
[ "$result" = "null" ] && result=""
echo "$result"
}
@@ -743,7 +751,7 @@ function get_dep_from_yaml_db(){
"${repo_root_dir}/ci/install_yq.sh" >&2
result=$("${GOPATH}/bin/yq" r -X "$versions_file" "$dependency")
result=$("${GOPATH}/bin/yq" "$dependency" "$versions_file")
[ "$result" = "null" ] && result=""
echo "$result"
}
@@ -759,7 +767,7 @@ function get_test_version(){
db="${cidir}/../versions.yaml"
get_dep_from_yaml_db "${db}" "${dependency}"
get_dep_from_yaml_db "${db}" ".${dependency}"
}
# Load vhost, vhost_net, vhost_vsock modules.

View File

@@ -29,40 +29,30 @@ setup() {
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable debug for Kata Containers
yq write -i \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[1].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[1].value = "true"' \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Create the runtime class only for the shim that's being tested
yq write -i \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[2].value = \"${KATA_HYPERVISOR}\"" \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Set the tested hypervisor as the default `kata` shim
yq write -i \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[3].value = \"${KATA_HYPERVISOR}\"" \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[4].value = "true"' \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[5].value = "true"' \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
yq write -i \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[+].name' \
"HOST_OS"
yq write -i \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[-1].value' \
"${KATA_HOST_OS}"
yq -i \
".spec.template.spec.containers[0].env += [{\"name\": \"HOST_OS\", \"value\": \"${KATA_HOST_OS}\"}]" \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
echo "::group::Final kata-deploy.yaml that is used in the test"
@@ -144,25 +134,21 @@ teardown() {
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i \
"tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[4].value = "true"' \
"tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Create the runtime class only for the shim that's being tested
yq write -i \
"tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[2].value = \"${KATA_HYPERVISOR}\"" \
"tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Set the tested hypervisor as the default `kata` shim
yq write -i \
"tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[3].value = \"${KATA_HYPERVISOR}\"" \
"tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[5].value = "true"' \
"tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"

View File

@@ -36,15 +36,15 @@ function install_dependencies() {
# - cri-tools
# - containerd
# - cri-container-cni release tarball already includes CNI plugins
cri_tools_version=$(get_from_kata_deps "externals.critools.latest")
cri_tools_version=$(get_from_kata_deps ".externals.critools.latest")
declare -a github_deps
github_deps[0]="cri_tools:${cri_tools_version}"
case "${CONTAINER_ENGINE}" in
containerd)
github_deps[1]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
github_deps[1]="cri_containerd:$(get_from_kata_deps ".externals.containerd.${CONTAINERD_VERSION}")"
;;
crio)
github_deps[1]="cni_plugins:$(get_from_kata_deps "externals.cni-plugins.version")"
github_deps[1]="cni_plugins:$(get_from_kata_deps ".externals.cni-plugins.version")"
;;
esac

View File

@@ -174,8 +174,8 @@ ${environment}
pushd /workspace
source tests/common.bash
ensure_yq
cri_containerd=\$(get_from_kata_deps "externals.containerd.lts")
cri_tools=\$(get_from_kata_deps "externals.critools.latest")
cri_containerd=\$(get_from_kata_deps ".externals.containerd.lts")
cri_tools=\$(get_from_kata_deps ".externals.critools.latest")
install_cri_containerd \${cri_containerd}
install_cri_tools \${cri_tools}

View File

@@ -127,9 +127,9 @@ function install_kustomize() {
fi
ensure_yq
version=$(get_from_kata_deps "externals.kustomize.version")
version=$(get_from_kata_deps ".externals.kustomize.version")
arch=$(arch_to_golang)
checksum=$(get_from_kata_deps "externals.kustomize.checksum.${arch}")
checksum=$(get_from_kata_deps ".externals.kustomize.checksum.${arch}")
local tarball="kustomize_${version}_linux_${arch}.tar.gz"
curl -Lf -o "$tarball" "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${version}/${tarball}"

View File

@@ -67,7 +67,7 @@ shift $(( $OPTIND - 1 ))
go_version="${1:-""}"
if [ -z "$go_version" ] && [ "${USE_VERSIONS_FILE}" = "true" ] ;then
go_version=$(get_from_kata_deps "languages.golang.meta.newest-version")
go_version=$(get_from_kata_deps ".languages.golang.meta.newest-version")
fi
if [ -z "$go_version" ];then

View File

@@ -17,7 +17,7 @@ rustarch=$(arch_to_rust)
version="${1:-""}"
if [ -z "${version}" ]; then
version=$(get_from_kata_deps "languages.rust.meta.newest-version")
version=$(get_from_kata_deps ".languages.rust.meta.newest-version")
fi
echo "Install rust ${version}"

View File

@@ -43,8 +43,8 @@ function install_dependencies() {
# - cri-container-cni release tarball already includes CNI plugins
# - cri-tools
declare -a github_deps
github_deps[0]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
github_deps[1]="cri_tools:$(get_from_kata_deps "externals.critools.latest")"
github_deps[0]="cri_containerd:$(get_from_kata_deps ".externals.containerd.${CONTAINERD_VERSION}")"
github_deps[1]="cri_tools:$(get_from_kata_deps ".externals.critools.latest")"
for github_dep in "${github_deps[@]}"; do
IFS=":" read -r -a dep <<< "${github_dep}"
@@ -53,7 +53,7 @@ function install_dependencies() {
# Clone containerd as we'll need to build it in order to run the tests
# base_version: The version to be installed in the ${major}.${minor} format
clone_cri_containerd $(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")
clone_cri_containerd $(get_from_kata_deps ".externals.containerd.${CONTAINERD_VERSION}")
}
function run() {

View File

@@ -149,7 +149,7 @@ kbs_install_cli() {
# Minimum required version to build the client (read from versions.yaml)
local rust_version
ensure_yq
rust_version=$(get_from_kata_deps "externals.coco-trustee.toolchain")
rust_version=$(get_from_kata_deps ".externals.coco-trustee.toolchain")
# Currently kata version from version.yaml is 1.72.0
# which doesn't match the requirement, so let's pass
# the required version.
@@ -205,10 +205,10 @@ function kbs_k8s_deploy() {
ensure_yq
# Read from versions.yaml
repo=$(get_from_kata_deps "externals.coco-trustee.url")
version=$(get_from_kata_deps "externals.coco-trustee.version")
image=$(get_from_kata_deps "externals.coco-trustee.image")
image_tag=$(get_from_kata_deps "externals.coco-trustee.image_tag")
repo=$(get_from_kata_deps ".externals.coco-trustee.url")
version=$(get_from_kata_deps ".externals.coco-trustee.version")
image=$(get_from_kata_deps ".externals.coco-trustee.image")
image_tag=$(get_from_kata_deps ".externals.coco-trustee.image_tag")
# The ingress handler for AKS relies on the cluster's name which in turn
# contain the HEAD commit of the kata-containers repository (supposedly the

View File

@@ -26,7 +26,7 @@ main()
${repo_root_dir}/ci/install_yq.sh > /dev/null
fi
local K8S_SKIP_UNION=$("${GOPATH_LOCAL}/bin/yq" read "${K8S_CONFIG_FILE}" "${K8S_FILTER_FLAG}")
local K8S_SKIP_UNION=$("${GOPATH_LOCAL}/bin/yq" ".${K8S_FILTER_FLAG}" "${K8S_CONFIG_FILE}")
[ "${K8S_SKIP_UNION}" == "null" ] && return
mapfile -t _K8S_SKIP_UNION <<< "${K8S_SKIP_UNION}"

View File

@@ -160,75 +160,59 @@ function deploy_kata() {
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable debug for Kata Containers
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[1].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[1].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Create the runtime class only for the shim that's being tested
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[2].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Set the tested hypervisor as the default `kata` shim
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[3].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[4].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[5].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable 'default_vcpus' hypervisor annotation
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"default_vcpus"
yq -i \
'.spec.template.spec.containers[0].env[6].value = "default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
if [ -n "${SNAPSHOTTER}" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[7].value' \
"${KATA_HYPERVISOR}:${SNAPSHOTTER}"
yq -i \
".spec.template.spec.containers[0].env[7].value = \"${KATA_HYPERVISOR}:${SNAPSHOTTER}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"initrd kernel default_vcpus"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[+].name' \
"HOST_OS"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[-1].value' \
"${KATA_HOST_OS}"
yq -i \
'.spec.template.spec.containers[0].env[6].value = "initrd kernel default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i \
".spec.template.spec.containers[0].env += [{\"name\": \"HOST_OS\", \"value\": \"${KATA_HOST_OS}\"}]" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
if [ "${KATA_HYPERVISOR}" = "qemu" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"image initrd kernel default_vcpus"
yq -i \
'.spec.template.spec.containers[0].env[6].value = "image initrd kernel default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
if [ "${KATA_HYPERVISOR}" = "qemu-tdx" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[8].value' \
"${HTTPS_PROXY}"
yq -i \
".spec.template.spec.containers[0].env[8].value = \"${HTTPS_PROXY}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[9].value' \
"${NO_PROXY}"
yq -i \
".spec.template.spec.containers[0].env[9].value = \"${NO_PROXY}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
echo "::group::Final kata-deploy.yaml that is used in the test"
@@ -415,25 +399,21 @@ function cleanup_kata_deploy() {
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[4].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Create the runtime class only for the shim that's being tested
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[2].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Set the tested hypervisor as the default `kata` shim
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[3].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[5].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
@@ -496,8 +476,8 @@ function deploy_nydus_snapshotter() {
rm -rf "${nydus_snapshotter_install_dir}"
fi
mkdir -p "${nydus_snapshotter_install_dir}"
nydus_snapshotter_url=$(get_from_kata_deps "externals.nydus-snapshotter.url")
nydus_snapshotter_version=$(get_from_kata_deps "externals.nydus-snapshotter.version")
nydus_snapshotter_url=$(get_from_kata_deps ".externals.nydus-snapshotter.url")
nydus_snapshotter_version=$(get_from_kata_deps ".externals.nydus-snapshotter.version")
git clone -b "${nydus_snapshotter_version}" "${nydus_snapshotter_url}" "${nydus_snapshotter_install_dir}"
pushd "$nydus_snapshotter_install_dir"
@@ -506,36 +486,31 @@ function deploy_nydus_snapshotter() {
fi
if [ "${PULL_TYPE}" == "guest-pull" ]; then
# Enable guest pull feature in nydus snapshotter
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.FS_DRIVER' \
"proxy" --style=double
yq -i \
'select(.kind == "ConfigMap").data.FS_DRIVER = "proxy"' \
misc/snapshotter/base/nydus-snapshotter.yaml
else
>&2 echo "Invalid pull type"; exit 2
fi
# Disable to read snapshotter config from configmap
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_CONFIG_FROM_VOLUME' \
"false" --style=double
yq -i \
'select(.kind == "ConfigMap").data.ENABLE_CONFIG_FROM_VOLUME = "false"' \
misc/snapshotter/base/nydus-snapshotter.yaml
# Enable to run snapshotter as a systemd service
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_SYSTEMD_SERVICE' \
"true" --style=double
yq -i \
'select(.kind == "ConfigMap").data.ENABLE_SYSTEMD_SERVICE = "true"' \
misc/snapshotter/base/nydus-snapshotter.yaml
# Enable "runtime specific snapshotter" feature in containerd when configuring containerd for snapshotter
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_RUNTIME_SPECIFIC_SNAPSHOTTER' \
"true" --style=double
yq -i \
'select(.kind == "ConfigMap").data.ENABLE_RUNTIME_SPECIFIC_SNAPSHOTTER = "true"' \
misc/snapshotter/base/nydus-snapshotter.yaml
# Pin the version of nydus-snapshotter image.
# TODO: replace with a definitive solution (see https://github.com/kata-containers/kata-containers/issues/9742)
yq write -i -d 1 \
misc/snapshotter/base/nydus-snapshotter.yaml \
'spec.template.spec.containers[0].image' \
"ghcr.io/containerd/nydus-snapshotter:${nydus_snapshotter_version}" --style=double
yq -i \
"select(.kind == \"DaemonSet\").spec.template.spec.containers[0].image = \"ghcr.io/containerd/nydus-snapshotter:${nydus_snapshotter_version}\"" \
misc/snapshotter/base/nydus-snapshotter.yaml
# Deploy nydus snapshotter as a daemonset
kubectl create -f "misc/snapshotter/nydus-snapshotter-rbac.yaml"

View File

@@ -19,9 +19,9 @@ setup() {
@test "Kubectl exec rejected by policy" {
# Add to the YAML file a policy that rejects ExecProcessRequest.
yq write -i "${pod_yaml}" \
'metadata.annotations."io.katacontainers.config.agent.policy"' \
"${allow_all_except_exec_policy}"
yq -i \
".metadata.annotations.\"io.katacontainers.config.agent.policy\" = \"${allow_all_except_exec_policy}\"" \
"${pod_yaml}"
# Create the pod
kubectl create -f "${pod_yaml}"

View File

@@ -69,103 +69,64 @@ test_job_policy_error() {
@test "Policy failure: unexpected environment variable" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
'spec.template.spec.containers[0].env.[+].name' unexpected_variable
yq write -i \
"${incorrect_yaml}" \
'spec.template.spec.containers[0].env.[-1].value' unexpected_value
yq -i \
'.spec.template.spec.containers[0].env += [{"name": "unexpected_variable", "value": "unexpected_value"}]' \
"${incorrect_yaml}"
test_job_policy_error
}
@test "Policy failure: unexpected command line argument" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].args[+]" \
"unexpected_arg"
yq -i \
'.spec.template.spec.containers[0].args += ["unexpected_arg"]' \
"${incorrect_yaml}"
test_job_policy_error
}
@test "Policy failure: unexpected emptyDir volume" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[+].mountPath" \
"/unexpected1"
yq -i \
'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/unexpected1", "name": "unexpected-volume1"}]' \
"${incorrect_yaml}"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[-1].name" \
"unexpected-volume1"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes[+].name" \
"unexpected-volume1"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes[-1].emptyDir.medium" \
"Memory"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes[-1].emptyDir.sizeLimit" \
"50M"
yq -i \
'.spec.template.spec.volumes += [{"name": "unexpected-volume1", "emptyDir": {"medium": "Memory", "sizeLimit": "50M"}}]' \
"${incorrect_yaml}"
test_job_policy_error
}
@test "Policy failure: unexpected projected volume" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[+].mountPath" \
"/test-volume"
yq -i \
'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/test-volume", "name": "test-volume", "readOnly": true}]' \
"${incorrect_yaml}"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[-1].name" \
"test-volume"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[-1].readOnly" \
"true"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[+].name" \
"test-volume"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[-1].projected.defaultMode" \
"420"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[-1].projected.sources.[+].serviceAccountToken.expirationSeconds" \
"3600"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[-1].projected.sources.[-1].serviceAccountToken.path" \
"token"
yq -i '
.spec.template.spec.volumes += [{
"name": "test-volume",
"projected": {
"defaultMode": 420,
"sources": [{
"serviceAccountToken": {
"expirationSeconds": 3600,
"path": "token"
}
}]
}
}]
' "${incorrect_yaml}"
test_job_policy_error
}
@test "Policy failure: unexpected readOnlyRootFilesystem" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem" \
"false"
yq -i \
".spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem = false" \
"${incorrect_yaml}"
test_job_policy_error
}

View File

@@ -49,69 +49,48 @@ test_pod_policy_error() {
@test "Policy failure: unexpected container image" {
# Change the container image after generating the policy. The different image has
# different attributes (e.g., different command line) so the policy will reject it.
yq write -i \
"${incorrect_pod_yaml}" \
"spec.containers[0].image" \
"quay.io/footloose/ubuntu18.04:latest"
yq -i \
'.spec.containers[0].image = "quay.io/footloose/ubuntu18.04:latest"' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected privileged security context" {
# Changing the pod spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_pod_yaml}" \
'spec.containers[0].securityContext.privileged' \
"true"
yq -i \
'.spec.containers[0].securityContext.privileged = true' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected terminationMessagePath" {
# Changing the pod spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_pod_yaml}" \
'spec.containers[0].terminationMessagePath' \
"/dev/termination-custom-log"
yq -i \
'.spec.containers[0].terminationMessagePath = "/dev/termination-custom-log"' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected hostPath volume mount" {
# Changing the pod spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_pod_yaml}" \
"spec.containers[0].volumeMounts.[+].name" \
"mountpoint-dir"
yq -i \
'.spec.containers[0].volumeMounts += [{"name": "mountpoint-dir", "mountPath": "/var/lib/kubelet/pods"}]' \
"${incorrect_pod_yaml}"
yq write -i \
"${incorrect_pod_yaml}" \
"spec.containers[0].volumeMounts.[-1].mountPath" \
"/var/lib/kubelet/pods"
yq write -i \
"${incorrect_pod_yaml}" \
"spec.volumes.[+].hostPath.path" \
"/var/lib/kubelet/pods"
yq write -i \
"${incorrect_pod_yaml}" \
"spec.volumes.[-1].hostPath.type" \
"DirectoryOrCreate"
yq write -i \
"${incorrect_pod_yaml}" \
"spec.volumes.[-1].name" \
"mountpoint-dir"
yq -i \
'.spec.volumes += [{"hostPath": {"path": "/var/lib/kubelet/pods", "type": "DirectoryOrCreate"}, "name": "mountpoint-dir"}]' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected config map" {
yq write -i \
"${incorrect_configmap_yaml}" \
'data.data-2' \
"foo"
yq -i \
'.data.data-2 = "foo"' \
"${incorrect_configmap_yaml}"
# These commands are different from the test_pod_policy_error() commands above
# because in this case an incorrect config map spec is used.
@@ -123,15 +102,13 @@ test_pod_policy_error() {
@test "Policy failure: unexpected lifecycle.postStart.exec.command" {
# Add a postStart command after generating the policy and verify that the post
# start hook command gets blocked by policy.
yq write -i \
"${incorrect_pod_yaml}" \
'spec.containers[0].lifecycle.postStart.exec.command.[+]' \
"echo"
yq -i \
'.spec.containers[0].lifecycle.postStart.exec.command += ["echo"]' \
"${incorrect_pod_yaml}"
yq write -i \
"${incorrect_pod_yaml}" \
'spec.containers[0].lifecycle.postStart.exec.command.[+]' \
"hello"
yq -i \
'.spec.containers[0].lifecycle.postStart.exec.command += ["hello"]' \
"${incorrect_pod_yaml}"
kubectl create -f "${correct_configmap_yaml}"
kubectl create -f "${incorrect_pod_yaml}"

View File

@@ -48,7 +48,7 @@ test_rc_policy() {
# Create replication controller
if [ "${expect_denied_create_container}" = "true" ]; then
kubectl create -f "${incorrect_yaml}"
else
else
kubectl create -f "${correct_yaml}"
fi
@@ -92,65 +92,49 @@ test_rc_policy() {
@test "Policy failure: unexpected container command" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].command.[+]" \
"ls"
yq -i \
'.spec.template.spec.containers[0].command += ["ls"]' \
"${incorrect_yaml}"
test_rc_policy true
}
@test "Policy failure: unexpected volume mountPath" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts[0].mountPath" \
"/host/unexpected"
yq -i \
'.spec.template.spec.containers[0].volumeMounts[0].mountPath = "/host/unexpected"' \
"${incorrect_yaml}"
test_rc_policy true
}
@test "Policy failure: unexpected host device mapping" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[+].mountPath" \
"/dev/ttyS0"
yq -i \
'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/dev/ttyS0", "name": "dev-ttys0"}]' \
"${incorrect_yaml}"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[-1].name" \
"dev-ttys0"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[+].name" \
"dev-ttys0"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[-1].hostPath.path" \
"/dev/ttyS0"
yq -i \
'.spec.template.spec.volumes += [{"name": "dev-ttys0", "hostPath": {"path": "/dev/ttyS0"}}]' \
"${incorrect_yaml}"
test_rc_policy true
}
@test "Policy failure: unexpected securityContext.allowPrivilegeEscalation" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation" \
"false"
yq -i \
'.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation = false' \
"${incorrect_yaml}"
test_rc_policy true
}
@test "Policy failure: unexpected capability" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].securityContext.capabilities.add.[+]" \
"CAP_SYS_CHROOT"
yq -i \
'.spec.template.spec.containers[0].securityContext.capabilities.add += ["CAP_SYS_CHROOT"]' \
"${incorrect_yaml}"
test_rc_policy true
}

View File

@@ -189,7 +189,7 @@ set_metadata_annotation() {
echo "$annotation_key"
# yq set annotations in yaml. Quoting the key because it can have
# dots.
yq write -i --style=double "${yaml}" "${annotation_key}" "${value}"
yq -i ".${annotation_key} = \"${value}\"" "${yaml}"
}
# Set the command for container spec.
@@ -205,10 +205,9 @@ set_container_command() {
shift 2
for command_value in "$@"; do
yq write -i \
"${yaml}" \
"spec.containers[${container_idx}].command[+]" \
--tag '!!str' "${command_value}"
yq -i \
'.spec.containers['"${container_idx}"'].command += ["'"${command_value}"'"]' \
"${yaml}"
done
}
@@ -223,10 +222,9 @@ set_node() {
local node="$2"
[ -n "$node" ] || return 1
yq write -i \
"${yaml}" \
"spec.nodeName" \
"$node"
yq -i \
".spec.nodeName = \"$node\"" \
"${yaml}"
}
# Get the systemd's journal from a worker node

View File

@@ -53,24 +53,26 @@ add_annotations_to_yaml() {
local yaml_file="$1"
local annotation_name="$2"
local annotation_value="$3"
local resource_kind="$(yq read ${yaml_file} kind)"
# Previous versions of yq could not handle multiple objects in a single
# yaml file: by default only the first object was changed.
# With yq>4 we need to make the target object explicit during the read and write.
local resource_kind="$(yq .kind ${yaml_file} | head -1)"
case "${resource_kind}" in
Pod)
info "Adding \"${annotation_name}=${annotation_value}\" to ${resource_kind} from ${yaml_file}"
yq write -i \
"${K8S_TEST_YAML}" \
"metadata.annotations[${annotation_name}]" \
"${annotation_value}"
yq -i \
".metadata.annotations.\"${annotation_name}\" = \"${annotation_value}\"" \
"${K8S_TEST_YAML}"
;;
Deployment|Job|ReplicationController)
info "Adding \"${annotation_name}=${annotation_value}\" to ${resource_kind} from ${yaml_file}"
yq write -i \
"${K8S_TEST_YAML}" \
"spec.template.metadata.annotations[${annotation_name}]" \
"${annotation_value}"
yq -i \
".spec.template.metadata.annotations.\"${annotation_name}\" = \"${annotation_value}\"" \
"${K8S_TEST_YAML}"
;;
List)

View File

@@ -274,22 +274,25 @@ add_allow_all_policy_to_yaml() {
policy_tests_enabled || return 0
local yaml_file="$1"
local resource_kind="$(yq read ${yaml_file} kind)"
# Previous versions of yq could not handle multiple objects in a single
# yaml file: by default only the first object was changed.
# With yq>4 we need to make the target object explicit during the read and write.
local resource_kind="$(yq .kind ${yaml_file} | head -1)"
case "${resource_kind}" in
Pod)
info "Adding allow all policy to ${resource_kind} from ${yaml_file}"
ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY}" yq write -i "${yaml_file}" \
'metadata.annotations."io.katacontainers.config.agent.policy"' \
"${ALLOW_ALL_POLICY}"
ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY}" yq -i \
".metadata.annotations.\"io.katacontainers.config.agent.policy\" = \"${ALLOW_ALL_POLICY}\"" \
"${yaml_file}"
;;
Deployment|Job|ReplicationController)
info "Adding allow all policy to ${resource_kind} from ${yaml_file}"
ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY}" yq write -i "${yaml_file}" \
'spec.template.metadata.annotations."io.katacontainers.config.agent.policy"' \
"${ALLOW_ALL_POLICY}"
ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY}" yq -i \
".spec.template.metadata.annotations.\"io.katacontainers.config.agent.policy\" = \"${ALLOW_ALL_POLICY}\"" \
"${yaml_file}"
;;
List)

View File

@@ -35,10 +35,10 @@ function install_dependencies() {
# - nydus
# - nydus-snapshotter
declare -a github_deps
github_deps[0]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
github_deps[1]="cri_tools:$(get_from_kata_deps "externals.critools.latest")"
github_deps[2]="nydus:$(get_from_kata_deps "externals.nydus.version")"
github_deps[3]="nydus_snapshotter:$(get_from_kata_deps "externals.nydus-snapshotter.version")"
github_deps[0]="cri_containerd:$(get_from_kata_deps ".externals.containerd.${CONTAINERD_VERSION}")"
github_deps[1]="cri_tools:$(get_from_kata_deps ".externals.critools.latest")"
github_deps[2]="nydus:$(get_from_kata_deps ".externals.nydus.version")"
github_deps[3]="nydus_snapshotter:$(get_from_kata_deps ".externals.nydus-snapshotter.version")"
for github_dep in "${github_deps[@]}"; do
IFS=":" read -r -a dep <<< "${github_dep}"

View File

@@ -33,7 +33,7 @@ function install_dependencies() {
# - containerd
# - cri-container-cni release tarball already includes CNI plugins
declare -a github_deps
github_deps[0]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
github_deps[0]="cri_containerd:$(get_from_kata_deps ".externals.containerd.${CONTAINERD_VERSION}")"
for github_dep in "${github_deps[@]}"; do
IFS=":" read -r -a dep <<< "${github_dep}"

View File

@@ -173,7 +173,7 @@ function init() {
fi
versions_file="${cidir}/../../versions.yaml"
nginx_version=$("${GOPATH}/bin/yq" read "$versions_file" "docker_images.nginx.version")
nginx_version=$("${GOPATH}/bin/yq" ".docker_images.nginx.version" "$versions_file")
nginx_image="docker.io/library/nginx:$nginx_version"
# Pull nginx image