deps: bumping yq to v4.40.7

Since yq frequently updates, let's upgrade to a version from February to
bypass potential issues with versions 4.41-4.43 for now. We can always
upgrade to the newest version if necessary.

Fixes #9354
Depends-on: github.com/kata-containers/tests#5818

Signed-off-by: Beraldo Leal <bleal@redhat.com>
This commit is contained in:
Beraldo Leal
2024-05-31 13:24:10 -04:00
parent 4f6732595d
commit c99ba42d62
47 changed files with 370 additions and 470 deletions

View File

@@ -149,7 +149,7 @@ kbs_install_cli() {
# Minimum required version to build the client (read from versions.yaml)
local rust_version
ensure_yq
rust_version=$(get_from_kata_deps "externals.coco-trustee.toolchain")
rust_version=$(get_from_kata_deps ".externals.coco-trustee.toolchain")
# Currently kata version from version.yaml is 1.72.0
# which doesn't match the requirement, so let's pass
# the required version.
@@ -205,10 +205,10 @@ function kbs_k8s_deploy() {
ensure_yq
# Read from versions.yaml
repo=$(get_from_kata_deps "externals.coco-trustee.url")
version=$(get_from_kata_deps "externals.coco-trustee.version")
image=$(get_from_kata_deps "externals.coco-trustee.image")
image_tag=$(get_from_kata_deps "externals.coco-trustee.image_tag")
repo=$(get_from_kata_deps ".externals.coco-trustee.url")
version=$(get_from_kata_deps ".externals.coco-trustee.version")
image=$(get_from_kata_deps ".externals.coco-trustee.image")
image_tag=$(get_from_kata_deps ".externals.coco-trustee.image_tag")
# The ingress handler for AKS relies on the cluster's name which in turn
# contain the HEAD commit of the kata-containers repository (supposedly the

View File

@@ -26,7 +26,7 @@ main()
${repo_root_dir}/ci/install_yq.sh > /dev/null
fi
local K8S_SKIP_UNION=$("${GOPATH_LOCAL}/bin/yq" read "${K8S_CONFIG_FILE}" "${K8S_FILTER_FLAG}")
local K8S_SKIP_UNION=$("${GOPATH_LOCAL}/bin/yq" ".${K8S_FILTER_FLAG}" "${K8S_CONFIG_FILE}")
[ "${K8S_SKIP_UNION}" == "null" ] && return
mapfile -t _K8S_SKIP_UNION <<< "${K8S_SKIP_UNION}"

View File

@@ -160,75 +160,59 @@ function deploy_kata() {
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable debug for Kata Containers
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[1].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[1].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Create the runtime class only for the shim that's being tested
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[2].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Set the tested hypervisor as the default `kata` shim
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[3].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[4].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[5].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable 'default_vcpus' hypervisor annotation
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"default_vcpus"
yq -i \
'.spec.template.spec.containers[0].env[6].value = "default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
if [ -n "${SNAPSHOTTER}" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[7].value' \
"${KATA_HYPERVISOR}:${SNAPSHOTTER}"
yq -i \
".spec.template.spec.containers[0].env[7].value = \"${KATA_HYPERVISOR}:${SNAPSHOTTER}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"initrd kernel default_vcpus"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[+].name' \
"HOST_OS"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[-1].value' \
"${KATA_HOST_OS}"
yq -i \
'.spec.template.spec.containers[0].env[6].value = "initrd kernel default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i \
".spec.template.spec.containers[0].env += [{\"name\": \"HOST_OS\", \"value\": \"${KATA_HOST_OS}\"}]" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
if [ "${KATA_HYPERVISOR}" = "qemu" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"image initrd kernel default_vcpus"
yq -i \
'.spec.template.spec.containers[0].env[6].value = "image initrd kernel default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
if [ "${KATA_HYPERVISOR}" = "qemu-tdx" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[8].value' \
"${HTTPS_PROXY}"
yq -i \
".spec.template.spec.containers[0].env[8].value = \"${HTTPS_PROXY}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[9].value' \
"${NO_PROXY}"
yq -i \
".spec.template.spec.containers[0].env[9].value = \"${NO_PROXY}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
echo "::group::Final kata-deploy.yaml that is used in the test"
@@ -415,25 +399,21 @@ function cleanup_kata_deploy() {
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[4].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Create the runtime class only for the shim that's being tested
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[2].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Set the tested hypervisor as the default `kata` shim
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
yq -i \
".spec.template.spec.containers[0].env[3].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
yq -i \
'.spec.template.spec.containers[0].env[5].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
@@ -496,8 +476,8 @@ function deploy_nydus_snapshotter() {
rm -rf "${nydus_snapshotter_install_dir}"
fi
mkdir -p "${nydus_snapshotter_install_dir}"
nydus_snapshotter_url=$(get_from_kata_deps "externals.nydus-snapshotter.url")
nydus_snapshotter_version=$(get_from_kata_deps "externals.nydus-snapshotter.version")
nydus_snapshotter_url=$(get_from_kata_deps ".externals.nydus-snapshotter.url")
nydus_snapshotter_version=$(get_from_kata_deps ".externals.nydus-snapshotter.version")
git clone -b "${nydus_snapshotter_version}" "${nydus_snapshotter_url}" "${nydus_snapshotter_install_dir}"
pushd "$nydus_snapshotter_install_dir"
@@ -506,36 +486,31 @@ function deploy_nydus_snapshotter() {
fi
if [ "${PULL_TYPE}" == "guest-pull" ]; then
# Enable guest pull feature in nydus snapshotter
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.FS_DRIVER' \
"proxy" --style=double
yq -i \
'select(.kind == "ConfigMap").data.FS_DRIVER = "proxy"' \
misc/snapshotter/base/nydus-snapshotter.yaml
else
>&2 echo "Invalid pull type"; exit 2
fi
# Disable to read snapshotter config from configmap
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_CONFIG_FROM_VOLUME' \
"false" --style=double
yq -i \
'select(.kind == "ConfigMap").data.ENABLE_CONFIG_FROM_VOLUME = "false"' \
misc/snapshotter/base/nydus-snapshotter.yaml
# Enable to run snapshotter as a systemd service
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_SYSTEMD_SERVICE' \
"true" --style=double
yq -i \
'select(.kind == "ConfigMap").data.ENABLE_SYSTEMD_SERVICE = "true"' \
misc/snapshotter/base/nydus-snapshotter.yaml
# Enable "runtime specific snapshotter" feature in containerd when configuring containerd for snapshotter
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_RUNTIME_SPECIFIC_SNAPSHOTTER' \
"true" --style=double
yq -i \
'select(.kind == "ConfigMap").data.ENABLE_RUNTIME_SPECIFIC_SNAPSHOTTER = "true"' \
misc/snapshotter/base/nydus-snapshotter.yaml
# Pin the version of nydus-snapshotter image.
# TODO: replace with a definitive solution (see https://github.com/kata-containers/kata-containers/issues/9742)
yq write -i -d 1 \
misc/snapshotter/base/nydus-snapshotter.yaml \
'spec.template.spec.containers[0].image' \
"ghcr.io/containerd/nydus-snapshotter:${nydus_snapshotter_version}" --style=double
yq -i \
"select(.kind == \"DaemonSet\").spec.template.spec.containers[0].image = \"ghcr.io/containerd/nydus-snapshotter:${nydus_snapshotter_version}\"" \
misc/snapshotter/base/nydus-snapshotter.yaml
# Deploy nydus snapshotter as a daemonset
kubectl create -f "misc/snapshotter/nydus-snapshotter-rbac.yaml"

View File

@@ -19,9 +19,9 @@ setup() {
@test "Kubectl exec rejected by policy" {
# Add to the YAML file a policy that rejects ExecProcessRequest.
yq write -i "${pod_yaml}" \
'metadata.annotations."io.katacontainers.config.agent.policy"' \
"${allow_all_except_exec_policy}"
yq -i \
".metadata.annotations.\"io.katacontainers.config.agent.policy\" = \"${allow_all_except_exec_policy}\"" \
"${pod_yaml}"
# Create the pod
kubectl create -f "${pod_yaml}"

View File

@@ -69,103 +69,64 @@ test_job_policy_error() {
@test "Policy failure: unexpected environment variable" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
'spec.template.spec.containers[0].env.[+].name' unexpected_variable
yq write -i \
"${incorrect_yaml}" \
'spec.template.spec.containers[0].env.[-1].value' unexpected_value
yq -i \
'.spec.template.spec.containers[0].env += [{"name": "unexpected_variable", "value": "unexpected_value"}]' \
"${incorrect_yaml}"
test_job_policy_error
}
@test "Policy failure: unexpected command line argument" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].args[+]" \
"unexpected_arg"
yq -i \
'.spec.template.spec.containers[0].args += ["unexpected_arg"]' \
"${incorrect_yaml}"
test_job_policy_error
}
@test "Policy failure: unexpected emptyDir volume" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[+].mountPath" \
"/unexpected1"
yq -i \
'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/unexpected1", "name": "unexpected-volume1"}]' \
"${incorrect_yaml}"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[-1].name" \
"unexpected-volume1"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes[+].name" \
"unexpected-volume1"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes[-1].emptyDir.medium" \
"Memory"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes[-1].emptyDir.sizeLimit" \
"50M"
yq -i \
'.spec.template.spec.volumes += [{"name": "unexpected-volume1", "emptyDir": {"medium": "Memory", "sizeLimit": "50M"}}]' \
"${incorrect_yaml}"
test_job_policy_error
}
@test "Policy failure: unexpected projected volume" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[+].mountPath" \
"/test-volume"
yq -i \
'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/test-volume", "name": "test-volume", "readOnly": true}]' \
"${incorrect_yaml}"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[-1].name" \
"test-volume"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[-1].readOnly" \
"true"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[+].name" \
"test-volume"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[-1].projected.defaultMode" \
"420"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[-1].projected.sources.[+].serviceAccountToken.expirationSeconds" \
"3600"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[-1].projected.sources.[-1].serviceAccountToken.path" \
"token"
yq -i '
.spec.template.spec.volumes += [{
"name": "test-volume",
"projected": {
"defaultMode": 420,
"sources": [{
"serviceAccountToken": {
"expirationSeconds": 3600,
"path": "token"
}
}]
}
}]
' "${incorrect_yaml}"
test_job_policy_error
}
@test "Policy failure: unexpected readOnlyRootFilesystem" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem" \
"false"
yq -i \
".spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem = false" \
"${incorrect_yaml}"
test_job_policy_error
}

View File

@@ -49,69 +49,48 @@ test_pod_policy_error() {
@test "Policy failure: unexpected container image" {
# Change the container image after generating the policy. The different image has
# different attributes (e.g., different command line) so the policy will reject it.
yq write -i \
"${incorrect_pod_yaml}" \
"spec.containers[0].image" \
"quay.io/footloose/ubuntu18.04:latest"
yq -i \
'.spec.containers[0].image = "quay.io/footloose/ubuntu18.04:latest"' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected privileged security context" {
# Changing the pod spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_pod_yaml}" \
'spec.containers[0].securityContext.privileged' \
"true"
yq -i \
'.spec.containers[0].securityContext.privileged = true' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected terminationMessagePath" {
# Changing the pod spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_pod_yaml}" \
'spec.containers[0].terminationMessagePath' \
"/dev/termination-custom-log"
yq -i \
'.spec.containers[0].terminationMessagePath = "/dev/termination-custom-log"' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected hostPath volume mount" {
# Changing the pod spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_pod_yaml}" \
"spec.containers[0].volumeMounts.[+].name" \
"mountpoint-dir"
yq -i \
'.spec.containers[0].volumeMounts += [{"name": "mountpoint-dir", "mountPath": "/var/lib/kubelet/pods"}]' \
"${incorrect_pod_yaml}"
yq write -i \
"${incorrect_pod_yaml}" \
"spec.containers[0].volumeMounts.[-1].mountPath" \
"/var/lib/kubelet/pods"
yq write -i \
"${incorrect_pod_yaml}" \
"spec.volumes.[+].hostPath.path" \
"/var/lib/kubelet/pods"
yq write -i \
"${incorrect_pod_yaml}" \
"spec.volumes.[-1].hostPath.type" \
"DirectoryOrCreate"
yq write -i \
"${incorrect_pod_yaml}" \
"spec.volumes.[-1].name" \
"mountpoint-dir"
yq -i \
'.spec.volumes += [{"hostPath": {"path": "/var/lib/kubelet/pods", "type": "DirectoryOrCreate"}, "name": "mountpoint-dir"}]' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected config map" {
yq write -i \
"${incorrect_configmap_yaml}" \
'data.data-2' \
"foo"
yq -i \
'.data.data-2 = "foo"' \
"${incorrect_configmap_yaml}"
# These commands are different from the test_pod_policy_error() commands above
# because in this case an incorrect config map spec is used.
@@ -123,15 +102,13 @@ test_pod_policy_error() {
@test "Policy failure: unexpected lifecycle.postStart.exec.command" {
# Add a postStart command after generating the policy and verify that the post
# start hook command gets blocked by policy.
yq write -i \
"${incorrect_pod_yaml}" \
'spec.containers[0].lifecycle.postStart.exec.command.[+]' \
"echo"
yq -i \
'.spec.containers[0].lifecycle.postStart.exec.command += ["echo"]' \
"${incorrect_pod_yaml}"
yq write -i \
"${incorrect_pod_yaml}" \
'spec.containers[0].lifecycle.postStart.exec.command.[+]' \
"hello"
yq -i \
'.spec.containers[0].lifecycle.postStart.exec.command += ["hello"]' \
"${incorrect_pod_yaml}"
kubectl create -f "${correct_configmap_yaml}"
kubectl create -f "${incorrect_pod_yaml}"

View File

@@ -48,7 +48,7 @@ test_rc_policy() {
# Create replication controller
if [ "${expect_denied_create_container}" = "true" ]; then
kubectl create -f "${incorrect_yaml}"
else
else
kubectl create -f "${correct_yaml}"
fi
@@ -92,65 +92,49 @@ test_rc_policy() {
@test "Policy failure: unexpected container command" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].command.[+]" \
"ls"
yq -i \
'.spec.template.spec.containers[0].command += ["ls"]' \
"${incorrect_yaml}"
test_rc_policy true
}
@test "Policy failure: unexpected volume mountPath" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts[0].mountPath" \
"/host/unexpected"
yq -i \
'.spec.template.spec.containers[0].volumeMounts[0].mountPath = "/host/unexpected"' \
"${incorrect_yaml}"
test_rc_policy true
}
@test "Policy failure: unexpected host device mapping" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[+].mountPath" \
"/dev/ttyS0"
yq -i \
'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/dev/ttyS0", "name": "dev-ttys0"}]' \
"${incorrect_yaml}"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].volumeMounts.[-1].name" \
"dev-ttys0"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[+].name" \
"dev-ttys0"
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.volumes.[-1].hostPath.path" \
"/dev/ttyS0"
yq -i \
'.spec.template.spec.volumes += [{"name": "dev-ttys0", "hostPath": {"path": "/dev/ttyS0"}}]' \
"${incorrect_yaml}"
test_rc_policy true
}
@test "Policy failure: unexpected securityContext.allowPrivilegeEscalation" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation" \
"false"
yq -i \
'.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation = false' \
"${incorrect_yaml}"
test_rc_policy true
}
@test "Policy failure: unexpected capability" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq write -i \
"${incorrect_yaml}" \
"spec.template.spec.containers[0].securityContext.capabilities.add.[+]" \
"CAP_SYS_CHROOT"
yq -i \
'.spec.template.spec.containers[0].securityContext.capabilities.add += ["CAP_SYS_CHROOT"]' \
"${incorrect_yaml}"
test_rc_policy true
}

View File

@@ -189,7 +189,7 @@ set_metadata_annotation() {
echo "$annotation_key"
# yq set annotations in yaml. Quoting the key because it can have
# dots.
yq write -i --style=double "${yaml}" "${annotation_key}" "${value}"
yq -i ".${annotation_key} = \"${value}\"" "${yaml}"
}
# Set the command for container spec.
@@ -205,10 +205,9 @@ set_container_command() {
shift 2
for command_value in "$@"; do
yq write -i \
"${yaml}" \
"spec.containers[${container_idx}].command[+]" \
--tag '!!str' "${command_value}"
yq -i \
'.spec.containers['"${container_idx}"'].command += ["'"${command_value}"'"]' \
"${yaml}"
done
}
@@ -223,10 +222,9 @@ set_node() {
local node="$2"
[ -n "$node" ] || return 1
yq write -i \
"${yaml}" \
"spec.nodeName" \
"$node"
yq -i \
".spec.nodeName = \"$node\"" \
"${yaml}"
}
# Get the systemd's journal from a worker node

View File

@@ -53,24 +53,26 @@ add_annotations_to_yaml() {
local yaml_file="$1"
local annotation_name="$2"
local annotation_value="$3"
local resource_kind="$(yq read ${yaml_file} kind)"
# Previous versions of yq were not able to handle multiple objects in a single YAML file.
# By default, only the first object was changed.
# With yq>4 we need to make it explicit during the read and write.
local resource_kind="$(yq .kind ${yaml_file} | head -1)"
case "${resource_kind}" in
Pod)
info "Adding \"${annotation_name}=${annotation_value}\" to ${resource_kind} from ${yaml_file}"
yq write -i \
"${K8S_TEST_YAML}" \
"metadata.annotations[${annotation_name}]" \
"${annotation_value}"
yq -i \
".metadata.annotations.\"${annotation_name}\" = \"${annotation_value}\"" \
"${K8S_TEST_YAML}"
;;
Deployment|Job|ReplicationController)
info "Adding \"${annotation_name}=${annotation_value}\" to ${resource_kind} from ${yaml_file}"
yq write -i \
"${K8S_TEST_YAML}" \
"spec.template.metadata.annotations[${annotation_name}]" \
"${annotation_value}"
yq -i \
".spec.template.metadata.annotations.\"${annotation_name}\" = \"${annotation_value}\"" \
"${K8S_TEST_YAML}"
;;
List)

View File

@@ -274,22 +274,25 @@ add_allow_all_policy_to_yaml() {
policy_tests_enabled || return 0
local yaml_file="$1"
local resource_kind="$(yq read ${yaml_file} kind)"
# Previous versions of yq were not able to handle multiple objects in a single YAML file.
# By default, only the first object was changed.
# With yq>4 we need to make it explicit during the read and write.
local resource_kind="$(yq .kind ${yaml_file} | head -1)"
case "${resource_kind}" in
Pod)
info "Adding allow all policy to ${resource_kind} from ${yaml_file}"
ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY}" yq write -i "${yaml_file}" \
'metadata.annotations."io.katacontainers.config.agent.policy"' \
"${ALLOW_ALL_POLICY}"
ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY}" yq -i \
".metadata.annotations.\"io.katacontainers.config.agent.policy\" = \"${ALLOW_ALL_POLICY}\"" \
"${yaml_file}"
;;
Deployment|Job|ReplicationController)
info "Adding allow all policy to ${resource_kind} from ${yaml_file}"
ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY}" yq write -i "${yaml_file}" \
'spec.template.metadata.annotations."io.katacontainers.config.agent.policy"' \
"${ALLOW_ALL_POLICY}"
ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY}" yq -i \
".spec.template.metadata.annotations.\"io.katacontainers.config.agent.policy\" = \"${ALLOW_ALL_POLICY}\"" \
"${yaml_file}"
;;
List)