Compare commits

..

2 Commits

Author SHA1 Message Date
Alex Lyn
8e6fe45e9e tests: just debug it
debug it with special settings

Signed-off-by: Alex Lyn <alex.lyn@antgroup.com>
2025-11-26 10:52:24 +08:00
Alex Lyn
7489e93298 tests: Enable stability tests for runtime-rs
Test it

Signed-off-by: Alex Lyn <alex.lyn@antgroup.com>
2025-11-26 10:52:24 +08:00
71 changed files with 292 additions and 627 deletions

View File

@@ -25,7 +25,6 @@ self-hosted-runner:
- ppc64le-k8s
- ppc64le-small
- ubuntu-24.04-ppc64le
- ubuntu-24.04-s390x
- metrics
- riscv-builder
- sev-snp

View File

@@ -12,12 +12,7 @@ name: Build checks
jobs:
check:
name: check
runs-on: >-
${{
( contains(inputs.instance, 's390x') && matrix.component.name == 'runtime' ) && 's390x' ||
( contains(inputs.instance, 'ppc64le') && (matrix.component.name == 'runtime' || matrix.component.name == 'agent') ) && 'ppc64le' ||
inputs.instance
}}
runs-on: ${{ matrix.runner || inputs.instance }}
strategy:
fail-fast: false
matrix:
@@ -75,6 +70,36 @@ jobs:
- protobuf-compiler
instance:
- ${{ inputs.instance }}
include:
- component:
name: runtime
path: src/runtime
needs:
- golang
- XDG_RUNTIME_DIR
instance: ubuntu-24.04-s390x
runner: s390x
- component:
name: runtime
path: src/runtime
needs:
- golang
- XDG_RUNTIME_DIR
instance: ubuntu-24.04-ppc64le
runner: ppc64le
- component:
name: agent
path: src/agent
needs:
- rust
- libdevmapper
- libseccomp
- protobuf-compiler
- clang
instance: ubuntu-24.04-ppc64le
runner: ppc64le
steps:
- name: Adjust a permission for repo

View File

@@ -171,8 +171,6 @@ jobs:
- rootfs-image
- rootfs-image-confidential
- rootfs-image-mariner
- rootfs-image-nvidia-gpu
- rootfs-image-nvidia-gpu-confidential
- rootfs-initrd
- rootfs-initrd-confidential
- rootfs-initrd-nvidia-gpu

View File

@@ -150,7 +150,6 @@ jobs:
matrix:
asset:
- rootfs-image
- rootfs-image-nvidia-gpu
- rootfs-initrd
- rootfs-initrd-nvidia-gpu
steps:

View File

@@ -32,7 +32,7 @@ jobs:
permissions:
contents: read
packages: write
runs-on: ubuntu-24.04-ppc64le
runs-on: ppc64le-small
strategy:
matrix:
asset:
@@ -89,7 +89,7 @@ jobs:
build-asset-rootfs:
name: build-asset-rootfs
runs-on: ubuntu-24.04-ppc64le
runs-on: ppc64le-small
needs: build-asset
permissions:
contents: read
@@ -170,7 +170,7 @@ jobs:
build-asset-shim-v2:
name: build-asset-shim-v2
runs-on: ubuntu-24.04-ppc64le
runs-on: ppc64le-small
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
permissions:
contents: read
@@ -230,7 +230,7 @@ jobs:
create-kata-tarball:
name: create-kata-tarball
runs-on: ubuntu-24.04-ppc64le
runs-on: ppc64le-small
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
permissions:
contents: read

View File

@@ -32,7 +32,7 @@ permissions: {}
jobs:
build-asset:
name: build-asset
runs-on: ubuntu-24.04-s390x
runs-on: s390x
permissions:
contents: read
packages: write
@@ -257,7 +257,7 @@ jobs:
build-asset-shim-v2:
name: build-asset-shim-v2
runs-on: ubuntu-24.04-s390x
runs-on: s390x
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
permissions:
contents: read
@@ -319,7 +319,7 @@ jobs:
create-kata-tarball:
name: create-kata-tarball
runs-on: ubuntu-24.04-s390x
runs-on: s390x
needs:
- build-asset
- build-asset-rootfs

View File

@@ -147,7 +147,7 @@ jobs:
tag: ${{ inputs.tag }}-s390x
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-24.04-s390x
runner: s390x
arch: s390x
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
@@ -165,7 +165,7 @@ jobs:
tag: ${{ inputs.tag }}-ppc64le
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-24.04-ppc64le
runner: ppc64le-small
arch: ppc64le
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

View File

@@ -10,9 +10,7 @@ on:
- opened
- synchronize
- reopened
- edited
- labeled
- unlabeled
permissions: {}

View File

@@ -31,7 +31,7 @@ jobs:
permissions:
contents: read
packages: write
runs-on: ubuntu-24.04-ppc64le
runs-on: ppc64le-small
steps:
- name: Login to Kata Containers ghcr.io
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0

View File

@@ -35,7 +35,7 @@ jobs:
permissions:
contents: read
packages: write
runs-on: ubuntu-24.04-s390x
runs-on: s390x
steps:
- name: Login to Kata Containers ghcr.io
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0

View File

@@ -142,10 +142,6 @@ jobs:
timeout-minutes: 60
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Report tests
if: always()
run: bash tests/integration/kubernetes/gha-run.sh report-tests
- name: Refresh OIDC token in case access token expired
if: always()
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0

View File

@@ -68,10 +68,6 @@ jobs:
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Report tests
if: always()
run: bash tests/integration/kubernetes/gha-run.sh report-tests
- name: Collect artifacts ${{ matrix.vmm }}
if: always()
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts

View File

@@ -89,11 +89,6 @@ jobs:
run: bash tests/integration/kubernetes/gha-run.sh run-nv-tests
env:
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
- name: Report tests
if: always()
run: bash tests/integration/kubernetes/gha-run.sh report-tests
- name: Collect artifacts ${{ matrix.environment.vmm }}
if: always()
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts

View File

@@ -75,7 +75,3 @@ jobs:
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Report tests
if: always()
run: bash tests/integration/kubernetes/gha-run.sh report-tests

View File

@@ -131,10 +131,6 @@ jobs:
timeout-minutes: 60
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Report tests
if: always()
run: bash tests/integration/kubernetes/gha-run.sh report-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-zvsi

View File

@@ -140,10 +140,6 @@ jobs:
timeout-minutes: 300
run: bash tests/stability/gha-stability-run.sh run-tests
- name: Report tests
if: always()
run: bash tests/integration/kubernetes/gha-run.sh report-tests
- name: Refresh OIDC token in case access token expired
if: always()
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0

View File

@@ -102,10 +102,6 @@ jobs:
- name: Run tests
run: bash tests/functional/kata-deploy/gha-run.sh run-tests
- name: Report tests
if: always()
run: bash tests/integration/kubernetes/gha-run.sh report-tests
- name: Refresh OIDC token in case access token expired
if: always()
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0

View File

@@ -85,7 +85,3 @@ jobs:
- name: Run tests
run: bash tests/functional/kata-deploy/gha-run.sh run-tests
- name: Report tests
if: always()
run: bash tests/integration/kubernetes/gha-run.sh report-tests

View File

@@ -29,7 +29,7 @@ jobs:
matrix:
instance:
- "ubuntu-24.04-arm"
- "ubuntu-24.04-s390x"
- "s390x"
- "ubuntu-24.04-ppc64le"
uses: ./.github/workflows/build-checks.yaml
with:

View File

@@ -83,7 +83,3 @@ Documents that help to understand and contribute to Kata Containers.
If you have a suggestion for how we can improve the
[website](https://katacontainers.io), please raise an issue (or a PR) on
[the repository that holds the source for the website](https://github.com/OpenStackweb/kata-netlify-refresh).
### Toolchain Guidance
* [Toolchain Guidance](./Toochain-Guidance.md)

View File

@@ -1,39 +0,0 @@
# Toolchains
As a community we want to strike a balance between having up-to-date toolchains, to receive the
latest security fixes and to be able to benefit from new features and packages, whilst not being
too bleeding edge and disrupting downstream and other consumers. As a result we have the following
guidelines (note, not hard rules) for our go and rust toolchains that we are attempting to try out:
## Go toolchain
Go is released [every six months](https://go.dev/wiki/Go-Release-Cycle) with support for the
[last two major release versions](https://go.dev/doc/devel/release#policy). We always want to
ensure that we are on a supported version so we receive security fixes. To try and make
things easier for some of our users, we aim to be using the older of the two supported major
versions, unless there is a compelling reason to adopt the newer version.
In practice this means that we bump our major version of the go toolchain every six months to
version (1.x-1) in response to a new version (1.x) coming out, which makes our current version
(1.x-2) no longer supported. We will bump the minor version whenever required to satisfy
dependency updates, or security fixes.
Our go toolchain version is recorded in [`versions.yaml`](../versions.yaml) under
`.languages.golang.version` and should match with the version in our `go.mod` files.
## Rust toolchain
Rust has a [six week](https://doc.rust-lang.org/book/appendix-05-editions.html#:~:text=The%20Rust%20language%20and%20compiler,these%20tiny%20changes%20add%20up.)
release cycle and they only support the latest stable release, so if we wanted to remain on a
supported release we would only ever build with the latest stable and bump every 6 weeks.
However, feedback from our community has indicated that this is a challenge, as downstream consumers
often want to get Rust from their distro or a downstream fork, and these struggle to keep up with
the six-week release schedule. As a result the community has agreed to try out a policy of
"stable-2", where we aim to build with a rust version that is two versions behind the latest stable
version.
In practice this should mean that we bump our rust toolchain every six weeks, to version
1.x-2 when 1.x is released as stable, and we should pick up the latest point release
of that version, if there are any.
The rust-toolchain that we are using is recorded in [`rust-toolchain.toml`](../rust-toolchain.toml).

View File

@@ -186,7 +186,7 @@ base64 = "0.22"
sha2 = "0.10.8"
async-compression = { version = "0.4.22", features = ["tokio", "gzip"] }
container-device-interface = "0.1.1"
container-device-interface = "0.1.0"
[target.'cfg(target_arch = "s390x")'.dependencies]
pv_core = { git = "https://github.com/ibm-s390-linux/s390-tools", rev = "4942504a9a2977d49989a5e5b7c1c8e07dc0fa41", package = "s390_pv_core" }

View File

@@ -401,10 +401,11 @@ impl Handle {
}
if let RouteAttribute::Oif(index) = attribute {
route.device = match self.find_link(LinkFilter::Index(*index)).await {
Ok(link) => link.name(),
Err(_) => String::new(),
};
route.device = self
.find_link(LinkFilter::Index(*index))
.await
.context(format!("error looking up device {index}"))?
.name();
}
}
@@ -1004,6 +1005,10 @@ mod tests {
.expect("Failed to list routes");
assert_ne!(all.len(), 0);
for r in &all {
assert_ne!(r.device.len(), 0);
}
}
#[tokio::test]

View File

@@ -85,6 +85,11 @@ impl ConfigPlugin for CloudHypervisorConfig {
if ch.memory_info.memory_slots == 0 {
ch.memory_info.memory_slots = default::DEFAULT_CH_MEMORY_SLOTS;
}
// Apply factory defaults
if ch.factory.template_path.is_empty() {
ch.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
}
}
Ok(())

View File

@@ -79,6 +79,11 @@ impl ConfigPlugin for DragonballConfig {
if db.memory_info.memory_slots == 0 {
db.memory_info.memory_slots = default::DEFAULT_DRAGONBALL_MEMORY_SLOTS;
}
// Apply factory defaults
if db.factory.template_path.is_empty() {
db.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
}
}
Ok(())
}

View File

@@ -69,6 +69,11 @@ impl ConfigPlugin for FirecrackerConfig {
firecracker.memory_info.default_memory =
default::DEFAULT_FIRECRACKER_MEMORY_SIZE_MB;
}
// Apply factory defaults
if firecracker.factory.template_path.is_empty() {
firecracker.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
}
}
Ok(())

View File

@@ -92,6 +92,7 @@ impl ConfigPlugin for QemuConfig {
qemu.memory_info.memory_slots = default::DEFAULT_QEMU_MEMORY_SLOTS;
}
// Apply factory defaults
if qemu.factory.template_path.is_empty() {
qemu.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
}

View File

@@ -263,6 +263,20 @@ tx_rate_limiter_max_rate = 0
# disable applying SELinux on the VMM process (default false)
disable_selinux = @DEFDISABLESELINUX@
[factory]
# VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and
# agent memory by mapping it readonly. It helps speeding up new container
# creation and saves a lot of memory if there are many kata containers running
# on the same host.
#
# When disabled, new VMs are created from scratch.
#
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
enable_template = false
[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)

View File

@@ -9,33 +9,35 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
pod_name="handlers"
get_pod_config_dir
yaml_file="${pod_config_dir}/test-lifecycle-events.yaml"
# Create yaml
set_nginx_image "${pod_config_dir}/lifecycle-events.yaml" "${yaml_file}"
sed -e "s/\${nginx_version}/${nginx_image}/" \
"${pod_config_dir}/lifecycle-events.yaml" > "${yaml_file}"
# Add policy to yaml
policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"
display_message="cat /usr/share/message"
exec_command=(sh -c "${display_message}")
add_exec_to_policy_settings "${policy_settings_dir}" "${exec_command[@]}"
add_requests_to_policy_settings "${policy_settings_dir}" "ReadStreamRequest"
auto_generate_policy "${policy_settings_dir}" "${yaml_file}"
}
@test "Running with postStart and preStop handlers" {
# Create the pod with postStart and preStop handlers
# kubectl create -f "${yaml_file}"
kubectl create -f "${yaml_file}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Retries
k8s_create_pod_ready "${pod_name}" "${yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Check postStart message
check_postStart=$(kubectl exec $pod_name -- "${exec_command[@]}")

View File

@@ -82,9 +82,8 @@ setup() {
auto_generate_policy "${policy_settings_dir}" "${tmp_pod_yaml}"
# Start the workload.
# kubectl create -f "$tmp_pod_yaml"
# kubectl wait --for condition=ready --timeout=$timeout "pod/${pod_name}"
k8s_create_pod_ready "${pod_name}" "$tmp_pod_yaml"
kubectl create -f "$tmp_pod_yaml"
kubectl wait --for condition=ready --timeout=$timeout "pod/${pod_name}"
# Verify persistent volume claim is bound
kubectl get "pvc/${volume_claim}" | grep "Bound"

View File

@@ -40,10 +40,9 @@ setup() {
@test "Check capabilities of pod" {
# Create pod
# kubectl create -f "${yaml_file}"
kubectl create -f "${yaml_file}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
k8s_create_pod_ready "${pod_name}" "${yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Verify expected capabilities for the running container. Add retry to ensure
# that the container had time to execute:

View File

@@ -34,10 +34,10 @@ setup() {
kubectl get configmaps $config_name -o yaml | grep -q "data-"
# Create a pod that consumes the ConfigMap
# kubectl create -f "${pod_yaml_file}"
kubectl create -f "${pod_yaml_file}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
k8s_create_pod_ready "${pod_name}" "${pod_yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check env
grep_pod_exec_output "${pod_name}" "KUBE_CONFIG_1=value-1" "${exec_command[@]}"

View File

@@ -41,11 +41,10 @@ setup() {
auto_generate_policy "${policy_settings_dir}" "${pod_config}"
# Create pod
# kubectl create -f "${pod_config}"
kubectl create -f "${pod_config}"
# Check pod creation
#kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Retries
k8s_create_pod_ready "${pod_name}" "${pod_config}"
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Create a file
echo "$content" > "$file_name"
@@ -79,11 +78,10 @@ setup() {
auto_generate_policy "${policy_settings_dir}" "${pod_config}"
# Create pod
# kubectl create -f "${pod_config}"
kubectl create -f "${pod_config}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Retries
k8s_create_pod_ready "${pod_name}" "${pod_config}"
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
kubectl logs "$pod_name" || true
kubectl describe pod "$pod_name" || true

View File

@@ -38,11 +38,10 @@ setup() {
@test "Kubectl exec" {
# Create the pod
# kubectl create -f "${test_yaml_file}"
kubectl create -f "${test_yaml_file}"
# Get pod specification
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Retries
k8s_create_pod_ready "${pod_name}" "${test_yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Run commands in Pod
## Cases for -it options

View File

@@ -47,11 +47,10 @@ setup() {
@test "Test readonly volume for pods" {
# Create pod
# kubectl create -f "${test_yaml}"
kubectl create -f "${test_yaml}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Retries
k8s_create_pod_ready "${pod_name}" "${test_yaml}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Validate file volume body inside the pod
file_in_container=$(kubectl exec $pod_name -- "${command[@]}")

View File

@@ -37,11 +37,10 @@ setup() {
kubectl create -f "$configmap_yaml"
# Create pod
# kubectl create -f "${pod_yaml}"
kubectl create -f "${pod_yaml}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Retries
k8s_create_pod_ready "${pod_name}" "${pod_yaml}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Get pod ip
pod_ip=$(kubectl get pod $pod_name --template={{.status.podIP}})

View File

@@ -88,11 +88,7 @@ EOF
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from authenticated image is running"
}
@@ -136,11 +132,7 @@ EOF
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from authenticated image is running"
}

View File

@@ -65,11 +65,7 @@ function setup_kbs_decryption_key() {
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from encrypted image is running"
}

View File

@@ -98,10 +98,7 @@ EOF
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from image security policy is running"
}
@@ -127,10 +124,7 @@ EOF
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from image security policy is running"
}
@@ -156,10 +150,7 @@ EOF
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from image security policy is running"
}
@@ -176,10 +167,7 @@ EOF
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from image security policy is running"
}
@@ -213,10 +201,7 @@ EOF
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from image security policy is running"
}
@@ -249,10 +234,7 @@ EOF
# For debug sake
echo "Pod ${kata_pod}: $(cat ${kata_pod})"
# Default wait timeout is 120
local wait_time=120
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev-runtime-rs ]] && wait_time=300
k8s_create_pod "${kata_pod}" "$wait_time"
k8s_create_pod "${kata_pod}"
echo "Kata pod test-e2e from image security policy is running"
}

View File

@@ -29,9 +29,8 @@ setup() {
}
@test "/dev hostPath volume bind mounts the guest device and skips virtio-fs" {
# kubectl apply -f "${yaml_file}"
# kubectl wait --for=condition=Ready --timeout="${timeout}" pod "${pod_name}"
k8s_create_pod_ready "${pod_name}" "${yaml_file}"
kubectl apply -f "${yaml_file}"
kubectl wait --for=condition=Ready --timeout="${timeout}" pod "${pod_name}"
# Check the mount info.

View File

@@ -26,11 +26,10 @@ setup() {
@test "Kill all processes in container" {
# Create the pod
# kubectl create -f "${yaml_file}"
kubectl create -f "${yaml_file}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Retries
k8s_create_pod_ready "${pod_name}" "${yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Check PID from first container
first_pid_container=$(kubectl exec $pod_name -c $first_container_name \

View File

@@ -27,11 +27,10 @@ setup() {
auto_generate_policy "${pod_config_dir}" "${yaml_file}"
# Create pod
# kubectl create -f "${yaml_file}"
kubectl create -f "${yaml_file}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Retries
k8s_create_pod_ready "${pod_name}" "${yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check liveness probe returns a success code
kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1"
@@ -53,11 +52,10 @@ setup() {
auto_generate_policy "${pod_config_dir}" "${yaml_file}"
# Create pod
# kubectl create -f "${yaml_file}"
kubectl create -f "${yaml_file}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Retries
k8s_create_pod_ready "${pod_name}" "${yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check liveness probe returns a success code
kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1"
@@ -80,11 +78,10 @@ setup() {
auto_generate_policy "${pod_config_dir}" "${yaml_file}"
# Create pod
# kubectl create -f "${yaml_file}"
kubectl create -f "${yaml_file}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Retries
k8s_create_pod_ready "${pod_name}" "${yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check liveness probe returns a success code
kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1"

View File

@@ -11,6 +11,8 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${CONTAINER_RUNTIME}" == "crio" ] && skip "test not working see: https://github.com/kata-containers/kata-containers/issues/10414"
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
busybox_image="quay.io/prometheus/busybox:latest"
deployment="nginx-deployment"
@@ -18,18 +20,18 @@ setup() {
# Create test .yaml
yaml_file="${pod_config_dir}/test-${deployment}.yaml"
set_nginx_image "${pod_config_dir}/${deployment}.yaml" "${yaml_file}"
sed -e "s/\${nginx_version}/${nginx_image}/" \
"${pod_config_dir}/${deployment}.yaml" > "${yaml_file}"
auto_generate_policy "${pod_config_dir}" "${yaml_file}"
}
@test "Verify nginx connectivity between pods" {
# kubectl create -f "${yaml_file}"
# kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment}
# Retries
k8s_create_deployment_ready "${yaml_file}" ${deployment}
kubectl expose deployment/${deployment} ${deployment}
kubectl create -f "${yaml_file}"
kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment}
kubectl expose deployment/${deployment}
busybox_pod="test-nginx"
kubectl run $busybox_pod --restart=Never -it --image="$busybox_image" \

View File

@@ -46,10 +46,10 @@ setup() {
kubectl apply -f "${pod_yaml}"
# Wait for pod to complete successfully (with retry)
kubectl wait --for=jsonpath='{.status.phase}'=Succeeded --timeout="${POD_WAIT_TIMEOUT}" pod "${POD_NAME_CUDA}"
kubectl_retry 10 30 wait --for=jsonpath='{.status.phase}'=Succeeded --timeout="${POD_WAIT_TIMEOUT}" pod "${POD_NAME_CUDA}"
# Get and verify the output contains expected CUDA success message
kubectl logs "${POD_NAME_CUDA}"
kubectl_retry 10 30 logs "${POD_NAME_CUDA}"
output=$(kubectl logs "${POD_NAME_CUDA}")
echo "# CUDA Vector Add Output: ${output}" >&3

View File

@@ -82,10 +82,10 @@ create_inference_pod() {
add_allow_all_policy_to_yaml "${POD_INSTRUCT_YAML}"
kubectl apply -f "${POD_INSTRUCT_YAML}"
kubectl wait --for=condition=Ready --timeout="${POD_READY_TIMEOUT_INSTRUCT}" pod "${POD_NAME_INSTRUCT}"
kubectl_retry 10 30 wait --for=condition=Ready --timeout="${POD_READY_TIMEOUT_INSTRUCT}" pod "${POD_NAME_INSTRUCT}"
# shellcheck disable=SC2030 # Variable is shared via file between BATS tests
kubectl get pod "${POD_NAME_INSTRUCT}" -o jsonpath='{.status.podIP}'
kubectl_retry 10 30 get pod "${POD_NAME_INSTRUCT}" -o jsonpath='{.status.podIP}'
POD_IP_INSTRUCT=$(kubectl get pod "${POD_NAME_INSTRUCT}" -o jsonpath='{.status.podIP}')
[[ -n "${POD_IP_INSTRUCT}" ]]
@@ -98,10 +98,10 @@ create_embedqa_pod() {
add_allow_all_policy_to_yaml "${POD_EMBEDQA_YAML}"
kubectl apply -f "${POD_EMBEDQA_YAML}"
kubectl wait --for=condition=Ready --timeout="${POD_READY_TIMEOUT_EMBEDQA}" pod "${POD_NAME_EMBEDQA}"
kubectl_retry 10 30 wait --for=condition=Ready --timeout="${POD_READY_TIMEOUT_EMBEDQA}" pod "${POD_NAME_EMBEDQA}"
# shellcheck disable=SC2030 # Variable is shared via file between BATS tests
kubectl get pod "${POD_NAME_EMBEDQA}" -o jsonpath='{.status.podIP}'
kubectl_retry 10 30 get pod "${POD_NAME_EMBEDQA}" -o jsonpath='{.status.podIP}'
POD_IP_EMBEDQA=$(kubectl get pod "${POD_NAME_EMBEDQA}" -o jsonpath='{.status.podIP}')
[[ -n "${POD_IP_EMBEDQA}" ]]

View File

@@ -18,11 +18,10 @@ setup() {
@test "Test OOM events for pods" {
# Create pod
# kubectl create -f "${yaml_file}"
kubectl create -f "${yaml_file}"
# Check pod creation
#kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Retries
k8s_create_pod_ready "${pod_name}" "${yaml_file}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check if OOMKilled
container_name=$(kubectl get pod "$pod_name" -o jsonpath='{.status.containerStatuses[0].name}')

View File

@@ -34,11 +34,10 @@ setup() {
kubectl create configmap "$config_name"
# Create a pod that consumes the "empty-config" and "optional-missing-config" ConfigMaps as volumes
# kubectl create -f "${pod_yaml}"
kubectl create -f "${pod_yaml}"
# Check pod creation
# kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Retry for ready pod
k8s_create_pod_ready "$pod_name" "${pod_yaml}"
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check configmap folders exist
kubectl exec $pod_name -- "${exec_empty_command[@]}"

View File

@@ -51,66 +51,22 @@ setup() {
# Common function for all test cases that expect CreateContainer to be blocked by policy.
test_job_policy_error() {
local max_attempts=5
local attempt_num
local sleep_between_attempts=5
# Initiate job creation
kubectl apply -f "${incorrect_yaml}"
for attempt_num in $(seq 1 "${max_attempts}"); do
info "Starting attempt #${attempt_num}"
# Wait for the job to be created
cmd="kubectl describe job ${job_name} | grep SuccessfulCreate"
info "Waiting for: ${cmd}"
waitForProcess "${wait_time}" "${sleep_time}" "${cmd}" || return 1
# Cleanup possible previous resources
kubectl delete -f "${incorrect_yaml}" --ignore-not-found=true --now --timeout=120s
# List the pods that belong to the job
pod_names=$(kubectl get pods "--selector=job-name=${job_name}" --output=jsonpath='{.items[*].metadata.name}')
info "pod_names: ${pod_names}"
# 1. Apply Job
kubectl apply -f "${incorrect_yaml}"
if [ $? -ne 0 ]; then
warn "Failed to apply Job. Retrying..."
continue
fi
# 2. Wait for Job creation event
cmd="kubectl describe job ${job_name} | grep SuccessfulCreate"
info "Waiting for: ${cmd}"
run waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"
if [ "$status" -ne 0 ]; then
warn "waitForProcess FAILED on attempt #${attempt_num}"
continue
fi
# 3. Get pod list
pod_names=$(kubectl get pods "--selector=job-name=${job_name}" --output=jsonpath='{.items[*].metadata.name}')
info "pod_names: ${pod_names}"
if [ -z "${pod_names}" ]; then
warn "No pods found for job. Retrying..."
continue
fi
# 4. Check each pod for blocked CreateContainerRequest
for pod_name in ${pod_names[@]}; do
info "Checking pod: ${pod_name}"
run wait_for_blocked_request "CreateContainerRequest" "${pod_name}"
if [ "$status" -eq 0 ]; then
info "wait_for_blocked_request succeeded for pod ${pod_name} on attempt #${attempt_num}"
return 0
else
warn "wait_for_blocked_request FAILED for pod ${pod_name} on attempt #${attempt_num}"
# We break pod loop, but the attempt will continue
break
fi
done
# Retry if not last attempt
if [ "${attempt_num}" -lt "${max_attempts}" ]; then
info "Retrying in ${sleep_between_attempts} seconds..."
sleep "${sleep_between_attempts}"
fi
# CreateContainerRequest must have been denied by the policy.
for pod_name in ${pod_names[@]}; do
wait_for_blocked_request "CreateContainerRequest" "${pod_name}" || return 1
done
error "Test failed after ${max_attempts} attempts."
return 1
}
@test "Policy failure: unexpected environment variable" {
@@ -120,8 +76,6 @@ test_job_policy_error() {
"${incorrect_yaml}"
test_job_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected command line argument" {
@@ -131,8 +85,6 @@ test_job_policy_error() {
"${incorrect_yaml}"
test_job_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected emptyDir volume" {
@@ -146,8 +98,6 @@ test_job_policy_error() {
"${incorrect_yaml}"
test_job_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected projected volume" {
@@ -172,8 +122,6 @@ test_job_policy_error() {
' "${incorrect_yaml}"
test_job_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected readOnlyRootFilesystem" {
@@ -183,8 +131,6 @@ test_job_policy_error() {
"${incorrect_yaml}"
test_job_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected UID = 222" {
@@ -194,8 +140,6 @@ test_job_policy_error() {
"${incorrect_yaml}"
test_job_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
teardown() {

View File

@@ -130,47 +130,9 @@ create_and_wait_for_pod_ready() {
# Common function for several test cases from this bats script.
test_pod_policy_error() {
local max_attempts=5
local attempt_num
local sleep_between_attempts=5
for attempt_num in $(seq 1 "${max_attempts}"); do
info "Starting attempt #${attempt_num}"
kubectl delete -f "${incorrect_pod_yaml}" --ignore-not-found=true --now --timeout=120s
kubectl delete -f "${correct_configmap_yaml}" --ignore-not-found=true
# Create ConfigMap
kubectl create -f "${correct_configmap_yaml}"
if [ $? -ne 0 ]; then
warn "Failed to create ConfigMap. Retrying..."
continue
fi
# Create the incorrect pod (expected to be blocked)
kubectl create -f "${incorrect_pod_yaml}"
if [ $? -ne 0 ]; then
warn "Failed to create Pod. Retrying..."
continue
fi
# Wait for CreateContainerRequest to be blocked
run wait_for_blocked_request "CreateContainerRequest" "${pod_name}"
if [ "$status" -eq 0 ]; then
info "wait_for_blocked_request succeeded on attempt #${attempt_num}"
return 0
else
warn "wait_for_blocked_request FAILED on attempt #${attempt_num}"
fi
# Retry if not the last attempt
if [ "${attempt_num}" -lt "${max_attempts}" ]; then
info "Retrying in ${sleep_between_attempts} seconds..."
sleep "${sleep_between_attempts}"
fi
done
error "Test failed after ${max_attempts} attempts."
return 1
kubectl create -f "${correct_configmap_yaml}"
kubectl create -f "${incorrect_pod_yaml}"
wait_for_blocked_request "CreateContainerRequest" "${pod_name}"
}
@test "Policy failure: unexpected container image" {
@@ -181,8 +143,6 @@ test_pod_policy_error() {
"${incorrect_pod_yaml}"
test_pod_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected privileged security context" {
@@ -192,8 +152,6 @@ test_pod_policy_error() {
"${incorrect_pod_yaml}"
test_pod_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected terminationMessagePath" {
@@ -203,8 +161,6 @@ test_pod_policy_error() {
"${incorrect_pod_yaml}"
test_pod_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected hostPath volume mount" {
@@ -218,8 +174,6 @@ test_pod_policy_error() {
"${incorrect_pod_yaml}"
test_pod_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected config map" {
@@ -311,8 +265,6 @@ test_pod_policy_error() {
"${incorrect_pod_yaml}"
test_pod_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
@test "Policy failure: unexpected UID = 1234" {
@@ -324,8 +276,6 @@ test_pod_policy_error() {
"${incorrect_pod_yaml}"
test_pod_policy_error
test_result=$?
[ "${test_result}" -eq 0 ]
}
teardown() {

View File

@@ -22,7 +22,11 @@ setup() {
# Save some time by executing genpolicy a single time.
if [ "${BATS_TEST_NUMBER}" == "1" ]; then
# Create the correct yaml file
set_nginx_image "${pod_config_dir}/k8s-policy-rc.yaml" "${correct_yaml}"
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
sed -e "s/\${nginx_version}/${nginx_image}/" \
"${pod_config_dir}/k8s-policy-rc.yaml" > "${correct_yaml}"
# Add policy to the correct yaml file
policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"

View File

@@ -9,11 +9,15 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
get_pod_config_dir
# Create yaml
test_yaml="${pod_config_dir}/test-replication-controller.yaml"
set_nginx_image "${pod_config_dir}/replication-controller.yaml" "${test_yaml}"
sed -e "s/\${nginx_version}/${nginx_image}/" \
"${pod_config_dir}/replication-controller.yaml" > "${test_yaml}"
# Add policy to the yaml file
policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"

View File

@@ -9,13 +9,16 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
replicas="3"
deployment="nginx-deployment"
get_pod_config_dir
# Create the yaml file
test_yaml="${pod_config_dir}/test-${deployment}.yaml"
set_nginx_image "${pod_config_dir}/${deployment}.yaml" "${test_yaml}"
sed -e "s/\${nginx_version}/${nginx_image}/" \
"${pod_config_dir}/${deployment}.yaml" > "${test_yaml}"
# Add policy to the yaml file
policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"
@@ -24,11 +27,8 @@ setup() {
}
@test "Scale nginx deployment" {
# kubectl create -f "${test_yaml}"
# kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment}
# Retries
k8s_create_deployment_ready "${test_yaml}" ${deployment}
kubectl create -f "${test_yaml}"
kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment}
kubectl expose deployment/${deployment}
kubectl scale deployment/${deployment} --replicas=${replicas}
cmd="kubectl get deployment/${deployment} -o yaml | grep 'availableReplicas: ${replicas}'"

View File

@@ -21,7 +21,7 @@ spec:
runtimeClassName: kata
containers:
- name: nginxtest
image: ${NGINX_IMAGE}
image: quay.io/fidencio/${nginx_version}
ports:
- containerPort: 80
volumeMounts:
@@ -43,4 +43,4 @@ spec:
path: /tmp/results
- name: hostpath-vol-read-only
hostPath:
path: /tmp/results-read-only
path: /tmp/results-read-only

View File

@@ -13,8 +13,7 @@ spec:
runtimeClassName: kata
containers:
- name: handlers-container
image: ${NGINX_IMAGE}
imagePullPolicy: Always
image: quay.io/fidencio/${nginx_version}
lifecycle:
postStart:
exec:

View File

@@ -21,7 +21,6 @@ spec:
runtimeClassName: kata
containers:
- name: nginx
image: ${NGINX_IMAGE}
imagePullPolicy: Always
image: quay.io/fidencio/${nginx_version}
ports:
- containerPort: 80

View File

@@ -21,6 +21,6 @@ spec:
runtimeClassName: kata
containers:
- name: nginxtest
image: ${NGINX_IMAGE}
image: quay.io/fidencio/${nginx_version}
ports:
- containerPort: 80

View File

@@ -11,6 +11,7 @@
# This contains variables and functions common to all e2e tests.
# Variables used by the kubernetes tests
export docker_images_nginx_version="1.15-alpine"
export container_images_agnhost_name="registry.k8s.io/e2e-test-images/agnhost"
export container_images_agnhost_version="2.21"
@@ -21,7 +22,7 @@ export sleep_time=3
# Timeout for use with `kubectl wait`, unless it needs to wait longer.
# Note: try to keep timeout and wait_time equal.
export timeout=90s
export timeout=300s
# issues that can't test yet.
export fc_limitations="https://github.com/kata-containers/documentation/issues/351"
@@ -420,80 +421,6 @@ wait_for_blocked_request() {
waitForProcess "${wait_time}" "${sleep_time}" "${command}" >/dev/null 2>/dev/null
}
# k8s create a ready pod
# Create a pod from a yaml file and wait until it reports Ready,
# retrying the whole create/wait cycle on failure.
#
# Arguments:
#   $1 - pod name (as shown by "kubectl get pods")
#   $2 - path to the pod yaml file
#   $3 - seconds to wait for the Ready condition (default: 300)
#   $4 - number of create/wait attempts before giving up (default: 5)
#
# Returns 0 as soon as the pod is Ready, 1 after all attempts fail.
# Uses the global ${timeout} exported by this file for the delete step.
k8s_create_pod_ready() {
	local pod_name="$1"
	local pod_yaml="$2"
	local wait_time="${3:-300}"
	local max_attempts="${4:-5}"
	local attempt_num

	for attempt_num in $(seq 1 "${max_attempts}"); do
		# Start from a clean slate: forcefully delete any leftover
		# resources from a previous attempt.
		kubectl delete -f "${pod_yaml}" --ignore-not-found=true --now --timeout="${timeout}"

		# Use "if ! cmd" rather than checking $? afterwards so this
		# still works when the caller runs under "set -e".
		if ! kubectl create -f "${pod_yaml}"; then
			warn "Failed to create pod ${pod_name} on attempt #${attempt_num}"
			continue
		fi

		# Wait for the pod to become Ready.
		if kubectl wait --for=condition=Ready --timeout="${wait_time}s" pod "${pod_name}"; then
			info "Pod ${pod_name} is Ready (attempt #${attempt_num})"
			return 0
		fi

		# Retry unless this was the last attempt.
		if [ "${attempt_num}" -lt "${max_attempts}" ]; then
			info "Waiting for 5 seconds before next attempt..."
			sleep 5
		fi
	done

	info "Test Failed after ${max_attempts} attempts for pod ${pod_name}."
	return 1
}
# Create a deployment from a yaml file and wait until it is Available,
# retrying the whole create/wait cycle on failure.
#
# Arguments:
#   $1 - path to the deployment yaml file
#   $2 - deployment name (as shown by "kubectl get deployments")
#
# Returns 0 as soon as the deployment is Available, 1 after all attempts
# fail.  Uses the global ${timeout} exported by this file for the delete
# step.
k8s_create_deployment_ready() {
	local deployment_yaml="$1"
	local deployment="$2"
	local wait_time=300
	local max_attempts=5
	local attempt_num

	for attempt_num in $(seq 1 "${max_attempts}"); do
		# Start from a clean slate: forcefully delete any leftover
		# resources from a previous attempt.
		kubectl delete -f "${deployment_yaml}" --ignore-not-found=true --now --timeout="${timeout}"

		# Use "if ! cmd" rather than checking $? afterwards so this
		# still works when the caller runs under "set -e".
		if ! kubectl create -f "${deployment_yaml}"; then
			warn "Failed to create deployment ${deployment} on attempt #${attempt_num}"
			continue
		fi

		# Wait for the deployment to become Available.
		if kubectl wait --for=condition=Available --timeout="${wait_time}s" "deployment/${deployment}"; then
			info "Deployment ${deployment} is Available (attempt #${attempt_num})"
			return 0
		fi

		# Retry unless this was the last attempt.
		if [ "${attempt_num}" -lt "${max_attempts}" ]; then
			info "Waiting for 5 seconds before next attempt..."
			sleep 5
		fi
	done

	info "Test Failed after ${max_attempts} attempts for deployment ${deployment}."
	return 1
}
# Execute in a pod a command that is allowed by policy.
pod_exec_allowed_command() {
local -r pod_name="$1"
@@ -630,15 +557,3 @@ container_exec_with_retries() {
echo "${cmd_out}"
}
set_nginx_image() {
input_yaml=$1
output_yaml=$2
ensure_yq
nginx_registry=$(get_from_kata_deps ".docker_images.nginx.registry")
nginx_digest=$(get_from_kata_deps ".docker_images.nginx.digest")
nginx_image="${nginx_registry}@${nginx_digest}"
NGINX_IMAGE="${nginx_image}" envsubst < "${input_yaml}" > "${output_yaml}"
}

View File

@@ -79,52 +79,6 @@ function config_kata() {
}
# Detect the containerd config schema version from "containerd config dump"
# and then apply the containerd configuration via config_containerd_core.
#
# NOTE(review): every early-exit path uses a bare "return" (exit status 0),
# so callers cannot distinguish "configured" from "containerd missing /
# version not detected" — confirm that is intentional.
# NOTE(review): the detected version number is validated but never used to
# choose between configurations; config_containerd_core is the only
# configuration function invoked regardless of the value.
function config_containerd() {
	# store pure version number extracted from config
	local version_num=""
	# store the raw line containing "version = ..."
	local version_line=""

	# 1) Check if containerd command is available in PATH
	if ! command -v containerd >/dev/null 2>&1; then
		echo "[ERROR] containerd command not found"
		return
	fi

	# 2) Dump containerd configuration and look for the "version = ..."
	# We use awk to match lines starting with "version = X", allowing leading spaces
	# The 'exit' ensures we stop at the first match
	version_line=$(containerd config dump 2>/dev/null | \
		awk '/^[[:space:]]*version[[:space:]]*=/ {print; exit}')

	# 3) If no "version = X" line is found, return
	if [ -z "$version_line" ]; then
		echo "[ERROR] Cannot find version key in containerd config, defaulting to v1 config"
		return
	fi

	# 4) Extract the numeric version from the matched line
	# - Remove leading/trailing spaces around the value
	# - Remove surrounding double quotes if any
	version_num=$(echo "$version_line" | awk -F'=' '
	{
	gsub(/^[[:space:]]+|[[:space:]]+$/, "", $2) # trim spaces
	gsub(/"/, "", $2) # remove double quotes
	print $2
	}')

	# 5) Validate that the extracted value is strictly numeric
	# If not numeric, fall back to v1 configuration
	if ! echo "$version_num" | grep -Eq '^[0-9]+$'; then
		echo "[ERROR] Invalid version format: \"$version_num\". Defaulting to v1 config"
		return
	fi

	# 6) Based on version number, run the appropriate configuration function
	echo "[INFO] Running config for containerd version $version_num"
	config_containerd_core
}
function config_containerd_core() {
readonly runc_path=$(command -v runc)
sudo mkdir -p /etc/containerd/
if [ -f "$containerd_config" ]; then
@@ -135,26 +89,27 @@ function config_containerd_core() {
fi
cat <<EOF | sudo tee $containerd_config
[debug]
level = "debug"
[proxy_plugins]
[proxy_plugins.nydus]
type = "snapshot"
address = "/run/containerd-nydus/containerd-nydus-grpc.sock"
[plugins]
[plugins.'io.containerd.cri.v1.images']
snapshotter = 'nydus'
disable_snapshot_annotations = false
discard_unpacked_layers = false
[plugins.'io.containerd.cri.v1.runtime']
[plugins.'io.containerd.cri.v1.runtime'.containerd]
[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes]
[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.kata-${KATA_HYPERVISOR}]
runtime_type = "io.containerd.kata-${KATA_HYPERVISOR}.v2"
sandboxer = 'podsandbox'
[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc]
runtime_type = 'io.containerd.runc.v2'
sandboxer = 'podsandbox'
[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc.options]
BinaryName = "${runc_path}"
[plugins.cri]
disable_hugetlb_controller = false
[plugins.cri.containerd]
snapshotter = "nydus"
disable_snapshot_annotations = false
[plugins.cri.containerd.runtimes]
[plugins.cri.containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins.cri.containerd.runtimes.runc.options]
BinaryName = "${runc_path}"
Root = ""
[plugins.cri.containerd.runtimes.kata-${KATA_HYPERVISOR}]
runtime_type = "io.containerd.kata-${KATA_HYPERVISOR}.v2"
privileged_without_host_devices = true
EOF
}

View File

@@ -173,9 +173,8 @@ function init() {
fi
versions_file="${cidir}/../../versions.yaml"
nginx_registry=$("${GOPATH}/bin/yq" ".docker_images.nginx.registry" "${versions_file}")
nginx_digest=$("${GOPATH}/bin/yq" ".docker_images.nginx.digest" "${versions_file}")
nginx_image="${nginx_registry}@${nginx_digest}"
nginx_version=$("${GOPATH}/bin/yq" ".docker_images.nginx.version" "$versions_file")
nginx_image="docker.io/library/nginx:$nginx_version"
# Pull nginx image
sudo "${CTR_EXE}" image pull "${nginx_image}"

View File

@@ -208,13 +208,6 @@ check_rootfs() {
# check agent or systemd
case "${AGENT_INIT}" in
"no")
# Check if we have alternative init systems installed
# For now check if we have NVRC
if readlink "${rootfs}/sbin/init" | grep -q "NVRC"; then
OK "init is NVRC"
return 0
fi
for systemd_path in $candidate_systemd_paths; do
systemd="${rootfs}${systemd_path}"
if [ -x "${systemd}" ] || [ -L "${systemd}" ]; then

View File

@@ -1187,13 +1187,6 @@ install_tools_helper() {
[ ${tool} = "trace-forwarder" ] && tool_binary="kata-trace-forwarder"
binary=$(find ${repo_root_dir}/src/tools/${tool}/ -type f -name ${tool_binary})
binary_count=$(echo "${binary}" | grep -c '^' || echo "0")
if [[ "${binary_count}" -eq 0 ]]; then
die "No binary found for ${tool} (expected: ${tool_binary})."
elif [[ "${binary_count}" -gt 1 ]]; then
die "Multiple binaries found for ${tool} (expected single ${tool_binary}). Found:"$'\n'"${binary}"
fi
if [[ "${tool}" == "genpolicy" ]]; then
defaults_path="${destdir}/opt/kata/share/defaults/kata-containers"
mkdir -p "${defaults_path}"

View File

@@ -529,13 +529,11 @@ function adjust_qemu_cmdline() {
# ${dest_dir}/opt/kata/share/kata-qemu/qemu
# ${dest_dir}/opt/kata/share/kata-qemu-snp-experimnental/qemu
# ${dest_dir}/opt/kata/share/kata-qemu-cca-experimental/qemu
[[ "${shim}" == "qemu-nvidia-gpu-snp" ]] && qemu_share=qemu-snp-experimental
[[ "${shim}" == "qemu-nvidia-gpu-tdx" ]] && qemu_share=qemu-tdx-experimental
[[ "${shim}" == "qemu-cca" ]] && qemu_share=qemu-cca-experimental
[[ "${shim}" =~ ^(qemu-nvidia-gpu-snp|qemu-nvidia-gpu-tdx|qemu-cca)$ ]] && qemu_share=${shim}-experimental
# Both qemu and qemu-coco-dev use exactly the same QEMU, so we can adjust
# the shim on the qemu-coco-dev case to qemu
[[ "${shim}" =~ ^(qemu|qemu-runtime-rs|qemu-snp|qemu-se|qemu-se-runtime-rs|qemu-coco-dev|qemu-coco-dev-runtime-rs|qemu-nvidia-gpu)$ ]] && qemu_share="qemu"
[[ "${shim}" =~ ^(qemu|qemu-coco-dev)$ ]] && qemu_share="qemu"
qemu_binary=$(tomlq '.hypervisor.qemu.path' ${config_path} | tr -d \")
qemu_binary_script="${qemu_binary}-installation-prefix"
@@ -854,7 +852,7 @@ function install_artifacts() {
sed -i -e "s|${default_dest_dir}|${dest_dir}|g" "${kata_config_file}"
# Let's only adjust qemu_cmdline for the QEMUs that we build and ship ourselves
[[ "${shim}" =~ ^(qemu|qemu-runtime-rs|qemu-snp|qemu-nvidia-gpu|qemu-nvidia-gpu-snp|qemu-nvidia-gpu-tdx|qemu-se|qemu-se-runtime-rs|qemu-coco-dev|qemu-coco-dev-runtime-rs|qemu-cca)$ ]] && \
[[ "${shim}" =~ ^(qemu|qemu-snp|qemu-nvidia-gpu|qemu-nvidia-gpu-snp|qemu-nvidia-gpu-tdx|qemu-se|qemu-coco-dev|qemu-cca)$ ]] && \
adjust_qemu_cmdline "${shim}" "${kata_config_file}"
fi
fi

View File

@@ -1,2 +0,0 @@
# NUMA settings
CONFIG_NUMA=y

View File

@@ -1,4 +0,0 @@
# NUMA settings
CONFIG_NUMA=y
CONFIG_X86_64_ACPI_NUMA=y

View File

@@ -1 +1 @@
173
172

View File

@@ -123,13 +123,13 @@ check_tag() {
local tag="$1"
local entry="$2"
[[ -z "$tag" ]] && die "no tag for entry '$entry'"
[[ -z "$entry" ]] && die "no entry for tag '$tag'"
[ -z "$tag" ] && die "no tag for entry '$entry'"
[ -z "$entry" ] && die "no entry for tag '$tag'"
value="${recognised_tags[$tag]}"
# each tag MUST have a description
[[ -n "$value" ]] && return
[ -n "$value" ] && return
die "invalid tag '$tag' found for entry '$entry'"
}
@@ -138,8 +138,8 @@ check_tags() {
local tags="$1"
local entry="$2"
[[ -z "$tags" ]] && die "entry '$entry' doesn't have any tags"
[[ -z "$entry" ]] && die "no entry for tags '$tags'"
[ -z "$tags" ] && die "entry '$entry' doesn't have any tags"
[ -z "$entry" ] && die "no entry for tags '$tags'"
tags=$(echo "$tags" | tr ',' '\n')
@@ -173,22 +173,22 @@ show_array() {
local suffix
local one_line="no"
[[ "$action" = "dump" ]] && show_tags_header
[ "$action" = "dump" ] && show_tags_header
for entry in "${_array[@]}"; do
[[ -z "$entry" ]] && die "found empty entry"
[ -z "$entry" ] && die "found empty entry"
tags=$(echo "$entry" | cut -s -d: -f1)
elem=$(echo "$entry" | cut -s -d: -f2-)
[[ -z "$elem" ]] && die "no option for entry '$entry'"
[ -z "$elem" ] && die "no option for entry '$entry'"
check_tags "$tags" "$entry"
if [[ "$action" = "dump" ]]; then
if [ "$action" = "dump" ]; then
printf "%s\t\t%s\n" "$tags" "$elem"
elif [[ "$action" = "multi" ]]; then
if [[ $i -eq $size ]]; then
elif [ "$action" = "multi" ]; then
if [ $i -eq $size ]; then
suffix=""
else
suffix=' \'
@@ -203,14 +203,14 @@ show_array() {
i+=1
done
[[ "$one_line" = yes ]] && echo
[ "$one_line" = yes ] && echo
}
generate_qemu_options() {
#---------------------------------------------------------------------
#check if cross-compile is needed
host=$(uname -m)
if [[ "$arch" != "$host" ]]; then
if [ $arch != $host ];then
case $arch in
aarch64) qemu_options+=(size:--cross-prefix=aarch64-linux-gnu-);;
ppc64le) qemu_options+=(size:--cross-prefix=powerpc64le-linux-gnu-);;
@@ -279,7 +279,7 @@ generate_qemu_options() {
s390x) qemu_options+=(size:--disable-tcg) ;;
esac
if [[ "${static}" == "true" ]]; then
if [ "${static}" == "true" ]; then
qemu_options+=(misc:--static)
fi
@@ -416,7 +416,7 @@ generate_qemu_options() {
# Building static binaries for aarch64 requires disabling PIE
# We get an GOT overflow and the OS libraries are only build with fpic
# and not with fPIC which enables unlimited sized GOT tables.
if [[ "${static}" == "true" ]] && [[ "${arch}" == "aarch64" ]]; then
if [ "${static}" == "true" ] && [ "${arch}" == "aarch64" ]; then
qemu_options+=(arch:"--disable-pie")
fi
@@ -435,10 +435,7 @@ generate_qemu_options() {
qemu_options+=(size:--enable-linux-io-uring)
# Support Ceph RADOS Block Device (RBD)
[[ -z "${static}" ]] && qemu_options+=(functionality:--enable-rbd)
# Support NUMA topology
qemu_options+=(functionality:--enable-numa)
[ -z "${static}" ] && qemu_options+=(functionality:--enable-rbd)
# In "passthrough" security mode
# (-fsdev "...,security_model=passthrough,..."), qemu uses a helper
@@ -478,7 +475,7 @@ generate_qemu_options() {
# Other options
# 64-bit only
if [[ "${arch}" = "ppc64le" ]]; then
if [ "${arch}" = "ppc64le" ]; then
qemu_options+=(arch:"--target-list=ppc64-softmmu")
else
qemu_options+=(arch:"--target-list=${arch}-softmmu")
@@ -487,7 +484,7 @@ generate_qemu_options() {
# SECURITY: Create binary as a Position Independant Executable,
# and take advantage of ASLR, making ROP attacks much harder to perform.
# (https://wiki.debian.org/Hardening)
[[ -z "${static}" ]] && qemu_options+=(arch:"--enable-pie")
[ -z "${static}" ] && qemu_options+=(arch:"--enable-pie")
_qemu_cflags=""
@@ -571,17 +568,17 @@ main() {
shift $((OPTIND - 1))
[[ -z "$1" ]] && die "need hypervisor name"
[ -z "$1" ] && die "need hypervisor name"
hypervisor="$1"
local qemu_version_file="VERSION"
[[ -f ${qemu_version_file} ]] || die "QEMU version file '$qemu_version_file' not found"
[ -f ${qemu_version_file} ] || die "QEMU version file '$qemu_version_file' not found"
# Remove any pre-release identifier so that it returns the version on
# major.minor.patch format (e.g 5.2.0-rc4 becomes 5.2.0)
qemu_version="$(awk 'BEGIN {FS = "-"} {print $1}' ${qemu_version_file})"
[[ -n "${qemu_version}" ]] ||
[ -n "${qemu_version}" ] ||
die "cannot determine qemu version from file $qemu_version_file"
if ! gt_eq "${qemu_version}" "6.1.0" ; then
@@ -589,7 +586,7 @@ main() {
fi
local gcc_version_major=$(gcc -dumpversion | cut -f1 -d.)
[[ -n "${gcc_version_major}" ]] ||
[ -n "${gcc_version_major}" ] ||
die "cannot determine gcc major version, please ensure it is installed"
# -dumpversion only returns the major version since GCC 7.0
if gt_eq "${gcc_version_major}" "7.0.0" ; then
@@ -597,7 +594,7 @@ main() {
else
local gcc_version_minor=$(gcc -dumpversion | cut -f2 -d.)
fi
[[ -n "${gcc_version_minor}" ]] ||
[ -n "${gcc_version_minor}" ] ||
die "cannot determine gcc minor version, please ensure it is installed"
local gcc_version="${gcc_version_major}.${gcc_version_minor}"

View File

@@ -50,7 +50,6 @@ RUN apt-get update && apt-get upgrade -y && \
libglib2.0-dev${DPKG_ARCH} git \
libltdl-dev${DPKG_ARCH} \
libmount-dev${DPKG_ARCH} \
libnuma-dev${DPKG_ARCH} \
libpixman-1-dev${DPKG_ARCH} \
libselinux1-dev${DPKG_ARCH} \
libtool${DPKG_ARCH} \

View File

@@ -8,37 +8,30 @@ set -o errexit
set -o nounset
set -o pipefail
CROSS_BUILD="${CROSS_BUILD:-false}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly qemu_builder="${script_dir}/build-qemu.sh"
# shellcheck source=/dev/null
source "${script_dir}/../../scripts/lib.sh"
# shellcheck source=/dev/null
source "${script_dir}/../qemu.blacklist"
# Ensure repo_root_dir is available
repo_root_dir="${repo_root_dir:-$(git rev-parse --show-toplevel 2>/dev/null || echo "${script_dir}/../../../..")}"
ARCH=${ARCH:-$(uname -m)}
dpkg_arch=":${ARCH}"
[[ "${dpkg_arch}" == ":aarch64" ]] && dpkg_arch=":arm64"
[[ "${dpkg_arch}" == ":x86_64" ]] && dpkg_arch=""
[[ "${dpkg_arch}" == ":ppc64le" ]] && dpkg_arch=":ppc64el"
[ ${dpkg_arch} == ":aarch64" ] && dpkg_arch=":arm64"
[ ${dpkg_arch} == ":x86_64" ] && dpkg_arch=""
[ "${dpkg_arch}" == ":ppc64le" ] && dpkg_arch=":ppc64el"
packaging_dir="${script_dir}/../.."
qemu_destdir="/tmp/qemu-static/"
container_engine="${USE_PODMAN:+podman}"
container_engine="${container_engine:-docker}"
qemu_repo="${qemu_repo:-${1:-}}"
qemu_version="${qemu_version:-${2:-}}"
qemu_repo="${qemu_repo:-$1}"
qemu_version="${qemu_version:-$2}"
build_suffix="${3:-}"
qemu_tar="${4:-}"
[[ -n "${qemu_repo}" ]] || die "qemu repo not provided"
[[ -n "${qemu_version}" ]] || die "qemu version not provided"
[ -n "$qemu_repo" ] || die "qemu repo not provided"
[ -n "$qemu_version" ] || die "qemu version not provided"
info "Build ${qemu_repo} version: ${qemu_version}"
@@ -48,13 +41,13 @@ prefix="${prefix:-"/opt/kata"}"
CACHE_TIMEOUT=$(date +"%Y-%m-%d")
[[ -n "${build_suffix}" ]] && HYPERVISOR_NAME="kata-qemu-${build_suffix}" || HYPERVISOR_NAME="kata-qemu"
[[ -n "${build_suffix}" ]] && PKGVERSION="kata-static-${build_suffix}" || PKGVERSION="kata-static"
[ -n "${build_suffix}" ] && HYPERVISOR_NAME="kata-qemu-${build_suffix}" || HYPERVISOR_NAME="kata-qemu"
[ -n "${build_suffix}" ] && PKGVERSION="kata-static-${build_suffix}" || PKGVERSION="kata-static"
container_image="${QEMU_CONTAINER_BUILDER:-$(get_qemu_image_name)}"
[[ "${CROSS_BUILD}" == "true" ]] && container_image="${container_image}-cross-build"
[ "${CROSS_BUILD}" == "true" ] && container_image="${container_image}-cross-build"
"${container_engine}" pull "${container_image}" || ("${container_engine}" build \
${container_engine} pull ${container_image} || ("${container_engine}" build \
--build-arg CACHE_TIMEOUT="${CACHE_TIMEOUT}" \
--build-arg http_proxy="${http_proxy}" \
--build-arg https_proxy="${https_proxy}" \

View File

@@ -8,15 +8,6 @@ set -o errexit
set -o nounset
set -o pipefail
# Environment variables passed from container
QEMU_REPO="${QEMU_REPO:-}"
QEMU_VERSION_NUM="${QEMU_VERSION_NUM:-}"
HYPERVISOR_NAME="${HYPERVISOR_NAME:-}"
PKGVERSION="${PKGVERSION:-}"
PREFIX="${PREFIX:-}"
QEMU_DESTDIR="${QEMU_DESTDIR:-}"
QEMU_TARBALL="${QEMU_TARBALL:-}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
kata_packaging_dir="${script_dir}/../.."
@@ -31,15 +22,15 @@ git clone --depth=1 "${QEMU_REPO}" qemu
pushd qemu
git fetch --depth=1 origin "${QEMU_VERSION_NUM}"
git checkout FETCH_HEAD
${kata_packaging_scripts}/patch_qemu.sh "${QEMU_VERSION_NUM}" "${kata_packaging_dir}/qemu/patches"
scripts/git-submodule.sh update meson capstone
"${kata_packaging_scripts}/patch_qemu.sh" "${QEMU_VERSION_NUM}" "${kata_packaging_dir}/qemu/patches"
if [[ "$(uname -m)" != "${ARCH}" ]] && [[ "${ARCH}" == "s390x" ]]; then
PREFIX="${PREFIX}" "${kata_packaging_scripts}/configure-hypervisor.sh" -s "${HYPERVISOR_NAME}" "${ARCH}" | xargs ./configure --with-pkgversion="${PKGVERSION}" --cc=s390x-linux-gnu-gcc --cross-prefix=s390x-linux-gnu- --prefix="${PREFIX}" --target-list=s390x-softmmu
if [ "$(uname -m)" != "${ARCH}" ] && [ "${ARCH}" == "s390x" ]; then
PREFIX="${PREFIX}" ${kata_packaging_scripts}/configure-hypervisor.sh -s "${HYPERVISOR_NAME}" "${ARCH}" | xargs ./configure --with-pkgversion="${PKGVERSION}" --cc=s390x-linux-gnu-gcc --cross-prefix=s390x-linux-gnu- --prefix="${PREFIX}" --target-list=s390x-softmmu
else
PREFIX="${PREFIX}" "${kata_packaging_scripts}/configure-hypervisor.sh" -s "${HYPERVISOR_NAME}" "${ARCH}" | xargs ./configure --with-pkgversion="${PKGVERSION}"
PREFIX="${PREFIX}" ${kata_packaging_scripts}/configure-hypervisor.sh -s "${HYPERVISOR_NAME}" "${ARCH}" | xargs ./configure --with-pkgversion="${PKGVERSION}"
fi
make -j"$(nproc --ignore=1)"
make -j"$(nproc +--ignore 1)"
make install DESTDIR="${QEMU_DESTDIR}"
popd
"${kata_static_build_scripts}/qemu-build-post.sh"
${kata_static_build_scripts}/qemu-build-post.sh
mv "${QEMU_DESTDIR}/${QEMU_TARBALL}" /share/

View File

@@ -85,7 +85,7 @@ mapping:
- Kata Containers CI / kata-containers-ci-on-push / run-k8s-tests-on-aks / run-k8s-tests (ubuntu, qemu, small)
- Kata Containers CI / kata-containers-ci-on-push / run-k8s-tests-on-zvsi / run-k8s-tests (devmapper, qemu, kubeadm)
- Kata Containers CI / kata-containers-ci-on-push / run-k8s-tests-on-zvsi / run-k8s-tests (nydus, qemu-coco-dev, kubeadm)
# - Kata Containers CI / kata-containers-ci-on-push / run-kata-coco-tests / run-k8s-tests-on-tee (sev-snp, qemu-snp)
- Kata Containers CI / kata-containers-ci-on-push / run-kata-coco-tests / run-k8s-tests-on-tee (sev-snp, qemu-snp)
- Kata Containers CI / kata-containers-ci-on-push / run-kata-coco-tests / run-k8s-tests-coco-nontee (qemu-coco-dev, nydus, guest-pull)
- Kata Containers CI / kata-containers-ci-on-push / run-kata-deploy-tests / run-kata-deploy-tests (qemu, k0s)
- Kata Containers CI / kata-containers-ci-on-push / run-kata-deploy-tests / run-kata-deploy-tests (qemu, k3s)
@@ -161,35 +161,36 @@ mapping:
- Static checks / check-kernel-config-version
- Static checks / static-checks (make static-checks)
# static-checks-self-hosted.yaml
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make check, agent, src/agent, rust, libdevmapper, libseccomp, protobuf-compiler, clang, ub...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make check, dragonball, src/dragonball, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make check, genpolicy, src/tools/genpolicy, rust, protobuf-compiler, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make check, kata-ctl, src/tools/kata-ctl, rust, protobuf-compiler, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make check, runtime-rs, src/runtime-rs, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make check, runtime, src/runtime, golang, XDG_RUNTIME_DIR, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make check, trace-forwarder, src/tools/trace-forwarder, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make check, agent-ctl, src/tools/agent-ctl, rust, protobuf-compiler, clang, ubuntu-24.04-s...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make test, agent, src/agent, rust, libdevmapper, libseccomp, protobuf-compiler, clang, ubu...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make test, dragonball, src/dragonball, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make test, genpolicy, src/tools/genpolicy, rust, protobuf-compiler, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make test, kata-ctl, src/tools/kata-ctl, rust, protobuf-compiler, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make test, runtime-rs, src/runtime-rs, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make test, runtime, src/runtime, golang, XDG_RUNTIME_DIR, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make test, trace-forwarder, src/tools/trace-forwarder, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make vendor, agent-ctl, src/tools/agent-ctl, rust, protobuf-compiler, clang, ubuntu-24.04-...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make vendor, agent, src/agent, rust, libdevmapper, libseccomp, protobuf-compiler, clang, u...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make vendor, dragonball, src/dragonball, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make vendor, genpolicy, src/tools/genpolicy, rust, protobuf-compiler, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make vendor, kata-ctl, src/tools/kata-ctl, rust, protobuf-compiler, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make vendor, runtime-rs, src/runtime-rs, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make vendor, runtime, src/runtime, golang, XDG_RUNTIME_DIR, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (make vendor, trace-forwarder, src/tools/trace-forwarder, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (sudo -E PATH="$PATH" make test, agent-ctl, src/tools/agent-ctl, rust, protobuf-compiler, c...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (sudo -E PATH="$PATH" make test, dragonball, src/dragonball, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (sudo -E PATH="$PATH" make test, genpolicy, src/tools/genpolicy, rust, protobuf-compiler, u...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (sudo -E PATH="$PATH" make test, kata-ctl, src/tools/kata-ctl, rust, protobuf-compiler, ubu...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (sudo -E PATH="$PATH" make test, runtime-rs, src/runtime-rs, rust, ubuntu-24.04-s390x)
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (sudo -E PATH="$PATH" make test, runtime, src/runtime, golang, XDG_RUNTIME_DIR, ubuntu-24.0...
- Static checks self-hosted / build-checks (ubuntu-24.04-s390x) / check (sudo -E PATH="$PATH" make test, trace-forwarder, src/tools/trace-forwarder, rust, ubuntu-2...
- Static checks self-hosted / build-checks (s390x) / check (make check, agent-ctl, src/tools/agent-ctl, rust, protobuf-compiler, clang, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make check, agent, src/agent, rust, libdevmapper, libseccomp, protobuf-compiler, clang, s3...
- Static checks self-hosted / build-checks (s390x) / check (make check, dragonball, src/dragonball, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make check, genpolicy, src/tools/genpolicy, rust, protobuf-compiler, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make check, kata-ctl, src/tools/kata-ctl, rust, protobuf-compiler, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make check, runtime-rs, src/runtime-rs, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make check, runtime, src/runtime, golang, XDG_RUNTIME_DIR, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make check, trace-forwarder, src/tools/trace-forwarder, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make test, agent-ctl, src/tools/agent-ctl, rust, protobuf-compiler, clang, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make test, agent, src/agent, rust, libdevmapper, libseccomp, protobuf-compiler, clang, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make test, dragonball, src/dragonball, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make test, genpolicy, src/tools/genpolicy, rust, protobuf-compiler, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make test, kata-ctl, src/tools/kata-ctl, rust, protobuf-compiler, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make test, runtime-rs, src/runtime-rs, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make test, runtime, src/runtime, golang, XDG_RUNTIME_DIR, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make test, trace-forwarder, src/tools/trace-forwarder, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make vendor, agent-ctl, src/tools/agent-ctl, rust, protobuf-compiler, clang, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make vendor, agent, src/agent, rust, libdevmapper, libseccomp, protobuf-compiler, clang, s...
- Static checks self-hosted / build-checks (s390x) / check (make vendor, dragonball, src/dragonball, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make vendor, genpolicy, src/tools/genpolicy, rust, protobuf-compiler, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make vendor, kata-ctl, src/tools/kata-ctl, rust, protobuf-compiler, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make vendor, runtime-rs, src/runtime-rs, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make vendor, runtime, src/runtime, golang, XDG_RUNTIME_DIR, s390x)
- Static checks self-hosted / build-checks (s390x) / check (make vendor, trace-forwarder, src/tools/trace-forwarder, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (sudo -E PATH="$PATH" make test, agent-ctl, src/tools/agent-ctl, rust, protobuf-compiler, c...
- Static checks self-hosted / build-checks (s390x) / check (sudo -E PATH="$PATH" make test, dragonball, src/dragonball, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (sudo -E PATH="$PATH" make test, genpolicy, src/tools/genpolicy, rust, protobuf-compiler, s...
- Static checks self-hosted / build-checks (s390x) / check (sudo -E PATH="$PATH" make test, kata-ctl, src/tools/kata-ctl, rust, protobuf-compiler, s390x)
- Static checks self-hosted / build-checks (s390x) / check (sudo -E PATH="$PATH" make test, runtime-rs, src/runtime-rs, rust, s390x)
- Static checks self-hosted / build-checks (s390x) / check (sudo -E PATH="$PATH" make test, runtime, src/runtime, golang, XDG_RUNTIME_DIR, s390x)
- Static checks self-hosted / build-checks (s390x) / check (sudo -E PATH="$PATH" make test, trace-forwarder, src/tools/trace-forwarder, rust, s390x)
required-labels:
- ok-to-test

View File

@@ -475,10 +475,9 @@ plugins:
version: "b7f6d3e0679796e907ecca88cfab0e32e326850d"
docker_images:
description: "Docker images used for testing"
description: "Docker hub images used for testing"
nginx:
description: "Proxy server for HTTP, HTTPS, SMTP, POP3 and IMAP protocols"
registry: "quay.io/kata-containers/nginx"
# yamllint disable-line rule:line-length
digest: "sha256:a905609e0f9adc2607f06da2f76893c6da07caa396c41f2806fee162064cfb4b" # 1.15-alpine
url: "https://hub.docker.com/_/nginx/"
version: "1.15-alpine"