tests: Move k8s tests to this repo

The first part of simplifying things to have all our tests using GitHub
actions is moving the k8s tests to this repo, as those will be the first
vict^W targets to be migrated to GitHub actions.

Those tests have been slightly adapted, mainly related to what they load
/ import, so they are more self-contained and do not require us bringing
a lot of scripts from the tests repo here.

A few scripts were also dropped along the way, as we no longer plan to
deploy kubernetes as part of every single run, but rather assume there
will always be k8s running whenever we land to run those tests.

It's important to mention that a few tests were not added here:

* k8s-block-volume:
* k8s-file-volume:
* k8s-volume:
* k8s-ro-volume:
  These tests depend on some sort of volume being created on the
  kubernetes node where the test will run, and this won't fly as the
tests will run from a GitHub runner, targeting a different machine
  where kubernetes will be running.
  * https://github.com/kata-containers/kata-containers/issues/6566

* k8s-hugepages: This test depends a whole lot on the host where it
  lands and right now we cannot assume anything about that anymore, as
  the tests will run from a GitHub runner, targeting a different
  machine where kubernetes will be running.
  * https://github.com/kata-containers/kata-containers/issues/6567

* k8s-expose-ip: This is simply hanging when running on AKS and has to
  be debugged in order to figure out the root cause of that, and then
  adapted to also work on AKS.
  * https://github.com/kata-containers/kata-containers/issues/6578

Until those issues are solved, we'll keep running a Jenkins job with
those tests to avoid any possible regression.

Last but not least, I've decided to **not** keep the history when
bringing those tests here; otherwise we'd end up polluting this repo's
history a lot, without any clear benefit in doing so.

Signed-off-by: Fabiano Fidêncio <fabiano.fidencio@intel.com>
This commit is contained in:
Fabiano Fidêncio 2023-03-30 22:14:25 +02:00
parent 73be4bd3f9
commit 11e0099fb5
96 changed files with 3372 additions and 0 deletions

47
tests/common.bash Normal file
View File

@ -0,0 +1,47 @@
#!/usr/bin/env bash
#
# Copyright (c) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# This file contains common functions that
# are being used by our metrics and integration tests
# Print an ERROR message, prefixed with the script name and the caller's
# line number, to stderr and abort the whole script.
# Arguments: $* - message text
# Exits: always, with status 1.
die() {
	local msg="$*"
	# Quote "$0" so paths containing spaces don't word-split (SC2086).
	echo -e "[$(basename "$0"):${BASH_LINENO[0]}] ERROR: $msg" >&2
	exit 1
}
# Print a WARNING message, prefixed with the script name and the caller's
# line number, to stdout (kept on stdout to match the original behavior).
# Arguments: $* - message text
warn() {
	local msg="$*"
	# Quote "$0" so paths containing spaces don't word-split (SC2086).
	echo -e "[$(basename "$0"):${BASH_LINENO[0]}] WARNING: $msg"
}
# Print an INFO message, prefixed with the script name and the caller's
# line number, to stdout.
# Arguments: $* - message text
info() {
	local msg="$*"
	# Quote "$0" so paths containing spaces don't word-split (SC2086).
	echo -e "[$(basename "$0"):${BASH_LINENO[0]}] INFO: $msg"
}
# Generic ERR-trap handler: report the command that failed (and the line
# number passed by the trap) and exit with that command's status.
# Arguments: $1 - line number where the failure happened (from $LINENO)
# NOTE(review): "eval echo" re-expands $BASH_COMMAND, so any command
# substitution embedded in the failed command text is executed again -
# confirm that re-expansion is intentional before reusing this helper.
handle_error() {
local exit_code="${?}"
local line_number="${1:-}"
echo -e "[$(basename $0):$line_number] ERROR: $(eval echo "$BASH_COMMAND")"
exit "${exit_code}"
}
# Install the handler for every unhandled command failure.
trap 'handle_error $LINENO' ERR
# Repeatedly eval a command until it succeeds or a timeout expires.
# Arguments:
#   $1 - total time to wait (seconds)
#   $2 - sleep interval between attempts (seconds)
#   $3 - command string (eval'd on each attempt)
# Returns: 0 as soon as the command succeeds, 1 on timeout.
waitForProcess() {
	# These were global in the original, silently clobbering any
	# caller-level $wait_time/$sleep_time/$cmd (names several tests in
	# this repo use for their own settings); keep them local.
	local wait_time="$1"
	local sleep_time="$2"
	local cmd="$3"

	while [ "$wait_time" -gt 0 ]; do
		if eval "$cmd"; then
			return 0
		fi
		sleep "$sleep_time"
		wait_time=$((wait_time - sleep_time))
	done
	return 1
}

View File

@ -0,0 +1,48 @@
#!/bin/bash
#
# Copyright (c) 2019 ARM Limited
#
# SPDX-License-Identifier: Apache-2.0
# Strict mode: abort on errors, unset variables and pipeline failures.
set -o errexit
set -o nounset
set -o pipefail
# Only the first GOPATH entry is used to locate the kata tests checkout.
GOPATH_LOCAL="${GOPATH%%:*}"
KATA_DIR="${GOPATH_LOCAL}/src/github.com/kata-containers"
TEST_DIR="${KATA_DIR}/tests"
CI_DIR="${TEST_DIR}/.ci"
# Key of the skip-list section read from the YAML config file.
K8S_FILTER_FLAG="kubernetes"
source "${CI_DIR}/lib.sh"
# Filter a space-separated list of k8s .bats test files against the YAML
# skip list and print the tests that should actually run.
# Arguments:
#   $1 - path to the YAML config file holding the skip list
#   $2 - space-separated union of candidate .bats test file names
# Outputs: the filtered test list on stdout (space separated).
main()
{
local K8S_CONFIG_FILE="$1"
local K8S_TEST_UNION="$2"
local result=()
# Split the candidate list on spaces into an array.
mapfile -d " " -t _K8S_TEST_UNION <<< "${K8S_TEST_UNION}"
# Install yq if not already present (its output is discarded).
${CI_DIR}/install_yq.sh > /dev/null
# NOTE(review): "yq read" is yq v2/v3 syntax; yq v4 removed the "read"
# command - confirm which yq version install_yq.sh pins.
local K8S_SKIP_UNION=$("${GOPATH_LOCAL}/bin/yq" read "${K8S_CONFIG_FILE}" "${K8S_FILTER_FLAG}")
# "null" means the config has no skip section: run everything.
[ "${K8S_SKIP_UNION}" == "null" ] && return
mapfile -t _K8S_SKIP_UNION <<< "${K8S_SKIP_UNION}"
for TEST_ENTRY in "${_K8S_TEST_UNION[@]}"
do
local flag="false"
for SKIP_ENTRY in "${_K8S_SKIP_UNION[@]}"
do
# Skip entries come back as "- name"; strip the list marker and append
# ".bats" so they compare against test file names.
SKIP_ENTRY="${SKIP_ENTRY#- }.bats"
[ "$SKIP_ENTRY" == "$TEST_ENTRY" ] && flag="true"
done
# Keep the entry only when it matched no skip entry.
[ "$flag" == "false" ] && result+=("$TEST_ENTRY")
done
echo ${result[@]}
}
main "$@"

View File

@ -0,0 +1,23 @@
#
# Copyright (c) 2018 ARM Limited
#
# SPDX-License-Identifier: Apache-2.0
# For now, not all integration test suites fully pass on aarch64;
# some still need to be tested and some refined.
# The 'test' sequence holds the supported integration test components.
test:
- functional
- kubernetes
- cri-containerd
# k8s test file names (without the .bats suffix) to be skipped on
# aarch64 - presumably consumed as the skip list by the k8s test filter
# script, which reads the "kubernetes" key; confirm against the caller.
kubernetes:
- k8s-cpu-ns
- k8s-limit-range
- k8s-number-cpus
- k8s-expose-ip
- k8s-oom
- k8s-block-volume
- k8s-inotify
- k8s-qos-pods
- k8s-footloose

View File

@ -0,0 +1,11 @@
#
# Copyright (c) 2019 IBM
#
# SPDX-License-Identifier: Apache-2.0
# k8s test file names (without the .bats suffix) to be skipped on this
# platform - presumably the skip list read by the k8s test filter
# script's "kubernetes" key; confirm against the caller.
kubernetes:
- k8s-block-volume
- k8s-limit-range
- k8s-number-cpus
- k8s-oom
- k8s-inotify

View File

@ -0,0 +1,8 @@
#
# Copyright (c) 2021 IBM
#
# SPDX-License-Identifier: Apache-2.0
# k8s test file names (without the .bats suffix) to be skipped on this
# platform - presumably the skip list read by the k8s test filter
# script's "kubernetes" key; confirm against the caller.
kubernetes:
- k8s-caps
- k8s-inotify

View File

@ -0,0 +1,42 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Resolve the nginx image under test and the pod config directory.
setup() {
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
pod_name="handlers"
get_pod_config_dir
}
# Create a pod with postStart/preStop lifecycle handlers and verify the
# postStart handler ran (it writes a greeting to /usr/share/message).
@test "Running with postStart and preStop handlers" {
# Render the pod yaml from the template.
# NOTE(review): the ${nginx_version} placeholder is replaced with the
# *full image name* ($nginx_image), not just the version - confirm the
# template expects a complete image reference at that placeholder.
sed -e "s/\${nginx_version}/${nginx_image}/" \
"${pod_config_dir}/lifecycle-events.yaml" > "${pod_config_dir}/test-lifecycle-events.yaml"
# Create the pod with postStart and preStop handlers
kubectl create -f "${pod_config_dir}/test-lifecycle-events.yaml"
# Wait until the pod is Ready (or time out).
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Check the postStart handler's message is present in the container.
display_message="cat /usr/share/message"
check_postStart=$(kubectl exec $pod_name -- sh -c "$display_message" | grep "Hello from the postStart handler")
}
# Dump debug info, then remove the pod and the generated yaml.
teardown(){
# Debugging information
kubectl describe "pod/$pod_name"
rm -f "${pod_config_dir}/test-lifecycle-events.yaml"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,55 @@
#!/usr/bin/env bats
#
# Copyright (c) 2021 Apple Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Define the pod under test and the expected capability-mask pattern.
setup() {
pod_name="pod-caps"
get_pod_config_dir
# We expect the capabilities mask to vary per distribution and runtime
# configuration. Even so, we should expect a few common items to
# not be set in the mask unless we are failing to apply capabilities. If
# we fail to configure, we'll see all bits set for permitted: 0x03fffffffff
# We do expect certain parts of the mask to be common when we set appropriately:
# b20..b23 should be cleared for all (no CAP_SYS_{PACCT, ADMIN, NICE, BOOT})
# b0..b11 are consistent across the distros:
# 0x5fb: 0101 1111 1011
# | | \- should be cleared (CAP_DAC_READ_SEARCH)
# | \- should be cleared (CAP_LINUX_IMMUTABLE)
# \- should be cleared (CAP_NET_BROADCAST)
# Example match:
# CapPrm: 00000000a80425fb
expected="CapPrm.*..0..5fb$"
}
# Launch the pod and verify the permitted-capability mask both in the
# container's log output and from an exec'd shell.
@test "Check capabilities of pod" {
# Create pod
kubectl create -f "${pod_config_dir}/pod-caps.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Verify expected capabilities for the running container. Add retry to ensure
# that the container had time to execute:
wait_time=5
sleep_time=1
cmd="kubectl logs $pod_name | grep -q $expected"
waitForProcess "$wait_time" "$sleep_time" "$cmd"
# Verify expected capabilities from exec context:
kubectl exec "$pod_name" -- sh -c "cat /proc/self/status" | grep -q "$expected"
}
# Print expected vs. observed masks for debugging, then delete the pod.
teardown() {
# Debugging information
echo "expected capability mask:"
echo "$expected"
echo "observed: "
kubectl logs "pod/$pod_name"
kubectl exec "$pod_name" -- sh -c "cat /proc/self/status | grep Cap"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,43 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
	get_pod_config_dir
}

# Verify that ConfigMap values are exposed to a pod as environment
# variables.
@test "ConfigMap for a pod" {
	config_name="test-configmap"
	pod_name="config-env-test-pod"

	# Create ConfigMap
	kubectl create -f "${pod_config_dir}/configmap.yaml"

	# View the values of the keys
	kubectl get configmaps "$config_name" -o yaml | grep -q "data-"

	# Create a pod that consumes the ConfigMap
	kubectl create -f "${pod_config_dir}/pod-configmap.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout="$timeout" pod "$pod_name"

	# Check both ConfigMap keys made it into the container environment.
	# (quote "$cmd": the original passed it unquoted to sh -c, which only
	# worked because the command happened to be a single word)
	cmd="env"
	kubectl exec "$pod_name" -- sh -c "$cmd" | grep "KUBE_CONFIG_1=value-1"
	kubectl exec "$pod_name" -- sh -c "$cmd" | grep "KUBE_CONFIG_2=value-2"
}

teardown() {
	# Debugging information
	kubectl describe "pod/$pod_name"

	kubectl delete pod "$pod_name"
	kubectl delete configmap "$config_name"
}

View File

@ -0,0 +1,83 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Common fixtures: pod config dir plus the file name/content to copy.
setup() {
get_pod_config_dir
file_name="file.txt"
content="Hello"
}
# Copy a host file into a running pod with "kubectl cp" and check its
# content from inside the container.
@test "Copy file in a pod" {
# Create pod
pod_name="pod-copy-file-from-host"
ctr_name="ctr-copy-file-from-host"
# Instantiate the busybox template with this test's pod/container names.
pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
cp "$pod_config_dir/busybox-template.yaml" "$pod_config"
sed -i "s/POD_NAME/$pod_name/" "$pod_config"
sed -i "s/CTR_NAME/$ctr_name/" "$pod_config"
kubectl create -f "${pod_config}"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Create a file
echo "$content" > "$file_name"
# Copy file into a pod
kubectl cp "$file_name" $pod_name:/tmp
# Verify the copied file's content inside the container.
kubectl exec $pod_name -- sh -c "cat /tmp/$file_name | grep $content"
}
# Create a file inside the pod, copy it back with "kubectl cp" and
# verify the content on the host.
@test "Copy from pod to host" {
# Create pod
pod_name="pod-copy-file-to-host"
ctr_name="ctr-copy-file-to-host"
pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
cp "$pod_config_dir/busybox-template.yaml" "$pod_config"
sed -i "s/POD_NAME/$pod_name/" "$pod_config"
sed -i "s/CTR_NAME/$ctr_name/" "$pod_config"
kubectl create -f "${pod_config}"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Debug output only; failures here are deliberately ignored.
kubectl logs "$pod_name" || true
kubectl describe pod "$pod_name" || true
kubectl get pods --all-namespaces
# Create a file in the pod
kubectl exec "$pod_name" -- sh -c "cd /tmp && echo $content > $file_name"
# More debug output; failures again ignored on purpose.
kubectl logs "$pod_name" || true
kubectl describe pod "$pod_name" || true
kubectl get pods --all-namespaces
# Copy file from pod to host
kubectl cp "$pod_name":/tmp/"$file_name" "$file_name"
# Verify content
cat "$file_name" | grep "$content"
}
# Dump debug info and remove the pod plus the temporary files.
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
rm -f "$file_name"
kubectl delete pod "$pod_name"
rm -f "$pod_config"
}

View File

@ -0,0 +1,76 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Skip on firecracker; define the expected CPU constraint values and the
# cgroup paths checked inside the container.
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
pod_name="constraints-cpu-test"
container_name="first-cpu-container"
# NOTE(review): these are cgroup v1 paths; on a cgroup v2 guest the
# files live elsewhere - confirm the guest image uses v1.
sharessyspath="/sys/fs/cgroup/cpu/cpu.shares"
quotasyspath="/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
periodsyspath="/sys/fs/cgroup/cpu/cpu.cfs_period_us"
total_cpus=2
total_requests=512
total_cpu_container=1
get_pod_config_dir
}
# Verify the CPU settings from pod-cpu.yaml are visible in the
# container: online cpu count, cpu.shares, and the quota/period ratio.
@test "Check CPU constraints" {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
# Create the pod
kubectl create -f "${pod_config_dir}/pod-cpu.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
retries="10"
num_cpus_cmd='grep -e "^processor" /proc/cpuinfo |wc -l'
# Retry: the cpu count may take a moment to reach the expected value.
for _ in $(seq 1 "$retries"); do
# Get number of cpus
total_cpus_container=$(kubectl exec pod/"$pod_name" -c "$container_name" \
-- sh -c "$num_cpus_cmd")
# Verify number of cpus
[ "$total_cpus_container" -le "$total_cpus" ]
[ "$total_cpus_container" -eq "$total_cpus" ] && break
sleep 1
done
[ "$total_cpus_container" -eq "$total_cpus" ]
# Check the total of requests
total_requests_container=$(kubectl exec $pod_name -c $container_name \
-- sh -c "cat $sharessyspath")
[ "$total_requests_container" -eq "$total_requests" ]
# Check the cpus inside the container
total_cpu_quota=$(kubectl exec $pod_name -c $container_name \
-- sh -c "cat $quotasyspath")
total_cpu_period=$(kubectl exec $pod_name -c $container_name \
-- sh -c "cat $periodsyspath")
# quota/period (integer division) should equal the cpu limit.
division_quota_period=$(echo $((total_cpu_quota/total_cpu_period)))
[ "$division_quota_period" -eq "$total_cpu_container" ]
}
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,62 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
}
# Verify secrets are exposed to pods both as files on a mounted volume
# and as environment variables.
@test "Credentials using secrets" {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
secret_name="test-secret"
pod_name="secret-test-pod"
second_pod_name="secret-envars-test-pod"
# Create the secret
kubectl create -f "${pod_config_dir}/inject_secret.yaml"
# View information about the secret
kubectl get secret "${secret_name}" -o yaml | grep "type: Opaque"
# Create a pod that has access to the secret through a volume
kubectl create -f "${pod_config_dir}/pod-secret.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# List the files mounted from the secret volume
cmd="ls /tmp/secret-volume"
kubectl exec $pod_name -- sh -c "$cmd" | grep -w "password"
kubectl exec $pod_name -- sh -c "$cmd" | grep -w "username"
# Create a pod that has access to the secret data through environment variables
kubectl create -f "${pod_config_dir}/pod-secret-env.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$second_pod_name"
# Check the secret-backed environment variables are present.
second_cmd="printenv"
kubectl exec $second_pod_name -- sh -c "$second_cmd" | grep -w "SECRET_USERNAME"
kubectl exec $second_pod_name -- sh -c "$second_cmd" | grep -w "SECRET_PASSWORD"
}
# Dump debug info, then remove both pods and the secret.
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"
kubectl describe "pod/$second_pod_name"
kubectl delete pod "$pod_name" "$second_pod_name"
kubectl delete secret "$secret_name"
}

View File

@ -0,0 +1,34 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
pod_name="custom-dns-test"
file_name="/etc/resolv.conf"
get_pod_config_dir
}
# The custom DNS settings from pod-custom-dns.yaml must appear in the
# container's /etc/resolv.conf.
@test "Check custom dns" {
# Create the pod
kubectl create -f "${pod_config_dir}/pod-custom-dns.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Check dns config at /etc/resolv.conf
kubectl exec "$pod_name" -- cat "$file_name" | grep -q "nameserver 1.2.3.4"
kubectl exec "$pod_name" -- cat "$file_name" | grep -q "search dns.test.search"
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,74 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Compare two values; on mismatch, print "expected: X, got: Y" and
# return 1. Returns 0 when they are equal.
# Arguments: $1 - expected value, $2 - actual value
assert_equal() {
	local want="$1"
	local got="$2"
	[[ "$want" == "$got" ]] && return 0
	echo "expected: $want, got: $got"
	return 1
}
setup() {
	pod_name="sharevol-kata"
	get_pod_config_dir
	pod_logs_file=""
}

# Verify an emptyDir volume is mounted as tmpfs and is writable up to
# its size limit (50M).
@test "Empty dir volumes" {
	# Create the pod
	kubectl create -f "${pod_config_dir}/pod-empty-dir.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# Check volume mounts
	cmd="mount | grep cache"
	kubectl exec $pod_name -- sh -c "$cmd" | grep "/tmp/cache type tmpfs"

	# Check it can write up to the volume limit (50M).
	# Escape \$? so the *container's* dd exit status is echoed; the
	# original "echo $?" expanded on the host at assignment time and
	# always baked the host's previous status into the command string.
	cmd="dd if=/dev/zero of=/tmp/cache/file1 bs=1M count=50; echo \$?"
	kubectl exec $pod_name -- sh -c "$cmd" | tail -1 | grep 0
}

@test "Empty dir volume when FSGroup is specified with non-root container" {
	# This is a reproducer of k8s e2e "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root" test
	pod_file="${pod_config_dir}/pod-empty-dir-fsgroup.yaml"
	agnhost_name="${container_images_agnhost_name}"
	agnhost_version="${container_images_agnhost_version}"
	image="${agnhost_name}:${agnhost_version}"

	# Try to avoid timeout by prefetching the image.
	sed -e "s#\${agnhost_image}#${image}#" "$pod_file" |\
		kubectl create -f -
	cmd="kubectl get pods ${pod_name} | grep Completed"
	waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"

	# Both containers must have logged the expected owner UID/GID.
	pod_logs_file="$(mktemp)"
	for container in mounttest-container mounttest-container-2; do
		kubectl logs "$pod_name" "$container" > "$pod_logs_file"

		# Check owner UID of file
		uid=$(cat $pod_logs_file | grep 'owner UID of' | sed 's/.*:\s//')
		assert_equal "1001" "$uid"

		# Check owner GID of file
		gid=$(cat $pod_logs_file | grep 'owner GID of' | sed 's/.*:\s//')
		assert_equal "123" "$gid"
	done
}

teardown() {
	# Debugging information
	kubectl describe "pod/$pod_name"

	kubectl delete pod "$pod_name"
	[ ! -f "$pod_logs_file" ] || rm -f "$pod_logs_file"
}

View File

@ -0,0 +1,40 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
	pod_name="test-env"
	get_pod_config_dir
}

# Check the environment variables defined in pod-env.yaml show up
# inside the container with the expected values.
@test "Environment variables" {
	# Create pod
	kubectl create -f "${pod_config_dir}/pod-env.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout="$timeout" pod "$pod_name"

	# Print environment variables
	# (quote "$cmd" when passing to sh -c; the original relied on the
	# command being a single word to survive word splitting)
	cmd="printenv"
	kubectl exec "$pod_name" -- sh -c "$cmd" | grep "MY_POD_NAME=$pod_name"
	kubectl exec "$pod_name" -- sh -c "$cmd" | \
		grep "HOST_IP=\([0-9]\+\(\.\|$\)\)\{4\}"
	# Requested 32Mi of memory
	kubectl exec "$pod_name" -- sh -c "$cmd" | \
		grep "MEMORY_REQUESTS=$((1024 * 1024 * 32))"
	# Memory limits allocated by the node
	kubectl exec "$pod_name" -- sh -c "$cmd" | grep "MEMORY_LIMITS=[1-9]\+"
}

teardown() {
	# Debugging information
	kubectl describe "pod/$pod_name"

	kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,65 @@
#!/usr/bin/env bats
#
# Copyright (c) 2020 Ant Financial
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Pod and container names used across the exec cases.
setup() {
get_pod_config_dir
pod_name="busybox"
first_container_name="first-test-container"
second_container_name="second-test-container"
}
# Exercise "kubectl exec": plain command, stdin via heredoc, exit-code
# propagation, and per-container targeting with -c.
@test "Kubectl exec" {
# Create the pod
kubectl create -f "${pod_config_dir}/busybox-pod.yaml"
# Wait for the pod to become Ready.
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Run commands in Pod
## Cases for -it options
# TODO: enable -i option after updated to new CRI-O
# see: https://github.com/kata-containers/tests/issues/2770
# kubectl exec -i "$pod_name" -- ls -tl /
# kubectl exec -it "$pod_name" -- ls -tl /
kubectl exec "$pod_name" -- date
## Case for stdin
kubectl exec -i "$pod_name" -- sh <<-EOF
echo abc > /tmp/abc.txt
grep abc /tmp/abc.txt
exit
EOF
## Case for return value
### Command return non-zero code
run bash -c "kubectl exec -i $pod_name -- sh <<-EOF
exit 123
EOF"
echo "run status: $status" 1>&2
echo "run output: $output" 1>&2
# The remote shell's exit code must be propagated by kubectl exec.
[ "$status" -eq 123 ]
## Cases for target container
### First container
container_name=$(kubectl exec $pod_name -c $first_container_name -- env | grep CONTAINER_NAME)
[ "$container_name" == "CONTAINER_NAME=$first_container_name" ]
### Second container
container_name=$(kubectl exec $pod_name -c $second_container_name -- env | grep CONTAINER_NAME)
[ "$container_name" == "CONTAINER_NAME=$second_container_name" ]
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,58 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Generate an ssh keypair and a configmap embedding the public key used
# to reach the footloose container over ssh.
setup() {
pod_name="footubuntu"
config_name="ssh-config-map"
get_pod_config_dir
# Creates ssh-key
key_path=$(mktemp --tmpdir)
public_key_path="${key_path}.pub"
echo -e 'y\n' | sudo ssh-keygen -t rsa -N "" -f "$key_path"
# Create ConfigMap.yaml
configmap_yaml="${pod_config_dir}/footloose-rsa-configmap.yaml"
# Splice the public key file into the template in place of the
# ${ssh_key} placeholder line.
sed -e "/\${ssh_key}/r ${public_key_path}" -e "/\${ssh_key}/d" \
"${pod_config_dir}/footloose-configmap.yaml" > "$configmap_yaml"
# Prepend a space before "ssh-rsa" (yaml indentation fix-up).
sed -i 's/ssh-rsa/ ssh-rsa/' "$configmap_yaml"
}
# Boot the footloose pod and check we can ssh into it as root.
@test "Footloose pod" {
cmd="uname -r"
sleep_connect="10"
# Create ConfigMap
kubectl create -f "$configmap_yaml"
# Create pod
kubectl create -f "${pod_config_dir}/pod-footloose.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Get pod ip
pod_ip=$(kubectl get pod $pod_name --template={{.status.podIP}})
# Exec to the pod
kubectl exec $pod_name -- sh -c "$cmd"
# Give sshd time to come up, then connect and run the same command.
sleep "$sleep_connect"
ssh -i "$key_path" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 2>/dev/null root@"$pod_ip" "$cmd"
}
# Remove the pod, configmap, generated yaml and the ssh keypair.
teardown() {
kubectl delete pod "$pod_name"
kubectl delete configmap "$config_name"
sudo rm -rf "$public_key_path"
sudo rm -rf "$key_path"
sudo rm -rf "$configmap_yaml"
}

View File

@ -0,0 +1,46 @@
#!/usr/bin/env bats
#
# Copyright (c) 2021 Apple Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
}
# Ensure a configmap update is propagated into a running pod without
# the container erroring out.
@test "configmap update works, and preserves symlinks" {
pod_name="inotify-configmap-testing"
# Create configmap for my deployment
kubectl apply -f "${pod_config_dir}"/inotify-configmap.yaml
# Create deployment that expects identity-certs
kubectl apply -f "${pod_config_dir}"/inotify-configmap-pod.yaml
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Update configmap
kubectl apply -f "${pod_config_dir}"/inotify-updated-configmap.yaml
# Ideally we'd wait for the pod to complete...
# (configmap propagation has no readiness signal, hence a fixed sleep)
sleep 120
# Verify we saw the update: container status must not report Error.
result=$(kubectl get pod "$pod_name" --output="jsonpath={.status.containerStatuses[]}")
echo $result | grep -vq Error
# NOTE(review): this delete only runs on the success path; consider
# moving it to teardown so a failed test also cleans up "cm".
kubectl delete configmap cm
}
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,49 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
get_pod_config_dir
}
# Run the pi-computation Job to completion and verify its pod output.
@test "Run a job to completion" {
job_name="job-pi-test"
# Create job
kubectl apply -f "${pod_config_dir}/job.yaml"
# Verify job
kubectl describe jobs/"$job_name" | grep "SuccessfulCreate"
# List pods that belong to the job
pod_name=$(kubectl get pods --selector=job-name=$job_name --output=jsonpath='{.items[*].metadata.name}')
# Verify that the job is completed
cmd="kubectl get pods -o jsonpath='{.items[*].status.phase}' | grep Succeeded"
waitForProcess "$wait_time" "$sleep_time" "$cmd"
# Verify the output of the pod
pi_number="3.14"
kubectl logs "$pod_name" | grep "$pi_number"
}
# Delete the job's pod and the job itself, asserting both are gone.
teardown() {
kubectl delete pod "$pod_name"
# Verify that pod is not running
run kubectl get pods
echo "$output"
[[ "$output" =~ "No resources found" ]]
kubectl delete jobs/"$job_name"
# Verify that the job is not running
run kubectl get jobs
echo "$output"
[[ "$output" =~ "No resources found" ]]
}

View File

@ -0,0 +1,37 @@
#!/usr/bin/env bats
#
# Copyright (c) 2022 AntGroup Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
	pod_name="busybox"
	first_container_name="first-test-container"
	get_pod_config_dir
}

# Ensure no leftover "tail" process from another container is visible
# in the first container's PID namespace.
@test "Check PID namespaces" {
	# Create the pod
	kubectl create -f "${pod_config_dir}/initcontainer-shareprocesspid.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout="$timeout" pod "$pod_name"

	# Check PID from first container
	first_pid_container=$(kubectl exec "$pod_name" -c "$first_container_name" \
		-- ps | grep "tail" || true)
	# Verify that the tail process doesn't exist.
	# (quote the variable: unquoted, a multi-word ps match made
	# "[ -z ... ]" a test *syntax error* instead of a clean failure)
	[ -z "$first_pid_container" ] || die "found processes pid: $first_pid_container"
}

teardown() {
	# Debugging information
	kubectl describe "pod/$pod_name"

	kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,41 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
get_pod_config_dir
namespace_name="default-cpu-example"
pod_name="default-cpu-test"
}
# A LimitRange in the namespace must apply its default CPU limit (the
# 500m from limit-range.yaml) to a pod that doesn't specify one.
@test "Limit range for storage" {
# Create namespace
kubectl create namespace "$namespace_name"
# Create the LimitRange in the namespace
kubectl create -f "${pod_config_dir}/limit-range.yaml" --namespace=${namespace_name}
# Create the pod
kubectl create -f "${pod_config_dir}/pod-cpu-defaults.yaml" --namespace=${namespace_name}
# Wait for the pod to become Ready.
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" --namespace="$namespace_name"
# Check limits
# Find the 500 millicpus specified at the yaml
kubectl describe pod "$pod_name" --namespace="$namespace_name" | grep "500m"
}
# NOTE(review): the describe/delete below omit --namespace, so they
# target the default namespace while the pod lives in $namespace_name -
# confirm whether deleting the namespace is what actually cleans up.
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
kubectl delete namespaces "$namespace_name"
}

View File

@ -0,0 +1,80 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Common fixtures: wait time for the probe to fail, agnhost image info.
setup() {
sleep_liveness=20
agnhost_name="${container_images_agnhost_name}"
agnhost_version="${container_images_agnhost_version}"
get_pod_config_dir
}
# Exec-based liveness probe: first succeeds, later reports failure.
@test "Liveness probe" {
pod_name="liveness-exec"
# Create pod
kubectl create -f "${pod_config_dir}/pod-liveness.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check liveness probe returns a success code
kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1"
# Sleep necessary to check liveness probe returns a failure code
sleep "$sleep_liveness"
kubectl describe pod "$pod_name" | grep "Liveness probe failed"
}
# HTTP liveness probe: after the sleep, the events must show the
# container being (re)started.
@test "Liveness http probe" {
pod_name="liveness-http"
# Create pod
sed -e "s#\${agnhost_image}#${agnhost_name}:${agnhost_version}#" \
"${pod_config_dir}/pod-http-liveness.yaml" |\
kubectl create -f -
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check liveness probe returns a success code
kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1"
# After the sleep, check the "Started container" event is present.
sleep "$sleep_liveness"
kubectl describe pod "$pod_name" | grep "Started container"
}
# TCP liveness probe: same shape as the HTTP case.
@test "Liveness tcp probe" {
pod_name="tcptest"
# Create pod
sed -e "s#\${agnhost_image}#${agnhost_name}:${agnhost_version}#" \
"${pod_config_dir}/pod-tcp-liveness.yaml" |\
kubectl create -f -
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check liveness probe returns a success code
kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1"
# After the sleep, check the "Started container" event is present.
sleep "$sleep_liveness"
kubectl describe pod "$pod_name" | grep "Started container"
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,56 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
pod_name="memory-test"
get_pod_config_dir
}
# Render pod-memory-limit.yaml with the current $memory_limit_size and
# $allocated_size values; the result is written to stdout.
setup_yaml() {
sed \
-e "s/\${memory_size}/${memory_limit_size}/" \
-e "s/\${memory_allocated}/${allocated_size}/" \
"${pod_config_dir}/pod-memory-limit.yaml"
}
# Allocating more memory than the pod's limit must make pod creation fail.
@test "Exceeding memory constraints" {
memory_limit_size="50Mi"
allocated_size="250M"
# Create test .yaml
setup_yaml > "${pod_config_dir}/test_exceed_memory.yaml"
# Create the pod exceeding memory constraints
run kubectl create -f "${pod_config_dir}/test_exceed_memory.yaml"
[ "$status" -ne 0 ]
rm -f "${pod_config_dir}/test_exceed_memory.yaml"
}
# A pod allocating within its memory limit must become Ready.
@test "Running within memory constraints" {
memory_limit_size="600Mi"
allocated_size="150M"
# Create test .yaml
setup_yaml > "${pod_config_dir}/test_within_memory.yaml"
# Create the pod within memory constraints
kubectl create -f "${pod_config_dir}/test_within_memory.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
rm -f "${pod_config_dir}/test_within_memory.yaml"
kubectl delete pod "$pod_name"
}
# The pod is deleted in the tests themselves; describe may fail here,
# hence the "|| true".
teardown() {
# Debugging information
kubectl describe "pod/$pod_name" || true
}

View File

@ -0,0 +1,39 @@
#!/usr/bin/env bats
#
# Copyright (c) 2021 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
pod_name="nested-configmap-secret-pod"
}
# Mount a secret volume nested inside a configmap volume and verify
# both values are readable from inside the container.
@test "Nested mount of a secret volume in a configmap volume for a pod" {
# Creates a configmap, secret and pod that mounts the secret inside the configmap
kubectl create -f "${pod_config_dir}/pod-nested-configmap-secret.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check config/secret value are correct
[ "myconfig" == $(kubectl exec $pod_name -- cat /config/config_key) ]
[ "mysecret" == $(kubectl exec $pod_name -- cat /config/secret/secret_key) ]
}
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"
# Delete the configmap, secret, and pod used for testing
kubectl delete -f "${pod_config_dir}/pod-nested-configmap-secret.yaml"
}

View File

@ -0,0 +1,53 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Fixtures: nginx image under test, a busybox client image and the
# deployment name.
setup() {
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
busybox_image="busybox"
deployment="nginx-deployment"
get_pod_config_dir
}
# Expose an nginx deployment as a service and verify another pod can
# fetch its index page by service name.
@test "Verify nginx connectivity between pods" {
# Render the deployment yaml from the template.
# NOTE(review): the ${nginx_version} placeholder is replaced with the
# *full image name* ($nginx_image) - confirm the template expects a
# complete image reference at that placeholder.
sed -e "s/\${nginx_version}/${nginx_image}/" \
"${pod_config_dir}/${deployment}.yaml" > "${pod_config_dir}/test-${deployment}.yaml"
kubectl create -f "${pod_config_dir}/test-${deployment}.yaml"
kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment}
kubectl expose deployment/${deployment}
# Retry wget against the service name until it answers (up to
# $wait_time attempts, one second apart).
busybox_pod="test-nginx"
kubectl run $busybox_pod --restart=Never -it --image="$busybox_image" \
-- sh -c 'i=1; while [ $i -le '"$wait_time"' ]; do wget --timeout=5 '"$deployment"' && break; sleep 1; i=$(expr $i + 1); done'
# check pod's status, it should be Succeeded.
# or {.status.containerStatuses[0].state.terminated.reason} = "Completed"
[ $(kubectl get pods/$busybox_pod -o jsonpath="{.status.phase}") = "Succeeded" ]
kubectl logs "$busybox_pod" | grep "index.html"
}
# Dump debug info, then remove every object the test created.
teardown() {
# Debugging information
kubectl describe "pod/$busybox_pod"
kubectl get "pod/$busybox_pod" -o yaml
kubectl logs "$busybox_pod"
kubectl get deployment/${deployment} -o yaml
kubectl get service/${deployment} -o yaml
kubectl get endpoints/${deployment} -o yaml
rm -f "${pod_config_dir}/test-${deployment}.yaml"
kubectl delete deployment "$deployment"
kubectl delete service "$deployment"
kubectl delete pod "$busybox_pod"
}

View File

@ -0,0 +1,47 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
pod_name="cpu-test"
container_name="c1"
get_pod_config_dir
}
# Skip on aarch64 due to missing cpu hotplug related functionality.
@test "Check number of cpus" {
	# Launch the pod under test and wait for it to be running.
	kubectl create -f "${pod_config_dir}/pod-number-cpu.yaml"
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	retries="10"
	max_number_cpus="3"
	num_cpus_cmd='cat /proc/cpuinfo |grep processor|wc -l'

	# Poll until the container reports the expected CPU count; CPU
	# hotplug inside the guest can take a moment to settle.
	for ((attempt = 1; attempt <= retries; attempt++)); do
		vcpus=$(kubectl exec pod/"$pod_name" -c "$container_name" \
			-- sh -c "$num_cpus_cmd")
		if [[ "$vcpus" =~ ^[0-9]+$ ]]; then
			# The count must never exceed the configured limit...
			[ "$vcpus" -le "$max_number_cpus" ]
			# ...and once it reaches the limit we are done.
			[ "$vcpus" -eq "$max_number_cpus" ] && break
		fi
		sleep 1
	done
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,37 @@
#!/usr/bin/env bats
#
# Copyright (c) 2020 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
pod_name="pod-oom"
get_pod_config_dir
}
@test "Test OOM events for pods" {
	# Create a pod whose container is expected to exceed its memory
	# limit and be OOM-killed by the kernel.
	kubectl create -f "${pod_config_dir}/${pod_name}.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# Poll until the container's termination reason is reported as
	# OOMKilled. Build the command as a single quoted string instead of
	# the previous nested-quote construction (which relied on the outer
	# quotes closing and reopening around $pod_name).
	cmd="kubectl get pods ${pod_name} -o jsonpath='{.status.containerStatuses[0].state.terminated.reason}' | grep OOMKilled"
	waitForProcess "$wait_time" "$sleep_time" "$cmd"

	# NOTE(review): the previous version removed
	# "${pod_config_dir}/test_pod_oom.yaml" here, but no such file is
	# ever generated by this test, so the stray rm has been dropped.
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl get "pod/$pod_name" -o yaml
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,39 @@
#!/usr/bin/env bats
#
# Copyright (c) 2021 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
get_pod_config_dir
}
@test "Optional and Empty ConfigMap Volume for a pod" {
	config_name="empty-config"
	pod_name="optional-empty-config-test-pod"

	# Create Empty ConfigMap
	kubectl create configmap "$config_name"

	# Create a pod that consumes the "empty-config" and "optional-missing-config" ConfigMaps as volumes
	kubectl create -f "${pod_config_dir}/pod-optional-empty-configmap.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# Check configmap folders exist.
	# The command passed to `sh -c` must be a single quoted string:
	# unquoted, only `ls` was handed to the shell (the path became $0),
	# so the mount points were never actually listed.
	kubectl exec "$pod_name" -- sh -c "ls /empty-config"
	kubectl exec "$pod_name" -- sh -c "ls /optional-missing-config"
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
kubectl delete configmap "$config_name"
}

View File

@ -0,0 +1,39 @@
#!/usr/bin/env bats
#
# Copyright (c) 2021 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
get_pod_config_dir
}
@test "Optional and Empty Secret Volume for a pod" {
	secret_name="empty-secret"
	pod_name="optional-empty-secret-test-pod"

	# Create Empty Secret
	kubectl create secret generic "$secret_name"

	# Create a pod that consumes the "empty-secret" and "optional-missing-secret" Secrets as volumes
	kubectl create -f "${pod_config_dir}/pod-optional-empty-secret.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# Check secret folders exist.
	# The command passed to `sh -c` must be a single quoted string:
	# unquoted, only `ls` was handed to the shell (the path became $0),
	# so the mount points were never actually listed.
	kubectl exec "$pod_name" -- sh -c "ls /empty-secret"
	kubectl exec "$pod_name" -- sh -c "ls /optional-missing-secret"
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
kubectl delete secret "$secret_name"
}

View File

@ -0,0 +1,48 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
get_pod_config_dir
job_name="jobtest"
names=( "test1" "test2" "test3" )
}
@test "Parallel jobs" {
	# Instantiate one job yaml per name from the shared template.
	for item in "${names[@]}"; do
		sed "s/\$ITEM/$item/" "${pod_config_dir}/job-template.yaml" \
			> "${pod_config_dir}/job-$item.yaml"
	done

	# Submit every generated job.
	for item in "${names[@]}"; do
		kubectl create -f "${pod_config_dir}/job-$item.yaml"
	done

	# The jobs are all labelled with jobgroup=$job_name.
	kubectl get jobs -l "jobgroup=${job_name}"

	# Wait for every job pod to come up.
	kubectl wait --for=condition=Ready --timeout=$timeout pod -l "jobgroup=${job_name}"

	# Dump each job pod's output.
	for job_pod in $(kubectl get pods -l "jobgroup=${job_name}" -o name); do
		kubectl logs "${job_pod}"
	done
}
teardown() {
# Delete jobs
kubectl delete jobs -l jobgroup=${job_name}
# Remove generated yaml files
for i in "${names[@]}"; do
rm -f ${pod_config_dir}/job-$i.yaml
done
}

View File

@ -0,0 +1,48 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
pod_name="busybox"
first_container_name="first-test-container"
second_container_name="second-test-container"
get_pod_config_dir
}
@test "Check PID namespaces" {
	# Create the pod
	kubectl create -f "${pod_config_dir}/busybox-pod.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# Grab the /pause process entry as seen from the first container.
	first_pid_container=$(kubectl exec "$pod_name" -c "$first_container_name" \
		-- ps | grep "/pause")
	# Verify that it is not empty. The previous check used
	# `$(echo $var | wc -l) == 1`, which is true even for an empty
	# string (echo still emits a newline), so it could never fail.
	[ -n "$first_pid_container" ]

	# Grab the /pause process entry as seen from the second container.
	second_pid_container=$(kubectl exec "$pod_name" -c "$second_container_name" \
		-- ps | grep "/pause")
	[ -n "$second_pid_container" ]

	# With shareProcessNamespace enabled both containers must observe
	# the exact same pause process line.
	[ "$first_pid_container" == "$second_pid_container" ]
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,37 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
get_pod_config_dir
}
@test "Pod quota" {
resource_name="pod-quota"
deployment_name="deploymenttest"
namespace="test-quota-ns"
# Create the resourcequota
kubectl create -f "${pod_config_dir}/resource-quota.yaml"
# View information about resourcequota
kubectl get -n "$namespace" resourcequota "$resource_name" \
--output=yaml | grep 'pods: "2"'
# Create deployment
kubectl create -f "${pod_config_dir}/pod-quota-deployment.yaml"
# View deployment
kubectl wait --for=condition=Available --timeout=$timeout \
-n "$namespace" deployment/${deployment_name}
}
teardown() {
kubectl delete -n "$namespace" deployment "$deployment_name"
kubectl delete -f "${pod_config_dir}/resource-quota.yaml"
}

View File

@ -0,0 +1,71 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
source "/etc/os-release" || source "/usr/lib/os-release"
issue="https://github.com/kata-containers/runtime/issues/1834"
setup() {
skip "test not working see: ${issue}"
get_pod_config_dir
}
@test "Port forwarding" {
	skip "test not working see: ${issue}"
	deployment_name="redis-master"

	# Create deployment
	kubectl apply -f "${pod_config_dir}/redis-master-deployment.yaml"

	# Check deployment
	kubectl wait --for=condition=Available --timeout=$timeout deployment/"$deployment_name"
	kubectl expose deployment/"$deployment_name"

	# Get pod name
	pod_name=$(kubectl get pods --output=jsonpath={.items..metadata.name})
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# View replicaset
	kubectl get rs

	# Create service
	kubectl apply -f "${pod_config_dir}/redis-master-service.yaml"

	# Check service
	kubectl get svc | grep redis

	# Check redis service
	port_redis=$(kubectl get pods $pod_name --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}')

	# Verify that redis is running in the pod and listening on port
	port=6379
	[ "$port_redis" -eq "$port" ]

	# Forward a local port to a port on the pod, silencing all output.
	# The previous redirection `(2&>1 cmd > /dev/null)` was broken: it
	# does not mean "stderr to stdout" — the correct form is
	# `>/dev/null 2>&1` placed after the command.
	(kubectl port-forward "$pod_name" 7000:"$port" > /dev/null 2>&1) &

	# Run redis-cli
	retries="10"
	ok="0"
	for _ in $(seq 1 "$retries"); do
		if sudo -E redis-cli -p 7000 ping | grep -q "PONG" ; then
			ok="1"
			break;
		fi
		sleep 1
	done
	[ "$ok" -eq "1" ]
}
teardown() {
skip "test not working see: ${issue}"
kubectl delete -f "${pod_config_dir}/redis-master-deployment.yaml"
kubectl delete -f "${pod_config_dir}/redis-master-service.yaml"
}

View File

@ -0,0 +1,63 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
}
@test "Projected volume" {
	[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
	password="1f2d1e2e67df"
	username="admin"
	pod_name="test-projected-volume"

	# Temp files holding the credentials (names are read by teardown,
	# so they must stay TMP_FILE / SECOND_TMP_FILE).
	TMP_FILE=$(mktemp username.XXXX)
	SECOND_TMP_FILE=$(mktemp password.XXXX)
	echo "$username" > "$TMP_FILE"
	echo "$password" > "$SECOND_TMP_FILE"

	# Package the files into the two secrets the pod projects.
	kubectl create secret generic user --from-file="$TMP_FILE"
	kubectl create secret generic pass --from-file="$SECOND_TMP_FILE"

	# Create the pod and wait for it to be running.
	kubectl create -f "${pod_config_dir}/pod-projected-volume.yaml"
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# Both projected sources must be present in the volume...
	cmd="ls /projected-volume | grep username"
	kubectl exec "$pod_name" -- sh -c "$cmd"
	sec_cmd="ls /projected-volume | grep password"
	kubectl exec "$pod_name" -- sh -c "$sec_cmd"

	# ...and their contents must match what was put in the secrets.
	check_cmd="cat /projected-volume/username*"
	kubectl exec "$pod_name" -- sh -c "$check_cmd" | grep "$username"
	sec_check_cmd="cat /projected-volume/password*"
	kubectl exec "$pod_name" -- sh -c "$sec_check_cmd" | grep "$password"
}
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"
rm -f $TMP_FILE $SECOND_TMP_FILE
kubectl delete pod "$pod_name"
kubectl delete secret pass user
}

View File

@ -0,0 +1,58 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
TEST_INITRD="${TEST_INITRD:-no}"
# Not working on ARM CI see https://github.com/kata-containers/tests/issues/4727
setup() {
get_pod_config_dir
}
@test "Guaranteed QoS" {
pod_name="qos-test"
# Create pod
kubectl create -f "${pod_config_dir}/pod-guaranteed.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check pod class
kubectl get pod "$pod_name" --output=yaml | grep "qosClass: Guaranteed"
}
@test "Burstable QoS" {
pod_name="burstable-test"
# Create pod
kubectl create -f "${pod_config_dir}/pod-burstable.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check pod class
kubectl get pod "$pod_name" --output=yaml | grep "qosClass: Burstable"
}
@test "BestEffort QoS" {
pod_name="besteffort-test"
# Create pod
kubectl create -f "${pod_config_dir}/pod-besteffort.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check pod class
kubectl get pod "$pod_name" --output=yaml | grep "qosClass: BestEffort"
}
teardown() {
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,62 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
get_pod_config_dir
}
@test "Replication controller" {
	replication_name="replicationtest"

	# Create yaml: substitute the pinned nginx image into the template.
	sed -e "s/\${nginx_version}/${nginx_image}/" \
		"${pod_config_dir}/replication-controller.yaml" > "${pod_config_dir}/test-replication-controller.yaml"

	# Create replication controller
	kubectl create -f "${pod_config_dir}/test-replication-controller.yaml"

	# Check replication controller: poll until describe succeeds and
	# mentions the controller, i.e. the object exists.
	local cmd="kubectl describe replicationcontrollers/$replication_name | grep replication-controller"
	waitForProcess "$wait_time" "$sleep_time" "$cmd"

	# The declared replica count must be a positive number.
	number_of_replicas=$(kubectl get replicationcontrollers/"$replication_name" \
		--output=jsonpath='{.spec.replicas}')
	[ "${number_of_replicas}" -gt 0 ]

	# The replicas pods can be in running, waiting, succeeded or failed
	# status. We need them all on running state before proceed.
	cmd="kubectl describe rc/\"${replication_name}\""
	cmd+="| grep \"Pods Status\" | grep \"${number_of_replicas} Running\""
	waitForProcess "$wait_time" "$sleep_time" "$cmd"

	# Check number of pods created for the
	# replication controller is equal to the
	# number of replicas that we defined
	launched_pods=($(kubectl get pods --selector=app=nginx-rc-test \
		--output=jsonpath={.items..metadata.name}))
	[ "${#launched_pods[@]}" -eq "$number_of_replicas" ]

	# Check pod creation: every launched pod must become Ready.
	for pod_name in ${launched_pods[@]}; do
		cmd="kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name"
		waitForProcess "$wait_time" "$sleep_time" "$cmd"
	done
}
teardown() {
# Debugging information
kubectl describe replicationcontrollers/"$replication_name"
rm -f "${pod_config_dir}/test-replication-controller.yaml"
kubectl delete rc "$replication_name"
}

View File

@ -0,0 +1,36 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
nginx_version="${docker_images_nginx_version}"
nginx_image="nginx:$nginx_version"
replicas="3"
deployment="nginx-deployment"
get_pod_config_dir
}
@test "Scale nginx deployment" {
	# Instantiate the deployment yaml with the pinned nginx image.
	sed -e "s/\${nginx_version}/${nginx_image}/" \
		"${pod_config_dir}/${deployment}.yaml" > "${pod_config_dir}/test-${deployment}.yaml"

	kubectl create -f "${pod_config_dir}/test-${deployment}.yaml"
	kubectl wait --for=condition=Available --timeout=$timeout "deployment/${deployment}"
	kubectl expose "deployment/${deployment}"
	kubectl scale "deployment/${deployment}" --replicas=${replicas}

	# Wait until the deployment reports every replica as available.
	scale_check="kubectl get deployment/${deployment} -o yaml | grep 'availableReplicas: ${replicas}'"
	waitForProcess "$wait_time" "$sleep_time" "$scale_check"
}
teardown() {
rm -f "${pod_config_dir}/test-${deployment}.yaml"
kubectl delete deployment "$deployment"
kubectl delete service "$deployment"
}

View File

@ -0,0 +1,35 @@
#
# Copyright (c) 2021 Red Hat
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
pod_name="seccomp-container"
get_pod_config_dir
}
@test "Support seccomp runtime/default profile" {
	# /proc/self/status reports "Seccomp: 2" when a filter is active.
	expected_seccomp_mode="2"

	# Create pod
	kubectl create -f "${pod_config_dir}/pod-seccomp.yaml"

	# Wait it to complete
	cmd="kubectl get pods ${pod_name} | grep Completed"
	waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"

	# Expect Seccomp on mode 2 (filter): strip the "Seccomp:" prefix
	# from the container's log line, leaving just the mode digit.
	# NOTE(review): teardown reads $seccomp_mode; bats runs teardown in
	# the same shell as the test, so the variable is visible there.
	seccomp_mode="$(kubectl logs ${pod_name} | sed 's/Seccomp:\s*\([0-9]\)/\1/')"
	[ "$seccomp_mode" -eq "$expected_seccomp_mode" ]
}
teardown() {
# For debugging purpose
echo "seccomp mode is ${seccomp_mode}, expected $expected_seccomp_mode"
kubectl describe "pod/${pod_name}"
kubectl delete -f "${pod_config_dir}/pod-seccomp.yaml" || true
}

View File

@ -0,0 +1,35 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
get_pod_config_dir
}
@test "Security context" {
	pod_name="security-context-test"

	# Create pod
	kubectl create -f "${pod_config_dir}/pod-security-context.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# Check that the workload process is owned by UID 1000, as requested
	# by the pod's securityContext.
	cmd="ps --user 1000 -f"
	process="tail -f /dev/null"
	# $cmd must be quoted: unquoted it word-splits and `sh -c` receives
	# only `ps` (with `--user 1000 -f` as positional parameters), so the
	# user filter was never applied.
	kubectl exec "$pod_name" -- sh -c "$cmd" | grep "$process"
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,51 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
get_pod_config_dir
}
@test "Containers with shared volume" {
pod_name="test-shared-volume"
first_container_name="busybox-first-container"
second_container_name="busybox-second-container"
# Create pod
kubectl create -f "${pod_config_dir}/pod-shared-volume.yaml"
# Check pods
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Communicate containers
cmd="cat /tmp/pod-data"
msg="Hello from the $second_container_name"
kubectl exec "$pod_name" -c "$first_container_name" -- sh -c "$cmd" | grep "$msg"
}
@test "initContainer with shared volume" {
pod_name="initcontainer-shared-volume"
last_container="last"
# Create pod
kubectl create -f "${pod_config_dir}/initContainer-shared-volume.yaml"
# Check pods
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
cmd='test $(cat /volume/initContainer) -lt $(cat /volume/container)'
kubectl exec "$pod_name" -c "$last_container" -- sh -c "$cmd"
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,34 @@
#!/usr/bin/env bats
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
pod_name="sysctl-test"
get_pod_config_dir
}
@test "Setting sysctl" {
# Create pod
kubectl apply -f "${pod_config_dir}/pod-sysctl.yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
# Check sysctl configuration
cmd="cat /proc/sys/kernel/shm_rmid_forced"
result=$(kubectl exec $pod_name -- sh -c "$cmd")
[ "${result}" = 0 ]
}
teardown() {
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
}

View File

@ -0,0 +1,68 @@
#!/bin/bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -e

kubernetes_dir=$(dirname "$(readlink -f "$0")")

TARGET_ARCH="${TARGET_ARCH:-x86_64}"
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
K8S_TEST_DEBUG="${K8S_TEST_DEBUG:-false}"

if [ -n "${K8S_TEST_UNION:-}" ]; then
	# Deliberate word-splitting: callers pass a space-separated list of
	# bats files via the K8S_TEST_UNION environment variable.
	# shellcheck disable=SC2206
	K8S_TEST_UNION=($K8S_TEST_UNION)
else
	K8S_TEST_UNION=( \
		"k8s-attach-handlers.bats" \
		"k8s-caps.bats" \
		"k8s-configmap.bats" \
		"k8s-copy-file.bats" \
		"k8s-cpu-ns.bats" \
		"k8s-credentials-secrets.bats" \
		"k8s-custom-dns.bats" \
		"k8s-empty-dirs.bats" \
		"k8s-env.bats" \
		"k8s-exec.bats" \
		"k8s-inotify.bats" \
		"k8s-job.bats" \
		"k8s-kill-all-process-in-container.bats" \
		"k8s-limit-range.bats" \
		"k8s-liveness-probes.bats" \
		"k8s-memory.bats" \
		"k8s-nested-configmap-secret.bats" \
		"k8s-number-cpus.bats" \
		"k8s-oom.bats" \
		"k8s-optional-empty-configmap.bats" \
		"k8s-optional-empty-secret.bats" \
		"k8s-parallel.bats" \
		"k8s-pid-ns.bats" \
		"k8s-pod-quota.bats" \
		"k8s-port-forward.bats" \
		"k8s-projected-volume.bats" \
		"k8s-qos-pods.bats" \
		"k8s-replication.bats" \
		"k8s-scale-nginx.bats" \
		"k8s-seccomp.bats" \
		"k8s-sysctls.bats" \
		"k8s-security-context.bats" \
		"k8s-shared-volume.bats" \
		"k8s-nginx-connectivity.bats" \
	)
fi

# we may need to skip a few test cases when running on non-x86_64 arch
arch_config_file="${kubernetes_dir}/filter_out_per_arch/${TARGET_ARCH}.yaml"
if [ -f "${arch_config_file}" ]; then
	# Quote both arguments: the config path and the space-joined test
	# list must each reach the filter script as a single argument.
	arch_k8s_test_union=$("${kubernetes_dir}/filter_k8s_test.sh" "${arch_config_file}" "${K8S_TEST_UNION[*]}")
	mapfile -d " " -t K8S_TEST_UNION <<< "${arch_k8s_test_union}"
fi

# NOTE(review): `info` is presumably provided by common.bash — confirm
# this script sources it before this point.
info "Run tests"
# Quote the array expansion so entries are passed as-is; the previous
# unquoted form re-split them on whitespace.
for K8S_TEST_ENTRY in "${K8S_TEST_UNION[@]}"
do
	bats "${K8S_TEST_ENTRY}"
done

View File

@ -0,0 +1,32 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: busybox
spec:
terminationGracePeriodSeconds: 0
shareProcessNamespace: true
runtimeClassName: kata
containers:
- name: first-test-container
image: quay.io/prometheus/busybox:latest
env:
- name: CONTAINER_NAME
value: "first-test-container"
command:
- sleep
- "30"
- name: second-test-container
image: quay.io/prometheus/busybox:latest
env:
- name: CONTAINER_NAME
value: "second-test-container"
command:
- sleep
- "30"
stdin: true
tty: true

View File

@ -0,0 +1,19 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: POD_NAME
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
shareProcessNamespace: true
containers:
- name: CTR_NAME
image: quay.io/prometheus/busybox:latest
command:
- sleep
- "120"

View File

@ -0,0 +1,12 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: ConfigMap
metadata:
name: test-configmap
data:
data-1: value-1
data-2: value-2

View File

@ -0,0 +1,12 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
data:
authorized_keys: |
${ssh_key}
kind: ConfigMap
metadata:
name: ssh-config-map

View File

@ -0,0 +1,29 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: initcontainer-shared-volume
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
initContainers:
- name: first
image: quay.io/prometheus/busybox:latest
command: [ "sh", "-c", "echo ${EPOCHREALTIME//.} > /volume/initContainer" ]
volumeMounts:
- mountPath: /volume
name: volume
containers:
- name: last
image: quay.io/prometheus/busybox:latest
command: [ "sh", "-c", "echo ${EPOCHREALTIME//.} > /volume/container; tail -f /dev/null" ]
volumeMounts:
- mountPath: /volume
name: volume
volumes:
- name: volume
emptyDir: {}

View File

@ -0,0 +1,26 @@
#
# Copyright (c) 2022 AntGroup Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: busybox
spec:
terminationGracePeriodSeconds: 0
shareProcessNamespace: true
runtimeClassName: kata
initContainers:
- name: first
image: quay.io/prometheus/busybox:latest
command: [ "sh", "-c", "echo 'nohup tail -f /dev/null >/dev/null 2>&1 &' > /init.sh && chmod +x /init.sh && /init.sh" ]
containers:
- name: first-test-container
image: quay.io/prometheus/busybox:latest
env:
- name: CONTAINER_NAME
value: "first-test-container"
command:
- sleep
- "300"

View File

@ -0,0 +1,12 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Secret
metadata:
name: test-secret
data:
username: bXktYXBw
password: Mzk1MjgkdmRnN0pi

View File

@ -0,0 +1,32 @@
#
# Copyright (c) 2021 Apple Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: v1
kind: Pod
metadata:
name: inotify-configmap-testing
spec:
containers:
- name: c1
image: quay.io/kata-containers/fsnotify:latest
command: ["bash"]
args: ["-c", "inotifywait --timeout 120 -r /config/ && [[ -L /config/config.toml ]] && echo success" ]
resources:
requests:
cpu: 1
memory: 50Mi
limits:
cpu: 1
memory: 1024Mi
volumeMounts:
- name: config
mountPath: /config
runtimeClassName: kata
restartPolicy: Never
volumes:
- name: config
configMap:
name: cm

View File

@ -0,0 +1,13 @@
#
# Copyright (c) 2021 Apple Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: v1
data:
config.toml: |-
foo original...
kind: ConfigMap
metadata:
name: cm

View File

@ -0,0 +1,14 @@
#
# Copyright (c) 2021 Apple Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: v1
data:
config.toml: |-
foo original...
... updated
kind: ConfigMap
metadata:
name: cm

View File

@ -0,0 +1,25 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: batch/v1
kind: Job
metadata:
name: process-item-$ITEM
labels:
jobgroup: jobtest
spec:
template:
metadata:
name: jobtest
labels:
jobgroup: jobtest
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]
restartPolicy: Never

View File

@ -0,0 +1,20 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: batch/v1
kind: Job
metadata:
name: job-pi-test
spec:
template:
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: pi
image: quay.io/prometheus/busybox:latest
command: ["/bin/sh", "-c", "echo 'scale=5; 4*a(1)' | bc -l"]
restartPolicy: Never
backoffLimit: 4

View File

@ -0,0 +1,9 @@
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
name: kata
handler: kata
overhead:
podFixed:
memory: "160Mi"
cpu: "250m"

View File

@ -0,0 +1,23 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: handlers
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: handlers-container
image: quay.io/sjenning/${nginx_version}
lifecycle:
postStart:
exec:
command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
preStop:
exec:
command: ["/usr/sbin/nginx","-s","quit"]

View File

@ -0,0 +1,16 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: LimitRange
metadata:
name: cpu-limit-range
spec:
limits:
- default:
cpu: 1
defaultRequest:
cpu: 0.5
type: Container

View File

@ -0,0 +1,26 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
selector:
matchLabels:
app: nginx
replicas: 2
template:
metadata:
labels:
app: nginx
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: nginx
image: quay.io/sjenning/${nginx_version}
ports:
- containerPort: 80

View File

@ -0,0 +1,16 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: besteffort-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: qos-besteffort
image: quay.io/prometheus/busybox:latest
command: ["/bin/sh", "-c", "tail -f /dev/null"]

View File

@ -0,0 +1,21 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: burstable-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: qos-burstable
image: quay.io/prometheus/busybox:latest
command: ["/bin/sh", "-c", "tail -f /dev/null"]
resources:
limits:
memory: "200Mi"
requests:
memory: "100Mi"

View File

@ -0,0 +1,18 @@
#
# Copyright (c) 2021 Apple Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: pod-caps
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test-container
image: quay.io/prometheus/busybox:latest
command: ["sh"]
args: ["-c", "cat /proc/self/status | grep Cap && sleep infinity"]
restartPolicy: Never

View File

@ -0,0 +1,28 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: config-env-test-pod
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test-container
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]
env:
- name: KUBE_CONFIG_1
valueFrom:
configMapKeyRef:
name: test-configmap
key: data-1
- name: KUBE_CONFIG_2
valueFrom:
configMapKeyRef:
name: test-configmap
key: data-2
restartPolicy: Never

View File

@ -0,0 +1,16 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: default-cpu-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: default-cpu-demo-ctr
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]

View File

@ -0,0 +1,23 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: constraints-cpu-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: first-cpu-container
image: quay.io/prometheus/busybox:latest
command:
- sleep
- "30"
resources:
limits:
cpu: "1"
requests:
cpu: "500m"

View File

@ -0,0 +1,23 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
namespace: default
name: custom-dns-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]
dnsPolicy: "None"
dnsConfig:
nameservers:
- 1.2.3.4
searches:
- dns.test.search

View File

@ -0,0 +1,44 @@
#
# Copyright (c) 2021 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: sharevol-kata
spec:
runtimeClassName: kata
restartPolicy: Never
securityContext:
runAsUser: 1001
fsGroup: 123
containers:
- name: mounttest-container
image: ${agnhost_image}
args:
- mounttest
- --fs_type=/test-volume
- --new_file_0660=/test-volume/test-file
- --file_perm=/test-volume/test-file
- --file_owner=/test-volume/test-file
volumeMounts:
- name: emptydir-volume
mountPath: /test-volume
- name: mounttest-container-2
image: ${agnhost_image}
args:
- mounttest
- --fs_type=/test-volume-2
- --new_file_0660=/test-volume-2/test-file
- --file_perm=/test-volume-2/test-file
- --file_owner=/test-volume-2/test-file
volumeMounts:
- name: mem-emptydir-volume
mountPath: /test-volume-2
volumes:
- name: emptydir-volume
emptyDir: {}
- name: mem-emptydir-volume
emptyDir:
medium: Memory

View File

@ -0,0 +1,28 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: sharevol-kata
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]
volumeMounts:
- name: host-empty-vol
mountPath: "/host/cache"
- name: memory-empty-vol
mountPath: "/tmp/cache"
volumes:
- name: host-empty-vol
emptyDir: {}
- name: memory-empty-vol
emptyDir:
medium: Memory
sizeLimit: "50M"

View File

@ -0,0 +1,46 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: test-env
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test-container
image: quay.io/prometheus/busybox:latest
command: [ "sh", "-c"]
args:
- while true; do
echo -en '\n';
printenv MY_POD_NAME;
printenv HOST_IP;
printenv MEMORY_REQUESTS;
printenv MEMORY_LIMITS;
sleep 1;
done;
resources:
requests:
memory: "32Mi"
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: MEMORY_REQUESTS
valueFrom:
resourceFieldRef:
resource: requests.memory
- name: MEMORY_LIMITS
valueFrom:
resourceFieldRef:
resource: limits.memory
restartPolicy: Never

View File

@ -0,0 +1,26 @@
#
# Copyright (c) 2022 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: test-file-volume
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
restartPolicy: Never
volumes:
- name: shared-file
hostPath:
path: HOST_FILE
type: File
containers:
- name: busybox-file-volume-container
image: busybox
volumeMounts:
- name: shared-file
mountPath: MOUNT_PATH
command: ["/bin/sh"]
args: ["-c", "tail -f /dev/null"]

View File

@ -0,0 +1,59 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: footubuntu
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
volumes:
- name: runv
emptyDir:
medium: "Memory"
- name: runlockv
emptyDir:
medium: "Memory"
- name: tmpv
emptyDir:
medium: "Memory"
- name: fakecgroup
hostPath:
path: /sys/fs/cgroup
- name: ssh-dir
emptyDir:
medium: "Memory"
- name: ssh-config-map
configMap:
name: ssh-config-map
defaultMode: 384
containers:
- name: vmcontainer
image: quay.io/footloose/ubuntu18.04:latest
command: ["/sbin/init"]
volumeMounts:
- name: runv
mountPath: /run
- name: runlockv
mountPath: /run/lock
- name: tmpv
mountPath: /tmp
- name: fakecgroup
readOnly: true
mountPath: /sys/fs/cgroup
- name: ssh-dir
mountPath: /root/.ssh
- name: ssh-config-map
mountPath: /root/.ssh/authorized_keys
subPath: authorized_keys
# These containers are run during pod initialization
initContainers:
- name: install
image: quay.io/prometheus/busybox:latest
command: ["sh", "-c", "chmod 700 /root/.ssh"]
volumeMounts:
- name: ssh-dir
mountPath: /root/.ssh

View File

@ -0,0 +1,23 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: qos-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: qos-guaranteed
image: quay.io/prometheus/busybox:latest
command: ["/bin/sh", "-c", "tail -f /dev/null"]
resources:
limits:
memory: "200Mi"
cpu: "700m"
requests:
memory: "200Mi"
cpu: "700m"

View File

@ -0,0 +1,25 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
labels:
test: liveness-test
name: liveness-http
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: liveness
image: ${agnhost_image}
args:
- liveness
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 3
periodSeconds: 3

View File

@ -0,0 +1,30 @@
#
# Copyright (c) 2022 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: hugepage-pod
spec:
runtimeClassName: kata
containers:
- name: hugepage-container
image: quay.io/prometheus/busybox:latest
command: ["/bin/sh"]
args: ["-c", "tail -f /dev/null"]
volumeMounts:
- mountPath: /hugepages
name: hugepage
resources:
limits:
hugepages-${hugepages_size}: 512Mi
memory: 512Mi
requests:
hugepages-${hugepages_size}: 512Mi
memory: 512Mi
volumes:
- name: hugepage
emptyDir:
medium: HugePages

View File

@ -0,0 +1,28 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-exec
spec:
  terminationGracePeriodSeconds: 0
  runtimeClassName: kata
  containers:
  - name: liveness
    image: quay.io/prometheus/busybox:latest
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; echo "Check status"; sleep 6; rm -rf /tmp/healthy; echo "Check dead"; sleep 12
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 3
      periodSeconds: 3

View File

@ -0,0 +1,23 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: memory-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: memory-test-ctr
image: quay.io/kata-containers/sysbench-kata:latest
imagePullPolicy: IfNotPresent
resources:
limits:
memory: "${memory_size}"
requests:
memory: "500Mi"
command: ["stress"]
args: ["--vm", "1", "--vm-bytes", "${memory_allocated}", "--vm-hang", "1"]

View File

@ -0,0 +1,44 @@
#
# Copyright (c) 2021 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: ConfigMap
metadata:
name: config
data:
config_key: myconfig
---
apiVersion: v1
data:
secret_key: bXlzZWNyZXQ= #mysecret
kind: Secret
metadata:
name: secret
type: Opaque
---
apiVersion: v1
kind: Pod
metadata:
name: nested-configmap-secret-pod
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test-container
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]
volumeMounts:
- mountPath: /config
name: config
- mountPath: /config/secret
name: secret
volumes:
- name: secret
secret:
secretName: secret
- name: config
configMap:
name: config
restartPolicy: Never

View File

@ -0,0 +1,27 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
  name: cpu-test
spec:
  terminationGracePeriodSeconds: 0
  runtimeClassName: kata
  containers:
  - name: c1
    image: quay.io/prometheus/busybox:latest
    command: ["tail", "-f", "/dev/null"]
    resources:
      limits:
        cpu: "500m"
  - name: c2
    image: quay.io/prometheus/busybox:latest
    command:
    - sleep
    - "10"
    resources:
      limits:
        cpu: "500m"

View File

@ -0,0 +1,25 @@
#
# Copyright (c) 2020 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: pod-oom
namespace: default
spec:
runtimeClassName: kata
restartPolicy: Never
containers:
- image: quay.io/kata-containers/sysbench-kata:latest
imagePullPolicy: IfNotPresent
name: oom-test
command: ["/bin/sh"]
args: ["-c", "sleep 2; stress --vm 2 --vm-bytes 400M --timeout 30s"]
resources:
limits:
memory: 500Mi
requests:
memory: 500Mi

View File

@ -0,0 +1,30 @@
#
# Copyright (c) 2021 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: optional-empty-config-test-pod
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test-container
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]
volumeMounts:
- mountPath: /empty-config
name: empty-config
- mountPath: /optional-missing-config
name: optional-missing-config
volumes:
- name: empty-config
configMap:
name: empty-config
- name: optional-missing-config
configMap:
name: optional-missing-config
optional: true
restartPolicy: Never

View File

@ -0,0 +1,30 @@
#
# Copyright (c) 2021 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: optional-empty-secret-test-pod
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test-container
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]
volumeMounts:
- mountPath: /empty-secret
name: empty-secret
- mountPath: /optional-missing-secret
name: optional-missing-secret
volumes:
- name: empty-secret
secret:
secretName: empty-secret
- name: optional-missing-secret
secret:
secretName: optional-missing-secret
optional: true
restartPolicy: Never

View File

@ -0,0 +1,28 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: test-projected-volume
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test-projected-volume
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]
volumeMounts:
- name: all-in-one
mountPath: "/projected-volume"
readOnly: true
volumes:
- name: all-in-one
projected:
sources:
- secret:
name: user
- secret:
name: pass

View File

@ -0,0 +1,26 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: deploymenttest
namespace: test-quota-ns
spec:
selector:
matchLabels:
purpose: quota-demo
replicas: 2
template:
metadata:
labels:
purpose: quota-demo
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: pod-quota-demo
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]

View File

@ -0,0 +1,27 @@
#
# Copyright (c) 2021 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: test-readonly-volume
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
restartPolicy: Never
volumes:
- name: shared-data
hostPath:
path: /tmp
type: Directory
containers:
- name: busybox-ro-volume-container
image: busybox
volumeMounts:
- name: shared-data
mountPath: /tmp
readOnly: true
command: ["/bin/sh"]
args: ["-c", "tail -f /dev/null"]

View File

@ -0,0 +1,22 @@
#
# Copyright (c) 2021 Red Hat
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: seccomp-container
annotations:
io.katacontainers.config.runtime.disable_guest_seccomp: "false"
spec:
runtimeClassName: kata
terminationGracePeriodSeconds: 0
restartPolicy: Never
containers:
- name: busybox
image: quay.io/prometheus/busybox:latest
command: ["grep", "Seccomp:", "/proc/self/status"]
securityContext:
seccompProfile:
type: RuntimeDefault

View File

@ -0,0 +1,27 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: secret-envars-test-pod
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: envars-test-container
image: quay.io/prometheus/busybox:latest
command: ["/bin/sh", "-c", "tail -f /dev/null"]
env:
- name: SECRET_USERNAME
valueFrom:
secretKeyRef:
name: test-secret
key: username
- name: SECRET_PASSWORD
valueFrom:
secretKeyRef:
name: test-secret
key: password

View File

@ -0,0 +1,25 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: secret-test-pod
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: test-container
image: quay.io/prometheus/busybox:latest
command: ["/bin/sh", "-c", "tail -f /dev/null"]
volumeMounts:
# name must match the volume name below
- name: secret-volume
mountPath: /tmp/secret-volume
# The secret data is exposed to Containers in the Pod through a Volume.
volumes:
- name: secret-volume
secret:
secretName: test-secret

View File

@ -0,0 +1,18 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: security-context-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
securityContext:
runAsUser: 1000
containers:
- name: sec-text
image: quay.io/prometheus/busybox:latest
command: ["tail", "-f", "/dev/null"]

View File

@ -0,0 +1,31 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
  name: test-shared-volume
spec:
  terminationGracePeriodSeconds: 0
  runtimeClassName: kata
  restartPolicy: Never
  volumes:
  - name: shared-data
    emptyDir: {}
  containers:
  - name: busybox-first-container
    image: quay.io/prometheus/busybox:latest
    volumeMounts:
    - name: shared-data
      mountPath: /tmp
    command: ["/bin/sh"]
    args: ["-c", "tail -f /dev/null"]
  - name: busybox-second-container
    image: quay.io/prometheus/busybox:latest
    volumeMounts:
    - name: shared-data
      mountPath: /tmp
    command: ["/bin/sh"]
    args: ["-c", "echo Hello from the busybox-second-container > /tmp/pod-data && tail -f /dev/null"]

View File

@ -0,0 +1,28 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-test
spec:
  terminationGracePeriodSeconds: 0
  runtimeClassName: kata
  securityContext:
    sysctls:
    - name: kernel.shm_rmid_forced
      value: "0"
  containers:
  - name: test
    securityContext:
      privileged: true
    image: quay.io/prometheus/busybox:latest
    command: ["tail", "-f", "/dev/null"]
  initContainers:
  - name: init-sys
    securityContext:
      privileged: true
    image: quay.io/prometheus/busybox:latest
    command: ['sh', '-c', 'echo "64000" > /proc/sys/vm/max_map_count']

View File

@ -0,0 +1,31 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: tcptest
labels:
app: tcp-liveness
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: tcp-liveness
image: ${agnhost_image}
args:
- liveness
ports:
- containerPort: 8080
readinessProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 15
periodSeconds: 20

View File

@ -0,0 +1,36 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-master
  labels:
    app: redis
spec:
  selector:
    matchLabels:
      app: redis
      role: master
      tier: backend
  replicas: 1
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      terminationGracePeriodSeconds: 0
      runtimeClassName: kata
      containers:
      - name: master
        image: quay.io/libpod/redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379

View File

@ -0,0 +1,21 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Service
metadata:
name: redis-master
labels:
app: redis
role: master
tier: backend
spec:
ports:
- port: 6379
targetPort: 6379
selector:
app: redis
role: master
tier: backend

View File

@ -0,0 +1,26 @@
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: ReplicationController
metadata:
name: replicationtest
spec:
replicas: 1
selector:
app: nginx-rc-test
template:
metadata:
name: nginx
labels:
app: nginx-rc-test
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: nginxtest
image: quay.io/sjenning/${nginx_version}
ports:
- containerPort: 80

View File

@ -0,0 +1,20 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: Namespace
  metadata:
    name: test-quota-ns
- apiVersion: v1
  kind: ResourceQuota
  metadata:
    name: pod-quota
    namespace: test-quota-ns
  spec:
    hard:
      pods: "2"

View File

@ -0,0 +1,13 @@
#
# Copyright (c) 2021 IBM Corp.
#
# SPDX-License-Identifier: Apache-2.0
#
# Builds an Ubuntu-based image that provides the "stress" tool
# (presumably consumed by the memory/OOM k8s tests — verify against callers).
# The image has only the 'latest' tag so it needs to ignore DL3007
#hadolint ignore=DL3007
FROM quay.io/libpod/ubuntu:latest

# Install "stress" and drop the apt caches/lists to keep the image small.
RUN apt-get -y update && \
    apt-get -y upgrade && \
    apt-get -y --no-install-recommends install stress && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

View File

@ -0,0 +1,24 @@
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
name: vfio
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: c1
image: quay.io/prometheus/busybox:latest
command:
- sh
tty: true
stdin: true
resources:
limits:
intel.com/virtio_net: "1"
requests:
intel.com/virtio_net: "1"

View File

@ -0,0 +1,38 @@
#
# Copyright (c) 2021 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# This file contains variables and functions common to all the kubernetes
# e2e tests; it is meant to be sourced by the test scripts, not executed.
#

# Variables used by the kubernetes tests.

# Container image names/versions referenced by the test workloads.
export docker_images_nginx_version="1.15-alpine"
export container_images_agnhost_name="k8s.gcr.io/e2e-test-images/agnhost"
export container_images_agnhost_version="2.21"

# Timeout options, mainly for use with waitForProcess(). Use them unless the
# operation needs to wait longer.
wait_time=90
sleep_time=3

# Timeout for use with `kubectl wait`, unless it needs to wait longer.
# Note: try to keep timeout and wait_time equal.
timeout=90s

# Tracking issue for features that can't be tested yet
# (presumably Firecracker limitations, per the "fc_" prefix — verify callers).
fc_limitations="https://github.com/kata-containers/documentation/issues/351"

# Path to the kubeconfig file which is used by kubectl and other tools.
# Note: the init script sets that variable but if you want to run the tests in
# your own provisioned cluster and you know what you are doing then you should
# overwrite it.
export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}"
#######################################
# Point the tests at the directory holding the pod YAML fixtures.
# Globals:
#   BATS_TEST_DIRNAME (read)   - directory of the current BATS test file.
#   pod_config_dir    (written) - consumed by the individual k8s tests.
# Note: "info" is expected to be provided by another sourced helper.
#######################################
get_pod_config_dir() {
	pod_config_dir="${BATS_TEST_DIRNAME}/runtimeclass_workloads"
	info "k8s configured to use runtimeclass"
}