Merge pull request #9126 from GabyCT/topic/addartifactsk
gha: Storing artifacts for logs of k8s tests garm

.github/workflows/run-k8s-tests-on-garm.yaml (10 lines changed)

@@ -85,6 +85,16 @@ jobs:
         timeout-minutes: 30
         run: bash tests/integration/kubernetes/gha-run.sh run-tests
 
+      - name: Collect artifacts ${{ matrix.vmm }}
+        run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
+
+      - name: Archive artifacts ${{ matrix.vmm }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: k8s-tests-garm-${{ matrix.vmm }}
+          path: /tmp/artifacts
+          retention-days: 1
+
       - name: Delete kata-deploy
         if: always()
         run: bash tests/integration/kubernetes/gha-run.sh cleanup-garm
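
Once a run completes, the uploaded artifact can be fetched for local inspection. A minimal sketch using the GitHub CLI, assuming a qemu entry in matrix.vmm; the run id is illustrative:

# Download the artifact produced by the "Archive artifacts" step above
# into ./artifacts (run id and matrix entry are placeholders).
gh run download 1234567890 \
    --repo kata-containers/kata-containers \
    --name k8s-tests-garm-qemu \
    --dir ./artifacts
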

tests/integration/kubernetes/gha-run.sh

@@ -70,14 +70,14 @@ EOF
     case "${KUBERNETES}" in
         k3s)
             containerd_config_file="/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl"
-            sudo cp /var/lib/rancher/k3s/agent/etc/containerd/config.toml ${containerd_config_file}
+            sudo cp /var/lib/rancher/k3s/agent/etc/containerd/config.toml "${containerd_config_file}"
             ;;
         *) >&2 echo "${KUBERNETES} flavour is not supported"; exit 2 ;;
     esac
 
     # We're not using this with baremetal machines, so we're fine on cutting
     # corners here and just append this to the configuration file.
-    cat<<EOF | sudo tee -a ${containerd_config_file}
+    cat<<EOF | sudo tee -a "${containerd_config_file}"
 [plugins."io.containerd.snapshotter.v1.devmapper"]
   pool_name = "contd-thin-pool"
   base_image_size = "4096MB"
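
The configuration appended above points containerd's devmapper snapshotter at a thin pool named contd-thin-pool. As a sketch (assuming dmsetup is available on the runner, which is not part of this change), the pool can be checked before the config is applied:

# Confirm the thin pool referenced by the devmapper snapshotter exists
# and report its status.
sudo dmsetup ls | grep contd-thin-pool
sudo dmsetup status contd-thin-pool
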
@@ -85,19 +85,19 @@ EOF
 
     case "${KUBERNETES}" in
         k3s)
-            sudo sed -i -e 's/snapshotter = "overlayfs"/snapshotter = "devmapper"/g' ${containerd_config_file}
+            sudo sed -i -e 's/snapshotter = "overlayfs"/snapshotter = "devmapper"/g' "${containerd_config_file}"
             sudo systemctl restart k3s ;;
         *) >&2 echo "${KUBERNETES} flavour is not supported"; exit 2 ;;
     esac
 
     sleep 60s
-    sudo cat ${containerd_config_file}
+    sudo cat "${containerd_config_file}"
 }
 
 function configure_snapshotter() {
     echo "::group::Configuring ${SNAPSHOTTER}"
 
-    case ${SNAPSHOTTER} in
+    case "${SNAPSHOTTER}" in
         devmapper) configure_devmapper ;;
         *) >&2 echo "${SNAPSHOTTER} flavour is not supported"; exit 2 ;;
     esac
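
The sed call above switches the k3s containerd template from the overlayfs snapshotter to devmapper and restarts k3s. A quick check that the switch took effect, assuming the default k3s containerd socket path and that ctr is available on the runner:

# The devmapper snapshotter plugin should be listed with an "ok" status
# once k3s comes back up with the regenerated containerd configuration.
sudo ctr --address /run/k3s/containerd/containerd.sock plugins ls | grep devmapper
sudo grep snapshotter /var/lib/rancher/k3s/agent/etc/containerd/config.toml
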
@@ -195,6 +195,7 @@ function run_tests() {
 
     pushd "${kubernetes_dir}"
     bash setup.sh
+    export start_time=$(date '+%Y-%m-%d %H:%M:%S')
     if [[ "${KATA_HYPERVISOR}" = "dragonball" ]] && [[ "${SNAPSHOTTER}" = "devmapper" ]] || [[ "${KATA_HYPERVISOR}" = "cloud-hypervisor" ]] && [[ "${SNAPSHOTTER}" = "devmapper" ]]; then
         # cloud-hypervisor runtime-rs issue is https://github.com/kata-containers/kata-containers/issues/9034
         echo "Skipping tests for $KATA_HYPERVISOR using devmapper"
@@ -204,6 +205,18 @@ function run_tests() {
     popd
 }
 
+function collect_artifacts() {
+    local artifacts_dir="/tmp/artifacts"
+    if [ -d "${artifacts_dir}" ]; then
+        rm -rf "${artifacts_dir}"
+    fi
+    mkdir -p "${artifacts_dir}"
+    info "Collecting artifacts using ${KATA_HYPERVISOR} hypervisor"
+    local journalctl_log_filename="journalctl.log"
+    local journalctl_log_path="${artifacts_dir}/${journalctl_log_filename}"
+    sudo journalctl --since="$start_time" > "${journalctl_log_path}"
+}
+
 function cleanup_kata_deploy() {
     ensure_yq
 
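
The new collect_artifacts() pairs with the start_time exported in run_tests(): journalctl --since accepts the '%Y-%m-%d %H:%M:%S' timestamp recorded before the tests begin, so only journal entries produced during the run land in the artifact. The same pattern in isolation:

# Record a timestamp, run the workload, then capture only the journal
# entries written after that point (mirrors run_tests + collect_artifacts).
start_time=$(date '+%Y-%m-%d %H:%M:%S')
# ... run the tests here ...
mkdir -p /tmp/artifacts
sudo journalctl --since="$start_time" > /tmp/artifacts/journalctl.log
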
@@ -252,7 +265,7 @@ function cleanup() {
     get_nodes_and_pods_info
 
     if [ "${platform}" = "aks" ]; then
-        delete_cluster ${test_type}
+        delete_cluster "${test_type}"
         return
     fi
 
@@ -267,7 +280,7 @@ function deploy_snapshotter() {
     case ${SNAPSHOTTER} in
         nydus) deploy_nydus_snapshotter ;;
         *) >&2 echo "${SNAPSHOTTER} flavour is not supported"; exit 2 ;;
     esac
     echo "::endgroup::"
 }
 
@@ -299,9 +312,9 @@ function deploy_nydus_snapshotter() {
         # Enable guest pull feature in nydus snapshotter
         yq write -i misc/snapshotter/base/nydus-snapshotter.yaml 'data.FS_DRIVER' "proxy" --style=double
     else
         >&2 echo "Invalid pull type"; exit 2
     fi
 
     # Disable to read snapshotter config from configmap
     yq write -i misc/snapshotter/base/nydus-snapshotter.yaml 'data.ENABLE_CONFIG_FROM_VOLUME' "false" --style=double
     # Enable to run snapshotter as a systemd service
||||||
@@ -319,12 +332,12 @@ function deploy_nydus_snapshotter() {
|
|||||||
popd
|
popd
|
||||||
|
|
||||||
kubectl rollout status daemonset nydus-snapshotter -n nydus-system --timeout ${SNAPSHOTTER_DEPLOY_WAIT_TIMEOUT}
|
kubectl rollout status daemonset nydus-snapshotter -n nydus-system --timeout ${SNAPSHOTTER_DEPLOY_WAIT_TIMEOUT}
|
||||||
|
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
echo "::group::nydus snapshotter logs"
|
echo "::group::nydus snapshotter logs"
|
||||||
pods_name=$(kubectl get pods --selector=app=nydus-snapshotter -n nydus-system -o=jsonpath='{.items[*].metadata.name}')
|
pods_name=$(kubectl get pods --selector=app=nydus-snapshotter -n nydus-system -o=jsonpath='{.items[*].metadata.name}')
|
||||||
kubectl logs ${pods_name} -n nydus-system
|
kubectl logs "${pods_name}" -n nydus-system
|
||||||
kubectl describe pod ${pods_name} -n nydus-system
|
kubectl describe pod "${pods_name}" -n nydus-system
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -335,7 +348,7 @@ function cleanup_nydus_snapshotter() {
         >&2 echo "nydus snapshotter dir not found"
         exit 1
     fi
 
     pushd "$nydus_snapshotter_install_dir"
 
     if [ "${KUBERNETES}" = "k3s" ]; then
@@ -382,6 +395,7 @@ function main() {
         deploy-snapshotter) deploy_snapshotter ;;
         run-tests) run_tests ;;
         run-tests-kcli) run_tests "kcli" ;;
+        collect-artifacts) collect_artifacts ;;
         cleanup-kcli) cleanup "kcli" ;;
         cleanup-sev) cleanup "sev" ;;
         cleanup-snp) cleanup "snp" ;;
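
With the new case in main(), the "Collect artifacts" workflow step and a local run share the same entry point. A usage sketch, with illustrative values for the variables collect_artifacts() reads from the environment:

# collect_artifacts() uses KATA_HYPERVISOR for its log message and
# start_time as the journalctl cut-off, so set both when running it
# standalone (values below are placeholders).
export KATA_HYPERVISOR=qemu
export start_time="$(date '+%Y-%m-%d %H:%M:%S')"
bash tests/integration/kubernetes/gha-run.sh collect-artifacts
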