ci: shellcheck SC2250
Address the SC2250 (require-variable-braces) shellcheck warning in the CI scripts. There are no functional changes.

Related to: #10951

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
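For context, SC2250 is shellcheck's optional "require-variable-braces" style check: every parameter expansion should be written with explicit braces. A minimal before/after sketch of the style this commit applies, reusing the pkg variable that appears in the diff below:

    # flagged by SC2250: bare expansion
    echo building "$pkg"
    # accepted: braces mark the variable boundary explicitly
    echo building "${pkg}"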
parent d37d9feee9
commit 667e26036c
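How the optional check gets switched on in CI is not shown in this excerpt; a hedged sketch of two common ways to enable it for shellcheck (the script path is hypothetical):

    # one-off, on the command line
    shellcheck --enable=require-variable-braces ci/example.sh
    # or persistently, via a .shellcheckrc entry:
    # enable=require-variable-braces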
@@ -7,16 +7,16 @@
 set -e
 
 cidir=$(dirname "$0")
-runtimedir=$cidir/../src/runtime
+runtimedir=${cidir}/../src/runtime
 
 build_working_packages() {
 # working packages:
-device_api=$runtimedir/pkg/device/api
-device_config=$runtimedir/pkg/device/config
-device_drivers=$runtimedir/pkg/device/drivers
-device_manager=$runtimedir/pkg/device/manager
-rc_pkg_dir=$runtimedir/pkg/resourcecontrol/
-utils_pkg_dir=$runtimedir/virtcontainers/utils
+device_api=${runtimedir}/pkg/device/api
+device_config=${runtimedir}/pkg/device/config
+device_drivers=${runtimedir}/pkg/device/drivers
+device_manager=${runtimedir}/pkg/device/manager
+rc_pkg_dir=${runtimedir}/pkg/resourcecontrol/
+utils_pkg_dir=${runtimedir}/virtcontainers/utils
 
 # broken packages :( :
 #katautils=$runtimedir/pkg/katautils
@@ -24,15 +24,15 @@ build_working_packages() {
 #vc=$runtimedir/virtcontainers
 
 pkgs=(
-"$device_api"
-"$device_config"
-"$device_drivers"
-"$device_manager"
-"$utils_pkg_dir"
-"$rc_pkg_dir")
+"${device_api}"
+"${device_config}"
+"${device_drivers}"
+"${device_manager}"
+"${utils_pkg_dir}"
+"${rc_pkg_dir}")
 for pkg in "${pkgs[@]}"; do
-echo building "$pkg"
-pushd "$pkg" &>/dev/null
+echo building "${pkg}"
+pushd "${pkg}" &>/dev/null
 go build
 go test
 popd &>/dev/null
@@ -25,7 +25,7 @@ die()
 usage()
 {
 cat <<EOF
-Usage: $script_name [OPTIONS] [command] [arguments]
+Usage: ${script_name} [OPTIONS] [command] [arguments]
 
 Description: Utility to expand the abilities of the GitHub CLI tool, gh.
 
@@ -48,7 +48,7 @@ Examples:
 
 - List issues for a Pull Request 123 in kata-containers/kata-containers repo
 
-$ $script_name list-issues-for-pr 123
+$ ${script_name} list-issues-for-pr 123
 EOF
 }
 
@@ -57,11 +57,11 @@ list_issues_for_pr()
 local pr="${1:-}"
 local repo="${2:-kata-containers/kata-containers}"
 
-[ -z "$pr" ] && die "need PR"
+[ -z "${pr}" ] && die "need PR"
 
 local commits=$(gh pr view ${pr} --repo ${repo} --json commits --jq .commits[].messageBody)
 
-[ -z "$commits" ] && die "cannot determine commits for PR $pr"
+[ -z "${commits}" ] && die "cannot determine commits for PR ${pr}"
 
 # Extract the issue number(s) from the commits.
 #
@@ -78,7 +78,7 @@ list_issues_for_pr()
 #
 # "<git-commit> <git-commit-msg>"
 #
-local issues=$(echo "$commits" |\
+local issues=$(echo "${commits}" |\
 grep -v -E "^( | )" |\
 grep -i -E "fixes:* *(#*[0-9][0-9]*)" |\
 tr ' ' '\n' |\
@@ -86,16 +86,16 @@ list_issues_for_pr()
 sed 's/[.,\#]//g' |\
 sort -nu || true)
 
-[ -z "$issues" ] && die "cannot determine issues for PR $pr"
+[ -z "${issues}" ] && die "cannot determine issues for PR ${pr}"
 
 echo "# Issues linked to PR"
 echo "#"
 echo "# Fields: issue_number"
 
 local issue
-echo "$issues"|while read issue
+echo "${issues}"|while read issue
 do
-printf "%s\n" "$issue"
+printf "%s\n" "${issue}"
 done
 }
 
@@ -103,20 +103,20 @@ list_labels_for_issue()
 {
 local issue="${1:-}"
 
-[ -z "$issue" ] && die "need issue number"
+[ -z "${issue}" ] && die "need issue number"
 
 local labels=$(gh issue view ${issue} --repo kata-containers/kata-containers --json labels)
 
-[ -z "$labels" ] && die "cannot determine labels for issue $issue"
+[ -z "${labels}" ] && die "cannot determine labels for issue ${issue}"
 
-printf "$labels"
+printf "${labels}"
 }
 
 setup()
 {
 for cmd in gh jq
 do
-command -v "$cmd" &>/dev/null || die "need command: $cmd"
+command -v "${cmd}" &>/dev/null || die "need command: ${cmd}"
 done
 }
 
@@ -129,24 +129,24 @@ handle_args()
 
 while getopts "ahr:" opt "$@"
 do
-case "$opt" in
+case "${opt}" in
 a) show_all="true" ;;
 h) usage && exit 0 ;;
 r) repo="${OPTARG}" ;;
 esac
 done
 
-shift $(($OPTIND - 1))
+shift $((${OPTIND} - 1))
 
 local repo="${repo:-kata-containers/kata-containers}"
 local cmd="${1:-}"
 
-case "$cmd" in
+case "${cmd}" in
 list-issues-for-pr) ;;
 list-labels-for-issue) ;;
 
 "") usage && exit 0 ;;
-*) die "invalid command: '$cmd'" ;;
+*) die "invalid command: '${cmd}'" ;;
 esac
 
 # Consume the command name
@@ -155,20 +155,20 @@ handle_args()
 local issue=""
 local pr=""
 
-case "$cmd" in
+case "${cmd}" in
 list-issues-for-pr)
 pr="${1:-}"
 
-list_issues_for_pr "$pr" "${repo}"
+list_issues_for_pr "${pr}" "${repo}"
 ;;
 
 list-labels-for-issue)
 issue="${1:-}"
 
-list_labels_for_issue "$issue"
+list_labels_for_issue "${issue}"
 ;;
 
-*) die "impossible situation: cmd: '$cmd'" ;;
+*) die "impossible situation: cmd: '${cmd}'" ;;
 esac
 
 exit 0
@@ -74,7 +74,7 @@ build_and_install_gperf() {
 CC= ./configure --prefix="${gperf_install_dir}"
 make
 make install
-export PATH=$PATH:"${gperf_install_dir}"/bin
+export PATH=${PATH}:"${gperf_install_dir}"/bin
 popd
 echo "Gperf installed successfully"
 }
@@ -101,7 +101,7 @@ main() {
 die "Usage: ${0} <libseccomp-install-dir> <gperf-install-dir>"
 fi
 
-pushd "$workdir"
+pushd "${workdir}"
 # gperf is required for building the libseccomp.
 build_and_install_gperf
 build_and_install_libseccomp
@@ -5,20 +5,20 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-[ -n "$DEBUG" ] && set -o xtrace
+[ -n "${DEBUG}" ] && set -o xtrace
 
 # If we fail for any reason a message will be displayed
 die() {
 msg="$*"
-echo "ERROR: $msg" >&2
+echo "ERROR: ${msg}" >&2
 exit 1
 }
 
 function verify_yq_exists() {
 local yq_path=$1
 local yq_version=$2
-local expected="yq (https://github.com/mikefarah/yq/) version $yq_version"
-if [ -x "${yq_path}" ] && [ "$($yq_path --version)"X == "$expected"X ]; then
+local expected="yq (https://github.com/mikefarah/yq/) version ${yq_version}"
+if [ -x "${yq_path}" ] && [ "$(${yq_path} --version)"X == "${expected}"X ]; then
 return 0
 else
 return 1
@@ -41,7 +41,7 @@ function install_yq() {
 else
 yq_path="/usr/local/bin/yq"
 fi
-if verify_yq_exists "$yq_path" "$yq_version"; then
+if verify_yq_exists "${yq_path}" "${yq_version}"; then
 echo "yq is already installed in correct version"
 return
 fi
@@ -76,7 +76,7 @@ function install_yq() {
 # If we're on an apple silicon machine, just assign amd64.
 # The version of yq we use doesn't have a darwin arm build,
 # but Rosetta can come to the rescue here.
-if [ $goos == "Darwin" ]; then
+if [ ${goos} == "Darwin" ]; then
 goarch=amd64
 else
 goarch=arm64
@@ -13,15 +13,15 @@ GOOD="$1"
 ARCH=amd64
 REPO="quay.io/kata-containers/kata-deploy-ci"
 
-TAGS=$(skopeo list-tags "docker://$REPO")
+TAGS=$(skopeo list-tags "docker://${REPO}")
 # Only amd64
-TAGS=$(echo "$TAGS" | jq '.Tags' | jq "map(select(endswith(\"$ARCH\")))" | jq -r '.[]')
+TAGS=$(echo "${TAGS}" | jq '.Tags' | jq "map(select(endswith(\"${ARCH}\")))" | jq -r '.[]')
 # Sort by git
 SORTED=""
-[ -n "$BAD" ] && LOG_ARGS="$GOOD~1..$BAD" || LOG_ARGS="$GOOD~1.."
-for TAG in $(git log --merges --pretty=format:%H --reverse $LOG_ARGS); do
-[[ "$TAGS" =~ "$TAG" ]] && SORTED+="
-kata-containers-$TAG-$ARCH"
+[ -n "${BAD}" ] && LOG_ARGS="${GOOD}~1..${BAD}" || LOG_ARGS="${GOOD}~1.."
+for TAG in $(git log --merges --pretty=format:%H --reverse ${LOG_ARGS}); do
+[[ "${TAGS}" =~ "${TAG}" ]] && SORTED+="
+kata-containers-${TAG}-${ARCH}"
 done
 # Comma separated tags with repo
-echo "$SORTED" | tail -n +2 | sed -e "s@^@$REPO:@" | paste -s -d, -
+echo "${SORTED}" | tail -n +2 | sed -e "s@^@${REPO}:@" | paste -s -d, -
@@ -36,21 +36,21 @@ oc delete -f "${scripts_dir}/smoke/http-server.yaml"
 
 # Delete test.sh resources
 oc delete -f "${deployments_dir}/relabel_selinux.yaml"
-if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
+if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
 oc delete -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
 oc delete -f "${deployments_dir}/workaround-9206-crio.yaml"
 fi
 [ ${SELINUX_PERMISSIVE} == "yes" ] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
 
 # Delete kata-containers
-pushd "$katacontainers_repo_dir/tools/packaging/kata-deploy"
+pushd "${katacontainers_repo_dir}/tools/packaging/kata-deploy"
 oc delete -f kata-deploy/base/kata-deploy.yaml
 oc -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
 oc apply -f kata-cleanup/base/kata-cleanup.yaml
 echo "Wait for all related pods to be gone"
 ( repeats=1; for i in $(seq 1 600); do
 oc get pods -l name="kubelet-kata-cleanup" --no-headers=true -n kube-system 2>&1 | grep "No resources found" -q && ((repeats++)) || repeats=1
-[ "$repeats" -gt 5 ] && echo kata-cleanup finished && break
+[ "${repeats}" -gt 5 ] && echo kata-cleanup finished && break
 sleep 1
 done) || { echo "There are still some kata-cleanup related pods after 600 iterations"; oc get all -n kube-system; exit 1; }
 oc delete -f kata-cleanup/base/kata-cleanup.yaml
@@ -24,7 +24,7 @@ pushd "${webhook_dir}" >/dev/null
 info "Builds the kata-webhook"
 ./create-certs.sh
 info "Override our KATA_RUNTIME ConfigMap"
-sed -i deploy/webhook.yaml -e "s/runtime_class: .*$/runtime_class: $KATA_RUNTIME/g"
+sed -i deploy/webhook.yaml -e "s/runtime_class: .*$/runtime_class: ${KATA_RUNTIME}/g"
 info "Deploys the kata-webhook"
 oc apply -f deploy/
 
@@ -40,13 +40,13 @@ WORKAROUND_9206_CRIO=${WORKAROUND_9206_CRIO:-no}
 #
 apply_kata_deploy() {
 local deploy_file="tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
-pushd "$katacontainers_repo_dir"
-sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "$deploy_file"
+pushd "${katacontainers_repo_dir}"
+sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "${deploy_file}"
 
 info "Applying kata-deploy"
 oc apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
 oc label --overwrite ns kube-system pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
-oc apply -f "$deploy_file"
+oc apply -f "${deploy_file}"
 oc -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
 
 info "Adding the kata runtime classes"
@@ -68,26 +68,26 @@ wait_for_reboot() {
 awk '{if ($3 == "worker") { print $1 } }'))
 # Get the boot ID to compared it changed over time.
 for node in "${workers[@]}"; do
-BOOTIDS[$node]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
-node/$node)
-echo "Wait $node reboot"
+BOOTIDS[${node}]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
+node/${node})
+echo "Wait ${node} reboot"
 done
 
-echo "Set timeout to $delta seconds"
+echo "Set timeout to ${delta} seconds"
 timer_start=$(date +%s)
 while [ ${#workers[@]} -gt 0 ]; do
-sleep $sleep_time
+sleep ${sleep_time}
 now=$(date +%s)
-if [ $(($timer_start + $delta)) -lt $now ]; then
+if [ $((${timer_start} + ${delta})) -lt ${now} ]; then
 echo "Timeout: not all workers rebooted"
 return 1
 fi
-echo "Checking after $(($now - $timer_start)) seconds"
+echo "Checking after $((${now} - ${timer_start})) seconds"
 for i in "${!workers[@]}"; do
 current_id=$(oc get \
 -o jsonpath='{.status.nodeInfo.bootID}' \
 node/${workers[i]})
-if [ "$current_id" != ${BOOTIDS[${workers[i]}]} ]; then
+if [ "${current_id}" != ${BOOTIDS[${workers[i]}]} ]; then
 echo "${workers[i]} rebooted"
 unset workers[i]
 fi
@@ -104,30 +104,30 @@ wait_mcp_update() {
 local degraded_count=0
 local machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')
 
-if [[ -z "$machine_count" && "$machine_count" -lt 1 ]]; then
+if [[ -z "${machine_count}" && "${machine_count}" -lt 1 ]]; then
 warn "Unabled to obtain the machine count"
 return 1
 fi
 
-echo "Set timeout to $delta seconds"
-local deadline=$(($(date +%s) + $delta))
+echo "Set timeout to ${delta} seconds"
+local deadline=$(($(date +%s) + ${delta}))
 # The ready count might not have changed yet, so wait a little.
-while [[ "$ready_count" != "$machine_count" && \
-"$degraded_count" == 0 ]]; do
+while [[ "${ready_count}" != "${machine_count}" && \
+"${degraded_count}" == 0 ]]; do
 # Let's check it hit the timeout (or not).
 local now=$(date +%s)
-if [ $deadline -lt $now ]; then
+if [ ${deadline} -lt ${now} ]; then
 echo "Timeout: not all workers updated" >&2
 return 1
 fi
-sleep $sleep_time
+sleep ${sleep_time}
 ready_count=$(oc get mcp worker \
 -o jsonpath='{.status.readyMachineCount}')
 degraded_count=$(oc get mcp worker \
 -o jsonpath='{.status.degradedMachineCount}')
-echo "check machineconfigpool - ready_count: $ready_count degraded_count: $degraded_count"
+echo "check machineconfigpool - ready_count: ${ready_count} degraded_count: ${degraded_count}"
 done
-[ $degraded_count -eq 0 ]
+[ ${degraded_count} -eq 0 ]
 }
 
 # Enable the RHCOS extension for the Sandboxed Containers.
@@ -148,8 +148,8 @@ enable_sandboxedcontainers_extension() {
 debug_pod() {
 local pod="$1"
 info "Debug pod: ${pod}"
-oc describe pods "$pod"
-oc logs "$pod"
+oc describe pods "${pod}"
+oc logs "${pod}"
 }
 
 # Wait for all pods of the app label to contain expected message
@@ -166,30 +166,30 @@ wait_for_app_pods_message() {
 local message="$3"
 local timeout="$4"
 local namespace="$5"
-[ -z "$pod_count" ] && pod_count=1
-[ -z "$timeout" ] && timeout=60
-[ -n "$namespace" ] && namespace=" -n $namespace "
+[ -z "${pod_count}" ] && pod_count=1
+[ -z "${timeout}" ] && timeout=60
+[ -n "${namespace}" ] && namespace=" -n ${namespace} "
 local pod
 local pods
 local i
 SECONDS=0
 while :; do
-pods=($(oc get pods -l app="$app" --no-headers=true $namespace | awk '{print $1}'))
-[ "${#pods}" -ge "$pod_count" ] && break
-if [ "$SECONDS" -gt "$timeout" ]; then
-printf "Unable to find ${pod_count} pods for '-l app=\"$app\"' in ${SECONDS}s (%s)" "${pods[@]}"
+pods=($(oc get pods -l app="${app}" --no-headers=true ${namespace} | awk '{print $1}'))
+[ "${#pods}" -ge "${pod_count}" ] && break
+if [ "${SECONDS}" -gt "${timeout}" ]; then
+printf "Unable to find ${pod_count} pods for '-l app=\"${app}\"' in ${SECONDS}s (%s)" "${pods[@]}"
 return 1
 fi
 done
 for pod in "${pods[@]}"; do
 while :; do
-local log=$(oc logs $namespace "$pod")
-echo "$log" | grep "$message" -q && echo "Found $(echo "$log" | grep "$message") in $pod's log ($SECONDS)" && break;
-if [ "$SECONDS" -gt "$timeout" ]; then
-echo -n "Message '$message' not present in '${pod}' pod of the '-l app=\"$app\"' "
+local log=$(oc logs ${namespace} "${pod}")
+echo "${log}" | grep "${message}" -q && echo "Found $(echo "${log}" | grep "${message}") in ${pod}'s log (${SECONDS})" && break;
+if [ "${SECONDS}" -gt "${timeout}" ]; then
+echo -n "Message '${message}' not present in '${pod}' pod of the '-l app=\"${app}\"' "
 printf "pods after ${SECONDS}s :(%s)\n" "${pods[@]}"
-echo "Pod $pod's output so far:"
-echo "$log"
+echo "Pod ${pod}'s output so far:"
+echo "${log}"
 return 1
 fi
 sleep 1;
@@ -200,8 +200,8 @@ wait_for_app_pods_message() {
 oc config set-context --current --namespace=default
 
 worker_nodes=$(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
-num_nodes=$(echo $worker_nodes | wc -w)
-[ $num_nodes -ne 0 ] || \
+num_nodes=$(echo ${worker_nodes} | wc -w)
+[ ${num_nodes} -ne 0 ] || \
 die "No worker nodes detected. Something is wrong with the cluster"
 
 if [ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]; then
@@ -219,9 +219,9 @@ apply_kata_deploy
 # Set SELinux to permissive mode
 if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
 info "Configuring SELinux"
-if [ -z "$SELINUX_CONF_BASE64" ]; then
+if [ -z "${SELINUX_CONF_BASE64}" ]; then
 export SELINUX_CONF_BASE64=$(echo \
-$(cat $configs_dir/selinux.conf|base64) | \
+$(cat ${configs_dir}/selinux.conf|base64) | \
 sed -e 's/\s//g')
 fi
 envsubst < ${deployments_dir}/machineconfig_selinux.yaml.in | \
@@ -232,14 +232,14 @@ if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
 wait_for_reboot
 fi
 
-if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
+if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
 info "Applying workaround to enable skip_mount_home in crio on OCP 4.13"
 oc apply -f "${deployments_dir}/workaround-9206-crio.yaml"
 oc apply -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
-wait_for_app_pods_message workaround-9206-crio-ds "$num_nodes" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
+wait_for_app_pods_message workaround-9206-crio-ds "${num_nodes}" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
 fi
 
 # FIXME: Remove when https://github.com/kata-containers/kata-containers/pull/8417 is resolved
 # Selinux context is currently not handled by kata-deploy
 oc apply -f ${deployments_dir}/relabel_selinux.yaml
-wait_for_app_pods_message restorecon "$num_nodes" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
+wait_for_app_pods_message restorecon "${num_nodes}" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
@@ -10,11 +10,11 @@ if command -v go > /dev/null; then
 export GOPATH=${GOPATH:-$(go env GOPATH)}
 else
 # if go isn't installed, set default location for GOPATH
-export GOPATH="${GOPATH:-$HOME/go}"
+export GOPATH="${GOPATH:-${HOME}/go}"
 fi
 
 lib_dir=$(dirname "${BASH_SOURCE[0]}")
-source "$lib_dir/../../tests/common.bash"
+source "${lib_dir}/../../tests/common.bash"
 
 export katacontainers_repo=${katacontainers_repo:="github.com/kata-containers/kata-containers"}
 export katacontainers_repo_dir="${GOPATH}/src/${katacontainers_repo}"
@@ -15,7 +15,7 @@ pod='http-server'
 # Create a pod.
 #
 info "Creating the ${pod} pod"
-[ -z "$KATA_RUNTIME" ] && die "Please set the KATA_RUNTIME first"
+[ -z "${KATA_RUNTIME}" ] && die "Please set the KATA_RUNTIME first"
 envsubst < "${script_dir}/smoke/${pod}.yaml.in" | \
 oc apply -f - || \
 die "failed to create ${pod} pod"
@@ -27,8 +27,8 @@ sleep_time=5
 cmd="oc get pod/${pod} -o jsonpath='{.status.containerStatuses[0].state}' | \
 grep running > /dev/null"
 info "Wait until the pod gets running"
-waitForProcess $wait_time $sleep_time "$cmd" || timed_out=$?
-if [ -n "$timed_out" ]; then
+waitForProcess ${wait_time} ${sleep_time} "${cmd}" || timed_out=$?
+if [ -n "${timed_out}" ]; then
 oc describe pod/${pod}
 oc delete pod/${pod}
 die "${pod} not running"
@@ -39,7 +39,7 @@ info "${pod} is running"
 #
 hello_file=/tmp/hello
 hello_msg='Hello World'
-oc exec ${pod} -- sh -c "echo $hello_msg > $hello_file"
+oc exec ${pod} -- sh -c "echo ${hello_msg} > ${hello_file}"
 
 info "Creating the service and route"
 if oc apply -f ${script_dir}/smoke/service.yaml; then
@@ -60,7 +60,7 @@ fi
 
 info "Wait for the HTTP server to respond"
 tempfile=$(mktemp)
-check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '$tempfile' | grep -q '$hello_msg'"
+check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '${tempfile}' | grep -q '${hello_msg}'"
 if waitForProcess 60 1 "${check_cmd}"; then
 test_status=0
 info "HTTP server is working"
@@ -78,12 +78,12 @@ else
 echo "::endgroup::"
 info "HTTP server is unreachable"
 fi
-rm -f "$tempfile"
+rm -f "${tempfile}"
 
 # Delete the resources.
 #
 info "Deleting the service/route"
-if [ "$is_ocp" -eq 0 ]; then
+if [ "${is_ocp}" -eq 0 ]; then
 oc delete -f ${script_dir}/smoke/service_kubernetes.yaml
 else
 oc delete -f ${script_dir}/smoke/service.yaml
@@ -91,4 +91,4 @@ fi
 info "Deleting the ${pod} pod"
 oc delete pod/${pod} || test_status=$?
 
-exit $test_status
+exit ${test_status}
@@ -17,12 +17,12 @@ export KATA_RUNTIME="${KATA_RUNTIME:-kata-qemu}"
 ## SETUP
 # Deploy kata
 SETUP=0
-pushd "$KATA_CI_DIR" || { echo "Failed to cd to '$KATA_CI_DIR'"; exit 255; }
+pushd "${KATA_CI_DIR}" || { echo "Failed to cd to '${KATA_CI_DIR}'"; exit 255; }
 ./test.sh || SETUP=125
 cluster/deploy_webhook.sh || SETUP=125
-if [ $SETUP != 0 ]; then
+if [ ${SETUP} != 0 ]; then
 ./cleanup.sh
-exit "$SETUP"
+exit "${SETUP}"
 fi
 popd || true
 # Disable security
@@ -32,19 +32,19 @@ oc label --overwrite ns default pod-security.kubernetes.io/enforce=privileged po
 
 ## TEST EXECUTION
 # Run the testing
-pushd "$OCP_DIR" || { echo "Failed to cd to '$OCP_DIR'"; exit 255; }
-echo "$E2E_TEST" > /tmp/tsts
+pushd "${OCP_DIR}" || { echo "Failed to cd to '${OCP_DIR}'"; exit 255; }
+echo "${E2E_TEST}" > /tmp/tsts
 # Remove previously-existing temporarily files as well as previous results
 OUT=RESULTS/tmp
 rm -Rf /tmp/*test* /tmp/e2e-*
-rm -R $OUT
-mkdir -p $OUT
+rm -R ${OUT}
+mkdir -p ${OUT}
 # Run the tests ignoring the monitor health checks
-./openshift-tests run --provider azure -o "$OUT/job.log" --junit-dir "$OUT" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive
+./openshift-tests run --provider azure -o "${OUT}/job.log" --junit-dir "${OUT}" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive
 RET=$?
 popd || true
 
 ## CLEANUP
 ./cleanup.sh
-exit "$RET"
+exit "${RET}"
 
@@ -17,7 +17,7 @@ if [ -z "$1" ]; then
 fi
 
 # Make oc and kubectl visible
-export PATH=/tmp/shared:$PATH
+export PATH=/tmp/shared:${PATH}
 
 oc version || die "Test cluster is unreachable"
 
@@ -25,8 +25,8 @@ info "Install and configure kata into the test cluster"
 export SELINUX_PERMISSIVE="no"
 ${script_dir}/cluster/install_kata.sh || die "Failed to install kata-containers"
 
-info "Run test suite: $suite"
+info "Run test suite: ${suite}"
 test_status='PASS'
 ${script_dir}/run_${suite}_test.sh || test_status='FAIL'
-info "Test suite: $suite: $test_status"
-[ "$test_status" == "PASS" ]
+info "Test suite: ${suite}: ${test_status}"
+[ "${test_status}" == "PASS" ]