Mirror of https://github.com/kata-containers/kata-containers.git, synced 2025-04-27 19:35:32 +00:00
ci: shellcheck SC2250
Treat the SC2250 (require-variable-braces) shellcheck warnings in CI. There are no functional changes.

Related to: #10951

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
parent: d37d9feee9, commit: 667e26036c
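For reference, SC2250 is shellcheck's optional "require-variable-braces" check: it flags any parameter expansion written as $var and asks for the braced form ${var}, which is exactly the mechanical change applied throughout the hunks below. A minimal sketch of the rule and one way the optional check can be enabled is shown here; the script name is a placeholder and the exact CI wiring is not part of this diff:

    # Flagged by SC2250 (an optional check, off by default):
    runtimedir=$cidir/../src/runtime
    # Compliant form required by require-variable-braces:
    runtimedir=${cidir}/../src/runtime

    # Enabling the optional check when linting a script (hypothetical file name):
    shellcheck --enable=require-variable-braces some-ci-script.sh
    # or repo-wide, via a .shellcheckrc containing:
    #   enable=require-variable-braces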
@@ -7,16 +7,16 @@
 set -e

 cidir=$(dirname "$0")
-runtimedir=$cidir/../src/runtime
+runtimedir=${cidir}/../src/runtime

 build_working_packages() {
 # working packages:
-device_api=$runtimedir/pkg/device/api
-device_config=$runtimedir/pkg/device/config
-device_drivers=$runtimedir/pkg/device/drivers
-device_manager=$runtimedir/pkg/device/manager
-rc_pkg_dir=$runtimedir/pkg/resourcecontrol/
-utils_pkg_dir=$runtimedir/virtcontainers/utils
+device_api=${runtimedir}/pkg/device/api
+device_config=${runtimedir}/pkg/device/config
+device_drivers=${runtimedir}/pkg/device/drivers
+device_manager=${runtimedir}/pkg/device/manager
+rc_pkg_dir=${runtimedir}/pkg/resourcecontrol/
+utils_pkg_dir=${runtimedir}/virtcontainers/utils

 # broken packages :( :
 #katautils=$runtimedir/pkg/katautils
@@ -24,15 +24,15 @@ build_working_packages() {
 #vc=$runtimedir/virtcontainers

 pkgs=(
-"$device_api"
-"$device_config"
-"$device_drivers"
-"$device_manager"
-"$utils_pkg_dir"
-"$rc_pkg_dir")
+"${device_api}"
+"${device_config}"
+"${device_drivers}"
+"${device_manager}"
+"${utils_pkg_dir}"
+"${rc_pkg_dir}")
 for pkg in "${pkgs[@]}"; do
-echo building "$pkg"
-pushd "$pkg" &>/dev/null
+echo building "${pkg}"
+pushd "${pkg}" &>/dev/null
 go build
 go test
 popd &>/dev/null
@@ -25,7 +25,7 @@ die()
 usage()
 {
 cat <<EOF
-Usage: $script_name [OPTIONS] [command] [arguments]
+Usage: ${script_name} [OPTIONS] [command] [arguments]

 Description: Utility to expand the abilities of the GitHub CLI tool, gh.

@@ -48,7 +48,7 @@ Examples:

 - List issues for a Pull Request 123 in kata-containers/kata-containers repo

-$ $script_name list-issues-for-pr 123
+$ ${script_name} list-issues-for-pr 123
 EOF
 }

@@ -57,11 +57,11 @@ list_issues_for_pr()
 local pr="${1:-}"
 local repo="${2:-kata-containers/kata-containers}"

-[ -z "$pr" ] && die "need PR"
+[ -z "${pr}" ] && die "need PR"

 local commits=$(gh pr view ${pr} --repo ${repo} --json commits --jq .commits[].messageBody)

-[ -z "$commits" ] && die "cannot determine commits for PR $pr"
+[ -z "${commits}" ] && die "cannot determine commits for PR ${pr}"

 # Extract the issue number(s) from the commits.
 #
@@ -78,7 +78,7 @@ list_issues_for_pr()
 #
 # "<git-commit> <git-commit-msg>"
 #
-local issues=$(echo "$commits" |\
+local issues=$(echo "${commits}" |\
 grep -v -E "^( | )" |\
 grep -i -E "fixes:* *(#*[0-9][0-9]*)" |\
 tr ' ' '\n' |\
@@ -86,16 +86,16 @@ list_issues_for_pr()
 sed 's/[.,\#]//g' |\
 sort -nu || true)

-[ -z "$issues" ] && die "cannot determine issues for PR $pr"
+[ -z "${issues}" ] && die "cannot determine issues for PR ${pr}"

 echo "# Issues linked to PR"
 echo "#"
 echo "# Fields: issue_number"

 local issue
-echo "$issues"|while read issue
+echo "${issues}"|while read issue
 do
-printf "%s\n" "$issue"
+printf "%s\n" "${issue}"
 done
 }

@@ -103,20 +103,20 @@ list_labels_for_issue()
 {
 local issue="${1:-}"

-[ -z "$issue" ] && die "need issue number"
+[ -z "${issue}" ] && die "need issue number"

 local labels=$(gh issue view ${issue} --repo kata-containers/kata-containers --json labels)

-[ -z "$labels" ] && die "cannot determine labels for issue $issue"
+[ -z "${labels}" ] && die "cannot determine labels for issue ${issue}"

-printf "$labels"
+printf "${labels}"
 }

 setup()
 {
 for cmd in gh jq
 do
-command -v "$cmd" &>/dev/null || die "need command: $cmd"
+command -v "${cmd}" &>/dev/null || die "need command: ${cmd}"
 done
 }

@@ -129,24 +129,24 @@ handle_args()

 while getopts "ahr:" opt "$@"
 do
-case "$opt" in
+case "${opt}" in
 a) show_all="true" ;;
 h) usage && exit 0 ;;
 r) repo="${OPTARG}" ;;
 esac
 done

-shift $(($OPTIND - 1))
+shift $((${OPTIND} - 1))

 local repo="${repo:-kata-containers/kata-containers}"
 local cmd="${1:-}"

-case "$cmd" in
+case "${cmd}" in
 list-issues-for-pr) ;;
 list-labels-for-issue) ;;

 "") usage && exit 0 ;;
-*) die "invalid command: '$cmd'" ;;
+*) die "invalid command: '${cmd}'" ;;
 esac

 # Consume the command name
@@ -155,20 +155,20 @@ handle_args()
 local issue=""
 local pr=""

-case "$cmd" in
+case "${cmd}" in
 list-issues-for-pr)
 pr="${1:-}"

-list_issues_for_pr "$pr" "${repo}"
+list_issues_for_pr "${pr}" "${repo}"
 ;;

 list-labels-for-issue)
 issue="${1:-}"

-list_labels_for_issue "$issue"
+list_labels_for_issue "${issue}"
 ;;

-*) die "impossible situation: cmd: '$cmd'" ;;
+*) die "impossible situation: cmd: '${cmd}'" ;;
 esac

 exit 0
@@ -74,7 +74,7 @@ build_and_install_gperf() {
 CC= ./configure --prefix="${gperf_install_dir}"
 make
 make install
-export PATH=$PATH:"${gperf_install_dir}"/bin
+export PATH=${PATH}:"${gperf_install_dir}"/bin
 popd
 echo "Gperf installed successfully"
 }
@@ -101,7 +101,7 @@ main() {
 die "Usage: ${0} <libseccomp-install-dir> <gperf-install-dir>"
 fi

-pushd "$workdir"
+pushd "${workdir}"
 # gperf is required for building the libseccomp.
 build_and_install_gperf
 build_and_install_libseccomp
@@ -5,20 +5,20 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-[ -n "$DEBUG" ] && set -o xtrace
+[ -n "${DEBUG}" ] && set -o xtrace

 # If we fail for any reason a message will be displayed
 die() {
 msg="$*"
-echo "ERROR: $msg" >&2
+echo "ERROR: ${msg}" >&2
 exit 1
 }

 function verify_yq_exists() {
 local yq_path=$1
 local yq_version=$2
-local expected="yq (https://github.com/mikefarah/yq/) version $yq_version"
-if [ -x "${yq_path}" ] && [ "$($yq_path --version)"X == "$expected"X ]; then
+local expected="yq (https://github.com/mikefarah/yq/) version ${yq_version}"
+if [ -x "${yq_path}" ] && [ "$(${yq_path} --version)"X == "${expected}"X ]; then
 return 0
 else
 return 1
@@ -41,7 +41,7 @@ function install_yq() {
 else
 yq_path="/usr/local/bin/yq"
 fi
-if verify_yq_exists "$yq_path" "$yq_version"; then
+if verify_yq_exists "${yq_path}" "${yq_version}"; then
 echo "yq is already installed in correct version"
 return
 fi
@@ -76,7 +76,7 @@ function install_yq() {
 # If we're on an apple silicon machine, just assign amd64.
 # The version of yq we use doesn't have a darwin arm build,
 # but Rosetta can come to the rescue here.
-if [ $goos == "Darwin" ]; then
+if [ ${goos} == "Darwin" ]; then
 goarch=amd64
 else
 goarch=arm64
@@ -13,15 +13,15 @@ GOOD="$1"
 ARCH=amd64
 REPO="quay.io/kata-containers/kata-deploy-ci"

-TAGS=$(skopeo list-tags "docker://$REPO")
+TAGS=$(skopeo list-tags "docker://${REPO}")
 # Only amd64
-TAGS=$(echo "$TAGS" | jq '.Tags' | jq "map(select(endswith(\"$ARCH\")))" | jq -r '.[]')
+TAGS=$(echo "${TAGS}" | jq '.Tags' | jq "map(select(endswith(\"${ARCH}\")))" | jq -r '.[]')
 # Sort by git
 SORTED=""
-[ -n "$BAD" ] && LOG_ARGS="$GOOD~1..$BAD" || LOG_ARGS="$GOOD~1.."
-for TAG in $(git log --merges --pretty=format:%H --reverse $LOG_ARGS); do
-[[ "$TAGS" =~ "$TAG" ]] && SORTED+="
-kata-containers-$TAG-$ARCH"
+[ -n "${BAD}" ] && LOG_ARGS="${GOOD}~1..${BAD}" || LOG_ARGS="${GOOD}~1.."
+for TAG in $(git log --merges --pretty=format:%H --reverse ${LOG_ARGS}); do
+[[ "${TAGS}" =~ "${TAG}" ]] && SORTED+="
+kata-containers-${TAG}-${ARCH}"
 done
 # Comma separated tags with repo
-echo "$SORTED" | tail -n +2 | sed -e "s@^@$REPO:@" | paste -s -d, -
+echo "${SORTED}" | tail -n +2 | sed -e "s@^@${REPO}:@" | paste -s -d, -
@@ -36,21 +36,21 @@ oc delete -f "${scripts_dir}/smoke/http-server.yaml"

 # Delete test.sh resources
 oc delete -f "${deployments_dir}/relabel_selinux.yaml"
-if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
+if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
 oc delete -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
 oc delete -f "${deployments_dir}/workaround-9206-crio.yaml"
 fi
 [ ${SELINUX_PERMISSIVE} == "yes" ] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"

 # Delete kata-containers
-pushd "$katacontainers_repo_dir/tools/packaging/kata-deploy"
+pushd "${katacontainers_repo_dir}/tools/packaging/kata-deploy"
 oc delete -f kata-deploy/base/kata-deploy.yaml
 oc -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
 oc apply -f kata-cleanup/base/kata-cleanup.yaml
 echo "Wait for all related pods to be gone"
 ( repeats=1; for i in $(seq 1 600); do
 oc get pods -l name="kubelet-kata-cleanup" --no-headers=true -n kube-system 2>&1 | grep "No resources found" -q && ((repeats++)) || repeats=1
-[ "$repeats" -gt 5 ] && echo kata-cleanup finished && break
+[ "${repeats}" -gt 5 ] && echo kata-cleanup finished && break
 sleep 1
 done) || { echo "There are still some kata-cleanup related pods after 600 iterations"; oc get all -n kube-system; exit 1; }
 oc delete -f kata-cleanup/base/kata-cleanup.yaml
@@ -24,7 +24,7 @@ pushd "${webhook_dir}" >/dev/null
 info "Builds the kata-webhook"
 ./create-certs.sh
 info "Override our KATA_RUNTIME ConfigMap"
-sed -i deploy/webhook.yaml -e "s/runtime_class: .*$/runtime_class: $KATA_RUNTIME/g"
+sed -i deploy/webhook.yaml -e "s/runtime_class: .*$/runtime_class: ${KATA_RUNTIME}/g"
 info "Deploys the kata-webhook"
 oc apply -f deploy/

@@ -40,13 +40,13 @@ WORKAROUND_9206_CRIO=${WORKAROUND_9206_CRIO:-no}
 #
 apply_kata_deploy() {
 local deploy_file="tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
-pushd "$katacontainers_repo_dir"
-sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "$deploy_file"
+pushd "${katacontainers_repo_dir}"
+sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "${deploy_file}"

 info "Applying kata-deploy"
 oc apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
 oc label --overwrite ns kube-system pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
-oc apply -f "$deploy_file"
+oc apply -f "${deploy_file}"
 oc -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod

 info "Adding the kata runtime classes"
@@ -68,26 +68,26 @@ wait_for_reboot() {
 awk '{if ($3 == "worker") { print $1 } }'))
 # Get the boot ID to compared it changed over time.
 for node in "${workers[@]}"; do
-BOOTIDS[$node]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
-node/$node)
-echo "Wait $node reboot"
+BOOTIDS[${node}]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
+node/${node})
+echo "Wait ${node} reboot"
 done

-echo "Set timeout to $delta seconds"
+echo "Set timeout to ${delta} seconds"
 timer_start=$(date +%s)
 while [ ${#workers[@]} -gt 0 ]; do
-sleep $sleep_time
+sleep ${sleep_time}
 now=$(date +%s)
-if [ $(($timer_start + $delta)) -lt $now ]; then
+if [ $((${timer_start} + ${delta})) -lt ${now} ]; then
 echo "Timeout: not all workers rebooted"
 return 1
 fi
-echo "Checking after $(($now - $timer_start)) seconds"
+echo "Checking after $((${now} - ${timer_start})) seconds"
 for i in "${!workers[@]}"; do
 current_id=$(oc get \
 -o jsonpath='{.status.nodeInfo.bootID}' \
 node/${workers[i]})
-if [ "$current_id" != ${BOOTIDS[${workers[i]}]} ]; then
+if [ "${current_id}" != ${BOOTIDS[${workers[i]}]} ]; then
 echo "${workers[i]} rebooted"
 unset workers[i]
 fi
@@ -104,30 +104,30 @@ wait_mcp_update() {
 local degraded_count=0
 local machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')

-if [[ -z "$machine_count" && "$machine_count" -lt 1 ]]; then
+if [[ -z "${machine_count}" && "${machine_count}" -lt 1 ]]; then
 warn "Unabled to obtain the machine count"
 return 1
 fi

-echo "Set timeout to $delta seconds"
-local deadline=$(($(date +%s) + $delta))
+echo "Set timeout to ${delta} seconds"
+local deadline=$(($(date +%s) + ${delta}))
 # The ready count might not have changed yet, so wait a little.
-while [[ "$ready_count" != "$machine_count" && \
-"$degraded_count" == 0 ]]; do
+while [[ "${ready_count}" != "${machine_count}" && \
+"${degraded_count}" == 0 ]]; do
 # Let's check it hit the timeout (or not).
 local now=$(date +%s)
-if [ $deadline -lt $now ]; then
+if [ ${deadline} -lt ${now} ]; then
 echo "Timeout: not all workers updated" >&2
 return 1
 fi
-sleep $sleep_time
+sleep ${sleep_time}
 ready_count=$(oc get mcp worker \
 -o jsonpath='{.status.readyMachineCount}')
 degraded_count=$(oc get mcp worker \
 -o jsonpath='{.status.degradedMachineCount}')
-echo "check machineconfigpool - ready_count: $ready_count degraded_count: $degraded_count"
+echo "check machineconfigpool - ready_count: ${ready_count} degraded_count: ${degraded_count}"
 done
-[ $degraded_count -eq 0 ]
+[ ${degraded_count} -eq 0 ]
 }

 # Enable the RHCOS extension for the Sandboxed Containers.
@@ -148,8 +148,8 @@ enable_sandboxedcontainers_extension() {
 debug_pod() {
 local pod="$1"
 info "Debug pod: ${pod}"
-oc describe pods "$pod"
-oc logs "$pod"
+oc describe pods "${pod}"
+oc logs "${pod}"
 }

 # Wait for all pods of the app label to contain expected message
@@ -166,30 +166,30 @@ wait_for_app_pods_message() {
 local message="$3"
 local timeout="$4"
 local namespace="$5"
-[ -z "$pod_count" ] && pod_count=1
-[ -z "$timeout" ] && timeout=60
-[ -n "$namespace" ] && namespace=" -n $namespace "
+[ -z "${pod_count}" ] && pod_count=1
+[ -z "${timeout}" ] && timeout=60
+[ -n "${namespace}" ] && namespace=" -n ${namespace} "
 local pod
 local pods
 local i
 SECONDS=0
 while :; do
-pods=($(oc get pods -l app="$app" --no-headers=true $namespace | awk '{print $1}'))
-[ "${#pods}" -ge "$pod_count" ] && break
-if [ "$SECONDS" -gt "$timeout" ]; then
-printf "Unable to find ${pod_count} pods for '-l app=\"$app\"' in ${SECONDS}s (%s)" "${pods[@]}"
+pods=($(oc get pods -l app="${app}" --no-headers=true ${namespace} | awk '{print $1}'))
+[ "${#pods}" -ge "${pod_count}" ] && break
+if [ "${SECONDS}" -gt "${timeout}" ]; then
+printf "Unable to find ${pod_count} pods for '-l app=\"${app}\"' in ${SECONDS}s (%s)" "${pods[@]}"
 return 1
 fi
 done
 for pod in "${pods[@]}"; do
 while :; do
-local log=$(oc logs $namespace "$pod")
-echo "$log" | grep "$message" -q && echo "Found $(echo "$log" | grep "$message") in $pod's log ($SECONDS)" && break;
-if [ "$SECONDS" -gt "$timeout" ]; then
-echo -n "Message '$message' not present in '${pod}' pod of the '-l app=\"$app\"' "
+local log=$(oc logs ${namespace} "${pod}")
+echo "${log}" | grep "${message}" -q && echo "Found $(echo "${log}" | grep "${message}") in ${pod}'s log (${SECONDS})" && break;
+if [ "${SECONDS}" -gt "${timeout}" ]; then
+echo -n "Message '${message}' not present in '${pod}' pod of the '-l app=\"${app}\"' "
 printf "pods after ${SECONDS}s :(%s)\n" "${pods[@]}"
-echo "Pod $pod's output so far:"
-echo "$log"
+echo "Pod ${pod}'s output so far:"
+echo "${log}"
 return 1
 fi
 sleep 1;
@@ -200,8 +200,8 @@ wait_for_app_pods_message() {
 oc config set-context --current --namespace=default

 worker_nodes=$(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
-num_nodes=$(echo $worker_nodes | wc -w)
-[ $num_nodes -ne 0 ] || \
+num_nodes=$(echo ${worker_nodes} | wc -w)
+[ ${num_nodes} -ne 0 ] || \
 die "No worker nodes detected. Something is wrong with the cluster"

 if [ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]; then
@@ -219,9 +219,9 @@ apply_kata_deploy
 # Set SELinux to permissive mode
 if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
 info "Configuring SELinux"
-if [ -z "$SELINUX_CONF_BASE64" ]; then
+if [ -z "${SELINUX_CONF_BASE64}" ]; then
 export SELINUX_CONF_BASE64=$(echo \
-$(cat $configs_dir/selinux.conf|base64) | \
+$(cat ${configs_dir}/selinux.conf|base64) | \
 sed -e 's/\s//g')
 fi
 envsubst < ${deployments_dir}/machineconfig_selinux.yaml.in | \
@@ -232,14 +232,14 @@ if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
 wait_for_reboot
 fi

-if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
+if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
 info "Applying workaround to enable skip_mount_home in crio on OCP 4.13"
 oc apply -f "${deployments_dir}/workaround-9206-crio.yaml"
 oc apply -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
-wait_for_app_pods_message workaround-9206-crio-ds "$num_nodes" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
+wait_for_app_pods_message workaround-9206-crio-ds "${num_nodes}" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
 fi

 # FIXME: Remove when https://github.com/kata-containers/kata-containers/pull/8417 is resolved
 # Selinux context is currently not handled by kata-deploy
 oc apply -f ${deployments_dir}/relabel_selinux.yaml
-wait_for_app_pods_message restorecon "$num_nodes" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
+wait_for_app_pods_message restorecon "${num_nodes}" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
@@ -10,11 +10,11 @@ if command -v go > /dev/null; then
 export GOPATH=${GOPATH:-$(go env GOPATH)}
 else
 # if go isn't installed, set default location for GOPATH
-export GOPATH="${GOPATH:-$HOME/go}"
+export GOPATH="${GOPATH:-${HOME}/go}"
 fi

 lib_dir=$(dirname "${BASH_SOURCE[0]}")
-source "$lib_dir/../../tests/common.bash"
+source "${lib_dir}/../../tests/common.bash"

 export katacontainers_repo=${katacontainers_repo:="github.com/kata-containers/kata-containers"}
 export katacontainers_repo_dir="${GOPATH}/src/${katacontainers_repo}"
@@ -15,7 +15,7 @@ pod='http-server'
 # Create a pod.
 #
 info "Creating the ${pod} pod"
-[ -z "$KATA_RUNTIME" ] && die "Please set the KATA_RUNTIME first"
+[ -z "${KATA_RUNTIME}" ] && die "Please set the KATA_RUNTIME first"
 envsubst < "${script_dir}/smoke/${pod}.yaml.in" | \
 oc apply -f - || \
 die "failed to create ${pod} pod"
@@ -27,8 +27,8 @@ sleep_time=5
 cmd="oc get pod/${pod} -o jsonpath='{.status.containerStatuses[0].state}' | \
 grep running > /dev/null"
 info "Wait until the pod gets running"
-waitForProcess $wait_time $sleep_time "$cmd" || timed_out=$?
-if [ -n "$timed_out" ]; then
+waitForProcess ${wait_time} ${sleep_time} "${cmd}" || timed_out=$?
+if [ -n "${timed_out}" ]; then
 oc describe pod/${pod}
 oc delete pod/${pod}
 die "${pod} not running"
@@ -39,7 +39,7 @@ info "${pod} is running"
 #
 hello_file=/tmp/hello
 hello_msg='Hello World'
-oc exec ${pod} -- sh -c "echo $hello_msg > $hello_file"
+oc exec ${pod} -- sh -c "echo ${hello_msg} > ${hello_file}"

 info "Creating the service and route"
 if oc apply -f ${script_dir}/smoke/service.yaml; then
@@ -60,7 +60,7 @@ fi

 info "Wait for the HTTP server to respond"
 tempfile=$(mktemp)
-check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '$tempfile' | grep -q '$hello_msg'"
+check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '${tempfile}' | grep -q '${hello_msg}'"
 if waitForProcess 60 1 "${check_cmd}"; then
 test_status=0
 info "HTTP server is working"
@@ -78,12 +78,12 @@ else
 echo "::endgroup::"
 info "HTTP server is unreachable"
 fi
-rm -f "$tempfile"
+rm -f "${tempfile}"

 # Delete the resources.
 #
 info "Deleting the service/route"
-if [ "$is_ocp" -eq 0 ]; then
+if [ "${is_ocp}" -eq 0 ]; then
 oc delete -f ${script_dir}/smoke/service_kubernetes.yaml
 else
 oc delete -f ${script_dir}/smoke/service.yaml
@@ -91,4 +91,4 @@ fi
 info "Deleting the ${pod} pod"
 oc delete pod/${pod} || test_status=$?

-exit $test_status
+exit ${test_status}
@@ -17,12 +17,12 @@ export KATA_RUNTIME="${KATA_RUNTIME:-kata-qemu}"
 ## SETUP
 # Deploy kata
 SETUP=0
-pushd "$KATA_CI_DIR" || { echo "Failed to cd to '$KATA_CI_DIR'"; exit 255; }
+pushd "${KATA_CI_DIR}" || { echo "Failed to cd to '${KATA_CI_DIR}'"; exit 255; }
 ./test.sh || SETUP=125
 cluster/deploy_webhook.sh || SETUP=125
-if [ $SETUP != 0 ]; then
+if [ ${SETUP} != 0 ]; then
 ./cleanup.sh
-exit "$SETUP"
+exit "${SETUP}"
 fi
 popd || true
 # Disable security
@@ -32,19 +32,19 @@ oc label --overwrite ns default pod-security.kubernetes.io/enforce=privileged po

 ## TEST EXECUTION
 # Run the testing
-pushd "$OCP_DIR" || { echo "Failed to cd to '$OCP_DIR'"; exit 255; }
-echo "$E2E_TEST" > /tmp/tsts
+pushd "${OCP_DIR}" || { echo "Failed to cd to '${OCP_DIR}'"; exit 255; }
+echo "${E2E_TEST}" > /tmp/tsts
 # Remove previously-existing temporarily files as well as previous results
 OUT=RESULTS/tmp
 rm -Rf /tmp/*test* /tmp/e2e-*
-rm -R $OUT
-mkdir -p $OUT
+rm -R ${OUT}
+mkdir -p ${OUT}
 # Run the tests ignoring the monitor health checks
-./openshift-tests run --provider azure -o "$OUT/job.log" --junit-dir "$OUT" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive
+./openshift-tests run --provider azure -o "${OUT}/job.log" --junit-dir "${OUT}" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive
 RET=$?
 popd || true

 ## CLEANUP
 ./cleanup.sh
-exit "$RET"
+exit "${RET}"

@@ -17,7 +17,7 @@ if [ -z "$1" ]; then
 fi

 # Make oc and kubectl visible
-export PATH=/tmp/shared:$PATH
+export PATH=/tmp/shared:${PATH}

 oc version || die "Test cluster is unreachable"

@@ -25,8 +25,8 @@ info "Install and configure kata into the test cluster"
 export SELINUX_PERMISSIVE="no"
 ${script_dir}/cluster/install_kata.sh || die "Failed to install kata-containers"

-info "Run test suite: $suite"
+info "Run test suite: ${suite}"
 test_status='PASS'
 ${script_dir}/run_${suite}_test.sh || test_status='FAIL'
-info "Test suite: $suite: $test_status"
-[ "$test_status" == "PASS" ]
+info "Test suite: ${suite}: ${test_status}"
+[ "${test_status}" == "PASS" ]