diff --git a/ci/darwin-test.sh b/ci/darwin-test.sh
index f9d32e743d..6ed583b3a1 100755
--- a/ci/darwin-test.sh
+++ b/ci/darwin-test.sh
@@ -7,16 +7,16 @@ set -e
cidir=$(dirname "$0")
-runtimedir=$cidir/../src/runtime
+runtimedir=${cidir}/../src/runtime
build_working_packages() {
# working packages:
- device_api=$runtimedir/pkg/device/api
- device_config=$runtimedir/pkg/device/config
- device_drivers=$runtimedir/pkg/device/drivers
- device_manager=$runtimedir/pkg/device/manager
- rc_pkg_dir=$runtimedir/pkg/resourcecontrol/
- utils_pkg_dir=$runtimedir/virtcontainers/utils
+ device_api=${runtimedir}/pkg/device/api
+ device_config=${runtimedir}/pkg/device/config
+ device_drivers=${runtimedir}/pkg/device/drivers
+ device_manager=${runtimedir}/pkg/device/manager
+ rc_pkg_dir=${runtimedir}/pkg/resourcecontrol/
+ utils_pkg_dir=${runtimedir}/virtcontainers/utils
# broken packages :(
:
#katautils=$runtimedir/pkg/katautils
@@ -24,15 +24,15 @@ build_working_packages() {
#vc=$runtimedir/virtcontainers
pkgs=(
- "$device_api"
- "$device_config"
- "$device_drivers"
- "$device_manager"
- "$utils_pkg_dir"
- "$rc_pkg_dir")
+ "${device_api}"
+ "${device_config}"
+ "${device_drivers}"
+ "${device_manager}"
+ "${utils_pkg_dir}"
+ "${rc_pkg_dir}")
for pkg in "${pkgs[@]}"; do
- echo building "$pkg"
- pushd "$pkg" &>/dev/null
+ echo building "${pkg}"
+ pushd "${pkg}" &>/dev/null
go build
go test
popd &>/dev/null
diff --git a/ci/gh-util.sh b/ci/gh-util.sh
index 9f8f91eda7..250eb646c2 100755
--- a/ci/gh-util.sh
+++ b/ci/gh-util.sh
@@ -10,7 +10,7 @@ set -o errtrace
set -o nounset
set -o pipefail
-[ -n "${DEBUG:-}" ] && set -o xtrace
+[[ -n "${DEBUG:-}" ]] && set -o xtrace
script_name=${0##*/}
@@ -25,7 +25,7 @@ die()
usage() { cat < " #
- local issues=$(echo "$commits" |\
+ local issues
+ issues=$(echo "${commits}" |\
grep -v -E "^( | )" |\
grep -i -E "fixes:* *(#*[0-9][0-9]*)" |\
tr ' ' '\n' |\
@@ -86,16 +88,16 @@ list_issues_for_pr()
sed 's/[.,\#]//g' |\
sort -nu || true)
- [ -z "$issues" ] && die "cannot determine issues for PR $pr"
+ [[ -z "${issues}" ]] && die "cannot determine issues for PR ${pr}"
echo "# Issues linked to PR"
echo "#"
echo "# Fields: issue_number"
local issue
- echo "$issues"|while read issue
+ echo "${issues}" | while read -r issue
do
- printf "%s\n" "$issue"
+ printf "%s\n" "${issue}"
done
}
@@ -103,20 +105,21 @@ list_labels_for_issue()
{
local issue="${1:-}"
- [ -z "$issue" ] && die "need issue number"
+ [[ -z "${issue}" ]] && die "need issue number"
- local labels=$(gh issue view ${issue} --repo kata-containers/kata-containers --json labels)
+ local labels
+ labels=$(gh issue view "${issue}" --repo kata-containers/kata-containers --json labels)
- [ -z "$labels" ] && die "cannot determine labels for issue $issue"
+ [[ -z "${labels}" ]] && die "cannot determine labels for issue ${issue}"
- printf "$labels"
+ echo "${labels}"
}
setup()
{
for cmd in gh jq
do
- command -v "$cmd" &>/dev/null || die "need command: $cmd"
+ command -v "${cmd}" &>/dev/null || die "need command: ${cmd}"
done
}
@@ -124,29 +127,28 @@ handle_args()
{
setup
- local show_all="false"
local opt
- while getopts "ahr:" opt "$@"
+ while getopts "hr:" opt "$@"
do
- case "$opt" in
- a) show_all="true" ;;
+ case "${opt}" in
h) usage && exit 0 ;;
r) repo="${OPTARG}" ;;
+ *) echo "use '-h' to get the list of supported arguments" && exit 1 ;;
esac
done
- shift $(($OPTIND - 1))
+ shift $((OPTIND - 1))
local repo="${repo:-kata-containers/kata-containers}"
local cmd="${1:-}"
- case "$cmd" in
+ case "${cmd}" in
list-issues-for-pr) ;;
list-labels-for-issue) ;;
"") usage && exit 0 ;;
- *) die "invalid command: '$cmd'" ;;
+ *) die "invalid command: '${cmd}'" ;;
esac
# Consume the command name
@@ -155,20 +157,20 @@ handle_args()
local issue=""
local pr=""
- case "$cmd" in
+ case "${cmd}" in
list-issues-for-pr)
pr="${1:-}"
- list_issues_for_pr "$pr" "${repo}"
+ list_issues_for_pr "${pr}" "${repo}"
;;
list-labels-for-issue)
issue="${1:-}"
- list_labels_for_issue "$issue"
+ list_labels_for_issue "${issue}"
;;
- *) die "impossible situation: cmd: '$cmd'" ;;
+ *) die "impossible situation: cmd: '${cmd}'" ;;
esac
exit 0
diff --git a/ci/install_libseccomp.sh b/ci/install_libseccomp.sh
index 25b306344b..a18ce72686 100755
--- a/ci/install_libseccomp.sh
+++ b/ci/install_libseccomp.sh
@@ -8,7 +8,6 @@ set -o errexit
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-script_name="$(basename "${BASH_SOURCE[0]}")"
source "${script_dir}/../tests/common.bash"
@@ -22,11 +21,11 @@ workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"
# Variables for libseccomp
libseccomp_version="${LIBSECCOMP_VERSION:-""}"
-if [ -z "${libseccomp_version}" ]; then
+if [[ -z "${libseccomp_version}" ]]; then
libseccomp_version=$(get_from_kata_deps ".externals.libseccomp.version")
fi
libseccomp_url="${LIBSECCOMP_URL:-""}"
-if [ -z "${libseccomp_url}" ]; then
+if [[ -z "${libseccomp_url}" ]]; then
libseccomp_url=$(get_from_kata_deps ".externals.libseccomp.url")
fi
libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
@@ -35,11 +34,11 @@ cflags="-O2"
# Variables for gperf
gperf_version="${GPERF_VERSION:-""}"
-if [ -z "${gperf_version}" ]; then
+if [[ -z "${gperf_version}" ]]; then
gperf_version=$(get_from_kata_deps ".externals.gperf.version")
fi
gperf_url="${GPERF_URL:-""}"
-if [ -z "${gperf_url}" ]; then
+if [[ -z "${gperf_url}" ]]; then
gperf_url=$(get_from_kata_deps ".externals.gperf.url")
fi
gperf_tarball="gperf-${gperf_version}.tar.gz"
@@ -47,7 +46,7 @@ gperf_tarball_url="${gperf_url}/${gperf_tarball}"
# We need to build the libseccomp library from sources to create a static library for the musl libc.
# However, ppc64le and s390x have no musl targets in Rust. Hence, we do not set cflags for the musl libc.
-if ([ "${arch}" != "ppc64le" ] && [ "${arch}" != "s390x" ]); then +if [[ "${arch}" != "ppc64le" ]] && [[ "${arch}" != "s390x" ]]; then # Set FORTIFY_SOURCE=1 because the musl-libc does not have some functions about FORTIFY_SOURCE=2 cflags="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 -O2" fi @@ -71,10 +70,10 @@ build_and_install_gperf() { tar -xf "${gperf_tarball}" pushd "gperf-${gperf_version}" # Unset $CC for configure, we will always use native for gperf - CC= ./configure --prefix="${gperf_install_dir}" + CC="" ./configure --prefix="${gperf_install_dir}" make make install - export PATH=$PATH:"${gperf_install_dir}"/bin + export PATH=${PATH}:"${gperf_install_dir}"/bin popd echo "Gperf installed successfully" } @@ -85,7 +84,7 @@ build_and_install_libseccomp() { curl -sLO "${libseccomp_tarball_url}" tar -xf "${libseccomp_tarball}" pushd "libseccomp-${libseccomp_version}" - [ "${arch}" == $(uname -m) ] && cc_name="" || cc_name="${arch}-linux-gnu-gcc" + [[ "${arch}" == $(uname -m) ]] && cc_name="" || cc_name="${arch}-linux-gnu-gcc" CC=${cc_name} ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}" make make install @@ -97,11 +96,11 @@ main() { local libseccomp_install_dir="${1:-}" local gperf_install_dir="${2:-}" - if [ -z "${libseccomp_install_dir}" ] || [ -z "${gperf_install_dir}" ]; then + if [[ -z "${libseccomp_install_dir}" ]] || [[ -z "${gperf_install_dir}" ]]; then die "Usage: ${0} " fi - pushd "$workdir" + pushd "${workdir}" # gperf is required for building the libseccomp. build_and_install_gperf build_and_install_libseccomp diff --git a/ci/install_yq.sh b/ci/install_yq.sh index c2d004ff1e..ffc91d3dfa 100755 --- a/ci/install_yq.sh +++ b/ci/install_yq.sh @@ -5,20 +5,20 @@ # SPDX-License-Identifier: Apache-2.0 # -[ -n "$DEBUG" ] && set -o xtrace +[[ -n "${DEBUG}" ]] && set -o xtrace # If we fail for any reason a message will be displayed die() { msg="$*" - echo "ERROR: $msg" >&2 + echo "ERROR: ${msg}" >&2 exit 1 } function verify_yq_exists() { local yq_path=$1 local yq_version=$2 - local expected="yq (https://github.com/mikefarah/yq/) version $yq_version" - if [ -x "${yq_path}" ] && [ "$($yq_path --version)"X == "$expected"X ]; then + local expected="yq (https://github.com/mikefarah/yq/) version ${yq_version}" + if [[ -x "${yq_path}" ]] && [[ "$(${yq_path} --version)"X == "${expected}"X ]]; then return 0 else return 1 @@ -34,20 +34,20 @@ function install_yq() { local yq_path="" INSTALL_IN_GOPATH=${INSTALL_IN_GOPATH:-true} - if [ "${INSTALL_IN_GOPATH}" == "true" ]; then + if [[ "${INSTALL_IN_GOPATH}" == "true" ]]; then GOPATH=${GOPATH:-${HOME}/go} mkdir -p "${GOPATH}/bin" yq_path="${GOPATH}/bin/yq" else yq_path="/usr/local/bin/yq" fi - if verify_yq_exists "$yq_path" "$yq_version"; then + if verify_yq_exists "${yq_path}" "${yq_version}"; then echo "yq is already installed in correct version" return fi - if [ "${yq_path}" == "/usr/local/bin/yq" ]; then + if [[ "${yq_path}" == "/usr/local/bin/yq" ]]; then # Check if we need sudo to install yq - if [ ! -w "/usr/local/bin" ]; then + if [[ ! -w "/usr/local/bin" ]]; then # Check if we have sudo privileges if ! sudo -n true 2>/dev/null; then die "Please provide sudo privileges to install yq" @@ -76,7 +76,7 @@ function install_yq() { # If we're on an apple silicon machine, just assign amd64. # The version of yq we use doesn't have a darwin arm build, # but Rosetta can come to the rescue here. 
- if [ $goos == "Darwin" ]; then
+ if [[ ${goos} == "Darwin" ]]; then
goarch=amd64
else
goarch=arm64
@@ -107,8 +107,7 @@ function install_yq() {
## NOTE: ${var,,} => gives lowercase value of var
local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
- ${precmd} curl -o "${yq_path}" -LSsf "${yq_url}"
- [ $? -ne 0 ] && die "Download ${yq_url} failed"
+ ${precmd} curl -o "${yq_path}" -LSsf "${yq_url}" || die "Download ${yq_url} failed"
${precmd} chmod +x "${yq_path}"
if ! command -v "${yq_path}" >/dev/null; then
diff --git a/ci/openshift-ci/bisect-range.sh b/ci/openshift-ci/bisect-range.sh
index cd6e1030e8..7cc49ab27f 100755
--- a/ci/openshift-ci/bisect-range.sh
+++ b/ci/openshift-ci/bisect-range.sh
@@ -3,25 +3,28 @@
#
# SPDX-License-Identifier: Apache-2.0
#
-if [ "$#" -gt 2 ] || [ "$#" -lt 1 ] ; then
+if [[ "$#" -gt 2 ]] || [[ "$#" -lt 1 ]] ; then
echo "Usage: $0 GOOD [BAD]"
echo "Prints list of available kata-deploy-ci tags between GOOD and BAD commits (by default BAD is the latest available tag)"
exit 255
fi
GOOD="$1"
-[ -n "$2" ] && BAD="$2"
+[[ -n "$2" ]] && BAD="$2"
ARCH=amd64
REPO="quay.io/kata-containers/kata-deploy-ci"
-TAGS=$(skopeo list-tags "docker://$REPO")
+TAGS=$(skopeo list-tags "docker://${REPO}")
+# For testing
+#echo "$TAGS" > tags
+#TAGS=$(cat tags)
# Only amd64
-TAGS=$(echo "$TAGS" | jq '.Tags' | jq "map(select(endswith(\"$ARCH\")))" | jq -r '.[]')
+TAGS=$(echo "${TAGS}" | jq '.Tags' | jq "map(select(endswith(\"${ARCH}\")))" | jq -r '.[]')
# Sort by git
SORTED=""
-[ -n "$BAD" ] && LOG_ARGS="$GOOD~1..$BAD" || LOG_ARGS="$GOOD~1.."
-for TAG in $(git log --merges --pretty=format:%H --reverse $LOG_ARGS); do
- [[ "$TAGS" =~ "$TAG" ]] && SORTED+="
-kata-containers-$TAG-$ARCH"
+[[ -n "${BAD}" ]] && LOG_ARGS="${GOOD}~1..${BAD}" || LOG_ARGS="${GOOD}~1.."
+for TAG in $(git log --merges --pretty=format:%H --reverse "${LOG_ARGS}"); do
+ [[ "${TAGS}" =~ ${TAG} ]] && SORTED+="
+kata-containers-${TAG}-${ARCH}"
done
# Comma separated tags with repo
-echo "$SORTED" | tail -n +2 | sed -e "s@^@$REPO:@" | paste -s -d, -
+echo "${SORTED}" | tail -n +2 | sed -e "s@^@${REPO}:@" | paste -s -d, -
diff --git a/ci/openshift-ci/cleanup.sh b/ci/openshift-ci/cleanup.sh
index b05a3f98f4..36e359c10b 100755
--- a/ci/openshift-ci/cleanup.sh
+++ b/ci/openshift-ci/cleanup.sh
@@ -7,11 +7,14 @@
# This script tries to removes most of the resources added by `test.sh` script
# from the cluster.
-scripts_dir=$(dirname $0)
+scripts_dir=$(dirname "$0")
deployments_dir=${scripts_dir}/cluster/deployments
-configs_dir=${scripts_dir}/configs
-source ${scripts_dir}/lib.sh
+# shellcheck disable=SC1091 # import based on variable
+source "${scripts_dir}/lib.sh"
+
+# Set your katacontainers repo dir location
+[[ -z "${katacontainers_repo_dir}" ]] && echo "Please set katacontainers_repo_dir variable to your kata repo"
# Set to 'yes' if you want to configure SELinux to permissive on the cluster
# workers.
@@ -36,21 +39,21 @@ oc delete -f "${scripts_dir}/smoke/http-server.yaml"
# Delete test.sh resources
oc delete -f "${deployments_dir}/relabel_selinux.yaml"
-if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
+if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
oc delete -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
oc delete -f "${deployments_dir}/workaround-9206-crio.yaml"
fi
-[ ${SELINUX_PERMISSIVE} == "yes" ] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
+[[ ${SELINUX_PERMISSIVE} == "yes" ]] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
# Delete kata-containers
-pushd "$katacontainers_repo_dir/tools/packaging/kata-deploy"
+pushd "${katacontainers_repo_dir}/tools/packaging/kata-deploy" || { echo "Failed to push to ${katacontainers_repo_dir}/tools/packaging/kata-deploy"; exit 125; }
oc delete -f kata-deploy/base/kata-deploy.yaml
oc -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
oc apply -f kata-cleanup/base/kata-cleanup.yaml
echo "Wait for all related pods to be gone"
-( repeats=1; for i in $(seq 1 600); do
+( repeats=1; for _ in $(seq 1 600); do
oc get pods -l name="kubelet-kata-cleanup" --no-headers=true -n kube-system 2>&1 | grep "No resources found" -q && ((repeats++)) || repeats=1
- [ "$repeats" -gt 5 ] && echo kata-cleanup finished && break
+ [[ "${repeats}" -gt 5 ]] && echo kata-cleanup finished && break
sleep 1
done) || { echo "There are still some kata-cleanup related pods after 600 iterations"; oc get all -n kube-system; exit 1; }
oc delete -f kata-cleanup/base/kata-cleanup.yaml
diff --git a/ci/openshift-ci/cluster/deploy_webhook.sh b/ci/openshift-ci/cluster/deploy_webhook.sh
index 0b732da695..b981192e70 100755
--- a/ci/openshift-ci/cluster/deploy_webhook.sh
+++ b/ci/openshift-ci/cluster/deploy_webhook.sh
@@ -13,8 +13,9 @@ set -e
set -o nounset
set -o pipefail
-script_dir="$(realpath $(dirname $0))"
+script_dir="$(realpath "$(dirname "$0")")"
webhook_dir="${script_dir}/../../../tools/testing/kata-webhook"
+# shellcheck disable=SC1091 # import based on variable
source "${script_dir}/../lib.sh"
KATA_RUNTIME=${KATA_RUNTIME:-kata-ci}
@@ -24,7 +25,7 @@ pushd "${webhook_dir}" >/dev/null
info "Builds the kata-webhook"
./create-certs.sh
info "Override our KATA_RUNTIME ConfigMap"
-sed -i deploy/webhook.yaml -e "s/runtime_class: .*$/runtime_class: $KATA_RUNTIME/g"
+sed -i deploy/webhook.yaml -e "s/runtime_class: .*$/runtime_class: ${KATA_RUNTIME}/g"
info "Deploys the kata-webhook"
oc apply -f deploy/
diff --git a/ci/openshift-ci/cluster/install_kata.sh b/ci/openshift-ci/cluster/install_kata.sh
index 8e5d8cd130..314fd0d1bb 100755
--- a/ci/openshift-ci/cluster/install_kata.sh
+++ b/ci/openshift-ci/cluster/install_kata.sh
@@ -7,11 +7,15 @@
# This script installs the built kata-containers in the test cluster,
# and configure a runtime.
-scripts_dir=$(dirname $0)
+scripts_dir=$(dirname "$0")
deployments_dir=${scripts_dir}/deployments
configs_dir=${scripts_dir}/configs
-source ${scripts_dir}/../lib.sh
+# shellcheck disable=SC1091 # import based on variable
+source "${scripts_dir}/../lib.sh"
+
+# Set your katacontainers repo dir location
+[[ -z "${katacontainers_repo_dir}" ]] && echo "Please set katacontainers_repo_dir variable to your kata repo"
# Set to 'yes' if you want to configure SELinux to permissive on the cluster
# workers.
@@ -40,18 +44,18 @@ WORKAROUND_9206_CRIO=${WORKAROUND_9206_CRIO:-no}
#
apply_kata_deploy() {
local deploy_file="tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
- pushd "$katacontainers_repo_dir"
- sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "$deploy_file"
+ pushd "${katacontainers_repo_dir}" || die
+ sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "${deploy_file}"
info "Applying kata-deploy"
oc apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
oc label --overwrite ns kube-system pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
- oc apply -f "$deploy_file"
+ oc apply -f "${deploy_file}"
oc -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
info "Adding the kata runtime classes"
oc apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
- popd
+ popd || die
}
@@ -64,32 +68,32 @@ wait_for_reboot() {
local delta="${1:-900}"
local sleep_time=60
declare -A BOOTIDS
- local workers=($(oc get nodes | \
- awk '{if ($3 == "worker") { print $1 } }'))
+ local workers
+ mapfile -t workers < <(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
# Get the boot ID to compared it changed over time.
for node in "${workers[@]}"; do
- BOOTIDS[$node]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
- node/$node)
- echo "Wait $node reboot"
+ BOOTIDS[${node}]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
+ "node/${node}")
+ echo "Wait ${node} reboot"
done
- echo "Set timeout to $delta seconds"
+ echo "Set timeout to ${delta} seconds"
timer_start=$(date +%s)
- while [ ${#workers[@]} -gt 0 ]; do
- sleep $sleep_time
+ while [[ ${#workers[@]} -gt 0 ]]; do
+ sleep "${sleep_time}"
now=$(date +%s)
- if [ $(($timer_start + $delta)) -lt $now ]; then
+ if [[ $((timer_start + delta)) -lt ${now} ]]; then
echo "Timeout: not all workers rebooted"
return 1
fi
- echo "Checking after $(($now - $timer_start)) seconds"
+ echo "Checking after $((now - timer_start)) seconds"
for i in "${!workers[@]}"; do
current_id=$(oc get \
-o jsonpath='{.status.nodeInfo.bootID}' \
- node/${workers[i]})
- if [ "$current_id" != ${BOOTIDS[${workers[i]}]} ]; then
+ "node/${workers[i]}")
+ if [[ "${current_id}" != "${BOOTIDS[${workers[i]}]}" ]]; then
echo "${workers[i]} rebooted"
- unset workers[i]
+ unset "workers[i]"
fi
done
done
@@ -102,32 +106,34 @@ wait_mcp_update() {
# and none are degraded.
local ready_count=0
local degraded_count=0
- local machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')
+ local machine_count
+ machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')
- if [[ -z "$machine_count" && "$machine_count" -lt 1 ]]; then
+ if [[ -z "${machine_count}" && "${machine_count}" -lt 1 ]]; then
warn "Unabled to obtain the machine count"
return 1
fi
- echo "Set timeout to $delta seconds"
- local deadline=$(($(date +%s) + $delta))
+ echo "Set timeout to ${delta} seconds"
+ local deadline=$(($(date +%s) + delta))
+ local now
# The ready count might not have changed yet, so wait a little.
- while [[ "$ready_count" != "$machine_count" && \
- "$degraded_count" == 0 ]]; do
+ while [[ "${ready_count}" != "${machine_count}" && \
+ "${degraded_count}" == 0 ]]; do
# Let's check it hit the timeout (or not).
- local now=$(date +%s)
- if [ $deadline -lt $now ]; then
+ now=$(date +%s)
+ if [[ ${deadline} -lt ${now} ]]; then
echo "Timeout: not all workers updated" >&2
return 1
fi
- sleep $sleep_time
+ sleep "${sleep_time}"
ready_count=$(oc get mcp worker \
-o jsonpath='{.status.readyMachineCount}')
degraded_count=$(oc get mcp worker \
-o jsonpath='{.status.degradedMachineCount}')
- echo "check machineconfigpool - ready_count: $ready_count degraded_count: $degraded_count"
+ echo "check machineconfigpool - ready_count: ${ready_count} degraded_count: ${degraded_count}"
done
- [ $degraded_count -eq 0 ]
+ [[ ${degraded_count} -eq 0 ]]
}
# Enable the RHCOS extension for the Sandboxed Containers.
@@ -135,10 +141,10 @@ wait_mcp_update() {
enable_sandboxedcontainers_extension() {
info "Enabling the RHCOS extension for Sandboxed Containers"
local deployment_file="${deployments_dir}/machineconfig_sandboxedcontainers_extension.yaml"
- oc apply -f ${deployment_file}
- oc get -f ${deployment_file} || \
+ oc apply -f "${deployment_file}"
+ oc get -f "${deployment_file}" || \
die "Sandboxed Containers extension machineconfig not found"
- wait_mcp_update || die "Failed to update the machineconfigpool"
+ wait_mcp_update 3600 || die "Failed to update the machineconfigpool"
}
# Print useful information for debugging.
@@ -148,8 +154,8 @@ enable_sandboxedcontainers_extension() {
debug_pod() {
local pod="$1"
info "Debug pod: ${pod}"
- oc describe pods "$pod"
- oc logs "$pod"
+ oc describe pods "${pod}"
+ oc logs "${pod}"
}
# Wait for all pods of the app label to contain expected message
@@ -166,30 +172,31 @@ wait_for_app_pods_message() {
local message="$3"
local timeout="$4"
local namespace="$5"
- [ -z "$pod_count" ] && pod_count=1
- [ -z "$timeout" ] && timeout=60
- [ -n "$namespace" ] && namespace=" -n $namespace "
+ [[ -z "${pod_count}" ]] && pod_count=1
+ [[ -z "${timeout}" ]] && timeout=60
+ [[ -n "${namespace}" ]] && namespace=" -n ${namespace} "
local pod
local pods
local i
SECONDS=0
while :; do
- pods=($(oc get pods -l app="$app" --no-headers=true $namespace | awk '{print $1}'))
- [ "${#pods}" -ge "$pod_count" ] && break
- if [ "$SECONDS" -gt "$timeout" ]; then
- printf "Unable to find ${pod_count} pods for '-l app=\"$app\"' in ${SECONDS}s (%s)" "${pods[@]}"
+ mapfile -t pods < <(oc get pods -l app="${app}" --no-headers=true "${namespace}" | awk '{print $1}')
+ [[ "${#pods}" -ge "${pod_count}" ]] && break
+ if [[ "${SECONDS}" -gt "${timeout}" ]]; then
+ printf "Unable to find ${pod_count} pods for '-l app=\"${app}\"' in ${SECONDS}s (%s)" "${pods[@]}"
return 1
fi
done
+ local log
for pod in "${pods[@]}"; do
while :; do
- local log=$(oc logs $namespace "$pod")
- echo "$log" | grep "$message" -q && echo "Found $(echo "$log" | grep "$message") in $pod's log ($SECONDS)" && break;
- if [ "$SECONDS" -gt "$timeout" ]; then
- echo -n "Message '$message' not present in '${pod}' pod of the '-l app=\"$app\"' "
+ log=$(oc logs "${namespace}" "${pod}")
+ echo "${log}" | grep "${message}" -q && echo "Found $(echo "${log}" | grep "${message}") in ${pod}'s log (${SECONDS})" && break;
+ if [[ "${SECONDS}" -gt "${timeout}" ]]; then
+ echo -n "Message '${message}' not present in '${pod}' pod of the '-l app=\"${app}\"' "
printf "pods after ${SECONDS}s :(%s)\n" "${pods[@]}"
- echo "Pod $pod's output so far:"
- echo "$log"
+ echo "Pod ${pod}'s output so far:"
+ echo "${log}"
return 1
fi
sleep 1;
@@ -200,46 +207,45 @@ wait_for_app_pods_message() {
oc config set-context --current --namespace=default
worker_nodes=$(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
-num_nodes=$(echo $worker_nodes | wc -w)
-[ $num_nodes -ne 0 ] || \
+num_nodes=$(echo "${worker_nodes}" | wc -w)
+[[ ${num_nodes} -ne 0 ]] || \
die "No worker nodes detected. Something is wrong with the cluster"
-if [ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]; then
+if [[ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]]; then
# QEMU is deployed on the workers via RCHOS extension.
enable_sandboxedcontainers_extension
- oc apply -f ${deployments_dir}/configmap_installer_qemu.yaml
+ oc apply -f "${deployments_dir}/configmap_installer_qemu.yaml"
fi
-if [ "${KATA_WITH_HOST_KERNEL}" == "yes" ]; then
- oc apply -f ${deployments_dir}/configmap_installer_kernel.yaml
+if [[ "${KATA_WITH_HOST_KERNEL}" == "yes" ]]; then
+ oc apply -f "${deployments_dir}/configmap_installer_kernel.yaml"
fi
apply_kata_deploy
# Set SELinux to permissive mode
-if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
+if [[ ${SELINUX_PERMISSIVE} == "yes" ]]; then
info "Configuring SELinux"
- if [ -z "$SELINUX_CONF_BASE64" ]; then
- export SELINUX_CONF_BASE64=$(echo \
- $(cat $configs_dir/selinux.conf|base64) | \
- sed -e 's/\s//g')
+ if [[ -z "${SELINUX_CONF_BASE64}" ]]; then
+ SELINUX_CONF_BASE64=$(base64 -w0 < "${configs_dir}/selinux.conf")
+ export SELINUX_CONF_BASE64
fi
- envsubst < ${deployments_dir}/machineconfig_selinux.yaml.in | \
+ envsubst < "${deployments_dir}"/machineconfig_selinux.yaml.in | \
oc apply -f -
oc get machineconfig/51-kata-selinux || \
die "SELinux machineconfig not found"
# The new SELinux configuration will trigger another reboot.
- wait_for_reboot
+ wait_for_reboot 900
fi
-if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
+if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
info "Applying workaround to enable skip_mount_home in crio on OCP 4.13"
oc apply -f "${deployments_dir}/workaround-9206-crio.yaml"
oc apply -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
- wait_for_app_pods_message workaround-9206-crio-ds "$num_nodes" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
+ wait_for_app_pods_message workaround-9206-crio-ds "${num_nodes}" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
fi
# FIXME: Remove when https://github.com/kata-containers/kata-containers/pull/8417 is resolved
# Selinux context is currently not handled by kata-deploy
-oc apply -f ${deployments_dir}/relabel_selinux.yaml
-wait_for_app_pods_message restorecon "$num_nodes" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
+oc apply -f "${deployments_dir}/relabel_selinux.yaml"
+wait_for_app_pods_message restorecon "${num_nodes}" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
diff --git a/ci/openshift-ci/lib.sh b/ci/openshift-ci/lib.sh
index 618844074d..dcbed1e700 100644
--- a/ci/openshift-ci/lib.sh
+++ b/ci/openshift-ci/lib.sh
@@ -10,11 +10,12 @@ if command -v go > /dev/null; then
export GOPATH=${GOPATH:-$(go env GOPATH)}
else
# if go isn't installed, set default location for GOPATH
- export GOPATH="${GOPATH:-$HOME/go}"
+ export GOPATH="${GOPATH:-${HOME}/go}"
fi
lib_dir=$(dirname "${BASH_SOURCE[0]}")
-source "$lib_dir/../../tests/common.bash"
+# shellcheck disable=SC1091 # import based on variable
+source "${lib_dir}/../../tests/common.bash"
export katacontainers_repo=${katacontainers_repo:="github.com/kata-containers/kata-containers"}
export katacontainers_repo_dir="${GOPATH}/src/${katacontainers_repo}"
diff --git a/ci/openshift-ci/run_smoke_test.sh b/ci/openshift-ci/run_smoke_test.sh
index 548c605de5..69d54baf3c 100755
--- a/ci/openshift-ci/run_smoke_test.sh
+++ b/ci/openshift-ci/run_smoke_test.sh
@@ -7,15 +7,16 @@
# Run a smoke test.
#
-script_dir=$(dirname $0)
-source ${script_dir}/lib.sh
+script_dir=$(dirname "$0")
+# shellcheck disable=SC1091 # import based on variable
+source "${script_dir}/lib.sh"
pod='http-server'
# Create a pod.
#
info "Creating the ${pod} pod"
-[ -z "$KATA_RUNTIME" ] && die "Please set the KATA_RUNTIME first"
+[[ -z "${KATA_RUNTIME}" ]] && die "Please set the KATA_RUNTIME first"
envsubst < "${script_dir}/smoke/${pod}.yaml.in" | \
oc apply -f - || \
die "failed to create ${pod} pod"
@@ -27,10 +28,10 @@ sleep_time=5
cmd="oc get pod/${pod} -o jsonpath='{.status.containerStatuses[0].state}' | \
grep running > /dev/null"
info "Wait until the pod gets running"
-waitForProcess $wait_time $sleep_time "$cmd" || timed_out=$?
-if [ -n "$timed_out" ]; then
- oc describe pod/${pod}
- oc delete pod/${pod}
+waitForProcess "${wait_time}" "${sleep_time}" "${cmd}" || timed_out=$?
+if [[ -n "${timed_out}" ]]; then
+ oc describe "pod/${pod}"
+ oc delete "pod/${pod}"
die "${pod} not running"
fi
info "${pod} is running"
@@ -39,13 +40,13 @@ info "${pod} is running"
#
hello_file=/tmp/hello
hello_msg='Hello World'
-oc exec ${pod} -- sh -c "echo $hello_msg > $hello_file"
+oc exec "${pod}" -- sh -c "echo ${hello_msg} > ${hello_file}"
info "Creating the service and route"
-if oc apply -f ${script_dir}/smoke/service.yaml; then
+if oc apply -f "${script_dir}/smoke/service.yaml"; then
# Likely on OCP, use service
is_ocp=1
- host=$(oc get route/http-server-route -o jsonpath={.spec.host})
+ host=$(oc get route/http-server-route -o jsonpath="{.spec.host}")
port=80
else
# Likely on plain kubernetes, test using another container
@@ -60,7 +61,7 @@ fi
info "Wait for the HTTP server to respond"
tempfile=$(mktemp)
-check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '$tempfile' | grep -q '$hello_msg'"
+check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '${tempfile}' | grep -q '${hello_msg}'"
if waitForProcess 60 1 "${check_cmd}"; then
test_status=0
info "HTTP server is working"
@@ -78,17 +79,17 @@ else
echo "::endgroup::"
info "HTTP server is unreachable"
fi
-rm -f "$tempfile"
+rm -f "${tempfile}"
# Delete the resources.
#
info "Deleting the service/route"
-if [ "$is_ocp" -eq 0 ]; then
- oc delete -f ${script_dir}/smoke/service_kubernetes.yaml
+if [[ "${is_ocp}" -eq 0 ]]; then
+ oc delete -f "${script_dir}/smoke/service_kubernetes.yaml"
else
- oc delete -f ${script_dir}/smoke/service.yaml
+ oc delete -f "${script_dir}/smoke/service.yaml"
fi
info "Deleting the ${pod} pod"
-oc delete pod/${pod} || test_status=$?
+oc delete "pod/${pod}" || test_status=$? -exit $test_status +exit "${test_status}" diff --git a/ci/openshift-ci/sample-test-reproducer.sh b/ci/openshift-ci/sample-test-reproducer.sh index 3d080d80a1..deee526545 100755 --- a/ci/openshift-ci/sample-test-reproducer.sh +++ b/ci/openshift-ci/sample-test-reproducer.sh @@ -7,7 +7,7 @@ # afterwards OCP cluster using kata-containers primarily created for use # with https://github.com/ldoktor/bisecter -[ "$#" -ne 1 ] && echo "Provide image as the first and only argument" && exit 255 +[[ "$#" -ne 1 ]] && echo "Provide image as the first and only argument" && exit 255 export KATA_DEPLOY_IMAGE="$1" OCP_DIR="${OCP_DIR:-/path/to/your/openshift/release/}" E2E_TEST="${E2E_TEST:-'"[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]"'}" @@ -17,12 +17,12 @@ export KATA_RUNTIME="${KATA_RUNTIME:-kata-qemu}" ## SETUP # Deploy kata SETUP=0 -pushd "$KATA_CI_DIR" || { echo "Failed to cd to '$KATA_CI_DIR'"; exit 255; } +pushd "${KATA_CI_DIR}" || { echo "Failed to cd to '${KATA_CI_DIR}'"; exit 255; } ./test.sh || SETUP=125 cluster/deploy_webhook.sh || SETUP=125 -if [ $SETUP != 0 ]; then +if [[ ${SETUP} != 0 ]]; then ./cleanup.sh - exit "$SETUP" + exit "${SETUP}" fi popd || true # Disable security @@ -32,19 +32,19 @@ oc label --overwrite ns default pod-security.kubernetes.io/enforce=privileged po ## TEST EXECUTION # Run the testing -pushd "$OCP_DIR" || { echo "Failed to cd to '$OCP_DIR'"; exit 255; } -echo "$E2E_TEST" > /tmp/tsts +pushd "${OCP_DIR}" || { echo "Failed to cd to '${OCP_DIR}'"; exit 255; } +echo "${E2E_TEST}" > /tmp/tsts # Remove previously-existing temporarily files as well as previous results OUT=RESULTS/tmp rm -Rf /tmp/*test* /tmp/e2e-* -rm -R $OUT -mkdir -p $OUT +rm -R "${OUT}" +mkdir -p "${OUT}" # Run the tests ignoring the monitor health checks -./openshift-tests run --provider azure -o "$OUT/job.log" --junit-dir "$OUT" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive +./openshift-tests run --provider azure -o "${OUT}/job.log" --junit-dir "${OUT}" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive RET=$? 
popd || true
## CLEANUP
./cleanup.sh
-exit "$RET"
+exit "${RET}"
diff --git a/ci/openshift-ci/test.sh b/ci/openshift-ci/test.sh
index 4347e6c27f..d012e5670f 100755
--- a/ci/openshift-ci/test.sh
+++ b/ci/openshift-ci/test.sh
@@ -8,25 +8,26 @@
# The kata shim to be used
export KATA_RUNTIME=${KATA_RUNTIME:-kata-qemu}
-script_dir=$(dirname $0)
-source ${script_dir}/lib.sh
+script_dir=$(dirname "$0")
+# shellcheck disable=SC1091 # import based on variable
+source "${script_dir}/lib.sh"
suite=$1
-if [ -z "$1" ]; then
+if [[ -z "$1" ]]; then
suite='smoke'
fi
# Make oc and kubectl visible
-export PATH=/tmp/shared:$PATH
+export PATH=/tmp/shared:${PATH}
oc version || die "Test cluster is unreachable"
info "Install and configure kata into the test cluster"
export SELINUX_PERMISSIVE="no"
-${script_dir}/cluster/install_kata.sh || die "Failed to install kata-containers"
+"${script_dir}/cluster/install_kata.sh" || die "Failed to install kata-containers"
-info "Run test suite: $suite"
+info "Run test suite: ${suite}"
test_status='PASS'
-${script_dir}/run_${suite}_test.sh || test_status='FAIL'
-info "Test suite: $suite: $test_status"
-[ "$test_status" == "PASS" ]
+"${script_dir}/run_${suite}_test.sh" || test_status='FAIL'
+info "Test suite: ${suite}: ${test_status}"
+[[ "${test_status}" == "PASS" ]]