ci: shellcheck SC2292

SC2292 (style): Prefer [[ ]] over [ ] for tests in Bash/Ksh. Switching might
change how globs and a few operators are handled, but none of those are used
in these scripts, so the conversion is behaviour-preserving.
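
For illustration only (these lines are not from the repo), the class of
difference SC2292 warns about:

    f="a b"
    [ -n $f ]       # $f word-splits; [ sees too many arguments and errors out
    [[ -n $f ]]     # no word splitting inside [[ ]]; evaluates to true
    [[ $f == a* ]]  # unquoted right side of == is a pattern inside [[ ]]; true
    [ "$f" == a* ]  # a* is glob-expanded against the current directory first; fragile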

Related to: #10951

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
Author: Lukáš Doktor <ldoktor@redhat.com>
Date: 2025-03-19 10:06:09 +01:00
Commit: 154a4ddc00
Parent: 667e26036c
9 changed files with 48 additions and 48 deletions
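
SC2292 is an optional shellcheck check, so reproducing it locally requires
enabling it explicitly. A sketch, assuming a shellcheck release that ships
the optional require-double-brackets group (the script path below is a
placeholder):

    shellcheck --enable=require-double-brackets ci/some-script.sh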

@@ -10,7 +10,7 @@ set -o errtrace
 set -o nounset
 set -o pipefail
-[ -n "${DEBUG:-}" ] && set -o xtrace
+[[ -n "${DEBUG:-}" ]] && set -o xtrace
 script_name=${0##*/}
@@ -57,11 +57,11 @@ list_issues_for_pr()
 local pr="${1:-}"
 local repo="${2:-kata-containers/kata-containers}"
-[ -z "${pr}" ] && die "need PR"
+[[ -z "${pr}" ]] && die "need PR"
 local commits=$(gh pr view ${pr} --repo ${repo} --json commits --jq .commits[].messageBody)
-[ -z "${commits}" ] && die "cannot determine commits for PR ${pr}"
+[[ -z "${commits}" ]] && die "cannot determine commits for PR ${pr}"
 # Extract the issue number(s) from the commits.
 #
@@ -86,7 +86,7 @@ list_issues_for_pr()
 sed 's/[.,\#]//g' |\
 sort -nu || true)
-[ -z "${issues}" ] && die "cannot determine issues for PR ${pr}"
+[[ -z "${issues}" ]] && die "cannot determine issues for PR ${pr}"
 echo "# Issues linked to PR"
 echo "#"
@@ -103,11 +103,11 @@ list_labels_for_issue()
 {
 local issue="${1:-}"
-[ -z "${issue}" ] && die "need issue number"
+[[ -z "${issue}" ]] && die "need issue number"
 local labels=$(gh issue view ${issue} --repo kata-containers/kata-containers --json labels)
-[ -z "${labels}" ] && die "cannot determine labels for issue ${issue}"
+[[ -z "${labels}" ]] && die "cannot determine labels for issue ${issue}"
 printf "${labels}"
 }

@@ -22,11 +22,11 @@ workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"
 # Variables for libseccomp
 libseccomp_version="${LIBSECCOMP_VERSION:-""}"
-if [ -z "${libseccomp_version}" ]; then
+if [[ -z "${libseccomp_version}" ]]; then
 libseccomp_version=$(get_from_kata_deps ".externals.libseccomp.version")
 fi
 libseccomp_url="${LIBSECCOMP_URL:-""}"
-if [ -z "${libseccomp_url}" ]; then
+if [[ -z "${libseccomp_url}" ]]; then
 libseccomp_url=$(get_from_kata_deps ".externals.libseccomp.url")
 fi
 libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
@@ -35,11 +35,11 @@ cflags="-O2"
 # Variables for gperf
 gperf_version="${GPERF_VERSION:-""}"
-if [ -z "${gperf_version}" ]; then
+if [[ -z "${gperf_version}" ]]; then
 gperf_version=$(get_from_kata_deps ".externals.gperf.version")
 fi
 gperf_url="${GPERF_URL:-""}"
-if [ -z "${gperf_url}" ]; then
+if [[ -z "${gperf_url}" ]]; then
 gperf_url=$(get_from_kata_deps ".externals.gperf.url")
 fi
 gperf_tarball="gperf-${gperf_version}.tar.gz"
@@ -47,7 +47,7 @@ gperf_tarball_url="${gperf_url}/${gperf_tarball}"
 # We need to build the libseccomp library from sources to create a static library for the musl libc.
 # However, ppc64le and s390x have no musl targets in Rust. Hence, we do not set cflags for the musl libc.
-if ([ "${arch}" != "ppc64le" ] && [ "${arch}" != "s390x" ]); then
+if ([[ "${arch}" != "ppc64le" ]] && [[ "${arch}" != "s390x" ]]); then
 # Set FORTIFY_SOURCE=1 because the musl-libc does not have some functions about FORTIFY_SOURCE=2
 cflags="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 -O2"
 fi
@@ -85,7 +85,7 @@ build_and_install_libseccomp() {
 curl -sLO "${libseccomp_tarball_url}"
 tar -xf "${libseccomp_tarball}"
 pushd "libseccomp-${libseccomp_version}"
-[ "${arch}" == $(uname -m) ] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
+[[ "${arch}" == $(uname -m) ]] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
 CC=${cc_name} ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
 make
 make install
@@ -97,7 +97,7 @@ main() {
 local libseccomp_install_dir="${1:-}"
 local gperf_install_dir="${2:-}"
-if [ -z "${libseccomp_install_dir}" ] || [ -z "${gperf_install_dir}" ]; then
+if [[ -z "${libseccomp_install_dir}" ]] || [[ -z "${gperf_install_dir}" ]]; then
 die "Usage: ${0} <libseccomp-install-dir> <gperf-install-dir>"
 fi

@@ -5,7 +5,7 @@
 # SPDX-License-Identifier: Apache-2.0
 #
-[ -n "${DEBUG}" ] && set -o xtrace
+[[ -n "${DEBUG}" ]] && set -o xtrace
 # If we fail for any reason a message will be displayed
 die() {
@@ -18,7 +18,7 @@ function verify_yq_exists() {
 local yq_path=$1
 local yq_version=$2
 local expected="yq (https://github.com/mikefarah/yq/) version ${yq_version}"
-if [ -x "${yq_path}" ] && [ "$(${yq_path} --version)"X == "${expected}"X ]; then
+if [[ -x "${yq_path}" ]] && [[ "$(${yq_path} --version)"X == "${expected}"X ]]; then
 return 0
 else
 return 1
@@ -34,7 +34,7 @@ function install_yq() {
 local yq_path=""
 INSTALL_IN_GOPATH=${INSTALL_IN_GOPATH:-true}
-if [ "${INSTALL_IN_GOPATH}" == "true" ]; then
+if [[ "${INSTALL_IN_GOPATH}" == "true" ]]; then
 GOPATH=${GOPATH:-${HOME}/go}
 mkdir -p "${GOPATH}/bin"
 yq_path="${GOPATH}/bin/yq"
@@ -45,9 +45,9 @@ function install_yq() {
 echo "yq is already installed in correct version"
 return
 fi
-if [ "${yq_path}" == "/usr/local/bin/yq" ]; then
+if [[ "${yq_path}" == "/usr/local/bin/yq" ]]; then
 # Check if we need sudo to install yq
-if [ ! -w "/usr/local/bin" ]; then
+if [[ ! -w "/usr/local/bin" ]]; then
 # Check if we have sudo privileges
 if ! sudo -n true 2>/dev/null; then
 die "Please provide sudo privileges to install yq"
@@ -76,7 +76,7 @@ function install_yq() {
 # If we're on an apple silicon machine, just assign amd64.
 # The version of yq we use doesn't have a darwin arm build,
 # but Rosetta can come to the rescue here.
-if [ ${goos} == "Darwin" ]; then
+if [[ ${goos} == "Darwin" ]]; then
 goarch=amd64
 else
 goarch=arm64
@@ -108,7 +108,7 @@ function install_yq() {
 ## NOTE: ${var,,} => gives lowercase value of var
 local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
 ${precmd} curl -o "${yq_path}" -LSsf "${yq_url}"
-[ $? -ne 0 ] && die "Download ${yq_url} failed"
+[[ $? -ne 0 ]] && die "Download ${yq_url} failed"
 ${precmd} chmod +x "${yq_path}"
 if ! command -v "${yq_path}" >/dev/null; then

@@ -3,13 +3,13 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 #
-if [ "$#" -gt 2 ] || [ "$#" -lt 1 ] ; then
+if [[ "$#" -gt 2 ]] || [[ "$#" -lt 1 ]] ; then
 echo "Usage: $0 GOOD [BAD]"
 echo "Prints list of available kata-deploy-ci tags between GOOD and BAD commits (by default BAD is the latest available tag)"
 exit 255
 fi
 GOOD="$1"
-[ -n "$2" ] && BAD="$2"
+[[ -n "$2" ]] && BAD="$2"
 ARCH=amd64
 REPO="quay.io/kata-containers/kata-deploy-ci"
@@ -18,7 +18,7 @@ TAGS=$(skopeo list-tags "docker://${REPO}")
 TAGS=$(echo "${TAGS}" | jq '.Tags' | jq "map(select(endswith(\"${ARCH}\")))" | jq -r '.[]')
 # Sort by git
 SORTED=""
-[ -n "${BAD}" ] && LOG_ARGS="${GOOD}~1..${BAD}" || LOG_ARGS="${GOOD}~1.."
+[[ -n "${BAD}" ]] && LOG_ARGS="${GOOD}~1..${BAD}" || LOG_ARGS="${GOOD}~1.."
 for TAG in $(git log --merges --pretty=format:%H --reverse ${LOG_ARGS}); do
 [[ "${TAGS}" =~ "${TAG}" ]] && SORTED+="
 kata-containers-${TAG}-${ARCH}"

@@ -40,7 +40,7 @@ if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
 oc delete -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
 oc delete -f "${deployments_dir}/workaround-9206-crio.yaml"
 fi
-[ ${SELINUX_PERMISSIVE} == "yes" ] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
+[[ ${SELINUX_PERMISSIVE} == "yes" ]] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
 # Delete kata-containers
 pushd "${katacontainers_repo_dir}/tools/packaging/kata-deploy"
@@ -50,7 +50,7 @@ oc apply -f kata-cleanup/base/kata-cleanup.yaml
 echo "Wait for all related pods to be gone"
 ( repeats=1; for i in $(seq 1 600); do
 oc get pods -l name="kubelet-kata-cleanup" --no-headers=true -n kube-system 2>&1 | grep "No resources found" -q && ((repeats++)) || repeats=1
-[ "${repeats}" -gt 5 ] && echo kata-cleanup finished && break
+[[ "${repeats}" -gt 5 ]] && echo kata-cleanup finished && break
 sleep 1
 done) || { echo "There are still some kata-cleanup related pods after 600 iterations"; oc get all -n kube-system; exit 1; }
 oc delete -f kata-cleanup/base/kata-cleanup.yaml

@@ -75,10 +75,10 @@ wait_for_reboot() {
 echo "Set timeout to ${delta} seconds"
 timer_start=$(date +%s)
-while [ ${#workers[@]} -gt 0 ]; do
+while [[ ${#workers[@]} -gt 0 ]]; do
 sleep ${sleep_time}
 now=$(date +%s)
-if [ $((${timer_start} + ${delta})) -lt ${now} ]; then
+if [[ $((${timer_start} + ${delta})) -lt ${now} ]]; then
 echo "Timeout: not all workers rebooted"
 return 1
 fi
@@ -87,7 +87,7 @@ wait_for_reboot() {
 current_id=$(oc get \
 -o jsonpath='{.status.nodeInfo.bootID}' \
 node/${workers[i]})
-if [ "${current_id}" != ${BOOTIDS[${workers[i]}]} ]; then
+if [[ "${current_id}" != ${BOOTIDS[${workers[i]}]} ]]; then
 echo "${workers[i]} rebooted"
 unset workers[i]
 fi
@@ -116,7 +116,7 @@ wait_mcp_update() {
 "${degraded_count}" == 0 ]]; do
 # Let's check it hit the timeout (or not).
 local now=$(date +%s)
-if [ ${deadline} -lt ${now} ]; then
+if [[ ${deadline} -lt ${now} ]]; then
 echo "Timeout: not all workers updated" >&2
 return 1
 fi
@@ -127,7 +127,7 @@ wait_mcp_update() {
 -o jsonpath='{.status.degradedMachineCount}')
 echo "check machineconfigpool - ready_count: ${ready_count} degraded_count: ${degraded_count}"
 done
-[ ${degraded_count} -eq 0 ]
+[[ ${degraded_count} -eq 0 ]]
 }
 # Enable the RHCOS extension for the Sandboxed Containers.
@@ -166,17 +166,17 @@ wait_for_app_pods_message() {
 local message="$3"
 local timeout="$4"
 local namespace="$5"
-[ -z "${pod_count}" ] && pod_count=1
-[ -z "${timeout}" ] && timeout=60
-[ -n "${namespace}" ] && namespace=" -n ${namespace} "
+[[ -z "${pod_count}" ]] && pod_count=1
+[[ -z "${timeout}" ]] && timeout=60
+[[ -n "${namespace}" ]] && namespace=" -n ${namespace} "
 local pod
 local pods
 local i
 SECONDS=0
 while :; do
 pods=($(oc get pods -l app="${app}" --no-headers=true ${namespace} | awk '{print $1}'))
-[ "${#pods}" -ge "${pod_count}" ] && break
-if [ "${SECONDS}" -gt "${timeout}" ]; then
+[[ "${#pods}" -ge "${pod_count}" ]] && break
+if [[ "${SECONDS}" -gt "${timeout}" ]]; then
 printf "Unable to find ${pod_count} pods for '-l app=\"${app}\"' in ${SECONDS}s (%s)" "${pods[@]}"
 return 1
 fi
@@ -185,7 +185,7 @@ wait_for_app_pods_message() {
 while :; do
 local log=$(oc logs ${namespace} "${pod}")
 echo "${log}" | grep "${message}" -q && echo "Found $(echo "${log}" | grep "${message}") in ${pod}'s log (${SECONDS})" && break;
-if [ "${SECONDS}" -gt "${timeout}" ]; then
+if [[ "${SECONDS}" -gt "${timeout}" ]]; then
 echo -n "Message '${message}' not present in '${pod}' pod of the '-l app=\"${app}\"' "
 printf "pods after ${SECONDS}s :(%s)\n" "${pods[@]}"
 echo "Pod ${pod}'s output so far:"
@@ -201,25 +201,25 @@ oc config set-context --current --namespace=default
 worker_nodes=$(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
 num_nodes=$(echo ${worker_nodes} | wc -w)
-[ ${num_nodes} -ne 0 ] || \
+[[ ${num_nodes} -ne 0 ]] || \
 die "No worker nodes detected. Something is wrong with the cluster"
-if [ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]; then
+if [[ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]]; then
 # QEMU is deployed on the workers via RCHOS extension.
 enable_sandboxedcontainers_extension
 oc apply -f ${deployments_dir}/configmap_installer_qemu.yaml
 fi
-if [ "${KATA_WITH_HOST_KERNEL}" == "yes" ]; then
+if [[ "${KATA_WITH_HOST_KERNEL}" == "yes" ]]; then
 oc apply -f ${deployments_dir}/configmap_installer_kernel.yaml
 fi
 apply_kata_deploy
 # Set SELinux to permissive mode
-if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
+if [[ ${SELINUX_PERMISSIVE} == "yes" ]]; then
 info "Configuring SELinux"
-if [ -z "${SELINUX_CONF_BASE64}" ]; then
+if [[ -z "${SELINUX_CONF_BASE64}" ]]; then
 export SELINUX_CONF_BASE64=$(echo \
 $(cat ${configs_dir}/selinux.conf|base64) | \
 sed -e 's/\s//g')

@@ -15,7 +15,7 @@ pod='http-server'
 # Create a pod.
 #
 info "Creating the ${pod} pod"
-[ -z "${KATA_RUNTIME}" ] && die "Please set the KATA_RUNTIME first"
+[[ -z "${KATA_RUNTIME}" ]] && die "Please set the KATA_RUNTIME first"
 envsubst < "${script_dir}/smoke/${pod}.yaml.in" | \
 oc apply -f - || \
 die "failed to create ${pod} pod"
@@ -28,7 +28,7 @@ cmd="oc get pod/${pod} -o jsonpath='{.status.containerStatuses[0].state}' | \
 grep running > /dev/null"
 info "Wait until the pod gets running"
 waitForProcess ${wait_time} ${sleep_time} "${cmd}" || timed_out=$?
-if [ -n "${timed_out}" ]; then
+if [[ -n "${timed_out}" ]]; then
 oc describe pod/${pod}
 oc delete pod/${pod}
 die "${pod} not running"
@@ -83,7 +83,7 @@ rm -f "${tempfile}"
 # Delete the resources.
 #
 info "Deleting the service/route"
-if [ "${is_ocp}" -eq 0 ]; then
+if [[ "${is_ocp}" -eq 0 ]]; then
 oc delete -f ${script_dir}/smoke/service_kubernetes.yaml
 else
 oc delete -f ${script_dir}/smoke/service.yaml

@@ -7,7 +7,7 @@
 # afterwards OCP cluster using kata-containers primarily created for use
 # with https://github.com/ldoktor/bisecter
-[ "$#" -ne 1 ] && echo "Provide image as the first and only argument" && exit 255
+[[ "$#" -ne 1 ]] && echo "Provide image as the first and only argument" && exit 255
 export KATA_DEPLOY_IMAGE="$1"
 OCP_DIR="${OCP_DIR:-/path/to/your/openshift/release/}"
 E2E_TEST="${E2E_TEST:-'"[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]"'}"
@@ -20,7 +20,7 @@ SETUP=0
 pushd "${KATA_CI_DIR}" || { echo "Failed to cd to '${KATA_CI_DIR}'"; exit 255; }
 ./test.sh || SETUP=125
 cluster/deploy_webhook.sh || SETUP=125
-if [ ${SETUP} != 0 ]; then
+if [[ ${SETUP} != 0 ]]; then
 ./cleanup.sh
 exit "${SETUP}"
 fi

@@ -12,7 +12,7 @@ script_dir=$(dirname $0)
 source ${script_dir}/lib.sh
 suite=$1
-if [ -z "$1" ]; then
+if [[ -z "$1" ]]; then
 suite='smoke'
 fi
@@ -29,4 +29,4 @@ info "Run test suite: ${suite}"
 test_status='PASS'
 ${script_dir}/run_${suite}_test.sh || test_status='FAIL'
 info "Test suite: ${suite}: ${test_status}"
-[ "${test_status}" == "PASS" ]
+[[ "${test_status}" == "PASS" ]]