mirror of https://github.com/kata-containers/kata-containers.git
Merge pull request #11164 from wainersm/fix_kbs_on_aks
tests/k8s: fix kbs installation on Azure AKS

.github/workflows/run-kata-coco-tests.yaml
@@ -222,6 +222,11 @@ jobs:
       AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
       AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
       SNAPSHOTTER: ${{ matrix.snapshotter }}
+      # Caution: the ingress controller currently used to expose the KBS
+      # service requires many vCPUs, leaving only a few for the tests.
+      # Depending on the host type chosen, this can result in a cluster
+      # with insufficient resources.
+      K8S_TEST_HOST_TYPE: "all"
       USING_NFD: "false"
       AUTO_GENERATE_POLICY: "yes"
     steps:
@@ -41,7 +41,7 @@ function _print_instance_type() {
         small)
             echo "Standard_D2s_v5"
             ;;
-        normal)
+        all|normal)
             echo "Standard_D4s_v5"
             ;;
         *)
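
For reference, a quick sanity check of the new case pattern; this is a self-contained stand-in for the helper (illustrative only, since the full script isn't reproduced here), showing that both "all" and "normal" now resolve to the 4-vCPU SKU:

#!/usr/bin/env bash
# Minimal stand-in for _print_instance_type(); not the real function.
_print_instance_type() {
    case "${1:-}" in
        small) echo "Standard_D2s_v5" ;;
        all|normal) echo "Standard_D4s_v5" ;;
        *) echo "ERROR: unknown host type '${1:-}'" >&2; return 1 ;;
    esac
}

_print_instance_type all     # -> Standard_D4s_v5
_print_instance_type normal  # -> Standard_D4s_v5
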
@@ -75,10 +75,10 @@ function _print_rg_name() {
     echo "${AZ_RG:-"kataCI-$(_print_cluster_name "${test_type}")"}"
 }
 
-# Enable the HTTP application routing add-on to AKS.
+# Enable the approuting (application routing) add-on to AKS.
 # Use with ingress to expose a service API externally.
 #
-function enable_cluster_http_application_routing() {
+function enable_cluster_approuting() {
     local test_type="${1:-k8s}"
     local cluster_name
     local rg
@@ -86,8 +86,7 @@ function enable_cluster_http_application_routing() {
     rg="$(_print_rg_name "${test_type}")"
     cluster_name="$(_print_cluster_name "${test_type}")"
 
-    az aks enable-addons -g "${rg}" -n "${cluster_name}" \
-        --addons http_application_routing
+    az aks approuting enable -g "${rg}" -n "${cluster_name}"
 }
 
 function install_azure_cli() {
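
With the switch from the HTTP application routing add-on to az aks approuting, the new add-on state can be inspected directly on the cluster. A hedged sketch; the --query path is an assumption about the az aks show output schema, and the resource group/cluster names are placeholders:

#!/usr/bin/env bash
# Assumed verification step (not part of the patch).
rg="kataCI-k8s-example"        # placeholder resource group
cluster_name="k8s-example"     # placeholder cluster name

# Property path is assumed; check your az CLI output if it differs.
az aks show -g "${rg}" -n "${cluster_name}" \
    --query "ingressProfile.webAppRouting.enabled" -o tsv
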
@@ -194,24 +193,6 @@ function get_cluster_credentials() {
         -n "$(_print_cluster_name "${test_type}")"
 }
 
-
-# Get the AKS DNS zone name of HTTP application routing.
-#
-# Note: if the HTTP application routing add-on isn't installed in the cluster
-# then it will return an empty string.
-#
-function get_cluster_specific_dns_zone() {
-    local test_type="${1:-k8s}"
-    local cluster_name
-    local rg
-    local q="addonProfiles.httpApplicationRouting.config.HTTPApplicationRoutingZoneName"
-
-    rg="$(_print_rg_name "${test_type}")"
-    cluster_name="$(_print_cluster_name "${test_type}")"
-
-    az aks show -g "${rg}" -n "${cluster_name}" --query "${q}" | tr -d \"
-}
-
 function delete_cluster() {
     test_type="${1:-k8s}"
     local rg
@@ -372,6 +372,10 @@ function kbs_k8s_deploy() {
     fi
     echo "::endgroup::"
 
+    echo "::group::Post deploy actions"
+    _post_deploy "${ingress}"
+    echo "::endgroup::"
+
     # By default, the KBS service is reachable within the cluster only,
     # thus the following health checker should run from a pod. So start a
     # debug pod where it will try to get a response from the service. The
@@ -419,13 +423,20 @@ function kbs_k8s_deploy() {
     fi
 }
 
-# Return the kbs service host name in case ingress is configured
+# Return the kbs service public IP in case ingress is configured
 # otherwise the cluster IP.
 #
 kbs_k8s_svc_host() {
     if kubectl get ingress -n "$KBS_NS" 2>/dev/null | grep -q kbs; then
-        kubectl get ingress "$KBS_INGRESS_NAME" -n "$KBS_NS" \
-            -o jsonpath='{.spec.rules[0].host}' 2>/dev/null
+        local host
+        # The ingress IP address can take a while to show up.
+        SECONDS=0
+        while true; do
+            host=$(kubectl get ingress "${KBS_INGRESS_NAME}" -n "${KBS_NS}" -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+            [[ -z "${host}" && ${SECONDS} -lt 30 ]] || break
+            sleep 5
+        done
+        echo "${host}"
     elif kubectl get svc "$KBS_SVC_NAME" -n "$KBS_NS" &>/dev/null; then
         local host
         host=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}' -n "$KBS_NS")
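
The retry loop added above is essentially a generic "wait for the LoadBalancer IP" pattern. A sketch of it factored into a standalone helper; the function name and the timeout argument are illustrative, not part of the patch:

#!/usr/bin/env bash
# Poll an ingress until its LoadBalancer IP shows up or the timeout expires,
# mirroring the loop added to kbs_k8s_svc_host().
wait_for_ingress_ip() {
    local name="$1" ns="$2" timeout="${3:-30}"
    local ip=""
    SECONDS=0
    while true; do
        ip=$(kubectl get ingress "${name}" -n "${ns}" \
            -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null)
        [[ -z "${ip}" && ${SECONDS} -lt ${timeout} ]] || break
        sleep 5
    done
    echo "${ip}"
}

# Example: wait_for_ingress_ip "${KBS_INGRESS_NAME}" "${KBS_NS}" 60
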
@@ -526,29 +537,17 @@ _handle_ingress() {
 # Implement the ingress handler for AKS.
 #
 _handle_ingress_aks() {
-    local dns_zone
-
-    dns_zone=$(get_cluster_specific_dns_zone "")
-
-    # In case the DNS zone name is empty, the cluster might not have the HTTP
-    # application routing add-on. Let's try to enable it.
-    if [ -z "$dns_zone" ]; then
-        echo "::group::Enable HTTP application routing add-on"
-        enable_cluster_http_application_routing ""
-        echo "::endgroup::"
-        dns_zone=$(get_cluster_specific_dns_zone "")
-    fi
-
-    if [ -z "$dns_zone" ]; then
-        echo "ERROR: the DNS zone name is nil, it cannot configure Ingress"
-        return 1
-    fi
+    echo "::group::Enable approuting (application routing) add-on"
+    enable_cluster_approuting ""
+    echo "::endgroup::"
 
     pushd "${COCO_KBS_DIR}/config/kubernetes/overlays/"
 
     echo "::group::$(pwd)/ingress.yaml"
-    KBS_INGRESS_CLASS="addon-http-application-routing" \
-    KBS_INGRESS_HOST="kbs.${dns_zone}" \
+    # We don't use a cluster DNS zone; instead we use the ingress public IP,
+    # so KBS_INGRESS_HOST is set to an empty string.
+    KBS_INGRESS_CLASS="webapprouting.kubernetes.azure.com" \
+    KBS_INGRESS_HOST="\"\"" \
         envsubst < ingress.yaml | tee ingress.yaml.tmp
     echo "::endgroup::"
     mv ingress.yaml.tmp ingress.yaml
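
For context on the KBS_INGRESS_HOST="\"\"" value: envsubst fills the template placeholders from the environment, so the host field is rendered as a literal empty string. A small self-contained illustration; the real ingress.yaml lives in the trustee overlays and is only mimicked here with the two placeholders the hunk sets:

#!/usr/bin/env bash
# Stand-in template; the actual overlay file has more fields.
cat > /tmp/ingress.yaml.in <<'EOF'
ingressClassName: ${KBS_INGRESS_CLASS}
host: ${KBS_INGRESS_HOST}
EOF

KBS_INGRESS_CLASS="webapprouting.kubernetes.azure.com" \
KBS_INGRESS_HOST="\"\"" \
envsubst < /tmp/ingress.yaml.in
# Prints:
#   ingressClassName: webapprouting.kubernetes.azure.com
#   host: ""
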
@@ -564,6 +563,22 @@ _handle_ingress_nodeport() {
     export DEPLOYMENT_DIR=nodeport
 }
 
+# Run further actions after the KBS is deployed, usually to apply extra
+# configurations.
+#
+_post_deploy() {
+    local ingress="${1:-}"
+
+    if [[ "${ingress}" = "aks" ]]; then
+        # The AKS managed ingress controller defaults to two nginx pod
+        # replicas, each requesting 500m of CPU. On a cluster made of small
+        # VMs (e.g. 2 vCPU) one of the pods might not even start. We need
+        # only one nginx, so patch the controller to keep a single replica.
+        echo "Patch the ingress controller to have only one replica of nginx"
+        waitForProcess "20" "5" \
+            "kubectl patch nginxingresscontroller/default -n app-routing-system --type=merge -p='{\"spec\":{\"scaling\": {\"minReplicas\": 1}}}'"
+    fi
+}
+
 # Prepare necessary resources for qemu-se runtime
 # Documentation: https://github.com/confidential-containers/trustee/tree/main/attestation-service/verifier/src/se
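
A hedged way to confirm that the patch applied in _post_deploy took effect; the resource and field names come from the kubectl patch above, while running it by hand after deployment is an assumption:

#!/usr/bin/env bash
# Read back minReplicas from the NginxIngressController CR that was patched,
# then list the controller pods that actually came up.
kubectl get nginxingresscontroller/default -n app-routing-system \
    -o jsonpath='{.spec.scaling.minReplicas}{"\n"}'
kubectl get pods -n app-routing-system
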