#!/bin/bash
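
# Abort on the first failing command.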
set -e

# This script prepares a cluster where we install the kcrypt CRDs.
# This is where sealed volumes are created.
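
# Number of parallel Ginkgo nodes (defaults to 1, overridable from the environment)
# and the pinned k3s image used for the throwaway test cluster.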
GINKGO_NODES="${GINKGO_NODES:-1}"
K3S_IMAGE="rancher/k3s:v1.26.1-k3s1"
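
# Resolve the directory this script lives in so relative paths to the test assets
# work regardless of the caller's working directory.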
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
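
# Random 10-character cluster name (derived from $RANDOM) so concurrent runs are unlikely to collide.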
CLUSTER_NAME=$(echo $RANDOM | md5sum | head -c 10; echo;)
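
# Use a throwaway kubeconfig for this run; it is removed again in cleanup().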
export KUBECONFIG=$(mktemp)
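
# Helper that prints a port in the 30000-30200 range that nothing on the host is listening on.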
# https://unix.stackexchange.com/a/423052
getFreePort() {
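  # Candidate ports 30000-30200, minus the ports already in use according to ss, shuffled; take the first one.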
  echo $(comm -23 <(seq "30000" "30200" | sort) <(ss -Htan | awk '{print $4}' | cut -d':' -f2 | sort -u) | shuf | head -n "1")
}
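
# Delete the test cluster and the temporary kubeconfig on exit, even when a step fails.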
cleanup() {
  echo "Cleaning up $CLUSTER_NAME"
  k3d cluster delete "$CLUSTER_NAME" || true
  rm -rf "$KUBECONFIG"
}
trap cleanup EXIT

# Create a cluster and bind ports 80 and 443 on the host.
# This allows us to access the challenger server on 10.0.2.2, which is the IP
# on which qemu "sees" the host.
# We change the cluster and service CIDRs because k3s creates iptables rules that
# interfere with DNS traffic to its default CIDR. If you run k3d inside a k3s cluster
# (inside a Pod), DNS won't work inside the k3d server container unless you use a
# different CIDR. Here we avoid the default "10.43.x.x" CIDR.
k3d cluster create "$CLUSTER_NAME" --k3s-arg "--cluster-cidr=10.49.0.1/16@server:0" --k3s-arg "--service-cidr=10.48.0.1/16@server:0" -p '80:80@server:0' -p '443:443@server:0' --image "$K3S_IMAGE"
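
# Point kubectl at the new cluster by writing its kubeconfig to the temporary file.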
k3d kubeconfig get "$CLUSTER_NAME" > "$KUBECONFIG"

# Import the controller image that we built at the start into the cluster.
# This image has to exist and be available in the local Docker daemon.
k3d image import -c "$CLUSTER_NAME" controller:latest
# Install cert manager
kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml
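# Wait for all cert-manager deployments to become Available before moving on.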
kubectl wait --for=condition=Available deployment --timeout=2m -n cert-manager --all
# Replace the CLUSTER_IP in the kustomize resource
# Only needed for debugging so that we can access the server from the host
# (the 10.0.2.2 IP address is only useful from within qemu)
export CLUSTER_IP=$(docker inspect "k3d-${CLUSTER_NAME}-server-0" | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
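# Render the ingress manifest from its template, substituting the exported variables (e.g. CLUSTER_IP).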
envsubst \
< "$SCRIPT_DIR/../tests/assets/challenger-server-ingress.template.yaml" \
> "$SCRIPT_DIR/../tests/assets/challenger-server-ingress.yaml"
# Install the challenger server kustomization
kubectl apply -k "$SCRIPT_DIR/../tests/assets/"
# 10.0.2.2 is where the vm sees the host
# https://stackoverflow.com/a/6752280
export KMS_ADDRESS="10.0.2.2.challenger.sslip.io"
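
# Run the e2e suite. LABEL is expected to be set by the caller to select which specs run.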
go run github.com/onsi/ginkgo/v2/ginkgo -v --nodes $GINKGO_NODES --label-filter $LABEL --fail-fast -r ./tests/