Subdirectory CNI chain loading e2e tests

Adds an e2e test for plain subdirectory CNI chaining, and another that exercises the passthru CNI together with auxiliaryCNIChainName.
dougbtv 2025-04-09 14:33:41 -04:00
parent 528d4f150c
commit 4104fea90d
7 changed files with 354 additions and 0 deletions
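
For context: with subdirectory chain loading, extra CNI config files are dropped into a subdirectory of /etc/cni/net.d named after the chain they should join, and Multus appends them to that chain. A rough sketch of the on-node layout these tests produce, in bash; the 10-kindnet.conflist file name is an assumption about kind's default config, while the other paths come from the fixtures below:

# Layout sketch; run on a node to inspect it:
#   /etc/cni/net.d/10-kindnet.conflist                   primary chain ("name": "kindnet"; file name assumed)
#   /etc/cni/net.d/kindnet/sysctltwiddle.conf            chained onto "kindnet" (plain chaining test)
#   /etc/cni/net.d/vendor-cni-chain/sysctltwiddle.conf   chained via auxiliaryCNIChainName (passthru test)
ls -R /etc/cni/net.d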


@@ -89,6 +89,16 @@ jobs:
      #   working-directory: ./e2e
      #   run: ./test-dra-integration.sh
      - name: Test subdirectory CNI chaining
        if: ${{ matrix.multus-manifest == 'multus-daemonset-thick.yml' }}
        working-directory: ./e2e
        run: ./test-subdirectory-chaining.sh
      - name: Test subdirectory CNI chaining with passthru CNI / auxiliaryCNIChainName
        if: ${{ matrix.multus-manifest == 'multus-daemonset-thick.yml' }}
        working-directory: ./e2e
        run: ./test-subdirectory-chaining-passthru.sh
      - name: Export kind logs
        if: always()
        run: |


@@ -0,0 +1,26 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: multus-daemon-config
  namespace: kube-system
  labels:
    tier: node
    app: multus
data:
  daemon-config.json: |
    {
      "confDir": "/host/etc/cni/net.d",
      "logToStderr": true,
      "logLevel": "debug",
      "logFile": "/tmp/multus.log",
      "binDir": "/host/opt/cni/bin",
      "cniDir": "/var/lib/cni/multus",
      "socketDir": "/host/run/multus",
      "cniVersion": "{{ CNI_VERSION }}",
      "cniConfigDir": "/host/etc/cni/net.d",
      "multusConfigFile": "auto",
      "forceCNIVersion": true,
      "multusAutoconfigDir": "/host/etc/cni/net.d",
      "auxiliaryCNIChainName": "vendor-cni-chain"
    }
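
With multusConfigFile set to auto, the thick plugin generates its default-network CNI config itself; auxiliaryCNIChainName additionally names a chain ("vendor-cni-chain" here) whose plugins the test drops into the matching subdirectory of the config dir. A minimal spot-check sketch, assuming a kind node container named kind-worker and Multus's generated file being 00-multus.conf (both names are assumptions):

# Assumed node container and generated file name; the chain name should appear in the autogenerated config.
docker exec kind-worker grep vendor-cni-chain /etc/cni/net.d/00-multus.conf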


@@ -0,0 +1,94 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cni-setup-script
  namespace: default
data:
  setup.sh: |
    #!/bin/bash
    set -euxo pipefail

    DEFAULT_NETWORK_CNI_NAME="vendor-cni-chain"

    cleanup() {
      echo "Cleaning up..."
      rm -f /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}/sysctltwiddle.conf
      if [ $? -ne 0 ]; then
        echo "Failed to remove sysctltwiddle.conf" >&2
        exit 1
      fi
      echo "Cleanup completed successfully"
    }
    trap cleanup EXIT

    # Create the chained CNI directory if it doesn't exist
    mkdir -p /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}
    if [ $? -ne 0 ]; then
      echo "Failed to create directory /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}" >&2
      exit 1
    fi

    # Write the chained tuning CNI config
    cat <<EOF > /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}/sysctltwiddle.conf
    {
      "cniVersion": "{{ CNI_VERSION }}",
      "name": "sysctltwiddle",
      "type": "tuning",
      "sysctl": {
        "net.ipv4.conf.eth0.arp_filter": "1"
      }
    }
    EOF
    if [ $? -ne 0 ]; then
      echo "Failed to create chained CNI config" >&2
      exit 1
    fi

    echo "CNI chained setup completed successfully."
    sleep infinity
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cni-setup-daemonset
  namespace: default
  labels:
    app: cni-setup
spec:
  selector:
    matchLabels:
      app: cni-setup
  template:
    metadata:
      labels:
        app: cni-setup
    spec:
      tolerations:
      - operator: Exists
        effect: NoSchedule
      - operator: Exists
        effect: NoExecute
      containers:
      - name: setup
        image: quay.io/fedora/fedora:40
        securityContext:
          privileged: true
        volumeMounts:
        - name: cni-config
          mountPath: /host/etc/cni/net.d
        - name: script-volume
          mountPath: /scripts
        command: ["/bin/bash", "/scripts/setup.sh"]
      volumes:
      - name: cni-config
        hostPath:
          path: /etc/cni/net.d
          type: Directory
      - name: script-volume
        configMap:
          name: cni-setup-script
          items:
          - key: setup.sh
            path: setup.sh
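
Once this DaemonSet is running, every node should carry the chained config under the auxiliary chain's subdirectory. A quick manual check, assuming a kind node container named kind-worker (the container name is an assumption):

# kind-worker is an assumed node container name
docker exec kind-worker cat /etc/cni/net.d/vendor-cni-chain/sysctltwiddle.conf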


@@ -0,0 +1,11 @@
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-modified
spec:
  containers:
  - name: sysctl
    image: quay.io/dosmith/fedora-procps
    command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
    securityContext:
      privileged: true


@@ -0,0 +1,95 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cni-setup-script
  namespace: default
data:
  setup.sh: |
    #!/bin/bash
    set -euxo pipefail

    DEFAULT_NETWORK_CNI_NAME="kindnet"

    cleanup() {
      echo "Cleaning up..."
      rm -f /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}/sysctltwiddle.conf
      if [ $? -ne 0 ]; then
        echo "Failed to remove sysctltwiddle.conf" >&2
        exit 1
      fi
      echo "Cleanup completed successfully"
    }
    trap cleanup EXIT

    # Create the chained CNI directory if it doesn't exist
    mkdir -p /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}
    if [ $? -ne 0 ]; then
      echo "Failed to create directory /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}" >&2
      exit 1
    fi

    # Write the chained tuning CNI config
    cat <<EOF > /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}/sysctltwiddle.conf
    {
      "cniVersion": "{{ CNI_VERSION }}",
      "name": "sysctltwiddle",
      "type": "tuning",
      "sysctl": {
        "net.ipv4.conf.IFNAME.arp_filter": "1"
      }
    }
    EOF
    if [ $? -ne 0 ]; then
      echo "Failed to create chained CNI config" >&2
      exit 1
    fi

    echo "CNI chained setup completed successfully."
    sleep infinity
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cni-setup-daemonset
  namespace: default
  labels:
    app: cni-setup
spec:
  selector:
    matchLabels:
      app: cni-setup
  template:
    metadata:
      labels:
        app: cni-setup
    spec:
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      - operator: Exists
        effect: NoExecute
      containers:
      - name: setup
        image: quay.io/fedora/fedora:40
        securityContext:
          privileged: true
        volumeMounts:
        - name: cni-config
          mountPath: /host/etc/cni/net.d
        - name: script-volume
          mountPath: /scripts
        command: ["/bin/bash", "/scripts/setup.sh"]
      volumes:
      - name: cni-config
        hostPath:
          path: /etc/cni/net.d
          type: Directory
      - name: script-volume
        configMap:
          name: cni-setup-script
          items:
          - key: setup.sh
            path: setup.sh
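
This variant differs from the passthru fixture in two ways: the subdirectory is named kindnet, so the tuning config chains onto kind's default network rather than the auxiliary vendor-cni-chain, and the setup pods run with hostNetwork: true. To see the per-chain subdirectory it lays down (kind-worker is an assumed node container name):

# kind-worker is an assumed node container name
docker exec kind-worker ls /etc/cni/net.d/kindnet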


@@ -0,0 +1,81 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail

export PATH=${PATH}:./bin

TEST_POD_NAME="sysctl-modified"
EXPECTED_BINARIES="${EXPECTED_BINARIES:-/opt/cni/bin/ptp /opt/cni/bin/portmap /opt/cni/bin/tuning}"
EXPECTED_CNI_DIR="/etc/cni/net.d"

# Reconfigure multus
echo "Applying subdirectory chain passthru config..."
kubectl apply -f yamls/subdirectory-chain-passthru-configupdate.yml

# Restart the multus daemonset to pick up the new config
echo "Restarting Multus DaemonSet..."
kubectl rollout restart daemonset kube-multus-ds-amd64 -n kube-system
kubectl rollout status daemonset/kube-multus-ds-amd64 -n kube-system

# Debug: show CNI configs and binaries inside each Kind node
echo "Checking CNI configs and binaries on nodes..."
for node in $(kubectl get nodes --no-headers | awk '{print $1}'); do
  container_name=$(docker ps --format '{{.Names}}' | grep "^${node}$")
  echo "------"
  echo "Node: ${node} (container: ${container_name})"

  echo "Listing /opt/cni/bin contents..."
  docker exec "${container_name}" ls -l /opt/cni/bin || echo "WARNING: /opt/cni/bin missing!"

  echo "Checking expected binaries..."
  for bin in $EXPECTED_BINARIES; do
    echo "Checking for ${bin}..."
    if docker exec "${container_name}" test -f "${bin}"; then
      echo "SUCCESS: ${bin} found."
    else
      echo "FAIL: ${bin} NOT found!"
    fi
  done

  echo "Listing /etc/cni/net.d configs..."
  docker exec "${container_name}" ls -l ${EXPECTED_CNI_DIR} || echo "WARNING: ${EXPECTED_CNI_DIR} missing!"
done
echo "------"

# Deploy the daemonset that will lay down the chained CNI config
echo "Applying CNI setup DaemonSet..."
kubectl apply -f yamls/subdirectory-chaining-passthru.yml

# Wait for the daemonset pods to be ready (make sure they set up CNI config)
echo "Waiting for CNI setup DaemonSet to be Ready..."
kubectl rollout status daemonset/cni-setup-daemonset --timeout=300s

# Deploy a test pod that will get chained CNI applied
echo "Applying test pod..."
kubectl apply -f yamls/subdirectory-chaining-pod.yml

# Wait for the pod to be Ready
echo "Waiting for test pod to be Ready..."
kubectl wait --for=condition=ready pod/${TEST_POD_NAME} --timeout=300s

# Check that the sysctl got set
echo "Verifying sysctl arp_filter is set to 1 on eth0..."
SYSCTL_VALUE=$(kubectl exec ${TEST_POD_NAME} -- sysctl -n net.ipv4.conf.eth0.arp_filter)
if [ "$SYSCTL_VALUE" != "1" ]; then
  echo "FAIL: net.ipv4.conf.eth0.arp_filter is not set to 1, got ${SYSCTL_VALUE}" >&2
  exit 1
else
  echo "SUCCESS: net.ipv4.conf.eth0.arp_filter is set correctly."
fi

# Cleanup
echo "Cleaning up test resources..."
kubectl delete -f yamls/subdirectory-chaining-pod.yml
kubectl delete -f yamls/subdirectory-chaining-passthru.yml

echo "Test completed successfully."
exit 0


@@ -0,0 +1,37 @@
#!/bin/sh
set -o errexit

export PATH=${PATH}:./bin

TEST_POD_NAME="sysctl-modified"

# Deploy the daemonset that will lay down the chained CNI config
kubectl apply -f yamls/subdirectory-chaining.yml

# Wait for the daemonset pods to be ready (we need the config to be laid down)
kubectl rollout status daemonset/cni-setup-daemonset

# Deploy a test pod that will get chained CNI applied
kubectl apply -f yamls/subdirectory-chaining-pod.yml

# Wait for the pod to be Ready
kubectl wait --for=condition=ready pod/${TEST_POD_NAME} --timeout=300s

# Check that the sysctl got set properly inside the pod's eth0 interface
echo "Verifying sysctl arp_filter is set to 1 on eth0"
SYSCTL_VALUE=$(kubectl exec ${TEST_POD_NAME} -- sysctl -n net.ipv4.conf.eth0.arp_filter)
if [ "$SYSCTL_VALUE" != "1" ]; then
  echo "FAIL: net.ipv4.conf.eth0.arp_filter is not set to 1, got ${SYSCTL_VALUE}" >&2
  exit 1
else
  echo "SUCCESS: net.ipv4.conf.eth0.arp_filter is set correctly."
fi

# Clean up
echo "Cleaning up test resources"
kubectl delete -f yamls/subdirectory-chaining-pod.yml
kubectl delete -f yamls/subdirectory-chaining.yml

exit 0
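
As wired into the CI job above, both scripts expect a kind cluster that is already running the thick-plugin manifest (multus-daemonset-thick.yml) and are invoked from the e2e directory:

cd e2e
./test-subdirectory-chaining.sh
./test-subdirectory-chaining-passthru.sh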