mirror of https://github.com/k8snetworkplumbingwg/multus-cni.git
CI, e2e tests: fix legacy
Our CI is currently mistakenly executing the thick image on the e2e legacy lanes. Furthermore, the daemonset spec provided for the e2e tests features (and uses) the kubeconfig / multus config generation binaries, which are available only on the thick image.

This commit addresses both issues by allowing the user of the e2e `setup_cluster.sh` script to specify the path to the desired deployment configuration. The GitHub workflows are updated accordingly.

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>
parent 130db696ca
commit 70660236a8
.github/workflows/legacy-kind-e2e.yml (2 lines changed)
@@ -25,7 +25,7 @@ jobs:
 
       - name: Setup cluster
         working-directory: ./e2e
-        run: ./setup_cluster.sh
+        run: MULTUS_MANIFEST=legacy-multus-daemonset.yml ./setup_cluster.sh
 
       - name: Test simple pod
         working-directory: ./e2e
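For reference, the legacy lane's setup step can be reproduced locally with the same override (a sketch, assuming a working kind + local registry environment as provisioned by the script; leaving MULTUS_MANIFEST unset keeps the default multus-daemonset.yml deployment):

    $ cd e2e
    $ MULTUS_MANIFEST=legacy-multus-daemonset.yml ./setup_cluster.sh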
@@ -182,41 +182,6 @@ spec:
         - name: cnibin
           mountPath: /host/opt/cni/bin
           mountPropagation: Bidirectional
-      - name: generate-kubeconfig
-        image: localhost:5000/multus:e2e
-        command:
-        - "/usr/src/multus-cni/bin/generate-kubeconfig"
-        args:
-        - "-k8s-service-host=$(KUBERNETES_SERVICE_HOST)"
-        - "-k8s-service-port=$(KUBERNETES_SERVICE_PORT)"
-        resources:
-          requests:
-            cpu: "10m"
-            memory: "15Mi"
-        securityContext:
-          privileged: true
-        volumeMounts:
-        - name: cni
-          mountPath: /host/etc/cni/net.d
-          mountPropagation: Bidirectional
-      - name: generate-multus-config
-        image: localhost:5000/multus:e2e
-        command:
-        - "/usr/src/multus-cni/bin/generate-multus-cni-config"
-        args:
-        - "-cni-version=0.3.1"
-        - "-cni-config-dir=/host/etc/cni/net.d"
-        - "-multus-autoconfig-dir=/host/etc/cni/net.d"
-        resources:
-          requests:
-            cpu: "10m"
-            memory: "15Mi"
-        securityContext:
-          privileged: true
-        volumeMounts:
-        - name: cni
-          mountPath: /host/etc/cni/net.d
-          mountPropagation: Bidirectional
       volumes:
       - name: cni
         hostPath:
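The two containers removed above invoke generator binaries that, per the commit message, ship only with the thick image, which is why the legacy (thin) lane cannot use this spec. A hypothetical sanity check for whether a locally built e2e image actually carries them (assuming the image has been pushed to the local registry exactly as referenced in the manifest):

    $ docker run --rm --entrypoint ls localhost:5000/multus:e2e /usr/src/multus-cni/bin

On a thin image this listing would fail or omit the generate-kubeconfig / generate-multus-cni-config binaries.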
@@ -7,6 +7,11 @@ export PATH=${PATH}:./bin
 # Defaults to `docker`.
 OCI_BIN="${OCI_BIN:-docker}"
 
+# define the deployment spec to use when deploying multus.
+# Acceptable values are `legacy-multus-daemonset.yml`. `multus-daemonset.yml`.
+# Defaults to `multus-daemonset.yml`.
+MULTUS_MANIFEST="${MULTUS_MANIFEST:-multus-daemonset.yml}"
+
 kind_network='kind'
 reg_name='kind-registry'
 reg_port='5000'

@@ -67,7 +72,7 @@ kind export kubeconfig
 sudo env PATH=${PATH} koko -p "$worker1_pid,eth1" -p "$worker2_pid,eth1"
 sleep 1
 kubectl -n kube-system wait --for=condition=available deploy/coredns --timeout=300s
-kubectl create -f multus-daemonset.yml
+kubectl create -f "$MULTUS_MANIFEST"
 sleep 1
 kubectl -n kube-system wait --for=condition=ready -l name=multus pod --timeout=300s
 kubectl create -f cni-install.yml
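The new MULTUS_MANIFEST knob relies on plain Bash default expansion: a value exported by the caller wins, otherwise the script falls back to multus-daemonset.yml. A minimal sketch of the behaviour the script depends on:

    $ unset MULTUS_MANIFEST
    $ echo "${MULTUS_MANIFEST:-multus-daemonset.yml}"
    multus-daemonset.yml
    $ MULTUS_MANIFEST=legacy-multus-daemonset.yml
    $ echo "${MULTUS_MANIFEST:-multus-daemonset.yml}"
    legacy-multus-daemonset.yml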