Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 09:52:49 +00:00)
kube-proxy: wait for cluster CIDR, skip delete events

commit 75913e9949
parent a38b9363ec
@@ -360,10 +360,20 @@ func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
 		},
 	}
 	condition := func(event watch.Event) (bool, error) {
-		if n, ok := event.Object.(*v1.Node); ok {
-			return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
+		// don't process delete events
+		if event.Type != watch.Modified && event.Type != watch.Added {
+			return false, nil
 		}
-		return false, fmt.Errorf("event object not of type Node")
+
+		n, ok := event.Object.(*v1.Node)
+		if !ok {
+			return false, fmt.Errorf("event object not of type Node")
+		}
+		// don't consider the node if it is going to be deleted, and keep waiting
+		if !n.DeletionTimestamp.IsZero() {
+			return false, nil
+		}
+		return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
 	}

 	evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition)
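For illustration only, not part of the commit: a minimal, self-contained sketch of how the new condition reacts to different watch events. The helper name podCIDRCondition and the sample node data are assumptions made for this sketch; only the filtering logic mirrors the diff above.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// podCIDRCondition is an illustrative copy of the condition installed by the
// commit: ignore anything that is not an Added or Modified event, ignore nodes
// that are being deleted, and only succeed once PodCIDR/PodCIDRs are populated.
func podCIDRCondition(event watch.Event) (bool, error) {
	// don't process delete events
	if event.Type != watch.Modified && event.Type != watch.Added {
		return false, nil
	}

	n, ok := event.Object.(*v1.Node)
	if !ok {
		return false, fmt.Errorf("event object not of type Node")
	}
	// don't consider the node if it is going to be deleted, and keep waiting
	if !n.DeletionTimestamp.IsZero() {
		return false, nil
	}
	return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
}

func main() {
	now := metav1.Now()
	// staleNode already has CIDRs but is on its way out (hypothetical sample data).
	staleNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "test-node", DeletionTimestamp: &now},
		Spec:       v1.NodeSpec{PodCIDR: "10.0.0.0/24", PodCIDRs: []string{"10.0.0.0/24"}},
	}
	// freshNode is the live node with the CIDRs the allocator assigned.
	freshNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "test-node"},
		Spec:       v1.NodeSpec{PodCIDR: "192.168.0.0/24", PodCIDRs: []string{"192.168.0.0/24", "fd00:1:2::/64"}},
	}

	events := []watch.Event{
		{Type: watch.Deleted, Object: staleNode},  // skipped: delete event
		{Type: watch.Modified, Object: staleNode}, // skipped: node is being deleted
		{Type: watch.Added, Object: freshNode},    // done: PodCIDRs are set
	}
	for _, ev := range events {
		done, err := podCIDRCondition(ev)
		fmt.Printf("%-8s done=%v err=%v\n", ev.Type, done, err)
	}
}

Under the old condition, any event whose object carried a PodCIDR could complete the wait, including a Delete event for a stale node; with the new checks, only an Added or Modified event for a live node with populated PodCIDR/PodCIDRs does.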
@@ -31,9 +31,12 @@ import (

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/watch"
 	netutils "k8s.io/utils/net"

 	clientsetfake "k8s.io/client-go/kubernetes/fake"
+	clientgotesting "k8s.io/client-go/testing"

 	proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
 	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
@@ -743,3 +746,52 @@ detectLocalMode: "BridgeInterface"`)
 		tearDown(file, tempDir)
 	}
 }
+
+func Test_waitForPodCIDR(t *testing.T) {
+	expected := []string{"192.168.0.0/24", "fd00:1:2::/64"}
+	nodeName := "test-node"
+	oldNode := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            nodeName,
+			ResourceVersion: "1000",
+		},
+		Spec: v1.NodeSpec{
+			PodCIDR:  "10.0.0.0/24",
+			PodCIDRs: []string{"10.0.0.0/24", "2001:db2:1/64"},
+		},
+	}
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            nodeName,
+			ResourceVersion: "1",
+		},
+	}
+	updatedNode := node.DeepCopy()
+	updatedNode.Spec.PodCIDRs = expected
+	updatedNode.Spec.PodCIDR = expected[0]
+
+	// start with the new node
+	client := clientsetfake.NewSimpleClientset()
+	client.AddReactor("list", "nodes", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
+		obj := &v1.NodeList{}
+		return true, obj, nil
+	})
+	fakeWatch := watch.NewFake()
+	client.PrependWatchReactor("nodes", clientgotesting.DefaultWatchReactor(fakeWatch, nil))
+
+	go func() {
+		fakeWatch.Add(node)
+		// receive a delete event for the old node
+		fakeWatch.Delete(oldNode)
+		// set the PodCIDRs on the new node
+		fakeWatch.Modify(updatedNode)
+	}()
+	got, err := waitForPodCIDR(client, node.Name)
+	if err != nil {
+		t.Errorf("waitForPodCIDR() unexpected error %v", err)
+		return
+	}
+	if !reflect.DeepEqual(got.Spec.PodCIDRs, expected) {
+		t.Errorf("waitForPodCIDR() got %v expected to be %v ", got.Spec.PodCIDRs, expected)
+	}
+}
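The test exercises exactly that scenario: the fake watcher first adds the node without CIDRs, then delivers a Delete event for a stale node object that already carries PodCIDRs, and finally a Modify once the expected CIDRs are assigned. With the event-type check in place, the Delete event no longer completes the wait, so waitForPodCIDR returns only after the final Modify, with Spec.PodCIDRs matching expected.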