diff --git a/test/integration/clustercidr/ipam_test.go b/test/integration/clustercidr/ipam_test.go
index 3814d3ccbef..167370e157c 100644
--- a/test/integration/clustercidr/ipam_test.go
+++ b/test/integration/clustercidr/ipam_test.go
@@ -25,7 +25,6 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -41,7 +40,7 @@ import (
 	netutils "k8s.io/utils/net"
 )
 
-func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) {
+func TestIPAMMultiCIDRRangeAllocatorType(t *testing.T) {
 	// set the feature gate to enable MultiCIDRRangeAllocator
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
 
@@ -86,19 +85,19 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) {
 		},
 		{
 			name:             "Single stack IPv4 Pod CIDR assigned to a node",
-			clusterCIDR:      makeClusterCIDR("ipv4-cc", "10.0.0.0/16", "", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "singlestack": {"true"}})),
+			clusterCIDR:      makeClusterCIDR("ipv4-cc", "10.0.0.0/16", "", nodeSelector(map[string][]string{"ipv4": {"true"}, "singlestack": {"true"}})),
 			node:             makeNode("ipv4-node", map[string]string{"ipv4": "true", "singlestack": "true"}),
 			expectedPodCIDRs: []string{"10.0.0.0/24"},
 		},
 		{
 			name:             "Single stack IPv6 Pod CIDR assigned to a node",
-			clusterCIDR:      makeClusterCIDR("ipv6-cc", "", "fd00:20:100::/112", 8, nodeSelector(map[string][]string{"ipv6": {"true"}})),
+			clusterCIDR:      makeClusterCIDR("ipv6-cc", "", "fd00:20:100::/112", nodeSelector(map[string][]string{"ipv6": {"true"}})),
 			node:             makeNode("ipv6-node", map[string]string{"ipv6": "true"}),
 			expectedPodCIDRs: []string{"fd00:20:100::/120"},
 		},
 		{
 			name:             "DualStack Pod CIDRs assigned to a node",
-			clusterCIDR:      makeClusterCIDR("dualstack-cc", "192.168.0.0/16", "fd00:30:100::/112", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
+			clusterCIDR:      makeClusterCIDR("dualstack-cc", "192.168.0.0/16", "fd00:30:100::/112", nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
 			node:             makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"}),
 			expectedPodCIDRs: []string{"192.168.0.0/24", "fd00:30:100::/120"},
 		},
@@ -111,424 +110,30 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) {
 			if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), test.clusterCIDR, metav1.CreateOptions{}); err != nil {
 				t.Fatal(err)
 			}
-		}
-		// Sleep for one second to make sure the controller process the new created ClusterCIDR.
-		time.Sleep(1 * time.Second)
-
-		if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), test.node, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-		if gotPodCIDRs, err := nodePodCIDRs(clientSet, test.node.Name); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(test.expectedPodCIDRs, gotPodCIDRs) {
-			t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", test.expectedPodCIDRs, gotPodCIDRs)
-		}
-		})
-	}
-}
-
-func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) {
-	// set the feature gate to enable MultiCIDRRangeAllocator
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
-
-	_, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
-		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
-			// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
-			opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
-			opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true")
-		},
-	})
-	defer tearDownFn()
-
-	clientSet := clientset.NewForConfigOrDie(kubeConfig)
-	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
-
-	ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go ipamController.Run(ctx.Done())
-	sharedInformer.Start(ctx.Done())
-
-	t.Run("Pod CIDR release after node delete", func(t *testing.T) {
-		// Create the test ClusterCIDR.
-		clusterCIDR := makeClusterCIDR("dualstack-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}}))
-		if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-
-		// Sleep for one second to make sure the controller process the new created ClusterCIDR.
-		time.Sleep(1 * time.Second)
-
-		// Create 1st node and validate that Pod CIDRs are correctly assigned.
-		node1 := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"})
-		expectedPodCIDRs1 := []string{"192.168.0.0/24", "fd00:30:100::/120"}
-		if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node1, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-		if gotPodCIDRs, err := nodePodCIDRs(clientSet, node1.Name); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(expectedPodCIDRs1, gotPodCIDRs) {
-			t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs1, gotPodCIDRs)
-		}
-
-		// Create 2nd node and validate that Pod CIDRs are correctly assigned.
-		node2 := makeNode("dualstack-node-2", map[string]string{"ipv4": "true", "ipv6": "true"})
-		expectedPodCIDRs2 := []string{"192.168.1.0/24", "fd00:30:100::100/120"}
-		if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node2, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-		if gotPodCIDRs, err := nodePodCIDRs(clientSet, node2.Name); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(expectedPodCIDRs2, gotPodCIDRs) {
-			t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs2, gotPodCIDRs)
-		}
-
-		// Delete the 1st node, to validate that the PodCIDRs are released.
-		if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), node1.Name, metav1.DeleteOptions{}); err != nil {
-			t.Fatal(err)
-		}
-
-		// Create 3rd node, validate that it has Pod CIDRs assigned from the released CIDR.
-		node3 := makeNode("dualstack-node-3", map[string]string{"ipv4": "true", "ipv6": "true"})
-		expectedPodCIDRs3 := []string{"192.168.0.0/24", "fd00:30:100::/120"}
-		if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node3, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-		if gotPodCIDRs, err := nodePodCIDRs(clientSet, node3.Name); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(expectedPodCIDRs3, gotPodCIDRs) {
-			t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs3, gotPodCIDRs)
-		}
-	})
-}
-
-func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) {
-	// set the feature gate to enable MultiCIDRRangeAllocator.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
-
-	_, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
-		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
-			// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
-			opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
-			opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true")
-		},
-	})
-	defer tearDownFn()
-
-	clientSet := clientset.NewForConfigOrDie(kubeConfig)
-	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
-
-	ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go ipamController.Run(ctx.Done())
-	sharedInformer.Start(ctx.Done())
-
-	t.Run("delete cc with node associated", func(t *testing.T) {
-
-		// Create a ClusterCIDR.
-		clusterCIDR := makeClusterCIDR("dualstack-cc-del", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}}))
-		if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-
-		// Sleep for one second to make sure the controller processes the newly created ClusterCIDR.
-		time.Sleep(1 * time.Second)
-
-		// Create a node, which gets pod CIDR from the clusterCIDR created above.
-		node := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"})
-		expectedPodCIDRs := []string{"192.168.0.0/24", "fd00:30:100::/120"}
-		if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-		if gotPodCIDRs, err := nodePodCIDRs(clientSet, node.Name); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(expectedPodCIDRs, gotPodCIDRs) {
-			t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs, gotPodCIDRs)
-		}
-
-		// Delete the ClusterCIDR
-		if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil {
-			t.Fatal(err)
-		}
-
-		// Sleep for five seconds to make sure the ClusterCIDR exists with a deletion timestamp after marked for deletion.
-		time.Sleep(5 * time.Second)
-
-		// Make sure that the ClusterCIDR is not deleted, as there is a node associated with it.
-		cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if cc == nil {
-			t.Fatalf("expected Cluster CIDR got nil")
-		}
-		if cc.DeletionTimestamp.IsZero() {
-			t.Fatalf("expected Cluster CIDR to have set a deletion timestamp ")
-		}
-
-		//Delete the node.
-		if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{}); err != nil {
-			t.Fatal(err)
-		}
-
-		// Poll to make sure that the Node is deleted.
-		if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-			_, err := clientSet.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
-			if err != nil && !apierrors.IsNotFound(err) {
-				return false, err
-			}
-			return apierrors.IsNotFound(err), nil
-		}); err != nil {
-			t.Fatalf("failed while waiting for Node %q to be deleted: %v", node.Name, err)
-		}
-
-		// Poll to make sure that the ClusterCIDR is now deleted, as there is no node associated with it.
-		if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-			_, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{})
-			if err != nil && !apierrors.IsNotFound(err) {
-				return false, err
-			}
-			return apierrors.IsNotFound(err), nil
-		}); err != nil {
-			t.Fatalf("failed while waiting for ClusterCIDR %q to be deleted: %v", clusterCIDR.Name, err)
-		}
-	})
-}
-
-func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) {
-	// set the feature gate to enable MultiCIDRRangeAllocator.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
-
-	_, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
-		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
-			// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
-			opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
-			opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true")
-		},
-	})
-	defer tearDownFn()
-
-	clientSet := clientset.NewForConfigOrDie(kubeConfig)
-	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
-
-	ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go ipamController.Run(ctx.Done())
-	sharedInformer.Start(ctx.Done())
-
-	t.Run("Pod CIDRS must not be allocated from a terminating CC", func(t *testing.T) {
-
-		// Create a ClusterCIDR which is the best match based on number of matching labels.
-		clusterCIDR := makeClusterCIDR("dualstack-cc-del", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}}))
-		if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-
-		// Create a ClusterCIDR which has fewer matching labels than the previous ClusterCIDR.
-		clusterCIDR2 := makeClusterCIDR("few-label-match-cc-del", "10.1.0.0/23", "fd12:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}}))
-		if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR2, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-
-		// Sleep for one second to make sure the controller processes the newly created ClusterCIDR.
-		time.Sleep(1 * time.Second)
-
-		// Create a node, which gets pod CIDR from the clusterCIDR created above.
-		node := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"})
-		expectedPodCIDRs := []string{"192.168.0.0/24", "fd00:30:100::/120"}
-		if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-		if gotPodCIDRs, err := nodePodCIDRs(clientSet, node.Name); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(expectedPodCIDRs, gotPodCIDRs) {
-			t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs, gotPodCIDRs)
-		}
-
-		// Delete the ClusterCIDR
-		if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil {
-			t.Fatal(err)
-		}
-
-		// Make sure that the ClusterCIDR is not deleted, as there is a node associated with it.
-		cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if cc == nil {
-			t.Fatalf("expected Cluster CIDR got nil")
-		}
-		if cc.DeletionTimestamp.IsZero() {
-			t.Fatalf("expected Cluster CIDR to have set a deletion timestamp ")
-		}
-
-		// Create a node, which should get Pod CIDRs from the ClusterCIDR with fewer matching label Count,
-		// as the best match ClusterCIDR is marked as terminating.
-		node2 := makeNode("dualstack-node-2", map[string]string{"ipv4": "true", "ipv6": "true"})
-		expectedPodCIDRs2 := []string{"10.1.0.0/24", "fd12:30:100::/120"}
-		if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node2, metav1.CreateOptions{}); err != nil {
-			t.Fatal(err)
-		}
-		if gotPodCIDRs2, err := nodePodCIDRs(clientSet, node2.Name); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(expectedPodCIDRs2, gotPodCIDRs2) {
-			t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs2, gotPodCIDRs2)
-		}
-	})
-}
-
-func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTieBreak(t *testing.T) {
-	// set the feature gate to enable MultiCIDRRangeAllocator
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
-
-	_, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
-		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
-			// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
-			opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
-			opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true")
-		},
-	})
-	defer tearDownFn()
-
-	clientSet := clientset.NewForConfigOrDie(kubeConfig)
-	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
-
-	ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go ipamController.Run(ctx.Done())
-	sharedInformer.Start(ctx.Done())
-
-	tests := []struct {
-		name             string
-		clusterCIDRs     []*networkingv1alpha1.ClusterCIDR
-		node             *v1.Node
-		expectedPodCIDRs []string
-	}{
-		{
-			name: "ClusterCIDR with highest matching labels",
-			clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
-				makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
-				makeClusterCIDR("double-label-match-cc", "10.0.0.0/23", "fd12:30:200::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-			},
-			node:             makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}),
-			expectedPodCIDRs: []string{"10.0.0.0/24", "fd12:30:200::/120"},
-		},
-		{
-			name: "ClusterCIDR with fewer allocatable Pod CIDRs",
-			clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
-				makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
-				makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-			},
-			node:             makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}),
-			expectedPodCIDRs: []string{"172.16.0.0/24", "fd34:30:100::/120"},
-		},
-		{
-			name: "ClusterCIDR with lower perNodeHostBits",
-			clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
-				makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
-				makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-			},
-			node:             makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}),
-			expectedPodCIDRs: []string{"172.31.0.0/25", "fd35:30:100::/121"},
-		},
-		{
-			name: "ClusterCIDR with label having lower alphanumeric value",
-			clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
-				makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
-				makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("low-alpha-cc", "192.169.0.0/24", "fd12:40:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})),
-			},
-			node:             makeNode("dualstack-node", map[string]string{"apv4": "true", "bpv6": "true", "ipv4": "true", "ipv6": "true", "match": "single"}),
-			expectedPodCIDRs: []string{"192.169.0.0/25", "fd12:40:100::/121"},
-		},
-		{
-			name: "ClusterCIDR with alphanumerically smaller IP address",
-			clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
-				makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
-				makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
-				makeClusterCIDR("low-alpha-cc", "192.169.0.0/24", "fd12:40:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})),
-				makeClusterCIDR("low-ip-cc", "10.1.0.0/24", "fd00:10:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})),
-			},
-			node:             makeNode("dualstack-node", map[string]string{"apv4": "true", "bpv6": "true", "ipv4": "true", "ipv6": "true", "match": "single"}),
-			expectedPodCIDRs: []string{"10.1.0.0/25", "fd00:10:100::/121"},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			for _, clusterCIDR := range test.clusterCIDRs {
-				// Create the test ClusterCIDR
-				if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil {
-					t.Fatal(err)
-				}
-			}
-
-			// Sleep for one second to make sure the controller process the new created ClusterCIDR.
-			time.Sleep(1 * time.Second)
-
-			// Create a node and validate that Pod CIDRs are correctly assigned.
-			if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), test.node, metav1.CreateOptions{}); err != nil {
-				t.Fatal(err)
-			}
-			if gotPodCIDRs, err := nodePodCIDRs(clientSet, test.node.Name); err != nil {
-				t.Fatal(err)
-			} else if !reflect.DeepEqual(test.expectedPodCIDRs, gotPodCIDRs) {
-				t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", test.expectedPodCIDRs, gotPodCIDRs)
-			}
-
-			// Delete the node.
-			if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), test.node.Name, metav1.DeleteOptions{}); err != nil {
-				t.Fatal(err)
-			}
-
-			// Wait till the Node is deleted.
-			if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-				_, err := clientSet.CoreV1().Nodes().Get(context.TODO(), test.node.Name, metav1.GetOptions{})
-				if err != nil && !apierrors.IsNotFound(err) {
-					return false, err
-				}
-				return apierrors.IsNotFound(err), nil
-			}); err != nil {
-				t.Fatalf("failed while waiting for Node %q to be deleted: %v", test.node.Name, err)
-			}
-
-			// Delete the Cluster CIDRs.
-			for _, clusterCIDR := range test.clusterCIDRs {
-				// Delete the test ClusterCIDR.
-				if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil {
-					t.Fatal(err)
-				}
-
-				// Wait till the ClusterCIDR is deleted.
-				if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-					_, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{})
-					if err != nil && !apierrors.IsNotFound(err) {
+			// Wait for the ClusterCIDR to be created.
+			if err := wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) {
+				cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), test.clusterCIDR.Name, metav1.GetOptions{})
+				if err != nil {
 					return false, err
 				}
-					return apierrors.IsNotFound(err), nil
+				return cc != nil, nil
 			}); err != nil {
-					t.Fatalf("failed while waiting for ClusterCIDR %q to be deleted: %v", clusterCIDR.Name, err)
+				t.Fatalf("failed while waiting for ClusterCIDR %q to be created: %v", test.clusterCIDR.Name, err)
 			}
 		}
+
+		// Sleep for one second to make sure the controller processes the newly created ClusterCIDR.
+		time.Sleep(1 * time.Second)
+
+		if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), test.node, metav1.CreateOptions{}); err != nil {
+			t.Fatal(err)
+		}
+		if gotPodCIDRs, err := nodePodCIDRs(clientSet, test.node.Name); err != nil {
+			t.Fatal(err)
+		} else if !reflect.DeepEqual(test.expectedPodCIDRs, gotPodCIDRs) {
+			t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", test.expectedPodCIDRs, gotPodCIDRs)
+		}
 	})
 	}
 }
@@ -546,9 +151,9 @@ func booststrapMultiCIDRRangeAllocator(t *testing.T,
 	clusterCIDRs := []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
 	nodeMaskCIDRs := []int{24, 120}
 
-	// set the current state of the informer, we can pre-seed nodes and ClusterCIDRs, so that we
+	// set the current state of the informer; we can pre-seed nodes and ClusterCIDRs so we
 	// can simulate the bootstrap
-	initialCC := makeClusterCIDR("initial-cc", "10.2.0.0/16", "fd00:20:96::/112", 8, nodeSelector(map[string][]string{"bootstrap": {"true"}}))
+	initialCC := makeClusterCIDR("initial-cc", "10.2.0.0/16", "fd00:20:96::/112", nodeSelector(map[string][]string{"bootstrap": {"true"}}))
 	if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), initialCC, metav1.CreateOptions{}); err != nil {
 		t.Fatal(err)
 	}
@@ -596,13 +201,13 @@ func makeNode(name string, labels map[string]string) *v1.Node {
 	}
 }
 
-func makeClusterCIDR(name, ipv4CIDR, ipv6CIDR string, perNodeHostBits int32, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR {
+func makeClusterCIDR(name, ipv4CIDR, ipv6CIDR string, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR {
 	return &networkingv1alpha1.ClusterCIDR{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
 		Spec: networkingv1alpha1.ClusterCIDRSpec{
-			PerNodeHostBits: perNodeHostBits,
+			PerNodeHostBits: 8,
 			IPv4:            ipv4CIDR,
 			IPv6:            ipv6CIDR,
 			NodeSelector:    nodeSelector,
@@ -631,7 +236,7 @@ func nodeSelector(labels map[string][]string) *v1.NodeSelector {
 
 func nodePodCIDRs(c clientset.Interface, name string) ([]string, error) {
 	var node *v1.Node
-	nodePollErr := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
+	nodePollErr := wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) {
 		var err error
 		node, err = c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
 		if err != nil {
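
The creation-wait this patch adds inlines its polling logic at the call site. Below is a minimal standalone sketch of the same pattern, assuming the clientset.Interface alias and the v1alpha1 ClusterCIDR client already used in this file; the helper name waitForClusterCIDRExists is hypothetical and not part of the patch. Unlike the inlined version, it keeps polling on a NotFound error instead of aborting on the first failure, which would require keeping the apierrors import that this patch removes.

// waitForClusterCIDRExists polls once per second until the named ClusterCIDR
// is visible to the API server, or until the timeout expires. Hypothetical
// helper; not part of the patch above.
func waitForClusterCIDRExists(c clientset.Interface, name string, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		_, err := c.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			// Not visible yet; keep polling instead of failing fast.
			return false, nil
		}
		if err != nil {
			// Unexpected error; stop polling and surface it.
			return false, err
		}
		return true, nil
	})
}

With such a helper, the inlined wait in the test body would reduce to a single call, e.g. waitForClusterCIDRExists(clientSet, test.clusterCIDR.Name, 5*time.Second).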