Merge pull request #115453 from sarveshr7/multicidr-tests

Add integration tests for MultiCIDRRangeAllocator
Kubernetes Prow Robot 2023-02-01 12:21:45 -08:00 committed by GitHub
commit f97d14c6c8
3 changed files with 431 additions and 49 deletions


@@ -732,8 +732,8 @@ func (r *multiCIDRRangeAllocator) updateCIDRsAllocation(data multiCIDRNodeReserv
 // defaultNodeSelector generates a label with defaultClusterCIDRKey as the key and
 // defaultClusterCIDRValue as the value, it is an internal nodeSelector matching all
 // nodes. Only used if no ClusterCIDR selects the node.
-func defaultNodeSelector() ([]byte, error) {
-	nodeSelector := &v1.NodeSelector{
+func defaultNodeSelector() *v1.NodeSelector {
+	return &v1.NodeSelector{
 		NodeSelectorTerms: []v1.NodeSelectorTerm{
 			{
 				MatchExpressions: []v1.NodeSelectorRequirement{
@@ -746,13 +746,6 @@ func defaultNodeSelector() ([]byte, error) {
 			},
 		},
 	}
-	marshalledSelector, err := nodeSelector.Marshal()
-	if err != nil {
-		return nil, err
-	}
-	return marshalledSelector, nil
 }

 // prioritizedCIDRs returns a list of CIDRs to be allocated to the node.
@@ -883,7 +876,7 @@ func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(node *v1.Node, occ
 	pq := make(PriorityQueue, 0)
 	for label, clusterCIDRList := range r.cidrMap {
-		labelsMatch, matchCnt, err := r.matchCIDRLabels(node, []byte(label))
+		labelsMatch, matchCnt, err := r.matchCIDRLabels(node, label)
 		if err != nil {
 			return nil, err
 		}
@@ -916,11 +909,11 @@ func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(node *v1.Node, occ
 	}

 	// Append the catch all CIDR config.
-	defaultSelector, err := defaultNodeSelector()
+	defaultSelector, err := v1helper.NodeSelectorAsSelector(defaultNodeSelector())
 	if err != nil {
 		return nil, err
 	}
-	if clusterCIDRList, ok := r.cidrMap[string(defaultSelector)]; ok {
+	if clusterCIDRList, ok := r.cidrMap[defaultSelector.String()]; ok {
 		matchingCIDRs = append(matchingCIDRs, clusterCIDRList...)
 	}
 	return matchingCIDRs, nil
@@ -928,21 +921,14 @@ func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(node *v1.Node, occ
 // matchCIDRLabels Matches the Node labels to CIDR Configs.
 // Returns true only if all the labels match, also returns the count of matching labels.
-func (r *multiCIDRRangeAllocator) matchCIDRLabels(node *v1.Node, label []byte) (bool, int, error) {
+func (r *multiCIDRRangeAllocator) matchCIDRLabels(node *v1.Node, label string) (bool, int, error) {
 	var labelSet labels.Set
 	var matchCnt int

 	labelsMatch := false
-	selector := &v1.NodeSelector{}
-	err := selector.Unmarshal(label)
-	if err != nil {
-		klog.Errorf("Unable to unmarshal node selector for label %v: %v", label, err)
-		return labelsMatch, 0, err
-	}
-	ls, err := v1helper.NodeSelectorAsSelector(selector)
+	ls, err := labels.Parse(label)
 	if err != nil {
-		klog.Errorf("Unable to convert NodeSelector to labels.Selector: %v", err)
+		klog.Errorf("Unable to parse label %s to labels.Selector: %v", label, err)
 		return labelsMatch, 0, err
 	}
 	reqs, selectable := ls.Requirements()
@@ -959,7 +945,7 @@ func (r *multiCIDRRangeAllocator) matchCIDRLabels(node *v1.Node, label []byte) (
 			labelsMatch = true
 		}
 	}
-	return labelsMatch, matchCnt, err
+	return labelsMatch, matchCnt, nil
 }

 // Methods for handling ClusterCIDRs.
@@ -1234,20 +1220,20 @@ func (r *multiCIDRRangeAllocator) deleteClusterCIDR(clusterCIDR *networkingv1alp
 }

 func (r *multiCIDRRangeAllocator) nodeSelectorKey(clusterCIDR *networkingv1alpha1.ClusterCIDR) (string, error) {
-	var nodeSelector []byte
+	var nodeSelector labels.Selector
 	var err error

 	if clusterCIDR.Spec.NodeSelector != nil {
-		nodeSelector, err = clusterCIDR.Spec.NodeSelector.Marshal()
+		nodeSelector, err = v1helper.NodeSelectorAsSelector(clusterCIDR.Spec.NodeSelector)
 	} else {
-		nodeSelector, err = defaultNodeSelector()
+		nodeSelector, err = v1helper.NodeSelectorAsSelector(defaultNodeSelector())
 	}

 	if err != nil {
 		return "", err
 	}

-	return string(nodeSelector), nil
+	return nodeSelector.String(), nil
 }

 func listClusterCIDRs(kubeClient clientset.Interface) (*networkingv1alpha1.ClusterCIDRList, error) {
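
The changes in this file replace protobuf-marshalled NodeSelector bytes with the canonical labels.Selector string as the cidrMap key, so keys are human-readable and can be recovered with labels.Parse. Below is a minimal standalone sketch of that round trip; it is not part of the patch, the selector value is illustrative, and it assumes the same in-tree v1helper package the patch imports:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

func main() {
	// A NodeSelector like the ones ClusterCIDRs carry (illustrative values).
	ns := &v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{{
			MatchExpressions: []v1.NodeSelectorRequirement{{
				Key:      "ipv4",
				Operator: v1.NodeSelectorOpIn,
				Values:   []string{"true"},
			}},
		}},
	}

	// New key scheme: the canonical selector string, e.g. "ipv4 in (true)".
	sel, err := v1helper.NodeSelectorAsSelector(ns)
	if err != nil {
		panic(err)
	}
	key := sel.String()
	fmt.Println("cidrMap key:", key)

	// matchCIDRLabels can now rebuild a labels.Selector directly from the key
	// and evaluate its requirements against a node's label set.
	parsed, err := labels.Parse(key)
	if err != nil {
		panic(err)
	}
	reqs, selectable := parsed.Requirements()
	nodeLabels := labels.Set{"ipv4": "true"}
	if selectable {
		for _, req := range reqs {
			fmt.Printf("requirement %q matches node: %v\n", req.String(), req.Matches(nodeLabels))
		}
	}
}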


@@ -34,6 +34,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
+	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/controller"
 	cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset"
 	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
@@ -82,8 +83,8 @@ func getTestNodeSelector(requirements []testNodeSelectorRequirement) string {
 		testNodeSelector.NodeSelectorTerms = append(testNodeSelector.NodeSelectorTerms, nst)
 	}

-	marshalledSelector, _ := testNodeSelector.Marshal()
-	return string(marshalledSelector)
+	selector, _ := v1helper.NodeSelectorAsSelector(testNodeSelector)
+	return selector.String()
 }

 func getTestCidrMap(testClusterCIDRMap map[string][]*testClusterCIDR) map[string][]*cidrset.ClusterCIDR {
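
The integration tests in the next file pass perNodeHostBits explicitly to makeClusterCIDR and assert the resulting per-node prefixes: with 8 host bits, 10.0.0.0/16 yields /24 pod CIDRs and fd00:20:100::/112 yields /120. A small sketch of that arithmetic follows; the helper name perNodeMaskSize is hypothetical, not part of the patch:

package main

import (
	"fmt"
	"net"
)

// perNodeMaskSize mirrors the arithmetic the tests rely on: a per-node CIDR
// leaves perNodeHostBits host bits, so its prefix length is
// (address length - perNodeHostBits).
func perNodeMaskSize(clusterCIDR string, perNodeHostBits int) (int, error) {
	_, ipNet, err := net.ParseCIDR(clusterCIDR)
	if err != nil {
		return 0, err
	}
	_, addrBits := ipNet.Mask.Size() // 32 for IPv4, 128 for IPv6
	return addrBits - perNodeHostBits, nil
}

func main() {
	// With perNodeHostBits=8: 10.0.0.0/16 -> /24 per node and
	// fd00:20:100::/112 -> /120 per node, matching expectedPodCIDRs below.
	for _, cidr := range []string{"10.0.0.0/16", "fd00:20:100::/112"} {
		size, err := perNodeMaskSize(cidr, 8)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> per-node /%d\n", cidr, size)
	}
}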


@@ -25,6 +25,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -40,7 +41,7 @@ import (
 	netutils "k8s.io/utils/net"
 )

-func TestIPAMMultiCIDRRangeAllocatorType(t *testing.T) {
+func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) {
 	// set the feature gate to enable MultiCIDRRangeAllocator
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
@@ -85,19 +86,19 @@ func TestIPAMMultiCIDRRangeAllocatorType(t *testing.T) {
 		},
 		{
 			name:             "Single stack IPv4 Pod CIDR assigned to a node",
-			clusterCIDR:      makeClusterCIDR("ipv4-cc", "10.0.0.0/16", "", nodeSelector(map[string][]string{"ipv4": {"true"}, "singlestack": {"true"}})),
+			clusterCIDR:      makeClusterCIDR("ipv4-cc", "10.0.0.0/16", "", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "singlestack": {"true"}})),
 			node:             makeNode("ipv4-node", map[string]string{"ipv4": "true", "singlestack": "true"}),
 			expectedPodCIDRs: []string{"10.0.0.0/24"},
 		},
 		{
 			name:             "Single stack IPv6 Pod CIDR assigned to a node",
-			clusterCIDR:      makeClusterCIDR("ipv6-cc", "", "fd00:20:100::/112", nodeSelector(map[string][]string{"ipv6": {"true"}})),
+			clusterCIDR:      makeClusterCIDR("ipv6-cc", "", "fd00:20:100::/112", 8, nodeSelector(map[string][]string{"ipv6": {"true"}})),
 			node:             makeNode("ipv6-node", map[string]string{"ipv6": "true"}),
 			expectedPodCIDRs: []string{"fd00:20:100::/120"},
 		},
 		{
 			name:             "DualStack Pod CIDRs assigned to a node",
-			clusterCIDR:      makeClusterCIDR("dualstack-cc", "192.168.0.0/16", "fd00:30:100::/112", nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
+			clusterCIDR:      makeClusterCIDR("dualstack-cc", "192.168.0.0/16", "fd00:30:100::/112", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
 			node:             makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"}),
 			expectedPodCIDRs: []string{"192.168.0.0/24", "fd00:30:100::/120"},
 		},
@@ -110,17 +111,6 @@ func TestIPAMMultiCIDRRangeAllocatorType(t *testing.T) {
 		if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), test.clusterCIDR, metav1.CreateOptions{}); err != nil {
 			t.Fatal(err)
 		}
-		// Wait for the ClusterCIDR to be created
-		if err := wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) {
-			cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), test.clusterCIDR.Name, metav1.GetOptions{})
-			if err != nil {
-				return false, err
-			}
-			return cc != nil, nil
-		}); err != nil {
-			t.Fatalf("failed while waiting for ClusterCIDR %q to be created: %v", test.clusterCIDR.Name, err)
-		}
 	}

 	// Sleep for one second to make sure the controller process the new created ClusterCIDR.
@@ -138,6 +128,411 @@ func TestIPAMMultiCIDRRangeAllocatorType(t *testing.T) {
 	}
 }
func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) {
// set the feature gate to enable MultiCIDRRangeAllocator
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
_, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true")
},
})
defer tearDownFn()
clientSet := clientset.NewForConfigOrDie(kubeConfig)
sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go ipamController.Run(ctx.Done())
sharedInformer.Start(ctx.Done())
t.Run("Pod CIDR release after node delete", func(t *testing.T) {
// Create the test ClusterCIDR.
clusterCIDR := makeClusterCIDR("dualstack-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}}))
if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
// Sleep for one second to make sure the controller processes the newly created ClusterCIDR.
time.Sleep(1 * time.Second)
// Create 1st node and validate that Pod CIDRs are correctly assigned.
node1 := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"})
expectedPodCIDRs1 := []string{"192.168.0.0/24", "fd00:30:100::/120"}
if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node1, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if gotPodCIDRs, err := nodePodCIDRs(clientSet, node1.Name); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(expectedPodCIDRs1, gotPodCIDRs) {
t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs1, gotPodCIDRs)
}
// Create 2nd node and validate that Pod CIDRs are correctly assigned.
node2 := makeNode("dualstack-node-2", map[string]string{"ipv4": "true", "ipv6": "true"})
expectedPodCIDRs2 := []string{"192.168.1.0/24", "fd00:30:100::100/120"}
if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node2, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if gotPodCIDRs, err := nodePodCIDRs(clientSet, node2.Name); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(expectedPodCIDRs2, gotPodCIDRs) {
t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs2, gotPodCIDRs)
}
// Delete the 1st node, to validate that the PodCIDRs are released.
if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), node1.Name, metav1.DeleteOptions{}); err != nil {
t.Fatal(err)
}
// Create 3rd node, validate that it has Pod CIDRs assigned from the released CIDR.
node3 := makeNode("dualstack-node-3", map[string]string{"ipv4": "true", "ipv6": "true"})
expectedPodCIDRs3 := []string{"192.168.0.0/24", "fd00:30:100::/120"}
if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node3, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if gotPodCIDRs, err := nodePodCIDRs(clientSet, node3.Name); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(expectedPodCIDRs3, gotPodCIDRs) {
t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs3, gotPodCIDRs)
}
})
}
func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) {
// set the feature gate to enable MultiCIDRRangeAllocator.
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
_, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true")
},
})
defer tearDownFn()
clientSet := clientset.NewForConfigOrDie(kubeConfig)
sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go ipamController.Run(ctx.Done())
sharedInformer.Start(ctx.Done())
t.Run("delete cc with node associated", func(t *testing.T) {
// Create a ClusterCIDR.
clusterCIDR := makeClusterCIDR("dualstack-cc-del", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}}))
if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
// Sleep for one second to make sure the controller processes the newly created ClusterCIDR.
time.Sleep(1 * time.Second)
// Create a node, which gets pod CIDR from the clusterCIDR created above.
node := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"})
expectedPodCIDRs := []string{"192.168.0.0/24", "fd00:30:100::/120"}
if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if gotPodCIDRs, err := nodePodCIDRs(clientSet, node.Name); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(expectedPodCIDRs, gotPodCIDRs) {
t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs, gotPodCIDRs)
}
// Delete the ClusterCIDR.
if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil {
t.Fatal(err)
}
// Sleep for five seconds to make sure the ClusterCIDR still exists, with a deletion timestamp set, after being marked for deletion.
time.Sleep(5 * time.Second)
// Make sure that the ClusterCIDR is not deleted, as there is a node associated with it.
cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if cc == nil {
t.Fatalf("expected Cluster CIDR got nil")
}
if cc.DeletionTimestamp.IsZero() {
t.Fatalf("expected Cluster CIDR to have set a deletion timestamp ")
}
// Delete the node.
if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{}); err != nil {
t.Fatal(err)
}
// Poll to make sure that the Node is deleted.
if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientSet.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
return apierrors.IsNotFound(err), nil
}); err != nil {
t.Fatalf("failed while waiting for Node %q to be deleted: %v", node.Name, err)
}
// Poll to make sure that the ClusterCIDR is now deleted, as there is no node associated with it.
if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
return apierrors.IsNotFound(err), nil
}); err != nil {
t.Fatalf("failed while waiting for ClusterCIDR %q to be deleted: %v", clusterCIDR.Name, err)
}
})
}
func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) {
// set the feature gate to enable MultiCIDRRangeAllocator.
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
_, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true")
},
})
defer tearDownFn()
clientSet := clientset.NewForConfigOrDie(kubeConfig)
sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go ipamController.Run(ctx.Done())
sharedInformer.Start(ctx.Done())
t.Run("Pod CIDRS must not be allocated from a terminating CC", func(t *testing.T) {
// Create a ClusterCIDR which is the best match based on number of matching labels.
clusterCIDR := makeClusterCIDR("dualstack-cc-del", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}}))
if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
// Create a ClusterCIDR which has fewer matching labels than the previous ClusterCIDR.
clusterCIDR2 := makeClusterCIDR("few-label-match-cc-del", "10.1.0.0/23", "fd12:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}}))
if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR2, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
// Sleep for one second to make sure the controller processes the newly created ClusterCIDR.
time.Sleep(1 * time.Second)
// Create a node, which gets pod CIDR from the clusterCIDR created above.
node := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"})
expectedPodCIDRs := []string{"192.168.0.0/24", "fd00:30:100::/120"}
if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if gotPodCIDRs, err := nodePodCIDRs(clientSet, node.Name); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(expectedPodCIDRs, gotPodCIDRs) {
t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs, gotPodCIDRs)
}
// Delete the ClusterCIDR
if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil {
t.Fatal(err)
}
// Make sure that the ClusterCIDR is not deleted, as there is a node associated with it.
cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if cc == nil {
t.Fatalf("expected Cluster CIDR got nil")
}
if cc.DeletionTimestamp.IsZero() {
t.Fatalf("expected Cluster CIDR to have set a deletion timestamp ")
}
// Create a node, which should get Pod CIDRs from the ClusterCIDR with the lower
// matching label count, as the best-match ClusterCIDR is marked as terminating.
node2 := makeNode("dualstack-node-2", map[string]string{"ipv4": "true", "ipv6": "true"})
expectedPodCIDRs2 := []string{"10.1.0.0/24", "fd12:30:100::/120"}
if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node2, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if gotPodCIDRs2, err := nodePodCIDRs(clientSet, node2.Name); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(expectedPodCIDRs2, gotPodCIDRs2) {
t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs2, gotPodCIDRs2)
}
})
}
func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTieBreak(t *testing.T) {
// set the feature gate to enable MultiCIDRRangeAllocator
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)()
_, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true")
},
})
defer tearDownFn()
clientSet := clientset.NewForConfigOrDie(kubeConfig)
sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go ipamController.Run(ctx.Done())
sharedInformer.Start(ctx.Done())
tests := []struct {
name string
clusterCIDRs []*networkingv1alpha1.ClusterCIDR
node *v1.Node
expectedPodCIDRs []string
}{
{
name: "ClusterCIDR with highest matching labels",
clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
makeClusterCIDR("double-label-match-cc", "10.0.0.0/23", "fd12:30:200::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
},
node: makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}),
expectedPodCIDRs: []string{"10.0.0.0/24", "fd12:30:200::/120"},
},
{
name: "ClusterCIDR with fewer allocatable Pod CIDRs",
clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
},
node: makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}),
expectedPodCIDRs: []string{"172.16.0.0/24", "fd34:30:100::/120"},
},
{
name: "ClusterCIDR with lower perNodeHostBits",
clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
},
node: makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}),
expectedPodCIDRs: []string{"172.31.0.0/25", "fd35:30:100::/121"},
},
{
name: "ClusterCIDR with label having lower alphanumeric value",
clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("low-alpha-cc", "192.169.0.0/24", "fd12:40:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})),
},
node: makeNode("dualstack-node", map[string]string{"apv4": "true", "bpv6": "true", "ipv4": "true", "ipv6": "true", "match": "single"}),
expectedPodCIDRs: []string{"192.169.0.0/25", "fd12:40:100::/121"},
},
{
name: "ClusterCIDR with alphanumerically smaller IP address",
clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{
makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})),
makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})),
makeClusterCIDR("low-alpha-cc", "192.169.0.0/24", "fd12:40:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})),
makeClusterCIDR("low-ip-cc", "10.1.0.0/24", "fd00:10:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})),
},
node: makeNode("dualstack-node", map[string]string{"apv4": "true", "bpv6": "true", "ipv4": "true", "ipv6": "true", "match": "single"}),
expectedPodCIDRs: []string{"10.1.0.0/25", "fd00:10:100::/121"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
for _, clusterCIDR := range test.clusterCIDRs {
// Create the test ClusterCIDR
if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
}
// Sleep for one second to make sure the controller processes the newly created ClusterCIDRs.
time.Sleep(1 * time.Second)
// Create a node and validate that Pod CIDRs are correctly assigned.
if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), test.node, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if gotPodCIDRs, err := nodePodCIDRs(clientSet, test.node.Name); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(test.expectedPodCIDRs, gotPodCIDRs) {
t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", test.expectedPodCIDRs, gotPodCIDRs)
}
// Delete the node.
if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), test.node.Name, metav1.DeleteOptions{}); err != nil {
t.Fatal(err)
}
// Wait till the Node is deleted.
if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientSet.CoreV1().Nodes().Get(context.TODO(), test.node.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
return apierrors.IsNotFound(err), nil
}); err != nil {
t.Fatalf("failed while waiting for Node %q to be deleted: %v", test.node.Name, err)
}
// Delete the Cluster CIDRs.
for _, clusterCIDR := range test.clusterCIDRs {
// Delete the test ClusterCIDR.
if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil {
t.Fatal(err)
}
// Wait till the ClusterCIDR is deleted.
if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
return apierrors.IsNotFound(err), nil
}); err != nil {
t.Fatalf("failed while waiting for ClusterCIDR %q to be deleted: %v", clusterCIDR.Name, err)
}
}
})
}
}
func booststrapMultiCIDRRangeAllocator(t *testing.T,
	clientSet clientset.Interface,
	sharedInformer informers.SharedInformerFactory,
@@ -151,9 +546,9 @@ func booststrapMultiCIDRRangeAllocator(t *testing.T,
 	clusterCIDRs := []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
 	nodeMaskCIDRs := []int{24, 120}

-	// set the current state of the informer, we can preseed nodes and ClusterCIDRs so we
+	// set the current state of the informer, we can pre-seed nodes and ClusterCIDRs, so that we
 	// can simulate the bootstrap
-	initialCC := makeClusterCIDR("initial-cc", "10.2.0.0/16", "fd00:20:96::/112", nodeSelector(map[string][]string{"bootstrap": {"true"}}))
+	initialCC := makeClusterCIDR("initial-cc", "10.2.0.0/16", "fd00:20:96::/112", 8, nodeSelector(map[string][]string{"bootstrap": {"true"}}))
 	if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), initialCC, metav1.CreateOptions{}); err != nil {
 		t.Fatal(err)
 	}
@@ -201,13 +596,13 @@ func makeNode(name string, labels map[string]string) *v1.Node {
 	}
 }

-func makeClusterCIDR(name, ipv4CIDR, ipv6CIDR string, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR {
+func makeClusterCIDR(name, ipv4CIDR, ipv6CIDR string, perNodeHostBits int32, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR {
 	return &networkingv1alpha1.ClusterCIDR{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
 		Spec: networkingv1alpha1.ClusterCIDRSpec{
-			PerNodeHostBits: 8,
+			PerNodeHostBits: perNodeHostBits,
 			IPv4:            ipv4CIDR,
 			IPv6:            ipv6CIDR,
 			NodeSelector:    nodeSelector,
@@ -236,7 +631,7 @@ func nodeSelector(labels map[string][]string) *v1.NodeSelector {
 func nodePodCIDRs(c clientset.Interface, name string) ([]string, error) {
 	var node *v1.Node
-	nodePollErr := wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) {
+	nodePollErr := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
 		var err error
 		node, err = c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
 		if err != nil {