Merge pull request #84732 from khenidak/fix-disable-dualstack

Fix a controller manager panic when IPAM tries to lock an out-of-range, pre-existing CIDR
Kubernetes Prow Robot 2019-11-14 11:19:35 -08:00 committed by GitHub
commit ba9f7419f9
2 changed files with 248 additions and 0 deletions
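The underlying failure is an index-out-of-range panic in the controller manager: the range allocator looks up r.cidrSets by the position of each pre-existing pod CIDR on the node, so a node that kept two pod CIDRs after the cluster was reconfigured from dual-stack to single-stack indexes past the single remaining CIDR set. Below is a minimal, self-contained sketch of that failure mode and the guard this PR adds; the names cidrSets and podCIDRs mirror the diff, while occupy and the string slices are simplified stand-ins for illustration only.

package main

import "fmt"

// occupy mimics the allocator loop: each pre-existing pod CIDR is matched to
// the CIDR set at the same index.
func occupy(cidrSets []string, podCIDRs []string) error {
	for idx, cidr := range podCIDRs {
		// Guard corresponding to the one added in this PR: without it,
		// cidrSets[idx] panics with "index out of range" when the node
		// carries more pod CIDRs than the cluster has configured CIDR sets.
		if idx >= len(cidrSets) {
			return fmt.Errorf("pod CIDR %s at index %d has no matching cluster CIDR", cidr, idx)
		}
		fmt.Printf("occupying %s within %s\n", cidr, cidrSets[idx])
	}
	return nil
}

func main() {
	// Cluster reconfigured from dual-stack to single-stack: one cluster CIDR,
	// but the node still has two pre-allocated pod CIDRs.
	fmt.Println(occupy([]string{"10.10.0.0/16"}, []string{"10.10.0.1/24", "a00::/86"}))
}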


@@ -228,6 +228,13 @@ func (r *rangeAllocator) occupyCIDRs(node *v1.Node) error {
		if err != nil {
			return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
		}
		// If the node has a pre-allocated CIDR that does not exist in our CIDRs
		// (this happens when the cluster went from dual-stack (multiple CIDRs) to non-dual-stack),
		// then we have no way of locking it.
		if idx >= len(r.cidrSets) {
			return fmt.Errorf("node:%s has an allocated cidr: %v at index:%v that does not exist in cluster cidrs configuration", node.Name, cidr, idx)
		}
		if err := r.cidrSets[idx].Occupy(podCIDR); err != nil {
			return fmt.Errorf("failed to mark cidr[%v] at idx [%v] as occupied for node: %v: %v", podCIDR, idx, node.Name, err)
		}
@@ -284,6 +291,13 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
			return fmt.Errorf("failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
		}
		// If the node has a pre-allocated CIDR that does not exist in our CIDRs
		// (this happens when the cluster went from dual-stack (multiple CIDRs) to non-dual-stack),
		// then we have no way of locking it.
		if idx >= len(r.cidrSets) {
			return fmt.Errorf("node:%s has an allocated cidr: %v at index:%v that does not exist in cluster cidrs configuration", node.Name, cidr, idx)
		}
		klog.V(4).Infof("release CIDR %s for node:%v", cidr, node.Name)
		if err = r.cidrSets[idx].Release(podCIDR); err != nil {
			return fmt.Errorf("error when releasing CIDR %v: %v", cidr, err)

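Both hunks take the same approach: instead of indexing r.cidrSets[idx] directly, which panics with an index-out-of-range when idx exceeds the number of configured CIDR sets, occupyCIDRs and ReleaseCIDR now return a descriptive error naming the node, the CIDR, and the index, so the mismatch surfaces as an error rather than crashing the controller manager.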

@@ -69,6 +69,240 @@ type testCase struct {
	// key is index of the cidr allocated
	expectedAllocatedCIDR map[int]string
	allocatedCIDRs map[int][]string
	// should controller creation fail?
	ctrlCreateFail bool
}
func TestOccupyPreExistingCIDR(t *testing.T) {
	// all tests operate on a single node
	testCases := []testCase{
		{
			description: "success, single stack no node allocation",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			clusterCIDRs: func() []*net.IPNet {
				_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
				return []*net.IPNet{clusterCIDRv4}
			}(),
			serviceCIDR: nil,
			secondaryServiceCIDR: nil,
			subNetMaskSize: 24,
			allocatedCIDRs: nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail: false,
		},
		{
			description: "success, dual stack no node allocation",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			clusterCIDRs: func() []*net.IPNet {
				_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
				_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
				return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
			}(),
			serviceCIDR: nil,
			secondaryServiceCIDR: nil,
			subNetMaskSize: 24,
			allocatedCIDRs: nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail: false,
		},
		{
			description: "success, single stack correct node allocation",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.1/24"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			clusterCIDRs: func() []*net.IPNet {
				_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
				return []*net.IPNet{clusterCIDRv4}
			}(),
			serviceCIDR: nil,
			secondaryServiceCIDR: nil,
			subNetMaskSize: 24,
			allocatedCIDRs: nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail: false,
		},
		{
			description: "success, dual stack both allocated correctly",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.1/24", "a00::/86"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			clusterCIDRs: func() []*net.IPNet {
				_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
				_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
				return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
			}(),
			serviceCIDR: nil,
			secondaryServiceCIDR: nil,
			subNetMaskSize: 24,
			allocatedCIDRs: nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail: false,
		},
		// failure cases
		{
			description: "fail, single stack incorrect node allocation",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"172.10.0.1/24"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			clusterCIDRs: func() []*net.IPNet {
				_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
				return []*net.IPNet{clusterCIDRv4}
			}(),
			serviceCIDR: nil,
			secondaryServiceCIDR: nil,
			subNetMaskSize: 24,
			allocatedCIDRs: nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail: true,
		},
		{
			description: "fail, dualstack node allocating from non existing cidr",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.1/24", "a00::/86"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			clusterCIDRs: func() []*net.IPNet {
				_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
				return []*net.IPNet{clusterCIDRv4}
			}(),
			serviceCIDR: nil,
			secondaryServiceCIDR: nil,
			subNetMaskSize: 24,
			allocatedCIDRs: nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail: true,
		},
		{
			description: "fail, dualstack node allocating bad v4",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"172.10.0.1/24", "a00::/86"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			clusterCIDRs: func() []*net.IPNet {
				_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
				_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
				return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
			}(),
			serviceCIDR: nil,
			secondaryServiceCIDR: nil,
			subNetMaskSize: 24,
			allocatedCIDRs: nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail: true,
		},
		{
			description: "fail, dualstack node allocating bad v6",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.1/24", "cdd::/86"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			clusterCIDRs: func() []*net.IPNet {
				_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
				_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
				return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
			}(),
			serviceCIDR: nil,
			secondaryServiceCIDR: nil,
			subNetMaskSize: 24,
			allocatedCIDRs: nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail: true,
		},
	}

	// test function
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			// Initialize the range allocator.
			fakeNodeInformer := getFakeNodeInformer(tc.fakeNodeHandler)
			nodeList, _ := tc.fakeNodeHandler.List(metav1.ListOptions{})
			_, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.clusterCIDRs, tc.serviceCIDR, tc.secondaryServiceCIDR, tc.subNetMaskSize, nodeList)
			if err == nil && tc.ctrlCreateFail {
				t.Fatalf("creating range allocator was expected to fail, but it did not")
			}
			if err != nil && !tc.ctrlCreateFail {
				t.Fatalf("creating range allocator was expected to succeed, but it did not")
			}
		})
	}
}
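The new table-driven cases can be exercised on their own with something like "go test -run TestOccupyPreExistingCIDR -v" from the range allocator's package directory; the usual location of this code is pkg/controller/nodeipam/ipam, though that path is inferred rather than stated in this diff.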
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {