Adding IPv6 (Dual Stack) support to handle IPv6 pod CIDRs

Basant Amarkhed 2021-04-15 16:51:38 +00:00 committed by basantsa1989
parent 2147937c41
commit a013c6a2db
2 changed files with 167 additions and 17 deletions
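The change teaches the cloud CIDR allocator to accept up to two pod CIDRs per node (one IPv4, one IPv6) instead of a single one. It leans on two helpers from k8s.io/utils/net: ParseCIDRs, which turns a list of CIDR strings into []*net.IPNet, and IsDualStackCIDRs, which reports whether a list mixes IPv4 and IPv6. A minimal standalone sketch of how those helpers behave (illustration only, not part of the diff):

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// A cloud provider may hand back one CIDR (single stack)
	// or two (dual stack: one IPv4 plus one IPv6).
	cidrStrings := []string{"10.10.10.0/24", "2001:db8::/64"}

	// ParseCIDRs fails on the first string that is not a valid CIDR.
	cidrs, err := netutils.ParseCIDRs(cidrStrings)
	if err != nil {
		panic(err)
	}

	// IsDualStackCIDRs is true only when both families are present;
	// two IPv4 (or two IPv6) CIDRs are not dual stack.
	dualStack, err := netutils.IsDualStackCIDRs(cidrs)
	fmt.Println(dualStack, err) // true <nil>
}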

View File

@@ -25,6 +25,7 @@ import (
 	"sync"
 	"time"
+	"github.com/google/go-cmp/cmp"
 	"k8s.io/klog/v2"
 	v1 "k8s.io/api/core/v1"
@@ -45,6 +46,7 @@ import (
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 	utiltaints "k8s.io/kubernetes/pkg/util/taints"
 	"k8s.io/legacy-cloud-providers/gce"
+	netutils "k8s.io/utils/net"
 )
 
 // nodeProcessingInfo tracks information related to current nodes in processing
@@ -246,34 +248,40 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 		if errors.IsNotFound(err) {
 			return nil // node no longer available, skip processing
 		}
-		klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
+		klog.ErrorS(err, "Failed while getting the node for updating Node.Spec.PodCIDR", "nodeName", nodeName)
 		return err
 	}
 	if node.Spec.ProviderID == "" {
 		return fmt.Errorf("node %s doesn't have providerID", nodeName)
 	}
 
-	cidrs, err := ca.cloud.AliasRangesByProviderID(node.Spec.ProviderID)
+	cidrStrings, err := ca.cloud.AliasRangesByProviderID(node.Spec.ProviderID)
 	if err != nil {
 		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
-		return fmt.Errorf("failed to allocate cidr: %v", err)
+		return fmt.Errorf("failed to get cidr(s) from provider: %v", err)
 	}
-	if len(cidrs) == 0 {
+	if len(cidrStrings) == 0 {
 		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
 		return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name)
 	}
-	_, cidr, err := net.ParseCIDR(cidrs[0])
-	if err != nil {
-		return fmt.Errorf("failed to parse string '%s' as a CIDR: %v", cidrs[0], err)
+	// Can have at most 2 CIDRs (one for v4 and one for v6).
+	if len(cidrStrings) > 2 {
+		klog.InfoS("Got more than 2 CIDRs, truncating to 2", "cidrStrings", cidrStrings)
+		cidrStrings = cidrStrings[:2]
 	}
-	podCIDR := cidr.String()
-
-	if node.Spec.PodCIDR == podCIDR {
-		klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
-		// We don't return here, in order to set the NetworkUnavailable condition later below.
-	} else {
+	cidrs, err := netutils.ParseCIDRs(cidrStrings)
+	if err != nil {
+		return fmt.Errorf("failed to parse strings %v as CIDRs: %v", cidrStrings, err)
+	}
+
+	needUpdate, err := needPodCIDRsUpdate(node, cidrs)
+	if err != nil {
+		return fmt.Errorf("err: %v, CIDRs: %v", err, cidrStrings)
+	}
+	if needUpdate {
 		if node.Spec.PodCIDR != "" {
-			klog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
+			klog.ErrorS(nil, "PodCIDR being reassigned!", "nodeName", node.Name, "node.Spec.PodCIDRs", node.Spec.PodCIDRs, "cidrStrings", cidrStrings)
 			// We fall through and set the CIDR despite this error. This
 			// implements the same logic as implemented in the
 			// rangeAllocator.
@@ -281,15 +289,15 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 			// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
 		}
 		for i := 0; i < cidrUpdateRetries; i++ {
-			if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
-				klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
+			if err = utilnode.PatchNodeCIDRs(ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
+				klog.InfoS("Set the node PodCIDRs", "nodeName", node.Name, "cidrStrings", cidrStrings)
 				break
 			}
 		}
 	}
 	if err != nil {
 		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
-		klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
+		klog.ErrorS(err, "Failed to update the node PodCIDR after multiple attempts", "nodeName", node.Name, "cidrStrings", cidrStrings)
 		return err
 	}
@@ -301,11 +309,49 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 		LastTransitionTime: metav1.Now(),
 	})
 	if err != nil {
-		klog.Errorf("Error setting route status for node %v: %v", node.Name, err)
+		klog.ErrorS(err, "Error setting route status for the node", "nodeName", node.Name)
 	}
 	return err
 }
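+
+// needPodCIDRsUpdate reports whether the cloud-assigned podCIDRs differ from
+// what is recorded in node.Spec.PodCIDR/PodCIDRs; recorded values that fail
+// to parse are treated as needing an update so they get overwritten.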
+func needPodCIDRsUpdate(node *v1.Node, podCIDRs []*net.IPNet) (bool, error) {
+	if node.Spec.PodCIDR == "" {
+		return true, nil
+	}
+	_, nodePodCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
+	if err != nil {
+		klog.ErrorS(err, "Found invalid node.Spec.PodCIDR", "node.Spec.PodCIDR", node.Spec.PodCIDR)
+		// We will try to overwrite with new CIDR(s).
+		return true, nil
+	}
+	nodePodCIDRs, err := netutils.ParseCIDRs(node.Spec.PodCIDRs)
+	if err != nil {
+		klog.ErrorS(err, "Found invalid node.Spec.PodCIDRs", "node.Spec.PodCIDRs", node.Spec.PodCIDRs)
+		// We will try to overwrite with new CIDR(s).
+		return true, nil
+	}
+
+	if len(podCIDRs) == 1 {
+		if cmp.Equal(nodePodCIDR, podCIDRs[0]) {
+			klog.V(4).InfoS("Node already has allocated CIDR. It matches the proposed one.", "nodeName", node.Name, "podCIDRs[0]", podCIDRs[0])
+			return false, nil
+		}
+	} else if len(nodePodCIDRs) == len(podCIDRs) {
+		if dualStack, _ := netutils.IsDualStackCIDRs(podCIDRs); !dualStack {
+			return false, fmt.Errorf("CIDRs are not dual stack")
+		}
+		for idx, cidr := range podCIDRs {
+			if !cmp.Equal(nodePodCIDRs[idx], cidr) {
+				return true, nil
+			}
+		}
+		klog.V(4).InfoS("Node already has allocated CIDRs. They match the proposed ones.", "nodeName", node.Name, "podCIDRs", podCIDRs)
+		return false, nil
+	}
+
+	return true, nil
+}
+
 func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error {
 	klog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
 		node.Name, node.Spec.PodCIDR)
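Note that needPodCIDRsUpdate compares parsed *net.IPNet values with cmp.Equal rather than comparing raw strings, which is why the tests below treat "2001:db8::/64" and "2001:db8:0::/64" as the same CIDR. A small standalone sketch of that property (illustration only, not part of the diff):

package main

import (
	"fmt"
	"net"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Two spellings of the same IPv6 CIDR.
	_, a, err := net.ParseCIDR("2001:db8::/64")
	if err != nil {
		panic(err)
	}
	_, b, err := net.ParseCIDR("2001:db8:0::/64")
	if err != nil {
		panic(err)
	}

	// Parsing normalizes both to the same IP and mask, so the
	// structural comparison reports true even though the input
	// strings differ.
	fmt.Println(cmp.Equal(a, b))        // true
	fmt.Println(a.String(), b.String()) // 2001:db8::/64 2001:db8::/64
}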

View File

@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
+	netutils "k8s.io/utils/net"
 )
 
 func hasNodeInProcessing(ca *cloudCIDRAllocator, name string) bool {
@@ -83,3 +84,106 @@ func TestNodeUpdateRetryTimeout(t *testing.T) {
 		})
 	}
 }
+
+func TestNeedPodCIDRsUpdate(t *testing.T) {
+	for _, tc := range []struct {
+		desc         string
+		cidrs        []string
+		nodePodCIDR  string
+		nodePodCIDRs []string
+		want         bool
+		wantErr      bool
+	}{
+		{
+			desc:         "want true - invalid nodePodCIDR is overwritten",
+			cidrs:        []string{"10.10.10.0/24"},
+			nodePodCIDR:  "10.10..0/24",
+			nodePodCIDRs: []string{"10.10..0/24"},
+			want:         true,
+		},
+		{
+			desc:         "want error - cidr len 2 but not dual stack",
+			cidrs:        []string{"10.10.10.0/24", "10.10.11.0/24"},
+			nodePodCIDR:  "10.10.10.0/24",
+			nodePodCIDRs: []string{"10.10.10.0/24", "2001:db8::/64"},
+			wantErr:      true,
+		},
+		{
+			desc:         "want false - matching v4 only cidr",
+			cidrs:        []string{"10.10.10.0/24"},
+			nodePodCIDR:  "10.10.10.0/24",
+			nodePodCIDRs: []string{"10.10.10.0/24"},
+			want:         false,
+		},
+		{
+			desc:  "want true - empty node.Spec.PodCIDR",
+			cidrs: []string{"10.10.10.0/24"},
+			want:  true,
+		},
+		{
+			desc:         "want true - non matching v4 only cidr",
+			cidrs:        []string{"10.10.10.0/24"},
+			nodePodCIDR:  "10.10.11.0/24",
+			nodePodCIDRs: []string{"10.10.11.0/24"},
+			want:         true,
+		},
+		{
+			desc:         "want false - matching v4 and v6 cidrs",
+			cidrs:        []string{"10.10.10.0/24", "2001:db8::/64"},
+			nodePodCIDR:  "10.10.10.0/24",
+			nodePodCIDRs: []string{"10.10.10.0/24", "2001:db8::/64"},
+			want:         false,
+		},
+		{
+			desc:         "want false - matching v4 and v6 cidrs, different strings but same CIDRs",
+			cidrs:        []string{"10.10.10.0/24", "2001:db8::/64"},
+			nodePodCIDR:  "10.10.10.0/24",
+			nodePodCIDRs: []string{"10.10.10.0/24", "2001:db8:0::/64"},
+			want:         false,
+		},
+		{
+			desc:         "want true - matching v4 and non matching v6 cidrs",
+			cidrs:        []string{"10.10.10.0/24", "2001:db8::/64"},
+			nodePodCIDR:  "10.10.10.0/24",
+			nodePodCIDRs: []string{"10.10.10.0/24", "2001:dba::/64"},
+			want:         true,
+		},
+		{
+			desc:  "want true - nil node.Spec.PodCIDRs",
+			cidrs: []string{"10.10.10.0/24", "2001:db8::/64"},
+			want:  true,
+		},
+		{
+			desc:         "want true - matching v6 and non matching v4 cidrs",
+			cidrs:        []string{"10.10.10.0/24", "2001:db8::/64"},
+			nodePodCIDR:  "10.10.1.0/24",
+			nodePodCIDRs: []string{"10.10.1.0/24", "2001:db8::/64"},
+			want:         true,
+		},
+		{
+			desc:         "want true - missing v6",
+			cidrs:        []string{"10.10.10.0/24", "2001:db8::/64"},
+			nodePodCIDR:  "10.10.10.0/24",
+			nodePodCIDRs: []string{"10.10.10.0/24"},
+			want:         true,
+		},
+	} {
+		var node v1.Node
+		node.Spec.PodCIDR = tc.nodePodCIDR
+		node.Spec.PodCIDRs = tc.nodePodCIDRs
+		netCIDRs, err := netutils.ParseCIDRs(tc.cidrs)
+		if err != nil {
+			t.Errorf("failed to parse %v as CIDRs: %v", tc.cidrs, err)
+		}
+		t.Run(tc.desc, func(t *testing.T) {
+			got, err := needPodCIDRsUpdate(&node, netCIDRs)
+			if (err != nil) != tc.wantErr {
+				t.Errorf("err: %v, wantErr: %v", err, tc.wantErr)
+			}
+			if err == nil && got != tc.want {
+				t.Errorf("got: %v, want: %v", got, tc.want)
+			}
+		})
+	}
+}
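Each subtest is named by tc.desc, so cases can be run individually. Assuming the allocator and this test sit in the usual nodeipam ipam package (the commit view above does not show the file paths), something like:

go test ./pkg/controller/nodeipam/ipam/ -run TestNeedPodCIDRsUpdate -v

runs the whole table, and -run 'TestNeedPodCIDRsUpdate/want_true_-_missing_v6' targets a single case, since Go replaces spaces in subtest names with underscores.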