Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)

Commit a51b8ce456 (parent 584d7103e2): route controller + azure v6 routes
@@ -21,6 +21,7 @@ limitations under the License.
 package app
 
 import (
+	"fmt"
 	"net"
 	"net/http"
 	"strings"
@@ -31,6 +32,10 @@ import (
 	cloudcontrollers "k8s.io/kubernetes/pkg/controller/cloud"
 	routecontroller "k8s.io/kubernetes/pkg/controller/route"
 	servicecontroller "k8s.io/kubernetes/pkg/controller/service"
+	netutils "k8s.io/utils/net"
+
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	kubefeatures "k8s.io/kubernetes/pkg/features"
 )
 
 func startCloudNodeController(ctx *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
@@ -98,13 +103,26 @@ func startRouteController(ctx *cloudcontrollerconfig.CompletedConfig, cloud clou
 		klog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
 		return nil, false, nil
 	}
-	var clusterCIDR *net.IPNet
-	var err error
-	if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
-		_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
-		if err != nil {
-			klog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
-		}
+
+	// failure: bad cidrs in config
+	clusterCIDRs, dualStack, err := processCIDRs(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
+	if err != nil {
+		return nil, false, err
+	}
+
+	// failure: more than one cidr and dual stack is not enabled
+	if len(clusterCIDRs) > 1 && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.IPv6DualStack) {
+		return nil, false, fmt.Errorf("len of ClusterCIDRs==%v and dualstack feature is not enabled", len(clusterCIDRs))
+	}
+
+	// failure: more than one cidr but they are not configured as dual stack
+	if len(clusterCIDRs) > 1 && !dualStack {
+		return nil, false, fmt.Errorf("len of ClusterCIDRs==%v and they are not configured as dual stack (at least one from each IPFamily", len(clusterCIDRs))
+	}
+
+	// failure: more than cidrs is not allowed even with dual stack
+	if len(clusterCIDRs) > 2 {
+		return nil, false, fmt.Errorf("length of clusterCIDRs is:%v more than max allowed of 2", len(clusterCIDRs))
 	}
 
 	routeController := routecontroller.New(
@@ -112,9 +130,28 @@ func startRouteController(ctx *cloudcontrollerconfig.CompletedConfig, cloud clou
 		ctx.ClientBuilder.ClientOrDie("route-controller"),
 		ctx.SharedInformers.Core().V1().Nodes(),
 		ctx.ComponentConfig.KubeCloudShared.ClusterName,
-		clusterCIDR,
+		clusterCIDRs,
 	)
 	go routeController.Run(stopCh, ctx.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
 
 	return nil, true, nil
 }
+
+// processCIDRs is a helper function that works on a comma separated cidrs and returns
+// a list of typed cidrs
+// a flag if cidrs represents a dual stack
+// error if failed to parse any of the cidrs
+func processCIDRs(cidrsList string) ([]*net.IPNet, bool, error) {
+	cidrsSplit := strings.Split(strings.TrimSpace(cidrsList), ",")
+
+	cidrs, err := netutils.ParseCIDRs(cidrsSplit)
+	if err != nil {
+		return nil, false, err
+	}
+
+	// if cidrs has an error then the previous call will fail
+	// safe to ignore error checking on next call
+	dualstack, _ := netutils.IsDualStackCIDRs(cidrs)
+
+	return cidrs, dualstack, nil
+}
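Note: processCIDRs leans on the two k8s.io/utils/net helpers it calls above. A minimal stand-alone sketch of the semantics, with made-up CIDR values that are not part of this commit:

    package main

    import (
        "fmt"

        netutils "k8s.io/utils/net"
    )

    func main() {
        // One CIDR per IP family is recognized as dual-stack.
        cidrs, err := netutils.ParseCIDRs([]string{"10.244.0.0/16", "fd00:1234::/64"})
        if err != nil {
            panic(err)
        }
        dual, _ := netutils.IsDualStackCIDRs(cidrs)
        fmt.Println(len(cidrs), dual) // 2 true

        // Two CIDRs of the same family parse fine but are not dual-stack,
        // so the new startRouteController checks would reject them.
        cidrs, _ = netutils.ParseCIDRs([]string{"10.244.0.0/16", "10.245.0.0/16"})
        dual, _ = netutils.IsDualStackCIDRs(cidrs)
        fmt.Println(len(cidrs), dual) // 2 false
    }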
@@ -194,11 +194,33 @@ func startRouteController(ctx ControllerContext) (http.Handler, bool, error) {
 		klog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
 		return nil, false, nil
 	}
-	_, clusterCIDR, err := net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
+
+	// failure: bad cidrs in config
+	clusterCIDRs, dualStack, err := processCIDRs(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
 	if err != nil {
-		klog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
+		return nil, false, err
 	}
-	routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR)
+
+	// failure: more than one cidr and dual stack is not enabled
+	if len(clusterCIDRs) > 1 && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.IPv6DualStack) {
+		return nil, false, fmt.Errorf("len of ClusterCIDRs==%v and dualstack feature is not enabled", len(clusterCIDRs))
+	}
+
+	// failure: more than one cidr but they are not configured as dual stack
+	if len(clusterCIDRs) > 1 && !dualStack {
+		return nil, false, fmt.Errorf("len of ClusterCIDRs==%v and they are not configured as dual stack (at least one from each IPFamily", len(clusterCIDRs))
+	}
+
+	// failure: more than cidrs is not allowed even with dual stack
+	if len(clusterCIDRs) > 2 {
+		return nil, false, fmt.Errorf("length of clusterCIDRs is:%v more than max allowed of 2", len(clusterCIDRs))
+	}
+
+	routeController := routecontroller.New(routes,
+		ctx.ClientBuilder.ClientOrDie("route-controller"),
+		ctx.InformerFactory.Core().V1().Nodes(),
+		ctx.ComponentConfig.KubeCloudShared.ClusterName,
+		clusterCIDRs)
 	go routeController.Run(ctx.Stop, ctx.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
 	return nil, true, nil
 }
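Note: both controller managers now run the same three-step validation. A reduced, self-contained sketch of the decision ladder; the feature gate is collapsed to a plain bool, and all names and values here are illustrative, not from the patch:

    package main

    import "fmt"

    func validateClusterCIDRs(numCIDRs int, dualStack, gateEnabled bool) error {
        // more than one cidr requires the IPv6DualStack feature gate
        if numCIDRs > 1 && !gateEnabled {
            return fmt.Errorf("%d CIDRs given but dual-stack feature is not enabled", numCIDRs)
        }
        // more than one cidr must cover both IP families
        if numCIDRs > 1 && !dualStack {
            return fmt.Errorf("%d CIDRs given but they are not dual-stack", numCIDRs)
        }
        // at most one cidr per family
        if numCIDRs > 2 {
            return fmt.Errorf("%d CIDRs given, max allowed is 2", numCIDRs)
        }
        return nil
    }

    func main() {
        fmt.Println(validateClusterCIDRs(2, true, true))  // <nil>
        fmt.Println(validateClusterCIDRs(2, false, true)) // error: not dual-stack
        fmt.Println(validateClusterCIDRs(3, true, true))  // error: too many
    }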
@@ -62,19 +62,19 @@ type RouteController struct {
 	routes           cloudprovider.Routes
 	kubeClient       clientset.Interface
 	clusterName      string
-	clusterCIDR      *net.IPNet
+	clusterCIDRs     []*net.IPNet
 	nodeLister       corelisters.NodeLister
 	nodeListerSynced cache.InformerSynced
 	broadcaster      record.EventBroadcaster
 	recorder         record.EventRecorder
 }
 
-func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInformer coreinformers.NodeInformer, clusterName string, clusterCIDR *net.IPNet) *RouteController {
+func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInformer coreinformers.NodeInformer, clusterName string, clusterCIDRs []*net.IPNet) *RouteController {
 	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
 	}
 
-	if clusterCIDR == nil {
+	if len(clusterCIDRs) == 0 {
 		klog.Fatal("RouteController: Must specify clusterCIDR.")
 	}
 
@@ -86,7 +86,7 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInform
 		routes:           routes,
 		kubeClient:       kubeClient,
 		clusterName:      clusterName,
-		clusterCIDR:      clusterCIDR,
+		clusterCIDRs:     clusterCIDRs,
 		nodeLister:       nodeInformer.Lister(),
 		nodeListerSynced: nodeInformer.Informer().HasSynced,
 		broadcaster:      eventBroadcaster,
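Note: with the signature change, callers pass one cluster CIDR per IP family. A hypothetical wiring sketch; the routes, client, and informer arguments are assumed to be supplied by the caller, and none of this code is in the commit:

    package example

    import (
        "net"
        "time"

        coreinformers "k8s.io/client-go/informers/core/v1"
        clientset "k8s.io/client-go/kubernetes"
        cloudprovider "k8s.io/cloud-provider"
        routecontroller "k8s.io/kubernetes/pkg/controller/route"
    )

    func startDualStackRouteController(routes cloudprovider.Routes, client clientset.Interface, nodes coreinformers.NodeInformer, stopCh <-chan struct{}) {
        _, v4, _ := net.ParseCIDR("10.244.0.0/16")
        _, v6, _ := net.ParseCIDR("fd00:1234::/64")
        rc := routecontroller.New(routes, client, nodes, "my-cluster", []*net.IPNet{v4, v6})
        go rc.Run(stopCh, 10*time.Second)
    }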
@@ -137,33 +137,52 @@ func (rc *RouteController) reconcileNodeRoutes() error {
 }
 
 func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.Route) error {
-	// nodeCIDRs maps nodeName->nodeCIDR
-	nodeCIDRs := make(map[types.NodeName]string)
+	var l sync.Mutex
+	// for each node a map of podCIDRs and their created status
+	nodeRoutesStatuses := make(map[types.NodeName]map[string]bool)
 	// routeMap maps routeTargetNode->route
-	routeMap := make(map[types.NodeName]*cloudprovider.Route)
+	routeMap := make(map[types.NodeName][]*cloudprovider.Route)
 	for _, route := range routes {
 		if route.TargetNode != "" {
-			routeMap[route.TargetNode] = route
+			routeMap[route.TargetNode] = append(routeMap[route.TargetNode], route)
 		}
 	}
 
 	wg := sync.WaitGroup{}
 	rateLimiter := make(chan struct{}, maxConcurrentRouteCreations)
+	// searches existing routes by node for a matching route
+
 	for _, node := range nodes {
 		// Skip if the node hasn't been assigned a CIDR yet.
-		if node.Spec.PodCIDR == "" {
+		if len(node.Spec.PodCIDRs) == 0 {
 			continue
 		}
 		nodeName := types.NodeName(node.Name)
-		// Check if we have a route for this node w/ the correct CIDR.
-		r := routeMap[nodeName]
-		if r == nil || r.DestinationCIDR != node.Spec.PodCIDR {
-			// If not, create the route.
+		l.Lock()
+		nodeRoutesStatuses[nodeName] = make(map[string]bool)
+		l.Unlock()
+		// for every node, for every cidr
+		for _, podCIDR := range node.Spec.PodCIDRs {
+			// we add it to our nodeCIDRs map here because add and delete go routines run at the same time
+			l.Lock()
+			nodeRoutesStatuses[nodeName][podCIDR] = false
+			l.Unlock()
+			// ignore if already created
+			if hasRoute(routeMap, nodeName, podCIDR) {
+				l.Lock()
+				nodeRoutesStatuses[nodeName][podCIDR] = true // a route for this podCIDR is already created
+				l.Unlock()
+				continue
+			}
+			// if we are here, then a route needs to be created for this node
 			route := &cloudprovider.Route{
 				TargetNode:      nodeName,
-				DestinationCIDR: node.Spec.PodCIDR,
+				DestinationCIDR: podCIDR,
 			}
+			// cloud providers that:
+			// - depend on nameHint
+			// - trying to support dual stack
+			// will have to carefully generate new route names that allow node->(multi cidr)
 			nameHint := string(node.UID)
 			wg.Add(1)
 			go func(nodeName types.NodeName, nameHint string, route *cloudprovider.Route) {
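Note: the creation goroutines and the later status pass share nodeRoutesStatuses, hence the mutex. The bookkeeping pattern in isolation, with node name and CIDRs invented for the example:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var l sync.Mutex
        var wg sync.WaitGroup
        // node -> podCIDR -> "has a cloud route been created?"
        status := map[string]map[string]bool{"node-3": {}}

        for _, cidr := range []string{"10.120.0.0/24", "a00:100::/24"} {
            l.Lock()
            status["node-3"][cidr] = false // mark as pending before the goroutine starts
            l.Unlock()

            wg.Add(1)
            go func(cidr string) { // stands in for the CreateRoute goroutine
                defer wg.Done()
                l.Lock()
                status["node-3"][cidr] = true // route created
                l.Unlock()
            }(cidr)
        }
        wg.Wait()
        fmt.Println(status["node-3"]["10.120.0.0/24"], status["node-3"]["a00:100::/24"]) // true true
    }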
@@ -176,8 +195,6 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 				klog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
 				err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route)
 				<-rateLimiter
-
-				rc.updateNetworkingCondition(nodeName, err == nil)
 				if err != nil {
 					msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Since(startTime), err)
 					if rc.recorder != nil {
@@ -188,10 +205,13 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 							UID:       types.UID(nodeName),
 							Namespace: "",
 						}, v1.EventTypeWarning, "FailedToCreateRoute", msg)
+						klog.V(4).Infof(msg)
+						return err
 					}
-					klog.V(4).Infof(msg)
-					return err
 				}
+				l.Lock()
+				nodeRoutesStatuses[nodeName][route.DestinationCIDR] = true
+				l.Unlock()
 				klog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
 				return nil
 			})
@@ -199,23 +219,31 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 				klog.Errorf("Could not create route %s %s for node %s: %v", nameHint, route.DestinationCIDR, nodeName, err)
 			}
 		}(nodeName, nameHint, route)
-		} else {
-			// Update condition only if it doesn't reflect the current state.
-			_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
-			if condition == nil || condition.Status != v1.ConditionFalse {
-				rc.updateNetworkingCondition(types.NodeName(node.Name), true)
-			}
 		}
-		nodeCIDRs[nodeName] = node.Spec.PodCIDR
 	}
 
+	// searches our bag of node->cidrs for a match
+	nodeHasCidr := func(nodeName types.NodeName, cidr string) bool {
+		l.Lock()
+		defer l.Unlock()
+
+		nodeRoutes := nodeRoutesStatuses[nodeName]
+		if nodeRoutes == nil {
+			return false
+		}
+		_, exist := nodeRoutes[cidr]
+		return exist
+	}
+	// delete routes that are not in use
 	for _, route := range routes {
 		if rc.isResponsibleForRoute(route) {
 			// Check if this route is a blackhole, or applies to a node we know about & has an incorrect CIDR.
-			if route.Blackhole || (nodeCIDRs[route.TargetNode] != route.DestinationCIDR) {
+			if route.Blackhole || !nodeHasCidr(route.TargetNode, route.DestinationCIDR) {
 				wg.Add(1)
 				// Delete the route.
 				go func(route *cloudprovider.Route, startTime time.Time) {
 					defer wg.Done()
+					// respect the rate limiter
 					rateLimiter <- struct{}{}
 					klog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
 					if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil {
@@ -229,17 +257,62 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 		}
 	}
 	wg.Wait()
+
+	// after all routes have been created (or not), we start updating
+	// all nodes' statuses with the outcome
+	for _, node := range nodes {
+		wg.Add(1)
+		nodeRoutes := nodeRoutesStatuses[types.NodeName(node.Name)]
+		allRoutesCreated := true
+
+		if len(nodeRoutes) == 0 {
+			go func(n *v1.Node) {
+				defer wg.Done()
+				klog.Infof("node %v has no routes assigned to it. NodeNetworkUnavailable will be set to true", n.Name)
+				rc.updateNetworkingCondition(n, false)
+			}(node)
+			continue
+		}
+
+		// check if all routes were created. if so, then it should be ready
+		for _, created := range nodeRoutes {
+			if !created {
+				allRoutesCreated = false
+				break
+			}
+		}
+		go func(n *v1.Node) {
+			defer wg.Done()
+			rc.updateNetworkingCondition(n, allRoutesCreated)
+		}(node)
+	}
+	wg.Wait()
 	return nil
 }
 
-func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, routeCreated bool) error {
+func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
+	_, condition := nodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
+	if routesCreated && condition != nil && condition.Status == v1.ConditionFalse {
+		klog.V(2).Infof("set node %v with NodeNetworkUnavailable=false was canceled because it is already set", node.Name)
+		return nil
+	}
+
+	if !routesCreated && condition != nil && condition.Status == v1.ConditionTrue {
+		klog.V(2).Infof("set node %v with NodeNetworkUnavailable=true was canceled because it is already set", node.Name)
+		return nil
+	}
+
+	klog.Infof("Patching node status %v with %v previous condition was:%+v", node.Name, routesCreated, condition)
+
+	// either condition is not there, or has a value != to what we need
+	// start setting it
 	err := clientretry.RetryOnConflict(updateNetworkConditionBackoff, func() error {
 		var err error
 		// Patch could also fail, even though the chance is very slim. So we still do
 		// patch in the retry loop.
 		currentTime := metav1.Now()
-		if routeCreated {
-			err = utilnode.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{
+		if routesCreated {
+			err = utilnode.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 				Type:   v1.NodeNetworkUnavailable,
 				Status: v1.ConditionFalse,
 				Reason: "RouteCreated",
@@ -247,7 +320,7 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro
 				LastTransitionTime: currentTime,
 			})
 		} else {
-			err = utilnode.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{
+			err = utilnode.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 				Type:   v1.NodeNetworkUnavailable,
 				Status: v1.ConditionTrue,
 				Reason: "NoRouteCreated",
@@ -256,13 +329,13 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro
 			})
 		}
 		if err != nil {
-			klog.V(4).Infof("Error updating node %s, retrying: %v", nodeName, err)
+			klog.V(4).Infof("Error updating node %s, retrying: %v", types.NodeName(node.Name), err)
 		}
 		return err
 	})
 
 	if err != nil {
-		klog.Errorf("Error updating node %s: %v", nodeName, err)
+		klog.Errorf("Error updating node %s: %v", node.Name, err)
 	}
 
 	return err
@@ -279,8 +352,24 @@ func (rc *RouteController) isResponsibleForRoute(route *cloudprovider.Route) boo
 	for i := range lastIP {
 		lastIP[i] = cidr.IP[i] | ^cidr.Mask[i]
 	}
-	if !rc.clusterCIDR.Contains(cidr.IP) || !rc.clusterCIDR.Contains(lastIP) {
-		return false
+
+	// check across all cluster cidrs
+	for _, clusterCIDR := range rc.clusterCIDRs {
+		if clusterCIDR.Contains(cidr.IP) || clusterCIDR.Contains(lastIP) {
+			return true
+		}
 	}
-	return true
+	return false
+}
+
+// checks if a node owns a route with a specific cidr
+func hasRoute(rm map[types.NodeName][]*cloudprovider.Route, nodeName types.NodeName, cidr string) bool {
+	if routes, ok := rm[nodeName]; ok {
+		for _, route := range routes {
+			if route.DestinationCIDR == cidr {
+				return true
+			}
+		}
+	}
+	return false
 }
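Note: isResponsibleForRoute now claims a route if either endpoint of its destination CIDR falls inside any cluster CIDR, where the old code required both endpoints inside the single CIDR. A self-contained sketch of the containment test, with invented CIDRs:

    package main

    import (
        "fmt"
        "net"
    )

    // contained mirrors the first-IP/last-IP check above.
    func contained(clusterCIDRs []*net.IPNet, destinationCIDR string) bool {
        _, cidr, _ := net.ParseCIDR(destinationCIDR)
        lastIP := make(net.IP, len(cidr.IP))
        for i := range lastIP {
            lastIP[i] = cidr.IP[i] | ^cidr.Mask[i]
        }
        for _, clusterCIDR := range clusterCIDRs {
            if clusterCIDR.Contains(cidr.IP) || clusterCIDR.Contains(lastIP) {
                return true
            }
        }
        return false
    }

    func main() {
        _, v4, _ := net.ParseCIDR("10.244.0.0/16")
        _, v6, _ := net.ParseCIDR("a00:100::/10")
        cidrs := []*net.IPNet{v4, v6}
        fmt.Println(contained(cidrs, "10.244.255.0/24")) // true (inside the v4 range)
        fmt.Println(contained(cidrs, "a00:100::/24"))    // true (inside the v6 range)
        fmt.Println(contained(cidrs, "b00:100::/24"))    // false (outside both)
    }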
@@ -51,12 +51,14 @@ func TestIsResponsibleForRoute(t *testing.T) {
 		{"10.244.0.0/16", myClusterRoute, "10.244.255.0/24", true},
 		{"10.244.0.0/14", myClusterRoute, "10.244.0.0/24", true},
 		{"10.244.0.0/14", myClusterRoute, "10.247.255.0/24", true},
+		{"a00:100::/10", myClusterRoute, "a00:100::/24", true},
 		// Routes that match our naming/tagging scheme, but are outside our cidr
 		{"10.244.0.0/16", myClusterRoute, "10.224.0.0/24", false},
 		{"10.244.0.0/16", myClusterRoute, "10.0.10.0/24", false},
 		{"10.244.0.0/16", myClusterRoute, "10.255.255.0/24", false},
 		{"10.244.0.0/14", myClusterRoute, "10.248.0.0/24", false},
 		{"10.244.0.0/14", myClusterRoute, "10.243.255.0/24", false},
+		{"a00:100::/10", myClusterRoute, "b00:100::/24", false},
 	}
 	for i, testCase := range testCases {
 		_, cidr, err := net.ParseCIDR(testCase.clusterCIDR)
@@ -65,7 +67,7 @@ func TestIsResponsibleForRoute(t *testing.T) {
 		}
 		client := fake.NewSimpleClientset()
 		informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
-		rc := New(nil, nil, informerFactory.Core().V1().Nodes(), myClusterName, cidr)
+		rc := New(nil, nil, informerFactory.Core().V1().Nodes(), myClusterName, []*net.IPNet{cidr})
 		rc.nodeListerSynced = alwaysReady
 		route := &cloudprovider.Route{
 			Name: testCase.routeName,
@@ -80,17 +82,137 @@ func TestIsResponsibleForRoute(t *testing.T) {
 
 func TestReconcile(t *testing.T) {
 	cluster := "my-k8s"
-	node1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1", UID: "01"}, Spec: v1.NodeSpec{PodCIDR: "10.120.0.0/24"}}
-	node2 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: "10.120.1.0/24"}}
+	node1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1", UID: "01"}, Spec: v1.NodeSpec{PodCIDR: "10.120.0.0/24", PodCIDRs: []string{"10.120.0.0/24"}}}
+	node2 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: "10.120.1.0/24", PodCIDRs: []string{"10.120.1.0/24"}}}
 	nodeNoCidr := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: ""}}
 
+	node3 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-3", UID: "03"}, Spec: v1.NodeSpec{PodCIDR: "10.120.0.0/24", PodCIDRs: []string{"10.120.0.0/24", "a00:100::/24"}}}
+	node4 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-4", UID: "04"}, Spec: v1.NodeSpec{PodCIDR: "10.120.1.0/24", PodCIDRs: []string{"10.120.1.0/24", "a00:200::/24"}}}
+
 	testCases := []struct {
 		nodes                      []*v1.Node
 		initialRoutes              []*cloudprovider.Route
 		expectedRoutes             []*cloudprovider.Route
 		expectedNetworkUnavailable []bool
 		clientset                  *fake.Clientset
+		dualStack                  bool
 	}{
+		// multicidr
+		// 2 nodes, no routes yet
+		{
+			dualStack: true,
+			nodes: []*v1.Node{
+				&node3,
+				&node4,
+			},
+			initialRoutes: []*cloudprovider.Route{},
+			expectedRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+				{Name: cluster + "-02", TargetNode: "node-4", DestinationCIDR: "10.120.1.0/24", Blackhole: false},
+
+				{Name: cluster + "-03", TargetNode: "node-3", DestinationCIDR: "a00:100::/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:200::/24", Blackhole: false},
+			},
+			expectedNetworkUnavailable: []bool{true, true},
+			clientset:                  fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
+		},
+		// 2 nodes, all routes already created
+		{
+			dualStack: true,
+			nodes: []*v1.Node{
+				&node3,
+				&node4,
+			},
+			initialRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+				{Name: cluster + "-02", TargetNode: "node-4", DestinationCIDR: "10.120.1.0/24", Blackhole: false},

+				{Name: cluster + "-03", TargetNode: "node-3", DestinationCIDR: "a00:100::/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:200::/24", Blackhole: false},
+			},
+			expectedRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+				{Name: cluster + "-02", TargetNode: "node-4", DestinationCIDR: "10.120.1.0/24", Blackhole: false},
+
+				{Name: cluster + "-03", TargetNode: "node-3", DestinationCIDR: "a00:100::/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:200::/24", Blackhole: false},
+			},
+			expectedNetworkUnavailable: []bool{true, true},
+			clientset:                  fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
+		},
+		// 2 nodes, few wrong routes
+		{
+			dualStack: true,
+			nodes: []*v1.Node{
+				&node3,
+				&node4,
+			},
+			initialRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.1.0/24", Blackhole: false},
+				{Name: cluster + "-02", TargetNode: "node-4", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+
+				{Name: cluster + "-03", TargetNode: "node-3", DestinationCIDR: "a00:200::/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:100::/24", Blackhole: false},
+			},
+			expectedRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+				{Name: cluster + "-02", TargetNode: "node-4", DestinationCIDR: "10.120.1.0/24", Blackhole: false},
+
+				{Name: cluster + "-03", TargetNode: "node-3", DestinationCIDR: "a00:100::/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:200::/24", Blackhole: false},
+			},
+			expectedNetworkUnavailable: []bool{true, true},
+			clientset:                  fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
+		},
+		// 2 nodes, some routes already created
+		{
+			dualStack: true,
+			nodes: []*v1.Node{
+				&node3,
+				&node4,
+			},
+			initialRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:200::/24", Blackhole: false},
+			},
+			expectedRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+				{Name: cluster + "-02", TargetNode: "node-4", DestinationCIDR: "10.120.1.0/24", Blackhole: false},
+
+				{Name: cluster + "-03", TargetNode: "node-3", DestinationCIDR: "a00:100::/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:200::/24", Blackhole: false},
+			},
+			expectedNetworkUnavailable: []bool{true, true},
+			clientset:                  fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
+		},
+		// 2 nodes, too many routes
+		{
+			dualStack: true,
+			nodes: []*v1.Node{
+				&node3,
+				&node4,
+			},
+			initialRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+				{Name: cluster + "-02", TargetNode: "node-4", DestinationCIDR: "10.120.1.0/24", Blackhole: false},
+				{Name: cluster + "-001", TargetNode: "node-x", DestinationCIDR: "10.120.2.0/24", Blackhole: false},
+
+				{Name: cluster + "-03", TargetNode: "node-3", DestinationCIDR: "a00:100::/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:200::/24", Blackhole: false},
+				{Name: cluster + "-0002", TargetNode: "node-y", DestinationCIDR: "a00:300::/24", Blackhole: false},
+			},
+			expectedRoutes: []*cloudprovider.Route{
+				{Name: cluster + "-01", TargetNode: "node-3", DestinationCIDR: "10.120.0.0/24", Blackhole: false},
+				{Name: cluster + "-02", TargetNode: "node-4", DestinationCIDR: "10.120.1.0/24", Blackhole: false},
+
+				{Name: cluster + "-03", TargetNode: "node-3", DestinationCIDR: "a00:100::/24", Blackhole: false},
+				{Name: cluster + "-04", TargetNode: "node-4", DestinationCIDR: "a00:200::/24", Blackhole: false},
+			},
+			expectedNetworkUnavailable: []bool{true, true},
+			clientset:                  fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
+		},
+
+		// single cidr
 		// 2 nodes, routes already there
 		{
 			nodes: []*v1.Node{
@@ -237,9 +359,16 @@ func TestReconcile(t *testing.T) {
 		if !ok {
 			t.Error("Error in test: fakecloud doesn't support Routes()")
 		}
+		cidrs := make([]*net.IPNet, 0)
 		_, cidr, _ := net.ParseCIDR("10.120.0.0/16")
+		cidrs = append(cidrs, cidr)
+		if testCase.dualStack {
+			_, cidrv6, _ := net.ParseCIDR("ace:cab:deca::/8")
+			cidrs = append(cidrs, cidrv6)
+		}
+
 		informerFactory := informers.NewSharedInformerFactory(testCase.clientset, controller.NoResyncPeriodFunc())
-		rc := New(routes, testCase.clientset, informerFactory.Core().V1().Nodes(), cluster, cidr)
+		rc := New(routes, testCase.clientset, informerFactory.Core().V1().Nodes(), cluster, cidrs)
 		rc.nodeListerSynced = alwaysReady
 		if err := rc.reconcile(testCase.nodes, testCase.initialRoutes); err != nil {
 			t.Errorf("%d. Error from rc.reconcile(): %v", i, err)
@@ -284,7 +413,7 @@ func TestReconcile(t *testing.T) {
 					break poll
 				}
 			case <-timeoutChan:
-				t.Errorf("%d. rc.reconcile() = %v, routes:\n%v\nexpected: nil, routes:\n%v\n", i, err, flatten(finalRoutes), flatten(testCase.expectedRoutes))
+				t.Errorf("%d. rc.reconcile() = %v,\nfound routes:\n%v\nexpected routes:\n%v\n", i, err, flatten(finalRoutes), flatten(testCase.expectedRoutes))
 				break poll
 			}
 		}
@@ -295,16 +424,22 @@ func routeListEqual(list1, list2 []*cloudprovider.Route) bool {
 	if len(list1) != len(list2) {
 		return false
 	}
-	routeMap1 := make(map[string]*cloudprovider.Route)
+
+	// nodename+cidr:bool
+	seen := make(map[string]bool)
+
 	for _, route1 := range list1 {
-		routeMap1[route1.Name] = route1
-	}
-	for _, route2 := range list2 {
-		if route1, exists := routeMap1[route2.Name]; !exists || *route1 != *route2 {
-			return false
+		for _, route2 := range list2 {
+			if route1.DestinationCIDR == route2.DestinationCIDR && route1.TargetNode == route2.TargetNode {
+				seen[string(route1.TargetNode)+route1.DestinationCIDR] = true
+				break
+			}
 		}
 	}
-	return true
+	if len(seen) == len(list1) {
+		return true
+	}
+	return false
 }
 
 func flatten(list []*cloudprovider.Route) []cloudprovider.Route {
@@ -343,7 +343,7 @@ func (f *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st
 	f.Lock.Lock()
 	defer f.Lock.Unlock()
 	f.addCall("create-route")
-	name := clusterName + "-" + nameHint
+	name := clusterName + "-" + string(route.TargetNode) + "-" + route.DestinationCIDR
 	if _, exists := f.RouteMap[name]; exists {
 		f.Err = fmt.Errorf("route %q already exists", name)
 		return f.Err
@@ -362,11 +362,21 @@ func (f *Cloud) DeleteRoute(ctx context.Context, clusterName string, route *clou
 	f.Lock.Lock()
 	defer f.Lock.Unlock()
 	f.addCall("delete-route")
-	name := route.Name
-	if _, exists := f.RouteMap[name]; !exists {
-		f.Err = fmt.Errorf("no route found with name %q", name)
+	name := ""
+	for key, saved := range f.RouteMap {
+		if route.DestinationCIDR == saved.Route.DestinationCIDR &&
+			route.TargetNode == saved.Route.TargetNode &&
+			clusterName == saved.ClusterName {
+			name = key
+			break
+		}
+	}
+
+	if len(name) == 0 {
+		f.Err = fmt.Errorf("no route found for node:%v with DestinationCIDR== %v", route.TargetNode, route.DestinationCIDR)
 		return f.Err
 	}
 
 	delete(f.RouteMap, name)
 	return nil
 }
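Note: the fake cloud can no longer delete by route name alone, because a dual-stack node owns two routes with distinct names; it matches on (cluster, node, destination CIDR) instead. The lookup in isolation, over invented types that only resemble the fake's store:

    package main

    import "fmt"

    type fakeRoute struct {
        ClusterName string
        TargetNode  string
        CIDR        string
    }

    func findRouteKey(m map[string]fakeRoute, cluster, node, cidr string) (string, bool) {
        for key, saved := range m {
            if saved.ClusterName == cluster && saved.TargetNode == node && saved.CIDR == cidr {
                return key, true
            }
        }
        return "", false
    }

    func main() {
        m := map[string]fakeRoute{
            "my-k8s-node-3-10.120.0.0/24": {"my-k8s", "node-3", "10.120.0.0/24"},
            "my-k8s-node-3-a00:100::/24":  {"my-k8s", "node-3", "a00:100::/24"},
        }
        key, ok := findRouteKey(m, "my-k8s", "node-3", "a00:100::/24")
        fmt.Println(key, ok) // my-k8s-node-3-a00:100::/24 true
    }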
@@ -117,6 +117,31 @@ func (az *Cloud) ListVirtualMachines(resourceGroup string) ([]compute.VirtualMac
 	return az.ListVirtualMachinesWithRetry(resourceGroup)
 }
 
+// getPrivateIPsForMachine is wrapper for optional backoff getting private ips
+// list of a node by name
+func (az *Cloud) getPrivateIPsForMachine(nodeName types.NodeName) ([]string, error) {
+	if az.Config.shouldOmitCloudProviderBackoff() {
+		return az.vmSet.GetPrivateIPsByNodeName(string(nodeName))
+	}
+
+	return az.getPrivateIPsForMachineWithRetry(nodeName)
+}
+
+func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]string, error) {
+	var privateIPs []string
+	err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
+		var retryErr error
+		privateIPs, retryErr = az.vmSet.GetPrivateIPsByNodeName(string(nodeName))
+		if retryErr != nil {
+			klog.Errorf("GetPrivateIPsByNodeName(%s): backoff failure, will retry,err=%v", nodeName, retryErr)
+			return false, nil
+		}
+		klog.V(2).Infof("GetPrivateIPsByNodeName(%s): backoff success", nodeName)
+		return true, nil
+	})
+	return privateIPs, err
+}
+
 func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
 	if az.Config.shouldOmitCloudProviderBackoff() {
 		return az.vmSet.GetIPByNodeName(string(nodeName))
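Note: the retry wrapper follows the usual wait.ExponentialBackoff contract, where the condition func returns (false, nil) to request another attempt and (true, nil) to stop. A minimal sketch with made-up backoff values (az.requestBackoff() supplies the real ones in the patch):

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        attempts := 0
        backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2, Steps: 4}
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            attempts++
            return attempts == 3, nil // "succeed" on the third try
        })
        fmt.Println(attempts, err) // 3 <nil>
    }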
@@ -879,6 +879,10 @@ func (f *fakeVMSet) GetInstanceTypeByNodeName(name string) (string, error) {
 	return "", fmt.Errorf("unimplemented")
 }
 
+func (f *fakeVMSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) {
+	return []string{}, fmt.Errorf("unimplemented")
+}
+
 func (f *fakeVMSet) GetIPByNodeName(name string) (string, string, error) {
 	ip, found := f.NodeToIP[name]
 	if !found {
@@ -175,7 +175,7 @@ func TestFindRule(t *testing.T) {
 				{
 					Name: to.StringPtr("probe1"),
 					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
-						LoadDistribution: network.Default,
+						LoadDistribution: network.SourceIP,
 					},
 				},
 			},
@@ -19,6 +19,7 @@ package azure
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network"
 	"github.com/Azure/go-autorest/autorest/to"
@@ -26,6 +27,20 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/klog"
+	utilnet "k8s.io/utils/net"
+
+	// Azure route controller changes behavior if ipv6dual stack feature is turned on
+	// remove this once the feature graduates
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/component-base/featuregate"
 )
 
+// copied to minimize the number of cross reference
+// and exceptions in publishing and allowed imports.
+const (
+	IPv6DualStack featuregate.Feature = "IPv6DualStack"
+
+	routeNameFmt       = "%s____%s"
+	routeNameSeparator = "____"
+)
+
 // ListRoutes lists all managed routes that belong to the specified clusterName
@@ -119,12 +134,17 @@ func (az *Cloud) createRouteTable() error {
 // to create a more user-meaningful name.
 func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
 	// Returns for unmanaged nodes because azure cloud provider couldn't fetch information for them.
+	var targetIP string
 	nodeName := string(kubeRoute.TargetNode)
 	unmanaged, err := az.IsNodeUnmanaged(nodeName)
 	if err != nil {
 		return err
 	}
 	if unmanaged {
+		if utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) {
+			//TODO (khenidak) add support for unmanaged nodes when the feature reaches beta
+			return fmt.Errorf("unmanaged nodes are not supported in dual stack mode")
+		}
 		klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
 		az.routeCIDRsLock.Lock()
 		defer az.routeCIDRsLock.Unlock()
@@ -136,12 +156,29 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
 	if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
 		return err
 	}
-	targetIP, _, err := az.getIPForMachine(kubeRoute.TargetNode)
-	if err != nil {
-		return err
+	if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) {
+		targetIP, _, err = az.getIPForMachine(kubeRoute.TargetNode)
+		if err != nil {
+			return err
+		}
+	} else {
+		// for dual stack we need to select
+		// a private ip that matches family of the cidr
+		klog.V(4).Infof("CreateRoute: create route instance=%q cidr=%q is in dual stack mode", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+		CIDRv6 := utilnet.IsIPv6CIDRString(string(kubeRoute.DestinationCIDR))
+		nodePrivateIPs, err := az.getPrivateIPsForMachine(kubeRoute.TargetNode)
+		if nil != err {
+			klog.V(3).Infof("CreateRoute: create route: failed(GetPrivateIPsByNodeName) instance=%q cidr=%q with error=%v", kubeRoute.TargetNode, kubeRoute.DestinationCIDR, err)
+			return err
+		}
+
+		targetIP, err = findFirstIPByFamily(nodePrivateIPs, CIDRv6)
+		if nil != err {
+			klog.V(3).Infof("CreateRoute: create route: failed(findFirstIpByFamily) instance=%q cidr=%q with error=%v", kubeRoute.TargetNode, kubeRoute.DestinationCIDR, err)
+			return err
+		}
 	}
-
-	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
+	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR))
 	route := network.Route{
 		Name: to.StringPtr(routeName),
 		RoutePropertiesFormat: &network.RoutePropertiesFormat{
@@ -180,7 +217,7 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute
 
 	klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 
-	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
+	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR))
 	err = az.DeleteRouteWithName(routeName)
 	if err != nil {
 		return err
@@ -194,11 +231,42 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute
 // These two functions enable stashing the instance name in the route
 // and then retrieving it later when listing. This is needed because
 // Azure does not let you put tags/descriptions on the Route itself.
-func mapNodeNameToRouteName(nodeName types.NodeName) string {
-	return fmt.Sprintf("%s", nodeName)
+func mapNodeNameToRouteName(nodeName types.NodeName, cidr string) string {
+	if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) {
+		return fmt.Sprintf("%s", nodeName)
+	}
+	return fmt.Sprintf(routeNameFmt, nodeName, cidrtoRfc1035(cidr))
 }
 
 // Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName.
 func mapRouteNameToNodeName(routeName string) types.NodeName {
-	return types.NodeName(fmt.Sprintf("%s", routeName))
+	if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) {
+		return types.NodeName(fmt.Sprintf("%s", routeName))
+	}
+	parts := strings.Split(routeName, routeNameSeparator)
+	nodeName := parts[0]
+	return types.NodeName(nodeName)
+
+}
+
+// given a list of ips, return the first one
+// that matches the family requested
+// error if no match, or failure to parse
+// any of the ips
+func findFirstIPByFamily(ips []string, v6 bool) (string, error) {
+	for _, ip := range ips {
+		bIPv6 := utilnet.IsIPv6String(ip)
+		if v6 == bIPv6 {
+			return ip, nil
+		}
+	}
+	return "", fmt.Errorf("no match found matching the ipfamily requested")
+}
+
+//strips : . /
+func cidrtoRfc1035(cidr string) string {
+	cidr = strings.ReplaceAll(cidr, ":", "")
+	cidr = strings.ReplaceAll(cidr, ".", "")
+	cidr = strings.ReplaceAll(cidr, "/", "")
+	return cidr
 }
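Note: with the gate enabled, the route name embeds the destination CIDR so each (node, CIDR) pair gets a unique Azure route, and the node name is still recoverable by splitting on the separator. A stand-alone copy of the two helpers showing the round trip, with invented values:

    package main

    import (
        "fmt"
        "strings"
    )

    const routeNameSeparator = "____"

    func cidrtoRfc1035(cidr string) string {
        cidr = strings.ReplaceAll(cidr, ":", "")
        cidr = strings.ReplaceAll(cidr, ".", "")
        cidr = strings.ReplaceAll(cidr, "/", "")
        return cidr
    }

    func main() {
        name := fmt.Sprintf("%s%s%s", "node-1", routeNameSeparator, cidrtoRfc1035("fd00:1234::/64"))
        fmt.Println(name) // node-1____fd00123464

        nodeName := strings.Split(name, routeNameSeparator)[0]
        fmt.Println(nodeName) // node-1
    }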
@@ -27,6 +27,8 @@ import (
 
 	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network"
 	"github.com/Azure/go-autorest/autorest/to"
+	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/types"
 )
 
 func TestDeleteRoute(t *testing.T) {
@@ -43,7 +45,7 @@ func TestDeleteRoute(t *testing.T) {
 		nodeInformerSynced: func() bool { return true },
 	}
 	route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
-	routeName := mapNodeNameToRouteName(route.TargetNode)
+	routeName := mapNodeNameToRouteName(route.TargetNode, route.DestinationCIDR)
 
 	fakeRoutes.FakeStore = map[string]map[string]network.Route{
 		cloud.RouteTableName: {
@@ -134,7 +136,7 @@ func TestCreateRoute(t *testing.T) {
 		t.Errorf("unexpected calls create if not exists, exists: %v", fakeTable.Calls)
 	}
 
-	routeName := mapNodeNameToRouteName(route.TargetNode)
+	routeName := mapNodeNameToRouteName(route.TargetNode, string(route.DestinationCIDR))
 	routeInfo, found := fakeRoutes.FakeStore[cloud.RouteTableName][routeName]
 	if !found {
 		t.Errorf("could not find route: %v in %v", routeName, fakeRoutes.FakeStore)
@@ -390,3 +392,40 @@ func TestProcessRoutes(t *testing.T) {
 		}
 	}
 }
+
+func errorNotNil(t *testing.T, err error) {
+	if nil != err {
+		t.Errorf("%s: failure error: %v", t.Name(), err)
+	}
+}
+func TestFindFirstIPByFamily(t *testing.T) {
+	firstIPv4 := "10.0.0.1"
+	firstIPv6 := "2001:1234:5678:9abc::9"
+	ips := []string{
+		firstIPv4,
+		"11.0.0.1",
+		firstIPv6,
+		"fda4:6dee:effc:62a0:0:0:0:0",
+	}
+	outIPV4, err := findFirstIPByFamily(ips, false)
+	errorNotNil(t, err)
+	assert.Equal(t, outIPV4, firstIPv4)
+
+	outIPv6, err := findFirstIPByFamily(ips, true)
+	errorNotNil(t, err)
+	assert.Equal(t, outIPv6, firstIPv6)
+}
+
+func TestRouteNameFuncs(t *testing.T) {
+	v4CIDR := "10.0.0.1/16"
+	v6CIDR := "fd3e:5f02:6ec0:30ba::/64"
+	nodeName := "thisNode"
+
+	routeName := mapNodeNameToRouteName(types.NodeName(nodeName), v4CIDR)
+	outNodeName := mapRouteNameToNodeName(routeName)
+	assert.Equal(t, string(outNodeName), nodeName)
+
+	routeName = mapNodeNameToRouteName(types.NodeName(nodeName), v6CIDR)
+	outNodeName = mapRouteNameToNodeName(routeName)
+	assert.Equal(t, string(outNodeName), nodeName)
+}
@@ -460,6 +460,29 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)
 	return privateIP, publicIP, nil
 }
 
+// returns a list of private ips assigned to node
+// TODO (khenidak): This should read all nics, not just the primary
+// allowing users to split ipv4/v6 on multiple nics
+func (as *availabilitySet) GetPrivateIPsByNodeName(name string) ([]string, error) {
+	ips := make([]string, 0)
+	nic, err := as.GetPrimaryInterface(name)
+	if err != nil {
+		return ips, err
+	}
+
+	if nic.IPConfigurations == nil {
+		return ips, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
+	}
+
+	for _, ipConfig := range *(nic.IPConfigurations) {
+		if ipConfig.PrivateIPAddress != nil {
+			ips = append(ips, *(ipConfig.PrivateIPAddress))
+		}
+	}
+
+	return ips, nil
+}
+
 // getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
 // a list of availability sets that match the nodes available to k8s.
 func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
@@ -72,4 +72,7 @@ type VMSet interface {
 
 	// GetPowerStatusByNodeName returns the power state of the specified node.
 	GetPowerStatusByNodeName(name string) (string, error)
+
+	// GetPrivateIPsByNodeName returns a slice of all private ips assigned to node (ipv6 and ipv4)
+	GetPrivateIPsByNodeName(name string) ([]string, error)
 }
@@ -335,6 +335,30 @@ func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
 	return internalIP, publicIP, nil
 }
 
+// returns a list of private ips assigned to node
+// TODO (khenidak): This should read all nics, not just the primary
+// allowing users to split ipv4/v6 on multiple nics
+func (ss *scaleSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) {
+	ips := make([]string, 0)
+	nic, err := ss.GetPrimaryInterface(nodeName)
+	if err != nil {
+		klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
+		return ips, err
+	}
+
+	if nic.IPConfigurations == nil {
+		return ips, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
+	}
+
+	for _, ipConfig := range *(nic.IPConfigurations) {
+		if ipConfig.PrivateIPAddress != nil {
+			ips = append(ips, *(ipConfig.PrivateIPAddress))
+		}
+	}
+
+	return ips, nil
+}
+
 // This returns the full identifier of the primary NIC for the given VM.
 func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) {
 	if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
@@ -74,6 +74,7 @@ func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st
 		return mc.Observe(err)
 	}
 	cr := &compute.Route{
+		// TODO(thockin): generate a unique name for node + route cidr. Don't depend on name hints.
 		Name:            truncateClusterName(clusterName) + "-" + nameHint,
 		DestRange:       route.DestinationCIDR,
 		NextHopInstance: fmt.Sprintf("zones/%s/instances/%s", targetInstance.Zone, targetInstance.Name),