Merge pull request #83501 from yastij/remove-node-cond-dep

remove Get/Set node condition dependency for the ccm controllers
Authored by Kubernetes Prow Robot on 2019-10-04 16:31:26 -07:00, committed by GitHub
commit e0f651a0be
9 changed files with 82 additions and 30 deletions
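
In short, the cloud node controller, the cloud node lifecycle controller, and the route controller now read and patch node conditions through the staging helpers in k8s.io/cloud-provider/node/helpers instead of the kube-internal pkg/controller/util/node and pkg/util/node packages. A minimal, self-contained sketch of the new call sites follows; it is not part of this change — the package main wrapper, the fake clientset, and the node name "node0" are illustrative only.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
)

func main() {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node0"},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{Type: v1.NodeReady, Status: v1.ConditionTrue},
			},
		},
	}

	// Read a condition the way the lifecycle controller now does,
	// without importing pkg/controller/util/node.
	if _, c := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
		fmt.Println("NodeReady:", c.Status)
	}

	// Patch a condition through the same staging helper, as the cloud node
	// and route controllers now do. A fake clientset stands in for a real one.
	client := fake.NewSimpleClientset(node)
	err := cloudnodeutil.SetNodeCondition(client, types.NodeName(node.Name), v1.NodeCondition{
		Type:               v1.NodeNetworkUnavailable,
		Status:             v1.ConditionFalse,
		Reason:             "RouteCreated",
		LastTransitionTime: metav1.Now(),
	})
	fmt.Println("SetNodeCondition error:", err)
}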

View File

@@ -15,7 +15,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/controller/cloud",
     deps = [
         "//pkg/controller:go_default_library",
-        "//pkg/controller/util/node:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/util/node:go_default_library",
@@ -34,6 +33,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
+        "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -46,7 +46,6 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/controller:go_default_library",
         "//pkg/controller/testutil:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/scheduler/api:go_default_library",

View File

@@ -35,6 +35,7 @@ import (
 	"k8s.io/client-go/tools/record"
 	clientretry "k8s.io/client-go/util/retry"
 	cloudprovider "k8s.io/cloud-provider"
+	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 	"k8s.io/klog"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -234,7 +235,7 @@ func (cnc *CloudNodeController) initializeNode(node *v1.Node) {
 	// Since there are node taints, do we still need this?
 	// This condition marks the node as unusable until routes are initialized in the cloud provider
 	if cnc.cloud.ProviderName() == "gce" {
-		if err := nodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+		if err := cloudnodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 			Type:   v1.NodeNetworkUnavailable,
 			Status: v1.ConditionTrue,
 			Reason: "NoRouteCreated",

View File

@@ -21,7 +21,7 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -31,7 +31,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	cloudprovider "k8s.io/cloud-provider"
 	fakecloud "k8s.io/cloud-provider/fake"
-	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/testutil"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -197,7 +196,7 @@ func TestNodeInitialized(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -262,7 +261,7 @@ func TestNodeIgnored(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -334,7 +333,7 @@ func TestGCECondition(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -419,7 +418,7 @@ func TestZoneInitialized(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -509,7 +508,7 @@ func TestNodeAddresses(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{},
@@ -622,7 +621,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -837,7 +836,7 @@ func TestNodeAddressesNotUpdate(t *testing.T) {
 		},
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{},
@@ -912,7 +911,7 @@ func TestNodeProviderID(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{},
@@ -995,7 +994,7 @@ func TestNodeProviderIDAlreadySet(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}
 
-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)
 
 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{},

View File

@@ -34,9 +34,9 @@ import (
 	v1lister "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/record"
 	cloudprovider "k8s.io/cloud-provider"
+	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/controller"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 
@@ -133,7 +133,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes() {
 	for _, node := range nodes {
 		// Default NodeReady status to v1.ConditionUnknown
 		status := v1.ConditionUnknown
-		if _, c := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
+		if _, c := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
 			status = c.Status
 		}

View File

@@ -14,9 +14,7 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/controller/route",
     deps = [
-        "//pkg/controller/util/node:go_default_library",
         "//pkg/util/metrics:go_default_library",
-        "//pkg/util/node:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -32,6 +30,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
+        "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -41,8 +40,6 @@ go_test(
     srcs = ["route_controller_test.go"],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/controller:go_default_library",
-        "//pkg/controller/util/node:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -51,6 +48,7 @@ go_test(
         "//staging/src/k8s.io/client-go/testing:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
         "//staging/src/k8s.io/cloud-provider/fake:go_default_library",
+        "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
     ],
 )

View File

@@ -40,9 +40,8 @@ import (
 	"k8s.io/client-go/tools/record"
 	clientretry "k8s.io/client-go/util/retry"
 	cloudprovider "k8s.io/cloud-provider"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 	"k8s.io/kubernetes/pkg/util/metrics"
-	utilnode "k8s.io/kubernetes/pkg/util/node"
 )
 
 const (
@@ -290,7 +289,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.Route
 }
 
 func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
-	_, condition := nodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
+	_, condition := cloudnodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
 	if routesCreated && condition != nil && condition.Status == v1.ConditionFalse {
 		klog.V(2).Infof("set node %v with NodeNetworkUnavailable=false was canceled because it is already set", node.Name)
 		return nil
@@ -311,7 +310,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
 	// patch in the retry loop.
 	currentTime := metav1.Now()
 	if routesCreated {
-		err = utilnode.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+		err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 			Type:   v1.NodeNetworkUnavailable,
 			Status: v1.ConditionFalse,
 			Reason: "RouteCreated",
@@ -319,7 +318,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
 			LastTransitionTime: currentTime,
 		})
 	} else {
-		err = utilnode.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+		err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 			Type:   v1.NodeNetworkUnavailable,
 			Status: v1.ConditionTrue,
 			Reason: "NoRouteCreated",

View File

@@ -30,8 +30,7 @@ import (
 	core "k8s.io/client-go/testing"
 	cloudprovider "k8s.io/cloud-provider"
 	fakecloud "k8s.io/cloud-provider/fake"
-	"k8s.io/kubernetes/pkg/controller"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 )
 
 func alwaysReady() bool { return true }
@@ -66,7 +65,7 @@ func TestIsResponsibleForRoute(t *testing.T) {
 			t.Errorf("%d. Error in test case: unparsable cidr %q", i, testCase.clusterCIDR)
 		}
 		client := fake.NewSimpleClientset()
-		informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
+		informerFactory := informers.NewSharedInformerFactory(client, 0)
 		rc := New(nil, nil, informerFactory.Core().V1().Nodes(), myClusterName, []*net.IPNet{cidr})
 		rc.nodeListerSynced = alwaysReady
 		route := &cloudprovider.Route{
@@ -367,7 +366,7 @@ func TestReconcile(t *testing.T) {
 			cidrs = append(cidrs, cidrv6)
 		}
 
-		informerFactory := informers.NewSharedInformerFactory(testCase.clientset, controller.NoResyncPeriodFunc())
+		informerFactory := informers.NewSharedInformerFactory(testCase.clientset, 0)
 		rc := New(routes, testCase.clientset, informerFactory.Core().V1().Nodes(), cluster, cidrs)
 		rc.nodeListerSynced = alwaysReady
 		if err := rc.reconcile(testCase.nodes, testCase.initialRoutes); err != nil {
@@ -376,7 +375,7 @@ func TestReconcile(t *testing.T) {
 		for _, action := range testCase.clientset.Actions() {
 			if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" {
 				node := action.(core.UpdateAction).GetObject().(*v1.Node)
-				_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
+				_, condition := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
 				if condition == nil {
 					t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name)
 				} else {

View File

@@ -4,6 +4,7 @@ go_library(
     name = "go_default_library",
     srcs = [
         "address.go",
+        "conditions.go",
         "taints.go",
     ],
     importmap = "k8s.io/kubernetes/vendor/k8s.io/cloud-provider/node/helpers",

View File

@@ -0,0 +1,56 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package helpers

import (
	"encoding/json"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

// GetNodeCondition extracts the provided condition from the given status and returns that.
// Returns nil and -1 if the condition is not present, and the index of the located condition.
func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) {
	if status == nil {
		return -1, nil
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type == conditionType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}

// SetNodeCondition updates specific node condition with patch operation.
func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error {
	condition.LastHeartbeatTime = metav1.NewTime(time.Now())
	patch, err := json.Marshal(map[string]interface{}{
		"status": map[string]interface{}{
			"conditions": []v1.NodeCondition{condition},
		},
	})
	if err != nil {
		return err
	}
	_, err = c.CoreV1().Nodes().PatchStatus(string(node), patch)
	return err
}
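
For context on what SetNodeCondition sends: it refreshes LastHeartbeatTime and then patches status.conditions with only the one condition, rather than updating the whole Node object. The sketch below mirrors the marshaling step from the helper above so the patch body can be inspected; the condition values reuse the route controller's RouteCreated case and are illustrative, and the package main wrapper is not part of this change.

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Build the same map the helper marshals; only a single condition is
	// included, so the status patch leaves other conditions untouched.
	patch, err := json.Marshal(map[string]interface{}{
		"status": map[string]interface{}{
			"conditions": []v1.NodeCondition{{
				Type:   v1.NodeNetworkUnavailable,
				Status: v1.ConditionFalse,
				Reason: "RouteCreated",
			}},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
	// Prints roughly (timestamps are null here because they were left unset):
	// {"status":{"conditions":[{"type":"NetworkUnavailable","status":"False",
	//  "lastHeartbeatTime":null,"lastTransitionTime":null,"reason":"RouteCreated"}]}}
}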