Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Merge pull request #83501 from yastij/remove-node-cond-dep
remove Get/Set node condition dependency for the ccm controllers
This commit is contained in: commit e0f651a0be
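In short, the cloud controllers stop calling the in-tree node-condition helpers (k8s.io/kubernetes/pkg/controller/util/node and k8s.io/kubernetes/pkg/util/node) and instead use the GetNodeCondition/SetNodeCondition helpers added to k8s.io/cloud-provider/node/helpers in this PR (conditions.go, shown at the end). The hunks below appear to come from pkg/controller/cloud, pkg/controller/route and the vendored staging tree. A minimal, illustrative caller of the new getter; everything except the helper itself is invented for the example:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
)

func main() {
	// Hypothetical node status, only for illustration.
	status := &v1.NodeStatus{
		Conditions: []v1.NodeCondition{
			{Type: v1.NodeReady, Status: v1.ConditionTrue},
		},
	}

	// GetNodeCondition returns the index of the matching condition and a
	// pointer to it, or (-1, nil) when the condition is absent.
	if i, cond := cloudnodeutil.GetNodeCondition(status, v1.NodeReady); cond != nil {
		fmt.Printf("NodeReady found at index %d with status %s\n", i, cond.Status)
	}
}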
@@ -15,7 +15,6 @@ go_library(
 importpath = "k8s.io/kubernetes/pkg/controller/cloud",
 deps = [
 "//pkg/controller:go_default_library",
-"//pkg/controller/util/node:go_default_library",
 "//pkg/kubelet/apis:go_default_library",
 "//pkg/scheduler/api:go_default_library",
 "//pkg/util/node:go_default_library",
@@ -34,6 +33,7 @@ go_library(
 "//staging/src/k8s.io/client-go/tools/record:go_default_library",
 "//staging/src/k8s.io/client-go/util/retry:go_default_library",
 "//staging/src/k8s.io/cloud-provider:go_default_library",
+"//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
 "//vendor/k8s.io/klog:go_default_library",
 ],
 )
@@ -46,7 +46,6 @@ go_test(
 ],
 embed = [":go_default_library"],
 deps = [
-"//pkg/controller:go_default_library",
 "//pkg/controller/testutil:go_default_library",
 "//pkg/kubelet/apis:go_default_library",
 "//pkg/scheduler/api:go_default_library",
@@ -35,6 +35,7 @@ import (
 "k8s.io/client-go/tools/record"
 clientretry "k8s.io/client-go/util/retry"
 cloudprovider "k8s.io/cloud-provider"
+cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 "k8s.io/klog"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -234,7 +235,7 @@ func (cnc *CloudNodeController) initializeNode(node *v1.Node) {
 // Since there are node taints, do we still need this?
 // This condition marks the node as unusable until routes are initialized in the cloud provider
 if cnc.cloud.ProviderName() == "gce" {
-if err := nodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+if err := cloudnodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 Type: v1.NodeNetworkUnavailable,
 Status: v1.ConditionTrue,
 Reason: "NoRouteCreated",
@@ -21,7 +21,7 @@ import (
 "testing"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/client-go/kubernetes/fake"
 "k8s.io/client-go/kubernetes/scheme"
@@ -31,7 +31,6 @@ import (
 "k8s.io/client-go/tools/record"
 cloudprovider "k8s.io/cloud-provider"
 fakecloud "k8s.io/cloud-provider/fake"
-"k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/testutil"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -197,7 +196,7 @@ func TestNodeInitialized(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{
@@ -262,7 +261,7 @@ func TestNodeIgnored(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{
@@ -334,7 +333,7 @@ func TestGCECondition(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{
@@ -419,7 +418,7 @@ func TestZoneInitialized(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{
@@ -509,7 +508,7 @@ func TestNodeAddresses(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{},
@@ -622,7 +621,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{
@@ -837,7 +836,7 @@ func TestNodeAddressesNotUpdate(t *testing.T) {
 },
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{},
@@ -912,7 +911,7 @@ func TestNodeProviderID(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{},
@@ -995,7 +994,7 @@ func TestNodeProviderIDAlreadySet(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(fnh, 0)

 fakeCloud := &fakecloud.Cloud{
 InstanceTypes: map[types.NodeName]string{},
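Side note on the test changes above: as far as I recall, controller.NoResyncPeriodFunc() is a small helper in k8s.io/kubernetes/pkg/controller that simply returns a zero resync period, so passing 0 to informers.NewSharedInformerFactory is behaviour-preserving and lets the tests drop that import. A hedged sketch of the equivalent construction; the fake clientset is only for illustration:

package main

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// A resync period of 0 disables periodic resync, matching what the
	// removed controller.NoResyncPeriodFunc() helper returned.
	factory := informers.NewSharedInformerFactory(client, 0)
	_ = factory.Core().V1().Nodes().Informer()
}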
@@ -34,9 +34,9 @@ import (
 v1lister "k8s.io/client-go/listers/core/v1"
 "k8s.io/client-go/tools/record"
 cloudprovider "k8s.io/cloud-provider"
+cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 "k8s.io/klog"
 "k8s.io/kubernetes/pkg/controller"
-nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
@@ -133,7 +133,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes() {
 for _, node := range nodes {
 // Default NodeReady status to v1.ConditionUnknown
 status := v1.ConditionUnknown
-if _, c := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
+if _, c := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
 status = c.Status
 }
@@ -14,9 +14,7 @@ go_library(
 ],
 importpath = "k8s.io/kubernetes/pkg/controller/route",
 deps = [
-"//pkg/controller/util/node:go_default_library",
 "//pkg/util/metrics:go_default_library",
-"//pkg/util/node:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -32,6 +30,7 @@ go_library(
 "//staging/src/k8s.io/client-go/tools/record:go_default_library",
 "//staging/src/k8s.io/client-go/util/retry:go_default_library",
 "//staging/src/k8s.io/cloud-provider:go_default_library",
+"//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
 "//vendor/k8s.io/klog:go_default_library",
 ],
 )
@@ -41,8 +40,6 @@ go_test(
 srcs = ["route_controller_test.go"],
 embed = [":go_default_library"],
 deps = [
-"//pkg/controller:go_default_library",
-"//pkg/controller/util/node:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -51,6 +48,7 @@ go_test(
 "//staging/src/k8s.io/client-go/testing:go_default_library",
 "//staging/src/k8s.io/cloud-provider:go_default_library",
 "//staging/src/k8s.io/cloud-provider/fake:go_default_library",
+"//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
 ],
 )
@@ -40,9 +40,8 @@ import (
 "k8s.io/client-go/tools/record"
 clientretry "k8s.io/client-go/util/retry"
 cloudprovider "k8s.io/cloud-provider"
-nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 "k8s.io/kubernetes/pkg/util/metrics"
-utilnode "k8s.io/kubernetes/pkg/util/node"
 )

 const (
@@ -290,7 +289,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.Route) error {
 }

 func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
-_, condition := nodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
+_, condition := cloudnodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
 if routesCreated && condition != nil && condition.Status == v1.ConditionFalse {
 klog.V(2).Infof("set node %v with NodeNetworkUnavailable=false was canceled because it is already set", node.Name)
 return nil
@@ -311,7 +310,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
 // patch in the retry loop.
 currentTime := metav1.Now()
 if routesCreated {
-err = utilnode.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 Type: v1.NodeNetworkUnavailable,
 Status: v1.ConditionFalse,
 Reason: "RouteCreated",
@@ -319,7 +318,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
 LastTransitionTime: currentTime,
 })
 } else {
-err = utilnode.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 Type: v1.NodeNetworkUnavailable,
 Status: v1.ConditionTrue,
 Reason: "NoRouteCreated",
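The "patch in the retry loop" comment above refers to the route controller wrapping the condition update in a client-go retry; the exact backoff it uses is not shown in these hunks. A rough sketch of that pattern with the new helper, using client-go's default backoff as an assumption, and a hypothetical node name and message:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	clientretry "k8s.io/client-go/util/retry"
	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
)

// markRoutesCreated patches NodeNetworkUnavailable=false onto the node,
// retrying on conflicts. The backoff is client-go's default here, which is an
// assumption rather than what the route controller actually configures.
func markRoutesCreated(client clientset.Interface, nodeName string) error {
	return clientretry.RetryOnConflict(clientretry.DefaultBackoff, func() error {
		return cloudnodeutil.SetNodeCondition(client, types.NodeName(nodeName), v1.NodeCondition{
			Type:               v1.NodeNetworkUnavailable,
			Status:             v1.ConditionFalse,
			Reason:             "RouteCreated",
			Message:            "routes created for node",
			LastTransitionTime: metav1.Now(),
		})
	})
}

func main() {
	// Fake clientset seeded with a hypothetical node named "node0".
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node0"}})
	fmt.Println("error:", markRoutesCreated(client, "node0"))
}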
@@ -30,8 +30,7 @@ import (
 core "k8s.io/client-go/testing"
 cloudprovider "k8s.io/cloud-provider"
 fakecloud "k8s.io/cloud-provider/fake"
-"k8s.io/kubernetes/pkg/controller"
-nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 )

 func alwaysReady() bool { return true }
@@ -66,7 +65,7 @@ func TestIsResponsibleForRoute(t *testing.T) {
 t.Errorf("%d. Error in test case: unparsable cidr %q", i, testCase.clusterCIDR)
 }
 client := fake.NewSimpleClientset()
-informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
+informerFactory := informers.NewSharedInformerFactory(client, 0)
 rc := New(nil, nil, informerFactory.Core().V1().Nodes(), myClusterName, []*net.IPNet{cidr})
 rc.nodeListerSynced = alwaysReady
 route := &cloudprovider.Route{
@@ -367,7 +366,7 @@ func TestReconcile(t *testing.T) {
 cidrs = append(cidrs, cidrv6)
 }

-informerFactory := informers.NewSharedInformerFactory(testCase.clientset, controller.NoResyncPeriodFunc())
+informerFactory := informers.NewSharedInformerFactory(testCase.clientset, 0)
 rc := New(routes, testCase.clientset, informerFactory.Core().V1().Nodes(), cluster, cidrs)
 rc.nodeListerSynced = alwaysReady
 if err := rc.reconcile(testCase.nodes, testCase.initialRoutes); err != nil {
@@ -376,7 +375,7 @@ func TestReconcile(t *testing.T) {
 for _, action := range testCase.clientset.Actions() {
 if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" {
 node := action.(core.UpdateAction).GetObject().(*v1.Node)
-_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
+_, condition := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
 if condition == nil {
 t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name)
 } else {
@@ -4,6 +4,7 @@ go_library(
 name = "go_default_library",
 srcs = [
 "address.go",
+"conditions.go",
 "taints.go",
 ],
 importmap = "k8s.io/kubernetes/vendor/k8s.io/cloud-provider/node/helpers",
staging/src/k8s.io/cloud-provider/node/helpers/conditions.go (new file, 56 lines)
@@ -0,0 +1,56 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package helpers

import (
	"encoding/json"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

// GetNodeCondition extracts the provided condition from the given status and returns that.
// Returns nil and -1 if the condition is not present, and the index of the located condition.
func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) {
	if status == nil {
		return -1, nil
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type == conditionType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}

// SetNodeCondition updates specific node condition with patch operation.
func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error {
	condition.LastHeartbeatTime = metav1.NewTime(time.Now())
	patch, err := json.Marshal(map[string]interface{}{
		"status": map[string]interface{}{
			"conditions": []v1.NodeCondition{condition},
		},
	})
	if err != nil {
		return err
	}
	_, err = c.CoreV1().Nodes().PatchStatus(string(node), patch)
	return err
}
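For completeness, a small hedged example of the two new helpers used together against a fake clientset; the node name and condition values are invented, and the Get/PatchStatus call signatures match the client-go of this era:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
)

func main() {
	// Fake clientset seeded with a bare node named "node0" (hypothetical).
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node0"}})

	// Patch a condition onto the node's status subresource.
	err := cloudnodeutil.SetNodeCondition(client, types.NodeName("node0"), v1.NodeCondition{
		Type:   v1.NodeNetworkUnavailable,
		Status: v1.ConditionTrue,
		Reason: "NoRouteCreated",
	})
	if err != nil {
		panic(err)
	}

	// Read it back with GetNodeCondition.
	node, _ := client.CoreV1().Nodes().Get("node0", metav1.GetOptions{})
	if _, cond := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable); cond != nil {
		fmt.Println("condition reason:", cond.Reason)
	}
}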