Merge pull request #80238 from smarterclayton/disable_node_role

Clarify use of node-role labels within Kubernetes
Merged by Kubernetes Prow Robot on 2019-08-28 12:01:27 -07:00, committed via GitHub
commit 92a320aeb6
23 changed files with 188 additions and 55 deletions

View File

@ -244,7 +244,6 @@
"k8s.io/kubernetes/pkg/util/mount",
"k8s.io/kubernetes/pkg/util/node",
"k8s.io/kubernetes/pkg/util/slice",
"k8s.io/kubernetes/pkg/util/system",
"k8s.io/kubernetes/pkg/util/taints",
"k8s.io/kubernetes/pkg/volume",
"k8s.io/kubernetes/pkg/volume/util",

View File

@ -17,7 +17,6 @@ go_library(
"//pkg/scheduler/api:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/taints:go_default_library",
"//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",

View File

@ -23,13 +23,14 @@ package nodelifecycle
import (
"fmt"
"strings"
"sync"
"time"
"k8s.io/klog"
coordv1beta1 "k8s.io/api/coordination/v1beta1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -54,11 +55,11 @@ import (
"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
"k8s.io/kubernetes/pkg/features"
+kubefeatures "k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/util/metrics"
utilnode "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints"
)
@ -715,8 +716,8 @@ func (nc *Controller) monitorNodeHealth() error {
continue
}
-// We do not treat a master node as a part of the cluster for network disruption checking.
-if !system.IsMasterNode(node.Name) {
+// Some nodes may be excluded from disruption checking
+if !isNodeExcludedFromDisruptionChecks(node) {
zoneToNodeConditions[utilnode.GetZoneKey(node)] = append(zoneToNodeConditions[utilnode.GetZoneKey(node)], currentReadyCondition)
}
@ -806,6 +807,45 @@ func (nc *Controller) monitorNodeHealth() error {
return nil
}
// labelNodeDisruptionExclusion is a label on nodes that controls whether they are
// excluded from being considered for disruption checks by the node controller.
const labelNodeDisruptionExclusion = "node.kubernetes.io/exclude-disruption"
func isNodeExcludedFromDisruptionChecks(node *v1.Node) bool {
// DEPRECATED: will be removed in 1.19
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LegacyNodeRoleBehavior) {
if legacyIsMasterNode(node.Name) {
return true
}
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeDisruptionExclusion) {
if _, ok := node.Labels[labelNodeDisruptionExclusion]; ok {
return true
}
}
return false
}
// legacyIsMasterNode returns true if the given node is a registered master, according
// to the logic historically used for this check. This code path is deprecated
// and the node disruption exclusion label should be used in the future.
// This code will not be updated to use the node-role label, since
// node-roles may not be used for feature enablement.
// DEPRECATED: Will be removed in 1.19
func legacyIsMasterNode(nodeName string) bool {
// We are trying to capture the "master(-...)?$" regexp.
// However, using regexp.MatchString() would account for more than 35% of all
// space allocations in the ControllerManager, so we use a cheaper check instead.
if strings.HasSuffix(nodeName, "master") {
return true
}
if len(nodeName) >= 10 {
return strings.HasSuffix(nodeName[:len(nodeName)-3], "master-")
}
return false
}
// tryUpdateNodeHealth checks a given node's conditions and tries to update it. Returns the grace period to
// which the given node is entitled, the current and last observed Ready conditions, and an error if one occurred.
func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.NodeCondition, *v1.NodeCondition, error) {
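
The legacy name heuristic above is easy to misread. A minimal standalone sketch (the heuristic is copied verbatim from legacyIsMasterNode; the node names are hypothetical) shows which names the deprecated path actually matches:

package main

import (
	"fmt"
	"strings"
)

// Verbatim copy of the legacy heuristic above, for illustration only.
func legacyIsMasterNode(nodeName string) bool {
	if strings.HasSuffix(nodeName, "master") {
		return true
	}
	if len(nodeName) >= 10 {
		return strings.HasSuffix(nodeName[:len(nodeName)-3], "master-")
	}
	return false
}

func main() {
	for _, name := range []string{"prod-master", "master-abc", "master-12345", "node-1"} {
		fmt.Printf("%-13s -> %v\n", name, legacyIsMasterNode(name))
	}
	// prod-master  -> true  (ends in "master")
	// master-abc   -> true  ("master-" plus exactly three trailing characters)
	// master-12345 -> false (more than three characters after "master-")
	// node-1       -> false
}

Any naming scheme outside these patterns was never excluded, which is part of why the explicit node.kubernetes.io/exclude-disruption label replaces it.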

View File

@ -23,7 +23,7 @@ import (
apps "k8s.io/api/apps/v1"
coordv1beta1 "k8s.io/api/coordination/v1beta1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -3270,3 +3270,35 @@ func TestTryUpdateNodeHealth(t *testing.T) {
})
}
}
func Test_isNodeExcludedFromDisruptionChecks(t *testing.T) {
validNodeStatus := v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: "Test"}}}
tests := []struct {
name string
enableExclusion bool
enableLegacy bool
input *v1.Node
want bool
}{
{want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}},
{want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
{want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
{want: false, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
{want: false, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
{want: true, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
{want: true, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeDisruptionExclusion, tt.enableExclusion)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LegacyNodeRoleBehavior, tt.enableLegacy)()
if result := isNodeExcludedFromDisruptionChecks(tt.input); result != tt.want {
t.Errorf("isNodeExcludedFromDisruptionChecks() = %v, want %v", result, tt.want)
}
})
}
}

View File

@ -58,23 +58,38 @@ const (
minRetryDelay = 5 * time.Second
maxRetryDelay = 300 * time.Second
-// LabelNodeRoleMaster specifies that a node is a master
-// It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
-LabelNodeRoleMaster = "node-role.kubernetes.io/master"
+// labelNodeRoleMaster specifies that a node is a master. The use of this label within the
+// controller is deprecated and only considered when the LegacyNodeRoleBehavior feature gate
+// is on.
+labelNodeRoleMaster = "node-role.kubernetes.io/master"
-// LabelNodeRoleExcludeBalancer specifies that the node should be
-// exclude from load balancers created by a cloud provider.
-LabelNodeRoleExcludeBalancer = "alpha.service-controller.kubernetes.io/exclude-balancer"
+// labelNodeRoleExcludeBalancer specifies that the node should not be considered as a target
+// for external load-balancers which use nodes as a second hop (e.g. many cloud LBs which only
+// understand nodes). For services that use externalTrafficPolicy=Local, this may mean that
+// any backends on excluded nodes are not reachable by those external load-balancers.
+// Implementations of this exclusion may vary based on provider. This label is honored starting
+// in 1.16 when the ServiceNodeExclusion gate is on.
+labelNodeRoleExcludeBalancer = "node.kubernetes.io/exclude-from-external-load-balancers"
+// labelAlphaNodeRoleExcludeBalancer specifies that the node should be
+// excluded from load balancers created by a cloud provider. This label is deprecated and will
+// be removed in 1.17.
+labelAlphaNodeRoleExcludeBalancer = "alpha.service-controller.kubernetes.io/exclude-balancer"
// serviceNodeExclusionFeature is the feature gate name that
// enables nodes to exclude themselves from service load balancers
// originated from: https://github.com/kubernetes/kubernetes/blob/28e800245e/pkg/features/kube_features.go#L178
serviceNodeExclusionFeature = "ServiceNodeExclusion"
-// ServiceLoadBalancerFinalizerFeature is the feature gate name that
+// serviceLoadBalancerFinalizerFeature is the feature gate name that
// enables Finalizer Protection for Service LoadBalancers.
// originated from: https://github.com/kubernetes/kubernetes/blob/28e800245e/pkg/features/kube_features.go#L433
serviceLoadBalancerFinalizerFeature = "ServiceLoadBalancerFinalizer"
+// legacyNodeRoleBehaviorFeature is the feature gate name that enables legacy
+// behavior to vary cluster functionality on the node-role.kubernetes.io
+// labels.
+legacyNodeRoleBehaviorFeature = "LegacyNodeRoleBehavior"
)
type cachedService struct {
@ -625,14 +640,19 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate {
return false
}
-// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
-// Recognize nodes labeled as master, and filter them also, as we were doing previously.
-if _, hasMasterRoleLabel := node.Labels[LabelNodeRoleMaster]; hasMasterRoleLabel {
-return false
+if utilfeature.DefaultFeatureGate.Enabled(legacyNodeRoleBehaviorFeature) {
+// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
+// Recognize nodes labeled as master, and filter them also, as we were doing previously.
+if _, hasMasterRoleLabel := node.Labels[labelNodeRoleMaster]; hasMasterRoleLabel {
+return false
+}
+}
if utilfeature.DefaultFeatureGate.Enabled(serviceNodeExclusionFeature) {
-if _, hasExcludeBalancerLabel := node.Labels[LabelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
+// Will be removed in 1.17
+if _, hasExcludeBalancerLabel := node.Labels[labelAlphaNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
return false
}
+if _, hasExcludeBalancerLabel := node.Labels[labelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
+return false
+}
}
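
For operators, opting a node out of load-balancer targeting now means setting a label rather than relying on the node's name. A minimal sketch, assuming an in-cluster client, the pre-1.17 client-go signatures (no context argument, matching this release), and a hypothetical node named worker-1; it only takes effect when the ServiceNodeExclusion gate is on:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes the program runs inside the cluster; adjust config loading as needed.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// "worker-1" is a hypothetical node name.
	node, err := client.CoreV1().Nodes().Get("worker-1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if node.Labels == nil {
		node.Labels = map[string]string{}
	}
	// The label value is ignored; only its presence matters to the predicate above.
	node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = ""
	if _, err := client.CoreV1().Nodes().Update(node); err != nil {
		panic(err)
	}
}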

View File

@ -1396,3 +1396,37 @@ func TestPatchStatus(t *testing.T) {
})
}
}
func Test_getNodeConditionPredicate(t *testing.T) {
validNodeStatus := v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: "Test"}}}
tests := []struct {
name string
enableExclusion bool
enableLegacy bool
input *v1.Node
want bool
}{
{want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}},
{want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
{want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
{want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelAlphaNodeRoleExcludeBalancer: ""}}}},
{want: true, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
{want: true, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
{want: false, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
{want: false, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelAlphaNodeRoleExcludeBalancer: ""}}}},
{want: false, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, serviceNodeExclusionFeature, tt.enableExclusion)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, legacyNodeRoleBehaviorFeature, tt.enableLegacy)()
if result := getNodeConditionPredicate()(tt.input); result != tt.want {
t.Errorf("getNodeConditionPredicate() = %v, want %v", result, tt.want)
}
})
}
}

View File

@ -172,12 +172,24 @@ const (
// Enable pods to set sysctls on a pod
Sysctls featuregate.Feature = "Sysctls"
+// owner: @smarterclayton
+// alpha: v1.16
+//
+// Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18.
+LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior"
// owner: @brendandburns
// alpha: v1.9
//
// Enable nodes to exclude themselves from service load balancers
ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion"
+// owner: @smarterclayton
+// alpha: v1.16
+//
+// Enable nodes to exclude themselves from network disruption checks
+NodeDisruptionExclusion featuregate.Feature = "NodeDisruptionExclusion"
// owner: @jsafrane
// alpha: v1.9
//
@ -502,6 +514,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
TopologyManager: {Default: false, PreRelease: featuregate.Alpha},
ServiceNodeExclusion: {Default: false, PreRelease: featuregate.Alpha},
+NodeDisruptionExclusion: {Default: false, PreRelease: featuregate.Alpha},
MountContainers: {Default: false, PreRelease: featuregate.Alpha},
CSIDriverRegistry: {Default: true, PreRelease: featuregate.Beta},
CSINodeInfo: {Default: true, PreRelease: featuregate.Beta},
@ -570,5 +583,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
// features that enable backwards compatibility but are scheduled to be removed
// ...
HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha},
+LegacyNodeRoleBehavior: {Default: true, PreRelease: featuregate.Alpha},
}
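
To see how these specs behave, here is a small sketch using the k8s.io/component-base/featuregate package directly (a standalone gate rather than the process-wide DefaultFeatureGate, so it is safe to run anywhere):

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	gate := featuregate.NewFeatureGate()
	// Mirror the defaults registered above: legacy behavior on, the new exclusion off.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"LegacyNodeRoleBehavior":  {Default: true, PreRelease: featuregate.Alpha},
		"NodeDisruptionExclusion": {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled("LegacyNodeRoleBehavior"))  // true
	fmt.Println(gate.Enabled("NodeDisruptionExclusion")) // false

	// Operators flip gates with the --feature-gates flag; Set parses the same syntax.
	if err := gate.Set("LegacyNodeRoleBehavior=false,NodeDisruptionExclusion=true"); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled("LegacyNodeRoleBehavior"))  // false
	fmt.Println(gate.Enabled("NodeDisruptionExclusion")) // true
}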

View File

@ -47,7 +47,6 @@ filegroup(
"//pkg/util/selinux:all-srcs",
"//pkg/util/slice:all-srcs",
"//pkg/util/sysctl:all-srcs",
"//pkg/util/system:all-srcs",
"//pkg/util/tail:all-srcs",
"//pkg/util/taints:all-srcs",
"//pkg/util/tolerations:all-srcs",

View File

@ -113,6 +113,7 @@ filegroup(
"//test/e2e/scheduling:all-srcs",
"//test/e2e/servicecatalog:all-srcs",
"//test/e2e/storage:all-srcs",
"//test/e2e/system:all-srcs",
"//test/e2e/testing-manifests:all-srcs",
"//test/e2e/ui:all-srcs",
"//test/e2e/upgrades:all-srcs",

View File

@ -34,7 +34,6 @@ go_library(
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/service:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis/config:go_default_library",
"//pkg/kubelet/events:go_default_library",
@ -42,7 +41,6 @@ go_library(
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/taints:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume/util:go_default_library",
@ -96,6 +94,7 @@ go_library(
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/system:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

View File

@ -84,7 +84,7 @@ func TestFailureOutput(t *testing.T) {
output: "INFO: before\nFAIL: hard-coded error\nUnexpected error:\n <*errors.errorString>: {\n s: \"an error with a long, useless description\",\n }\n an error with a long, useless description\noccurred\nINFO: after\nFAIL: true is never false either\nExpected\n <bool>: true\nto equal\n <bool>: false\n",
failure: "hard-coded error\nUnexpected error:\n <*errors.errorString>: {\n s: \"an error with a long, useless description\",\n }\n an error with a long, useless description\noccurred",
// TODO: should start with k8s.io/kubernetes/test/e2e/framework/log_test.glob..func1.4()
stack: "\tutil.go:1369\nk8s.io/kubernetes/test/e2e/framework.ExpectNoError()\n\tutil.go:1363\nk8s.io/kubernetes/test/e2e/framework/log_test.glob..func1.4()\n\tlogger_test.go:49\nk8s.io/kubernetes/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync()\n\tlogger_test.go:65\n",
stack: "\tutil.go:1368\nk8s.io/kubernetes/test/e2e/framework.ExpectNoError()\n\tutil.go:1362\nk8s.io/kubernetes/test/e2e/framework/log_test.glob..func1.4()\n\tlogger_test.go:49\nk8s.io/kubernetes/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync()\n\tlogger_test.go:65\n",
},
testResult{
name: "[Top Level] log fails",

View File

@ -30,7 +30,6 @@ go_library(
"//pkg/kubelet/metrics:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/util/system:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@ -38,6 +37,7 @@ go_library(
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/e2e/system:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",

View File

@ -30,9 +30,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/master/ports"
schedulermetric "k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/util/system"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/system"
"github.com/onsi/gomega"
@ -210,7 +210,7 @@ func sendRestRequestToScheduler(c clientset.Interface, op, provider, cloudMaster
var masterRegistered = false
for _, node := range nodes.Items {
-if system.IsMasterNode(node.Name) {
+if system.DeprecatedMightBeMasterNode(node.Name) {
masterRegistered = true
}
}

View File

@ -24,7 +24,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/util/system"
"k8s.io/kubernetes/test/e2e/system"
"k8s.io/klog"
)
@ -63,7 +63,7 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
klog.Warning("Can't find any Nodes in the API server to grab metrics from")
}
for _, node := range nodeList.Items {
-if system.IsMasterNode(node.Name) {
+if system.DeprecatedMightBeMasterNode(node.Name) {
registeredMaster = true
masterName = node.Name
break

View File

@ -12,7 +12,6 @@ go_library(
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/system:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
@ -20,6 +19,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/system:go_default_library",
"//test/utils:go_default_library",
],
)

View File

@ -30,8 +30,8 @@ import (
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/system"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/system"
testutils "k8s.io/kubernetes/test/utils"
)
@ -371,7 +371,7 @@ func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList,
return nil, nil, fmt.Errorf("get nodes error: %s", err)
}
for _, n := range all.Items {
-if system.IsMasterNode(n.Name) {
+if system.DeprecatedMightBeMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)

View File

@ -26,8 +26,8 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/system"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/system"
testutils "k8s.io/kubernetes/test/utils"
)
@ -83,7 +83,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
-if !system.IsMasterNode(node.Name) {
+if !system.DeprecatedMightBeMasterNode(node.Name) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {

View File

@ -27,14 +27,14 @@ import (
"text/tabwriter"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/system"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/system"
)
// ResourceConstraint is a struct to hold constraints.
@ -280,10 +280,10 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
dnsNodes := make(map[string]bool)
for _, pod := range pods.Items {
-if (options.Nodes == MasterNodes) && !system.IsMasterNode(pod.Spec.NodeName) {
+if (options.Nodes == MasterNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) {
continue
}
-if (options.Nodes == MasterAndDNSNodes) && !system.IsMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
+if (options.Nodes == MasterAndDNSNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
continue
}
for _, container := range pod.Status.InitContainerStatuses {
@ -303,7 +303,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
for _, node := range nodeList.Items {
-if options.Nodes == AllNodes || system.IsMasterNode(node.Name) || dnsNodes[node.Name] {
+if options.Nodes == AllNodes || system.DeprecatedMightBeMasterNode(node.Name) || dnsNodes[node.Name] {
g.workerWg.Add(1)
g.workers = append(g.workers, resourceGatherWorker{
c: c,

View File

@ -74,7 +74,6 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/service"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
@ -1974,7 +1973,7 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
}
for i := range nodes.Items {
node := &nodes.Items[i]
-if _, hasMasterRoleLabel := node.ObjectMeta.Labels[service.LabelNodeRoleMaster]; hasMasterRoleLabel {
+if _, hasMasterRoleLabel := node.ObjectMeta.Labels["node-role.kubernetes.io/master"]; hasMasterRoleLabel {
// Kops clusters have masters with spec.unschedulable = false and
// node-role.kubernetes.io/master NoSchedule taint.
// Don't wait for them.

View File

@ -1,15 +1,10 @@
package(default_visibility = ["//visibility:public"])
-load(
-"@io_bazel_rules_go//go:def.bzl",
-"go_library",
-"go_test",
-)
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["system_utils.go"],
-importpath = "k8s.io/kubernetes/pkg/util/system",
+importpath = "k8s.io/kubernetes/test/e2e/system",
visibility = ["//visibility:public"],
)
go_test(
@ -33,4 +28,5 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -20,9 +20,11 @@ import (
"strings"
)
-// IsMasterNode returns true if given node is a registered master.
-// TODO: find a better way of figuring out if given node is a registered master.
-func IsMasterNode(nodeName string) bool {
+// DeprecatedMightBeMasterNode returns true if the given node is a registered master.
+// This code must not be updated to use node role labels, since node role labels
+// may not change behavior of the system.
+// DEPRECATED: use a label selector provided by test initialization.
+func DeprecatedMightBeMasterNode(nodeName string) bool {
// We are trying to capture the "master(-...)?$" regexp.
// However, using regexp.MatchString() would account for more than 35% of all
// space allocations in the ControllerManager, so we use a cheaper check instead.
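
The deprecation note above points tests toward label selectors. A minimal sketch of that replacement, assuming the cluster labels its control-plane nodes with node-role.kubernetes.io/master, a hypothetical kubeconfig path, and the pre-1.17 client-go List signature:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; e2e tests would get a clientset from framework init.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Select by the role label the cluster actually sets, instead of guessing from names.
	nodes, err := client.CoreV1().Nodes().List(metav1.ListOptions{
		LabelSelector: "node-role.kubernetes.io/master",
	})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		fmt.Println("control-plane node:", n.Name)
	}
}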

View File

@ -39,7 +39,7 @@ func TestIsMasterNode(t *testing.T) {
for _, tc := range testCases {
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: tc.input}}
-res := IsMasterNode(node.Name)
+res := DeprecatedMightBeMasterNode(node.Name)
if res != tc.result {
t.Errorf("case \"%s\": expected %t, got %t", tc.input, tc.result, res)
}

View File

@ -793,7 +793,6 @@ k8s.io/kubernetes/pkg/util/oom,vishh,0,
k8s.io/kubernetes/pkg/util/parsers,derekwaynecarr,1,
k8s.io/kubernetes/pkg/util/procfs,roberthbailey,1,
k8s.io/kubernetes/pkg/util/slice,quinton-hoole,0,
-k8s.io/kubernetes/pkg/util/system,mwielgus,0,
k8s.io/kubernetes/pkg/util/tail,zmerlynn,1,
k8s.io/kubernetes/pkg/util/taints,rrati,0,
k8s.io/kubernetes/pkg/util/term,davidopp,1,
