move labels to components which own the APIs
This commit is contained in: parent f0962765a7, commit 954eb3ceb9
@@ -14,6 +14,7 @@ go_library(
         "env.go",
         "register.go",
         "types.go",
+        "well_known_labels.go",
     ],
     tags = ["automanaged"],
     deps = [
cmd/kubeadm/app/apis/kubeadm/well_known_labels.go (new file, 43 lines)
@@ -0,0 +1,43 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeadm
+
+// Role labels are applied to Nodes to mark their purpose. In particular, we
+// usually want to distinguish the master, so that we can isolate privileged
+// pods and operations.
+//
+// Originally we relied on not registering the master, on the fact that the
+// master was Unschedulable, and on static manifests for master components.
+// But we now do register masters in many environments, are generally moving
+// away from static manifests (for better manageability), and are working
+// towards deprecating the unschedulable field (replacing it with taints &
+// tolerations instead).
+//
+// Even with tainting, a label remains the easiest way of making a positive
+// selection, so that pods can schedule only to master nodes for example, and
+// thus installations will likely define a label for their master nodes.
+//
+// So that we can recognize master nodes in subsequent places (such as
+// kubectl get nodes), we encourage installations to use the well-known labels.
+// We define NodeLabelRole, which is the preferred form, but we will also
+// recognize other forms that are known to be in widespread use
+// (NodeLabelKubeadmAlphaRole).
+
+const (
+	// NodeLabelKubeadmAlphaRole is a label that kubeadm applies to a Node as a hint that it has a particular purpose.
+	// Use of NodeLabelRole is preferred.
+	NodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role"
+)
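A minimal standalone sketch of how a consumer might select master nodes with the new kubeadm-owned constant. The "master" value and the helper name are illustrative assumptions, not part of this commit:

    package main

    import "fmt"

    // Key copied from the new cmd/kubeadm/app/apis/kubeadm/well_known_labels.go.
    const NodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role"

    // isMaster is a hypothetical helper: it reports whether a node's labels
    // mark it as a master via the kubeadm alpha role label.
    func isMaster(labels map[string]string) bool {
    	return labels[NodeLabelKubeadmAlphaRole] == "master"
    }

    func main() {
    	nodes := []map[string]string{
    		{NodeLabelKubeadmAlphaRole: "master"},
    		{"kubernetes.io/hostname": "worker-0"},
    	}
    	for i, labels := range nodes {
    		fmt.Printf("node %d: master=%v\n", i, isMaster(labels))
    	}
    }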
@@ -17,6 +17,7 @@ go_library(
     deps = [
         "//cmd/kubeadm/app/constants:go_default_library",
        "//pkg/bootstrap/api:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/node:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -29,6 +29,7 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api/v1"
     kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/util/node"
 )

@@ -47,7 +48,7 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error
         }
         // The node may appear to have no labels at first,
         // so we wait for it to get hostname label.
-        _, found := n.ObjectMeta.Labels[metav1.LabelHostname]
+        _, found := n.ObjectMeta.Labels[kubeletapis.LabelHostname]
         return found, nil
     })
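The wait above exists because a freshly registered Node can briefly have no labels; kubeadm keys off the kubelet-owned hostname label to know the object is fully populated. A self-contained sketch of that poll pattern, with getLabels standing in for a client GET of the Node (an assumption for illustration):

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // kubeletapis.LabelHostname, copied so the sketch compiles standalone.
    const labelHostname = "kubernetes.io/hostname"

    // waitForHostnameLabel polls until the node's labels contain the hostname
    // key, mirroring the wait in attemptToUpdateMasterRoleLabelsAndTaints.
    func waitForHostnameLabel(getLabels func() map[string]string, interval, timeout time.Duration) error {
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		if _, found := getLabels()[labelHostname]; found {
    			return nil
    		}
    		time.Sleep(interval)
    	}
    	return errors.New("timed out waiting for hostname label")
    }

    func main() {
    	calls := 0
    	getLabels := func() map[string]string {
    		if calls++; calls < 3 {
    			return map[string]string{} // label not applied yet
    		}
    		return map[string]string{labelHostname: "master-0"}
    	}
    	fmt.Println(waitForHostnameLabel(getLabels, 10*time.Millisecond, time.Second)) // <nil>
    }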
@@ -25,6 +25,7 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/controller:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -30,6 +30,7 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 )

 const (
@@ -117,23 +118,23 @@ func (self *ClusterClient) GetClusterZones() (zones []string, region string, err
 // Find the name of the zone in which a Node is running
 func getZoneNameForNode(node api.Node) (string, error) {
     for key, value := range node.Labels {
-        if key == metav1.LabelZoneFailureDomain {
+        if key == kubeletapis.LabelZoneFailureDomain {
             return value, nil
         }
     }
     return "", fmt.Errorf("Zone name for node %s not found. No label with key %s",
-        node.Name, metav1.LabelZoneFailureDomain)
+        node.Name, kubeletapis.LabelZoneFailureDomain)
 }

 // Find the name of the region in which a Node is running
 func getRegionNameForNode(node api.Node) (string, error) {
     for key, value := range node.Labels {
-        if key == metav1.LabelZoneRegion {
+        if key == kubeletapis.LabelZoneRegion {
             return value, nil
         }
     }
     return "", fmt.Errorf("Region name for node %s not found. No label with key %s",
-        node.Name, metav1.LabelZoneRegion)
+        node.Name, kubeletapis.LabelZoneRegion)
 }

 // Find the names of all zones and the region in which we have nodes in this cluster.
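These helpers scan every label to find one key; a direct Go map index performs the same lookup with less code. A sketch of the equivalent form (helper name is mine, constants copied from the kubelet apis package that owns them after this commit):

    package main

    import "fmt"

    // Copied from pkg/kubelet/apis/well_known_labels.go.
    const (
    	labelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
    	labelZoneRegion        = "failure-domain.beta.kubernetes.io/region"
    )

    // labelValue does what the range loops above do, via a direct map index.
    func labelValue(labels map[string]string, key string) (string, error) {
    	if v, ok := labels[key]; ok {
    		return v, nil
    	}
    	return "", fmt.Errorf("no label with key %s", key)
    }

    func main() {
    	labels := map[string]string{
    		labelZoneFailureDomain: "us-central1-a",
    		labelZoneRegion:        "us-central1",
    	}
    	zone, _ := labelValue(labels, labelZoneFailureDomain)
    	region, _ := labelValue(labels, labelZoneRegion)
    	fmt.Println(zone, region) // us-central1-a us-central1
    }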
@@ -3905,8 +3905,4 @@ const (
     // When the --hard-pod-affinity-weight scheduler flag is not specified,
     // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
     DefaultHardPodAffinitySymmetricWeight int = 1
-
-    // When the --failure-domains scheduler flag is not specified,
-    // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
-    DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion
 )
@@ -4486,10 +4486,6 @@ const (
     // When the --hard-pod-affinity-weight scheduler flag is not specified,
     // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
     DefaultHardPodAffinitySymmetricWeight int = 1
-
-    // When the --failure-domains scheduler flag is not specified,
-    // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
-    DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion
 )

 // Sysctl defines a kernel parameter to be set
@@ -25,6 +25,7 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/apis/componentconfig:go_default_library",
         "//pkg/client/leaderelection/resourcelock:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/kubelet/qos:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/master/ports:go_default_library",
@@ -27,6 +27,7 @@ import (
     kruntime "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/kubernetes/pkg/api"
     rl "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/kubelet/qos"
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/master/ports"
@@ -160,7 +161,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
         obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight
     }
     if obj.FailureDomains == "" {
-        obj.FailureDomains = api.DefaultFailureDomains
+        obj.FailureDomains = kubeletapis.DefaultFailureDomains
     }
     if obj.LockObjectNamespace == "" {
         obj.LockObjectNamespace = SchedulerDefaultLockObjectNamespace
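The defaulting function only fills fields that are still at their zero value, so a value set by the user (or an earlier defaulting pass) is never overwritten. A standalone sketch of the pattern; the struct is a cut-down stand-in for KubeSchedulerConfiguration and the namespace literal is an assumption about SchedulerDefaultLockObjectNamespace:

    package main

    import "fmt"

    // kubeletapis.DefaultFailureDomains expands to this list (see the new
    // pkg/kubelet/apis/well_known_labels.go later in this commit).
    const defaultFailureDomains = "kubernetes.io/hostname," +
    	"failure-domain.beta.kubernetes.io/zone," +
    	"failure-domain.beta.kubernetes.io/region"

    type schedulerConfig struct {
    	FailureDomains      string
    	LockObjectNamespace string
    }

    // setDefaults mirrors SetDefaults_KubeSchedulerConfiguration: only
    // zero-valued fields are filled in.
    func setDefaults(c *schedulerConfig) {
    	if c.FailureDomains == "" {
    		c.FailureDomains = defaultFailureDomains
    	}
    	if c.LockObjectNamespace == "" {
    		c.LockObjectNamespace = "kube-system" // assumed default namespace
    	}
    }

    func main() {
    	c := schedulerConfig{FailureDomains: "example.com/rack"}
    	setDefaults(&c)
    	fmt.Printf("%+v\n", c) // FailureDomains stays as the user set it
    }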
@@ -32,6 +32,7 @@ go_library(
         "//pkg/cloudprovider:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/credentialprovider/aws:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/volume:go_default_library",
         "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
         "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
@@ -46,7 +47,6 @@ go_library(
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/gopkg.in/gcfg.v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@@ -66,6 +66,7 @@ go_test(
     tags = ["automanaged"],
     deps = [
         "//pkg/api/v1:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
         "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library",
         "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
@@ -42,7 +42,6 @@ import (
     "github.com/golang/glog"
     "github.com/prometheus/client_golang/prometheus"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -50,6 +49,7 @@ import (
     "k8s.io/kubernetes/pkg/api/v1/service"
     "k8s.io/kubernetes/pkg/cloudprovider"
     "k8s.io/kubernetes/pkg/controller"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/volume"
 )

@@ -1794,12 +1794,12 @@ func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]strin
         return nil, fmt.Errorf("volume did not have AZ information: %q", info.VolumeId)
     }

-    labels[metav1.LabelZoneFailureDomain] = az
+    labels[kubeletapis.LabelZoneFailureDomain] = az
     region, err := azToRegion(az)
     if err != nil {
         return nil, err
     }
-    labels[metav1.LabelZoneRegion] = region
+    labels[kubeletapis.LabelZoneRegion] = region

     return labels, nil
 }
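GetVolumeLabels only gets an availability zone from EC2 and derives the region from it. A simplified standalone stand-in for the unexported azToRegion helper used above, assuming the usual AWS convention that an AZ is the region name plus a trailing zone letter (e.g. "us-east-1a" → "us-east-1"):

    package main

    import (
    	"fmt"
    	"strings"
    	"unicode"
    )

    // azToRegion strips the trailing zone letter(s) from an availability
    // zone to recover the region. Simplified sketch, not the upstream code.
    func azToRegion(az string) (string, error) {
    	region := strings.TrimRightFunc(az, unicode.IsLetter)
    	if region == "" || region == az {
    		return "", fmt.Errorf("invalid availability zone: %q", az)
    	}
    	return region, nil
    }

    func main() {
    	region, err := azToRegion("us-east-1a")
    	fmt.Println(region, err) // us-east-1 <nil>
    }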
@@ -35,6 +35,7 @@ import (
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/mock"
     "k8s.io/apimachinery/pkg/types"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 )

 const TestClusterId = "clusterid.test"
@@ -1083,8 +1084,8 @@ func TestGetVolumeLabels(t *testing.T) {

     assert.Nil(t, err, "Error creating Volume %v", err)
     assert.Equal(t, map[string]string{
-        metav1.LabelZoneFailureDomain: "us-east-1a",
-        metav1.LabelZoneRegion:        "us-east-1"}, labels)
+        kubeletapis.LabelZoneFailureDomain: "us-east-1a",
+        kubeletapis.LabelZoneRegion:        "us-east-1"}, labels)
     awsServices.ec2.AssertExpectations(t)
 }
@@ -41,6 +41,7 @@ go_library(
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/cloudprovider:go_default_library",
         "//pkg/controller:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/master/ports:go_default_library",
         "//pkg/util/net/sets:go_default_library",
         "//pkg/util/version:go_default_library",
@@ -80,9 +81,9 @@ go_test(
     deps = [
         "//pkg/api/v1:go_default_library",
         "//pkg/cloudprovider:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
         "//vendor/google.golang.org/api/googleapi:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],
 )
@@ -24,9 +24,9 @@ import (
     "strings"
     "time"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/kubernetes/pkg/cloudprovider"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/volume"

     "github.com/golang/glog"
@@ -313,8 +313,8 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
     }

     labels := make(map[string]string)
-    labels[metav1.LabelZoneFailureDomain] = zone
-    labels[metav1.LabelZoneRegion] = region
+    labels[kubeletapis.LabelZoneFailureDomain] = zone
+    labels[kubeletapis.LabelZoneRegion] = region

     return labels, nil
 }
@@ -22,8 +22,8 @@ import (
     "fmt"
     compute "google.golang.org/api/compute/v1"
     "google.golang.org/api/googleapi"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/pkg/cloudprovider"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 )

 func TestCreateDisk_Basic(t *testing.T) {
@@ -301,12 +301,12 @@ func TestGetAutoLabelsForPD_Basic(t *testing.T) {
     if err != nil {
         t.Error(err)
     }
-    if labels[metav1.LabelZoneFailureDomain] != zone {
+    if labels[kubeletapis.LabelZoneFailureDomain] != zone {
         t.Errorf("Failure domain is '%v', but zone is '%v'",
-            labels[metav1.LabelZoneFailureDomain], zone)
+            labels[kubeletapis.LabelZoneFailureDomain], zone)
     }
-    if labels[metav1.LabelZoneRegion] != "us-central1" {
-        t.Errorf("Region is '%v', but zone is 'us-central1'", labels[metav1.LabelZoneRegion])
+    if labels[kubeletapis.LabelZoneRegion] != "us-central1" {
+        t.Errorf("Region is '%v', but zone is 'us-central1'", labels[kubeletapis.LabelZoneRegion])
     }
 }

@@ -327,12 +327,12 @@ func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
     if err != nil {
         t.Error(err)
     }
-    if labels[metav1.LabelZoneFailureDomain] != zone {
+    if labels[kubeletapis.LabelZoneFailureDomain] != zone {
         t.Errorf("Failure domain is '%v', but zone is '%v'",
-            labels[metav1.LabelZoneFailureDomain], zone)
+            labels[kubeletapis.LabelZoneFailureDomain], zone)
     }
-    if labels[metav1.LabelZoneRegion] != "europe-west1" {
-        t.Errorf("Region is '%v', but zone is 'europe-west1'", labels[metav1.LabelZoneRegion])
+    if labels[kubeletapis.LabelZoneRegion] != "europe-west1" {
+        t.Errorf("Region is '%v', but zone is 'europe-west1'", labels[kubeletapis.LabelZoneRegion])
     }
 }

@@ -387,12 +387,12 @@ func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
     if err != nil {
         t.Error("Disk name and zone uniquely identifies a disk, yet an error is returned.")
     }
-    if labels[metav1.LabelZoneFailureDomain] != zone {
+    if labels[kubeletapis.LabelZoneFailureDomain] != zone {
         t.Errorf("Failure domain is '%v', but zone is '%v'",
-            labels[metav1.LabelZoneFailureDomain], zone)
+            labels[kubeletapis.LabelZoneFailureDomain], zone)
     }
-    if labels[metav1.LabelZoneRegion] != "us-west1" {
-        t.Errorf("Region is '%v', but zone is 'us-west1'", labels[metav1.LabelZoneRegion])
+    if labels[kubeletapis.LabelZoneRegion] != "us-west1" {
+        t.Errorf("Region is '%v', but zone is 'us-west1'", labels[kubeletapis.LabelZoneRegion])
     }
 }
@@ -20,7 +20,9 @@ go_library(
         "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
         "//pkg/client/retry:go_default_library",
         "//pkg/cloudprovider:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/node:go_default_library",
+        "//plugin/pkg/scheduler/algorithm:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -47,6 +49,8 @@ go_test(
         "//pkg/cloudprovider/providers/fake:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/node/testutil:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
+        "//plugin/pkg/scheduler/algorithm:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -37,7 +37,9 @@ import (
     coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
     clientretry "k8s.io/kubernetes/pkg/client/retry"
     "k8s.io/kubernetes/pkg/cloudprovider"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     nodeutil "k8s.io/kubernetes/pkg/util/node"
+    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 )

 var UpdateNodeSpecBackoff = wait.Backoff{
@@ -302,8 +304,8 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
         glog.Errorf("%v", err)
         return err
     } else if instanceType != "" {
-        glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelInstanceType, instanceType)
-        curNode.ObjectMeta.Labels[metav1.LabelInstanceType] = instanceType
+        glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
+        curNode.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType
     }

     // TODO(wlan0): Move this logic to the route controller using the node taint instead of condition
@@ -325,12 +327,12 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
         return fmt.Errorf("failed to get zone from cloud provider: %v", err)
     }
     if zone.FailureDomain != "" {
-        glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelZoneFailureDomain, zone.FailureDomain)
-        curNode.ObjectMeta.Labels[metav1.LabelZoneFailureDomain] = zone.FailureDomain
+        glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
+        curNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain
     }
     if zone.Region != "" {
-        glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelZoneRegion, zone.Region)
-        curNode.ObjectMeta.Labels[metav1.LabelZoneRegion] = zone.Region
+        glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
+        curNode.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region
     }
 }

@@ -353,7 +355,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {

 func getCloudTaint(taints []v1.Taint) *v1.Taint {
     for _, taint := range taints {
-        if taint.Key == metav1.TaintExternalCloudProvider {
+        if taint.Key == algorithm.TaintExternalCloudProvider {
             return &taint
         }
     }
@@ -406,7 +408,7 @@ func nodeAddressesChangeDetected(addressSet1, addressSet2 []v1.NodeAddress) bool
 func ensureNodeProvidedIPExists(node *v1.Node, nodeAddresses []v1.NodeAddress) (*v1.NodeAddress, bool) {
     var nodeIP *v1.NodeAddress
     nodeIPExists := false
-    if providedIP, ok := node.ObjectMeta.Annotations[metav1.AnnotationProvidedIPAddr]; ok {
+    if providedIP, ok := node.ObjectMeta.Annotations[kubeletapis.AnnotationProvidedIPAddr]; ok {
         nodeIPExists = true
         for i := range nodeAddresses {
             if nodeAddresses[i].Address == providedIP {
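One detail worth noting in getCloudTaint above: it returns the address of the range variable, i.e. a pointer to a copy of the matching taint, which is fine for read-only use but does not alias the slice element. A standalone sketch that indexes the slice instead, with local stand-ins for v1.Taint and the algorithm taint key (the key string is an assumption about the constant's value; consult the algorithm package for the real one):

    package main

    import "fmt"

    // Local stand-in for v1.Taint.
    type taint struct {
    	Key, Value, Effect string
    }

    // Assumed value of algorithm.TaintExternalCloudProvider.
    const taintExternalCloudProvider = "node.cloudprovider.kubernetes.io/uninitialized"

    // getCloudTaint scans a node's taints for the cloud-provider
    // initialization key and returns a pointer into the slice on a match.
    func getCloudTaint(taints []taint) *taint {
    	for i := range taints {
    		if taints[i].Key == taintExternalCloudProvider {
    			return &taints[i]
    		}
    	}
    	return nil
    }

    func main() {
    	ts := []taint{{Key: taintExternalCloudProvider, Value: "true", Effect: "NoSchedule"}}
    	fmt.Println(getCloudTaint(ts) != nil) // true
    }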
@@ -36,6 +36,8 @@ import (
     fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/node/testutil"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
+    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 )

 // This test checks that the node is deleted when kubelet stops reporting
@@ -147,7 +149,7 @@ func TestNodeInitialized(t *testing.T) {
     Spec: v1.NodeSpec{
         Taints: []v1.Taint{
             {
-                Key:    metav1.TaintExternalCloudProvider,
+                Key:    algorithm.TaintExternalCloudProvider,
                 Value:  "true",
                 Effect: v1.TaintEffectNoSchedule,
             },
@@ -294,7 +296,7 @@ func TestGCECondition(t *testing.T) {
     Spec: v1.NodeSpec{
         Taints: []v1.Taint{
             {
-                Key:    metav1.TaintExternalCloudProvider,
+                Key:    algorithm.TaintExternalCloudProvider,
                 Value:  "true",
                 Effect: v1.TaintEffectNoSchedule,
             },
@@ -386,7 +388,7 @@ func TestZoneInitialized(t *testing.T) {
     Spec: v1.NodeSpec{
         Taints: []v1.Taint{
             {
-                Key:    metav1.TaintExternalCloudProvider,
+                Key:    algorithm.TaintExternalCloudProvider,
                 Value:  "true",
                 Effect: v1.TaintEffectNoSchedule,
             },
@@ -446,11 +448,11 @@ func TestZoneInitialized(t *testing.T) {
         t.Errorf("Node label for Region and Zone were not set")
     }

-    if fnh.UpdatedNodes[0].ObjectMeta.Labels[metav1.LabelZoneRegion] != "us-west" {
+    if fnh.UpdatedNodes[0].ObjectMeta.Labels[kubeletapis.LabelZoneRegion] != "us-west" {
         t.Errorf("Node Region not correctly updated")
     }

-    if fnh.UpdatedNodes[0].ObjectMeta.Labels[metav1.LabelZoneFailureDomain] != "us-west-1a" {
+    if fnh.UpdatedNodes[0].ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] != "us-west-1a" {
         t.Errorf("Node FailureDomain not correctly updated")
     }
 }
@@ -484,7 +486,7 @@ func TestNodeAddresses(t *testing.T) {
                 Effect: v1.TaintEffectNoSchedule,
             },
             {
-                Key:    metav1.TaintExternalCloudProvider,
+                Key:    algorithm.TaintExternalCloudProvider,
                 Value:  "true",
                 Effect: v1.TaintEffectNoSchedule,
             },
@@ -576,7 +578,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels:            map[string]string{},
     Annotations: map[string]string{
-        metav1.AnnotationProvidedIPAddr: "10.0.0.1",
+        kubeletapis.AnnotationProvidedIPAddr: "10.0.0.1",
     },
 },
 Status: v1.NodeStatus{
@@ -603,7 +605,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
                 Effect: v1.TaintEffectNoSchedule,
             },
             {
-                Key:    metav1.TaintExternalCloudProvider,
+                Key:    algorithm.TaintExternalCloudProvider,
                 Value:  "true",
                 Effect: v1.TaintEffectNoSchedule,
             },
@@ -72,6 +72,7 @@ go_test(
         "//pkg/controller:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/securitycontext:go_default_library",
+        "//plugin/pkg/scheduler/algorithm:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -855,7 +855,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
     // to survive taint-based eviction enforced by NodeController
     // when node turns not ready.
     v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
-        Key:      metav1.TaintNodeNotReady,
+        Key:      algorithm.TaintNodeNotReady,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoExecute,
     })
@@ -865,7 +865,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
     // to survive taint-based eviction enforced by NodeController
     // when node turns unreachable.
     v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
-        Key:      metav1.TaintNodeUnreachable,
+        Key:      algorithm.TaintNodeUnreachable,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoExecute,
     })
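The add-or-update call keeps nodeShouldRunDaemonPod idempotent: re-running it never stacks duplicate tolerations. A sketch of those semantics with simplified types; matching here by key and effect is an approximation of the upstream helper's logic, and the key literal is an assumed value of algorithm.TaintNodeNotReady:

    package main

    import "fmt"

    type toleration struct {
    	Key, Operator, Effect string
    }

    // addOrUpdateToleration replaces an existing toleration with the same
    // key and effect, or appends a new one, so repeated calls are idempotent.
    func addOrUpdateToleration(ts []toleration, t toleration) []toleration {
    	for i := range ts {
    		if ts[i].Key == t.Key && ts[i].Effect == t.Effect {
    			ts[i] = t
    			return ts
    		}
    	}
    	return append(ts, t)
    }

    func main() {
    	var ts []toleration
    	nr := toleration{Key: "node.alpha.kubernetes.io/notReady", Operator: "Exists", Effect: "NoExecute"}
    	ts = addOrUpdateToleration(ts, nr)
    	ts = addOrUpdateToleration(ts, nr) // second call updates in place
    	fmt.Println(len(ts))               // 1
    }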
@@ -44,6 +44,7 @@ import (
     "k8s.io/kubernetes/pkg/controller"
     kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/securitycontext"
+    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 )

 var (
@@ -61,13 +62,13 @@ var (

 var (
     nodeNotReady = []v1.Taint{{
-        Key:       metav1.TaintNodeNotReady,
+        Key:       algorithm.TaintNodeNotReady,
         Effect:    v1.TaintEffectNoExecute,
         TimeAdded: metav1.Now(),
     }}

     nodeUnreachable = []v1.Taint{{
-        Key:       metav1.TaintNodeUnreachable,
+        Key:       algorithm.TaintNodeUnreachable,
         Effect:    v1.TaintEffectNoExecute,
         TimeAdded: metav1.Now(),
     }}
@@ -19,6 +19,7 @@ go_library(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/util/labels:go_default_library",
+        "//plugin/pkg/scheduler/algorithm:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],
 )
@@ -26,6 +26,7 @@ import (
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
+    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 )

 // GetPodTemplateWithHash returns copy of provided template with additional
@@ -38,7 +39,7 @@ func GetPodTemplateWithGeneration(template v1.PodTemplateSpec, generation int64)
     // to survive taint-based eviction enforced by NodeController
     // when node turns not ready.
     v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
-        Key:      metav1.TaintNodeNotReady,
+        Key:      algorithm.TaintNodeNotReady,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoExecute,
     })
@@ -48,7 +49,7 @@ func GetPodTemplateWithGeneration(template v1.PodTemplateSpec, generation int64)
     // to survive taint-based eviction enforced by NodeController
     // when node turns unreachable.
     v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
-        Key:      metav1.TaintNodeUnreachable,
+        Key:      algorithm.TaintNodeUnreachable,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoExecute,
     })
@@ -43,6 +43,7 @@ go_library(
         "//pkg/util/node:go_default_library",
         "//pkg/util/system:go_default_library",
         "//pkg/util/version:go_default_library",
+        "//plugin/pkg/scheduler/algorithm:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
@@ -89,6 +90,7 @@ go_test(
         "//pkg/cloudprovider/providers/fake:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/node/testutil:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/node:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
@@ -53,6 +53,7 @@ import (
     utilnode "k8s.io/kubernetes/pkg/util/node"
     "k8s.io/kubernetes/pkg/util/system"
     utilversion "k8s.io/kubernetes/pkg/util/version"
+    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"

     "github.com/golang/glog"
 )
@@ -71,12 +72,12 @@ var (
     podStatusReconciliationVersion = utilversion.MustParseSemantic("v1.2.0")

     UnreachableTaintTemplate = &v1.Taint{
-        Key:    metav1.TaintNodeUnreachable,
+        Key:    algorithm.TaintNodeUnreachable,
         Effect: v1.TaintEffectNoExecute,
     }

     NotReadyTaintTemplate = &v1.Taint{
-        Key:    metav1.TaintNodeNotReady,
+        Key:    algorithm.TaintNodeNotReady,
         Effect: v1.TaintEffectNoExecute,
     }
 )
@@ -41,6 +41,7 @@ import (
     fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/node/testutil"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/util/node"
 )

@@ -166,8 +167,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: fakeNow,
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 },
@@ -176,8 +177,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -210,8 +211,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -230,8 +231,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -274,8 +275,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -294,8 +295,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -365,8 +366,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -385,8 +386,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -429,8 +430,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -449,8 +450,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -493,8 +494,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -513,8 +514,8 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -654,8 +655,8 @@ func TestPodStatusChange(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -674,8 +675,8 @@ func TestPodStatusChange(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -810,8 +811,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -830,8 +831,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -865,8 +866,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -885,8 +886,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region2",
-        metav1.LabelZoneFailureDomain: "zone2",
+        kubeletapis.LabelZoneRegion:        "region2",
+        kubeletapis.LabelZoneFailureDomain: "zone2",
     },
 },
 Status: v1.NodeStatus{
@@ -927,8 +928,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -947,8 +948,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone2",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone2",
     },
 },
 Status: v1.NodeStatus{
@@ -988,8 +989,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1008,8 +1009,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node-master",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1047,8 +1048,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1067,8 +1068,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone2",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone2",
     },
 },
 Status: v1.NodeStatus{
@@ -1109,8 +1110,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1129,8 +1130,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1149,8 +1150,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node2",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1169,8 +1170,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node3",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1189,8 +1190,8 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     Name:              "node4",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1873,8 +1874,8 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
     Name:              "node0",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -1896,8 +1897,8 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
     Name:              "node1",
     CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
     Labels: map[string]string{
-        metav1.LabelZoneRegion:        "region1",
-        metav1.LabelZoneFailureDomain: "zone1",
+        kubeletapis.LabelZoneRegion:        "region1",
+        kubeletapis.LabelZoneFailureDomain: "zone1",
     },
 },
 Status: v1.NodeStatus{
@@ -52,6 +52,7 @@ go_library(
         "//pkg/cloudprovider:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/fieldpath:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/kubelet/apis/cri:go_default_library",
         "//pkg/kubelet/cadvisor:go_default_library",
         "//pkg/kubelet/certificate:go_default_library",
@@ -109,6 +110,7 @@ go_library(
         "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/types:go_default_library",
         "//pkg/volume/util/volumehelper:go_default_library",
+        "//plugin/pkg/scheduler/algorithm:go_default_library",
         "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
         "//third_party/forked/golang/expansion:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
@@ -242,8 +244,7 @@ filegroup(
     name = "all-srcs",
     srcs = [
         ":package-srcs",
-        "//pkg/kubelet/apis/cri:all-srcs",
-        "//pkg/kubelet/apis/stats/v1alpha1:all-srcs",
+        "//pkg/kubelet/apis:all-srcs",
         "//pkg/kubelet/cadvisor:all-srcs",
         "//pkg/kubelet/certificate:all-srcs",
         "//pkg/kubelet/client:all-srcs",
pkg/kubelet/apis/BUILD (new file, 34 lines)
@@ -0,0 +1,34 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "well_known_annotations.go",
+        "well_known_labels.go",
+    ],
+    tags = ["automanaged"],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [
+        ":package-srcs",
+        "//pkg/kubelet/apis/cri:all-srcs",
+        "//pkg/kubelet/apis/stats/v1alpha1:all-srcs",
+    ],
+    tags = ["automanaged"],
+)
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package v1
+package apis

 const (
     // When kubelet is started with the "external" cloud provider, then
pkg/kubelet/apis/well_known_labels.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apis
+
+const (
+	LabelHostname          = "kubernetes.io/hostname"
+	LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
+	LabelZoneRegion        = "failure-domain.beta.kubernetes.io/region"
+
+	LabelInstanceType = "beta.kubernetes.io/instance-type"
+
+	LabelOS   = "beta.kubernetes.io/os"
+	LabelArch = "beta.kubernetes.io/arch"
+)
+
+// When the --failure-domains scheduler flag is not specified,
+// DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
+var DefaultFailureDomains string = LabelHostname + "," + LabelZoneFailureDomain + "," + LabelZoneRegion
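For reference, the moved default expands to the same comma-separated key list the scheduler previously got from pkg/api. A tiny standalone check (constants copied from the file above):

    package main

    import "fmt"

    const (
    	labelHostname          = "kubernetes.io/hostname"
    	labelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
    	labelZoneRegion        = "failure-domain.beta.kubernetes.io/region"
    )

    var defaultFailureDomains = labelHostname + "," + labelZoneFailureDomain + "," + labelZoneRegion

    func main() {
    	fmt.Println(defaultFailureDomains)
    	// kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region
    }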
@@ -35,6 +35,7 @@ import (
     "k8s.io/kubernetes/pkg/api/v1"
     v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
     "k8s.io/kubernetes/pkg/cloudprovider"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/kubelet/cadvisor"
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/pkg/kubelet/util"
@@ -42,6 +43,7 @@ import (
     nodeutil "k8s.io/kubernetes/pkg/util/node"
     "k8s.io/kubernetes/pkg/version"
     "k8s.io/kubernetes/pkg/volume/util/volumehelper"
+    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 )

 const (
@@ -193,9 +195,9 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
     ObjectMeta: metav1.ObjectMeta{
         Name: string(kl.nodeName),
         Labels: map[string]string{
-            metav1.LabelHostname: kl.hostname,
-            metav1.LabelOS:       goruntime.GOOS,
-            metav1.LabelArch:     goruntime.GOARCH,
+            kubeletapis.LabelHostname: kl.hostname,
+            kubeletapis.LabelOS:       goruntime.GOOS,
+            kubeletapis.LabelArch:     goruntime.GOARCH,
         },
     },
     Spec: v1.NodeSpec{
@@ -214,7 +216,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
     }
     if kl.externalCloudProvider {
         taint := v1.Taint{
-            Key:    metav1.TaintExternalCloudProvider,
+            Key:    algorithm.TaintExternalCloudProvider,
             Value:  "true",
             Effect: v1.TaintEffectNoSchedule,
         }
@@ -296,8 +298,8 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
         return nil, err
     }
     if instanceType != "" {
-        glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelInstanceType, instanceType)
-        node.ObjectMeta.Labels[metav1.LabelInstanceType] = instanceType
+        glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
+        node.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType
     }
     // If the cloud has zone information, label the node with the zone information
     zones, ok := kl.cloud.Zones()
@@ -307,12 +309,12 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
         return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err)
     }
     if zone.FailureDomain != "" {
-        glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelZoneFailureDomain, zone.FailureDomain)
-        node.ObjectMeta.Labels[metav1.LabelZoneFailureDomain] = zone.FailureDomain
+        glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
+        node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain
     }
     if zone.Region != "" {
-        glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelZoneRegion, zone.Region)
-        node.ObjectMeta.Labels[metav1.LabelZoneRegion] = zone.Region
+        glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
+        node.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region
     }
 }
 } else {
@@ -470,7 +472,7 @@ func (kl *Kubelet) setNodeAddress(node *v1.Node) error {
     // 4) Try to get the IP from the network interface used as default gateway
     if kl.nodeIP != nil {
         ipAddr = kl.nodeIP
-        node.ObjectMeta.Annotations[metav1.AnnotationProvidedIPAddr] = kl.nodeIP.String()
+        node.ObjectMeta.Annotations[kubeletapis.AnnotationProvidedIPAddr] = kl.nodeIP.String()
     } else if addr := net.ParseIP(kl.hostname); addr != nil {
         ipAddr = addr
     } else {
@@ -58,6 +58,7 @@ go_library(
     ],
     tags = ["automanaged"],
     deps = [
+        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
         "//federation/apis/federation:go_default_library",
         "//federation/client/clientset_generated/federation_internalclientset:go_default_library",
         "//pkg/api:go_default_library",
@@ -29,6 +29,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/sets"
+    "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     "k8s.io/kubernetes/federation/apis/federation"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/events"
@@ -1163,10 +1164,7 @@ func getNodeExternalIP(node *api.Node) string {
 // * a kubeadm.alpha.kubernetes.io/role label
 // If no role is found, ("", nil) is returned
 func findNodeRole(node *api.Node) string {
-    if role := node.Labels[metav1.NodeLabelRole]; role != "" {
-        return role
-    }
-    if role := node.Labels[metav1.NodeLabelKubeadmAlphaRole]; role != "" {
+    if role := node.Labels[kubeadm.NodeLabelKubeadmAlphaRole]; role != "" {
         return role
     }
     // No role found
@@ -692,26 +692,6 @@ func TestPrintNodeStatus(t *testing.T) {
         },
         status: "Unknown,SchedulingDisabled",
     },
-    {
-        node: api.Node{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:   "foo10",
-                Labels: map[string]string{"kubernetes.io/role": "master"},
-            },
-            Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}},
-        },
-        status: "Ready,master",
-    },
-    {
-        node: api.Node{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:   "foo11",
-                Labels: map[string]string{"kubernetes.io/role": "node"},
-            },
-            Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}},
-        },
-        status: "Ready,node",
-    },
     {
         node: api.Node{
             ObjectMeta: metav1.ObjectMeta{
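With metav1.NodeLabelRole gone, findNodeRole now recognizes only the kubeadm alpha label, which is also why the two "kubernetes.io/role" cases above are deleted from TestPrintNodeStatus. A standalone sketch of the resulting behaviour:

    package main

    import "fmt"

    // kubeadm.NodeLabelKubeadmAlphaRole, copied for a standalone example.
    const nodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role"

    // findNodeRole, after this change, consults only the kubeadm alpha role
    // label; an empty result means no role was found.
    func findNodeRole(labels map[string]string) string {
    	if role := labels[nodeLabelKubeadmAlphaRole]; role != "" {
    		return role
    	}
    	return ""
    }

    func main() {
    	fmt.Println(findNodeRole(map[string]string{nodeLabelKubeadmAlphaRole: "master"})) // master
    	fmt.Println(findNodeRole(map[string]string{"kubernetes.io/role": "node"}))        // "" (no longer recognized)
    }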
@@ -16,6 +16,7 @@ go_library(
         "//pkg/api:go_default_library",
         "//pkg/api/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -30,6 +31,7 @@ go_test(
     tags = ["automanaged"],
     deps = [
         "//pkg/api/v1:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],
 )
@@ -31,6 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 )

 const (
@@ -64,7 +65,7 @@ func GetPreferredNodeAddress(node *v1.Node, preferredAddressTypes []v1.NodeAddre
     // If hostname was requested and no Hostname address was registered...
     if addressType == v1.NodeHostName {
         // ...fall back to the kubernetes.io/hostname label for compatibility with kubelets before 1.5
-        if hostname, ok := node.Labels[metav1.LabelHostname]; ok && len(hostname) > 0 {
+        if hostname, ok := node.Labels[kubeletapis.LabelHostname]; ok && len(hostname) > 0 {
             return hostname, nil
         }
     }
@@ -116,8 +117,8 @@ func GetZoneKey(node *v1.Node) string {
         return ""
     }

-    region, _ := labels[metav1.LabelZoneRegion]
-    failureDomain, _ := labels[metav1.LabelZoneFailureDomain]
+    region, _ := labels[kubeletapis.LabelZoneRegion]
+    failureDomain, _ := labels[kubeletapis.LabelZoneFailureDomain]

     if region == "" && failureDomain == "" {
         return ""
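GetZoneKey folds the two labels into one map key so callers can bucket nodes per (region, zone) pair. A standalone sketch of the combination; the NUL-padded separator follows the upstream implementation's approach of keeping the key unambiguous even if a label value contained a colon:

    package main

    import "fmt"

    const (
    	labelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
    	labelZoneRegion        = "failure-domain.beta.kubernetes.io/region"
    )

    // getZoneKey combines region and failure domain into a single key.
    func getZoneKey(labels map[string]string) string {
    	region := labels[labelZoneRegion]
    	failureDomain := labels[labelZoneFailureDomain]
    	if region == "" && failureDomain == "" {
    		return ""
    	}
    	return region + ":\x00:" + failureDomain
    }

    func main() {
    	fmt.Printf("%q\n", getZoneKey(map[string]string{
    		labelZoneRegion:        "us-central1",
    		labelZoneFailureDomain: "us-central1-b",
    	})) // "us-central1:\x00:us-central1-b"
    }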
@@ -21,6 +21,7 @@ import (

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/pkg/api/v1"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 )

 func TestGetPreferredAddress(t *testing.T) {
@@ -52,7 +53,7 @@ func TestGetPreferredAddress(t *testing.T) {
         ExpectAddress: "1.2.3.5",
     },
     "found hostname address": {
-        Labels: map[string]string{metav1.LabelHostname: "label-hostname"},
+        Labels: map[string]string{kubeletapis.LabelHostname: "label-hostname"},
         Addresses: []v1.NodeAddress{
             {Type: v1.NodeExternalIP, Address: "1.2.3.5"},
             {Type: v1.NodeHostName, Address: "status-hostname"},
@@ -61,7 +62,7 @@ func TestGetPreferredAddress(t *testing.T) {
         ExpectAddress: "status-hostname",
     },
     "found label address": {
-        Labels: map[string]string{metav1.LabelHostname: "label-hostname"},
+        Labels: map[string]string{kubeletapis.LabelHostname: "label-hostname"},
         Addresses: []v1.NodeAddress{
             {Type: v1.NodeExternalIP, Address: "1.2.3.5"},
         },
@@ -23,6 +23,7 @@ go_library(
         "//pkg/cloudprovider:go_default_library",
         "//pkg/cloudprovider/providers/openstack:go_default_library",
         "//pkg/cloudprovider/providers/rackspace:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/exec:go_default_library",
         "//pkg/util/keymutex:go_default_library",
         "//pkg/util/mount:go_default_library",
@@ -30,6 +30,7 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/util/exec"
     "k8s.io/kubernetes/pkg/volume"
 )
@@ -149,7 +150,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
         return zones, err
     }
     for _, node := range nodes.Items {
-        if zone, ok := node.Labels[metav1.LabelZoneFailureDomain]; ok {
+        if zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok {
             zones.Insert(zone)
         }
     }
@@ -210,7 +211,7 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s

     // these are needed that pod is spawning to same AZ
     volumeLabels = make(map[string]string)
-    volumeLabels[metav1.LabelZoneFailureDomain] = volumeAZ
+    volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ

     return volumeID, volSizeGB, volumeLabels, nil
 }
@ -18,6 +18,7 @@ go_library(
        "//pkg/apis/componentconfig/v1alpha1:go_default_library",
        "//pkg/client/leaderelection:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//plugin/pkg/scheduler/factory:go_default_library",
        "//vendor/github.com/spf13/pflag:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
@ -25,6 +25,7 @@ import (
    "k8s.io/kubernetes/pkg/apis/componentconfig"
    "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
    "k8s.io/kubernetes/pkg/client/leaderelection"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/plugin/pkg/scheduler/factory"

    // add the kubernetes feature gates
@ -88,7 +89,7 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
        "RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding "+
        "to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule.")
    fs.MarkDeprecated("hard-pod-affinity-symmetric-weight", "This option was moved to the policy configuration file")
    fs.StringVar(&s.FailureDomains, "failure-domains", api.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.")
    fs.StringVar(&s.FailureDomains, "failure-domains", kubeletapis.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.")
    fs.MarkDeprecated("failure-domains", "Doesn't have any effect. Will be removed in future version.")
    leaderelection.BindFlags(&s.LeaderElection, fs)
    utilfeature.DefaultFeatureGate.AddFlag(fs)
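The default carried by the flag is unchanged by the move. Assuming kubeletapis.DefaultFailureDomains keeps the value shown in the pkg/api/v1 constants removed further below, this small sketch prints the expanded key list:

package main

import (
    "fmt"

    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

func main() {
    // Expands to the three well-known keys:
    // kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region
    fmt.Println(kubeletapis.DefaultFailureDomains)
}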
@ -17,8 +17,8 @@ go_library(
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
    ],
)
@ -30,6 +30,7 @@ go_test(
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
@ -21,9 +21,9 @@ import (
    "io"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/kubernetes/pkg/api"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// Register registers a plugin
@ -45,7 +45,7 @@ func NewInterPodAntiAffinity() admission.Interface {
    }
}

// Admit will deny any pod that defines AntiAffinity topology key other than metav1.LabelHostname i.e. "kubernetes.io/hostname"
// Admit will deny any pod that defines AntiAffinity topology key other than kubeletapis.LabelHostname i.e. "kubernetes.io/hostname"
// in requiredDuringSchedulingRequiredDuringExecution and requiredDuringSchedulingIgnoredDuringExecution.
func (p *plugin) Admit(attributes admission.Attributes) (err error) {
    // Ignore all calls to subresources or resources other than pods.
@ -67,8 +67,8 @@ func (p *plugin) Admit(attributes admission.Attributes) (err error) {
    //  podAntiAffinityTerms = append(podAntiAffinityTerms, affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
    //}
    for _, v := range podAntiAffinityTerms {
        if v.TopologyKey != metav1.LabelHostname {
            return apierrors.NewForbidden(attributes.GetResource().GroupResource(), pod.Name, fmt.Errorf("affinity.PodAntiAffinity.RequiredDuringScheduling has TopologyKey %v but only key %v is allowed", v.TopologyKey, metav1.LabelHostname))
        if v.TopologyKey != kubeletapis.LabelHostname {
            return apierrors.NewForbidden(attributes.GetResource().GroupResource(), pod.Name, fmt.Errorf("affinity.PodAntiAffinity.RequiredDuringScheduling has TopologyKey %v but only key %v is allowed", v.TopologyKey, kubeletapis.LabelHostname))
        }
    }
}
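A compact illustration of the rule this Admit hunk enforces. admitTopologyKey is a made-up stand-in for the plugin's internal check, not part of its API:

package main

import (
    "fmt"

    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// admitTopologyKey is a hypothetical reduction of the check above: hard pod
// anti-affinity may only use the hostname topology key.
func admitTopologyKey(key string) error {
    if key != kubeletapis.LabelHostname {
        return fmt.Errorf("TopologyKey %v denied; only %v is allowed", key, kubeletapis.LabelHostname)
    }
    return nil
}

func main() {
    fmt.Println(admitTopologyKey(kubeletapis.LabelHostname))          // <nil>
    fmt.Println(admitTopologyKey(kubeletapis.LabelZoneFailureDomain)) // denied
}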
@ -23,6 +23,7 @@ import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/kubernetes/pkg/api"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// ensures the hard PodAntiAffinity is denied if it defines TopologyKey other than kubernetes.io/hostname.
@ -98,7 +99,7 @@ func TestInterPodAffinityAdmission(t *testing.T) {
                        },
                    },
                },
                TopologyKey: metav1.LabelHostname,
                TopologyKey: kubeletapis.LabelHostname,
            },
        },
    },
@ -120,7 +121,7 @@ func TestInterPodAffinityAdmission(t *testing.T) {
                        },
                    },
                },
                TopologyKey: metav1.LabelHostname,
                TopologyKey: kubeletapis.LabelHostname,
            },
        },
    },
@ -185,7 +186,7 @@ func TestInterPodAffinityAdmission(t *testing.T) {
                        },
                    },
                },
                TopologyKey: metav1.LabelHostname,
                TopologyKey: kubeletapis.LabelHostname,
            }, {
                LabelSelector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
@ -207,7 +208,7 @@ func TestInterPodAffinityAdmission(t *testing.T) {
                        },
                    },
                },
                TopologyKey: metav1.LabelHostname,
                TopologyKey: kubeletapis.LabelHostname,
            },
        },
    },
@ -16,7 +16,7 @@ limitations under the License.

// LimitPodHardAntiAffinityTopology admission controller rejects any pod
// that specifies "hard" (RequiredDuringScheduling) anti-affinity
// with a TopologyKey other than metav1.LabelHostname.
// with a TopologyKey other than kubeletapis.LabelHostname.
// Because anti-affinity is symmetric, without this admission controller,
// a user could maliciously or accidentally specify that their pod (once it has scheduled)
// should block other pods from scheduling into the same zone or some other large topology,
@ -16,7 +16,7 @@ go_test(
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/helper:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
    ],
)
@ -28,8 +28,8 @@ go_library(
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/helper:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
    ],
)
@ -22,10 +22,10 @@ import (
    "io"

    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/helper"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

var (
@ -82,12 +82,12 @@ func (p *plugin) Admit(attributes admission.Attributes) (err error) {
    toleratesNodeNotReady := false
    toleratesNodeUnreachable := false
    for _, toleration := range tolerations {
        if (toleration.Key == metav1.TaintNodeNotReady || len(toleration.Key) == 0) &&
        if (toleration.Key == algorithm.TaintNodeNotReady || len(toleration.Key) == 0) &&
            (toleration.Effect == api.TaintEffectNoExecute || len(toleration.Effect) == 0) {
            toleratesNodeNotReady = true
        }

        if (toleration.Key == metav1.TaintNodeUnreachable || len(toleration.Key) == 0) &&
        if (toleration.Key == algorithm.TaintNodeUnreachable || len(toleration.Key) == 0) &&
            (toleration.Effect == api.TaintEffectNoExecute || len(toleration.Effect) == 0) {
            toleratesNodeUnreachable = true
        }
@ -100,7 +100,7 @@ func (p *plugin) Admit(attributes admission.Attributes) (err error) {

    if !toleratesNodeNotReady {
        helper.AddOrUpdateTolerationInPod(pod, &api.Toleration{
            Key: metav1.TaintNodeNotReady,
            Key: algorithm.TaintNodeNotReady,
            Operator: api.TolerationOpExists,
            Effect: api.TaintEffectNoExecute,
            TolerationSeconds: defaultNotReadyTolerationSeconds,
@ -109,7 +109,7 @@ func (p *plugin) Admit(attributes admission.Attributes) (err error) {

    if !toleratesNodeUnreachable {
        helper.AddOrUpdateTolerationInPod(pod, &api.Toleration{
            Key: metav1.TaintNodeUnreachable,
            Key: algorithm.TaintNodeUnreachable,
            Operator: api.TolerationOpExists,
            Effect: api.TaintEffectNoExecute,
            TolerationSeconds: defaultUnreachableTolerationSeconds,
@ -19,10 +19,10 @@ package defaulttolerationseconds

import (
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/helper"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

func TestForgivenessAdmission(t *testing.T) {
@ -47,13 +47,13 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeNotReady,
                        Key: algorithm.TaintNodeNotReady,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: &defaultTolerationSeconds,
                    },
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: &defaultTolerationSeconds,
@ -88,13 +88,13 @@ func TestForgivenessAdmission(t *testing.T) {
                        TolerationSeconds: genTolerationSeconds(700),
                    },
                    {
                        Key: metav1.TaintNodeNotReady,
                        Key: algorithm.TaintNodeNotReady,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: &defaultTolerationSeconds,
                    },
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: &defaultTolerationSeconds,
@ -109,7 +109,7 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeNotReady,
                        Key: algorithm.TaintNodeNotReady,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(700),
@ -121,13 +121,13 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeNotReady,
                        Key: algorithm.TaintNodeNotReady,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(700),
                    },
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: &defaultTolerationSeconds,
@ -142,7 +142,7 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(700),
@ -154,13 +154,13 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(700),
                    },
                    {
                        Key: metav1.TaintNodeNotReady,
                        Key: algorithm.TaintNodeNotReady,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: &defaultTolerationSeconds,
@ -175,13 +175,13 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeNotReady,
                        Key: algorithm.TaintNodeNotReady,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(700),
                    },
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(700),
@ -193,13 +193,13 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeNotReady,
                        Key: algorithm.TaintNodeNotReady,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(700),
                    },
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(700),
@ -214,7 +214,7 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        TolerationSeconds: genTolerationSeconds(700),
                    },
@ -225,12 +225,12 @@ func TestForgivenessAdmission(t *testing.T) {
            Spec: api.PodSpec{
                Tolerations: []api.Toleration{
                    {
                        Key: metav1.TaintNodeUnreachable,
                        Key: algorithm.TaintNodeUnreachable,
                        Operator: api.TolerationOpExists,
                        TolerationSeconds: genTolerationSeconds(700),
                    },
                    {
                        Key: metav1.TaintNodeNotReady,
                        Key: algorithm.TaintNodeNotReady,
                        Operator: api.TolerationOpExists,
                        Effect: api.TaintEffectNoExecute,
                        TolerationSeconds: genTolerationSeconds(300),
@ -21,8 +21,8 @@ go_library(
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//pkg/kubeapiserver/admission:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
    ],
)
@ -22,13 +22,13 @@ import (
    "io"
    "sync"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
    kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    vol "k8s.io/kubernetes/pkg/volume"
)

@ -174,7 +174,7 @@ func (l *persistentVolumeLabel) findGCEPDLabels(volume *api.PersistentVolume) (m
    }

    // If the zone is already labeled, honor the hint
    zone := volume.Labels[metav1.LabelZoneFailureDomain]
    zone := volume.Labels[kubeletapis.LabelZoneFailureDomain]

    labels, err := provider.GetAutoLabelsForPD(volume.Spec.GCEPersistentDisk.PDName, zone)
    if err != nil {
@ -14,6 +14,7 @@ go_library(
        "doc.go",
        "scheduler_interface.go",
        "types.go",
        "well_known_labels.go",
    ],
    tags = ["automanaged"],
    deps = [
@ -23,6 +23,7 @@ go_library(
        "//pkg/api/v1/helper/qos:go_default_library",
        "//pkg/client/listers/core/v1:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
@ -49,6 +50,7 @@ go_test(
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/api/v1/helper:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/schedulercache:go_default_library",
        "//plugin/pkg/scheduler/testing:go_default_library",
@ -36,6 +36,7 @@ import (
    v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos"
    corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
    "k8s.io/kubernetes/pkg/features"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
    priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
@ -412,7 +413,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *s

    nodeConstraints := make(map[string]string)
    for k, v := range node.ObjectMeta.Labels {
        if k != metav1.LabelZoneFailureDomain && k != metav1.LabelZoneRegion {
        if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion {
            continue
        }
        nodeConstraints[k] = v
@ -458,7 +459,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *s
    }

    for k, v := range pv.ObjectMeta.Labels {
        if k != metav1.LabelZoneFailureDomain && k != metav1.LabelZoneRegion {
        if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion {
            continue
        }
        nodeV, _ := nodeConstraints[k]
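For orientation, a hypothetical reduction of the VolumeZoneChecker rule the two hunks above implement: a node fits only if it carries the same value for every zone-related label set on the persistent volume. zoneMatch is an illustrative helper, not the upstream predicate.

package main

import (
    "fmt"

    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// zoneMatch compares only the two well-known zone keys and requires equality
// wherever the PV sets them; other labels are ignored, as in the predicate.
func zoneMatch(nodeLabels, pvLabels map[string]string) bool {
    for _, key := range []string{kubeletapis.LabelZoneFailureDomain, kubeletapis.LabelZoneRegion} {
        pvValue, ok := pvLabels[key]
        if !ok {
            continue // PV places no constraint on this key
        }
        if nodeLabels[key] != pvValue {
            return false
        }
    }
    return true
}

func main() {
    node := map[string]string{kubeletapis.LabelZoneFailureDomain: "zone_1"}
    pv := map[string]string{kubeletapis.LabelZoneFailureDomain: "zone_1"}
    fmt.Println(zoneMatch(node, pv)) // true
}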
@ -26,6 +26,7 @@ import (
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/api/v1"
    v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
    "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
    schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing"
@ -3420,13 +3421,13 @@ func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
func TestVolumeZonePredicate(t *testing.T) {
    pvInfo := FakePersistentVolumeInfo{
        {
            ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{metav1.LabelZoneFailureDomain: "zone_1"}},
            ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "zone_1"}},
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{metav1.LabelZoneRegion: "zone_2", "uselessLabel": "none"}},
            ObjectMeta: metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{kubeletapis.LabelZoneRegion: "zone_2", "uselessLabel": "none"}},
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{metav1.LabelZoneRegion: "zone_3"}},
            ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{kubeletapis.LabelZoneRegion: "zone_3"}},
        },
    }

@ -3463,7 +3464,7 @@ func TestVolumeZonePredicate(t *testing.T) {
        Node: &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name: "host1",
                Labels: map[string]string{metav1.LabelZoneFailureDomain: "zone_1"},
                Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "zone_1"},
            },
        },
        Fits: true,
@ -3484,7 +3485,7 @@ func TestVolumeZonePredicate(t *testing.T) {
        Node: &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name: "host1",
                Labels: map[string]string{metav1.LabelZoneFailureDomain: "zone_1", "uselessLabel": "none"},
                Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "zone_1", "uselessLabel": "none"},
            },
        },
        Fits: true,
@ -3495,7 +3496,7 @@ func TestVolumeZonePredicate(t *testing.T) {
        Node: &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name: "host1",
                Labels: map[string]string{metav1.LabelZoneRegion: "zone_2", "uselessLabel": "none"},
                Labels: map[string]string{kubeletapis.LabelZoneRegion: "zone_2", "uselessLabel": "none"},
            },
        },
        Fits: true,
@ -3506,7 +3507,7 @@ func TestVolumeZonePredicate(t *testing.T) {
        Node: &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name: "host1",
                Labels: map[string]string{metav1.LabelZoneRegion: "no_zone_2", "uselessLabel": "none"},
                Labels: map[string]string{kubeletapis.LabelZoneRegion: "no_zone_2", "uselessLabel": "none"},
            },
        },
        Fits: false,
@ -3517,7 +3518,7 @@ func TestVolumeZonePredicate(t *testing.T) {
        Node: &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name: "host1",
                Labels: map[string]string{metav1.LabelZoneFailureDomain: "no_zone_1", "uselessLabel": "none"},
                Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "no_zone_1", "uselessLabel": "none"},
            },
        },
        Fits: false,
@ -28,6 +28,7 @@ go_library(
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/api/v1/helper:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/util/node:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
@ -63,6 +64,7 @@ go_test(
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/apps/v1beta1:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//plugin/pkg/scheduler/api:go_default_library",
        "//plugin/pkg/scheduler/schedulercache:go_default_library",
@ -24,6 +24,7 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/kubernetes/pkg/api/v1"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
    priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
@ -70,7 +71,7 @@ func newPodAffinityPriorityMap(nodes []*v1.Node) *podAffinityPriorityMap {
    return &podAffinityPriorityMap{
        nodes: nodes,
        counts: make(map[string]float64, len(nodes)),
        failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")},
        failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(kubeletapis.DefaultFailureDomains, ",")},
    }
}
@ -25,6 +25,7 @@ import (
    "k8s.io/kubernetes/pkg/api/v1"
    apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
    "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
    schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing"
@ -381,7 +382,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {

    buildNodeLabels := func(failureDomain string) map[string]string {
        labels := map[string]string{
            metav1.LabelZoneFailureDomain: failureDomain,
            kubeletapis.LabelZoneFailureDomain: failureDomain,
        }
        return labels
    }
36
plugin/pkg/scheduler/algorithm/well_known_labels.go
Normal file
@ -0,0 +1,36 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package algorithm

const (
    // When feature-gate for TaintBasedEvictions=true flag is enabled,
    // TaintNodeNotReady would be automatically added by node controller
    // when node is not ready, and removed when node becomes ready.
    TaintNodeNotReady = "node.alpha.kubernetes.io/notReady"

    // When feature-gate for TaintBasedEvictions=true flag is enabled,
    // TaintNodeUnreachable would be automatically added by node controller
    // when node becomes unreachable (corresponding to NodeReady status ConditionUnknown)
    // and removed when node becomes reachable (NodeReady status ConditionTrue).
    TaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable"

    // When kubelet is started with the "external" cloud provider, it sets
    // this taint on a node to mark it as unusable, until a controller from
    // the cloud-controller-manager initializes this node, and then removes
    // the taint.
    TaintExternalCloudProvider = "node.cloudprovider.kubernetes.io/uninitialized"
)
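As a usage illustration (a sketch, not code from this commit), a controller applying the not-ready taint via the relocated constant might construct it as follows; v1.Taint and its fields are the standard API types:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

func main() {
    // The taint the node controller adds while a node is not ready
    // (when the TaintBasedEvictions feature gate is enabled).
    taint := v1.Taint{
        Key:    algorithm.TaintNodeNotReady,
        Effect: v1.TaintEffectNoExecute,
    }
    fmt.Printf("%s:%s\n", taint.Key, taint.Effect) // node.alpha.kubernetes.io/notReady:NoExecute
}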
@ -47,8 +47,6 @@ go_library(
        "types.go",
        "types_swagger_doc_generated.go",
        "watch.go",
        "well_known_annotations.go",
        "well_known_labels.go",
        "zz_generated.deepcopy.go",
        "zz_generated.defaults.go",
    ],
@ -1,82 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

const (
    LabelHostname          = "kubernetes.io/hostname"
    LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
    LabelZoneRegion        = "failure-domain.beta.kubernetes.io/region"

    LabelInstanceType = "beta.kubernetes.io/instance-type"

    LabelOS   = "beta.kubernetes.io/os"
    LabelArch = "beta.kubernetes.io/arch"

    // When feature-gate for TaintBasedEvictions=true flag is enabled,
    // TaintNodeNotReady would be automatically added by node controller
    // when node is not ready, and removed when node becomes ready.
    TaintNodeNotReady = "node.alpha.kubernetes.io/notReady"

    // When feature-gate for TaintBasedEvictions=true flag is enabled,
    // TaintNodeUnreachable would be automatically added by node controller
    // when node becomes unreachable (corresponding to NodeReady status ConditionUnknown)
    // and removed when node becomes reachable (NodeReady status ConditionTrue).
    TaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable"

    // When kubelet is started with the "external" cloud provider, it sets
    // this taint on a node to mark it as unusable, until a controller from
    // the cloud-controller-manager initializes this node, and then removes
    // the taint.
    TaintExternalCloudProvider = "node.cloudprovider.kubernetes.io/uninitialized"
)

// Role labels are applied to Nodes to mark their purpose. In particular, we
// usually want to distinguish the master, so that we can isolate privileged
// pods and operations.
//
// Originally we relied on not registering the master, on the fact that the
// master was Unschedulable, and on static manifests for master components.
// But we now do register masters in many environments, are generally moving
// away from static manifests (for better manageability), and working towards
// deprecating the unschedulable field (replacing it with taints & tolerations
// instead).
//
// Even with tainting, a label remains the easiest way of making a positive
// selection, so that pods can schedule only to master nodes for example, and
// thus installations will likely define a label for their master nodes.
//
// So that we can recognize master nodes in subsequent places (such as
// kubectl get nodes), we encourage installations to use the well-known labels.
// We define NodeLabelRole, which is the preferred form, but we will also recognize
// other forms that are known to be in widespread use (NodeLabelKubeadmAlphaRole).

const (
    // NodeLabelRole is the preferred label applied to a Node as a hint that it has a particular purpose (defined by the value).
    NodeLabelRole = "kubernetes.io/role"

    // NodeLabelKubeadmAlphaRole is a label that kubeadm applies to a Node as a hint that it has a particular purpose.
    // Use of NodeLabelRole is preferred.
    NodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role"

    // NodeLabelRoleMaster is the value of a NodeLabelRole or NodeLabelKubeadmAlphaRole label, indicating a master node.
    // A master node typically runs kubernetes system components and will not typically run user workloads.
    NodeLabelRoleMaster = "master"

    // NodeLabelRoleNode is the value of a NodeLabelRole or NodeLabelKubeadmAlphaRole label, indicating a "normal" node,
    // as opposed to a RoleMaster node.
    NodeLabelRoleNode = "node"
)
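To ground the comment block above, a sketch of the positive selection it describes: steering a pod onto master nodes through a nodeSelector on the preferred role label. The pod name here is made up for the example, and the label key/value are written as literals since this commit relocates the constants.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
    pod := &v1.Pod{}
    pod.Name = "cluster-addon" // hypothetical pod
    // Positive selection: schedule only onto nodes carrying the preferred
    // role label (NodeLabelRole) with the "master" value (NodeLabelRoleMaster).
    pod.Spec.NodeSelector = map[string]string{
        "kubernetes.io/role": "master",
    }
    fmt.Println(pod.Spec.NodeSelector)
}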
@ -3905,8 +3905,4 @@ const (
    // When the --hard-pod-affinity-weight scheduler flag is not specified,
    // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
    DefaultHardPodAffinitySymmetricWeight int = 1

    // When the --failure-domains scheduler flag is not specified,
    // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
    DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion
)
@ -4486,10 +4486,6 @@ const (
    // When the --hard-pod-affinity-weight scheduler flag is not specified,
    // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
    DefaultHardPodAffinitySymmetricWeight int = 1

    // When the --failure-domains scheduler flag is not specified,
    // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
    DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion
)

// Sysctl defines a kernel parameter to be set
@ -144,6 +144,7 @@ go_library(
        "//pkg/controller/replication:go_default_library",
        "//pkg/kubectl:go_default_library",
        "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/master/ports:go_default_library",
@ -39,6 +39,7 @@ go_library(
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
        "//pkg/cloudprovider/providers/vsphere:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
@ -43,6 +43,7 @@ import (
    storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
    storagebeta "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/test/e2e/framework"
)

@ -935,7 +936,7 @@ func getRandomCloudZone(c clientset.Interface) string {
    // collect values of zone label from all nodes
    zones := sets.NewString()
    for _, node := range nodes.Items {
        if zone, found := node.Labels[metav1.LabelZoneFailureDomain]; found {
        if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
            zones.Insert(zone)
        }
    }
@ -29,6 +29,7 @@ import (
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/test/e2e/framework"
    testutils "k8s.io/kubernetes/test/utils"
)
@ -116,12 +117,12 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
// Find the name of the zone in which a Node is running
func getZoneNameForNode(node v1.Node) (string, error) {
    for key, value := range node.Labels {
        if key == metav1.LabelZoneFailureDomain {
        if key == kubeletapis.LabelZoneFailureDomain {
            return value, nil
        }
    }
    return "", fmt.Errorf("Zone name for node %s not found. No label with key %s",
        node.Name, metav1.LabelZoneFailureDomain)
        node.Name, kubeletapis.LabelZoneFailureDomain)
}

// Find the names of all zones in which we have nodes in this cluster.
@ -21,6 +21,7 @@ go_test(
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
@ -26,6 +26,7 @@ import (
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
    "k8s.io/kubernetes/test/integration/framework"
)

@ -64,14 +65,14 @@ func TestAdmission(t *testing.T) {

    var defaultSeconds int64 = 300
    nodeNotReady := v1.Toleration{
        Key: metav1.TaintNodeNotReady,
        Key: algorithm.TaintNodeNotReady,
        Operator: v1.TolerationOpExists,
        Effect: v1.TaintEffectNoExecute,
        TolerationSeconds: &defaultSeconds,
    }

    nodeUnreachable := v1.Toleration{
        Key: metav1.TaintNodeUnreachable,
        Key: algorithm.TaintNodeUnreachable,
        Operator: v1.TolerationOpExists,
        Effect: v1.TaintEffectNoExecute,
        TolerationSeconds: &defaultSeconds,