Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-20 18:31:15 +00:00)
Move out const strings in pkg/scheduler/api/well_known_labels.go
parent cbe7c6e3be
commit dd74205bcf
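In short: the taint-key and node-field-selector constants that previously lived in pkg/scheduler/api (imported as schedulerapi or legacyapi) move to k8s.io/api/core/v1 (new file well_known_taints.go) and pkg/apis/core (api.ObjectNameField), and all call sites are updated. A minimal sketch of the new call-site style (illustrative only, not a file in this commit):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // The same "node.kubernetes.io/not-ready" string that callers used to
        // reference as schedulerapi.TaintNodeNotReady is now a core/v1 constant.
        taint := v1.Taint{
            Key:    v1.TaintNodeNotReady,
            Effect: v1.TaintEffectNoExecute,
        }
        fmt.Println(taint.ToString())
    }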
@@ -49,7 +49,6 @@ import (
     "k8s.io/kubernetes/pkg/apis/scheduling"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/features"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     "k8s.io/kubernetes/pkg/securitycontext"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
@@ -75,13 +74,13 @@ func nowPointer() *metav1.Time {
 
 var (
     nodeNotReady = []v1.Taint{{
-        Key:       schedulerapi.TaintNodeNotReady,
+        Key:       v1.TaintNodeNotReady,
         Effect:    v1.TaintEffectNoExecute,
         TimeAdded: nowPointer(),
     }}
 
     nodeUnreachable = []v1.Taint{{
-        Key:       schedulerapi.TaintNodeUnreachable,
+        Key:       v1.TaintNodeUnreachable,
         Effect:    v1.TaintEffectNoExecute,
         TimeAdded: nowPointer(),
     }}
@@ -528,7 +527,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
     }
 
     field := nodeSelector.NodeSelectorTerms[0].MatchFields[0]
-    if field.Key == schedulerapi.NodeFieldSelectorKeyNodeName {
+    if field.Key == api.ObjectNameField {
         if field.Operator != v1.NodeSelectorOpIn {
             t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn)
         }
@@ -1517,9 +1516,9 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
         {Type: v1.NodePIDPressure, Status: v1.ConditionTrue},
     }
     node.Spec.Taints = []v1.Taint{
-        {Key: schedulerapi.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
-        {Key: schedulerapi.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
-        {Key: schedulerapi.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule},
+        {Key: v1.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
+        {Key: v1.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
+        {Key: v1.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule},
     }
     manager.nodeStore.Add(node)
 
@@ -2161,7 +2160,7 @@ func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) {
                     {
                         MatchFields: []v1.NodeSelectorRequirement{
                             {
-                                Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                                Key:      api.ObjectNameField,
                                 Operator: v1.NodeSelectorOpIn,
                                 Values:   []string{"node-2"},
                             },
@@ -25,8 +25,8 @@ import (
     extensions "k8s.io/api/extensions/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+    api "k8s.io/kubernetes/pkg/apis/core"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 
 // GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
@@ -52,7 +52,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec) {
     // to survive taint-based eviction enforced by NodeController
     // when node turns not ready.
     v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-        Key:      schedulerapi.TaintNodeNotReady,
+        Key:      v1.TaintNodeNotReady,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoExecute,
     })
@@ -62,7 +62,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec) {
     // to survive taint-based eviction enforced by NodeController
     // when node turns unreachable.
     v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-        Key:      schedulerapi.TaintNodeUnreachable,
+        Key:      v1.TaintNodeUnreachable,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoExecute,
     })
@@ -70,32 +70,32 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec) {
     // According to TaintNodesByCondition feature, all DaemonSet pods should tolerate
     // MemoryPressure, DiskPressure, PIDPressure, Unschedulable and NetworkUnavailable taints.
     v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-        Key:      schedulerapi.TaintNodeDiskPressure,
+        Key:      v1.TaintNodeDiskPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     })
 
     v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-        Key:      schedulerapi.TaintNodeMemoryPressure,
+        Key:      v1.TaintNodeMemoryPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     })
 
     v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-        Key:      schedulerapi.TaintNodePIDPressure,
+        Key:      v1.TaintNodePIDPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     })
 
     v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-        Key:      schedulerapi.TaintNodeUnschedulable,
+        Key:      v1.TaintNodeUnschedulable,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     })
 
     if spec.HostNetwork {
         v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-            Key:      schedulerapi.TaintNodeNetworkUnavailable,
+            Key:      v1.TaintNodeNetworkUnavailable,
             Operator: v1.TolerationOpExists,
             Effect:   v1.TaintEffectNoSchedule,
         })
@@ -151,7 +151,7 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*
 // Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
 func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
     nodeSelReq := v1.NodeSelectorRequirement{
-        Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+        Key:      api.ObjectNameField,
         Operator: v1.NodeSelectorOpIn,
         Values:   []string{nodename},
     }
@@ -220,11 +220,11 @@ func GetTargetNodeName(pod *v1.Pod) (string, error) {
 
     for _, term := range terms {
         for _, exp := range term.MatchFields {
-            if exp.Key == schedulerapi.NodeFieldSelectorKeyNodeName &&
+            if exp.Key == api.ObjectNameField &&
                 exp.Operator == v1.NodeSelectorOpIn {
                 if len(exp.Values) != 1 {
                     return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
-                        schedulerapi.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
+                        api.ObjectNameField, pod.Namespace, pod.Name)
                 }
 
                 return exp.Values[0], nil
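The hunks above give every DaemonSet pod an Exists toleration for each node-condition taint. A minimal sketch of why such a toleration matches the corresponding taint, using the public ToleratesTaint helper on v1.Toleration (illustrative, not part of this commit):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // The Exists/NoSchedule toleration added for DaemonSet pods matches
        // the node-condition taint with the same key and effect.
        tol := v1.Toleration{
            Key:      v1.TaintNodeDiskPressure,
            Operator: v1.TolerationOpExists,
            Effect:   v1.TaintEffectNoSchedule,
        }
        taint := v1.Taint{Key: v1.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule}
        fmt.Println(tol.ToleratesTaint(&taint)) // true
    }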
@@ -27,7 +27,7 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/component-base/featuregate"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+    api "k8s.io/kubernetes/pkg/apis/core"
     utilpointer "k8s.io/utils/pointer"
 )
 
@@ -185,7 +185,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1"},
                         },
@@ -222,7 +222,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1"},
                         },
@@ -272,7 +272,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1"},
                         },
@@ -291,7 +291,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1", "host_2"},
                         },
@@ -309,7 +309,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1"},
                         },
@@ -330,7 +330,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1"},
                         },
@@ -358,7 +358,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_2"},
                         },
@@ -376,7 +376,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1"},
                         },
@@ -395,7 +395,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpNotIn,
                             Values:   []string{"host_2"},
                         },
@@ -413,7 +413,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1"},
                         },
@@ -453,7 +453,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"host_1"},
                         },
@@ -519,7 +519,7 @@ func TestGetTargetNodeName(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node-1"},
                         },
@@ -547,7 +547,7 @@ func TestGetTargetNodeName(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node-1", "node-2"},
                         },
@@ -42,7 +42,6 @@ import (
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
     cloudprovider "k8s.io/cloud-provider"
     nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     utilnode "k8s.io/kubernetes/pkg/util/node"
     utiltaints "k8s.io/kubernetes/pkg/util/taints"
     "k8s.io/legacy-cloud-providers/gce"
@@ -117,7 +116,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
         }
         // Even if PodCIDR is assigned, but NetworkUnavailable condition is
         // set to true, we need to process the node to set the condition.
-        networkUnavailableTaint := &v1.Taint{Key: schedulerapi.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
+        networkUnavailableTaint := &v1.Taint{Key: v1.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
         _, cond := nodeutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
         if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
             return ca.AllocateOrOccupyCIDR(newNode)
@@ -60,7 +60,6 @@ import (
     "k8s.io/kubernetes/pkg/features"
     kubefeatures "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     utilnode "k8s.io/kubernetes/pkg/util/node"
     taintutils "k8s.io/kubernetes/pkg/util/taints"
 )
@@ -73,14 +72,14 @@ func init() {
 var (
     // UnreachableTaintTemplate is the taint for when a node becomes unreachable.
     UnreachableTaintTemplate = &v1.Taint{
-        Key:    schedulerapi.TaintNodeUnreachable,
+        Key:    v1.TaintNodeUnreachable,
         Effect: v1.TaintEffectNoExecute,
     }
 
     // NotReadyTaintTemplate is the taint for when a node is not ready for
     // executing pods
     NotReadyTaintTemplate = &v1.Taint{
-        Key:    schedulerapi.TaintNodeNotReady,
+        Key:    v1.TaintNodeNotReady,
         Effect: v1.TaintEffectNoExecute,
     }
 
@@ -90,30 +89,30 @@ var (
     // for certain NodeConditionType, there are multiple {ConditionStatus,TaintKey} pairs
     nodeConditionToTaintKeyStatusMap = map[v1.NodeConditionType]map[v1.ConditionStatus]string{
         v1.NodeReady: {
-            v1.ConditionFalse:   schedulerapi.TaintNodeNotReady,
-            v1.ConditionUnknown: schedulerapi.TaintNodeUnreachable,
+            v1.ConditionFalse:   v1.TaintNodeNotReady,
+            v1.ConditionUnknown: v1.TaintNodeUnreachable,
         },
         v1.NodeMemoryPressure: {
-            v1.ConditionTrue: schedulerapi.TaintNodeMemoryPressure,
+            v1.ConditionTrue: v1.TaintNodeMemoryPressure,
         },
         v1.NodeDiskPressure: {
-            v1.ConditionTrue: schedulerapi.TaintNodeDiskPressure,
+            v1.ConditionTrue: v1.TaintNodeDiskPressure,
         },
         v1.NodeNetworkUnavailable: {
-            v1.ConditionTrue: schedulerapi.TaintNodeNetworkUnavailable,
+            v1.ConditionTrue: v1.TaintNodeNetworkUnavailable,
         },
         v1.NodePIDPressure: {
-            v1.ConditionTrue: schedulerapi.TaintNodePIDPressure,
+            v1.ConditionTrue: v1.TaintNodePIDPressure,
         },
     }
 
     taintKeyToNodeConditionMap = map[string]v1.NodeConditionType{
-        schedulerapi.TaintNodeNotReady:           v1.NodeReady,
-        schedulerapi.TaintNodeUnreachable:        v1.NodeReady,
-        schedulerapi.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable,
-        schedulerapi.TaintNodeMemoryPressure:     v1.NodeMemoryPressure,
-        schedulerapi.TaintNodeDiskPressure:       v1.NodeDiskPressure,
-        schedulerapi.TaintNodePIDPressure:        v1.NodePIDPressure,
+        v1.TaintNodeNotReady:           v1.NodeReady,
+        v1.TaintNodeUnreachable:        v1.NodeReady,
+        v1.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable,
+        v1.TaintNodeMemoryPressure:     v1.NodeMemoryPressure,
+        v1.TaintNodeDiskPressure:       v1.NodeDiskPressure,
+        v1.TaintNodePIDPressure:        v1.NodePIDPressure,
     }
 )
 
@@ -584,7 +583,7 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
     if node.Spec.Unschedulable {
         // If unschedulable, append related taint.
         taints = append(taints, v1.Taint{
-            Key:    schedulerapi.TaintNodeUnschedulable,
+            Key:    v1.TaintNodeUnschedulable,
             Effect: v1.TaintEffectNoSchedule,
         })
     }
@@ -596,7 +595,7 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
             return false
         }
         // Find unschedulable taint of node.
-        if t.Key == schedulerapi.TaintNodeUnschedulable {
+        if t.Key == v1.TaintNodeUnschedulable {
             return true
         }
         // Find node condition taints of node.
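For orientation, the two maps above translate between node conditions and taint keys in both directions. A trimmed, illustrative sketch of the condition-to-taint lookup (one entry only; the full table is in the hunk above):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    // Mirror of the controller's lookup table, trimmed to the NodeReady entry.
    var nodeConditionToTaintKeyStatusMap = map[v1.NodeConditionType]map[v1.ConditionStatus]string{
        v1.NodeReady: {
            v1.ConditionFalse:   v1.TaintNodeNotReady,
            v1.ConditionUnknown: v1.TaintNodeUnreachable,
        },
    }

    func main() {
        // A NodeReady=False condition maps to the not-ready taint key.
        key := nodeConditionToTaintKeyStatusMap[v1.NodeReady][v1.ConditionFalse]
        fmt.Println(key) // node.kubernetes.io/not-ready
    }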
@@ -47,7 +47,6 @@ import (
     nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
     "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     "k8s.io/kubernetes/pkg/util/node"
     taintutils "k8s.io/kubernetes/pkg/util/taints"
     "k8s.io/utils/pointer"
@@ -2879,15 +2878,15 @@ func TestTaintsNodeByCondition(t *testing.T) {
     nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
 
     networkUnavailableTaint := &v1.Taint{
-        Key:    schedulerapi.TaintNodeNetworkUnavailable,
+        Key:    v1.TaintNodeNetworkUnavailable,
         Effect: v1.TaintEffectNoSchedule,
     }
     notReadyTaint := &v1.Taint{
-        Key:    schedulerapi.TaintNodeNotReady,
+        Key:    v1.TaintNodeNotReady,
         Effect: v1.TaintEffectNoSchedule,
     }
     unreachableTaint := &v1.Taint{
-        Key:    schedulerapi.TaintNodeUnreachable,
+        Key:    v1.TaintNodeUnreachable,
         Effect: v1.TaintEffectNoSchedule,
     }
 
@@ -40,7 +40,6 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/server/stats"
     kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/kubelet/util/format"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 
 const (
@@ -149,7 +148,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
     // When node has memory pressure, check BestEffort Pod's toleration:
     // admit it if tolerates memory pressure taint, fail for other tolerations, e.g. DiskPressure.
     if v1helper.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{
-        Key:    schedulerapi.TaintNodeMemoryPressure,
+        Key:    v1.TaintNodeMemoryPressure,
         Effect: v1.TaintEffectNoSchedule,
     }) {
         return lifecycle.PodAdmitResult{Admit: true}
@@ -242,7 +242,7 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
     }
 
     unschedulableTaint := v1.Taint{
-        Key:    schedulerapi.TaintNodeUnschedulable,
+        Key:    v1.TaintNodeUnschedulable,
         Effect: v1.TaintEffectNoSchedule,
     }
 
@@ -58,7 +58,6 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/nodestatus"
     "k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
     kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     taintutil "k8s.io/kubernetes/pkg/util/taints"
     "k8s.io/kubernetes/pkg/volume/util"
 )
@@ -1991,7 +1990,7 @@ func TestRegisterWithApiServerWithTaint(t *testing.T) {
     // Check the unschedulable taint.
     got := gotNode.(*v1.Node)
     unschedulableTaint := &v1.Taint{
-        Key:    schedulerapi.TaintNodeUnschedulable,
+        Key:    v1.TaintNodeUnschedulable,
         Effect: v1.TaintEffectNoSchedule,
     }
 
@@ -43,7 +43,6 @@ import (
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm"
     priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
-    legacyapi "k8s.io/kubernetes/pkg/scheduler/api"
     schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
@@ -1520,7 +1519,7 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta PredicateMetadata, nodeIn
 
     // If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`.
     podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{
-        Key:    legacyapi.TaintNodeUnschedulable,
+        Key:    v1.TaintNodeUnschedulable,
         Effect: v1.TaintEffectNoSchedule,
     })
 
@@ -31,9 +31,9 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
+    api "k8s.io/kubernetes/pkg/apis/core"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/features"
-    legacyapi "k8s.io/kubernetes/pkg/scheduler/api"
     fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
@@ -1447,7 +1447,7 @@ func TestPodFitsSelector(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      legacyapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -1473,7 +1473,7 @@ func TestPodFitsSelector(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      legacyapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -1499,7 +1499,7 @@ func TestPodFitsSelector(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      legacyapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -1535,7 +1535,7 @@ func TestPodFitsSelector(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      legacyapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -1569,7 +1569,7 @@ func TestPodFitsSelector(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      legacyapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -1603,7 +1603,7 @@ func TestPodFitsSelector(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      legacyapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -4690,7 +4690,7 @@ func TestCheckNodeUnschedulablePredicate(t *testing.T) {
             Spec: v1.PodSpec{
                 Tolerations: []v1.Toleration{
                     {
-                        Key:    legacyapi.TaintNodeUnschedulable,
+                        Key:    v1.TaintNodeUnschedulable,
                         Effect: v1.TaintEffectNoSchedule,
                     },
                 },
@@ -23,13 +23,13 @@ import (
     appslisters "k8s.io/client-go/listers/apps/v1"
     corelisters "k8s.io/client-go/listers/core/v1"
     "k8s.io/kubernetes/pkg/apis/apps"
-    legacyapi "k8s.io/kubernetes/pkg/scheduler/api"
+    api "k8s.io/kubernetes/pkg/apis/core"
 )
 
 // NodeFieldSelectorKeys is a map that: the keys are node field selector keys; the values are
 // the functions to get the value of the node field.
 var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
-    legacyapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
+    api.ObjectNameField: func(n *v1.Node) string { return n.Name },
 }
 
 var _ corelisters.ReplicationControllerLister = &EmptyControllerLister{}
@@ -16,47 +16,7 @@ limitations under the License.
 
 package api
 
-import (
-    api "k8s.io/kubernetes/pkg/apis/core"
-)
-
 const (
-    // TaintNodeNotReady will be added when node is not ready
-    // and feature-gate for TaintBasedEvictions flag is enabled,
-    // and removed when node becomes ready.
-    TaintNodeNotReady = "node.kubernetes.io/not-ready"
-
-    // TaintNodeUnreachable will be added when node becomes unreachable
-    // (corresponding to NodeReady status ConditionUnknown)
-    // and feature-gate for TaintBasedEvictions flag is enabled,
-    // and removed when node becomes reachable (NodeReady status ConditionTrue).
-    TaintNodeUnreachable = "node.kubernetes.io/unreachable"
-
-    // TaintNodeUnschedulable will be added when node becomes unschedulable
-    // and feature-gate for TaintNodesByCondition flag is enabled,
-    // and removed when node becomes scheduable.
-    TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
-
-    // TaintNodeMemoryPressure will be added when node has memory pressure
-    // and feature-gate for TaintNodesByCondition flag is enabled,
-    // and removed when node has enough memory.
-    TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
-
-    // TaintNodeDiskPressure will be added when node has disk pressure
-    // and feature-gate for TaintNodesByCondition flag is enabled,
-    // and removed when node has enough disk.
-    TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
-
-    // TaintNodeNetworkUnavailable will be added when node's network is unavailable
-    // and feature-gate for TaintNodesByCondition flag is enabled,
-    // and removed when network becomes ready.
-    TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
-
-    // TaintNodePIDPressure will be added when node has pid pressure
-    // and feature-gate for TaintNodesByCondition flag is enabled,
-    // and removed when node has enough disk.
-    TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"
-
     // TaintExternalCloudProvider sets this taint on a node to mark it as unusable,
     // when kubelet is started with the "external" cloud provider, until a controller
     // from the cloud-controller-manager intitializes this node, and then removes
@@ -65,8 +25,4 @@ const (
 
     // TaintNodeShutdown when node is shutdown in external cloud provider
     TaintNodeShutdown = "node.cloudprovider.kubernetes.io/shutdown"
-
-    // NodeFieldSelectorKeyNodeName ('metadata.name') uses this as node field selector key
-    // when selecting node by node's name.
-    NodeFieldSelectorKeyNodeName = api.ObjectNameField
 )
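Note that the removed NodeFieldSelectorKeyNodeName was only an alias for api.ObjectNameField, i.e. the "metadata.name" field selector key, which is why call sites can switch to the pkg/apis/core constant directly. An illustrative match-fields requirement using the literal value (hypothetical node name; sketch only):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // api.ObjectNameField == "metadata.name"; written literally here since
        // pkg/apis/core is internal to the Kubernetes tree.
        req := v1.NodeSelectorRequirement{
            Key:      "metadata.name",
            Operator: v1.NodeSelectorOpIn,
            Values:   []string{"node-1"},
        }
        fmt.Println(req.Key, req.Values)
    }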
@@ -23,8 +23,8 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
@@ -508,7 +508,7 @@ func TestNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -533,7 +533,7 @@ func TestNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -559,7 +559,7 @@ func TestNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -594,7 +594,7 @@ func TestNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -628,7 +628,7 @@ func TestNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -661,7 +661,7 @@ func TestNodeAffinity(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node_1"},
                         },
@@ -23,7 +23,6 @@ import (
 
     v1 "k8s.io/api/core/v1"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -60,7 +59,7 @@ func TestNodeUnschedulable(t *testing.T) {
             Spec: v1.PodSpec{
                 Tolerations: []v1.Toleration{
                     {
-                        Key:    schedulerapi.TaintNodeUnschedulable,
+                        Key:    v1.TaintNodeUnschedulable,
                         Effect: v1.TaintEffectNoSchedule,
                     },
                 },
@@ -22,10 +22,10 @@ import (
     "fmt"
     "io"
 
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apiserver/pkg/admission"
     api "k8s.io/kubernetes/pkg/apis/core"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 
 // PluginName indicates name of admission plugin.
@@ -41,14 +41,14 @@ var (
         " that is added by default to every pod that does not already have such a toleration.")
 
     notReadyToleration = api.Toleration{
-        Key:               schedulerapi.TaintNodeNotReady,
+        Key:               v1.TaintNodeNotReady,
         Operator:          api.TolerationOpExists,
         Effect:            api.TaintEffectNoExecute,
         TolerationSeconds: defaultNotReadyTolerationSeconds,
     }
 
     unreachableToleration = api.Toleration{
-        Key:               schedulerapi.TaintNodeUnreachable,
+        Key:               v1.TaintNodeUnreachable,
         Operator:          api.TolerationOpExists,
         Effect:            api.TaintEffectNoExecute,
         TolerationSeconds: defaultUnreachableTolerationSeconds,
@@ -102,12 +102,12 @@ func (p *Plugin) Admit(ctx context.Context, attributes admission.Attributes, o a
     toleratesNodeNotReady := false
     toleratesNodeUnreachable := false
     for _, toleration := range tolerations {
-        if (toleration.Key == schedulerapi.TaintNodeNotReady || len(toleration.Key) == 0) &&
+        if (toleration.Key == v1.TaintNodeNotReady || len(toleration.Key) == 0) &&
             (toleration.Effect == api.TaintEffectNoExecute || len(toleration.Effect) == 0) {
             toleratesNodeNotReady = true
         }
 
-        if (toleration.Key == schedulerapi.TaintNodeUnreachable || len(toleration.Key) == 0) &&
+        if (toleration.Key == v1.TaintNodeUnreachable || len(toleration.Key) == 0) &&
             (toleration.Effect == api.TaintEffectNoExecute || len(toleration.Effect) == 0) {
             toleratesNodeUnreachable = true
         }
@@ -20,11 +20,11 @@ import (
     "context"
     "testing"
 
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apiserver/pkg/admission"
     admissiontesting "k8s.io/apiserver/pkg/admission/testing"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/apis/core/helper"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 
 func TestForgivenessAdmission(t *testing.T) {
@@ -50,13 +50,13 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeNotReady,
+                        Key:               v1.TaintNodeNotReady,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: &defaultTolerationSeconds,
                     },
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: &defaultTolerationSeconds,
@@ -91,13 +91,13 @@ func TestForgivenessAdmission(t *testing.T) {
                         TolerationSeconds: genTolerationSeconds(700),
                     },
                     {
-                        Key:               schedulerapi.TaintNodeNotReady,
+                        Key:               v1.TaintNodeNotReady,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: &defaultTolerationSeconds,
                     },
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: &defaultTolerationSeconds,
@@ -112,7 +112,7 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeNotReady,
+                        Key:               v1.TaintNodeNotReady,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(700),
@@ -124,13 +124,13 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeNotReady,
+                        Key:               v1.TaintNodeNotReady,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(700),
                     },
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: &defaultTolerationSeconds,
@@ -145,7 +145,7 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(700),
@@ -157,13 +157,13 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(700),
                     },
                     {
-                        Key:               schedulerapi.TaintNodeNotReady,
+                        Key:               v1.TaintNodeNotReady,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: &defaultTolerationSeconds,
@@ -178,13 +178,13 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeNotReady,
+                        Key:               v1.TaintNodeNotReady,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(700),
                     },
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(700),
@@ -196,13 +196,13 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeNotReady,
+                        Key:               v1.TaintNodeNotReady,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(700),
                     },
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(700),
@@ -217,7 +217,7 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         TolerationSeconds: genTolerationSeconds(700),
                     },
@@ -228,12 +228,12 @@ func TestForgivenessAdmission(t *testing.T) {
             Spec: api.PodSpec{
                 Tolerations: []api.Toleration{
                     {
-                        Key:               schedulerapi.TaintNodeUnreachable,
+                        Key:               v1.TaintNodeUnreachable,
                         Operator:          api.TolerationOpExists,
                         TolerationSeconds: genTolerationSeconds(700),
                     },
                     {
-                        Key:               schedulerapi.TaintNodeNotReady,
+                        Key:               v1.TaintNodeNotReady,
                         Operator:          api.TolerationOpExists,
                         Effect:            api.TaintEffectNoExecute,
                         TolerationSeconds: genTolerationSeconds(300),
@@ -21,6 +21,7 @@ import (
     "fmt"
     "io"
 
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apiserver/pkg/admission"
     api "k8s.io/kubernetes/pkg/apis/core"
 )
@@ -28,8 +29,6 @@ import (
 const (
     // PluginName is the name of the plugin.
     PluginName = "TaintNodesByCondition"
-    // TaintNodeNotReady is the not-ready label as specified in the API.
-    TaintNodeNotReady = "node.kubernetes.io/not-ready"
 )
 
 // Register registers a plugin
@@ -83,7 +82,7 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
 
 func addNotReadyTaint(node *api.Node) {
     notReadyTaint := api.Taint{
-        Key:    TaintNodeNotReady,
+        Key:    v1.TaintNodeNotReady,
         Effect: api.TaintEffectNoSchedule,
     }
     for _, taint := range node.Spec.Taints {
@@ -22,6 +22,7 @@ import (
 
     "testing"
 
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apiserver/pkg/admission"
@@ -33,7 +34,7 @@ func Test_nodeTaints(t *testing.T) {
     var (
         mynode            = &user.DefaultInfo{Name: "system:node:mynode", Groups: []string{"system:nodes"}}
         resource          = api.Resource("nodes").WithVersion("v1")
-        notReadyTaint     = api.Taint{Key: TaintNodeNotReady, Effect: api.TaintEffectNoSchedule}
+        notReadyTaint     = api.Taint{Key: v1.TaintNodeNotReady, Effect: api.TaintEffectNoSchedule}
         notReadyCondition = api.NodeCondition{Type: api.NodeReady, Status: api.ConditionFalse}
         myNodeObjMeta     = metav1.ObjectMeta{Name: "mynode"}
         myNodeObj         = api.Node{ObjectMeta: myNodeObjMeta}
@@ -35,7 +35,6 @@ import (
     api "k8s.io/kubernetes/pkg/apis/core"
     qoshelper "k8s.io/kubernetes/pkg/apis/core/helper/qos"
    k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     "k8s.io/kubernetes/pkg/util/tolerations"
     pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
 )
@@ -102,7 +101,7 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
 
     if qoshelper.GetPodQOS(pod) != api.PodQOSBestEffort {
         extraTolerations = append(extraTolerations, api.Toleration{
-            Key:      schedulerapi.TaintNodeMemoryPressure,
+            Key:      corev1.TaintNodeMemoryPressure,
             Operator: api.TolerationOpExists,
             Effect:   api.TaintEffectNoSchedule,
         })
@@ -24,6 +24,7 @@ import (
 
     "github.com/stretchr/testify/assert"
     corev1 "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apiserver/pkg/admission"
@@ -33,7 +34,7 @@ import (
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/fake"
     api "k8s.io/kubernetes/pkg/apis/core"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
 )
 
@@ -193,7 +193,7 @@ func TestPodAdmission(t *testing.T) {
             whitelist:      []api.Toleration{},
             podTolerations: []api.Toleration{},
             mergedTolerations: []api.Toleration{
-                {Key: schedulerapi.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
+                {Key: v1.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
                 {Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
             },
             admit: true,
@@ -219,7 +219,7 @@ func TestPodAdmission(t *testing.T) {
             whitelist:      []api.Toleration{},
             podTolerations: []api.Toleration{},
             mergedTolerations: []api.Toleration{
-                {Key: schedulerapi.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
+                {Key: v1.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
                 {Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
             },
             admit: true,
staging/src/k8s.io/api/core/v1/well_known_taints.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const (
+    // TaintNodeNotReady will be added when node is not ready
+    // and feature-gate for TaintBasedEvictions flag is enabled,
+    // and removed when node becomes ready.
+    TaintNodeNotReady = "node.kubernetes.io/not-ready"
+
+    // TaintNodeUnreachable will be added when node becomes unreachable
+    // (corresponding to NodeReady status ConditionUnknown)
+    // and feature-gate for TaintBasedEvictions flag is enabled,
+    // and removed when node becomes reachable (NodeReady status ConditionTrue).
+    TaintNodeUnreachable = "node.kubernetes.io/unreachable"
+
+    // TaintNodeUnschedulable will be added when node becomes unschedulable
+    // and feature-gate for TaintNodesByCondition flag is enabled,
+    // and removed when node becomes scheduable.
+    TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
+
+    // TaintNodeMemoryPressure will be added when node has memory pressure
+    // and feature-gate for TaintNodesByCondition flag is enabled,
+    // and removed when node has enough memory.
+    TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
+
+    // TaintNodeDiskPressure will be added when node has disk pressure
+    // and feature-gate for TaintNodesByCondition flag is enabled,
+    // and removed when node has enough disk.
+    TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
+
+    // TaintNodeNetworkUnavailable will be added when node's network is unavailable
+    // and feature-gate for TaintNodesByCondition flag is enabled,
+    // and removed when network becomes ready.
+    TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
+
+    // TaintNodePIDPressure will be added when node has pid pressure
+    // and feature-gate for TaintNodesByCondition flag is enabled,
+    // and removed when node has enough disk.
+    TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"
+)
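With the new file in place, all seven well-known node taint keys become importable from k8s.io/api/core/v1. A quick usage sketch (illustrative):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // The well-known node taint keys now live beside the other core/v1 constants.
        for _, key := range []string{
            v1.TaintNodeNotReady,
            v1.TaintNodeUnreachable,
            v1.TaintNodeUnschedulable,
            v1.TaintNodeMemoryPressure,
            v1.TaintNodeDiskPressure,
            v1.TaintNodeNetworkUnavailable,
            v1.TaintNodePIDPressure,
        } {
            fmt.Println(key)
        }
    }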
@@ -42,11 +42,11 @@ import (
     "k8s.io/client-go/util/retry"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+    api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/daemon"
     "k8s.io/kubernetes/pkg/scheduler"
     "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
     "k8s.io/kubernetes/test/integration/framework"
@@ -549,7 +549,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
                 {
                     MatchFields: []v1.NodeSelectorRequirement{
                         {
-                            Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+                            Key:      api.ObjectNameField,
                             Operator: v1.NodeSelectorOpIn,
                             Values:   []string{"node-1"},
                         },
@@ -899,7 +899,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
     node.Spec.Unschedulable = true
     node.Spec.Taints = []v1.Taint{
         {
-            Key:    schedulerapi.TaintNodeUnschedulable,
+            Key:    v1.TaintNodeUnschedulable,
             Effect: v1.TaintEffectNoSchedule,
         },
     }
@@ -917,7 +917,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
     }
     nodeNU.Spec.Taints = []v1.Taint{
         {
-            Key:    schedulerapi.TaintNodeNetworkUnavailable,
+            Key:    v1.TaintNodeNetworkUnavailable,
             Effect: v1.TaintEffectNoSchedule,
         },
     }
@@ -25,7 +25,6 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
     "k8s.io/kubernetes/pkg/apis/core/helper"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
     "k8s.io/kubernetes/test/integration/framework"
 )
@@ -64,14 +63,14 @@ func TestAdmission(t *testing.T) {
 
     var defaultSeconds int64 = 300
     nodeNotReady := v1.Toleration{
-        Key:               schedulerapi.TaintNodeNotReady,
+        Key:               v1.TaintNodeNotReady,
         Operator:          v1.TolerationOpExists,
         Effect:            v1.TaintEffectNoExecute,
         TolerationSeconds: &defaultSeconds,
     }
 
     nodeUnreachable := v1.Toleration{
-        Key:               schedulerapi.TaintNodeUnreachable,
+        Key:               v1.TaintNodeUnreachable,
        Operator:          v1.TolerationOpExists,
         Effect:            v1.TaintEffectNoExecute,
         TolerationSeconds: &defaultSeconds,
@@ -36,7 +36,6 @@ import (
     "k8s.io/kubernetes/pkg/controller/nodelifecycle"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
-    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
     "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
     pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
@@ -135,37 +134,37 @@ func TestTaintNodeByCondition(t *testing.T) {
     }
 
     notReadyToleration := v1.Toleration{
-        Key:      schedulerapi.TaintNodeNotReady,
+        Key:      v1.TaintNodeNotReady,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     }
 
     unschedulableToleration := v1.Toleration{
-        Key:      schedulerapi.TaintNodeUnschedulable,
+        Key:      v1.TaintNodeUnschedulable,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     }
 
     memoryPressureToleration := v1.Toleration{
-        Key:      schedulerapi.TaintNodeMemoryPressure,
+        Key:      v1.TaintNodeMemoryPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     }
 
     diskPressureToleration := v1.Toleration{
-        Key:      schedulerapi.TaintNodeDiskPressure,
+        Key:      v1.TaintNodeDiskPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     }
 
     networkUnavailableToleration := v1.Toleration{
-        Key:      schedulerapi.TaintNodeNetworkUnavailable,
+        Key:      v1.TaintNodeNetworkUnavailable,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     }
 
     pidPressureToleration := v1.Toleration{
-        Key:      schedulerapi.TaintNodePIDPressure,
+        Key:      v1.TaintNodePIDPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     }
@@ -199,7 +198,7 @@ func TestTaintNodeByCondition(t *testing.T) {
             },
             expectedTaints: []v1.Taint{
                 {
-                    Key:    schedulerapi.TaintNodeNotReady,
+                    Key:    v1.TaintNodeNotReady,
                     Effect: v1.TaintEffectNoSchedule,
                 },
             },
@@ -234,7 +233,7 @@ func TestTaintNodeByCondition(t *testing.T) {
             },
             expectedTaints: []v1.Taint{
                 {
-                    Key:    schedulerapi.TaintNodeUnschedulable,
+                    Key:    v1.TaintNodeUnschedulable,
                     Effect: v1.TaintEffectNoSchedule,
                 },
             },
@@ -272,7 +271,7 @@ func TestTaintNodeByCondition(t *testing.T) {
             },
             expectedTaints: []v1.Taint{
                 {
-                    Key:    schedulerapi.TaintNodeMemoryPressure,
+                    Key:    v1.TaintNodeMemoryPressure,
                     Effect: v1.TaintEffectNoSchedule,
                 },
             },
@@ -317,7 +316,7 @@ func TestTaintNodeByCondition(t *testing.T) {
             },
             expectedTaints: []v1.Taint{
                 {
-                    Key:    schedulerapi.TaintNodeDiskPressure,
+                    Key:    v1.TaintNodeDiskPressure,
                     Effect: v1.TaintEffectNoSchedule,
                 },
             },
@@ -361,7 +360,7 @@ func TestTaintNodeByCondition(t *testing.T) {
             },
             expectedTaints: []v1.Taint{
                 {
-                    Key:    schedulerapi.TaintNodeNetworkUnavailable,
+                    Key:    v1.TaintNodeNetworkUnavailable,
                     Effect: v1.TaintEffectNoSchedule,
                 },
             },
@@ -401,11 +400,11 @@ func TestTaintNodeByCondition(t *testing.T) {
             },
             expectedTaints: []v1.Taint{
                 {
-                    Key:    schedulerapi.TaintNodeNetworkUnavailable,
+                    Key:    v1.TaintNodeNetworkUnavailable,
                     Effect: v1.TaintEffectNoSchedule,
                 },
                 {
-                    Key:    schedulerapi.TaintNodeNotReady,
+                    Key:    v1.TaintNodeNotReady,
                     Effect: v1.TaintEffectNoSchedule,
                 },
             },
@@ -453,7 +452,7 @@ func TestTaintNodeByCondition(t *testing.T) {
             },
             expectedTaints: []v1.Taint{
                 {
-                    Key:    schedulerapi.TaintNodePIDPressure,
+                    Key:    v1.TaintNodePIDPressure,
                     Effect: v1.TaintEffectNoSchedule,
                 },
             },
@@ -499,15 +498,15 @@ func TestTaintNodeByCondition(t *testing.T) {
             },
             expectedTaints: []v1.Taint{
                 {
-                    Key:    schedulerapi.TaintNodeDiskPressure,
+                    Key:    v1.TaintNodeDiskPressure,
                     Effect: v1.TaintEffectNoSchedule,
                 },
                 {
-                    Key:    schedulerapi.TaintNodeMemoryPressure,
+                    Key:    v1.TaintNodeMemoryPressure,
                     Effect: v1.TaintEffectNoSchedule,
                 },
                 {
-                    Key:    schedulerapi.TaintNodePIDPressure,
+                    Key:    v1.TaintNodePIDPressure,
                     Effect: v1.TaintEffectNoSchedule,
                 },
             },
@@ -591,7 +590,7 @@ func TestTaintBasedEvictions(t *testing.T) {
             },
             Tolerations: []v1.Toleration{
                 {
-                    Key:      schedulerapi.TaintNodeNotReady,
+                    Key:      v1.TaintNodeNotReady,
                     Operator: v1.TolerationOpExists,
                     Effect:   v1.TaintEffectNoExecute,
                 },
@@ -609,14 +608,14 @@ func TestTaintBasedEvictions(t *testing.T) {
     }{
         {
             name:                "Taint based evictions for NodeNotReady and 200 tolerationseconds",
-            nodeTaints:          []v1.Taint{{Key: schedulerapi.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
+            nodeTaints:          []v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
             nodeConditions:      []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
             pod:                 testPod,
             waitForPodCondition: "updated with tolerationSeconds of 200",
         },
         {
             name:           "Taint based evictions for NodeNotReady with no pod tolerations",
-            nodeTaints:     []v1.Taint{{Key: schedulerapi.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
+            nodeTaints:     []v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
             nodeConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
             pod: &v1.Pod{
                 ObjectMeta: metav1.ObjectMeta{Name: "testpod1"},
@@ -630,14 +629,14 @@ func TestTaintBasedEvictions(t *testing.T) {
         },
         {
             name:                "Taint based evictions for NodeNotReady and 0 tolerationseconds",
-            nodeTaints:          []v1.Taint{{Key: schedulerapi.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
+            nodeTaints:          []v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
             nodeConditions:      []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
             pod:                 testPod,
             waitForPodCondition: "terminating",
         },
         {
             name:           "Taint based evictions for NodeUnreachable",
-            nodeTaints:     []v1.Taint{{Key: schedulerapi.TaintNodeUnreachable, Effect: v1.TaintEffectNoExecute}},
+            nodeTaints:     []v1.Taint{{Key: v1.TaintNodeUnreachable, Effect: v1.TaintEffectNoExecute}},
             nodeConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionUnknown}},
         },
     }
@@ -835,7 +834,7 @@ func TestTaintBasedEvictions(t *testing.T) {
 
 func getTolerationSeconds(tolerations []v1.Toleration) (int64, error) {
     for _, t := range tolerations {
-        if t.Key == schedulerapi.TaintNodeNotReady && t.Effect == v1.TaintEffectNoExecute && t.Operator == v1.TolerationOpExists {
+        if t.Key == v1.TaintNodeNotReady && t.Effect == v1.TaintEffectNoExecute && t.Operator == v1.TolerationOpExists {
             return *t.TolerationSeconds, nil
         }
     }