Move pkg/scheduler/algorithm/well_known_labels.go out

commit b7c7966b9f (parent e4200cea9c)
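This change relocates the scheduler's well-known taint and node-field-selector constants from pkg/scheduler/algorithm to pkg/scheduler/api, so every caller in the diff below switches from the algorithm package to the schedulerapi import alias. A minimal sketch of the call-site migration, assuming nothing beyond what the hunks show (the surrounding variable is illustrative, not taken from the commit):

package example

import (
	v1 "k8s.io/api/core/v1"

	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

// Before this commit the key was spelled algorithm.TaintNodeNotReady; after it,
// the same constant is reached through the scheduler API package.
var notReadyToleration = v1.Toleration{
	Key:      schedulerapi.TaintNodeNotReady,
	Operator: v1.TolerationOpExists,
	Effect:   v1.TaintEffectNoExecute,
}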
@@ -55,7 +55,7 @@ go_library(
 "//pkg/apis/core:go_default_library",
 "//pkg/apis/core/install:go_default_library",
 "//pkg/apis/core/validation:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/serviceaccount:go_default_library",
 "//pkg/util/hash:go_default_library",
 "//pkg/util/taints:go_default_library",
@@ -20,7 +20,7 @@ go_library(
 "//pkg/controller/util/node:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/kubelet/apis:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/util/node:go_default_library",
 "//pkg/volume/util:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -59,7 +59,7 @@ go_test(
 "//pkg/controller:go_default_library",
 "//pkg/controller/testutil:go_default_library",
 "//pkg/kubelet/apis:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/volume/util:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -41,7 +41,7 @@ import (
 "k8s.io/kubernetes/pkg/controller"
 nodectrlutil "k8s.io/kubernetes/pkg/controller/util/node"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 nodeutil "k8s.io/kubernetes/pkg/util/node"
 )
@@ -425,7 +425,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
 func getCloudTaint(taints []v1.Taint) *v1.Taint {
 for _, taint := range taints {
-if taint.Key == algorithm.TaintExternalCloudProvider {
+if taint.Key == schedulerapi.TaintExternalCloudProvider {
 return &taint
 }
 }
@@ -35,7 +35,7 @@ import (
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/testutil"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "github.com/golang/glog"
 "github.com/stretchr/testify/assert"
@@ -209,7 +209,7 @@ func TestNodeShutdown(t *testing.T) {
 ProviderID: "node0",
 Taints: []v1.Taint{
 {
-Key: algorithm.TaintNodeShutdown,
+Key: schedulerapi.TaintNodeShutdown,
 Effect: v1.TaintEffectNoSchedule,
 },
 },
@@ -385,7 +385,7 @@ func TestNodeInitialized(t *testing.T) {
 Spec: v1.NodeSpec{
 Taints: []v1.Taint{
 {
-Key: algorithm.TaintExternalCloudProvider,
+Key: schedulerapi.TaintExternalCloudProvider,
 Value: "true",
 Effect: v1.TaintEffectNoSchedule,
 },
@@ -524,7 +524,7 @@ func TestGCECondition(t *testing.T) {
 Spec: v1.NodeSpec{
 Taints: []v1.Taint{
 {
-Key: algorithm.TaintExternalCloudProvider,
+Key: schedulerapi.TaintExternalCloudProvider,
 Value: "true",
 Effect: v1.TaintEffectNoSchedule,
 },
@@ -610,7 +610,7 @@ func TestZoneInitialized(t *testing.T) {
 Spec: v1.NodeSpec{
 Taints: []v1.Taint{
 {
-Key: algorithm.TaintExternalCloudProvider,
+Key: schedulerapi.TaintExternalCloudProvider,
 Value: "true",
 Effect: v1.TaintEffectNoSchedule,
 },
@@ -701,7 +701,7 @@ func TestNodeAddresses(t *testing.T) {
 Effect: v1.TaintEffectNoSchedule,
 },
 {
-Key: algorithm.TaintExternalCloudProvider,
+Key: schedulerapi.TaintExternalCloudProvider,
 Value: "true",
 Effect: v1.TaintEffectNoSchedule,
 },
@@ -816,7 +816,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
 Effect: v1.TaintEffectNoSchedule,
 },
 {
-Key: algorithm.TaintExternalCloudProvider,
+Key: schedulerapi.TaintExternalCloudProvider,
 Value: "true",
 Effect: v1.TaintEffectNoSchedule,
 },
@@ -1110,7 +1110,7 @@ func TestNodeProviderID(t *testing.T) {
 Effect: v1.TaintEffectNoSchedule,
 },
 {
-Key: algorithm.TaintExternalCloudProvider,
+Key: schedulerapi.TaintExternalCloudProvider,
 Value: "true",
 Effect: v1.TaintEffectNoSchedule,
 },
@@ -1194,7 +1194,7 @@ func TestNodeProviderIDAlreadySet(t *testing.T) {
 Effect: v1.TaintEffectNoSchedule,
 },
 {
-Key: algorithm.TaintExternalCloudProvider,
+Key: schedulerapi.TaintExternalCloudProvider,
 Value: "true",
 Effect: v1.TaintEffectNoSchedule,
 },
@@ -47,7 +47,7 @@ import (
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 _ "k8s.io/kubernetes/pkg/apis/core/install"
 "k8s.io/kubernetes/pkg/apis/core/validation"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 hashutil "k8s.io/kubernetes/pkg/util/hash"
 taintutils "k8s.io/kubernetes/pkg/util/taints"
@@ -90,7 +90,7 @@ var UpdateTaintBackoff = wait.Backoff{
 }
 var ShutdownTaint = &v1.Taint{
-Key: algorithm.TaintNodeShutdown,
+Key: schedulerapi.TaintNodeShutdown,
 Effect: v1.TaintEffectNoSchedule,
 }
@@ -71,7 +71,7 @@ go_test(
 "//pkg/controller:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/kubelet/types:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/securitycontext:go_default_library",
 "//pkg/util/labels:go_default_library",
 "//staging/src/k8s.io/api/apps/v1:go_default_library",
@@ -48,7 +48,7 @@ import (
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/features"
 kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/pkg/securitycontext"
 labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
@@ -78,13 +78,13 @@ func nowPointer() *metav1.Time {
 var (
 nodeNotReady = []v1.Taint{{
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Effect: v1.TaintEffectNoExecute,
 TimeAdded: nowPointer(),
 }}
 nodeUnreachable = []v1.Taint{{
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Effect: v1.TaintEffectNoExecute,
 TimeAdded: nowPointer(),
 }}
@@ -529,7 +529,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
 }
 field := nodeSelector.NodeSelectorTerms[0].MatchFields[0]
-if field.Key == algorithm.NodeFieldSelectorKeyNodeName {
+if field.Key == schedulerapi.NodeFieldSelectorKeyNodeName {
 if field.Operator != v1.NodeSelectorOpIn {
 t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn)
 }
@@ -1759,7 +1759,7 @@ func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
 node := newNode("not-enough-disk", nil)
 node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
-node.Spec.Taints = []v1.Taint{{Key: algorithm.TaintNodeOutOfDisk, Effect: v1.TaintEffectNoSchedule}}
+node.Spec.Taints = []v1.Taint{{Key: schedulerapi.TaintNodeOutOfDisk, Effect: v1.TaintEffectNoSchedule}}
 manager.nodeStore.Add(node)
 // NOTE: Whether or not TaintNodesByCondition is enabled, it'll add toleration to DaemonSet pods.
@@ -1801,8 +1801,8 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
 {Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue},
 }
 node.Spec.Taints = []v1.Taint{
-{Key: algorithm.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
-{Key: algorithm.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
+{Key: schedulerapi.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
+{Key: schedulerapi.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
 }
 manager.nodeStore.Add(node)
@@ -15,7 +15,7 @@ go_library(
 "//pkg/apis/core/v1/helper:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/kubelet/types:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//staging/src/k8s.io/api/apps/v1:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
@@ -45,7 +45,7 @@ go_test(
 "//pkg/api/testapi:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/kubelet/apis:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -29,7 +29,7 @@ import (
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 "k8s.io/kubernetes/pkg/features"
 kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 // GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
@@ -55,7 +55,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
 // to survive taint-based eviction enforced by NodeController
 // when node turns not ready.
 v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: v1.TolerationOpExists,
 Effect: v1.TaintEffectNoExecute,
 })
@@ -65,7 +65,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
 // to survive taint-based eviction enforced by NodeController
 // when node turns unreachable.
 v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: v1.TolerationOpExists,
 Effect: v1.TaintEffectNoExecute,
 })
@@ -74,26 +74,26 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
 // MemoryPressure, DisPressure, Unschedulable and NetworkUnavailable taints,
 // and the critical pods should tolerate OutOfDisk taint.
 v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeDiskPressure,
+Key: schedulerapi.TaintNodeDiskPressure,
 Operator: v1.TolerationOpExists,
 Effect: v1.TaintEffectNoSchedule,
 })
 v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeMemoryPressure,
+Key: schedulerapi.TaintNodeMemoryPressure,
 Operator: v1.TolerationOpExists,
 Effect: v1.TaintEffectNoSchedule,
 })
 v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeUnschedulable,
+Key: schedulerapi.TaintNodeUnschedulable,
 Operator: v1.TolerationOpExists,
 Effect: v1.TaintEffectNoSchedule,
 })
 if spec.HostNetwork {
 v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeNetworkUnavailable,
+Key: schedulerapi.TaintNodeNetworkUnavailable,
 Operator: v1.TolerationOpExists,
 Effect: v1.TaintEffectNoSchedule,
 })
@@ -102,12 +102,12 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
 // TODO(#48843) OutOfDisk taints will be removed in 1.10
 if isCritical {
 v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeOutOfDisk,
+Key: schedulerapi.TaintNodeOutOfDisk,
 Operator: v1.TolerationOpExists,
 Effect: v1.TaintEffectNoExecute,
 })
 v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeOutOfDisk,
+Key: schedulerapi.TaintNodeOutOfDisk,
 Operator: v1.TolerationOpExists,
 Effect: v1.TaintEffectNoSchedule,
 })
@@ -167,7 +167,7 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*
 // Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
 func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
 nodeSelReq := v1.NodeSelectorRequirement{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{nodename},
 }
@@ -236,11 +236,11 @@ func GetTargetNodeName(pod *v1.Pod) (string, error) {
 for _, term := range terms {
 for _, exp := range term.MatchFields {
-if exp.Key == algorithm.NodeFieldSelectorKeyNodeName &&
+if exp.Key == schedulerapi.NodeFieldSelectorKeyNodeName &&
 exp.Operator == v1.NodeSelectorOpIn {
 if len(exp.Values) != 1 {
 return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
-algorithm.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
+schedulerapi.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
 }
 return exp.Values[0], nil
@@ -28,7 +28,7 @@ import (
 "k8s.io/kubernetes/pkg/api/testapi"
 "k8s.io/kubernetes/pkg/features"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
@@ -190,7 +190,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1"},
 },
@@ -227,7 +227,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1"},
 },
@@ -277,7 +277,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1"},
 },
@@ -296,7 +296,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1", "host_2"},
 },
@@ -314,7 +314,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1"},
 },
@@ -335,7 +335,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1"},
 },
@@ -363,7 +363,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_2"},
 },
@@ -381,7 +381,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1"},
 },
@@ -400,7 +400,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpNotIn,
 Values: []string{"host_2"},
 },
@@ -418,7 +418,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1"},
 },
@@ -458,7 +458,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"host_1"},
 },
@@ -529,7 +529,7 @@ func TestGetTargetNodeName(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node-1"},
 },
@@ -557,7 +557,7 @@ func TestGetTargetNodeName(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node-1", "node-2"},
 },
@@ -48,7 +48,7 @@ go_library(
 "//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
 "//pkg/controller/nodeipam/ipam/sync:go_default_library",
 "//pkg/controller/util/node:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/util/node:go_default_library",
 "//pkg/util/taints:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -43,7 +43,7 @@ import (
 "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 "k8s.io/kubernetes/pkg/controller"
 nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 utilnode "k8s.io/kubernetes/pkg/util/node"
 utiltaints "k8s.io/kubernetes/pkg/util/taints"
 )
@@ -117,7 +117,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
 }
 // Even if PodCIDR is assigned, but NetworkUnavailable condition is
 // set to true, we need to process the node to set the condition.
-networkUnavailableTaint := &v1.Taint{Key: algorithm.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
+networkUnavailableTaint := &v1.Taint{Key: schedulerapi.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
 _, cond := v1node.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
 if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
 return ca.AllocateOrOccupyCIDR(newNode)
@@ -14,7 +14,7 @@ go_library(
 "//pkg/controller/nodelifecycle/scheduler:go_default_library",
 "//pkg/controller/util/node:go_default_library",
 "//pkg/features:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/util/metrics:go_default_library",
 "//pkg/util/node:go_default_library",
 "//pkg/util/system:go_default_library",
@@ -76,7 +76,7 @@ go_test(
 "//pkg/controller/util/node:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/kubelet/apis:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/util/node:go_default_library",
 "//pkg/util/taints:go_default_library",
 "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
@@ -59,7 +59,7 @@ import (
 "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
 nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
 "k8s.io/kubernetes/pkg/features"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/pkg/util/metrics"
 utilnode "k8s.io/kubernetes/pkg/util/node"
 "k8s.io/kubernetes/pkg/util/system"
@@ -74,14 +74,14 @@ func init() {
 var (
 // UnreachableTaintTemplate is the taint for when a node becomes unreachable.
 UnreachableTaintTemplate = &v1.Taint{
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Effect: v1.TaintEffectNoExecute,
 }
 // NotReadyTaintTemplate is the taint for when a node is not ready for
 // executing pods
 NotReadyTaintTemplate = &v1.Taint{
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Effect: v1.TaintEffectNoExecute,
 }
@@ -91,34 +91,34 @@ var (
 // for certain NodeConditionType, there are multiple {ConditionStatus,TaintKey} pairs
 nodeConditionToTaintKeyStatusMap = map[v1.NodeConditionType]map[v1.ConditionStatus]string{
 v1.NodeReady: {
-v1.ConditionFalse: algorithm.TaintNodeNotReady,
-v1.ConditionUnknown: algorithm.TaintNodeUnreachable,
+v1.ConditionFalse: schedulerapi.TaintNodeNotReady,
+v1.ConditionUnknown: schedulerapi.TaintNodeUnreachable,
 },
 v1.NodeMemoryPressure: {
-v1.ConditionTrue: algorithm.TaintNodeMemoryPressure,
+v1.ConditionTrue: schedulerapi.TaintNodeMemoryPressure,
 },
 v1.NodeOutOfDisk: {
-v1.ConditionTrue: algorithm.TaintNodeOutOfDisk,
+v1.ConditionTrue: schedulerapi.TaintNodeOutOfDisk,
 },
 v1.NodeDiskPressure: {
-v1.ConditionTrue: algorithm.TaintNodeDiskPressure,
+v1.ConditionTrue: schedulerapi.TaintNodeDiskPressure,
 },
 v1.NodeNetworkUnavailable: {
-v1.ConditionTrue: algorithm.TaintNodeNetworkUnavailable,
+v1.ConditionTrue: schedulerapi.TaintNodeNetworkUnavailable,
 },
 v1.NodePIDPressure: {
-v1.ConditionTrue: algorithm.TaintNodePIDPressure,
+v1.ConditionTrue: schedulerapi.TaintNodePIDPressure,
 },
 }
 taintKeyToNodeConditionMap = map[string]v1.NodeConditionType{
-algorithm.TaintNodeNotReady: v1.NodeReady,
-algorithm.TaintNodeUnreachable: v1.NodeReady,
-algorithm.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable,
-algorithm.TaintNodeMemoryPressure: v1.NodeMemoryPressure,
-algorithm.TaintNodeOutOfDisk: v1.NodeOutOfDisk,
-algorithm.TaintNodeDiskPressure: v1.NodeDiskPressure,
-algorithm.TaintNodePIDPressure: v1.NodePIDPressure,
+schedulerapi.TaintNodeNotReady: v1.NodeReady,
+schedulerapi.TaintNodeUnreachable: v1.NodeReady,
+schedulerapi.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable,
+schedulerapi.TaintNodeMemoryPressure: v1.NodeMemoryPressure,
+schedulerapi.TaintNodeOutOfDisk: v1.NodeOutOfDisk,
+schedulerapi.TaintNodeDiskPressure: v1.NodeDiskPressure,
+schedulerapi.TaintNodePIDPressure: v1.NodePIDPressure,
 }
 )
@@ -455,21 +455,21 @@ func (nc *Controller) doFixDeprecatedTaintKeyPass(node *v1.Node) error {
 taintsToDel := []*v1.Taint{}
 for _, taint := range node.Spec.Taints {
-if taint.Key == algorithm.DeprecatedTaintNodeNotReady {
+if taint.Key == schedulerapi.DeprecatedTaintNodeNotReady {
 tDel := taint
 taintsToDel = append(taintsToDel, &tDel)
 tAdd := taint
-tAdd.Key = algorithm.TaintNodeNotReady
+tAdd.Key = schedulerapi.TaintNodeNotReady
 taintsToAdd = append(taintsToAdd, &tAdd)
 }
-if taint.Key == algorithm.DeprecatedTaintNodeUnreachable {
+if taint.Key == schedulerapi.DeprecatedTaintNodeUnreachable {
 tDel := taint
 taintsToDel = append(taintsToDel, &tDel)
 tAdd := taint
-tAdd.Key = algorithm.TaintNodeUnreachable
+tAdd.Key = schedulerapi.TaintNodeUnreachable
 taintsToAdd = append(taintsToAdd, &tAdd)
 }
 }
@@ -530,7 +530,7 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
 if node.Spec.Unschedulable {
 // If unschedulable, append related taint.
 taints = append(taints, v1.Taint{
-Key: algorithm.TaintNodeUnschedulable,
+Key: schedulerapi.TaintNodeUnschedulable,
 Effect: v1.TaintEffectNoSchedule,
 })
 }
@@ -542,7 +542,7 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
 return false
 }
 // Find unschedulable taint of node.
-if t.Key == algorithm.TaintNodeUnschedulable {
+if t.Key == schedulerapi.TaintNodeUnschedulable {
 return true
 }
 // Find node condition taints of node.
@@ -48,7 +48,7 @@ import (
 nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
 "k8s.io/kubernetes/pkg/features"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/pkg/util/node"
 taintutils "k8s.io/kubernetes/pkg/util/taints"
 "k8s.io/utils/pointer"
@@ -1435,7 +1435,7 @@ func TestCloudProviderNodeShutdown(t *testing.T) {
 ProviderID: "node0",
 Taints: []v1.Taint{
 {
-Key: algorithm.TaintNodeShutdown,
+Key: schedulerapi.TaintNodeShutdown,
 Effect: v1.TaintEffectNoSchedule,
 },
 },
@@ -2806,19 +2806,19 @@ func TestTaintsNodeByCondition(t *testing.T) {
 nodeController.recorder = testutil.NewFakeRecorder()
 outOfDiskTaint := &v1.Taint{
-Key: algorithm.TaintNodeOutOfDisk,
+Key: schedulerapi.TaintNodeOutOfDisk,
 Effect: v1.TaintEffectNoSchedule,
 }
 networkUnavailableTaint := &v1.Taint{
-Key: algorithm.TaintNodeNetworkUnavailable,
+Key: schedulerapi.TaintNodeNetworkUnavailable,
 Effect: v1.TaintEffectNoSchedule,
 }
 notReadyTaint := &v1.Taint{
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Effect: v1.TaintEffectNoSchedule,
 }
 unreachableTaint := &v1.Taint{
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Effect: v1.TaintEffectNoSchedule,
 }
@@ -3115,22 +3115,22 @@ func TestFixDeprecatedTaintKey(t *testing.T) {
 nodeController.recorder = testutil.NewFakeRecorder()
 deprecatedNotReadyTaint := &v1.Taint{
-Key: algorithm.DeprecatedTaintNodeNotReady,
+Key: schedulerapi.DeprecatedTaintNodeNotReady,
 Effect: v1.TaintEffectNoExecute,
 }
 nodeNotReadyTaint := &v1.Taint{
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Effect: v1.TaintEffectNoExecute,
 }
 deprecatedUnreachableTaint := &v1.Taint{
-Key: algorithm.DeprecatedTaintNodeUnreachable,
+Key: schedulerapi.DeprecatedTaintNodeUnreachable,
 Effect: v1.TaintEffectNoExecute,
 }
 nodeUnreachableTaint := &v1.Taint{
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Effect: v1.TaintEffectNoExecute,
 }
@@ -96,8 +96,8 @@ go_library(
 "//pkg/kubelet/util/queue:go_default_library",
 "//pkg/kubelet/util/sliceutils:go_default_library",
 "//pkg/kubelet/volumemanager:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
 "//pkg/scheduler/algorithm/predicates:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/security/apparmor:go_default_library",
 "//pkg/security/podsecuritypolicy/sysctl:go_default_library",
 "//pkg/securitycontext:go_default_library",
@@ -205,7 +205,7 @@ go_test(
 "//pkg/kubelet/util/queue:go_default_library",
 "//pkg/kubelet/util/sliceutils:go_default_library",
 "//pkg/kubelet/volumemanager:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/cache:go_default_library",
 "//pkg/util/mount:go_default_library",
 "//pkg/util/taints:go_default_library",
@@ -59,7 +59,7 @@ go_library(
 "//pkg/kubelet/server/stats:go_default_library",
 "//pkg/kubelet/types:go_default_library",
 "//pkg/kubelet/util/format:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/util:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -41,7 +41,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/server/stats"
 kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/kubelet/util/format"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 const (
@@ -145,7 +145,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
 // admit it if tolerates memory pressure taint, fail for other tolerations, e.g. OutOfDisk.
 if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) &&
 v1helper.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{
-Key: algorithm.TaintNodeMemoryPressure,
+Key: schedulerapi.TaintNodeMemoryPressure,
 Effect: v1.TaintEffectNoSchedule,
 }) {
 return lifecycle.PodAdmitResult{Admit: true}
@@ -39,7 +39,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/events"
 "k8s.io/kubernetes/pkg/kubelet/nodestatus"
 "k8s.io/kubernetes/pkg/kubelet/util"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 nodeutil "k8s.io/kubernetes/pkg/util/node"
 taintutil "k8s.io/kubernetes/pkg/util/taints"
 volutil "k8s.io/kubernetes/pkg/volume/util"
@@ -232,7 +232,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
 }
 unschedulableTaint := v1.Taint{
-Key: algorithm.TaintNodeUnschedulable,
+Key: schedulerapi.TaintNodeUnschedulable,
 Effect: v1.TaintEffectNoSchedule,
 }
@@ -247,7 +247,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
 if kl.externalCloudProvider {
 taint := v1.Taint{
-Key: algorithm.TaintExternalCloudProvider,
+Key: schedulerapi.TaintExternalCloudProvider,
 Value: "true",
 Effect: v1.TaintEffectNoSchedule,
 }
@@ -54,7 +54,7 @@ import (
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/nodestatus"
 "k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 taintutil "k8s.io/kubernetes/pkg/util/taints"
 "k8s.io/kubernetes/pkg/version"
 "k8s.io/kubernetes/pkg/volume/util"
@@ -1549,7 +1549,7 @@ func TestRegisterWithApiServerWithTaint(t *testing.T) {
 // Check the unschedulable taint.
 got := gotNode.(*v1.Node)
 unschedulableTaint := &v1.Taint{
-Key: algorithm.TaintNodeUnschedulable,
+Key: schedulerapi.TaintNodeUnschedulable,
 Effect: v1.TaintEffectNoSchedule,
 }
@@ -12,11 +12,9 @@ go_library(
 "doc.go",
 "scheduler_interface.go",
 "types.go",
-"well_known_labels.go",
 ],
 importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm",
 deps = [
-"//pkg/apis/core:go_default_library",
 "//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/cache:go_default_library",
 "//pkg/scheduler/internal/cache:go_default_library",
@@ -24,6 +24,7 @@ go_library(
 "//pkg/kubelet/apis:go_default_library",
 "//pkg/scheduler/algorithm:go_default_library",
 "//pkg/scheduler/algorithm/priorities/util:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/cache:go_default_library",
 "//pkg/scheduler/util:go_default_library",
 "//pkg/scheduler/volumebinder:go_default_library",
@@ -59,6 +60,7 @@ go_test(
 "//pkg/features:go_default_library",
 "//pkg/kubelet/apis:go_default_library",
 "//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/cache:go_default_library",
 "//pkg/scheduler/testing:go_default_library",
 "//pkg/volume/util:go_default_library",
@@ -42,6 +42,7 @@ import (
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 "k8s.io/kubernetes/pkg/scheduler/algorithm"
 priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 "k8s.io/kubernetes/pkg/scheduler/volumebinder"
@@ -1472,7 +1473,7 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada
 // If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`.
 podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{
-Key: algorithm.TaintNodeUnschedulable,
+Key: schedulerapi.TaintNodeUnschedulable,
 Effect: v1.TaintEffectNoSchedule,
 })
@@ -33,6 +33,7 @@ import (
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 "k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -1417,7 +1418,7 @@ func TestPodFitsSelector(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node_1"},
 },
@@ -1443,7 +1444,7 @@ func TestPodFitsSelector(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node_1"},
 },
@@ -1469,7 +1470,7 @@ func TestPodFitsSelector(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node_1"},
 },
@@ -1505,7 +1506,7 @@ func TestPodFitsSelector(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node_1"},
 },
@@ -1539,7 +1540,7 @@ func TestPodFitsSelector(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node_1"},
 },
@@ -1573,7 +1574,7 @@ func TestPodFitsSelector(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node_1"},
 },
@@ -5016,7 +5017,7 @@ func TestCheckNodeUnschedulablePredicate(t *testing.T) {
 Spec: v1.PodSpec{
 Tolerations: []v1.Toleration{
 {
-Key: algorithm.TaintNodeUnschedulable,
+Key: schedulerapi.TaintNodeUnschedulable,
 Effect: v1.TaintEffectNoSchedule,
 },
 },
@@ -29,7 +29,7 @@ import (
 // NodeFieldSelectorKeys is a map that: the key are node field selector keys; the values are
 // the functions to get the value of the node field.
 var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
-NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
+schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
 }
 // FitPredicate is a function that indicates if a pod fits into an existing node.
@@ -11,10 +11,12 @@ go_library(
 "doc.go",
 "register.go",
 "types.go",
+"well_known_labels.go",
 "zz_generated.deepcopy.go",
 ],
 importpath = "k8s.io/kubernetes/pkg/scheduler/api",
 deps = [
+"//pkg/apis/core:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package algorithm
+package api
 import (
 api "k8s.io/kubernetes/pkg/apis/core"
@@ -13,7 +13,7 @@ go_test(
 deps = [
 "//pkg/apis/core:go_default_library",
 "//pkg/apis/core/helper:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
 ],
 )
@@ -24,7 +24,7 @@ go_library(
 importpath = "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds",
 deps = [
 "//pkg/apis/core:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
 "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
 ],
@@ -24,7 +24,7 @@ import (
 "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apiserver/pkg/admission"
 api "k8s.io/kubernetes/pkg/apis/core"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 // PluginName indicates name of admission plugin.
@@ -40,14 +40,14 @@ var (
 " that is added by default to every pod that does not already have such a toleration.")
 notReadyToleration = api.Toleration{
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: defaultNotReadyTolerationSeconds,
 }
 unreachableToleration = api.Toleration{
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: defaultUnreachableTolerationSeconds,
@@ -101,12 +101,12 @@ func (p *Plugin) Admit(attributes admission.Attributes) (err error) {
 toleratesNodeNotReady := false
 toleratesNodeUnreachable := false
 for _, toleration := range tolerations {
-if (toleration.Key == algorithm.TaintNodeNotReady || len(toleration.Key) == 0) &&
+if (toleration.Key == schedulerapi.TaintNodeNotReady || len(toleration.Key) == 0) &&
 (toleration.Effect == api.TaintEffectNoExecute || len(toleration.Effect) == 0) {
 toleratesNodeNotReady = true
 }
-if (toleration.Key == algorithm.TaintNodeUnreachable || len(toleration.Key) == 0) &&
+if (toleration.Key == schedulerapi.TaintNodeUnreachable || len(toleration.Key) == 0) &&
 (toleration.Effect == api.TaintEffectNoExecute || len(toleration.Effect) == 0) {
 toleratesNodeUnreachable = true
 }
@@ -22,7 +22,7 @@ import (
 "k8s.io/apiserver/pkg/admission"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/apis/core/helper"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 func TestForgivenessAdmission(t *testing.T) {
@@ -48,13 +48,13 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -70,13 +70,13 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.DeprecatedTaintNodeNotReady,
+Key: schedulerapi.DeprecatedTaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.DeprecatedTaintNodeUnreachable,
+Key: schedulerapi.DeprecatedTaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -88,25 +88,25 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.DeprecatedTaintNodeNotReady,
+Key: schedulerapi.DeprecatedTaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.DeprecatedTaintNodeUnreachable,
+Key: schedulerapi.DeprecatedTaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -122,7 +122,7 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.DeprecatedTaintNodeNotReady,
+Key: schedulerapi.DeprecatedTaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -134,19 +134,19 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.DeprecatedTaintNodeNotReady,
+Key: schedulerapi.DeprecatedTaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -162,7 +162,7 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.DeprecatedTaintNodeUnreachable,
+Key: schedulerapi.DeprecatedTaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -174,19 +174,19 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.DeprecatedTaintNodeUnreachable,
+Key: schedulerapi.DeprecatedTaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -221,13 +221,13 @@ func TestForgivenessAdmission(t *testing.T) {
 TolerationSeconds: genTolerationSeconds(700),
 },
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
 },
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -242,7 +242,7 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(700),
@@ -254,13 +254,13 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(700),
 },
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -275,7 +275,7 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(700),
@@ -287,13 +287,13 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(700),
 },
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: &defaultTolerationSeconds,
@@ -308,13 +308,13 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(700),
 },
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(700),
@@ -326,13 +326,13 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(700),
 },
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(700),
@@ -347,7 +347,7 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 TolerationSeconds: genTolerationSeconds(700),
 },
@@ -358,12 +358,12 @@ func TestForgivenessAdmission(t *testing.T) {
 Spec: api.PodSpec{
 Tolerations: []api.Toleration{
 {
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
 Operator: api.TolerationOpExists,
 TolerationSeconds: genTolerationSeconds(700),
 },
 {
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoExecute,
 TolerationSeconds: genTolerationSeconds(300),
@@ -16,7 +16,7 @@ go_test(
 "//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
 "//pkg/client/informers/informers_generated/internalversion:go_default_library",
 "//pkg/kubeapiserver/admission:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/util/tolerations:go_default_library",
 "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -42,7 +42,7 @@ go_library(
 "//pkg/client/listers/core/internalversion:go_default_library",
 "//pkg/kubeapiserver/admission:go_default_library",
 "//pkg/kubeapiserver/admission/util:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/util/tolerations:go_default_library",
 "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
 "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install:go_default_library",
@@ -35,7 +35,7 @@ import (
 corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
 kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
 "k8s.io/kubernetes/pkg/kubeapiserver/admission/util"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/pkg/util/tolerations"
 pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
 )
@@ -129,7 +129,7 @@ func (p *podTolerationsPlugin) Admit(a admission.Attributes) error {
 if qoshelper.GetPodQOS(pod) != api.PodQOSBestEffort {
 finalTolerations = tolerations.MergeTolerations(finalTolerations, []api.Toleration{
 {
-Key: algorithm.TaintNodeMemoryPressure,
+Key: schedulerapi.TaintNodeMemoryPressure,
 Operator: api.TolerationOpExists,
 Effect: api.TaintEffectNoSchedule,
 },
@@ -30,7 +30,7 @@ import (
 "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
 kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/pkg/util/tolerations"
 pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
 )
@@ -194,7 +194,7 @@ func TestPodAdmission(t *testing.T) {
 whitelist: []api.Toleration{},
 podTolerations: []api.Toleration{},
 mergedTolerations: []api.Toleration{
-{Key: algorithm.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
+{Key: schedulerapi.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
 {Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
 },
 admit: true,
@@ -207,7 +207,7 @@ func TestPodAdmission(t *testing.T) {
 whitelist: []api.Toleration{},
 podTolerations: []api.Toleration{},
 mergedTolerations: []api.Toleration{
-{Key: algorithm.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
+{Key: schedulerapi.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
 {Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
 },
 admit: true,
@@ -20,8 +20,8 @@ go_test(
 "//pkg/controller/daemon:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/scheduler:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
 "//pkg/scheduler/algorithmprovider:go_default_library",
+"//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/factory:go_default_library",
 "//pkg/util/labels:go_default_library",
 "//staging/src/k8s.io/api/apps/v1:go_default_library",
@@ -47,9 +47,9 @@ import (
 "k8s.io/kubernetes/pkg/controller/daemon"
 "k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/scheduler"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
 "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/pkg/scheduler/factory"
 labelsutil "k8s.io/kubernetes/pkg/util/labels"
 "k8s.io/kubernetes/test/integration/framework"
@@ -630,7 +630,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
 {
 MatchFields: []v1.NodeSelectorRequirement{
 {
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
 Operator: v1.NodeSelectorOpIn,
 Values: []string{"node-1"},
 },
@@ -1059,7 +1059,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
 node.Spec.Unschedulable = true
 node.Spec.Taints = []v1.Taint{
 {
-Key: algorithm.TaintNodeUnschedulable,
+Key: schedulerapi.TaintNodeUnschedulable,
 Effect: v1.TaintEffectNoSchedule,
 },
 }
@@ -1077,7 +1077,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
 }
 nodeNU.Spec.Taints = []v1.Taint{
 {
-Key: algorithm.TaintNodeNetworkUnavailable,
+Key: schedulerapi.TaintNodeNetworkUnavailable,
 Effect: v1.TaintEffectNoSchedule,
 },
 }
@ -18,7 +18,7 @@ go_test(
],
deps = [
"//pkg/apis/core/helper:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -25,7 +25,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
"k8s.io/kubernetes/test/integration/framework"
)
@ -64,14 +64,14 @@ func TestAdmission(t *testing.T) {

var defaultSeconds int64 = 300
nodeNotReady := v1.Toleration{
Key: algorithm.TaintNodeNotReady,
Key: schedulerapi.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
TolerationSeconds: &defaultSeconds,
}

nodeUnreachable := v1.Toleration{
Key: algorithm.TaintNodeUnreachable,
Key: schedulerapi.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
TolerationSeconds: &defaultSeconds,
@ -33,8 +33,8 @@ import (
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
)
@ -144,49 +144,49 @@ func TestTaintNodeByCondition(t *testing.T) {
}

notReadyToleration := v1.Toleration{
Key: algorithm.TaintNodeNotReady,
Key: schedulerapi.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

unreachableToleration := v1.Toleration{
Key: algorithm.TaintNodeUnreachable,
Key: schedulerapi.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

unschedulableToleration := v1.Toleration{
Key: algorithm.TaintNodeUnschedulable,
Key: schedulerapi.TaintNodeUnschedulable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

outOfDiskToleration := v1.Toleration{
Key: algorithm.TaintNodeOutOfDisk,
Key: schedulerapi.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

memoryPressureToleration := v1.Toleration{
Key: algorithm.TaintNodeMemoryPressure,
Key: schedulerapi.TaintNodeMemoryPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

diskPressureToleration := v1.Toleration{
Key: algorithm.TaintNodeDiskPressure,
Key: schedulerapi.TaintNodeDiskPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

networkUnavailableToleration := v1.Toleration{
Key: algorithm.TaintNodeNetworkUnavailable,
Key: schedulerapi.TaintNodeNetworkUnavailable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

pidPressureToleration := v1.Toleration{
Key: algorithm.TaintNodePIDPressure,
Key: schedulerapi.TaintNodePIDPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
@ -220,7 +220,7 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeNotReady,
Key: schedulerapi.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -248,7 +248,7 @@ func TestTaintNodeByCondition(t *testing.T) {
name: "unreachable node",
existingTaints: []v1.Taint{
{
Key: algorithm.TaintNodeUnreachable,
Key: schedulerapi.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -260,7 +260,7 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeUnreachable,
Key: schedulerapi.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -295,7 +295,7 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeUnschedulable,
Key: schedulerapi.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -333,7 +333,7 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeOutOfDisk,
Key: schedulerapi.TaintNodeOutOfDisk,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -377,7 +377,7 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeMemoryPressure,
Key: schedulerapi.TaintNodeMemoryPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -422,7 +422,7 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeDiskPressure,
Key: schedulerapi.TaintNodeDiskPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -466,7 +466,7 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeNetworkUnavailable,
Key: schedulerapi.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -506,11 +506,11 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeNetworkUnavailable,
Key: schedulerapi.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: algorithm.TaintNodeNotReady,
Key: schedulerapi.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -558,7 +558,7 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodePIDPressure,
Key: schedulerapi.TaintNodePIDPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
@ -604,15 +604,15 @@ func TestTaintNodeByCondition(t *testing.T) {
},
expectedTaints: []v1.Taint{
{
Key: algorithm.TaintNodeDiskPressure,
Key: schedulerapi.TaintNodeDiskPressure,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: algorithm.TaintNodeMemoryPressure,
Key: schedulerapi.TaintNodeMemoryPressure,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: algorithm.TaintNodePIDPressure,
Key: schedulerapi.TaintNodePIDPressure,
Effect: v1.TaintEffectNoSchedule,
},
},