Cleanup obsolete NodeInfo methods

Abdullah Gharaibeh 2020-04-10 13:49:39 -04:00
parent 3641d40a98
commit bed9b2f23b
29 changed files with 78 additions and 229 deletions
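Across these files the pattern is the same: callers stop going through the helper methods this commit deletes from NodeInfo (Taints, AllocatableResource, SetAllocatableResource) and read or assign the underlying data directly. A minimal before/after sketch of the migration; the wrapper function here is invented purely for illustration:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// migrate shows the replacements applied throughout this commit.
func migrate(node *v1.Node, pods []*v1.Pod) {
	// Taints: read the node spec directly instead of nodeInfo.Taints(),
	// which also removes a never-useful error return.
	taints := node.Spec.Taints
	_ = taints

	nodeInfo := framework.NewNodeInfo(pods...)
	nodeInfo.SetNode(node)

	// AllocatableResource()/SetAllocatableResource(): use the exported
	// Allocatable field instead of the removed getter/setter.
	alloc := nodeInfo.Allocatable.Clone()
	nodeInfo.Allocatable = alloc
}
```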

View File

@@ -20,7 +20,6 @@ go_library(
         "//pkg/controller:go_default_library",
         "//pkg/controller/daemon/util:go_default_library",
         "//pkg/scheduler/framework/plugins/helper:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",

View File

@@ -53,7 +53,6 @@ import (
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/daemon/util"
     pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     "k8s.io/utils/integer"
 )
@@ -1200,14 +1199,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
         return false, false, nil
     }
-    nodeInfo := schedulernodeinfo.NewNodeInfo()
-    nodeInfo.SetNode(node)
-    taints, err := nodeInfo.Taints()
-    if err != nil {
-        klog.Warningf("failed to get node %q taints: %v", node.Name, err)
-        return false, false, err
-    }
+    taints := node.Spec.Taints
     fitsNodeName, fitsNodeAffinity, fitsTaints := Predicates(pod, node, taints)
     if !fitsNodeName || !fitsNodeAffinity {
         return false, false, nil
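With the helper gone, nodeShouldRunDaemonPod hands the taints straight from the node spec to the daemon package's Predicates helper, and there is no longer an error path to handle. A sketch of the simplified flow; the wrapper function is illustrative:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/daemon"
)

// fitsNode mirrors the trimmed body above: no NodeInfo construction,
// no error handling, just a direct read of node.Spec.Taints.
func fitsNode(pod *v1.Pod, node *v1.Node) bool {
	fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(pod, node, node.Spec.Taints)
	return fitsNodeName && fitsNodeAffinity && fitsTaints
}
```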

View File

@@ -213,7 +213,7 @@ go_test(
         "//pkg/kubelet/util/queue:go_default_library",
         "//pkg/kubelet/util/sliceutils:go_default_library",
         "//pkg/kubelet/volumemanager:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/util/taints:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/awsebs:go_default_library",

View File

@@ -36,7 +36,7 @@ go_library(
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/pluginmanager/cache:go_default_library",
         "//pkg/kubelet/status:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",

View File

@@ -31,7 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
     "k8s.io/kubernetes/pkg/kubelet/status"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     "fmt"
     "strconv"
@@ -91,7 +91,7 @@ type ContainerManager interface {
     // Otherwise, it updates allocatableResource in nodeInfo if necessary,
     // to make sure it is at least equal to the pod's requested capacity for
     // any registered device plugin resource
-    UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error
+    UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error
     InternalContainerLifecycle() InternalContainerLifecycle
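Every implementer of ContainerManager now has to accept the scheduler framework's NodeInfo type. A minimal sketch of a conforming no-op implementation, matching the stub managers updated later in this diff; the noopManager type is hypothetical:

```go
package example

import (
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// noopManager satisfies just the updated UpdatePluginResources signature
// and leaves the node's allocatable resources untouched.
type noopManager struct{}

func (m *noopManager) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
	return nil
}
```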

View File

@@ -62,7 +62,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/qos"
     "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
     "k8s.io/kubernetes/pkg/kubelet/status"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     "k8s.io/kubernetes/pkg/util/oom"
     "k8s.io/kubernetes/pkg/util/procfs"
     utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
@@ -675,7 +675,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
     return opts, nil
 }
-func (cm *containerManagerImpl) UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
     return cm.deviceManager.UpdatePluginResources(node, attrs)
 }

View File

@@ -30,7 +30,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
     "k8s.io/kubernetes/pkg/kubelet/status"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 type containerManagerStub struct {
@@ -97,7 +97,7 @@ func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Containe
     return &kubecontainer.RunContainerOptions{}, nil
 }
-func (cm *containerManagerStub) UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerStub) UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
     return nil
 }

View File

@@ -42,7 +42,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
     "k8s.io/kubernetes/pkg/kubelet/status"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 type containerManagerImpl struct {
@@ -165,7 +165,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
     return &kubecontainer.RunContainerOptions{}, nil
 }
-func (cm *containerManagerImpl) UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
     return nil
 }

View File

@@ -28,7 +28,7 @@ go_library(
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/pluginmanager/cache:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/util/selinux:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -57,7 +57,7 @@ go_test(
         "//pkg/kubelet/config:go_default_library",
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/pluginmanager:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -48,7 +48,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/metrics"
     "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     "k8s.io/kubernetes/pkg/util/selinux"
 )
@@ -391,7 +391,7 @@ func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
 }
 // UpdatePluginResources updates node resources based on devices already allocated to pods.
-func (m *ManagerImpl) UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (m *ManagerImpl) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
     pod := attrs.Pod
     m.mutex.Lock()
@@ -924,9 +924,9 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
 // and if necessary, updates allocatableResource in nodeInfo to at least equal to
 // the allocated capacity. This allows pods that have already been scheduled on
 // the node to pass GeneralPredicates admission checking even upon device plugin failure.
-func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulernodeinfo.NodeInfo) {
-    var newAllocatableResource *schedulernodeinfo.Resource
-    allocatableResource := node.AllocatableResource()
+func (m *ManagerImpl) sanitizeNodeAllocatable(node *framework.NodeInfo) {
+    var newAllocatableResource *framework.Resource
+    allocatableResource := node.Allocatable
     if allocatableResource.ScalarResources == nil {
         allocatableResource.ScalarResources = make(map[v1.ResourceName]int64)
     }
@@ -944,7 +944,7 @@ func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulernodeinfo.NodeInfo)
         newAllocatableResource.ScalarResources[v1.ResourceName(resource)] = int64(needed)
     }
     if newAllocatableResource != nil {
-        node.SetAllocatableResource(newAllocatableResource)
+        node.Allocatable = newAllocatableResource
     }
 }
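sanitizeNodeAllocatable now reads and writes NodeInfo.Allocatable directly instead of going through the deleted getter/setter pair. A rough sketch of that clone, adjust, assign pattern; the function and resource name are invented for illustration, and note that the removed SetAllocatableResource also bumped the NodeInfo generation, which a plain field assignment does not:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// ensureAllocatable raises a scalar resource to at least `needed`, the same
// shape as sanitizeNodeAllocatable above. It assumes SetNode has already been
// called so node.Allocatable is populated.
func ensureAllocatable(node *framework.NodeInfo, name v1.ResourceName, needed int64) {
	if node.Allocatable.ScalarResources[name] >= needed {
		return
	}
	newAllocatable := node.Allocatable.Clone()
	if newAllocatable.ScalarResources == nil {
		newAllocatable.ScalarResources = make(map[v1.ResourceName]int64)
	}
	newAllocatable.ScalarResources[name] = needed
	// Direct assignment replaces node.SetAllocatableResource(newAllocatable).
	node.Allocatable = newAllocatable
}
```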

View File

@@ -23,7 +23,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/config"
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 // ManagerStub provides a simple stub implementation for the Device Manager.
@@ -50,7 +50,7 @@ func (h *ManagerStub) Allocate(pod *v1.Pod, container *v1.Container) error {
 }
 // UpdatePluginResources simply returns nil.
-func (h *ManagerStub) UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (h *ManagerStub) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
     return nil
 }

View File

@@ -42,7 +42,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/config"
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/pluginmanager"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 const (
@@ -889,12 +889,12 @@ func TestUpdatePluginResources(t *testing.T) {
             },
         },
     }
-    nodeInfo := &schedulernodeinfo.NodeInfo{}
+    nodeInfo := &framework.NodeInfo{}
     nodeInfo.SetNode(cachedNode)
     testManager.UpdatePluginResources(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod})
-    allocatableScalarResources := nodeInfo.AllocatableResource().ScalarResources
+    allocatableScalarResources := nodeInfo.Allocatable.ScalarResources
     // allocatable in nodeInfo is less than needed, should update
     as.Equal(1, int(allocatableScalarResources[v1.ResourceName(resourceName1)]))
     // allocatable in nodeInfo is more than needed, should skip updating
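On the test side, the NodeInfo is now built from the framework package and its allocatable scalar resources are inspected through the exported field. A condensed sketch of the flow exercised by TestUpdatePluginResources, written as a helper rather than a test and assuming the devicemanager package's exported Manager interface shown elsewhere in this diff:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// allocatableAfterUpdate builds a NodeInfo from a cached node, runs the device
// manager's update, and reads the scalar resources off the exported
// Allocatable field (previously AllocatableResource().ScalarResources).
func allocatableAfterUpdate(m devicemanager.Manager, node *v1.Node, pod *v1.Pod) (map[v1.ResourceName]int64, error) {
	nodeInfo := &framework.NodeInfo{}
	nodeInfo.SetNode(node)
	if err := m.UpdatePluginResources(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod}); err != nil {
		return nil, err
	}
	return nodeInfo.Allocatable.ScalarResources, nil
}
```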

View File

@@ -26,7 +26,7 @@ import (
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 // Manager manages all the Device Plugins running on a node.
@@ -44,7 +44,7 @@ type Manager interface {
     // UpdatePluginResources updates node resources based on devices already
     // allocated to pods. The node object is provided for the device manager to
     // update the node capacity to reflect the currently available devices.
-    UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
+    UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
     // Stop stops the manager.
     Stop() error

View File

@@ -67,7 +67,7 @@ import (
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/kubelet/util/queue"
     kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     "k8s.io/kubernetes/pkg/volume"
     "k8s.io/kubernetes/pkg/volume/awsebs"
     "k8s.io/kubernetes/pkg/volume/azure_dd"
@@ -658,7 +658,7 @@ func TestHandlePluginResources(t *testing.T) {
     }
     kl.nodeLister = testNodeLister{nodes: nodes}
-    updatePluginResourcesFunc := func(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+    updatePluginResourcesFunc := func(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
         // Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
         // A resource with invalid value (< 0) causes the function to return an error
         // to emulate resource Allocation failure.
@@ -670,8 +670,7 @@ func TestHandlePluginResources(t *testing.T) {
             failedResource: resourceQuantityInvalid,
         }
         pod := attrs.Pod
-        allocatableResource := node.AllocatableResource()
-        newAllocatableResource := allocatableResource.Clone()
+        newAllocatableResource := node.Allocatable.Clone()
         for _, container := range pod.Spec.Containers {
             for resource := range container.Resources.Requests {
                 newQuantity, exist := updateResourceMap[resource]
@@ -684,7 +683,7 @@ func TestHandlePluginResources(t *testing.T) {
                 newAllocatableResource.ScalarResources[resource] = newQuantity.Value()
             }
         }
-        node.SetAllocatableResource(newAllocatableResource)
+        node.Allocatable = newAllocatableResource
         return nil
     }

View File

@@ -26,7 +26,7 @@ go_library(
         "//pkg/scheduler/framework/plugins/nodename:go_default_library",
         "//pkg/scheduler/framework/plugins/nodeports:go_default_library",
         "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/security/apparmor:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -49,7 +49,7 @@ go_test(
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/scheduler/framework/plugins/nodename:go_default_library",
         "//pkg/scheduler/framework/plugins/nodeports:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -19,22 +19,21 @@ package lifecycle
 import (
     "fmt"
-    "k8s.io/api/core/v1"
     "k8s.io/klog"
-    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-    "k8s.io/kubernetes/pkg/kubelet/util/format"
     pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    "k8s.io/api/core/v1"
+    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+    "k8s.io/kubernetes/pkg/kubelet/util/format"
 )
 type getNodeAnyWayFuncType func() (*v1.Node, error)
-type pluginResourceUpdateFuncType func(*schedulernodeinfo.NodeInfo, *PodAdmitAttributes) error
+type pluginResourceUpdateFuncType func(*framework.NodeInfo, *PodAdmitAttributes) error
 // AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
 // This allows for the graceful handling of pod admission failure.
@@ -70,7 +69,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
     }
     admitPod := attrs.Pod
     pods := attrs.OtherPods
-    nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
+    nodeInfo := framework.NewNodeInfo(pods...)
     nodeInfo.SetNode(node)
     // ensure the node has enough plugin resources for that required in pods
     if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
@@ -156,7 +155,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
     }
 }
-func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *v1.Pod {
+func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *framework.NodeInfo) *v1.Pod {
     podCopy := pod.DeepCopy()
     for i, c := range pod.Spec.Containers {
         // We only handle requests in Requests but not Limits because the
@@ -165,7 +164,7 @@ func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.Nod
         podCopy.Spec.Containers[i].Resources.Requests = make(v1.ResourceList)
         for rName, rQuant := range c.Resources.Requests {
             if v1helper.IsExtendedResourceName(rName) {
-                if _, found := nodeInfo.AllocatableResource().ScalarResources[rName]; !found {
+                if _, found := nodeInfo.Allocatable.ScalarResources[rName]; !found {
                     continue
                 }
             }
@@ -220,7 +219,7 @@ func (e *PredicateFailureError) GetReason() string {
 }
 // GeneralPredicates checks a group of predicates that the kubelet cares about.
-func GeneralPredicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) ([]PredicateFailureReason, error) {
+func GeneralPredicates(pod *v1.Pod, nodeInfo *framework.NodeInfo) ([]PredicateFailureReason, error) {
     if nodeInfo.Node() == nil {
         return nil, fmt.Errorf("node not found")
     }
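The kubelet's admission handler builds the NodeInfo exactly as before, just from the framework package. A compact sketch of how GeneralPredicates is now driven, assuming the exported names shown in the hunks above; the wrapper function is illustrative:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// admissionReasons aggregates the existing pods on the node into a
// framework.NodeInfo and runs the kubelet's GeneralPredicates against it.
func admissionReasons(pod *v1.Pod, node *v1.Node, otherPods []*v1.Pod) ([]lifecycle.PredicateFailureReason, error) {
	nodeInfo := framework.NewNodeInfo(otherPods...)
	nodeInfo.SetNode(node)
	return lifecycle.GeneralPredicates(pod, nodeInfo)
}
```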

View File

@@ -26,7 +26,7 @@ import (
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 var (
@@ -83,7 +83,7 @@ func TestRemoveMissingExtendedResources(t *testing.T) {
             ),
         },
     } {
-        nodeInfo := schedulernodeinfo.NewNodeInfo()
+        nodeInfo := framework.NewNodeInfo()
         nodeInfo.SetNode(test.node)
         pod := removeMissingExtendedResources(test.pod, nodeInfo)
         if !reflect.DeepEqual(pod, test.expectedPod) {
@@ -144,7 +144,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
     }
 }
-func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
+func newResourcePod(usage ...framework.Resource) *v1.Pod {
     containers := []v1.Container{}
     for _, req := range usage {
         containers = append(containers, v1.Container{
@@ -177,7 +177,7 @@ func newPodWithPort(hostPorts ...int) *v1.Pod {
 func TestGeneralPredicates(t *testing.T) {
     resourceTests := []struct {
         pod *v1.Pod
-        nodeInfo *schedulernodeinfo.NodeInfo
+        nodeInfo *framework.NodeInfo
         node *v1.Node
         fits bool
         name string
@@ -186,8 +186,8 @@ func TestGeneralPredicates(t *testing.T) {
     }{
         {
             pod: &v1.Pod{},
-            nodeInfo: schedulernodeinfo.NewNodeInfo(
-                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
+            nodeInfo: framework.NewNodeInfo(
+                newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
                 Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -197,9 +197,9 @@ func TestGeneralPredicates(t *testing.T) {
             name: "no resources/port/host requested always fits",
         },
         {
-            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 10}),
-            nodeInfo: schedulernodeinfo.NewNodeInfo(
-                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
+            pod: newResourcePod(framework.Resource{MilliCPU: 8, Memory: 10}),
+            nodeInfo: framework.NewNodeInfo(
+                newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
                 Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -218,7 +218,7 @@ func TestGeneralPredicates(t *testing.T) {
                 NodeName: "machine2",
             },
         },
-            nodeInfo: schedulernodeinfo.NewNodeInfo(),
+            nodeInfo: framework.NewNodeInfo(),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
                 Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -230,7 +230,7 @@ func TestGeneralPredicates(t *testing.T) {
         },
         {
             pod: newPodWithPort(123),
-            nodeInfo: schedulernodeinfo.NewNodeInfo(newPodWithPort(123)),
+            nodeInfo: framework.NewNodeInfo(newPodWithPort(123)),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
                 Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},

View File

@@ -126,7 +126,6 @@ filegroup(
         "//pkg/scheduler/internal/parallelize:all-srcs",
         "//pkg/scheduler/internal/queue:all-srcs",
         "//pkg/scheduler/metrics:all-srcs",
-        "//pkg/scheduler/nodeinfo:all-srcs",
         "//pkg/scheduler/profile:all-srcs",
         "//pkg/scheduler/testing:all-srcs",
         "//pkg/scheduler/util:all-srcs",

View File

@@ -56,17 +56,12 @@ func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleSta
         return framework.NewStatus(framework.Error, "invalid nodeInfo")
     }
-    taints, err := nodeInfo.Taints()
-    if err != nil {
-        return framework.NewStatus(framework.Error, err.Error())
-    }
     filterPredicate := func(t *v1.Taint) bool {
         // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
         return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
     }
-    taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(taints, pod.Spec.Tolerations, filterPredicate)
+    taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, filterPredicate)
     if !isUntolerated {
         return nil
     }
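The TaintToleration filter now pulls taints straight off the cached Node object rather than through the removed accessor. A sketch of that check in isolation; the helper function is invented for illustration:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// podToleratesNodeTaints reports whether the pod tolerates every
// NoSchedule/NoExecute taint on the node, reading the taints directly from
// nodeInfo.Node().Spec.Taints.
func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool {
	filter := func(t *v1.Taint) bool {
		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
	}
	_, untolerated := v1helper.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, filter)
	return !untolerated
}
```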

View File

@@ -32,10 +32,7 @@ import (
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
-var (
-    emptyResource = Resource{}
-    generation int64
-)
+var generation int64
 // PodInfo is a wrapper to a Pod with additional information for purposes such as tracking
 // the timestamp when it's added to the queue or recording per-pod metrics.
@@ -319,31 +316,6 @@ func (n *NodeInfo) Node() *v1.Node {
     return n.node
 }
-
-// Taints returns the taints list on this node.
-// TODO(#89528): Exists only because of kubelet dependency, remove.
-func (n *NodeInfo) Taints() ([]v1.Taint, error) {
-    if n == nil || n.node.Spec.Taints == nil {
-        return nil, nil
-    }
-    return n.node.Spec.Taints, nil
-}
-
-// AllocatableResource returns allocatable resources on a given node.
-// TODO(#89528): Exists only because of kubelet dependency, remove.
-func (n *NodeInfo) AllocatableResource() Resource {
-    if n == nil {
-        return emptyResource
-    }
-    return *n.Allocatable
-}
-
-// SetAllocatableResource sets the allocatableResource information of given node.
-// TODO(#89528): Exists only because of kubelet dependency, remove.
-func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
-    n.Allocatable = allocatableResource
-    n.Generation = nextGeneration()
-}
 // Clone returns a copy of this node.
 func (n *NodeInfo) Clone() *NodeInfo {
     clone := &NodeInfo{
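One behavioral detail worth noting: the deleted Taints accessor tolerated a nil receiver and returned nil, whereas reading Spec.Taints requires the caller to hold a non-nil node, so callers keep whatever nil guard they already had. A hedged sketch of the caller-side equivalent of the removed helper; the function name is invented:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// taintsOf is roughly what the removed NodeInfo.Taints() did, minus the
// error return that no caller could act on.
func taintsOf(nodeInfo *framework.NodeInfo) []v1.Taint {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return nil
	}
	return nodeInfo.Node().Spec.Taints
}
```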

View File

@@ -1,26 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "go_default_library",
-    srcs = ["node_info.go"],
-    importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//staging/src/k8s.io/api/core/v1:go_default_library",
-    ],
-)
-
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-    visibility = ["//visibility:public"],
-)

View File

@ -1,43 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeinfo
import (
v1 "k8s.io/api/core/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// TODO(#89528): This file defines temporary aliases of types used by kubelet.
// Those will be removed and the underlying types defined in scheduler/types will be used directly.
// NodeInfo is node level aggregated information.
type NodeInfo = framework.NodeInfo
// Resource is a collection of compute resource.
type Resource = framework.Resource
// NewResource creates a Resource from ResourceList
func NewResource(rl v1.ResourceList) *Resource {
return framework.NewResource(rl)
}
// NewNodeInfo returns a ready to use empty NodeInfo object.
// If any pods are given in arguments, their information will be aggregated in
// the returned object.
func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
return framework.NewNodeInfo(pods...)
}
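With the alias package gone, the few helpers it re-exported are used from the framework package directly. A short sketch of the direct replacement for the removed NewResource alias; the function and values are illustrative:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// newAllocatable builds a framework.Resource from a v1.ResourceList, which is
// what callers of the removed nodeinfo.NewResource alias now do directly.
func newAllocatable() *framework.Resource {
	rl := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("4"),
		v1.ResourceMemory: resource.MustParse("8Gi"),
	}
	return framework.NewResource(rl)
}
```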

View File

@ -1,9 +1,4 @@
package(default_visibility = ["//visibility:public"]) load("@io_bazel_rules_go//go:def.bzl", "go_library")
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library( go_library(
name = "go_default_library", name = "go_default_library",
@ -23,6 +18,7 @@ go_library(
"wait.go", "wait.go",
], ],
importpath = "k8s.io/kubernetes/test/e2e/apps", importpath = "k8s.io/kubernetes/test/e2e/apps",
visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/api/v1/pod:go_default_library", "//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps:go_default_library", "//pkg/apis/apps:go_default_library",
@ -37,7 +33,6 @@ go_library(
"//pkg/controller/replication:go_default_library", "//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library", "//pkg/master/ports:go_default_library",
"//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library",
@ -102,4 +97,5 @@ filegroup(
name = "all-srcs", name = "all-srcs",
srcs = [":package-srcs"], srcs = [":package-srcs"],
tags = ["automanaged"], tags = ["automanaged"],
visibility = ["//visibility:public"],
) )

View File

@@ -36,7 +36,6 @@ import (
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/controller/daemon"
-    schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     "k8s.io/kubernetes/test/e2e/framework"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@@ -688,14 +687,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
 // canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
 func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
     newPod := daemon.NewPod(ds, node.Name)
-    nodeInfo := schedfwk.NewNodeInfo()
-    nodeInfo.SetNode(&node)
-    taints, err := nodeInfo.Taints()
-    if err != nil {
-        framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
-        return false
-    }
-    fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(newPod, &node, taints)
+    fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(newPod, &node, node.Spec.Taints)
     return fitsNodeName && fitsNodeAffinity && fitsTaints
 }

View File

@@ -188,9 +188,7 @@
         "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
         "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1",
         "k8s.io/kubernetes/pkg/scheduler/internal/parallelize",
-        "k8s.io/kubernetes/pkg/scheduler/listers",
         "k8s.io/kubernetes/pkg/scheduler/metrics",
-        "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
         "k8s.io/kubernetes/pkg/scheduler/util",
         "k8s.io/kubernetes/pkg/scheduler/volumebinder",
         "k8s.io/kubernetes/pkg/security/apparmor",

View File

@@ -11,7 +11,6 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/controller:go_default_library",
-        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
@@ -29,20 +28,6 @@ go_library(
     ],
 )
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-    visibility = ["//visibility:public"],
-)
-
 go_test(
     name = "go_default_test",
     srcs = ["wait_test.go"],
@@ -56,3 +41,17 @@ go_test(
         "//staging/src/k8s.io/client-go/testing:go_default_library",
     ],
 )
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)

View File

@@ -32,7 +32,6 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/pkg/controller"
-    schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     "k8s.io/kubernetes/test/e2e/system"
 )
@@ -393,8 +392,6 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
         },
     }
-    nodeInfo := schedfwk.NewNodeInfo()
-
     // Simple lookup for nonblocking taints based on comma-delimited list.
     nonblockingTaintsMap := map[string]struct{}{}
     for _, t := range strings.Split(nonblockingTaints, ",") {
@@ -403,6 +400,7 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
         }
     }
+    n := node
     if len(nonblockingTaintsMap) > 0 {
         nodeCopy := node.DeepCopy()
         nodeCopy.Spec.Taints = []v1.Taint{}
@@ -411,18 +409,9 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
             nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v)
         }
         }
-        nodeInfo.SetNode(nodeCopy)
-    } else {
-        nodeInfo.SetNode(node)
+        n = nodeCopy
     }
-    taints, err := nodeInfo.Taints()
-    if err != nil {
-        e2elog.Failf("Can't test predicates for node %s: %v", node.Name, err)
-        return false
-    }
-    return toleratesTaintsWithNoScheduleNoExecuteEffects(taints, fakePod.Spec.Tolerations)
+    return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, fakePod.Spec.Tolerations)
 }
 func toleratesTaintsWithNoScheduleNoExecuteEffects(taints []v1.Taint, tolerations []v1.Toleration) bool {

View File

@@ -44,7 +44,6 @@ go_library(
         "//pkg/kubeapiserver:go_default_library",
         "//pkg/kubelet/client:go_default_library",
         "//pkg/master:go_default_library",
-        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/util/env:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",

View File

@@ -34,7 +34,6 @@ import (
     "k8s.io/klog"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
-    schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     testutils "k8s.io/kubernetes/test/utils"
 )
@@ -250,8 +249,6 @@ func isNodeUntainted(node *v1.Node) bool {
         },
     }
-    nodeInfo := schedfwk.NewNodeInfo()
-
     // Simple lookup for nonblocking taints based on comma-delimited list.
     nonblockingTaintsMap := map[string]struct{}{}
     for _, t := range strings.Split(nonblockingTaints, ",") {
@@ -260,6 +257,7 @@ func isNodeUntainted(node *v1.Node) bool {
         }
     }
+    n := node
     if len(nonblockingTaintsMap) > 0 {
         nodeCopy := node.DeepCopy()
         nodeCopy.Spec.Taints = []v1.Taint{}
@@ -268,18 +266,10 @@ func isNodeUntainted(node *v1.Node) bool {
             nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v)
         }
         }
-        nodeInfo.SetNode(nodeCopy)
-    } else {
-        nodeInfo.SetNode(node)
+        n = nodeCopy
     }
-    taints, err := nodeInfo.Taints()
-    if err != nil {
-        klog.Fatalf("Can't test predicates for node %s: %v", node.Name, err)
-        return false
-    }
-    return v1helper.TolerationsTolerateTaintsWithFilter(fakePod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
+    return v1helper.TolerationsTolerateTaintsWithFilter(fakePod.Spec.Tolerations, n.Spec.Taints, func(t *v1.Taint) bool {
         return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
     })
 }
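The test utilities follow the same pattern: pick the node (or a copy with the nonblocking taints stripped) and hand its Spec.Taints to the toleration helper. A condensed sketch of that final check, using the same filter as the hunk above; the wrapper function is illustrative:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

// blockingTaintsTolerated reports whether the given tolerations cover every
// NoSchedule/NoExecute taint on the node, reading the spec directly.
func blockingTaintsTolerated(node *v1.Node, tolerations []v1.Toleration) bool {
	return v1helper.TolerationsTolerateTaintsWithFilter(tolerations, node.Spec.Taints, func(t *v1.Taint) bool {
		return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
	})
}
```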