diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD
index 3ac43ce7b4c..7aedb92226b 100644
--- a/pkg/controller/daemon/BUILD
+++ b/pkg/controller/daemon/BUILD
@@ -20,7 +20,6 @@ go_library(
         "//pkg/controller:go_default_library",
         "//pkg/controller/daemon/util:go_default_library",
         "//pkg/scheduler/framework/plugins/helper:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go
index c8b4bfdc8b2..e105d207d2c 100644
--- a/pkg/controller/daemon/daemon_controller.go
+++ b/pkg/controller/daemon/daemon_controller.go
@@ -53,7 +53,6 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/daemon/util"
 	pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	"k8s.io/utils/integer"
 )
 
@@ -1200,14 +1199,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
 		return false, false, nil
 	}
 
-	nodeInfo := schedulernodeinfo.NewNodeInfo()
-	nodeInfo.SetNode(node)
-	taints, err := nodeInfo.Taints()
-	if err != nil {
-		klog.Warningf("failed to get node %q taints: %v", node.Name, err)
-		return false, false, err
-	}
-
+	taints := node.Spec.Taints
 	fitsNodeName, fitsNodeAffinity, fitsTaints := Predicates(pod, node, taints)
 	if !fitsNodeName || !fitsNodeAffinity {
 		return false, false, nil
diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD
index 64106eac63b..ffe1b4dda69 100644
--- a/pkg/kubelet/BUILD
+++ b/pkg/kubelet/BUILD
@@ -213,7 +213,7 @@ go_test(
         "//pkg/kubelet/util/queue:go_default_library",
         "//pkg/kubelet/util/sliceutils:go_default_library",
         "//pkg/kubelet/volumemanager:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/util/taints:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/awsebs:go_default_library",
diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD
index 1ad243e0b3a..1487c4398bd 100644
--- a/pkg/kubelet/cm/BUILD
+++ b/pkg/kubelet/cm/BUILD
@@ -36,7 +36,7 @@ go_library(
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/pluginmanager/cache:go_default_library",
         "//pkg/kubelet/status:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go
index 8e60fab396d..fb0762d3044 100644
--- a/pkg/kubelet/cm/container_manager.go
+++ b/pkg/kubelet/cm/container_manager.go
@@ -31,7 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 
 	"fmt"
 	"strconv"
@@ -91,7 +91,7 @@ type ContainerManager interface {
 	// Otherwise, it updates allocatableResource in nodeInfo if necessary,
 	// to make sure it is at least equal to the pod's requested capacity for
 	// any registered device plugin resource
-	UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error
+	UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error
 
 	InternalContainerLifecycle() InternalContainerLifecycle
 
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
index d6af9835883..9fc83fd9651 100644
--- a/pkg/kubelet/cm/container_manager_linux.go
+++ b/pkg/kubelet/cm/container_manager_linux.go
@@ -62,7 +62,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 	"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/util/oom"
 	"k8s.io/kubernetes/pkg/util/procfs"
 	utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
@@ -675,7 +675,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
 	return opts, nil
 }
 
-func (cm *containerManagerImpl) UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 	return cm.deviceManager.UpdatePluginResources(node, attrs)
 }
 
diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go
index 3ed3d264052..201fb0442ef 100644
--- a/pkg/kubelet/cm/container_manager_stub.go
+++ b/pkg/kubelet/cm/container_manager_stub.go
@@ -30,7 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 
 type containerManagerStub struct {
@@ -97,7 +97,7 @@ func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Containe
 	return &kubecontainer.RunContainerOptions{}, nil
 }
 
-func (cm *containerManagerStub) UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerStub) UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
 	return nil
 }
 
diff --git a/pkg/kubelet/cm/container_manager_windows.go b/pkg/kubelet/cm/container_manager_windows.go
index 50e25ca4b58..b4b93f7a7b5 100644
--- a/pkg/kubelet/cm/container_manager_windows.go
+++ b/pkg/kubelet/cm/container_manager_windows.go
@@ -42,7 +42,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 
 type containerManagerImpl struct {
@@ -165,7 +165,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
 	return &kubecontainer.RunContainerOptions{}, nil
 }
 
-func (cm *containerManagerImpl) UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
 	return nil
 }
 
diff --git a/pkg/kubelet/cm/devicemanager/BUILD b/pkg/kubelet/cm/devicemanager/BUILD
index 3299f7f4e9a..bd773258211 100644
--- a/pkg/kubelet/cm/devicemanager/BUILD
+++ b/pkg/kubelet/cm/devicemanager/BUILD
@@ -28,7 +28,7 @@ go_library(
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/pluginmanager/cache:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/util/selinux:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -57,7 +57,7 @@ go_test(
         "//pkg/kubelet/config:go_default_library",
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/pluginmanager:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go
index 65b91393ae4..81189f3d3fc 100644
--- a/pkg/kubelet/cm/devicemanager/manager.go
+++ b/pkg/kubelet/cm/devicemanager/manager.go
@@ -48,7 +48,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/util/selinux"
 )
 
@@ -391,7 +391,7 @@ func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
 }
 
 // UpdatePluginResources updates node resources based on devices already allocated to pods.
-func (m *ManagerImpl) UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (m *ManagerImpl) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 	pod := attrs.Pod
 
 	m.mutex.Lock()
@@ -924,9 +924,9 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
 // and if necessary, updates allocatableResource in nodeInfo to at least equal to
 // the allocated capacity. This allows pods that have already been scheduled on
 // the node to pass GeneralPredicates admission checking even upon device plugin failure.
-func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulernodeinfo.NodeInfo) {
-	var newAllocatableResource *schedulernodeinfo.Resource
-	allocatableResource := node.AllocatableResource()
+func (m *ManagerImpl) sanitizeNodeAllocatable(node *framework.NodeInfo) {
+	var newAllocatableResource *framework.Resource
+	allocatableResource := node.Allocatable
 	if allocatableResource.ScalarResources == nil {
 		allocatableResource.ScalarResources = make(map[v1.ResourceName]int64)
 	}
@@ -944,7 +944,7 @@ func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulernodeinfo.NodeInfo)
 		newAllocatableResource.ScalarResources[v1.ResourceName(resource)] = int64(needed)
 	}
 	if newAllocatableResource != nil {
-		node.SetAllocatableResource(newAllocatableResource)
+		node.Allocatable = newAllocatableResource
 	}
 }
 
diff --git a/pkg/kubelet/cm/devicemanager/manager_stub.go b/pkg/kubelet/cm/devicemanager/manager_stub.go
index ed6fb41e58e..6b8e4284437 100644
--- a/pkg/kubelet/cm/devicemanager/manager_stub.go
+++ b/pkg/kubelet/cm/devicemanager/manager_stub.go
@@ -23,7 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 
 // ManagerStub provides a simple stub implementation for the Device Manager.
@@ -50,7 +50,7 @@ func (h *ManagerStub) Allocate(pod *v1.Pod, container *v1.Container) error {
 }
 
 // UpdatePluginResources simply returns nil.
-func (h *ManagerStub) UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (h *ManagerStub) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 	return nil
 }
 
diff --git a/pkg/kubelet/cm/devicemanager/manager_test.go b/pkg/kubelet/cm/devicemanager/manager_test.go
index cefd25937a0..6fc985dfaa5 100644
--- a/pkg/kubelet/cm/devicemanager/manager_test.go
+++ b/pkg/kubelet/cm/devicemanager/manager_test.go
@@ -42,7 +42,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 
 const (
@@ -889,12 +889,12 @@ func TestUpdatePluginResources(t *testing.T) {
 			},
 		},
 	}
 
-	nodeInfo := &schedulernodeinfo.NodeInfo{}
+	nodeInfo := &framework.NodeInfo{}
 	nodeInfo.SetNode(cachedNode)
 	testManager.UpdatePluginResources(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod})
 
-	allocatableScalarResources := nodeInfo.AllocatableResource().ScalarResources
+	allocatableScalarResources := nodeInfo.Allocatable.ScalarResources
 	// allocatable in nodeInfo is less than needed, should update
 	as.Equal(1, int(allocatableScalarResources[v1.ResourceName(resourceName1)]))
 	// allocatable in nodeInfo is more than needed, should skip updating
diff --git a/pkg/kubelet/cm/devicemanager/types.go b/pkg/kubelet/cm/devicemanager/types.go
index 9fcafe53ec4..3c5dbbaa524 100644
--- a/pkg/kubelet/cm/devicemanager/types.go
+++ b/pkg/kubelet/cm/devicemanager/types.go
@@ -26,7 +26,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 
 // Manager manages all the Device Plugins running on a node.
@@ -44,7 +44,7 @@ type Manager interface {
 	// UpdatePluginResources updates node resources based on devices already
 	// allocated to pods. The node object is provided for the device manager to
 	// update the node capacity to reflect the currently available devices.
-	UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
+	UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
 
 	// Stop stops the manager.
 	Stop() error
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index db1b8d1efc5..2c0ebcb1426 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -67,7 +67,7 @@ import (
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/awsebs"
 	"k8s.io/kubernetes/pkg/volume/azure_dd"
@@ -658,7 +658,7 @@ func TestHandlePluginResources(t *testing.T) {
 	}
 	kl.nodeLister = testNodeLister{nodes: nodes}
 
-	updatePluginResourcesFunc := func(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+	updatePluginResourcesFunc := func(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 		// Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
 		// A resource with invalid value (< 0) causes the function to return an error
 		// to emulate resource Allocation failure.
@@ -670,8 +670,7 @@ func TestHandlePluginResources(t *testing.T) {
 			failedResource: resourceQuantityInvalid,
 		}
 		pod := attrs.Pod
-		allocatableResource := node.AllocatableResource()
-		newAllocatableResource := allocatableResource.Clone()
+		newAllocatableResource := node.Allocatable.Clone()
 		for _, container := range pod.Spec.Containers {
 			for resource := range container.Resources.Requests {
 				newQuantity, exist := updateResourceMap[resource]
@@ -684,7 +683,7 @@ func TestHandlePluginResources(t *testing.T) {
 				newAllocatableResource.ScalarResources[resource] = newQuantity.Value()
 			}
 		}
-		node.SetAllocatableResource(newAllocatableResource)
+		node.Allocatable = newAllocatableResource
 		return nil
 	}
 
diff --git a/pkg/kubelet/lifecycle/BUILD b/pkg/kubelet/lifecycle/BUILD
index 3b57741a58d..c9b55077469 100644
--- a/pkg/kubelet/lifecycle/BUILD
+++ b/pkg/kubelet/lifecycle/BUILD
@@ -26,7 +26,7 @@ go_library(
         "//pkg/scheduler/framework/plugins/nodename:go_default_library",
         "//pkg/scheduler/framework/plugins/nodeports:go_default_library",
         "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/security/apparmor:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -49,7 +49,7 @@ go_test(
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/scheduler/framework/plugins/nodename:go_default_library",
         "//pkg/scheduler/framework/plugins/nodeports:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
diff --git a/pkg/kubelet/lifecycle/predicate.go b/pkg/kubelet/lifecycle/predicate.go
index 2a0ef704636..1603fe447c9 100644
--- a/pkg/kubelet/lifecycle/predicate.go
+++ b/pkg/kubelet/lifecycle/predicate.go
@@ -19,22 +19,21 @@ package lifecycle
 import (
 	"fmt"
 
+	"k8s.io/api/core/v1"
 	"k8s.io/klog"
 
+	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
-
-	"k8s.io/api/core/v1"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/kubelet/util/format"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 
 type getNodeAnyWayFuncType func() (*v1.Node, error)
 
-type pluginResourceUpdateFuncType func(*schedulernodeinfo.NodeInfo, *PodAdmitAttributes) error
+type pluginResourceUpdateFuncType func(*framework.NodeInfo, *PodAdmitAttributes) error
 
 // AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
 // This allows for the graceful handling of pod admission failure.
@@ -70,7 +69,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
 	}
 	admitPod := attrs.Pod
 	pods := attrs.OtherPods
-	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
+	nodeInfo := framework.NewNodeInfo(pods...)
 	nodeInfo.SetNode(node)
 	// ensure the node has enough plugin resources for that required in pods
 	if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
@@ -156,7 +155,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
 	}
 }
 
-func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *v1.Pod {
+func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *framework.NodeInfo) *v1.Pod {
 	podCopy := pod.DeepCopy()
 	for i, c := range pod.Spec.Containers {
 		// We only handle requests in Requests but not Limits because the
@@ -165,7 +164,7 @@ func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.Nod
 		podCopy.Spec.Containers[i].Resources.Requests = make(v1.ResourceList)
 		for rName, rQuant := range c.Resources.Requests {
 			if v1helper.IsExtendedResourceName(rName) {
-				if _, found := nodeInfo.AllocatableResource().ScalarResources[rName]; !found {
+				if _, found := nodeInfo.Allocatable.ScalarResources[rName]; !found {
 					continue
 				}
 			}
@@ -220,7 +219,7 @@ func (e *PredicateFailureError) GetReason() string {
 }
 
 // GeneralPredicates checks a group of predicates that the kubelet cares about.
-func GeneralPredicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) ([]PredicateFailureReason, error) {
+func GeneralPredicates(pod *v1.Pod, nodeInfo *framework.NodeInfo) ([]PredicateFailureReason, error) {
 	if nodeInfo.Node() == nil {
 		return nil, fmt.Errorf("node not found")
 	}
diff --git a/pkg/kubelet/lifecycle/predicate_test.go b/pkg/kubelet/lifecycle/predicate_test.go
index 82839dd6169..8e43f164760 100644
--- a/pkg/kubelet/lifecycle/predicate_test.go
+++ b/pkg/kubelet/lifecycle/predicate_test.go
@@ -26,7 +26,7 @@ import (
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 
 var (
@@ -83,7 +83,7 @@ func TestRemoveMissingExtendedResources(t *testing.T) {
 			),
 		},
 	} {
-		nodeInfo := schedulernodeinfo.NewNodeInfo()
+		nodeInfo := framework.NewNodeInfo()
 		nodeInfo.SetNode(test.node)
 		pod := removeMissingExtendedResources(test.pod, nodeInfo)
 		if !reflect.DeepEqual(pod, test.expectedPod) {
@@ -144,7 +144,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
 	}
 }
 
-func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
+func newResourcePod(usage ...framework.Resource) *v1.Pod {
 	containers := []v1.Container{}
 	for _, req := range usage {
 		containers = append(containers, v1.Container{
@@ -177,7 +177,7 @@ func newPodWithPort(hostPorts ...int) *v1.Pod {
 func TestGeneralPredicates(t *testing.T) {
 	resourceTests := []struct {
 		pod      *v1.Pod
-		nodeInfo *schedulernodeinfo.NodeInfo
+		nodeInfo *framework.NodeInfo
 		node     *v1.Node
 		fits     bool
 		name     string
@@ -186,8 +186,8 @@ func TestGeneralPredicates(t *testing.T) {
 	}{
 		{
 			pod: &v1.Pod{},
-			nodeInfo: schedulernodeinfo.NewNodeInfo(
-				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
+			nodeInfo: framework.NewNodeInfo(
+				newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -197,9 +197,9 @@ func TestGeneralPredicates(t *testing.T) {
 			name: "no resources/port/host requested always fits",
 		},
 		{
-			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 10}),
-			nodeInfo: schedulernodeinfo.NewNodeInfo(
-				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
+			pod: newResourcePod(framework.Resource{MilliCPU: 8, Memory: 10}),
+			nodeInfo: framework.NewNodeInfo(
+				newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -218,7 +218,7 @@ func TestGeneralPredicates(t *testing.T) {
 					NodeName: "machine2",
 				},
 			},
-			nodeInfo: schedulernodeinfo.NewNodeInfo(),
+			nodeInfo: framework.NewNodeInfo(),
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -230,7 +230,7 @@ func TestGeneralPredicates(t *testing.T) {
 		},
 		{
 			pod: newPodWithPort(123),
-			nodeInfo: schedulernodeinfo.NewNodeInfo(newPodWithPort(123)),
+			nodeInfo: framework.NewNodeInfo(newPodWithPort(123)),
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD
index baf11c614e8..66f9dfa2e3f 100644
--- a/pkg/scheduler/BUILD
+++ b/pkg/scheduler/BUILD
@@ -126,7 +126,6 @@ filegroup(
         "//pkg/scheduler/internal/parallelize:all-srcs",
         "//pkg/scheduler/internal/queue:all-srcs",
         "//pkg/scheduler/metrics:all-srcs",
-        "//pkg/scheduler/nodeinfo:all-srcs",
         "//pkg/scheduler/profile:all-srcs",
        "//pkg/scheduler/testing:all-srcs",
        "//pkg/scheduler/util:all-srcs",
diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go
index 73bfea7e5a9..2d9b6c617a5 100644
--- a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go
+++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go
@@ -56,17 +56,12 @@ func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleSta
 		return framework.NewStatus(framework.Error, "invalid nodeInfo")
 	}
 
-	taints, err := nodeInfo.Taints()
-	if err != nil {
-		return framework.NewStatus(framework.Error, err.Error())
-	}
-
 	filterPredicate := func(t *v1.Taint) bool {
 		// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
 		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
 	}
 
-	taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(taints, pod.Spec.Tolerations, filterPredicate)
+	taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, filterPredicate)
 	if !isUntolerated {
 		return nil
 	}
diff --git a/pkg/scheduler/framework/v1alpha1/types.go b/pkg/scheduler/framework/v1alpha1/types.go
index a440916d92c..946d2440ca9 100644
--- a/pkg/scheduler/framework/v1alpha1/types.go
+++ b/pkg/scheduler/framework/v1alpha1/types.go
@@ -32,10 +32,7 @@ import (
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
 
-var (
-	emptyResource = Resource{}
-	generation    int64
-)
+var generation int64
 
 // PodInfo is a wrapper to a Pod with additional information for purposes such as tracking
 // the timestamp when it's added to the queue or recording per-pod metrics.
@@ -319,31 +316,6 @@ func (n *NodeInfo) Node() *v1.Node {
 	return n.node
 }
 
-// Taints returns the taints list on this node.
-// TODO(#89528): Exists only because of kubelet dependency, remove.
-func (n *NodeInfo) Taints() ([]v1.Taint, error) {
-	if n == nil || n.node.Spec.Taints == nil {
-		return nil, nil
-	}
-	return n.node.Spec.Taints, nil
-}
-
-// AllocatableResource returns allocatable resources on a given node.
-// TODO(#89528): Exists only because of kubelet dependency, remove.
-func (n *NodeInfo) AllocatableResource() Resource {
-	if n == nil {
-		return emptyResource
-	}
-	return *n.Allocatable
-}
-
-// SetAllocatableResource sets the allocatableResource information of given node.
-// TODO(#89528): Exists only because of kubelet dependency, remove.
-func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
-	n.Allocatable = allocatableResource
-	n.Generation = nextGeneration()
-}
-
 // Clone returns a copy of this node.
 func (n *NodeInfo) Clone() *NodeInfo {
 	clone := &NodeInfo{
diff --git a/pkg/scheduler/nodeinfo/BUILD b/pkg/scheduler/nodeinfo/BUILD
deleted file mode 100644
index 818dd307299..00000000000
--- a/pkg/scheduler/nodeinfo/BUILD
+++ /dev/null
@@ -1,26 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "go_default_library",
-    srcs = ["node_info.go"],
-    importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//staging/src/k8s.io/api/core/v1:go_default_library",
-    ],
-)
-
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-    visibility = ["//visibility:public"],
-)
diff --git a/pkg/scheduler/nodeinfo/node_info.go b/pkg/scheduler/nodeinfo/node_info.go
deleted file mode 100644
index 42cd5031158..00000000000
--- a/pkg/scheduler/nodeinfo/node_info.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nodeinfo
-
-import (
-	v1 "k8s.io/api/core/v1"
-	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-)
-
-// TODO(#89528): This file defines temporary aliases of types used by kubelet.
-// Those will be removed and the underlying types defined in scheduler/types will be used directly.
-
-// NodeInfo is node level aggregated information.
-type NodeInfo = framework.NodeInfo
-
-// Resource is a collection of compute resource.
-type Resource = framework.Resource
-
-// NewResource creates a Resource from ResourceList
-func NewResource(rl v1.ResourceList) *Resource {
-	return framework.NewResource(rl)
-}
-
-// NewNodeInfo returns a ready to use empty NodeInfo object.
-// If any pods are given in arguments, their information will be aggregated in
-// the returned object.
-func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
-	return framework.NewNodeInfo(pods...)
-}
diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD
index 047325935c2..957e001ca62 100644
--- a/test/e2e/apps/BUILD
+++ b/test/e2e/apps/BUILD
@@ -1,9 +1,4 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-)
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
 
 go_library(
     name = "go_default_library",
@@ -23,6 +18,7 @@ go_library(
         "wait.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/apps",
+    visibility = ["//visibility:public"],
     deps = [
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/apps:go_default_library",
@@ -37,7 +33,6 @@ go_library(
         "//pkg/controller/replication:go_default_library",
         "//pkg/master/ports:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
-        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
         "//staging/src/k8s.io/api/batch/v1:go_default_library",
@@ -102,4 +97,5 @@ filegroup(
     name = "all-srcs",
     srcs = [":package-srcs"],
     tags = ["automanaged"],
+    visibility = ["//visibility:public"],
 )
diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go
index 123b2a763e2..89e20b3ae42 100644
--- a/test/e2e/apps/daemon_set.go
+++ b/test/e2e/apps/daemon_set.go
@@ -36,7 +36,6 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/controller/daemon"
-	schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@@ -688,14 +687,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
 // canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
 func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
 	newPod := daemon.NewPod(ds, node.Name)
-	nodeInfo := schedfwk.NewNodeInfo()
-	nodeInfo.SetNode(&node)
-	taints, err := nodeInfo.Taints()
-	if err != nil {
-		framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
-		return false
-	}
-	fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(newPod, &node, taints)
+	fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(newPod, &node, node.Spec.Taints)
 	return fitsNodeName && fitsNodeAffinity && fitsTaints
 }
 
diff --git a/test/e2e/framework/.import-restrictions b/test/e2e/framework/.import-restrictions
index 7ecf913d6ec..30edc2d76fc 100644
--- a/test/e2e/framework/.import-restrictions
+++ b/test/e2e/framework/.import-restrictions
@@ -188,9 +188,7 @@
       "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
       "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1",
      "k8s.io/kubernetes/pkg/scheduler/internal/parallelize",
-      "k8s.io/kubernetes/pkg/scheduler/listers",
       "k8s.io/kubernetes/pkg/scheduler/metrics",
-      "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
       "k8s.io/kubernetes/pkg/scheduler/util",
       "k8s.io/kubernetes/pkg/scheduler/volumebinder",
       "k8s.io/kubernetes/pkg/security/apparmor",
diff --git a/test/e2e/framework/node/BUILD b/test/e2e/framework/node/BUILD
index c05930b48a4..345cfecc3cf 100644
--- a/test/e2e/framework/node/BUILD
+++ b/test/e2e/framework/node/BUILD
@@ -11,7 +11,6 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/controller:go_default_library",
-        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
@@ -29,20 +28,6 @@ go_library(
     ],
 )
 
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-    visibility = ["//visibility:public"],
-)
-
 go_test(
     name = "go_default_test",
     srcs = ["wait_test.go"],
@@ -56,3 +41,17 @@ go_test(
         "//staging/src/k8s.io/client-go/testing:go_default_library",
     ],
 )
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
diff --git a/test/e2e/framework/node/resource.go b/test/e2e/framework/node/resource.go
index 5a9c70d1dd5..9365b76fc1e 100644
--- a/test/e2e/framework/node/resource.go
+++ b/test/e2e/framework/node/resource.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/controller"
-	schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/system"
 )
@@ -393,8 +392,6 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
 		},
 	}
 
-	nodeInfo := schedfwk.NewNodeInfo()
-
 	// Simple lookup for nonblocking taints based on comma-delimited list.
 	nonblockingTaintsMap := map[string]struct{}{}
 	for _, t := range strings.Split(nonblockingTaints, ",") {
@@ -403,6 +400,7 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
 		}
 	}
 
+	n := node
 	if len(nonblockingTaintsMap) > 0 {
 		nodeCopy := node.DeepCopy()
 		nodeCopy.Spec.Taints = []v1.Taint{}
@@ -411,18 +409,9 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
 				nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v)
 			}
 		}
-		nodeInfo.SetNode(nodeCopy)
-	} else {
-		nodeInfo.SetNode(node)
+		n = nodeCopy
 	}
-
-	taints, err := nodeInfo.Taints()
-	if err != nil {
-		e2elog.Failf("Can't test predicates for node %s: %v", node.Name, err)
-		return false
-	}
-
-	return toleratesTaintsWithNoScheduleNoExecuteEffects(taints, fakePod.Spec.Tolerations)
+	return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, fakePod.Spec.Tolerations)
 }
 
 func toleratesTaintsWithNoScheduleNoExecuteEffects(taints []v1.Taint, tolerations []v1.Toleration) bool {
diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD
index eea9f1b2292..6b46ce43a6f 100644
--- a/test/integration/framework/BUILD
+++ b/test/integration/framework/BUILD
@@ -44,7 +44,6 @@ go_library(
         "//pkg/kubeapiserver:go_default_library",
        "//pkg/kubelet/client:go_default_library",
        "//pkg/master:go_default_library",
-        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/util/env:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
diff --git a/test/integration/framework/util.go b/test/integration/framework/util.go
index 089c089ff34..d947da2b3ff 100644
--- a/test/integration/framework/util.go
+++ b/test/integration/framework/util.go
@@ -34,7 +34,6 @@ import (
 	"k8s.io/klog"
 
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
-	schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	testutils "k8s.io/kubernetes/test/utils"
 )
 
@@ -250,8 +249,6 @@ func isNodeUntainted(node *v1.Node) bool {
 		},
 	}
 
-	nodeInfo := schedfwk.NewNodeInfo()
-
 	// Simple lookup for nonblocking taints based on comma-delimited list.
 	nonblockingTaintsMap := map[string]struct{}{}
 	for _, t := range strings.Split(nonblockingTaints, ",") {
@@ -260,6 +257,7 @@ func isNodeUntainted(node *v1.Node) bool {
 		}
 	}
 
+	n := node
 	if len(nonblockingTaintsMap) > 0 {
 		nodeCopy := node.DeepCopy()
 		nodeCopy.Spec.Taints = []v1.Taint{}
@@ -268,18 +266,10 @@ func isNodeUntainted(node *v1.Node) bool {
 			nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v)
 		}
 	}
-		nodeInfo.SetNode(nodeCopy)
-	} else {
-		nodeInfo.SetNode(node)
+		n = nodeCopy
 	}
-	taints, err := nodeInfo.Taints()
-	if err != nil {
-		klog.Fatalf("Can't test predicates for node %s: %v", node.Name, err)
-		return false
-	}
-
-	return v1helper.TolerationsTolerateTaintsWithFilter(fakePod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
+	return v1helper.TolerationsTolerateTaintsWithFilter(fakePod.Spec.Tolerations, n.Spec.Taints, func(t *v1.Taint) bool {
 		return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
 	})
 }