Cleanup obsolete NodeInfo methods

Abdullah Gharaibeh
2020-04-10 13:49:39 -04:00
parent 3641d40a98
commit bed9b2f23b
29 changed files with 78 additions and 229 deletions
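
For context, a minimal sketch (not part of this commit) of the API shape the kubelet admission code moves to: the schedulernodeinfo package is dropped in favor of the scheduler framework's NodeInfo, whose allocatable resources are read from the exported Allocatable field rather than the removed AllocatableResource() accessor. The node, pod list, and the example.com/gpu resource name below are hypothetical placeholders.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func main() {
	// Hypothetical inputs standing in for what the kubelet passes to Admit.
	node := &v1.Node{}
	var otherPods []*v1.Pod

	// Build the node snapshot from the framework package (previously
	// schedulernodeinfo.NewNodeInfo) and attach the node.
	nodeInfo := framework.NewNodeInfo(otherPods...)
	nodeInfo.SetNode(node)

	// Extended resources are looked up on the exported Allocatable field
	// (previously nodeInfo.AllocatableResource().ScalarResources).
	_, found := nodeInfo.Allocatable.ScalarResources["example.com/gpu"]
	fmt.Println("extended resource on node:", found)
}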

View File

@@ -26,7 +26,7 @@ go_library(
"//pkg/scheduler/framework/plugins/nodename:go_default_library",
"//pkg/scheduler/framework/plugins/nodeports:go_default_library",
"//pkg/scheduler/framework/plugins/noderesources:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -49,7 +49,7 @@ go_test(
"//pkg/kubelet/util/format:go_default_library",
"//pkg/scheduler/framework/plugins/nodename:go_default_library",
"//pkg/scheduler/framework/plugins/nodeports:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -19,22 +19,21 @@ package lifecycle
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/kubelet/util/format"
pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/api/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/kubelet/util/format"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
type getNodeAnyWayFuncType func() (*v1.Node, error)
-type pluginResourceUpdateFuncType func(*schedulernodeinfo.NodeInfo, *PodAdmitAttributes) error
+type pluginResourceUpdateFuncType func(*framework.NodeInfo, *PodAdmitAttributes) error
// AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
// This allows for the graceful handling of pod admission failure.
@@ -70,7 +69,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
}
admitPod := attrs.Pod
pods := attrs.OtherPods
-nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
+nodeInfo := framework.NewNodeInfo(pods...)
nodeInfo.SetNode(node)
// ensure the node has enough plugin resources for that required in pods
if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
@@ -156,7 +155,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
}
}
-func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *v1.Pod {
+func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *framework.NodeInfo) *v1.Pod {
podCopy := pod.DeepCopy()
for i, c := range pod.Spec.Containers {
// We only handle requests in Requests but not Limits because the
@@ -165,7 +164,7 @@ func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.Nod
podCopy.Spec.Containers[i].Resources.Requests = make(v1.ResourceList)
for rName, rQuant := range c.Resources.Requests {
if v1helper.IsExtendedResourceName(rName) {
-if _, found := nodeInfo.AllocatableResource().ScalarResources[rName]; !found {
+if _, found := nodeInfo.Allocatable.ScalarResources[rName]; !found {
continue
}
}
@@ -220,7 +219,7 @@ func (e *PredicateFailureError) GetReason() string {
}
// GeneralPredicates checks a group of predicates that the kubelet cares about.
-func GeneralPredicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) ([]PredicateFailureReason, error) {
+func GeneralPredicates(pod *v1.Pod, nodeInfo *framework.NodeInfo) ([]PredicateFailureReason, error) {
if nodeInfo.Node() == nil {
return nil, fmt.Errorf("node not found")
}
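
As a usage illustration only (the caller below is hypothetical, not part of this commit), the updated GeneralPredicates now takes a *framework.NodeInfo; the node name "machine1" mirrors the test fixtures in the next file.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func main() {
	// Hypothetical node; a real caller uses the kubelet's own node object.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}}

	// GeneralPredicates requires a NodeInfo with a node attached,
	// otherwise it returns the "node not found" error seen above.
	nodeInfo := framework.NewNodeInfo() // no other pods on the node
	nodeInfo.SetNode(node)

	reasons, err := lifecycle.GeneralPredicates(&v1.Pod{}, nodeInfo)
	fmt.Println(len(reasons), err)
}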

View File

@@ -26,7 +26,7 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
var (
@@ -83,7 +83,7 @@ func TestRemoveMissingExtendedResources(t *testing.T) {
),
},
} {
-nodeInfo := schedulernodeinfo.NewNodeInfo()
+nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(test.node)
pod := removeMissingExtendedResources(test.pod, nodeInfo)
if !reflect.DeepEqual(pod, test.expectedPod) {
@@ -144,7 +144,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
}
}
-func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
+func newResourcePod(usage ...framework.Resource) *v1.Pod {
containers := []v1.Container{}
for _, req := range usage {
containers = append(containers, v1.Container{
@@ -177,7 +177,7 @@ func newPodWithPort(hostPorts ...int) *v1.Pod {
func TestGeneralPredicates(t *testing.T) {
resourceTests := []struct {
pod *v1.Pod
-nodeInfo *schedulernodeinfo.NodeInfo
+nodeInfo *framework.NodeInfo
node *v1.Node
fits bool
name string
@@ -186,8 +186,8 @@ func TestGeneralPredicates(t *testing.T) {
}{
{
pod: &v1.Pod{},
-nodeInfo: schedulernodeinfo.NewNodeInfo(
-newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
+nodeInfo: framework.NewNodeInfo(
+newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -197,9 +197,9 @@ func TestGeneralPredicates(t *testing.T) {
name: "no resources/port/host requested always fits",
},
{
-pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 10}),
-nodeInfo: schedulernodeinfo.NewNodeInfo(
-newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
+pod: newResourcePod(framework.Resource{MilliCPU: 8, Memory: 10}),
+nodeInfo: framework.NewNodeInfo(
+newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -218,7 +218,7 @@ func TestGeneralPredicates(t *testing.T) {
NodeName: "machine2",
},
},
-nodeInfo: schedulernodeinfo.NewNodeInfo(),
+nodeInfo: framework.NewNodeInfo(),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -230,7 +230,7 @@ func TestGeneralPredicates(t *testing.T) {
},
{
pod: newPodWithPort(123),
-nodeInfo: schedulernodeinfo.NewNodeInfo(newPodWithPort(123)),
+nodeInfo: framework.NewNodeInfo(newPodWithPort(123)),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},