move nodeinfo type to framework pkg

Abdullah Gharaibeh
2020-04-06 21:40:15 -04:00
parent 87966c39f1
commit a5d8172715
98 changed files with 837 additions and 1053 deletions


@@ -19,7 +19,6 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -61,7 +60,6 @@ go_test(
"//pkg/features:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",


@@ -27,7 +27,6 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
var _ framework.PreFilterPlugin = &Fit{}
@@ -56,7 +55,7 @@ type FitArgs struct {
// preFilterState computed at PreFilter and used at Filter.
type preFilterState struct {
schedulertypes.Resource
framework.Resource
}
// Clone the prefilter state.
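
Since the computed request is treated as read-only after PreFilter, a Clone for such a state can simply return the receiver. A minimal sketch, assuming the framework's StateData interface; not necessarily the plugin's exact implementation:

// Clone returns the receiver unchanged: the precomputed request is never
// mutated after PreFilter, so sharing the pointer is safe.
func (s *preFilterState) Clone() framework.StateData {
	return s
}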
@@ -69,7 +68,7 @@ func (f *Fit) Name() string {
return FitName
}
// computePodResourceRequest returns a schedulertypes.Resource that covers the largest
// computePodResourceRequest returns a framework.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
// regular containers since they run simultaneously.
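
As a hedged illustration of the max-vs-sum rule in that comment, the sketch below uses a simplified stand-in for framework.Resource (only MilliCPU and Memory), not the real type:

package main

import "fmt"

// res is a simplified stand-in for framework.Resource, for illustration only.
type res struct {
	MilliCPU int64
	Memory   int64
}

// podRequest follows the rule described above: sum the regular containers
// (they run concurrently), then raise each dimension to the max of any
// single init container (they run one at a time).
func podRequest(containers, initContainers []res) res {
	var r res
	for _, c := range containers {
		r.MilliCPU += c.MilliCPU
		r.Memory += c.Memory
	}
	for _, ic := range initContainers {
		if ic.MilliCPU > r.MilliCPU {
			r.MilliCPU = ic.MilliCPU
		}
		if ic.Memory > r.Memory {
			r.Memory = ic.Memory
		}
	}
	return r
}

func main() {
	// Mirrors the "highest init container cpu" test case further down:
	// one regular container (1, 1) plus init containers (3, 1) and (2, 1)
	// yield an effective request of (3, 1).
	fmt.Println(podRequest(
		[]res{{MilliCPU: 1, Memory: 1}},
		[]res{{MilliCPU: 3, Memory: 1}, {MilliCPU: 2, Memory: 1}}))
}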
@@ -143,7 +142,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
// Filter invoked at the filter extension point.
// Checks whether a node has sufficient resources, such as CPU, memory, GPU, and opaque integer resources, to run a pod.
// It returns a list of insufficient resources; if the list is empty, the node has all the resources requested by the pod.
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@@ -174,11 +173,11 @@ type InsufficientResource struct {
}
// Fits checks if a node has enough resources to host the pod.
func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
return fitsRequest(computePodResourceRequest(pod), nodeInfo, ignoredExtendedResources)
}
func fitsRequest(podRequest *preFilterState, nodeInfo *schedulertypes.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
insufficientResources := make([]InsufficientResource, 0, 4)
allowedPodNumber := nodeInfo.AllowedPodNumber()
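
The shape of that check can be sketched as follows; this is a simplified, hypothetical version of fitsRequest covering only CPU and memory, not the real implementation:

package main

import "fmt"

// insufficient mirrors the role of InsufficientResource: what the pod
// requested, what the node already uses, and the node's capacity.
type insufficient struct {
	Reason    string
	Requested int64
	Used      int64
	Capacity  int64
}

// checkFit accumulates every dimension that does not fit instead of
// returning on the first failure, so the caller can report all
// violations at once; an empty result means the pod fits.
func checkFit(reqCPU, usedCPU, capCPU, reqMem, usedMem, capMem int64) []insufficient {
	out := make([]insufficient, 0, 2)
	if reqCPU > capCPU-usedCPU {
		out = append(out, insufficient{"Insufficient cpu", reqCPU, usedCPU, capCPU})
	}
	if reqMem > capMem-usedMem {
		out = append(out, insufficient{"Insufficient memory", reqMem, usedMem, capMem})
	}
	return out
}

func main() {
	// Request 1 CPU / 1 memory against a 10/20 node that is already full:
	// both dimensions fail, matching the "too many resources fails" test
	// case below.
	fmt.Println(checkFit(1, 10, 10, 1, 20, 20))
}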


@@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
var (
@@ -62,7 +61,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
}
}
func newResourcePod(usage ...schedulertypes.Resource) *v1.Pod {
func newResourcePod(usage ...framework.Resource) *v1.Pod {
containers := []v1.Container{}
for _, req := range usage {
containers = append(containers, v1.Container{
@@ -76,7 +75,7 @@ func newResourcePod(usage ...schedulertypes.Resource) *v1.Pod {
}
}
func newResourceInitPod(pod *v1.Pod, usage ...schedulertypes.Resource) *v1.Pod {
func newResourceInitPod(pod *v1.Pod, usage ...framework.Resource) *v1.Pod {
pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
return pod
}
@@ -93,7 +92,7 @@ func getErrReason(rn v1.ResourceName) string {
func TestEnoughRequests(t *testing.T) {
enoughPodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
name string
ignoredResources []byte
wantInsufficientResources []InsufficientResource
@@ -101,266 +100,266 @@ func TestEnoughRequests(t *testing.T) {
}{
{
pod: &v1.Pod{},
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
name: "no resources requested always fits",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 8, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 3, Memory: 1}, schedulertypes.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 8, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}, framework.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 3}, schedulertypes.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}, framework.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "init container fits because it's the max, not sum, of containers and init containers",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 1}, schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}, framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "multiple init containers fit because it's the max, not sum, of containers and init containers",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "both resources fit",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 5})),
pod: newResourcePod(framework.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 5})),
name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "equal edge case",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 4, Memory: 1}), schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 4, Memory: 1}), framework.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "equal edge case for init container",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(schedulertypes.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{})),
pod: newResourcePod(framework.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{})),
name: "extended resource fits",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), schedulertypes.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}), framework.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{})),
name: "extended resource fits for init container",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable admits multiple init containers",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
name: "hugepages resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`),
name: "skip checking ignored extended resource",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceOverheadPod(
newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "resources + pod overhead fits",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceOverheadPod(
newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "requests + overhead does not fit for memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}},
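
The raw JSON passed through ignoredResources in the "skip checking ignored extended resource" case above is decoded into the plugin's arguments. A minimal sketch of that decoding, assuming a struct with the same JSON key (the real FitArgs may carry more fields):

package main

import (
	"encoding/json"
	"fmt"
)

// fitArgs is a pared-down stand-in for FitArgs; only the field exercised
// by the test case above is included.
type fitArgs struct {
	IgnoredResources []string `json:"IgnoredResources"`
}

func main() {
	raw := []byte(`{"IgnoredResources" : ["example.com/bbb"]}`)
	var args fitArgs
	if err := json.Unmarshal(raw, &args); err != nil {
		panic(err)
	}
	// Requests for example.com/bbb are then skipped during the fit check.
	fmt.Println(args.IgnoredResources)
}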
@@ -395,7 +394,7 @@ func TestEnoughRequests(t *testing.T) {
func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
node := v1.Node{}
nodeInfo.SetNode(&node)
p, _ := NewFit(nil, nil)
@@ -410,32 +409,32 @@ func TestPreFilterDisabled(t *testing.T) {
func TestNotEnoughRequests(t *testing.T) {
notEnoughPodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
fits bool
name string
wantStatus *framework.Status
}{
{
pod: &v1.Pod{},
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
name: "even without specified resources predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "even if both resources fit predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "even for equal edge case predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}), schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}), framework.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
},
@@ -464,34 +463,34 @@ func TestNotEnoughRequests(t *testing.T) {
func TestStorageRequests(t *testing.T) {
storagePodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
name string
wantStatus *framework.Status
}{
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 10})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 10, Memory: 10})),
name: "due to container scratch disk",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 10})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 2, Memory: 10})),
name: "pod fit",
},
{
pod: newResourcePod(schedulertypes.Resource{EphemeralStorage: 25}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 2})),
pod: newResourcePod(framework.Resource{EphemeralStorage: 25}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
name: "storage ephemeral local storage request exceeds allocatable",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)),
},
{
pod: newResourcePod(schedulertypes.Resource{EphemeralStorage: 10}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 2})),
pod: newResourcePod(framework.Resource{EphemeralStorage: 10}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
name: "pod fits",
},
}


@@ -23,7 +23,6 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@@ -46,7 +45,7 @@ type resourceToValueMap map[v1.ResourceName]int64
// score will use `scorer` function to calculate the score.
func (r *resourceAllocationScorer) score(
pod *v1.Pod,
nodeInfo *schedulertypes.NodeInfo) (int64, *framework.Status) {
nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
node := nodeInfo.Node()
if node == nil {
return 0, framework.NewStatus(framework.Error, "node not found")
@@ -90,7 +89,7 @@ func (r *resourceAllocationScorer) score(
}
// calculateResourceAllocatableRequest returns the allocatable and requested values of the given resource on the node
func calculateResourceAllocatableRequest(nodeInfo *schedulertypes.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
allocatable := nodeInfo.AllocatableResource()
requested := nodeInfo.RequestedResource()
podRequest := calculatePodResourceRequest(pod, resource)
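
calculateResourceAllocatableRequest feeds the scorer's strategy function with (allocatable, requested) pairs. As a hedged sketch of what such a strategy can look like, here is a least-allocated-style formula; the actual scorer functions are configured elsewhere and may differ:

package main

import "fmt"

// leastAllocatedScore maps an (allocatable, requested) pair to a 0-100
// score: the more free capacity remains after placing the pod, the higher
// the score. 100 here plays the role of the framework's maximum node score.
func leastAllocatedScore(requested, allocatable int64) int64 {
	if allocatable <= 0 || requested > allocatable {
		return 0
	}
	return (allocatable - requested) * 100 / allocatable
}

func main() {
	fmt.Println(leastAllocatedScore(250, 1000)) // 75: three quarters of the node stays free
}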


@@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// ResourceLimits is a score plugin that increases the score of a node by 1 if the node satisfies
@@ -46,7 +45,7 @@ const (
// preScoreState computed at PreScore and used at Score.
type preScoreState struct {
podResourceRequest *schedulertypes.Resource
podResourceRequest *framework.Resource
}
// Clone the preScore state.
@@ -81,7 +80,7 @@ func (rl *ResourceLimits) PreScore(
return nil
}
func getPodResource(cycleState *framework.CycleState) (*schedulertypes.Resource, error) {
func getPodResource(cycleState *framework.CycleState) (*framework.Resource, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err)
@@ -136,9 +135,9 @@ func NewResourceLimits(_ *runtime.Unknown, h framework.FrameworkHandle) (framewo
// getResourceLimits computes the resource limits for the input pod.
// This new function exists to stay consistent with other priority
// functions, since most (perhaps all) of them work
// with schedulertypes.Resource.
func getResourceLimits(pod *v1.Pod) *schedulertypes.Resource {
result := &schedulertypes.Resource{}
// with framework.Resource.
func getResourceLimits(pod *v1.Pod) *framework.Resource {
result := &framework.Resource{}
for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Limits)
}
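
The hunk above is cut off before the function ends (the real getResourceLimits also accounts for init containers), but the accumulation in the loop can be illustrated with real k8s.io/api types; the helper name below is hypothetical:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// sumContainerLimits folds each container's limits into flat totals, the
// way result.Add folds each v1.ResourceList into the running
// framework.Resource (which also tracks pods, ephemeral storage, and
// scalar resources).
func sumContainerLimits(containers []v1.Container) (milliCPU, memory int64) {
	for _, c := range containers {
		milliCPU += c.Resources.Limits.Cpu().MilliValue()
		memory += c.Resources.Limits.Memory().Value()
	}
	return milliCPU, memory
}

func main() {
	containers := []v1.Container{{
		Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("500m"),
				v1.ResourceMemory: resource.MustParse("128Mi"),
			},
		},
	}}
	cpu, mem := sumContainerLimits(containers)
	fmt.Println(cpu, mem) // 500 134217728
}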