Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)

Merge pull request #103934 from boenn/tainttoleration

De-duplicate predicate (known as filter now) logic shared in kubelet and scheduler

Commit: 0dcd6eaa0d
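
The change replaces the kubelet's GeneralPredicates with a thin wrapper over a new shared helper, scheduler.AdmissionCheck, so the kubelet admission path and the scheduler's pre-enqueue check run the same node-resources/node-affinity/node-name/node-ports filters. A minimal sketch of the two call patterns, using only the exported names that appear in the diff below; the wrapper functions here are hypothetical, for illustration only:

package admissionsketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// kubeletStyleCheck mirrors the kubelet path: collect every failure so they
// can all be reported back to the user.
func kubeletStyleCheck(pod *v1.Pod, nodeInfo *framework.NodeInfo) []scheduler.AdmissionResult {
	return scheduler.AdmissionCheck(pod, nodeInfo, true)
}

// schedulerStyleCheck mirrors the scheduler's pre-enqueue path: only a yes/no
// answer is needed, so it stops at the first failure.
func schedulerStyleCheck(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool {
	return len(scheduler.AdmissionCheck(pod, nodeInfo, false)) == 0
}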
@@ -21,16 +21,10 @@ import (
"runtime"

v1 "k8s.io/api/core/v1"
"k8s.io/apiserver/pkg/util/feature"
v1affinityhelper "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/klog/v2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
)

type getNodeAnyWayFuncType func() (*v1.Node, error)
@@ -96,17 +90,8 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
// the Resource Class API in the future.
podWithoutMissingExtendedResources := removeMissingExtendedResources(admitPod, nodeInfo)

reasons, err := GeneralPredicates(podWithoutMissingExtendedResources, nodeInfo)
fit := len(reasons) == 0 && err == nil
if err != nil {
message := fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", err)
klog.InfoS("Failed to admit pod, GeneralPredicates failed", "pod", klog.KObj(admitPod), "err", err)
return PodAdmitResult{
Admit: fit,
Reason: "UnexpectedAdmissionError",
Message: message,
}
}
reasons := generalFilter(podWithoutMissingExtendedResources, nodeInfo)
fit := len(reasons) == 0
if !fit {
reasons, err = w.admissionFailureHandler.HandleAdmissionFailure(admitPod, reasons)
fit = len(reasons) == 0 && err == nil
@@ -269,33 +254,21 @@ func (e *PredicateFailureError) GetReason() string {
return e.PredicateDesc
}

// GeneralPredicates checks a group of predicates that the kubelet cares about.
func GeneralPredicates(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) ([]PredicateFailureReason, error) {
if nodeInfo.Node() == nil {
return nil, fmt.Errorf("node not found")
}

// generalFilter checks a group of filterings that the kubelet cares about.
func generalFilter(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) []PredicateFailureReason {
admissionResults := scheduler.AdmissionCheck(pod, nodeInfo, true)
var reasons []PredicateFailureReason
for _, r := range noderesources.Fits(pod, nodeInfo, feature.DefaultFeatureGate.Enabled(features.PodOverhead)) {
reasons = append(reasons, &InsufficientResourceError{
ResourceName: r.ResourceName,
Requested: r.Requested,
Used: r.Used,
Capacity: r.Capacity,
})
for _, r := range admissionResults {
if r.InsufficientResource != nil {
reasons = append(reasons, &InsufficientResourceError{
ResourceName: r.InsufficientResource.ResourceName,
Requested: r.InsufficientResource.Requested,
Used: r.InsufficientResource.Used,
Capacity: r.InsufficientResource.Capacity,
})
} else {
reasons = append(reasons, &PredicateFailureError{r.Name, r.Reason})
}
}

// Ignore parsing errors for backwards compatibility.
match, _ := v1affinityhelper.GetRequiredNodeAffinity(pod).Match(nodeInfo.Node())
if !match {
reasons = append(reasons, &PredicateFailureError{nodeaffinity.Name, nodeaffinity.ErrReasonPod})
}
if !nodename.Fits(pod, nodeInfo) {
reasons = append(reasons, &PredicateFailureError{nodename.Name, nodename.ErrReason})
}
if !nodeports.Fits(pod, nodeInfo) {
reasons = append(reasons, &PredicateFailureError{nodeports.Name, nodeports.ErrReason})
}

return reasons, nil
return reasons
}
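
For context on how the kubelet consumes the reasons returned by generalFilter: each failure is rendered through its GetReason() method (shown above for PredicateFailureError) and surfaced in the admission result. A hypothetical, self-contained rendering helper, assuming the PredicateFailureReason interface exposes GetReason() as that method suggests; the reason strings are illustrative values, not quotes from the code:

package main

import (
	"fmt"
	"strings"
)

// predicateFailureReason is a local stand-in for the kubelet's
// PredicateFailureReason interface (illustration only).
type predicateFailureReason interface {
	GetReason() string
}

type simpleReason string

func (s simpleReason) GetReason() string { return string(s) }

// rejectionMessage joins the failure reasons into one human-readable message,
// roughly what the kubelet reports when a pod is not admitted.
func rejectionMessage(reasons []predicateFailureReason) string {
	msgs := make([]string, 0, len(reasons))
	for _, r := range reasons {
		msgs = append(msgs, r.GetReason())
	}
	return strings.Join(msgs, ", ")
}

func main() {
	fmt.Println(rejectionMessage([]predicateFailureReason{
		simpleReason("node(s) didn't match Pod's node affinity"),
		simpleReason("node(s) didn't have free ports for the requested pod ports"),
	}))
}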
@@ -17,10 +17,10 @@ limitations under the License.
package lifecycle

import (
"reflect"
goruntime "runtime"
"testing"

"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -87,8 +87,8 @@ func TestRemoveMissingExtendedResources(t *testing.T) {
nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(test.node)
pod := removeMissingExtendedResources(test.pod, nodeInfo)
if !reflect.DeepEqual(pod, test.expectedPod) {
t.Errorf("%s: Expected pod\n%v\ngot\n%v\n", test.desc, test.expectedPod, pod)
if diff := cmp.Diff(test.expectedPod, pod); diff != "" {
t.Errorf("unexpected pod (-want, +got):\n%s", diff)
}
}
}
@@ -180,9 +180,7 @@ func TestGeneralPredicates(t *testing.T) {
pod *v1.Pod
nodeInfo *schedulerframework.NodeInfo
node *v1.Node
fits bool
name string
wErr error
reasons []PredicateFailureReason
}{
{
@@ -196,8 +194,6 @@ func TestGeneralPredicates(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
},
fits: true,
wErr: nil,
name: "no resources/port/host requested always fits",
},
{
@@ -214,8 +210,6 @@ func TestGeneralPredicates(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
},
fits: false,
wErr: nil,
reasons: []PredicateFailureReason{
&InsufficientResourceError{ResourceName: v1.ResourceCPU, Requested: 8, Used: 5, Capacity: 10},
&InsufficientResourceError{ResourceName: v1.ResourceMemory, Requested: 10, Used: 19, Capacity: 20},
@@ -233,8 +227,6 @@ func TestGeneralPredicates(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
},
fits: false,
wErr: nil,
reasons: []PredicateFailureReason{&PredicateFailureError{nodename.Name, nodename.ErrReason}},
name: "host not match",
},
@@ -245,8 +237,6 @@ func TestGeneralPredicates(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
},
fits: false,
wErr: nil,
reasons: []PredicateFailureReason{&PredicateFailureError{nodeports.Name, nodeports.ErrReason}},
name: "hostport conflict",
},
@@ -254,16 +244,9 @@ func TestGeneralPredicates(t *testing.T) {
for _, test := range resourceTests {
t.Run(test.name, func(t *testing.T) {
test.nodeInfo.SetNode(test.node)
reasons, err := GeneralPredicates(test.pod, test.nodeInfo)
fits := len(reasons) == 0 && err == nil
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !fits && !reflect.DeepEqual(reasons, test.reasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.reasons)
}
if fits != test.fits {
t.Errorf("expected: %v got %v", test.fits, fits)
reasons := generalFilter(test.pod, test.nodeInfo)
if diff := cmp.Diff(test.reasons, reasons); diff != "" {
t.Errorf("unexpected failure reasons (-want, +got):\n%s", diff)
}
})
}
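
The test updates above also swap reflect.DeepEqual plus hand-rolled error messages for github.com/google/go-cmp. The property being relied on: cmp.Diff returns an empty string when the two values are equal, and a -want/+got diff otherwise. A tiny standalone illustration; the struct and values are made up:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type reason struct{ Name, Message string }

func main() {
	want := []reason{{Name: "NodePorts", Message: "port 80 already in use"}}
	got := []reason{{Name: "NodeName", Message: "node didn't match"}}

	// Empty diff means equal; a non-empty diff is printed in -want/+got form,
	// which is exactly how the updated tests assert on failure reasons.
	if diff := cmp.Diff(want, got); diff != "" {
		fmt.Printf("unexpected reasons (-want, +got):\n%s", diff)
	}
}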
@@ -21,20 +21,20 @@ import (
"reflect"
"strings"

"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/klog/v2"

v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
corev1nodeaffinity "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
@@ -470,37 +470,62 @@ func nodeSpecUnschedulableChanged(newNode *v1.Node, oldNode *v1.Node) bool {
}

func preCheckForNode(nodeInfo *framework.NodeInfo) queue.PreEnqueueCheck {
// In addition to the checks in kubelet (pkg/kubelet/lifecycle/predicate.go#GeneralPredicates),
// the following logic appends a taint/toleration check.
// TODO: verify if kubelet should also apply the taint/toleration check, and then unify the
// logic with kubelet and move to a shared place.
//
// Note: the following checks don't take preemption into consideration; in very rare
// cases (e.g., node resizing), "pod" may still fail a check but preemption helps. We deliberately
// chose to ignore those cases as unschedulable pods will be re-queued eventually.
return func(pod *v1.Pod) bool {
if len(noderesources.Fits(pod, nodeInfo, feature.DefaultFeatureGate.Enabled(features.PodOverhead))) != 0 {
admissionResults := AdmissionCheck(pod, nodeInfo, false)
if len(admissionResults) != 0 {
return false
}

// Ignore parsing errors for backwards compatibility.
matches, _ := nodeaffinity.GetRequiredNodeAffinity(pod).Match(nodeInfo.Node())
if !matches {
return false
}

if !nodename.Fits(pod, nodeInfo) {
return false
}

if !nodeports.Fits(pod, nodeInfo) {
return false
}

_, isUntolerated := v1helper.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
_, isUntolerated := corev1helpers.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
return t.Effect == v1.TaintEffectNoSchedule
})
return !isUntolerated
}
}

// AdmissionCheck calls the filtering logic of noderesources/nodeport/nodeAffinity/nodename
// and returns the failure reasons. It's used in kubelet (pkg/kubelet/lifecycle/predicate.go) and scheduler.
// It returns the first failure if `includeAllFailures` is set to false; otherwise
// returns all failures.
func AdmissionCheck(pod *v1.Pod, nodeInfo *framework.NodeInfo, includeAllFailures bool) []AdmissionResult {
var admissionResults []AdmissionResult
insufficientResources := noderesources.Fits(pod, nodeInfo, feature.DefaultFeatureGate.Enabled(features.PodOverhead))
if len(insufficientResources) != 0 {
for i := range insufficientResources {
admissionResults = append(admissionResults, AdmissionResult{InsufficientResource: &insufficientResources[i]})
}
if !includeAllFailures {
return admissionResults
}
}

if matches, _ := corev1nodeaffinity.GetRequiredNodeAffinity(pod).Match(nodeInfo.Node()); !matches {
admissionResults = append(admissionResults, AdmissionResult{Name: nodeaffinity.Name, Reason: nodeaffinity.ErrReasonPod})
if !includeAllFailures {
return admissionResults
}
}
if !nodename.Fits(pod, nodeInfo) {
admissionResults = append(admissionResults, AdmissionResult{Name: nodename.Name, Reason: nodename.ErrReason})
if !includeAllFailures {
return admissionResults
}
}
if !nodeports.Fits(pod, nodeInfo) {
admissionResults = append(admissionResults, AdmissionResult{Name: nodeports.Name, Reason: nodeports.ErrReason})
if !includeAllFailures {
return admissionResults
}
}
return admissionResults
}

// AdmissionResult describes the reason why Scheduler can't admit the pod.
// If the reason is a resource fit one, then AdmissionResult.InsufficientResource includes the details.
type AdmissionResult struct {
Name string
Reason string
InsufficientResource *noderesources.InsufficientResource
}
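
One behavioral detail in the scheduler hunk above: the pre-enqueue taint check now filters on NoSchedule taints only (NoExecute is no longer part of the inclusion filter), and the lookup goes through the corev1helpers alias. A standalone sketch of that call, with made-up taint and toleration values:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
)

func main() {
	nodeTaints := []v1.Taint{{Key: "maintenance", Effect: v1.TaintEffectNoSchedule}}
	podTolerations := []v1.Toleration{} // this pod tolerates nothing

	// Mirrors preCheckForNode: only NoSchedule taints are considered blocking.
	_, isUntolerated := corev1helpers.FindMatchingUntoleratedTaint(nodeTaints, podTolerations, func(t *v1.Taint) bool {
		return t.Effect == v1.TaintEffectNoSchedule
	})
	fmt.Println("pod may be enqueued for this node:", !isUntolerated)
}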
@@ -29,13 +29,22 @@ import (
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic/dynamicinformer"
dyfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"

"k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/queue"
st "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -450,3 +459,62 @@ func TestAddAllEventHandlers(t *testing.T) {
})
}
}

func TestAdmissionCheck(t *testing.T) {
nodeaffinityError := AdmissionResult{Name: nodeaffinity.Name, Reason: nodeaffinity.ErrReasonPod}
nodenameError := AdmissionResult{Name: nodename.Name, Reason: nodename.ErrReason}
nodeportsError := AdmissionResult{Name: nodeports.Name, Reason: nodeports.ErrReason}
podOverheadError := AdmissionResult{InsufficientResource: &noderesources.InsufficientResource{ResourceName: v1.ResourceCPU, Reason: "Insufficient cpu", Requested: 2000, Used: 7000, Capacity: 8000}}
cpu := map[v1.ResourceName]string{v1.ResourceCPU: "8"}
tests := []struct {
name string
node *v1.Node
existingPods []*v1.Pod
pod *v1.Pod
wantAdmissionResults [][]AdmissionResult
}{
{
name: "check nodeAffinity and nodeports, nodeAffinity need fail quickly if includeAllFailures is false",
node: st.MakeNode().Name("fake-node").Label("foo", "bar").Obj(),
pod: st.MakePod().Name("pod2").HostPort(80).NodeSelector(map[string]string{"foo": "bar1"}).Obj(),
existingPods: []*v1.Pod{
st.MakePod().Name("pod1").HostPort(80).Obj(),
},
wantAdmissionResults: [][]AdmissionResult{{nodeaffinityError, nodeportsError}, {nodeaffinityError}},
},
{
name: "check PodOverhead and nodeAffinity, PodOverhead need fail quickly if includeAllFailures is false",
node: st.MakeNode().Name("fake-node").Label("foo", "bar").Capacity(cpu).Obj(),
pod: st.MakePod().Name("pod2").Container("c").Overhead(v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}).Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).NodeSelector(map[string]string{"foo": "bar1"}).Obj(),
existingPods: []*v1.Pod{
st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "7"}).Node("fake-node").Obj(),
},
wantAdmissionResults: [][]AdmissionResult{{podOverheadError, nodeaffinityError}, {podOverheadError}},
},
{
name: "check nodename and nodeports, nodename need fail quickly if includeAllFailures is false",
node: st.MakeNode().Name("fake-node").Obj(),
pod: st.MakePod().Name("pod2").HostPort(80).Node("fake-node1").Obj(),
existingPods: []*v1.Pod{
st.MakePod().Name("pod1").HostPort(80).Node("fake-node").Obj(),
},
wantAdmissionResults: [][]AdmissionResult{{nodenameError, nodeportsError}, {nodenameError}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodOverhead, true)()
nodeInfo := framework.NewNodeInfo(tt.existingPods...)
nodeInfo.SetNode(tt.node)

flags := []bool{true, false}
for i := range flags {
admissionResults := AdmissionCheck(tt.pod, nodeInfo, flags[i])

if diff := cmp.Diff(tt.wantAdmissionResults[i], admissionResults); diff != "" {
t.Errorf("Unexpected admissionResults (-want, +got):\n%s", diff)
}
}
})
}
}
@@ -174,7 +174,6 @@ func computePodResourceRequest(pod *v1.Pod, enablePodOverhead bool) *preFilterSt
if pod.Spec.Overhead != nil && enablePodOverhead {
result.Add(pod.Spec.Overhead)
}

return result
}

@@ -259,11 +258,11 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor
allowedPodNumber := nodeInfo.Allocatable.AllowedPodNumber
if len(nodeInfo.Pods)+1 > allowedPodNumber {
insufficientResources = append(insufficientResources, InsufficientResource{
v1.ResourcePods,
"Too many pods",
1,
int64(len(nodeInfo.Pods)),
int64(allowedPodNumber),
ResourceName: v1.ResourcePods,
Reason: "Too many pods",
Requested: 1,
Used: int64(len(nodeInfo.Pods)),
Capacity: int64(allowedPodNumber),
})
}

@@ -276,29 +275,29 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor

if podRequest.MilliCPU > (nodeInfo.Allocatable.MilliCPU - nodeInfo.Requested.MilliCPU) {
insufficientResources = append(insufficientResources, InsufficientResource{
v1.ResourceCPU,
"Insufficient cpu",
podRequest.MilliCPU,
nodeInfo.Requested.MilliCPU,
nodeInfo.Allocatable.MilliCPU,
ResourceName: v1.ResourceCPU,
Reason: "Insufficient cpu",
Requested: podRequest.MilliCPU,
Used: nodeInfo.Requested.MilliCPU,
Capacity: nodeInfo.Allocatable.MilliCPU,
})
}
if podRequest.Memory > (nodeInfo.Allocatable.Memory - nodeInfo.Requested.Memory) {
insufficientResources = append(insufficientResources, InsufficientResource{
v1.ResourceMemory,
"Insufficient memory",
podRequest.Memory,
nodeInfo.Requested.Memory,
nodeInfo.Allocatable.Memory,
ResourceName: v1.ResourceMemory,
Reason: "Insufficient memory",
Requested: podRequest.Memory,
Used: nodeInfo.Requested.Memory,
Capacity: nodeInfo.Allocatable.Memory,
})
}
if podRequest.EphemeralStorage > (nodeInfo.Allocatable.EphemeralStorage - nodeInfo.Requested.EphemeralStorage) {
insufficientResources = append(insufficientResources, InsufficientResource{
v1.ResourceEphemeralStorage,
"Insufficient ephemeral-storage",
podRequest.EphemeralStorage,
nodeInfo.Requested.EphemeralStorage,
nodeInfo.Allocatable.EphemeralStorage,
ResourceName: v1.ResourceEphemeralStorage,
Reason: "Insufficient ephemeral-storage",
Requested: podRequest.EphemeralStorage,
Used: nodeInfo.Requested.EphemeralStorage,
Capacity: nodeInfo.Allocatable.EphemeralStorage,
})
}

@@ -316,11 +315,11 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor
}
if rQuant > (nodeInfo.Allocatable.ScalarResources[rName] - nodeInfo.Requested.ScalarResources[rName]) {
insufficientResources = append(insufficientResources, InsufficientResource{
rName,
fmt.Sprintf("Insufficient %v", rName),
podRequest.ScalarResources[rName],
nodeInfo.Requested.ScalarResources[rName],
nodeInfo.Allocatable.ScalarResources[rName],
ResourceName: rName,
Reason: fmt.Sprintf("Insufficient %v", rName),
Requested: podRequest.ScalarResources[rName],
Used: nodeInfo.Requested.ScalarResources[rName],
Capacity: nodeInfo.Allocatable.ScalarResources[rName],
})
}
}
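
The fit.go hunks above only change how InsufficientResource values are constructed: positional struct literals become field-keyed ones, so a later field addition or reordering cannot silently shift values into the wrong slot. A minimal standalone illustration of the difference; the local struct merely mirrors the shape of noderesources.InsufficientResource:

package main

import "fmt"

// insufficientResource mirrors the shape of noderesources.InsufficientResource
// for illustration only.
type insufficientResource struct {
	ResourceName string
	Reason       string
	Requested    int64
	Used         int64
	Capacity     int64
}

func main() {
	// Old style: positional literal, meaning depends entirely on field order.
	positional := insufficientResource{"cpu", "Insufficient cpu", 2000, 7000, 8000}

	// New style: keyed literal, self-documenting and robust to field reordering.
	keyed := insufficientResource{
		ResourceName: "cpu",
		Reason:       "Insufficient cpu",
		Requested:    2000,
		Used:         7000,
		Capacity:     8000,
	}
	fmt.Println(positional == keyed) // true: both describe the same shortfall
}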
@@ -136,41 +136,52 @@ func TestEnoughRequests(t *testing.T) {
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}},
name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 1, Used: 10, Capacity: 10},
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 1, Used: 20, Capacity: 20},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 3, Used: 8, Capacity: 10},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}, framework.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 3, Used: 8, Capacity: 10},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 3, Used: 19, Capacity: 20},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}, framework.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 3, Used: 19, Capacity: 20},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}),
@@ -197,17 +208,21 @@ func TestEnoughRequests(t *testing.T) {
pod: newResourcePod(framework.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 5})),
name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}},
name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 2, Used: 9, Capacity: 10},
},
},
{
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}},
name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 2, Used: 19, Capacity: 20},
},
},
{
pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}),
@@ -240,36 +255,44 @@ func TestEnoughRequests(t *testing.T) {
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 10, Used: 0, Capacity: 5},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 10, Used: 0, Capacity: 5},
},
},
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 1, Used: 5, Capacity: 5},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 1, Used: 5, Capacity: 5},
},
},
{
pod: newResourcePod(
@@ -277,9 +300,11 @@ func TestEnoughRequests(t *testing.T) {
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 6, Used: 2, Capacity: 5},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
@@ -296,63 +321,77 @@ func TestEnoughRequests(t *testing.T) {
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 6, Used: 2, Capacity: 5},
},
},
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceB, Reason: getErrReason(extendedResourceB), Requested: 1, Used: 0, Capacity: 0},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceB, Reason: getErrReason(extendedResourceB), Requested: 1, Used: 0, Capacity: 0},
},
},
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}},
name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: kubernetesIOResourceA, Reason: getErrReason(kubernetesIOResourceA), Requested: 10, Used: 0, Capacity: 0},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}},
name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: kubernetesIOResourceB, Reason: getErrReason(kubernetesIOResourceB), Requested: 10, Used: 0, Capacity: 0},
},
},
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 10, Used: 0, Capacity: 5},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
name: "hugepages resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 10, Used: 0, Capacity: 5},
},
},
{
pod: newResourcePod(
@@ -360,9 +399,11 @@ func TestEnoughRequests(t *testing.T) {
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
name: "hugepages resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}},
name: "hugepages resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 6, Used: 2, Capacity: 5},
},
},
{
pod: newResourcePod(
@@ -388,10 +429,12 @@ func TestEnoughRequests(t *testing.T) {
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "requests + overhead does not fit for memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}},
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "requests + overhead does not fit for memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 16, Used: 5, Capacity: 20},
},
},
{
pod: newResourcePod(
@@ -443,6 +443,12 @@ func (p *PodWrapper) PreemptionPolicy(policy v1.PreemptionPolicy) *PodWrapper {
return p
}

// Overhead sets the given ResourceList as the overhead of the inner pod.
func (p *PodWrapper) Overhead(rl v1.ResourceList) *PodWrapper {
p.Spec.Overhead = rl
return p
}

// NodeWrapper wraps a Node inside.
type NodeWrapper struct{ v1.Node }

@@ -502,3 +508,9 @@ func (n *NodeWrapper) Images(images map[string]int64) *NodeWrapper {
n.Status.Images = containerImages
return n
}

// Taints applies taints to the inner node.
func (n *NodeWrapper) Taints(taints []v1.Taint) *NodeWrapper {
n.Spec.Taints = taints
return n
}
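
The wrapper additions above are what let the new tests build pods with overhead and, where needed, tainted nodes fluently. A short sketch of how they compose with the existing builder methods from pkg/scheduler/testing; the concrete names and values are illustrative:

package testsketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// A pod whose Spec.Overhead is set via the new PodWrapper.Overhead method.
var podWithOverhead = st.MakePod().Name("p").
	Overhead(v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")}).
	Obj()

// A node carrying a NoSchedule taint via the new NodeWrapper.Taints method.
var taintedNode = st.MakeNode().Name("n").
	Taints([]v1.Taint{{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}}).
	Obj()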
@@ -187,6 +187,9 @@ rules:
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util
- k8s.io/kubernetes/pkg/scheduler/volumebinder
- k8s.io/kubernetes/pkg/scheduler
- k8s.io/kubernetes/pkg/scheduler/profile
- k8s.io/kubernetes/pkg/scheduler/testing
- k8s.io/kubernetes/pkg/security/apparmor
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl