Mirror of https://github.com/k3s-io/kubernetes.git
changes in test files
commit b901ef0f68
parent 121d24cfc7
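framework.NewPodInfo now returns an error alongside the *framework.PodInfo, so the test call sites below can no longer use it inline — many of them sit inside composite literals, where there is no room for an "if err != nil" check. Each affected test file therefore gains a small must-style wrapper that turns the (value, error) pair back into a plain expression and aborts the test on failure. A minimal, self-contained sketch of the pattern, using a hypothetical Widget type and newWidget constructor rather than the scheduler API:

package widget

import (
	"errors"
	"testing"
)

// Widget stands in for framework.PodInfo; newWidget stands in for any
// constructor that returns (T, error), such as framework.NewPodInfo.
type Widget struct{ name string }

func newWidget(name string) (*Widget, error) {
	if name == "" {
		return nil, errors.New("widget name must not be empty")
	}
	return &Widget{name: name}, nil
}

// mustNewWidget turns construction failure into immediate test failure,
// so the call can be used as an expression in table-driven fixtures.
// The t.Helper() call is an extra in this sketch (the helpers in this
// commit omit it); it makes the failure point at the caller's line.
func mustNewWidget(t *testing.T, name string) *Widget {
	t.Helper()
	w, err := newWidget(name)
	if err != nil {
		t.Fatal(err)
	}
	return w
}

This is exactly the shape of the mustNewPodInfo(t, pod) and mustNewTestPodInfo(t, pod) helpers added below, used inline as in PodInfo: mustNewPodInfo(t, st.MakePod().Priority(highPriority).Obj()).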
@@ -1262,7 +1262,7 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
 			// Add test.addedPod to state1 and verify it is equal to allPodsState.
 			nodeInfo := mustGetNodeInfo(t, snapshot, test.addedPod.Spec.NodeName)
-			if err := ipa.AddPod(ctx, cycleState, test.pendingPod, framework.NewPodInfo(test.addedPod), nodeInfo); err != nil {
+			if err := ipa.AddPod(ctx, cycleState, test.pendingPod, mustNewPodInfo(t, test.addedPod), nodeInfo); err != nil {
 				t.Errorf("error adding pod to meta: %v", err)
 			}
@@ -1284,7 +1284,7 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
 			}

 			// Remove the added pod and make sure it is equal to the original state.
-			if err := ipa.RemovePod(context.Background(), cycleState, test.pendingPod, framework.NewPodInfo(test.addedPod), nodeInfo); err != nil {
+			if err := ipa.RemovePod(context.Background(), cycleState, test.pendingPod, mustNewPodInfo(t, test.addedPod), nodeInfo); err != nil {
 				t.Errorf("error removing pod from meta: %v", err)
 			}
 			if !reflect.DeepEqual(originalState, state) {
@@ -1439,7 +1439,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 			p := plugintesting.SetupPluginWithInformers(ctx, t, New, &config.InterPodAffinityArgs{}, snapshot, nil)
-			gotAffinityPodsMap, gotAntiAffinityPodsMap := p.(*InterPodAffinity).getIncomingAffinityAntiAffinityCounts(ctx, framework.NewPodInfo(tt.pod), l)
+			gotAffinityPodsMap, gotAntiAffinityPodsMap := p.(*InterPodAffinity).getIncomingAffinityAntiAffinityCounts(ctx, mustNewPodInfo(t, tt.pod), l)
 			if !reflect.DeepEqual(gotAffinityPodsMap, tt.wantAffinityPodsMap) {
 				t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() gotAffinityPodsMap = %#v, want %#v", gotAffinityPodsMap, tt.wantAffinityPodsMap)
 			}
@@ -1458,3 +1458,11 @@ func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *frame
 	}
 	return nodeInfo
 }
+
+func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
+	podInfo, err := framework.NewPodInfo(pod)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return podInfo
+}
@@ -1866,7 +1866,7 @@ func TestPreFilterStateAddPod(t *testing.T) {
 			if err != nil {
 				t.Fatal(err)
 			}
-			if s := p.AddPod(ctx, cs, tt.preemptor, framework.NewPodInfo(tt.addedPod), nodeInfo); !s.IsSuccess() {
+			if s := p.AddPod(ctx, cs, tt.preemptor, mustNewPodInfo(t, tt.addedPod), nodeInfo); !s.IsSuccess() {
 				t.Fatal(s.AsError())
 			}
 			state, err := getPreFilterState(cs)
@@ -2188,7 +2188,7 @@ func TestPreFilterStateRemovePod(t *testing.T) {
 			if err != nil {
 				t.Fatal(err)
 			}
-			if s := p.RemovePod(ctx, cs, tt.preemptor, framework.NewPodInfo(deletedPod), nodeInfo); !s.IsSuccess() {
+			if s := p.RemovePod(ctx, cs, tt.preemptor, mustNewPodInfo(t, deletedPod), nodeInfo); !s.IsSuccess() {
 				t.Fatal(s.AsError())
 			}

@@ -3228,3 +3228,11 @@ func TestPreFilterDisabled(t *testing.T) {
 		t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus)
 	}
 }
+
+func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
+	podInfo, err := framework.NewPodInfo(pod)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return podInfo
+}
@@ -20,6 +20,7 @@ import (
 	"testing"
 	"time"

+	v1 "k8s.io/api/core/v1"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -38,31 +39,31 @@ func TestLess(t *testing.T) {
 		{
 			name: "p1.priority less than p2.priority",
 			p1: &framework.QueuedPodInfo{
-				PodInfo: framework.NewPodInfo(st.MakePod().Priority(lowPriority).Obj()),
+				PodInfo: mustNewPodInfo(t, st.MakePod().Priority(lowPriority).Obj()),
 			},
 			p2: &framework.QueuedPodInfo{
-				PodInfo: framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
+				PodInfo: mustNewPodInfo(t, st.MakePod().Priority(highPriority).Obj()),
 			},
 			expected: false, // p2 should be ahead of p1 in the queue
 		},
 		{
 			name: "p1.priority greater than p2.priority",
 			p1: &framework.QueuedPodInfo{
-				PodInfo: framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
+				PodInfo: mustNewPodInfo(t, st.MakePod().Priority(highPriority).Obj()),
 			},
 			p2: &framework.QueuedPodInfo{
-				PodInfo: framework.NewPodInfo(st.MakePod().Priority(lowPriority).Obj()),
+				PodInfo: mustNewPodInfo(t, st.MakePod().Priority(lowPriority).Obj()),
 			},
 			expected: true, // p1 should be ahead of p2 in the queue
 		},
 		{
 			name: "equal priority. p1 is added to schedulingQ earlier than p2",
 			p1: &framework.QueuedPodInfo{
-				PodInfo: framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
+				PodInfo: mustNewPodInfo(t, st.MakePod().Priority(highPriority).Obj()),
 				Timestamp: t1,
 			},
 			p2: &framework.QueuedPodInfo{
-				PodInfo: framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
+				PodInfo: mustNewPodInfo(t, st.MakePod().Priority(highPriority).Obj()),
 				Timestamp: t2,
 			},
 			expected: true, // p1 should be ahead of p2 in the queue
@@ -70,11 +71,11 @@ func TestLess(t *testing.T) {
 		{
 			name: "equal priority. p2 is added to schedulingQ earlier than p1",
 			p1: &framework.QueuedPodInfo{
-				PodInfo: framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
+				PodInfo: mustNewPodInfo(t, st.MakePod().Priority(highPriority).Obj()),
 				Timestamp: t2,
 			},
 			p2: &framework.QueuedPodInfo{
-				PodInfo: framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
+				PodInfo: mustNewPodInfo(t, st.MakePod().Priority(highPriority).Obj()),
 				Timestamp: t1,
 			},
 			expected: false, // p2 should be ahead of p1 in the queue
@@ -87,3 +88,11 @@ func TestLess(t *testing.T) {
 		})
 	}
 }
+
+func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
+	podInfo, err := framework.NewPodInfo(pod)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return podInfo
+}
@@ -1667,7 +1667,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {
 			podNominator := internalqueue.NewPodNominator(nil)
 			if tt.nominatedPod != nil {
 				podNominator.AddNominatedPod(
-					framework.NewPodInfo(tt.nominatedPod),
+					mustNewPodInfo(t, tt.nominatedPod),
 					&framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: nodeName})
 			}
 			profile := config.KubeSchedulerProfile{Plugins: cfgPls}
@@ -2688,3 +2688,11 @@ func collectAndComparePermitWaitDuration(t *testing.T, wantRes string) {
 		}
 	}
 }
+
+func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
+	podInfo, err := framework.NewPodInfo(pod)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return podInfo
+}
@@ -58,22 +58,22 @@ var (
 	lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
 	mediumPriority = (lowPriority + highPriority) / 2

-	highPriorityPodInfo = framework.NewPodInfo(
+	highPriorityPodInfo = mustNewPodInfo(
 		st.MakePod().Name("hpp").Namespace("ns1").UID("hppns1").Priority(highPriority).Obj(),
 	)
-	highPriNominatedPodInfo = framework.NewPodInfo(
+	highPriNominatedPodInfo = mustNewPodInfo(
 		st.MakePod().Name("hpp").Namespace("ns1").UID("hppns1").Priority(highPriority).NominatedNodeName("node1").Obj(),
 	)
-	medPriorityPodInfo = framework.NewPodInfo(
+	medPriorityPodInfo = mustNewPodInfo(
 		st.MakePod().Name("mpp").Namespace("ns2").UID("mppns2").Annotation("annot2", "val2").Priority(mediumPriority).NominatedNodeName("node1").Obj(),
 	)
-	unschedulablePodInfo = framework.NewPodInfo(
+	unschedulablePodInfo = mustNewPodInfo(
 		st.MakePod().Name("up").Namespace("ns1").UID("upns1").Annotation("annot2", "val2").Priority(lowPriority).NominatedNodeName("node1").Condition(v1.PodScheduled, v1.ConditionFalse, v1.PodReasonUnschedulable).Obj(),
 	)
-	nonExistentPodInfo = framework.NewPodInfo(
+	nonExistentPodInfo = mustNewPodInfo(
 		st.MakePod().Name("ne").Namespace("ns1").UID("nens1").Obj(),
 	)
-	scheduledPodInfo = framework.NewPodInfo(
+	scheduledPodInfo = mustNewPodInfo(
 		st.MakePod().Name("sp").Namespace("ns1").UID("spns1").Node("foo").Obj(),
 	)
 )
@@ -772,11 +772,11 @@ func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
 		t.Errorf("add failed: %v", err)
 	}
 	// Update unschedulablePodInfo on a different node than specified in the pod.
-	q.AddNominatedPod(framework.NewPodInfo(unschedulablePodInfo.Pod),
+	q.AddNominatedPod(mustNewTestPodInfo(t, unschedulablePodInfo.Pod),
 		&framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node5"})

 	// Update nominated node name of a pod on a node that is not specified in the pod object.
-	q.AddNominatedPod(framework.NewPodInfo(highPriorityPodInfo.Pod),
+	q.AddNominatedPod(mustNewTestPodInfo(t, highPriorityPodInfo.Pod),
 		&framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node2"})
 	expectedNominatedPods := &nominator{
 		nominatedPodToNode: map[types.UID]string{
@@ -829,7 +829,7 @@ func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
 	// Nothing should change.
 	scheduledPodCopy := scheduledPodInfo.Pod.DeepCopy()
 	scheduledPodInfo.Pod.Spec.NodeName = ""
-	q.AddNominatedPod(framework.NewPodInfo(scheduledPodCopy), &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node1"})
+	q.AddNominatedPod(mustNewTestPodInfo(t, scheduledPodCopy), &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node1"})
 	if diff := cmp.Diff(q.PodNominator, expectedNominatedPods, cmp.AllowUnexported(nominator{}), cmpopts.IgnoreFields(nominator{}, "podLister", "RWMutex")); diff != "" {
 		t.Errorf("Unexpected diff after nominating a scheduled pod (-want, +got):\n%s", diff)
 	}
@@ -895,35 +895,35 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			name: "create, update, delete subset of pods",
 			podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]},
 			expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToUpdate: []*v1.Pod{updatedPods[0]},
 			expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(updatedPods[0]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, updatedPods[0]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToDelete: []*v1.Pod{pods[0], pods[1]},
 			expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 		},
 		{
 			name: "create, update, delete all",
 			podsToAdd: []*v1.Pod{pods[0], pods[3]},
 			expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToUpdate: []*v1.Pod{updatedPods[3]},
 			expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(updatedPods[3]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, updatedPods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToDelete: []*v1.Pod{pods[0], pods[3]},
 			expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{},
@@ -932,17 +932,17 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			name: "delete non-existing and existing pods",
 			podsToAdd: []*v1.Pod{pods[1], pods[2]},
 			expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToUpdate: []*v1.Pod{updatedPods[1]},
 			expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1]), UnschedulablePlugins: sets.NewString()},
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, updatedPods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToDelete: []*v1.Pod{pods[2], pods[3]},
 			expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, updatedPods[1]), UnschedulablePlugins: sets.NewString()},
 			},
 		},
 	}
@@ -1210,11 +1210,11 @@ func TestPriorityQueue_initPodMaxInUnschedulablePodsDuration(t *testing.T) {

 	var timestamp = time.Now()
 	pInfo1 := &framework.QueuedPodInfo{
-		PodInfo: framework.NewPodInfo(pod1),
+		PodInfo: mustNewTestPodInfo(t, pod1),
 		Timestamp: timestamp.Add(-time.Second),
 	}
 	pInfo2 := &framework.QueuedPodInfo{
-		PodInfo: framework.NewPodInfo(pod2),
+		PodInfo: mustNewTestPodInfo(t, pod2),
 		Timestamp: timestamp.Add(-2 * time.Second),
 	}

@@ -1342,11 +1342,11 @@ func TestPodTimestamp(t *testing.T) {

 	var timestamp = time.Now()
 	pInfo1 := &framework.QueuedPodInfo{
-		PodInfo: framework.NewPodInfo(pod1),
+		PodInfo: mustNewTestPodInfo(t, pod1),
 		Timestamp: timestamp,
 	}
 	pInfo2 := &framework.QueuedPodInfo{
-		PodInfo: framework.NewPodInfo(pod2),
+		PodInfo: mustNewTestPodInfo(t, pod2),
 		Timestamp: timestamp.Add(time.Second),
 	}

@@ -1649,7 +1649,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
 	var pInfos = make([]*framework.QueuedPodInfo, 0, 3)
 	for i := 1; i <= 3; i++ {
 		p := &framework.QueuedPodInfo{
-			PodInfo: framework.NewPodInfo(
+			PodInfo: mustNewTestPodInfo(t,
 				st.MakePod().Name(fmt.Sprintf("test-pod-%d", i)).Namespace(fmt.Sprintf("ns%d", i)).UID(fmt.Sprintf("tp-%d", i)).Obj()),
 			Timestamp: timestamp,
 		}
@@ -1984,7 +1984,7 @@ func makeQueuedPodInfos(num int, timestamp time.Time) []*framework.QueuedPodInfo
 	var pInfos = make([]*framework.QueuedPodInfo, 0, num)
 	for i := 1; i <= num; i++ {
 		p := &framework.QueuedPodInfo{
-			PodInfo: framework.NewPodInfo(st.MakePod().Name(fmt.Sprintf("test-pod-%d", i)).Namespace(fmt.Sprintf("ns%d", i)).UID(fmt.Sprintf("tp-%d", i)).Obj()),
+			PodInfo: mustNewPodInfo(st.MakePod().Name(fmt.Sprintf("test-pod-%d", i)).Namespace(fmt.Sprintf("ns%d", i)).UID(fmt.Sprintf("tp-%d", i)).Obj()),
 			Timestamp: timestamp,
 		}
 		pInfos = append(pInfos, p)
@@ -2033,3 +2033,19 @@ func TestPriorityQueue_calculateBackoffDuration(t *testing.T) {
 		})
 	}
 }
+
+func mustNewTestPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
+	podInfo, err := framework.NewPodInfo(pod)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return podInfo
+}
+
+func mustNewPodInfo(pod *v1.Pod) *framework.PodInfo {
+	podInfo, err := framework.NewPodInfo(pod)
+	if err != nil {
+		panic(err)
+	}
+	return podInfo
+}
@@ -588,7 +588,7 @@ func TestSchedulerScheduleOne(t *testing.T) {
 				Cache: cache,
 				client: client,
 				NextPod: func() *framework.QueuedPodInfo {
-					return &framework.QueuedPodInfo{PodInfo: framework.NewPodInfo(item.sendPod)}
+					return &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, item.sendPod)}
 				},
 				SchedulingQueue: internalqueue.NewTestQueue(ctx, nil),
 				Profiles: profile.Map{testSchedulerName: fwk},
@@ -825,7 +825,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 	}

 	informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objects...), 0)
-	scheduler, _, errChan := setupTestScheduler(ctx, queuedPodStore, scache, informerFactory, nil, fns...)
+	scheduler, _, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, nil, fns...)

 	queuedPodStore.Add(podWithTooBigResourceRequests)
 	scheduler.scheduleOne(ctx)
@@ -946,7 +946,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 			fakeVolumeBinder := volumebinding.NewFakeVolumeBinder(item.volumeBinderConfig)
-			s, bindingChan, errChan := setupTestSchedulerWithVolumeBinding(ctx, fakeVolumeBinder, eventBroadcaster)
+			s, bindingChan, errChan := setupTestSchedulerWithVolumeBinding(ctx, t, fakeVolumeBinder, eventBroadcaster)
 			eventChan := make(chan struct{})
 			stopFunc := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
 				e, _ := obj.(*eventsv1.Event)
@@ -2182,7 +2182,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
 			if err := scheduler.Cache.UpdateSnapshot(scheduler.nodeInfoSnapshot); err != nil {
 				t.Fatal(err)
 			}
-			fwk.AddNominatedPod(framework.NewPodInfo(st.MakePod().UID("nominated").Priority(midPriority).Obj()),
+			fwk.AddNominatedPod(mustNewPodInfo(t, st.MakePod().UID("nominated").Priority(midPriority).Obj()),
 				&framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "1"})

 			_, _, err = scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), test.pod)
@@ -2590,7 +2590,7 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 // cache: scheduler cache that might contain assumed pods.
 func setupTestSchedulerWithOnePodOnNode(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, scache internalcache.Cache,
 	pod *v1.Pod, node *v1.Node, fns ...st.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
-	scheduler, bindingChan, errChan := setupTestScheduler(ctx, queuedPodStore, scache, nil, nil, fns...)
+	scheduler, bindingChan, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, nil, nil, fns...)

 	queuedPodStore.Add(pod)
 	// queuedPodStore: [foo:8080]
@@ -2617,7 +2617,7 @@ func setupTestSchedulerWithOnePodOnNode(ctx context.Context, t *testing.T, queue

 // queuedPodStore: pods queued before processing.
 // scache: scheduler cache that might contain assumed pods.
-func setupTestScheduler(ctx context.Context, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...st.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
+func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...st.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
 	bindingChan := make(chan *v1.Binding, 1)
 	client := clientsetfake.NewSimpleClientset()
 	client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
@@ -2658,7 +2658,7 @@ func setupTestScheduler(ctx context.Context, queuedPodStore *clientcache.FIFO, c
 		nodeInfoSnapshot: internalcache.NewEmptySnapshot(),
 		percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
 		NextPod: func() *framework.QueuedPodInfo {
-			return &framework.QueuedPodInfo{PodInfo: framework.NewPodInfo(clientcache.Pop(queuedPodStore).(*v1.Pod))}
+			return &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, clientcache.Pop(queuedPodStore).(*v1.Pod))}
 		},
 		SchedulingQueue: schedulingQueue,
 		Profiles: profile.Map{testSchedulerName: fwk},
@@ -2674,7 +2674,7 @@ func setupTestScheduler(ctx context.Context, queuedPodStore *clientcache.FIFO, c
 	return sched, bindingChan, errChan
 }

-func setupTestSchedulerWithVolumeBinding(ctx context.Context, volumeBinder volumebinding.SchedulerVolumeBinder, broadcaster events.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
+func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volumeBinder volumebinding.SchedulerVolumeBinder, broadcaster events.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
 	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
 	pod := podWithID("foo", "")
@@ -2697,7 +2697,7 @@ func setupTestSchedulerWithVolumeBinding(ctx context.Context, volumeBinder volum
 			return &volumebinding.VolumeBinding{Binder: volumeBinder, PVCLister: pvcInformer.Lister()}, nil
 		}, "PreFilter", "Filter", "Reserve", "PreBind"),
 	}
-	s, bindingChan, errChan := setupTestScheduler(ctx, queuedPodStore, scache, informerFactory, broadcaster, fns...)
+	s, bindingChan, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, broadcaster, fns...)
 	return s, bindingChan, errChan
 }

@@ -2708,3 +2708,11 @@ func makePredicateError(failReason string) error {
 	s := fmt.Sprintf("0/1 nodes are available: %v.", failReason)
 	return fmt.Errorf(s)
 }
+
+func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
+	podInfo, err := framework.NewPodInfo(pod)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return podInfo
+}
@@ -292,7 +292,7 @@ func TestFailureHandler(t *testing.T) {
 				t.Fatal(err)
 			}

-			testPodInfo := &framework.QueuedPodInfo{PodInfo: framework.NewPodInfo(testPod)}
+			testPodInfo := &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, testPod)}
 			s.FailureHandler(ctx, fwk, testPodInfo, tt.injectErr, v1.PodReasonUnschedulable, nil)

 			var got *v1.Pod
@@ -367,7 +367,7 @@ func TestFailureHandler_NodeNotFound(t *testing.T) {
 				t.Fatal(err)
 			}

-			testPodInfo := &framework.QueuedPodInfo{PodInfo: framework.NewPodInfo(testPod)}
+			testPodInfo := &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, testPod)}
 			s.FailureHandler(ctx, fwk, testPodInfo, tt.injectErr, v1.PodReasonUnschedulable, nil)

 			gotNodes := schedulerCache.Dump().Nodes
@@ -406,7 +406,7 @@ func TestFailureHandler_PodAlreadyBound(t *testing.T) {
 		t.Fatal(err)
 	}

-	testPodInfo := &framework.QueuedPodInfo{PodInfo: framework.NewPodInfo(testPod)}
+	testPodInfo := &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, testPod)}
 	s.FailureHandler(ctx, fwk, testPodInfo, fmt.Errorf("binding rejected: timeout"), v1.PodReasonUnschedulable, nil)

 	pod := getPodFromPriorityQueue(queue, testPod)