Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-08 11:38:15 +00:00)

Merge pull request #118312 from mengjiao-liu/improve-scheduler-cache-test

scheduler: add test name and remove redundant test tables to improve cache_test.go

Commit: 3a9c639d5a
File changed: pkg/scheduler/internal/cache/cache_test.go (vendored) — 573 changed lines
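The change applies two standard Go testing cleanups across cache_test.go: table cases gain a `name` field and run via `t.Run(tc.name, ...)` instead of `t.Run(fmt.Sprintf("case_%d", i), ...)`, and tables that only ever held one case are collapsed into a single struct value, dropping the slice and the loop. A minimal, self-contained sketch of both patterns (hypothetical code for illustration, not taken from this diff):

package cache_test

import "testing"

// Named subtests: failures report the case name instead of an opaque "case_0".
func TestDoubleTable(t *testing.T) {
	tests := []struct {
		name string
		in   int
		want int
	}{
		{name: "doubles one", in: 1, want: 2},
		{name: "doubles two", in: 2, want: 4},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.in * 2; got != tc.want {
				t.Errorf("double(%d) = %d, want %d", tc.in, got, tc.want)
			}
		})
	}
}

// Single-case table collapsed: no slice, no loop, no t.Run indirection.
func TestDoubleSingle(t *testing.T) {
	test := struct {
		in   int
		want int
	}{in: 3, want: 6}
	if got := test.in * 2; got != test.want {
		t.Errorf("double(%d) = %d, want %d", test.in, got, test.want)
	}
}

Named subtests make failure output self-describing (e.g. `--- FAIL: TestDoubleTable/doubles_two`) and keep single-case tests flat and readable, which is exactly what the diff below does to the scheduler cache tests.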
@@ -100,20 +100,22 @@ func newNodeInfo(requestedResource *framework.Resource,
 func TestAssumePodScheduled(t *testing.T) {
     nodeName := "node"
     testPods := []*v1.Pod{
-        makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
-        makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
-        makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
-        makeBasePod(t, nodeName, "test-nonzero", "", "", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
-        makeBasePod(t, nodeName, "test", "100m", "500", "example.com/foo:3", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
-        makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "example.com/foo:5", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
-        makeBasePod(t, nodeName, "test", "100m", "500", "random-invalid-extended-key:100", []v1.ContainerPort{{}}),
+        makeBasePod(t, nodeName, "test-resource-request-and-port-0", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
+        makeBasePod(t, nodeName, "test-resource-request-and-port-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
+        makeBasePod(t, nodeName, "test-resource-request-and-port-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
+        makeBasePod(t, nodeName, "test-nonzero-request", "", "", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
+        makeBasePod(t, nodeName, "test-extended-resource-1", "100m", "500", "example.com/foo:3", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
+        makeBasePod(t, nodeName, "test-extended-resource-2", "200m", "1Ki", "example.com/foo:5", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
+        makeBasePod(t, nodeName, "test-extended-key", "100m", "500", "random-invalid-extended-key:100", []v1.ContainerPort{{}}),
     }

     tests := []struct {
+        name string
         pods []*v1.Pod

         wNodeInfo *framework.NodeInfo
     }{{
+        name:      "assumed one pod with resource request and used ports",
         pods:      []*v1.Pod{testPods[0]},
         wNodeInfo: newNodeInfo(
             &framework.Resource{
@@ -129,6 +131,7 @@ func TestAssumePodScheduled(t *testing.T) {
             make(map[string]*framework.ImageStateSummary),
         ),
     }, {
+        name:      "node requested resource are equal to the sum of the assumed pods requested resource, node contains host ports defined by pods",
         pods:      []*v1.Pod{testPods[1], testPods[2]},
         wNodeInfo: newNodeInfo(
             &framework.Resource{
@@ -144,6 +147,7 @@ func TestAssumePodScheduled(t *testing.T) {
             make(map[string]*framework.ImageStateSummary),
         ),
     }, { // test non-zero request
+        name:      "assumed pod without resource request",
         pods:      []*v1.Pod{testPods[3]},
         wNodeInfo: newNodeInfo(
             &framework.Resource{
@@ -159,6 +163,7 @@ func TestAssumePodScheduled(t *testing.T) {
             make(map[string]*framework.ImageStateSummary),
         ),
     }, {
+        name:      "assumed one pod with extended resource",
        pods:      []*v1.Pod{testPods[4]},
         wNodeInfo: newNodeInfo(
             &framework.Resource{
@@ -175,6 +180,7 @@ func TestAssumePodScheduled(t *testing.T) {
             make(map[string]*framework.ImageStateSummary),
         ),
     }, {
+        name:      "assumed two pods with extended resources",
         pods:      []*v1.Pod{testPods[4], testPods[5]},
         wNodeInfo: newNodeInfo(
             &framework.Resource{
@@ -191,6 +197,7 @@ func TestAssumePodScheduled(t *testing.T) {
             make(map[string]*framework.ImageStateSummary),
         ),
     }, {
+        name:      "assumed pod with random invalid extended resource key",
         pods:      []*v1.Pod{testPods[6]},
         wNodeInfo: newNodeInfo(
             &framework.Resource{
@@ -208,13 +215,13 @@ func TestAssumePodScheduled(t *testing.T) {
         },
     }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
+    for _, tc := range tests {
+        t.Run(tc.name, func(t *testing.T) {
             logger, ctx := ktesting.NewTestContext(t)
             ctx, cancel := context.WithCancel(ctx)
             defer cancel()
             cache := newCache(ctx, time.Second, time.Second)
-            for _, pod := range tt.pods {
+            for _, pod := range tc.pods {
                 if err := cache.AssumePod(logger, pod); err != nil {
                     t.Fatalf("AssumePod failed: %v", err)
                 }
@@ -224,11 +231,11 @@ func TestAssumePodScheduled(t *testing.T) {
                 }
             }
             n := cache.nodes[nodeName]
-            if err := deepEqualWithoutGeneration(n, tt.wNodeInfo); err != nil {
+            if err := deepEqualWithoutGeneration(n, tc.wNodeInfo); err != nil {
                 t.Error(err)
             }

-            for _, pod := range tt.pods {
+            for _, pod := range tc.pods {
                 if err := cache.ForgetPod(logger, pod); err != nil {
                     t.Fatalf("ForgetPod failed: %v", err)
                 }
@@ -367,12 +374,12 @@ func TestAddPodWillConfirm(t *testing.T) {
         makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
         makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
     }
-    tests := []struct {
+    test := struct {
         podsToAssume []*v1.Pod
         podsToAdd    []*v1.Pod

         wNodeInfo *framework.NodeInfo
-    }{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed.
+    }{ // two pod were assumed at same time. But first one is called Add() and gets confirmed.
         podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
         podsToAdd:    []*v1.Pod{testPods[0]},
         wNodeInfo: newNodeInfo(
@@ -388,35 +395,31 @@ func TestAddPodWillConfirm(t *testing.T) {
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
             make(map[string]*framework.ImageStateSummary),
         ),
-    }}
+    }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            logger, ctx := ktesting.NewTestContext(t)
-            ctx, cancel := context.WithCancel(ctx)
-            defer cancel()
-            cache := newCache(ctx, ttl, time.Second)
-            for _, podToAssume := range tt.podsToAssume {
-                if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
-                    t.Fatalf("assumePod failed: %v", err)
-                }
-            }
-            for _, podToAdd := range tt.podsToAdd {
-                if err := cache.AddPod(logger, podToAdd); err != nil {
-                    t.Fatalf("AddPod failed: %v", err)
-                }
-                // pod already in added state
-                if err := cache.AddPod(logger, podToAdd); err == nil {
-                    t.Error("expected error, no error found")
-                }
-            }
-            cache.cleanupAssumedPods(logger, now.Add(2*ttl))
-            // check after expiration. confirmed pods shouldn't be expired.
-            n := cache.nodes[nodeName]
-            if err := deepEqualWithoutGeneration(n, tt.wNodeInfo); err != nil {
-                t.Error(err)
-            }
-        })
-    }
+    logger, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+    cache := newCache(ctx, ttl, time.Second)
+    for _, podToAssume := range test.podsToAssume {
+        if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
+            t.Fatalf("assumePod failed: %v", err)
+        }
+    }
+    for _, podToAdd := range test.podsToAdd {
+        if err := cache.AddPod(logger, podToAdd); err != nil {
+            t.Fatalf("AddPod failed: %v", err)
+        }
+        // pod already in added state
+        if err := cache.AddPod(logger, podToAdd); err == nil {
+            t.Error("expected error, no error found")
+        }
+    }
+    cache.cleanupAssumedPods(logger, now.Add(2*ttl))
+    // check after expiration. confirmed pods shouldn't be expired.
+    n := cache.nodes[nodeName]
+    if err := deepEqualWithoutGeneration(n, test.wNodeInfo); err != nil {
+        t.Error(err)
+    }
 }

@@ -429,46 +432,43 @@ func TestDump(t *testing.T) {
         makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
         makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
     }
-    tests := []struct {
+    test := struct {
         podsToAssume []*v1.Pod
         podsToAdd    []*v1.Pod
-    }{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed.
+    }{ // two pod were assumed at same time. But first one is called Add() and gets confirmed.
         podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
         podsToAdd:    []*v1.Pod{testPods[0]},
-    }}
+    }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            logger, ctx := ktesting.NewTestContext(t)
-            ctx, cancel := context.WithCancel(ctx)
-            defer cancel()
-            cache := newCache(ctx, ttl, time.Second)
-            for _, podToAssume := range tt.podsToAssume {
-                if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
-                    t.Errorf("assumePod failed: %v", err)
-                }
-            }
-            for _, podToAdd := range tt.podsToAdd {
-                if err := cache.AddPod(logger, podToAdd); err != nil {
-                    t.Errorf("AddPod failed: %v", err)
-                }
-            }
-
-            snapshot := cache.Dump()
-            if len(snapshot.Nodes) != len(cache.nodes) {
-                t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
-            }
-            for name, ni := range snapshot.Nodes {
-                nItem := cache.nodes[name]
-                if !reflect.DeepEqual(ni, nItem.info) {
-                    t.Errorf("expect \n%+v; got \n%+v", nItem.info, ni)
-                }
-            }
-            if !reflect.DeepEqual(snapshot.AssumedPods, cache.assumedPods) {
-                t.Errorf("expect \n%+v; got \n%+v", cache.assumedPods, snapshot.AssumedPods)
-            }
-        })
-    }
+    logger, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+    cache := newCache(ctx, ttl, time.Second)
+    for _, podToAssume := range test.podsToAssume {
+        if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
+            t.Errorf("assumePod failed: %v", err)
+        }
+    }
+    for _, podToAdd := range test.podsToAdd {
+        if err := cache.AddPod(logger, podToAdd); err != nil {
+            t.Errorf("AddPod failed: %v", err)
+        }
+    }
+
+    snapshot := cache.Dump()
+    if len(snapshot.Nodes) != len(cache.nodes) {
+        t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
+    }
+    for name, ni := range snapshot.Nodes {
+        nItem := cache.nodes[name]
+        if !reflect.DeepEqual(ni, nItem.info) {
+            t.Errorf("expect \n%+v; got \n%+v", nItem.info, ni)
+        }
+    }
+    if !reflect.DeepEqual(snapshot.AssumedPods, cache.assumedPods) {
+        t.Errorf("expect \n%+v; got \n%+v", cache.assumedPods, snapshot.AssumedPods)
+    }
+
 }

 // TestAddPodAlwaysUpdatePodInfoInNodeInfo tests that AddPod method always updates PodInfo in NodeInfo,
@@ -487,52 +487,46 @@ func TestAddPodAlwaysUpdatesPodInfoInNodeInfo(t *testing.T) {
         Status: v1.ConditionTrue,
     })

-    tests := []struct {
+    test := struct {
         podsToAssume         []*v1.Pod
         podsToAddAfterAssume []*v1.Pod
         nodeInfo             map[string]*framework.NodeInfo
     }{
-        {
-            podsToAssume:         []*v1.Pod{p1},
-            podsToAddAfterAssume: []*v1.Pod{p2},
-            nodeInfo: map[string]*framework.NodeInfo{
-                "node1": newNodeInfo(
-                    &framework.Resource{
-                        MilliCPU: 100,
-                        Memory:   500,
-                    },
-                    &framework.Resource{
-                        MilliCPU: 100,
-                        Memory:   500,
-                    },
-                    []*v1.Pod{p2},
-                    newHostPortInfoBuilder().add("TCP", "0.0.0.0", 80).build(),
-                    make(map[string]*framework.ImageStateSummary),
-                ),
-            },
-        },
+        podsToAssume:         []*v1.Pod{p1},
+        podsToAddAfterAssume: []*v1.Pod{p2},
+        nodeInfo: map[string]*framework.NodeInfo{
+            "node1": newNodeInfo(
+                &framework.Resource{
+                    MilliCPU: 100,
+                    Memory:   500,
+                },
+                &framework.Resource{
+                    MilliCPU: 100,
+                    Memory:   500,
+                },
+                []*v1.Pod{p2},
+                newHostPortInfoBuilder().add("TCP", "0.0.0.0", 80).build(),
+                make(map[string]*framework.ImageStateSummary),
+            ),
+        },
     }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            cache := newCache(ctx, ttl, time.Second)
-            for _, podToAssume := range tt.podsToAssume {
-                if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
-                    t.Fatalf("assumePod failed: %v", err)
-                }
-            }
-            for _, podToAdd := range tt.podsToAddAfterAssume {
-                if err := cache.AddPod(logger, podToAdd); err != nil {
-                    t.Fatalf("AddPod failed: %v", err)
-                }
-            }
-            for nodeName, expected := range tt.nodeInfo {
-                n := cache.nodes[nodeName]
-                if err := deepEqualWithoutGeneration(n, expected); err != nil {
-                    t.Errorf("node %q: %v", nodeName, err)
-                }
-            }
-        })
-    }
+    cache := newCache(ctx, ttl, time.Second)
+    for _, podToAssume := range test.podsToAssume {
+        if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
+            t.Fatalf("assumePod failed: %v", err)
+        }
+    }
+    for _, podToAdd := range test.podsToAddAfterAssume {
+        if err := cache.AddPod(logger, podToAdd); err != nil {
+            t.Fatalf("AddPod failed: %v", err)
+        }
+    }
+    for nodeName, expected := range test.nodeInfo {
+        n := cache.nodes[nodeName]
+        if err := deepEqualWithoutGeneration(n, expected); err != nil {
+            t.Errorf("node %q: %v", nodeName, err)
+        }
+    }
 }

@@ -545,13 +539,13 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
     addedPod := makeBasePod(t, "actual-node", "test-1", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}})
     updatedPod := makeBasePod(t, "actual-node", "test-1", "200m", "500", "", []v1.ContainerPort{{HostPort: 90}})

-    tests := []struct {
+    test := struct {
         podsToAssume []*v1.Pod
         podsToAdd    []*v1.Pod
         podsToUpdate [][]*v1.Pod

         wNodeInfo map[string]*framework.NodeInfo
-    }{{
+    }{
         podsToAssume: []*v1.Pod{assumedPod.DeepCopy()},
         podsToAdd:    []*v1.Pod{addedPod.DeepCopy()},
         podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}},
@@ -571,36 +565,32 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
                 make(map[string]*framework.ImageStateSummary),
             ),
         },
-    }}
+    }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            logger, ctx := ktesting.NewTestContext(t)
-            ctx, cancel := context.WithCancel(ctx)
-            defer cancel()
-            cache := newCache(ctx, ttl, time.Second)
-            for _, podToAssume := range tt.podsToAssume {
-                if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
-                    t.Fatalf("assumePod failed: %v", err)
-                }
-            }
-            for _, podToAdd := range tt.podsToAdd {
-                if err := cache.AddPod(logger, podToAdd); err != nil {
-                    t.Fatalf("AddPod failed: %v", err)
-                }
-            }
-            for _, podToUpdate := range tt.podsToUpdate {
-                if err := cache.UpdatePod(logger, podToUpdate[0], podToUpdate[1]); err != nil {
-                    t.Fatalf("UpdatePod failed: %v", err)
-                }
-            }
-            for nodeName, expected := range tt.wNodeInfo {
-                n := cache.nodes[nodeName]
-                if err := deepEqualWithoutGeneration(n, expected); err != nil {
-                    t.Errorf("node %q: %v", nodeName, err)
-                }
-            }
-        })
-    }
+    logger, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+    cache := newCache(ctx, ttl, time.Second)
+    for _, podToAssume := range test.podsToAssume {
+        if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
+            t.Fatalf("assumePod failed: %v", err)
+        }
+    }
+    for _, podToAdd := range test.podsToAdd {
+        if err := cache.AddPod(logger, podToAdd); err != nil {
+            t.Fatalf("AddPod failed: %v", err)
+        }
+    }
+    for _, podToUpdate := range test.podsToUpdate {
+        if err := cache.UpdatePod(logger, podToUpdate[0], podToUpdate[1]); err != nil {
+            t.Fatalf("UpdatePod failed: %v", err)
+        }
+    }
+    for nodeName, expected := range test.wNodeInfo {
+        n := cache.nodes[nodeName]
+        if err := deepEqualWithoutGeneration(n, expected); err != nil {
+            t.Errorf("node %q: %v", nodeName, err)
+        }
+    }
 }

@@ -609,11 +599,10 @@ func TestAddPodAfterExpiration(t *testing.T) {
     nodeName := "node"
     ttl := 10 * time.Second
     basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
-    tests := []struct {
-        pod *v1.Pod
-
+    test := struct {
+        pod       *v1.Pod
         wNodeInfo *framework.NodeInfo
-    }{{
+    }{
         pod: basePod,
         wNodeInfo: newNodeInfo(
             &framework.Resource{
@@ -628,32 +617,28 @@ func TestAddPodAfterExpiration(t *testing.T) {
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
             make(map[string]*framework.ImageStateSummary),
         ),
-    }}
+    }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            logger, ctx := ktesting.NewTestContext(t)
-            ctx, cancel := context.WithCancel(ctx)
-            defer cancel()
-            now := time.Now()
-            cache := newCache(ctx, ttl, time.Second)
-            if err := assumeAndFinishBinding(logger, cache, tt.pod, now); err != nil {
-                t.Fatalf("assumePod failed: %v", err)
-            }
-            cache.cleanupAssumedPods(logger, now.Add(2*ttl))
-            // It should be expired and removed.
-            if err := isForgottenFromCache(tt.pod, cache); err != nil {
-                t.Error(err)
-            }
-            if err := cache.AddPod(logger, tt.pod); err != nil {
-                t.Fatalf("AddPod failed: %v", err)
-            }
-            // check after expiration. confirmed pods shouldn't be expired.
-            n := cache.nodes[nodeName]
-            if err := deepEqualWithoutGeneration(n, tt.wNodeInfo); err != nil {
-                t.Error(err)
-            }
-        })
-    }
+    logger, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+    now := time.Now()
+    cache := newCache(ctx, ttl, time.Second)
+    if err := assumeAndFinishBinding(logger, cache, test.pod, now); err != nil {
+        t.Fatalf("assumePod failed: %v", err)
+    }
+    cache.cleanupAssumedPods(logger, now.Add(2*ttl))
+    // It should be expired and removed.
+    if err := isForgottenFromCache(test.pod, cache); err != nil {
+        t.Error(err)
+    }
+    if err := cache.AddPod(logger, test.pod); err != nil {
+        t.Fatalf("AddPod failed: %v", err)
+    }
+    // check after expiration. confirmed pods shouldn't be expired.
+    n := cache.nodes[nodeName]
+    if err := deepEqualWithoutGeneration(n, test.wNodeInfo); err != nil {
+        t.Error(err)
+    }
 }

@@ -665,12 +650,12 @@ func TestUpdatePod(t *testing.T) {
         makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
         makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
     }
-    tests := []struct {
+    test := struct {
         podsToAdd    []*v1.Pod
         podsToUpdate []*v1.Pod

         wNodeInfo []*framework.NodeInfo
-    }{{ // add a pod and then update it twice
+    }{ // add a pod and then update it twice
         podsToAdd:    []*v1.Pod{testPods[0]},
         podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
         wNodeInfo: []*framework.NodeInfo{newNodeInfo(
@@ -698,34 +683,30 @@ func TestUpdatePod(t *testing.T) {
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
             make(map[string]*framework.ImageStateSummary),
         )},
-    }}
+    }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            logger, ctx := ktesting.NewTestContext(t)
-            ctx, cancel := context.WithCancel(ctx)
-            defer cancel()
-            cache := newCache(ctx, ttl, time.Second)
-            for _, podToAdd := range tt.podsToAdd {
-                if err := cache.AddPod(logger, podToAdd); err != nil {
-                    t.Fatalf("AddPod failed: %v", err)
-                }
-            }
+    logger, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+    cache := newCache(ctx, ttl, time.Second)
+    for _, podToAdd := range test.podsToAdd {
+        if err := cache.AddPod(logger, podToAdd); err != nil {
+            t.Fatalf("AddPod failed: %v", err)
+        }
+    }

-            for j := range tt.podsToUpdate {
-                if j == 0 {
-                    continue
-                }
-                if err := cache.UpdatePod(logger, tt.podsToUpdate[j-1], tt.podsToUpdate[j]); err != nil {
-                    t.Fatalf("UpdatePod failed: %v", err)
-                }
-                // check after expiration. confirmed pods shouldn't be expired.
-                n := cache.nodes[nodeName]
-                if err := deepEqualWithoutGeneration(n, tt.wNodeInfo[j-1]); err != nil {
-                    t.Errorf("update %d: %v", j, err)
-                }
-            }
-        })
-    }
+    for j := range test.podsToUpdate {
+        if j == 0 {
+            continue
+        }
+        if err := cache.UpdatePod(logger, test.podsToUpdate[j-1], test.podsToUpdate[j]); err != nil {
+            t.Fatalf("UpdatePod failed: %v", err)
+        }
+        // check after expiration. confirmed pods shouldn't be expired.
+        n := cache.nodes[nodeName]
+        if err := deepEqualWithoutGeneration(n, test.wNodeInfo[j-1]); err != nil {
+            t.Errorf("update %d: %v", j, err)
+        }
+    }
 }

@@ -738,16 +719,15 @@ func TestUpdatePodAndGet(t *testing.T) {
         makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
     }
     tests := []struct {
-        pod *v1.Pod
-
+        name        string
+        pod         *v1.Pod
         podToUpdate *v1.Pod
         handler     func(logger klog.Logger, cache Cache, pod *v1.Pod) error
-
-        assumePod bool
+        assumePod   bool
     }{
         {
-            pod: testPods[0],
-
+            name:        "do not update pod when pod information has not changed",
+            pod:         testPods[0],
             podToUpdate: testPods[0],
             handler: func(logger klog.Logger, cache Cache, pod *v1.Pod) error {
                 return cache.AssumePod(logger, pod)
@@ -755,8 +735,8 @@ func TestUpdatePodAndGet(t *testing.T) {
             assumePod: true,
         },
         {
-            pod: testPods[0],
-
+            name:        "update pod when pod information changed",
+            pod:         testPods[0],
             podToUpdate: testPods[1],
             handler: func(logger klog.Logger, cache Cache, pod *v1.Pod) error {
                 return cache.AddPod(logger, pod)
@@ -765,40 +745,40 @@ func TestUpdatePodAndGet(t *testing.T) {
         },
     }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
+    for _, tc := range tests {
+        t.Run(tc.name, func(t *testing.T) {
             logger, ctx := ktesting.NewTestContext(t)
             ctx, cancel := context.WithCancel(ctx)
             defer cancel()
             cache := newCache(ctx, ttl, time.Second)
             // trying to get an unknown pod should return an error
             // podToUpdate has not been added yet
-            if _, err := cache.GetPod(tt.podToUpdate); err == nil {
+            if _, err := cache.GetPod(tc.podToUpdate); err == nil {
                 t.Error("expected error, no error found")
             }

             // trying to update an unknown pod should return an error
             // pod has not been added yet
-            if err := cache.UpdatePod(logger, tt.pod, tt.podToUpdate); err == nil {
+            if err := cache.UpdatePod(logger, tc.pod, tc.podToUpdate); err == nil {
                 t.Error("expected error, no error found")
             }

-            if err := tt.handler(logger, cache, tt.pod); err != nil {
+            if err := tc.handler(logger, cache, tc.pod); err != nil {
                 t.Fatalf("unexpected err: %v", err)
             }

-            if !tt.assumePod {
-                if err := cache.UpdatePod(logger, tt.pod, tt.podToUpdate); err != nil {
+            if !tc.assumePod {
+                if err := cache.UpdatePod(logger, tc.pod, tc.podToUpdate); err != nil {
                     t.Fatalf("UpdatePod failed: %v", err)
                 }
             }

-            cachedPod, err := cache.GetPod(tt.pod)
+            cachedPod, err := cache.GetPod(tc.pod)
             if err != nil {
                 t.Fatalf("GetPod failed: %v", err)
             }
-            if !reflect.DeepEqual(tt.podToUpdate, cachedPod) {
-                t.Fatalf("pod get=%s, want=%s", cachedPod, tt.podToUpdate)
+            if !reflect.DeepEqual(tc.podToUpdate, cachedPod) {
+                t.Fatalf("pod get=%s, want=%s", cachedPod, tc.podToUpdate)
             }
         })
     }
@@ -812,13 +792,13 @@ func TestExpireAddUpdatePod(t *testing.T) {
         makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
         makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
     }
-    tests := []struct {
+    test := struct {
         podsToAssume []*v1.Pod
         podsToAdd    []*v1.Pod
         podsToUpdate []*v1.Pod

         wNodeInfo []*framework.NodeInfo
-    }{{ // Pod is assumed, expired, and added. Then it would be updated twice.
+    }{ // Pod is assumed, expired, and added. Then it would be updated twice.
         podsToAssume: []*v1.Pod{testPods[0]},
         podsToAdd:    []*v1.Pod{testPods[0]},
         podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
@@ -847,42 +827,38 @@ func TestExpireAddUpdatePod(t *testing.T) {
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
             make(map[string]*framework.ImageStateSummary),
         )},
-    }}
+    }

-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            logger, ctx := ktesting.NewTestContext(t)
-            ctx, cancel := context.WithCancel(ctx)
-            defer cancel()
-            now := time.Now()
-            cache := newCache(ctx, ttl, time.Second)
-            for _, podToAssume := range tt.podsToAssume {
-                if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
-                    t.Fatalf("assumePod failed: %v", err)
-                }
-            }
-            cache.cleanupAssumedPods(logger, now.Add(2*ttl))
+    logger, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+    now := time.Now()
+    cache := newCache(ctx, ttl, time.Second)
+    for _, podToAssume := range test.podsToAssume {
+        if err := assumeAndFinishBinding(logger, cache, podToAssume, now); err != nil {
+            t.Fatalf("assumePod failed: %v", err)
+        }
+    }
+    cache.cleanupAssumedPods(logger, now.Add(2*ttl))

-            for _, podToAdd := range tt.podsToAdd {
-                if err := cache.AddPod(logger, podToAdd); err != nil {
-                    t.Fatalf("AddPod failed: %v", err)
-                }
-            }
+    for _, podToAdd := range test.podsToAdd {
+        if err := cache.AddPod(logger, podToAdd); err != nil {
+            t.Fatalf("AddPod failed: %v", err)
+        }
+    }

-            for j := range tt.podsToUpdate {
-                if j == 0 {
-                    continue
-                }
-                if err := cache.UpdatePod(logger, tt.podsToUpdate[j-1], tt.podsToUpdate[j]); err != nil {
-                    t.Fatalf("UpdatePod failed: %v", err)
-                }
-                // check after expiration. confirmed pods shouldn't be expired.
-                n := cache.nodes[nodeName]
-                if err := deepEqualWithoutGeneration(n, tt.wNodeInfo[j-1]); err != nil {
-                    t.Errorf("update %d: %v", j, err)
-                }
-            }
-        })
-    }
+    for j := range test.podsToUpdate {
+        if j == 0 {
+            continue
+        }
+        if err := cache.UpdatePod(logger, test.podsToUpdate[j-1], test.podsToUpdate[j]); err != nil {
+            t.Fatalf("UpdatePod failed: %v", err)
+        }
+        // check after expiration. confirmed pods shouldn't be expired.
+        n := cache.nodes[nodeName]
+        if err := deepEqualWithoutGeneration(n, test.wNodeInfo[j-1]); err != nil {
+            t.Errorf("update %d: %v", j, err)
+        }
+    }
 }

@@ -897,47 +873,41 @@ func makePodWithEphemeralStorage(nodeName, ephemeralStorage string) *v1.Pod {
 func TestEphemeralStorageResource(t *testing.T) {
     nodeName := "node"
     podE := makePodWithEphemeralStorage(nodeName, "500")
-    tests := []struct {
+    test := struct {
         pod       *v1.Pod
         wNodeInfo *framework.NodeInfo
     }{
-        {
-            pod: podE,
-            wNodeInfo: newNodeInfo(
-                &framework.Resource{
-                    EphemeralStorage: 500,
-                },
-                &framework.Resource{
-                    MilliCPU: schedutil.DefaultMilliCPURequest,
-                    Memory:   schedutil.DefaultMemoryRequest,
-                },
-                []*v1.Pod{podE},
-                framework.HostPortInfo{},
-                make(map[string]*framework.ImageStateSummary),
-            ),
-        },
+        pod: podE,
+        wNodeInfo: newNodeInfo(
+            &framework.Resource{
+                EphemeralStorage: 500,
+            },
+            &framework.Resource{
+                MilliCPU: schedutil.DefaultMilliCPURequest,
+                Memory:   schedutil.DefaultMemoryRequest,
+            },
+            []*v1.Pod{podE},
+            framework.HostPortInfo{},
+            make(map[string]*framework.ImageStateSummary),
+        ),
     }
+    logger, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+    cache := newCache(ctx, time.Second, time.Second)
+    if err := cache.AddPod(logger, test.pod); err != nil {
+        t.Fatalf("AddPod failed: %v", err)
+    }
+    n := cache.nodes[nodeName]
+    if err := deepEqualWithoutGeneration(n, test.wNodeInfo); err != nil {
+        t.Error(err)
+    }
-    for i, tt := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            logger, ctx := ktesting.NewTestContext(t)
-            ctx, cancel := context.WithCancel(ctx)
-            defer cancel()
-            cache := newCache(ctx, time.Second, time.Second)
-            if err := cache.AddPod(logger, tt.pod); err != nil {
-                t.Fatalf("AddPod failed: %v", err)
-            }
-            n := cache.nodes[nodeName]
-            if err := deepEqualWithoutGeneration(n, tt.wNodeInfo); err != nil {
-                t.Error(err)
-            }
-
-            if err := cache.RemovePod(logger, tt.pod); err != nil {
-                t.Fatalf("RemovePod failed: %v", err)
-            }
-            if _, err := cache.GetPod(tt.pod); err == nil {
-                t.Errorf("pod was not deleted")
-            }
-        })
-    }
+
+    if err := cache.RemovePod(logger, test.pod); err != nil {
+        t.Fatalf("RemovePod failed: %v", err)
+    }
+    if _, err := cache.GetPod(test.pod); err == nil {
+        t.Errorf("pod was not deleted")
+    }
 }

@@ -1090,10 +1060,12 @@ func TestNodeOperators(t *testing.T) {
     resourceFoo := resource.MustParse("1")

     tests := []struct {
+        name string
         node *v1.Node
         pods []*v1.Pod
     }{
         {
+            name: "operate the node with one pod",
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: nodeName,
@@ -1145,6 +1117,7 @@ func TestNodeOperators(t *testing.T) {
             },
         },
         {
+            name: "operate the node with two pods",
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: nodeName,
@@ -1209,17 +1182,17 @@ func TestNodeOperators(t *testing.T) {
         },
     }

-    for i, test := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
+    for _, tc := range tests {
+        t.Run(tc.name, func(t *testing.T) {
             logger, ctx := ktesting.NewTestContext(t)
             ctx, cancel := context.WithCancel(ctx)
             defer cancel()
-            expected := buildNodeInfo(test.node, test.pods)
-            node := test.node
+            expected := buildNodeInfo(tc.node, tc.pods)
+            node := tc.node

             cache := newCache(ctx, time.Second, time.Second)
             cache.AddNode(logger, node)
-            for _, pod := range test.pods {
+            for _, pod := range tc.pods {
                 if err := cache.AddPod(logger, pod); err != nil {
                     t.Fatal(err)
                 }
@@ -1308,14 +1281,14 @@ func TestNodeOperators(t *testing.T) {
                 t.Errorf("unexpected cache.nodeTree after removing node: %v", node.Name)
             }
             // Pods are still in the pods cache.
-            for _, p := range test.pods {
+            for _, p := range tc.pods {
                 if _, err := cache.GetPod(p); err != nil {
                     t.Error(err)
                 }
             }

             // Step 5: removing pods for the removed node still succeeds.
-            for _, p := range test.pods {
+            for _, p := range tc.pods {
                 if err := cache.RemovePod(logger, p); err != nil {
                     t.Error(err)
                 }
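Note: with named subtests, one case can now be run in isolation, e.g. `go test ./pkg/scheduler/internal/cache/ -run 'TestNodeOperators/operate_the_node_with_one_pod'`; the `-run` pattern matches subtest names with spaces replaced by underscores.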