Merge pull request #100128 from ingvagabund/sig-scheduling-single-node-e2e
[sig-scheduling] SchedulerPreemption|SchedulerPredicates|SchedulerPriorities: adjust some e2e tests to run in a single node cluster scenario
Commit 2147937c41
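The change is the same across the three suites: the ginkgo.BeforeEach blocks that rely on two schedulable nodes now skip the spec when fewer than two are available, and the basic preemption specs create two victim pods per node instead of one. A minimal sketch of the skip guard, assembled from the hunks below and assuming the identifiers these suites already define (nodeList, nodeNames, Get2NodesThatCanRunPod, f):

    ginkgo.BeforeEach(func() {
        // Skip instead of failing when the cluster cannot provide two nodes,
        // e.g. in a single node cluster scenario.
        if len(nodeList.Items) < 2 {
            ginkgo.Skip("At least 2 nodes are required to run the test")
        }
        ginkgo.By("Trying to get 2 available nodes which can run pod")
        nodeNames = Get2NodesThatCanRunPod(f)
    })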
@@ -718,6 +718,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
     topologyKey := "kubernetes.io/e2e-pts-filter"

     ginkgo.BeforeEach(func() {
+        if len(nodeList.Items) < 2 {
+            ginkgo.Skip("At least 2 nodes are required to run the test")
+        }
         ginkgo.By("Trying to get 2 available nodes which can run pod")
         nodeNames = Get2NodesThatCanRunPod(f)
         ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
@@ -121,17 +121,19 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
     framework.ConformanceIt("validates basic preemption works", func() {
         var podRes v1.ResourceList

-        // Create one pod per node that uses a lot of the node's resources.
-        ginkgo.By("Create pods that use 2/3 of node resources.")
-        pods := make([]*v1.Pod, 0, len(nodeList.Items))
-        // Now create victim pods on each of the node with lower priority
+        // Create two pods per node that uses a lot of the node's resources.
+        ginkgo.By("Create pods that use 4/5 of node resources.")
+        pods := make([]*v1.Pod, 0, 2*len(nodeList.Items))
+        // Create pods in the cluster.
+        // One of them has low priority, making it the victim for preemption.
         for i, node := range nodeList.Items {
             // Update each node to advertise 3 available extended resources
             nodeCopy := node.DeepCopy()
-            nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("3")
+            nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("5")
             err := patchNode(cs, &node, nodeCopy)
             framework.ExpectNoError(err)

+            for j := 0; j < 2; j++ {
             // Request 2 of the available resources for the victim pods
             podRes = v1.ResourceList{}
             podRes[testExtendedResource] = resource.MustParse("2")
@@ -141,8 +143,8 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
             if len(pods) == 0 {
                 priorityName = lowPriorityClassName
             }
-            pods = append(pods, createPausePod(f, pausePodConfig{
-                Name: fmt.Sprintf("pod%d-%v", i, priorityName),
+            pausePod := createPausePod(f, pausePodConfig{
+                Name: fmt.Sprintf("pod%d-%d-%v", i, j, priorityName),
                 PriorityClassName: priorityName,
                 Resources: &v1.ResourceRequirements{
                     Requests: podRes,
@@ -161,8 +163,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
                     },
                 },
             },
-        }))
-        framework.Logf("Created pod: %v", pods[i].Name)
+        })
+        pods = append(pods, pausePod)
+        framework.Logf("Created pod: %v", pausePod.Name)
+        }
     }
     if len(pods) < 2 {
         framework.Failf("We need at least two pods to be created but " +
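Taken together, the hunks above make each node advertise 5 units of testExtendedResource and create two victim pods per node requesting 2 units each, i.e. 4 of the 5 units (hence the updated "4/5 of node resources" message), so that a low-priority victim and a second pod can coexist on the same node and the test also works in a single node cluster scenario. A rough sketch of how the victim-creation loop reads after the change, assembled only from the added lines above (the priorityName selection and the remaining pod-spec fields between the hunks are elided):

    for i, node := range nodeList.Items {
        // Advertise 5 units of the extended resource on the node.
        nodeCopy := node.DeepCopy()
        nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("5")
        err := patchNode(cs, &node, nodeCopy)
        framework.ExpectNoError(err)

        // Two victim pods per node, 2 units each: 4 of the 5 units are consumed.
        for j := 0; j < 2; j++ {
            podRes = v1.ResourceList{}
            podRes[testExtendedResource] = resource.MustParse("2")
            // ... priorityName is chosen as in the unchanged lines between the hunks ...
            pausePod := createPausePod(f, pausePodConfig{
                Name:              fmt.Sprintf("pod%d-%d-%v", i, j, priorityName),
                PriorityClassName: priorityName,
                // other pod-spec fields elided
                Resources: &v1.ResourceRequirements{Requests: podRes},
            })
            pods = append(pods, pausePod)
            framework.Logf("Created pod: %v", pausePod.Name)
        }
    }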
@@ -209,15 +213,16 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
     framework.ConformanceIt("validates lower priority pod preemption by critical pod", func() {
         var podRes v1.ResourceList

-        ginkgo.By("Create pods that use 2/3 of node resources.")
+        ginkgo.By("Create pods that use 4/5 of node resources.")
         pods := make([]*v1.Pod, 0, len(nodeList.Items))
         for i, node := range nodeList.Items {
             // Update each node to advertise 3 available extended resources
             nodeCopy := node.DeepCopy()
-            nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("3")
+            nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("5")
             err := patchNode(cs, &node, nodeCopy)
             framework.ExpectNoError(err)

+            for j := 0; j < 2; j++ {
             // Request 2 of the available resources for the victim pods
             podRes = v1.ResourceList{}
             podRes[testExtendedResource] = resource.MustParse("2")
@@ -227,8 +232,8 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
             if len(pods) == 0 {
                 priorityName = lowPriorityClassName
             }
-            pods = append(pods, createPausePod(f, pausePodConfig{
-                Name: fmt.Sprintf("pod%d-%v", i, priorityName),
+            pausePod := createPausePod(f, pausePodConfig{
+                Name: fmt.Sprintf("pod%d-%d-%v", i, j, priorityName),
                 PriorityClassName: priorityName,
                 Resources: &v1.ResourceRequirements{
                     Requests: podRes,
@ -247,8 +252,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}))
|
})
|
||||||
framework.Logf("Created pod: %v", pods[i].Name)
|
pods = append(pods, pausePod)
|
||||||
|
framework.Logf("Created pod: %v", pausePod.Name)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if len(pods) < 2 {
|
if len(pods) < 2 {
|
||||||
framework.Failf("We need at least two pods to be created but " +
|
framework.Failf("We need at least two pods to be created but " +
|
||||||
@@ -306,6 +313,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
     var fakeRes v1.ResourceName = "example.com/fakePTSRes"

     ginkgo.BeforeEach(func() {
+        if len(nodeList.Items) < 2 {
+            ginkgo.Skip("At least 2 nodes are required to run the test")
+        }
         ginkgo.By("Trying to get 2 available nodes which can run pod")
         nodeNames = Get2NodesThatCanRunPod(f)
         ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
@@ -388,6 +388,9 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
     topologyKey := "kubernetes.io/e2e-pts-score"

     ginkgo.BeforeEach(func() {
+        if len(nodeList.Items) < 2 {
+            ginkgo.Skip("At least 2 nodes are required to run the test")
+        }
         ginkgo.By("Trying to get 2 available nodes which can run pod")
         nodeNames = Get2NodesThatCanRunPod(f)
         ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))