e2e: use Ginkgo context

All code must use the context from Ginkgo when doing API calls or polling for a
change; otherwise the code does not return promptly when the test gets aborted.
Patrick Ohly
2022-12-12 10:11:10 +01:00
parent bf1d1dfd0f
commit 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions
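The diff below applies a single mechanical pattern throughout the file. For orientation, here is a minimal sketch of that pattern (the spec name, framework name, and pod listing are made up for illustration; only the pattern itself comes from the commit): Ginkgo v2 passes a context.Context into setup and spec functions, and that context is cancelled when the spec is aborted or times out, so API calls and polls made with it return promptly instead of hanging on context.TODO().

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("ginkgo context example", func() {
	// Hypothetical spec, not part of this commit.
	f := framework.NewDefaultFramework("ginkgo-ctx")

	ginkgo.It("passes the Ginkgo context to API calls", func(ctx context.Context) {
		// ctx is cancelled as soon as the spec is interrupted, so this List
		// returns promptly during an abort instead of blocking the way a
		// call made with context.TODO() would.
		pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
		framework.ExpectNoError(err)
		framework.Logf("found %d pods", len(pods.Items))
	})
})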

@@ -81,33 +81,33 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
{name: highPriorityClassName, value: highPriority},
}
-ginkgo.AfterEach(func() {
+ginkgo.AfterEach(func(ctx context.Context) {
for _, pair := range priorityPairs {
-cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, *metav1.NewDeleteOptions(0))
+_ = cs.SchedulingV1().PriorityClasses().Delete(ctx, pair.name, *metav1.NewDeleteOptions(0))
}
for _, node := range nodeList.Items {
nodeCopy := node.DeepCopy()
delete(nodeCopy.Status.Capacity, testExtendedResource)
delete(nodeCopy.Status.Allocatable, testExtendedResource)
-err := patchNode(cs, &node, nodeCopy)
+err := patchNode(ctx, cs, &node, nodeCopy)
framework.ExpectNoError(err)
}
})
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
var err error
for _, pair := range priorityPairs {
-_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}, metav1.CreateOptions{})
+_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.Failf("expected 'alreadyExists' as error, got instead: %v", err)
}
}
-e2enode.WaitForTotalHealthy(cs, time.Minute)
-nodeList, err = e2enode.GetReadySchedulableNodes(cs)
+e2enode.WaitForTotalHealthy(ctx, cs, time.Minute)
+nodeList, err = e2enode.GetReadySchedulableNodes(ctx, cs)
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
}
@@ -116,7 +116,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
workerNodes.Insert(n.Name)
}
-err = framework.CheckTestingNSDeletedExcept(cs, ns)
+err = framework.CheckTestingNSDeletedExcept(ctx, cs, ns)
framework.ExpectNoError(err)
})
@@ -140,7 +140,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
nodeCopy := node.DeepCopy()
nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("5")
nodeCopy.Status.Allocatable[testExtendedResource] = resource.MustParse("5")
-err := patchNode(cs, &node, nodeCopy)
+err := patchNode(ctx, cs, &node, nodeCopy)
framework.ExpectNoError(err)
for j := 0; j < 2; j++ {
@@ -153,7 +153,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
if len(pods) == 0 {
priorityName = lowPriorityClassName
}
-pausePod := createPausePod(f, pausePodConfig{
+pausePod := createPausePod(ctx, f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%d-%v", i, j, priorityName),
PriorityClassName: priorityName,
Resources: &v1.ResourceRequirements{
@@ -184,7 +184,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
+framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod))
}
// Set the pod request to the first pod's resources (should be low priority pod)
@@ -192,7 +192,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
ginkgo.By("Run a high priority pod that has same requirements as that of lower priority pod")
// Create a high priority pod and make sure it is scheduled on the same node as the low priority pod.
-runPausePodWithTimeout(f, pausePodConfig{
+runPausePodWithTimeout(ctx, f, pausePodConfig{
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Resources: &v1.ResourceRequirements{
@@ -201,14 +201,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
},
}, framework.PodStartShortTimeout)
-preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(context.TODO(), pods[0].Name, metav1.GetOptions{})
+preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(ctx, pods[0].Name, metav1.GetOptions{})
podPreempted := (err != nil && apierrors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
if !podPreempted {
framework.Failf("expected pod to be preempted, instead got pod %+v and error %v", preemptedPod, err)
}
for i := 1; i < len(pods); i++ {
-livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(context.TODO(), pods[i].Name, metav1.GetOptions{})
+livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(ctx, pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
}
@@ -231,7 +231,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
nodeCopy := node.DeepCopy()
nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("5")
nodeCopy.Status.Allocatable[testExtendedResource] = resource.MustParse("5")
-err := patchNode(cs, &node, nodeCopy)
+err := patchNode(ctx, cs, &node, nodeCopy)
framework.ExpectNoError(err)
for j := 0; j < 2; j++ {
@@ -244,7 +244,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
if len(pods) == 0 {
priorityName = lowPriorityClassName
}
-pausePod := createPausePod(f, pausePodConfig{
+pausePod := createPausePod(ctx, f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%d-%v", i, j, priorityName),
PriorityClassName: priorityName,
Resources: &v1.ResourceRequirements{
@@ -275,7 +275,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
+framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod))
}
// We want this pod to be preempted
@@ -285,12 +285,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
defer func() {
// Clean-up the critical pod
// Always run cleanup to make sure the pod is properly cleaned up.
-err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", *metav1.NewDeleteOptions(0))
+err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(ctx, "critical-pod", *metav1.NewDeleteOptions(0))
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err)
}
}()
-runPausePodWithTimeout(f, pausePodConfig{
+runPausePodWithTimeout(ctx, f, pausePodConfig{
Name: "critical-pod",
Namespace: metav1.NamespaceSystem,
PriorityClassName: scheduling.SystemClusterCritical,
@@ -302,15 +302,15 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
defer func() {
// Clean-up the critical pod
-err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", *metav1.NewDeleteOptions(0))
+err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(ctx, "critical-pod", *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}()
// Make sure that the lowest priority pod is deleted.
-preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(context.TODO(), pods[0].Name, metav1.GetOptions{})
+preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(ctx, pods[0].Name, metav1.GetOptions{})
podPreempted := (err != nil && apierrors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
for i := 1; i < len(pods); i++ {
-livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(context.TODO(), pods[i].Name, metav1.GetOptions{})
+livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(ctx, pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
}
@@ -333,7 +333,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
nodeCopy := node.DeepCopy()
nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("1")
nodeCopy.Status.Allocatable[testExtendedResource] = resource.MustParse("1")
-err := patchNode(cs, &node, nodeCopy)
+err := patchNode(ctx, cs, &node, nodeCopy)
framework.ExpectNoError(err)
// prepare node affinity to make sure both the lower and higher priority pods are scheduled on the same node
@@ -352,7 +352,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
ginkgo.By("Create a low priority pod that consumes 1/1 of node resources")
-victimPod := createPausePod(f, pausePodConfig{
+victimPod := createPausePod(ctx, f, pausePodConfig{
Name: "victim-pod",
PriorityClassName: lowPriorityClassName,
Resources: &v1.ResourceRequirements{
@@ -365,13 +365,13 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
framework.Logf("Created pod: %v", victimPod.Name)
ginkgo.By("Wait for the victim pod to be scheduled")
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, victimPod))
+framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, victimPod))
// Remove the finalizer so that the victim pod can be GCed
-defer e2epod.NewPodClient(f).RemoveFinalizer(victimPod.Name, testFinalizer)
+defer e2epod.NewPodClient(f).RemoveFinalizer(ctx, victimPod.Name, testFinalizer)
ginkgo.By("Create a high priority pod to trigger preemption of the lower priority pod")
-preemptorPod := createPausePod(f, pausePodConfig{
+preemptorPod := createPausePod(ctx, f, pausePodConfig{
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Resources: &v1.ResourceRequirements{
@@ -383,11 +383,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
framework.Logf("Created pod: %v", preemptorPod.Name)
ginkgo.By("Waiting for the victim pod to be terminating")
-err = e2epod.WaitForPodTerminatingInNamespaceTimeout(f.ClientSet, victimPod.Name, victimPod.Namespace, framework.PodDeleteTimeout)
+err = e2epod.WaitForPodTerminatingInNamespaceTimeout(ctx, f.ClientSet, victimPod.Name, victimPod.Namespace, framework.PodDeleteTimeout)
framework.ExpectNoError(err)
ginkgo.By("Verifying the pod has the pod disruption condition")
-e2epod.VerifyPodHasConditionWithType(f, victimPod, v1.DisruptionTarget)
+e2epod.VerifyPodHasConditionWithType(ctx, f, victimPod, v1.DisruptionTarget)
})
ginkgo.Context("PodTopologySpread Preemption", func() {
@@ -396,29 +396,29 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
topologyKey := "kubernetes.io/e2e-pts-preemption"
var fakeRes v1.ResourceName = "example.com/fakePTSRes"
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
if len(nodeList.Items) < 2 {
ginkgo.Skip("At least 2 nodes are required to run the test")
}
ginkgo.By("Trying to get 2 available nodes which can run pod")
-nodeNames = Get2NodesThatCanRunPod(f)
+nodeNames = Get2NodesThatCanRunPod(ctx, f)
ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
for _, nodeName := range nodeNames {
e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
-node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// update Node API object with a fake resource
ginkgo.By(fmt.Sprintf("Apply 10 fake resource to node %v.", node.Name))
nodeCopy := node.DeepCopy()
nodeCopy.Status.Capacity[fakeRes] = resource.MustParse("10")
nodeCopy.Status.Allocatable[fakeRes] = resource.MustParse("10")
-err = patchNode(cs, node, nodeCopy)
+err = patchNode(ctx, cs, node, nodeCopy)
framework.ExpectNoError(err)
nodes = append(nodes, node)
}
})
-ginkgo.AfterEach(func() {
+ginkgo.AfterEach(func(ctx context.Context) {
for _, nodeName := range nodeNames {
e2enode.RemoveLabelOffNode(cs, nodeName, topologyKey)
}
@@ -426,7 +426,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
nodeCopy := node.DeepCopy()
delete(nodeCopy.Status.Capacity, fakeRes)
delete(nodeCopy.Status.Allocatable, fakeRes)
-err := patchNode(cs, node, nodeCopy)
+err := patchNode(ctx, cs, node, nodeCopy)
framework.ExpectNoError(err)
}
})
@@ -474,10 +474,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
ginkgo.By("Create 1 High Pod and 3 Low Pods to occupy 9/10 of fake resources on both nodes.")
// Prepare 1 High Pod and 3 Low Pods
-runPausePod(f, highPodCfg)
+runPausePod(ctx, f, highPodCfg)
for i := 1; i <= 3; i++ {
lowPodCfg.Name = fmt.Sprintf("low-%v", i)
-runPausePod(f, lowPodCfg)
+runPausePod(ctx, f, lowPodCfg)
}
ginkgo.By("Create 1 Medium Pod with TopologySpreadConstraints")
@@ -511,14 +511,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// However, in that case, the Pods spread becomes [<high>, <medium, low, low>], which doesn't
// satisfy the pod topology spread constraints. Hence it needs to preempt another low pod
// to make the Pods spread like [<high>, <medium, low>].
-runPausePod(f, mediumPodCfg)
+runPausePod(ctx, f, mediumPodCfg)
ginkgo.By("Verify there are 3 Pods left in this namespace")
wantPods := sets.NewString("high", "medium", "low")
// Wait until the number of pods stabilizes. Note that `medium` pod can get scheduled once the
// second low priority pod is marked as terminating.
-pods, err := e2epod.WaitForNumberOfPods(cs, ns, 3, framework.PollShortTimeout)
+pods, err := e2epod.WaitForNumberOfPods(ctx, cs, ns, 3, framework.PollShortTimeout)
framework.ExpectNoError(err)
for _, pod := range pods.Items {
@@ -546,11 +546,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
priorityPairs := make([]priorityPair, 0)
-ginkgo.AfterEach(func() {
+ginkgo.AfterEach(func(ctx context.Context) {
// print out additional info if tests failed
if ginkgo.CurrentSpecReport().Failed() {
// List existing PriorityClasses.
-priorityList, err := cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{})
+priorityList, err := cs.SchedulingV1().PriorityClasses().List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("Unable to list PriorityClasses: %v", err)
} else {
@@ -565,26 +565,26 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
nodeCopy := node.DeepCopy()
delete(nodeCopy.Status.Capacity, fakecpu)
delete(nodeCopy.Status.Allocatable, fakecpu)
-err := patchNode(cs, node, nodeCopy)
+err := patchNode(ctx, cs, node, nodeCopy)
framework.ExpectNoError(err)
}
for _, pair := range priorityPairs {
-cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, *metav1.NewDeleteOptions(0))
+_ = cs.SchedulingV1().PriorityClasses().Delete(ctx, pair.name, *metav1.NewDeleteOptions(0))
}
})
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
cs = f.ClientSet
ns = f.Namespace.Name
// find an available node
ginkgo.By("Finding an available node")
-nodeName := GetNodeThatCanRunPod(f)
+nodeName := GetNodeThatCanRunPod(ctx, f)
framework.Logf("found a healthy node: %s", nodeName)
// get the node API object
var err error
-node, err = cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+node, err = cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
framework.Failf("error getting node %q: %v", nodeName, err)
}
@@ -598,7 +598,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
nodeCopy := node.DeepCopy()
nodeCopy.Status.Capacity[fakecpu] = resource.MustParse("1000")
nodeCopy.Status.Allocatable[fakecpu] = resource.MustParse("1000")
-err = patchNode(cs, node, nodeCopy)
+err = patchNode(ctx, cs, node, nodeCopy)
framework.ExpectNoError(err)
// create four PriorityClass: p1, p2, p3, p4
@@ -606,7 +606,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
priorityName := fmt.Sprintf("p%d", i)
priorityVal := int32(i)
priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal})
-_, err := cs.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}, metav1.CreateOptions{})
+_, err := cs.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}, metav1.CreateOptions{})
if err != nil {
framework.Logf("Failed to create priority '%v/%v'. Reason: %v. Msg: %v", priorityName, priorityVal, apierrors.ReasonForError(err), err)
}
@@ -629,11 +629,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
_, podController := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options)
+obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options)
+return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options)
},
},
&v1.Pod{},
@@ -702,7 +702,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
// create ReplicaSet{1,2,3} so as to occupy 950/1000 fake resource
for i := range rsConfs {
-runPauseRS(f, rsConfs[i])
+runPauseRS(ctx, f, rsConfs[i])
}
framework.Logf("pods created so far: %v", podNamesSeen)
@@ -720,8 +720,8 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Limits: v1.ResourceList{fakecpu: resource.MustParse("500")},
},
}
-preemptorPod := createPod(f, preemptorPodConf)
-waitForPreemptingWithTimeout(f, preemptorPod, framework.PodGetTimeout)
+preemptorPod := createPod(ctx, f, preemptorPodConf)
+waitForPreemptingWithTimeout(ctx, f, preemptorPod, framework.PodGetTimeout)
framework.Logf("pods created so far: %v", podNamesSeen)
@@ -768,12 +768,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
testUUID := uuid.New().String()
var pcs []*schedulingv1.PriorityClass
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
cs = f.ClientSet
// Create 2 PriorityClass: p1, p2.
for i := 1; i <= 2; i++ {
name, val := fmt.Sprintf("p%d", i), int32(i)
-pc, err := cs.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"e2e": testUUID}}, Value: val}, metav1.CreateOptions{})
+pc, err := cs.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"e2e": testUUID}}, Value: val}, metav1.CreateOptions{})
if err != nil {
framework.Logf("Failed to create priority '%v/%v'. Reason: %v. Msg: %v", name, val, apierrors.ReasonForError(err), err)
}
@@ -784,11 +784,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
})
-ginkgo.AfterEach(func() {
+ginkgo.AfterEach(func(ctx context.Context) {
// Print out additional info if tests failed.
if ginkgo.CurrentSpecReport().Failed() {
// List existing PriorityClasses.
-priorityList, err := cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{})
+priorityList, err := cs.SchedulingV1().PriorityClasses().List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("Unable to list PriorityClasses: %v", err)
} else {
@@ -800,7 +800,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
// Collection deletion on created PriorityClasses.
-err := cs.SchedulingV1().PriorityClasses().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: fmt.Sprintf("e2e=%v", testUUID)})
+err := cs.SchedulingV1().PriorityClasses().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: fmt.Sprintf("e2e=%v", testUUID)})
framework.ExpectNoError(err)
})
@@ -815,13 +815,13 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// 1. Patch/Update on immutable fields will fail.
pcCopy := pcs[0].DeepCopy()
pcCopy.Value = pcCopy.Value * 10
-err := patchPriorityClass(cs, pcs[0], pcCopy)
+err := patchPriorityClass(ctx, cs, pcs[0], pcCopy)
framework.ExpectError(err, "expect a patch error on an immutable field")
framework.Logf("%v", err)
pcCopy = pcs[1].DeepCopy()
pcCopy.Value = pcCopy.Value * 10
-_, err = cs.SchedulingV1().PriorityClasses().Update(context.TODO(), pcCopy, metav1.UpdateOptions{})
+_, err = cs.SchedulingV1().PriorityClasses().Update(ctx, pcCopy, metav1.UpdateOptions{})
framework.ExpectError(err, "expect an update error on an immutable field")
framework.Logf("%v", err)
@@ -829,21 +829,21 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
newDesc := "updated description"
pcCopy = pcs[0].DeepCopy()
pcCopy.Description = newDesc
-err = patchPriorityClass(cs, pcs[0], pcCopy)
+err = patchPriorityClass(ctx, cs, pcs[0], pcCopy)
framework.ExpectNoError(err)
pcCopy = pcs[1].DeepCopy()
pcCopy.Description = newDesc
-_, err = cs.SchedulingV1().PriorityClasses().Update(context.TODO(), pcCopy, metav1.UpdateOptions{})
+_, err = cs.SchedulingV1().PriorityClasses().Update(ctx, pcCopy, metav1.UpdateOptions{})
framework.ExpectNoError(err)
// 3. List existing PriorityClasses.
-_, err = cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{})
+_, err = cs.SchedulingV1().PriorityClasses().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
// 4. Verify fields of updated PriorityClasses.
for _, pc := range pcs {
-livePC, err := cs.SchedulingV1().PriorityClasses().Get(context.TODO(), pc.Name, metav1.GetOptions{})
+livePC, err := cs.SchedulingV1().PriorityClasses().Get(ctx, pc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(livePC.Value, pc.Value)
framework.ExpectEqual(livePC.Description, newDesc)
@@ -878,37 +878,37 @@ func initPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet
return pauseRS
}
-func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet {
+func createPauseRS(ctx context.Context, f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet {
namespace := conf.PodConfig.Namespace
if len(namespace) == 0 {
namespace = f.Namespace.Name
}
-rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(context.TODO(), initPauseRS(f, conf), metav1.CreateOptions{})
+rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(ctx, initPauseRS(f, conf), metav1.CreateOptions{})
framework.ExpectNoError(err)
return rs
}
-func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet {
-rs := createPauseRS(f, conf)
-framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout))
+func runPauseRS(ctx context.Context, f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet {
+rs := createPauseRS(ctx, f, conf)
+framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx, f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout))
return rs
}
-func createPod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
+func createPod(ctx context.Context, f *framework.Framework, conf pausePodConfig) *v1.Pod {
namespace := conf.Namespace
if len(namespace) == 0 {
namespace = f.Namespace.Name
}
-pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{})
+pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(ctx, initPausePod(f, conf), metav1.CreateOptions{})
framework.ExpectNoError(err)
return pod
}
// waitForPreemptingWithTimeout verifies if 'pod' is preempting within 'timeout', specifically it checks
// if the 'spec.NodeName' field of preemptor 'pod' has been set.
-func waitForPreemptingWithTimeout(f *framework.Framework, pod *v1.Pod, timeout time.Duration) {
+func waitForPreemptingWithTimeout(ctx context.Context, f *framework.Framework, pod *v1.Pod, timeout time.Duration) {
err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
-pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -920,7 +920,7 @@ func waitForPreemptingWithTimeout(f *framework.Framework, pod *v1.Pod, timeout t
framework.ExpectNoError(err, "pod %v/%v failed to preempt other pods", pod.Namespace, pod.Name)
}
-func patchNode(client clientset.Interface, old *v1.Node, new *v1.Node) error {
+func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, new *v1.Node) error {
oldData, err := json.Marshal(old)
if err != nil {
return err
@@ -934,11 +934,11 @@ func patchNode(client clientset.Interface, old *v1.Node, new *v1.Node) error {
if err != nil {
return fmt.Errorf("failed to create merge patch for node %q: %v", old.Name, err)
}
-_, err = client.CoreV1().Nodes().Patch(context.TODO(), old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+_, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
return err
}
-func patchPriorityClass(cs clientset.Interface, old, new *schedulingv1.PriorityClass) error {
+func patchPriorityClass(ctx context.Context, cs clientset.Interface, old, new *schedulingv1.PriorityClass) error {
oldData, err := json.Marshal(old)
if err != nil {
return err
@@ -952,6 +952,6 @@ func patchPriorityClass(cs clientset.Interface, old, new *schedulingv1.PriorityC
if err != nil {
return fmt.Errorf("failed to create merge patch for PriorityClass %q: %v", old.Name, err)
}
-_, err = cs.SchedulingV1().PriorityClasses().Patch(context.TODO(), old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+_, err = cs.SchedulingV1().PriorityClasses().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
}
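One follow-up note, not part of this commit: waitForPreemptingWithTimeout above still polls with wait.Poll and only threads ctx into the Get call inside the poll loop. A hypothetical fully context-aware variant could use wait.PollWithContext from k8s.io/apimachinery/pkg/util/wait, so that polling itself stops as soon as the Ginkgo context is cancelled. The sketch below assumes the same completion condition described in the function's comment (spec.NodeName has been set).

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPreemptingWithContext is a hypothetical context-aware variant of
// waitForPreemptingWithTimeout above: wait.PollWithContext stops polling as
// soon as ctx (the Ginkgo context) is cancelled, not just the Get call.
func waitForPreemptingWithContext(ctx context.Context, f *framework.Framework, pod *v1.Pod, timeout time.Duration) {
	err := wait.PollWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Done once the scheduler has assigned a node to the preemptor pod.
		return len(pod.Spec.NodeName) > 0, nil
	})
	framework.ExpectNoError(err, "pod %v/%v failed to preempt other pods", pod.Namespace, pod.Name)
}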