Mirror of https://github.com/k3s-io/kubernetes.git
Cleanup: Pass context to client calls in scheduler/volumebinding tests
Signed-off-by: kerthcet <kerthcet@gmail.com>
commit e54ce7c0c6 (parent cc71683fda)
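
The whole commit applies one pattern: test helpers that used to mint a fresh context.TODO() at every client call now accept the caller's context, so a single cancellation reaches every API call a test makes. A minimal, self-contained sketch of the refactored shape in Go, assuming a simplified helper that takes the client as an explicit parameter (the real testEnv stores it as a field):

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// updateVolumes mirrors the refactored helper below: the ctx parameter is
// threaded into every client call in place of a fresh context.TODO().
func updateVolumes(ctx context.Context, client kubernetes.Interface, pvs []*v1.PersistentVolume) error {
	for _, pv := range pvs {
		if _, err := client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // in a test, this is the cancel that run() defers

	pv := &v1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: "pv-1"}}
	client := fake.NewSimpleClientset(pv)
	fmt.Println(updateVolumes(ctx, client, []*v1.PersistentVolume{pv})) // <nil>
}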
@@ -333,9 +333,9 @@ func (env *testEnv) initVolumes(cachedPVs []*v1.PersistentVolume, apiPVs []*v1.P
 
 }
 
-func (env *testEnv) updateVolumes(pvs []*v1.PersistentVolume) error {
+func (env *testEnv) updateVolumes(ctx context.Context, pvs []*v1.PersistentVolume) error {
 	for i, pv := range pvs {
-		newPv, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
+		newPv, err := env.client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{})
 		if err != nil {
 			return err
 		}
@@ -359,9 +359,9 @@ func (env *testEnv) updateVolumes(pvs []*v1.PersistentVolume) error {
 	})
 }
 
-func (env *testEnv) updateClaims(pvcs []*v1.PersistentVolumeClaim) error {
+func (env *testEnv) updateClaims(ctx context.Context, pvcs []*v1.PersistentVolumeClaim) error {
 	for i, pvc := range pvcs {
-		newPvc, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc, metav1.UpdateOptions{})
+		newPvc, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(ctx, pvc, metav1.UpdateOptions{})
 		if err != nil {
 			return err
 		}
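
A hedged aside on what this buys in these particular tests: the fake clientset used by this test env accepts a context, but to my knowledge of client-go in this era its default object-tracker reactors never consult it, so a canceled ctx does not by itself make these Updates fail. The value of the change is correct plumbing and consistency with real clients, as this assumed-behavior sketch illustrates:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Cancel up front: against a real API server, every call made with this
	// ctx would fail immediately with "context canceled".
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	client := fake.NewSimpleClientset(&v1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: "pv-1"}})
	pv, _ := client.CoreV1().PersistentVolumes().Get(ctx, "pv-1", metav1.GetOptions{})

	// Assumption: the fake client's default reactors ignore ctx, so this
	// still succeeds; the cleanup is about plumbing, not about changing what
	// these particular tests observe.
	_, err := client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{})
	fmt.Println(err) // <nil>
}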
@@ -1711,7 +1711,6 @@ func TestCheckBindings(t *testing.T) {
 	run := func(t *testing.T, scenario scenarioType) {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 
 		// Setup
 		pod := makePod("test-pod").
 			withNamespace("testns").
@@ -1727,14 +1726,14 @@ func TestCheckBindings(t *testing.T) {
 		if scenario.deletePVs {
 			testEnv.deleteVolumes(scenario.initPVs)
 		} else {
-			if err := testEnv.updateVolumes(scenario.apiPVs); err != nil {
+			if err := testEnv.updateVolumes(ctx, scenario.apiPVs); err != nil {
 				t.Errorf("Failed to update PVs: %v", err)
 			}
 		}
 		if scenario.deletePVCs {
 			testEnv.deleteClaims(scenario.initPVCs)
 		} else {
-			if err := testEnv.updateClaims(scenario.apiPVCs); err != nil {
+			if err := testEnv.updateClaims(ctx, scenario.apiPVCs); err != nil {
 				t.Errorf("Failed to update PVCs: %v", err)
 			}
 		}
@@ -1860,10 +1859,10 @@ func TestCheckBindingsWithCSIMigration(t *testing.T) {
 		testEnv.assumeVolumes(t, "node1", pod, scenario.bindings, scenario.provisionedPVCs)
 
 		// Before execute
-		if err := testEnv.updateVolumes(scenario.apiPVs); err != nil {
+		if err := testEnv.updateVolumes(ctx, scenario.apiPVs); err != nil {
 			t.Errorf("Failed to update PVs: %v", err)
 		}
-		if err := testEnv.updateClaims(scenario.apiPVCs); err != nil {
+		if err := testEnv.updateClaims(ctx, scenario.apiPVCs); err != nil {
 			t.Errorf("Failed to update PVCs: %v", err)
 		}
 
@@ -1909,7 +1908,7 @@ func TestBindPodVolumes(t *testing.T) {
 		apiPVC *v1.PersistentVolumeClaim
 
 		// This function runs with a delay of 5 seconds
-		delayFunc func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim)
+		delayFunc func(t *testing.T, ctx context.Context, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim)
 
 		// Expected return values
 		shouldFail bool
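
Threading ctx into the delayFunc signature means the delayed callback shares the context the test owns: once run() returns and its deferred cancel fires, any client call the callback still makes carries a canceled context. A sketch of that lifecycle with hypothetical names (delayedUpdate and runWithDelay are illustrations, not functions from this diff; the ctx.Done() guard is an extra, not a transcription of the real runner, which simply sleeps and calls delayFunc):

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// delayedUpdate is a hypothetical stand-in for a scenario's delayFunc body.
func delayedUpdate(ctx context.Context, client kubernetes.Interface, name string) error {
	pvc, err := client.CoreV1().PersistentVolumeClaims("testns").Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	_, err = client.CoreV1().PersistentVolumeClaims("testns").Update(ctx, pvc, metav1.UpdateOptions{})
	return err
}

// runWithDelay mirrors the runner's shape: the goroutine inherits the test's
// ctx rather than minting context.TODO() values of its own.
func runWithDelay(ctx context.Context, client kubernetes.Interface, name string) {
	go func() {
		select {
		case <-time.After(5 * time.Second):
			_ = delayedUpdate(ctx, client, name) // shares the test's cancellation
		case <-ctx.Done():
			// Test already finished; skip the delayed API traffic.
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	runWithDelay(ctx, fake.NewSimpleClientset(), "pvc-1")
	time.Sleep(10 * time.Millisecond) // placeholder for the test body
}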
@@ -1930,14 +1929,14 @@ func TestBindPodVolumes(t *testing.T) {
 			initPVCs:   []*v1.PersistentVolumeClaim{unboundPVC},
 			binding:    makeBinding(unboundPVC, pvNode1aBound),
 			shouldFail: false, // Will succeed after PVC is fully bound to this PV by pv controller.
-			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
+			delayFunc: func(t *testing.T, ctx context.Context, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
 				pvc := pvcs[0]
 				pv := pvs[0]
 				// Update PVC to be fully bound to PV
 				newPVC := pvc.DeepCopy()
 				newPVC.Spec.VolumeName = pv.Name
 				metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, volume.AnnBindCompleted, "yes")
-				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
+				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(ctx, newPVC, metav1.UpdateOptions{}); err != nil {
 					t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 				}
 			},
@@ -1945,23 +1944,23 @@ func TestBindPodVolumes(t *testing.T) {
 		"binding-dynamic-pv-succeeds-after-time": {
 			claimToProvision: pvcSetSelectedNode(provisionedPVC, "node1"),
 			initPVCs:         []*v1.PersistentVolumeClaim{provisionedPVC},
-			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
+			delayFunc: func(t *testing.T, ctx context.Context, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
 				pvc := pvcs[0]
 				// Update PVC to be fully bound to PV
-				newPVC, err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
+				newPVC, err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
 				if err != nil {
 					t.Errorf("failed to get PVC %q: %v", pvc.Name, err)
 					return
 				}
 				dynamicPV := makeTestPV("dynamic-pv", "node1", "1G", "1", newPVC, waitClass)
-				dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV, metav1.CreateOptions{})
+				dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(ctx, dynamicPV, metav1.CreateOptions{})
 				if err != nil {
 					t.Errorf("failed to create PV %q: %v", dynamicPV.Name, err)
 					return
 				}
 				newPVC.Spec.VolumeName = dynamicPV.Name
 				metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, volume.AnnBindCompleted, "yes")
-				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
+				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(ctx, newPVC, metav1.UpdateOptions{}); err != nil {
 					t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 				}
 			},
@@ -1978,8 +1977,8 @@ func TestBindPodVolumes(t *testing.T) {
 			binding:  makeBinding(unboundPVC, pvNode1aBound),
 			initPVs:  []*v1.PersistentVolume{pvNode1a},
 			initPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
-			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
-				testEnv.client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
+			delayFunc: func(t *testing.T, ctx context.Context, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
+				testEnv.client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
 			},
 			shouldFail: true,
 		},
@@ -1999,10 +1998,10 @@ func TestBindPodVolumes(t *testing.T) {
 			binding:  makeBinding(unboundPVC, pvNode1aBound),
 			initPVs:  []*v1.PersistentVolume{pvNode1a},
 			initPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
-			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
+			delayFunc: func(t *testing.T, ctx context.Context, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
 				pvc := pvcs[0]
 				// Delete PVC will fail check
-				if err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}); err != nil {
+				if err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{}); err != nil {
 					t.Errorf("failed to delete PVC %q: %v", pvc.Name, err)
 				}
 			},
@@ -2020,12 +2019,12 @@ func TestBindPodVolumes(t *testing.T) {
 			initPVCs:         []*v1.PersistentVolumeClaim{selectedNodePVC},
 			claimToProvision: selectedNodePVC,
 			nodes:            []*v1.Node{node1, node2},
-			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
+			delayFunc: func(t *testing.T, ctx context.Context, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
 				// Update PVC to be fully bound to a PV with a different node
 				newPVC := pvcs[0].DeepCopy()
 				newPVC.Spec.VolumeName = pvNode2.Name
 				metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, volume.AnnBindCompleted, "yes")
-				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
+				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(ctx, newPVC, metav1.UpdateOptions{}); err != nil {
 					t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 				}
 			},
@@ -2036,7 +2035,6 @@ func TestBindPodVolumes(t *testing.T) {
 	run := func(t *testing.T, scenario scenarioType) {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 
 		// Setup
 		pod := makePod("test-pod").
 			withNamespace("testns").
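
The ctx, cancel := context.WithCancel(context.Background()) / defer cancel() pair at the top of run() is the conventional shape for a test-scoped context. As an aside, since Go 1.14 the cancel can instead be registered with t.Cleanup, which also fires after subtests and registered helpers finish; a sketch of that alternative, not what this commit does:

package main

import (
	"context"
	"testing"
)

// testContext returns a context that is canceled when the test and all of its
// registered cleanups finish. Sketch only; the tests in this diff keep the
// explicit defer cancel().
func testContext(t *testing.T) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	return ctx
}

func TestExample(t *testing.T) {
	ctx := testContext(t)
	_ = ctx // pass into helpers exactly as the refactored tests do
}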
@@ -2063,13 +2061,13 @@ func TestBindPodVolumes(t *testing.T) {
 
 		// Before Execute
 		if scenario.apiPV != nil {
-			_, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV, metav1.UpdateOptions{})
+			_, err := testEnv.client.CoreV1().PersistentVolumes().Update(ctx, scenario.apiPV, metav1.UpdateOptions{})
 			if err != nil {
 				t.Fatalf("failed to update PV %q", scenario.apiPV.Name)
 			}
 		}
 		if scenario.apiPVC != nil {
-			_, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC, metav1.UpdateOptions{})
+			_, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(ctx, scenario.apiPVC, metav1.UpdateOptions{})
 			if err != nil {
 				t.Fatalf("failed to update PVC %q", getPVCName(scenario.apiPVC))
 			}
@@ -2080,7 +2078,7 @@ func TestBindPodVolumes(t *testing.T) {
 			time.Sleep(5 * time.Second)
 			// Sleep a while to run after bindAPIUpdate in BindPodVolumes
 			klog.V(5).InfoS("Running delay function")
-			scenario.delayFunc(t, testEnv, pod, scenario.initPVs, scenario.initPVCs)
+			scenario.delayFunc(t, ctx, testEnv, pod, scenario.initPVs, scenario.initPVCs)
 		}(scenario)
 	}
 
@@ -333,8 +333,8 @@ func TestDefaultErrorFunc_NodeNotFound(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			stopCh := make(chan struct{})
-			defer close(stopCh)
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
 
 			client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}}, &v1.NodeList{Items: tt.nodes})
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
@@ -343,14 +343,14 @@ func TestDefaultErrorFunc_NodeNotFound(t *testing.T) {
 			podInformer.Informer().GetStore().Add(testPod)
 
 			queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())))
-			schedulerCache := internalcache.New(30*time.Second, stopCh)
+			schedulerCache := internalcache.New(30*time.Second, ctx.Done())
 
 			for i := range tt.nodes {
 				node := tt.nodes[i]
 				// Add node to schedulerCache no matter it's deleted in API server or not.
 				schedulerCache.AddNode(&node)
 				if node.Name == tt.nodeNameToDelete {
-					client.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{})
+					client.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
 				}
 			}
 
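
The second file swaps a hand-rolled stop channel for the same test-scoped context: ctx.Done() returns a <-chan struct{} that is closed on cancellation, so it can be handed directly to APIs that still expect a stop channel, as internalcache.New is in this hunk. A minimal sketch of the equivalence (waitForStop is a hypothetical stand-in):

package main

import (
	"context"
	"fmt"
)

// waitForStop stands in for any API that takes a stop channel, like the
// scheduler cache constructor above.
func waitForStop(stopCh <-chan struct{}) {
	<-stopCh
	fmt.Println("stopped")
}

func main() {
	// Before: stopCh := make(chan struct{}); defer close(stopCh)
	// After: one context serves both client calls and stop-channel consumers.
	ctx, cancel := context.WithCancel(context.Background())

	done := make(chan struct{})
	go func() {
		waitForStop(ctx.Done()) // ctx.Done() closes when cancel() runs
		close(done)
	}()

	cancel()
	<-done
}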