From 71a36529d12986f625c9f31db659f8463d347078 Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Mon, 13 Mar 2023 16:19:56 -0600
Subject: [PATCH] kubelet: TestSyncKnownPods should not race

SyncKnownPods began triggering UpdatePod() for pods that have been
orphaned by desired config to ensure pods run to termination. This
test reads a mutex-protected value while pod workers are running in
the background and as a consequence triggers a data race.

Wait for the workers to stabilize before reading the value. Other
tests validate that the correct sync events are triggered (see
kubelet_pods_test.go#TestKubelet_HandlePodCleanups for full
verification of this behavior).

It is slightly concerning that I was unable to recreate the race
locally even under stress testing, but I cannot identify why.
---
 pkg/kubelet/pod_workers_test.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pkg/kubelet/pod_workers_test.go b/pkg/kubelet/pod_workers_test.go
index 32c8b3ce38b..976cd25bdb6 100644
--- a/pkg/kubelet/pod_workers_test.go
+++ b/pkg/kubelet/pod_workers_test.go
@@ -1637,7 +1637,8 @@ func TestSyncKnownPods(t *testing.T) {
 	// verify workers that are not terminated stay open even if config no longer
 	// sees them
 	podWorkers.SyncKnownPods(nil)
-	if len(podWorkers.podUpdates) != 2 {
+	drainAllWorkers(podWorkers)
+	if len(podWorkers.podUpdates) != 0 {
 		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
 	}
 	if len(podWorkers.podSyncStatuses) != 2 {
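
Note (not part of the patch): for readers outside the kubelet codebase, below
is a minimal, self-contained sketch of the drain-before-assert pattern the fix
relies on. The pool type and its start/drain methods are hypothetical
stand-ins for podWorkers and the test's drainAllWorkers helper, not kubelet
APIs; the point is only that waiting for worker goroutines to exit makes the
subsequent read of shared state race-free.

// Hypothetical illustration of the drain-before-assert pattern.
package main

import (
	"fmt"
	"sync"
)

// pool loosely mimics podWorkers: one goroutine per pod consumes updates
// from a channel and mutates shared state under a mutex while running.
type pool struct {
	mu       sync.Mutex
	statuses map[string]string
	wg       sync.WaitGroup
}

// start launches a worker that applies updates until its channel closes.
func (p *pool) start(pod string, updates <-chan string) {
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		for u := range updates {
			p.mu.Lock()
			p.statuses[pod] = u // background write to shared state
			p.mu.Unlock()
		}
	}()
}

// drain blocks until every worker has exited, playing the role
// drainAllWorkers plays in the test. WaitGroup.Wait establishes a
// happens-before edge with each worker's Done, so state read after
// drain cannot race the workers.
func (p *pool) drain() { p.wg.Wait() }

func main() {
	p := &pool{statuses: map[string]string{}}
	ch := make(chan string, 1)
	p.start("pod-a", ch)
	ch <- "SyncPod"
	close(ch)

	// Reading p.statuses before this point would race the worker, just
	// as the unpatched test raced podWorkers while asserting on
	// podWorkers.podUpdates.
	p.drain()
	fmt.Println(p.statuses["pod-a"]) // safe: all writers have exited
}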