Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-20 10:20:51 +00:00)
Fix loop variable capture in parallel and Ginkgo tests
Fixes instances of #98213 (fully resolving #98213 will ultimately require a lint check). This commit fixes several instances of a common mistake made when writing parallel subtests or Ginkgo tests: essentially, in any test where the test closure is created dynamically in a loop and the loop does not wait for the closure to complete, the loop variables must be captured.

I am developing a linter that detects exactly this kind of mistake, and these are the only violations it found in this repo (the linter is not airtight, so there may be more). In Ginkgo tests, without this fix, only the last entry produced by the loop is actually tested. In parallel tests the problem is, I believe, the same, although as I understand it the observed behavior depends on execution speed.

I am waiting for CI to confirm that the tests still pass after this fix: since this is likely the first time some of these test cases are actually executed, they may be buggy, or the code they test may be.

One more instance of this mistake, in test/e2e/storage/csi_mock_volume.go, is still failing after the fix, so it has been left out of this commit and will be addressed in a separate one.
parent 132f29769d
commit eb317ecd40
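Editorial note (not part of the original commit message): the mistake being fixed is easiest to see in a self-contained example. Before Go 1.22, a for loop reused a single loop variable, and t.Parallel() defers the subtest body until the surrounding test function has returned, i.e. until after the loop has finished. A minimal sketch, with hypothetical test names and cases:

	package example

	import (
		"fmt"
		"testing"
	)

	// Uncaptured: every subtest closure shares the one loop variable `tc`.
	// Because t.Parallel() pauses each closure until the loop is done, all
	// of them observe the final test case, so only the last entry is tested
	// (the suite still passes, which is what makes the bug easy to miss).
	func TestUncaptured(t *testing.T) {
		testCases := []struct{ in, want string }{{"a", "a"}, {"b", "b"}}
		for _, tc := range testCases {
			t.Run(fmt.Sprintf("input=%s", tc.in), func(t *testing.T) {
				t.Parallel()
				if tc.in != tc.want {
					t.Errorf("got %q, want %q", tc.in, tc.want)
				}
			})
		}
	}

	// Captured: `tc := tc` re-declares the variable in the loop body, giving
	// each closure its own copy. This is the one-line fix the commit applies.
	func TestCaptured(t *testing.T) {
		testCases := []struct{ in, want string }{{"a", "a"}, {"b", "b"}}
		for _, tc := range testCases {
			tc := tc
			t.Run(fmt.Sprintf("input=%s", tc.in), func(t *testing.T) {
				t.Parallel()
				if tc.in != tc.want {
					t.Errorf("got %q, want %q", tc.in, tc.want)
				}
			})
		}
	}

Since Go 1.22 the loop variable is scoped per iteration and the extra capture line is no longer needed; the code touched here was written against earlier Go semantics.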
@@ -54,6 +54,7 @@ func TestIsNativeResource(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
+		tc := tc
 		t.Run(fmt.Sprintf("resourceName input=%s, expected value=%v", tc.resourceName, tc.expectVal), func(t *testing.T) {
 			t.Parallel()
 			v := IsNativeResource(tc.resourceName)
@@ -94,6 +95,8 @@ func TestHugePageSizeFromResourceName(t *testing.T) {
 	}
 
 	for i, tc := range testCases {
+		i := i
+		tc := tc
 		t.Run(fmt.Sprintf("resourceName input=%s, expected value=%v", tc.resourceName, tc.expectVal), func(t *testing.T) {
 			t.Parallel()
 			v, err := HugePageSizeFromResourceName(tc.resourceName)
@@ -161,6 +164,8 @@ func TestHugePageSizeFromMedium(t *testing.T) {
 		},
 	}
 	for i, tc := range testCases {
+		i := i
+		tc := tc
 		t.Run(tc.description, func(t *testing.T) {
 			t.Parallel()
 			v, err := HugePageSizeFromMedium(tc.medium)
@@ -201,6 +206,7 @@ func TestIsOvercommitAllowed(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
+		tc := tc
 		t.Run(fmt.Sprintf("resourceName input=%s, expected value=%v", tc.resourceName, tc.expectVal), func(t *testing.T) {
 			t.Parallel()
 			v := IsOvercommitAllowed(tc.resourceName)
@@ -909,6 +909,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 	}
 
 	for _, test := range tests {
+		test := test
 		t.Run(test.name, func(t *testing.T) {
 			t.Parallel()
 			run(t, test)
@@ -67,6 +67,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 	// make sure kubelet readonly (10255) and cadvisor (4194) ports are closed on the public IP address
 	disabledPorts := []int{ports.KubeletReadOnlyPort, 4194}
 	for _, port := range disabledPorts {
+		port := port
 		ginkgo.It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func() {
 			portClosedTest(f, node, port)
 		})
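Editorial note: the Ginkgo hunks (above and below) need the same capture for a slightly different reason. ginkgo.It only registers the spec body while the suite tree is being constructed; the enclosing loop completes long before any spec runs, so without the re-declaration every registered closure reads the loop variable's final value. A minimal sketch, assuming Ginkgo v1 import paths:

	package example

	import (
		"fmt"

		"github.com/onsi/ginkgo"
		"github.com/onsi/gomega"
	)

	// The loop registers one spec per port while the suite tree is built;
	// the spec bodies execute much later, after the loop has finished.
	var _ = ginkgo.Describe("loop variable capture", func() {
		for _, port := range []int{10255, 4194} {
			port := port // without this, every spec body would see port == 4194
			ginkgo.It(fmt.Sprintf("checks port %d", port), func() {
				gomega.Expect(port).To(gomega.BeNumerically(">", 0))
			})
		}
	})

This matches the commit message's observation that, without the fix, only the last entry produced by the loop is actually exercised.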
@@ -340,6 +340,7 @@ var _ = SIGDescribe("kubelet", func() {
 		for _, itArg := range deleteTests {
 			name := fmt.Sprintf(
 				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
+			itArg := itArg
 			ginkgo.It(name, func() {
 				totalPods := itArg.podsPerNode * numNodes
 				ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
@@ -432,6 +433,7 @@ var _ = SIGDescribe("kubelet", func() {
 
 		// execute It blocks from above table of tests
 		for _, t := range testTbl {
+			t := t
 			ginkgo.It(t.itDescr, func() {
 				pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)
 
@@ -362,7 +362,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
 			defer cleanup()
 
-			volumeType := t.volumeType
+			volumeType := test.volumeType
 			if volumeType == "" {
 				volumeType = pvcReference
 			}
@@ -1740,7 +1740,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			init(testParameters{
 				disableAttach:          true,
 				registerDriver:         true,
-				enableVolumeMountGroup: t.enableVolumeMountGroup,
+				enableVolumeMountGroup: test.enableVolumeMountGroup,
 				hooks:                  createFSGroupRequestPreHook(&nodeStageFsGroup, &nodePublishFsGroup),
 			})
 			defer cleanup()
@@ -1798,6 +1798,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		},
 	}
 	for _, test := range tests {
+		test := test
 		ginkgo.It(test.name, func() {
 			hooks := createPreHook("CreateSnapshot", test.createSnapshotHook)
 			init(testParameters{
@@ -1888,6 +1889,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		},
 	}
 	for _, test := range tests {
+		test := test
 		ginkgo.It(test.name, func() {
 			init(testParameters{
 				disableAttach: true,
@@ -54,6 +54,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() {
 
 	ginkgo.Describe("When pod refers to non-existent ephemeral storage", func() {
 		for _, testSource := range invalidEphemeralSource("pod-ephm-test") {
+			testSource := testSource
 			ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() {
 				pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source)
 				pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
@@ -252,6 +252,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 	for _, t := range tests {
 		numPDs := t.numPDs
 		numContainers := t.numContainers
+		t := t
 
 		ginkgo.It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func() {
 			e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")