Merge pull request #111846 from omertuc/fixparallelclosure

Fix loop-variable capture in parallel and Ginkgo tests
commit d5fdf3135e
Kubernetes Prow Robot, 2022-08-15 11:51:55 -07:00 (committed by GitHub)
7 changed files with 16 additions and 2 deletions
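
The pattern fixed throughout this commit is Go's classic loop-variable capture bug: before Go 1.22, a range variable is a single variable reused on every iteration, so a closure that is registered inside the loop but runs later (a t.Parallel() subtest, or a Ginkgo It body) can observe the last element instead of the one it was created for. Re-declaring the variable inside the loop body (tc := tc) gives each closure its own copy. A minimal, self-contained sketch of the table-test case (illustrative only, not code from this commit):

package example

import (
	"fmt"
	"testing"
)

func TestLoopCapture(t *testing.T) {
	testCases := []struct{ in, want string }{
		{"a", "a"},
		{"b", "b"},
	}
	for _, tc := range testCases {
		tc := tc // shadow the loop variable; each closure captures its own copy
		t.Run(fmt.Sprintf("input=%s", tc.in), func(t *testing.T) {
			t.Parallel() // the subtest body runs after the loop has moved on
			// Without the tc := tc shadow (pre-Go 1.22), both subtests
			// could observe the final element, {"b", "b"}.
			if got := tc.in; got != tc.want {
				t.Errorf("got %q, want %q", got, tc.want)
			}
		})
	}
}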

File 1 of 7

@@ -54,6 +54,7 @@ func TestIsNativeResource(t *testing.T) {
 	}
 	for _, tc := range testCases {
+		tc := tc
 		t.Run(fmt.Sprintf("resourceName input=%s, expected value=%v", tc.resourceName, tc.expectVal), func(t *testing.T) {
 			t.Parallel()
 			v := IsNativeResource(tc.resourceName)
@@ -94,6 +95,8 @@ func TestHugePageSizeFromResourceName(t *testing.T) {
 	}
 	for i, tc := range testCases {
+		i := i
+		tc := tc
 		t.Run(fmt.Sprintf("resourceName input=%s, expected value=%v", tc.resourceName, tc.expectVal), func(t *testing.T) {
 			t.Parallel()
 			v, err := HugePageSizeFromResourceName(tc.resourceName)
@@ -161,6 +164,8 @@ func TestHugePageSizeFromMedium(t *testing.T) {
 		},
 	}
 	for i, tc := range testCases {
+		i := i
+		tc := tc
 		t.Run(tc.description, func(t *testing.T) {
 			t.Parallel()
 			v, err := HugePageSizeFromMedium(tc.medium)
@@ -201,6 +206,7 @@ func TestIsOvercommitAllowed(t *testing.T) {
 	}
 	for _, tc := range testCases {
+		tc := tc
 		t.Run(fmt.Sprintf("resourceName input=%s, expected value=%v", tc.resourceName, tc.expectVal), func(t *testing.T) {
 			t.Parallel()
 			v := IsOvercommitAllowed(tc.resourceName)

File 2 of 7

@@ -909,6 +909,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 	}
 	for _, test := range tests {
+		test := test
 		t.Run(test.name, func(t *testing.T) {
 			t.Parallel()
 			run(t, test)

File 3 of 7

@@ -67,6 +67,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 	// make sure kubelet readonly (10255) and cadvisor (4194) ports are closed on the public IP address
 	disabledPorts := []int{ports.KubeletReadOnlyPort, 4194}
 	for _, port := range disabledPorts {
+		port := port
 		ginkgo.It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func() {
 			portClosedTest(f, node, port)
 		})
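
The Ginkgo hunks in this commit need the same shadow for a slightly different reason: ginkgo.It only registers a spec while the suite tree is being built, and the spec body runs after the construction loop has finished, so without port := port every spec above would probe the final port in the slice. A self-contained sketch of the same pattern under Ginkgo v2 (the suite and spec names are illustrative, not from the Kubernetes tree):

package example_test

import (
	"fmt"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestLoopSpecs(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "loop-capture sketch")
}

var _ = ginkgo.Describe("specs built in a loop", func() {
	for _, port := range []int{10255, 4194} {
		port := port // each It body now closes over its own copy
		ginkgo.It(fmt.Sprintf("checks port %d", port), func() {
			// This body runs long after the loop has ended; without the
			// shadow (pre-Go 1.22), both specs would see port == 4194.
			gomega.Expect(port).To(gomega.BeNumerically(">", 0))
		})
	}
})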

File 4 of 7

@@ -340,6 +340,7 @@ var _ = SIGDescribe("kubelet", func() {
 		for _, itArg := range deleteTests {
 			name := fmt.Sprintf(
 				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
+			itArg := itArg
 			ginkgo.It(name, func() {
 				totalPods := itArg.podsPerNode * numNodes
 				ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
@@ -432,6 +433,7 @@ var _ = SIGDescribe("kubelet", func() {
 		// execute It blocks from above table of tests
 		for _, t := range testTbl {
+			t := t
 			ginkgo.It(t.itDescr, func() {
 				pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)

File 5 of 7

@@ -362,7 +362,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
 			defer cleanup()
-			volumeType := t.volumeType
+			volumeType := test.volumeType
 			if volumeType == "" {
 				volumeType = pvcReference
 			}
@@ -1740,7 +1740,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			init(testParameters{
 				disableAttach: true,
 				registerDriver: true,
-				enableVolumeMountGroup: t.enableVolumeMountGroup,
+				enableVolumeMountGroup: test.enableVolumeMountGroup,
 				hooks: createFSGroupRequestPreHook(&nodeStageFsGroup, &nodePublishFsGroup),
 			})
 			defer cleanup()
@@ -1798,6 +1798,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		},
 	}
 	for _, test := range tests {
+		test := test
 		ginkgo.It(test.name, func() {
 			hooks := createPreHook("CreateSnapshot", test.createSnapshotHook)
 			init(testParameters{
@@ -1888,6 +1889,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		},
 	}
 	for _, test := range tests {
+		test := test
 		ginkgo.It(test.name, func() {
 			init(testParameters{
 				disableAttach: true,

File 6 of 7

@@ -54,6 +54,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() {
 	ginkgo.Describe("When pod refers to non-existent ephemeral storage", func() {
 		for _, testSource := range invalidEphemeralSource("pod-ephm-test") {
+			testSource := testSource
 			ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() {
 				pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source)
 				pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})

File 7 of 7

@@ -252,6 +252,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 	for _, t := range tests {
 		numPDs := t.numPDs
 		numContainers := t.numContainers
+		t := t
 		ginkgo.It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func() {
 			e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")