Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-04 01:40:07 +00:00

Merge pull request #122727 from carlory/fix-122376

Fix flaking test: CSI Mock workload info CSI PodInfoOnMount Update

Commit 42c89fdc25
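In brief: the old checkPodLogs helper required an exact, caller-supplied number of NodePublishVolume calls, inspected only the attributes of that N-th call, and also insisted on seeing a NodeUnpublishVolume, which made the PodInfoOnMount tests sensitive to how many times RequiresRepublish had fired by the time the driver log was read. The replacement helper, checkNodePublishVolume, instead scans every NodePublishVolume call recorded by the mock driver and passes as soon as one call carries all expected volume-context attributes and none of the disallowed ones. The sketch below illustrates that skip-and-continue search in isolation; mockCall and findMatchingNodePublish are hypothetical stand-ins, not the e2e framework's drivers.MockCSICall or the helper added in this commit.

package main

import "fmt"

type mockCall struct {
	Method        string
	VolumeContext map[string]string
}

// findMatchingNodePublish reports whether any NodePublishVolume call carries
// every expected attribute ("<nonempty>" meaning "any non-empty value") and
// none of the disallowed keys, mirroring the skip-and-continue search that
// checkNodePublishVolume performs over the mock driver's call log.
func findMatchingNodePublish(calls []mockCall, expected map[string]string, disallowed map[string]bool) bool {
	for _, call := range calls {
		if call.Method != "NodePublishVolume" {
			continue // only NodePublishVolume calls are inspected
		}
		matches := true
		for k, want := range expected {
			got, found := call.VolumeContext[k]
			if !found || (want == "<nonempty>" && got == "") || (want != "<nonempty>" && got != want) {
				matches = false // missing or wrong attribute: skip this call
				break
			}
		}
		for k := range call.VolumeContext {
			if disallowed[k] {
				matches = false // unexpected attribute: skip this call
				break
			}
		}
		if matches {
			return true // one good call is enough
		}
	}
	return false
}

func main() {
	calls := []mockCall{
		// An early call without pod info, e.g. before a CSIDriver update took effect.
		{Method: "NodePublishVolume", VolumeContext: map[string]string{"csi.storage.k8s.io/ephemeral": "false"}},
		// A later call that carries the expected pod info.
		{Method: "NodePublishVolume", VolumeContext: map[string]string{
			"csi.storage.k8s.io/pod.name":      "pause-pod",
			"csi.storage.k8s.io/pod.namespace": "default",
		}},
	}
	expected := map[string]string{"csi.storage.k8s.io/pod.name": "pause-pod"}
	disallowed := map[string]bool{"csi.storage.k8s.io/serviceAccount.tokens": true}
	fmt.Println(findMatchingNodePublish(calls, expected, disallowed)) // true: the second call matches
}

With this shape, an extra republish simply adds more candidate calls to scan instead of shifting which call gets checked.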
@@ -46,6 +46,7 @@ import (
 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
+	"k8s.io/kubernetes/test/utils/format"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -702,65 +703,83 @@ func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentV
 	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 }
 
-func checkPodLogs(ctx context.Context, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool, expectedNumNodePublish int) error {
+// checkNodePublishVolume goes through all calls to the mock driver and checks that at least one NodePublishVolume call had expected attributes.
+// If a matched call is found but it has unexpected attributes, checkNodePublishVolume skips it and continues searching.
+func checkNodePublishVolume(ctx context.Context, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool) error {
 	expectedAttributes := map[string]string{}
+	unexpectedAttributeKeys := sets.New[string]()
 	if expectPodInfo {
 		expectedAttributes["csi.storage.k8s.io/pod.name"] = pod.Name
 		expectedAttributes["csi.storage.k8s.io/pod.namespace"] = pod.Namespace
 		expectedAttributes["csi.storage.k8s.io/pod.uid"] = string(pod.UID)
 		expectedAttributes["csi.storage.k8s.io/serviceAccount.name"] = "default"
+	} else {
+		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/pod.name")
+		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/pod.namespace")
+		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/pod.uid")
+		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/serviceAccount.name")
 	}
 	if csiInlineVolumesEnabled {
 		// This is only passed in 1.15 when the CSIInlineVolume feature gate is set.
 		expectedAttributes["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(ephemeralVolume)
+	} else {
+		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/ephemeral")
 	}
 
 	if csiServiceAccountTokenEnabled {
 		expectedAttributes["csi.storage.k8s.io/serviceAccount.tokens"] = "<nonempty>"
+	} else {
+		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/serviceAccount.tokens")
 	}
 
-	// Find NodePublish in the GRPC calls.
-	foundAttributes := sets.NewString()
-	numNodePublishVolume := 0
-	numNodeUnpublishVolume := 0
 	calls, err := getCalls(ctx)
 	if err != nil {
 		return err
 	}
 
+	var volumeContexts []map[string]string
 	for _, call := range calls {
-		switch call.Method {
-		case "NodePublishVolume":
-			numNodePublishVolume++
-			if numNodePublishVolume == expectedNumNodePublish {
-				// Check that NodePublish had expected attributes for last of expected volume
-				for k, v := range expectedAttributes {
-					vv, found := call.Request.VolumeContext[k]
-					if found && (v == vv || (v == "<nonempty>" && len(vv) != 0)) {
-						foundAttributes.Insert(k)
-						framework.Logf("Found volume attribute %s: %s", k, vv)
-					}
-				}
-			}
-		case "NodeUnpublishVolume":
-			framework.Logf("Found NodeUnpublishVolume: %+v", call)
-			numNodeUnpublishVolume++
-		}
-	}
-	if numNodePublishVolume < expectedNumNodePublish {
-		return fmt.Errorf("NodePublish should be called at least %d", expectedNumNodePublish)
+		if call.Method != "NodePublishVolume" {
+			continue
 		}
 
-	if numNodeUnpublishVolume == 0 {
-		return fmt.Errorf("NodeUnpublish was never called")
+		volumeCtx := call.Request.VolumeContext
+		volumeContexts = append(volumeContexts, volumeCtx)
+
+		// Check that NodePublish had expected attributes
+		foundAttributes := sets.NewString()
+		for k, v := range expectedAttributes {
+			vv, found := volumeCtx[k]
+			if found && (v == vv || (v == "<nonempty>" && len(vv) != 0)) {
+				foundAttributes.Insert(k)
+			}
 		}
 		if foundAttributes.Len() != len(expectedAttributes) {
-		return fmt.Errorf("number of found volume attributes does not match, expected %d, got %d", len(expectedAttributes), foundAttributes.Len())
+			framework.Logf("Skipping the NodePublishVolume call: expected attribute %+v, got %+v", format.Object(expectedAttributes, 1), format.Object(volumeCtx, 1))
+			continue
 		}
 
+		// Check that NodePublish had no unexpected attributes
+		unexpectedAttributes := make(map[string]string)
+		for k := range volumeCtx {
+			if unexpectedAttributeKeys.Has(k) {
+				unexpectedAttributes[k] = volumeCtx[k]
+			}
+		}
+		if len(unexpectedAttributes) != 0 {
+			framework.Logf("Skipping the NodePublishVolume call because it contains unexpected attributes %+v", format.Object(unexpectedAttributes, 1))
+			continue
+		}
+
 		return nil
 	}
 
+	if len(volumeContexts) == 0 {
+		return fmt.Errorf("NodePublishVolume was never called")
+	}
+
+	return fmt.Errorf("NodePublishVolume was called %d times, but no call had expected attributes %s or calls have unwanted attributes key %+v", len(volumeContexts), format.Object(expectedAttributes, 1), unexpectedAttributeKeys.UnsortedList())
 }
 
 // createFSGroupRequestPreHook creates a hook that records the fsGroup passed in
 // through NodeStageVolume and NodePublishVolume calls.
 func createFSGroupRequestPreHook(nodeStageFsGroup, nodePublishFsGroup *string) *drivers.Hooks {
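A side note on the set types appearing in the hunk above: the disallowed keys are tracked with the generic Set created by sets.New[string] from k8s.io/apimachinery/pkg/util/sets, while foundAttributes keeps using the older sets.NewString helper. A minimal, standalone sketch of the three operations the new code relies on (Insert, Has, UnsortedList) follows; the attribute keys are copied from the diff, the surrounding program is purely illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Keys that must NOT appear in a NodePublishVolume volume context.
	unexpectedAttributeKeys := sets.New[string]()
	unexpectedAttributeKeys.Insert("csi.storage.k8s.io/ephemeral")
	unexpectedAttributeKeys.Insert("csi.storage.k8s.io/serviceAccount.tokens")

	// Membership test, as used when scanning a call's volume context.
	fmt.Println(unexpectedAttributeKeys.Has("csi.storage.k8s.io/ephemeral")) // true

	// Unordered dump, as used in the final error message.
	fmt.Println(unexpectedAttributeKeys.UnsortedList())
}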
@@ -79,10 +79,8 @@ var _ = utils.SIGDescribe("CSI Mock volume service account token", func() {
 				framework.ExpectNoError(err, "Failed to start pod: %v", err)
 
 				// sleep to make sure RequiresRepublish triggers more than 1 NodePublishVolume
-				numNodePublishVolume := 1
 				if test.deployCSIDriverObject && csiServiceAccountTokenEnabled {
 					time.Sleep(5 * time.Second)
-					numNodePublishVolume = 2
 				}
 
 				ginkgo.By("Deleting the previously created pod")
@@ -90,7 +88,7 @@ var _ = utils.SIGDescribe("CSI Mock volume service account token", func() {
 				framework.ExpectNoError(err, "while deleting")
 
 				ginkgo.By("Checking CSI driver logs")
-				err = checkPodLogs(ctx, m.driver.GetCalls, pod, false, false, false, test.deployCSIDriverObject && csiServiceAccountTokenEnabled, numNodePublishVolume)
+				err = checkNodePublishVolume(ctx, m.driver.GetCalls, pod, false, false, false, test.deployCSIDriverObject && csiServiceAccountTokenEnabled)
 				framework.ExpectNoError(err)
 			})
 		}
@@ -34,41 +34,30 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	m := newMockDriverSetup(f)
 	ginkgo.Context("CSI workload information using mock driver", func() {
-		var (
-			podInfoTrue = true
-			podInfoFalse = false
-		)
 		tests := []struct {
 			name string
-			podInfoOnMount *bool
+			podInfoOnMount bool
 			deployClusterRegistrar bool
 			expectPodInfo bool
 			expectEphemeral bool
 		}{
-			{
-				name: "should not be passed when podInfoOnMount=nil",
-				podInfoOnMount: nil,
-				deployClusterRegistrar: true,
-				expectPodInfo: false,
-				expectEphemeral: false,
-			},
 			{
 				name: "should be passed when podInfoOnMount=true",
-				podInfoOnMount: &podInfoTrue,
+				podInfoOnMount: true,
 				deployClusterRegistrar: true,
 				expectPodInfo: true,
 				expectEphemeral: false,
 			},
 			{
 				name: "contain ephemeral=true when using inline volume",
-				podInfoOnMount: &podInfoTrue,
+				podInfoOnMount: true,
 				deployClusterRegistrar: true,
 				expectPodInfo: true,
 				expectEphemeral: true,
 			},
 			{
 				name: "should not be passed when podInfoOnMount=false",
-				podInfoOnMount: &podInfoFalse,
+				podInfoOnMount: false,
 				deployClusterRegistrar: true,
 				expectPodInfo: false,
 				expectEphemeral: false,
@@ -85,45 +74,31 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
 			ginkgo.It(t.name, func(ctx context.Context) {
 				m.init(ctx, testParameters{
 					registerDriver: test.deployClusterRegistrar,
-					podInfo: test.podInfoOnMount})
+					podInfo: &test.podInfoOnMount})
 
 				ginkgo.DeferCleanup(m.cleanup)
 
-				waitUntilPodInfoInLog(ctx, m, test.expectPodInfo, test.expectEphemeral, 1)
+				waitUntilPodInfoInLog(ctx, m, test.expectPodInfo, test.expectEphemeral)
 
 			})
 		}
 	})
 
 	ginkgo.Context("CSI PodInfoOnMount Update", func() {
-		var (
-			podInfoTrue = true
-			podInfoFalse = false
-		)
 		tests := []struct {
 			name string
-			oldPodInfoOnMount *bool
-			newPodInfoOnMount *bool
+			oldPodInfoOnMount bool
+			newPodInfoOnMount bool
 		}{
 			{
 				name: "should not be passed when update from true to false",
-				oldPodInfoOnMount: &podInfoTrue,
-				newPodInfoOnMount: &podInfoFalse,
+				oldPodInfoOnMount: true,
+				newPodInfoOnMount: false,
 			},
-			{
-				name: "should not be passed when update from true to nil",
-				oldPodInfoOnMount: &podInfoTrue,
-				newPodInfoOnMount: nil,
-			},
 			{
 				name: "should be passed when update from false to true",
-				oldPodInfoOnMount: &podInfoFalse,
-				newPodInfoOnMount: &podInfoTrue,
+				oldPodInfoOnMount: false,
+				newPodInfoOnMount: true,
 			},
-			{
-				name: "should be passed when update from nil to true",
-				oldPodInfoOnMount: nil,
-				newPodInfoOnMount: &podInfoTrue,
-			},
 		}
 		for _, t := range tests {
@@ -131,21 +106,20 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
 			ginkgo.It(t.name, func(ctx context.Context) {
 				m.init(ctx, testParameters{
 					registerDriver: true,
-					podInfo: test.oldPodInfoOnMount})
+					podInfo: &test.oldPodInfoOnMount})
 
 				ginkgo.DeferCleanup(m.cleanup)
 
-				waitUntilPodInfoInLog(ctx, m, test.oldPodInfoOnMount != nil && *test.oldPodInfoOnMount, false, 1)
-				m.update(utils.PatchCSIOptions{PodInfo: test.newPodInfoOnMount})
-				waitUntilPodInfoInLog(ctx, m, test.newPodInfoOnMount != nil && *test.newPodInfoOnMount, false, 2)
-
+				waitUntilPodInfoInLog(ctx, m, test.oldPodInfoOnMount, false)
+				m.update(utils.PatchCSIOptions{PodInfo: &test.newPodInfoOnMount})
+				waitUntilPodInfoInLog(ctx, m, test.newPodInfoOnMount, false)
 			})
 		}
 	})
 
 })
 
-func waitUntilPodInfoInLog(ctx context.Context, m *mockDriverSetup, expectPodInfo, expectEphemeral bool, expectedNumNodePublish int) {
+func waitUntilPodInfoInLog(ctx context.Context, m *mockDriverSetup, expectPodInfo, expectEphemeral bool) {
 	var err error
 
 	utils.WaitUntil(framework.Poll, framework.PodStartTimeout, func() bool {
@@ -176,7 +150,7 @@ func waitUntilPodInfoInLog(ctx context.Context, m *mockDriverSetup, expectPodInf
 		framework.ExpectNoError(err, "while deleting")
 
 		ginkgo.By("Checking CSI driver logs")
-		err = checkPodLogs(ctx, m.driver.GetCalls, pod, expectPodInfo, expectEphemeral, csiInlineVolumesEnabled, false, expectedNumNodePublish)
+		err = checkNodePublishVolume(ctx, m.driver.GetCalls, pod, expectPodInfo, expectEphemeral, csiInlineVolumesEnabled, false)
 		framework.ExpectNoError(err)
 	})
 
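One more pattern from the test-table hunks worth spelling out: once the podInfoOnMount=nil cases were dropped, the tables could store plain bool fields, and the *bool that testParameters and utils.PatchCSIOptions still accept is produced with &test.podInfoOnMount (or &test.newPodInfoOnMount) at the call site. The sketch below shows that simplification in isolation; runCase and the table here are hypothetical, not the actual e2e helpers.

package main

import "fmt"

// runCase stands in for a helper that, like testParameters or
// utils.PatchCSIOptions in the diff, takes *bool so that "unset" stays possible.
func runCase(podInfo *bool) {
	if podInfo == nil {
		fmt.Println("podInfoOnMount unset")
		return
	}
	fmt.Println("podInfoOnMount =", *podInfo)
}

func main() {
	// Before: shared pointer variables (like podInfoTrue/podInfoFalse) so that
	// table entries could also hold nil.
	podInfoTrue := true
	runCase(&podInfoTrue)

	// After: once the nil cases are gone, the table stores plain bools and the
	// address of the per-iteration copy is taken at the call site.
	cases := []struct {
		name           string
		podInfoOnMount bool
	}{
		{name: "podInfoOnMount=true", podInfoOnMount: true},
		{name: "podInfoOnMount=false", podInfoOnMount: false},
	}
	for _, test := range cases {
		test := test // keep a distinct copy whose address is safe to retain
		runCase(&test.podInfoOnMount)
	}
}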