fix staticcheck failures of test/e2e/storage
parent 50437b4c5d
commit 27d645dc5b
@@ -67,7 +67,6 @@ test/e2e/autoscaling
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/manifest
-test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
 test/e2e/storage/utils
@@ -592,7 +592,10 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 })

 func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
-	var err error
+	reg, err := regexp.Compile(`max.+volume.+count`)
+	if err != nil {
+		return err
+	}
 	waitErr := wait.PollImmediate(10*time.Second, csiPodUnschedulableTimeout, func() (bool, error) {
 		pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 		if err != nil {
@@ -600,11 +603,10 @@ func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
 		}
 		conditions := pod.Status.Conditions
 		for _, condition := range conditions {
-			matched, _ := regexp.MatchString("max.+volume.+count", condition.Message)
+			matched := reg.MatchString(condition.Message)
 			if condition.Reason == v1.PodReasonUnschedulable && matched {
 				return true, nil
 			}
-
 		}
 		return false, nil
 	})
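The change above follows the usual staticcheck guidance for regular expressions used in loops: compile the pattern once, handle the compile error, and reuse the compiled *regexp.Regexp on each iteration instead of calling regexp.MatchString repeatedly and discarding its error. A minimal standalone sketch of that pattern; the messages slice below is hypothetical and only stands in for the pod conditions the test inspects:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Compile once, up front, and handle the error instead of discarding it.
	reg, err := regexp.Compile(`max.+volume.+count`)
	if err != nil {
		panic(err)
	}

	// Hypothetical condition messages, standing in for pod.Status.Conditions.
	messages := []string{
		"0/3 nodes are available: 3 node(s) exceed max volume count",
		"0/3 nodes are available: 3 Insufficient cpu",
	}
	for _, msg := range messages {
		// Reuse the compiled expression inside the loop.
		fmt.Printf("%q matched: %v\n", msg, reg.MatchString(msg))
	}
}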
@@ -406,6 +406,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 	}()

 	pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
+	framework.ExpectNoError(err, "error creating pods")

 	ginkgo.By("Ensuring each pod is running")

@@ -139,7 +139,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 			VolumeMode: pvc.Spec.VolumeMode,
 		})

-		pv, err = e2epv.CreatePV(c, pv)
+		_, err = e2epv.CreatePV(c, pv)
 		framework.ExpectNoError(err, "Error creating pv %v", err)

 		ginkgo.By("Waiting for PVC to be in bound phase")
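This hunk, and the later ones that switch to the blank identifier, address the same kind of staticcheck finding: the returned object was stored in a variable the test never read again, so the result is now discarded while the error check is kept. A small sketch of the pattern; createThing is a hypothetical stand-in for calls such as e2epv.CreatePV:

package main

import "fmt"

// createThing is a hypothetical stand-in for a call whose returned object
// the surrounding test never reads again.
func createThing() (string, error) { return "thing", nil }

func main() {
	// Before: thing, err := createThing() leaves thing unused, which
	// staticcheck reports (an SA4006-style "value never read" finding).
	// After: discard the unused result, keep checking the error.
	_, err := createThing()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("created")
}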
@@ -173,6 +173,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {

 		ginkgo.By("Getting a pod from deployment")
 		podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
+		framework.ExpectNoError(err, "While getting pods from deployment")
 		gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
 		pod := podList.Items[0]

@@ -137,7 +137,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 			VolumeMode: pvc.Spec.VolumeMode,
 		})

-		pv, err = e2epv.CreatePV(c, pv)
+		_, err = e2epv.CreatePV(c, pv)
 		framework.ExpectNoError(err, "Error creating pv %v", err)

 		ginkgo.By("Waiting for PVC to be in bound phase")
@@ -145,6 +145,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {

 		ginkgo.By("Getting a pod from deployment")
 		podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
+		framework.ExpectNoError(err, "While getting pods from deployment")
 		gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
 		pod := podList.Items[0]

@@ -177,7 +178,6 @@ func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps
 			case v1.PodFailed, v1.PodSucceeded:
 				return false, conditions.ErrPodCompleted
 			}
-			return false, nil
 		}
 		return false, err
 	})
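The return removed above sat at the end of a loop body, so the loop could never advance past its first iteration; staticcheck flags that shape (the surrounding loop is unconditionally terminated). A self-contained sketch with a hypothetical pods slice, showing the flagged form next to the fixed one:

package main

import "fmt"

func main() {
	pods := []string{"pod-a", "pod-b", "pod-c"} // hypothetical data

	// Flagged shape: the trailing return makes the loop stop after pod-a.
	firstOnly := func() (string, bool) {
		for _, p := range pods {
			if p == "pod-c" {
				return p, true
			}
			return "", false // staticcheck: loop is unconditionally terminated
		}
		return "", false
	}

	// Fixed shape: return early only on a real decision, otherwise keep iterating.
	scanAll := func() (string, bool) {
		for _, p := range pods {
			if p == "pod-c" {
				return p, true
			}
		}
		return "", false
	}

	fmt.Println(firstOnly()) // prints " false": the search never reaches pod-c
	fmt.Println(scanAll())   // prints "pod-c true"
}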
@@ -489,13 +489,17 @@ func detachPD(nodeName types.NodeName, pdName string) error {
 		return err

 	} else if framework.TestContext.Provider == "aws" {
-		client := ec2.New(session.New())
+		awsSession, err := session.NewSession()
+		if err != nil {
+			return fmt.Errorf("error creating session: %v", err)
+		}
+		client := ec2.New(awsSession)
 		tokens := strings.Split(pdName, "/")
 		awsVolumeID := tokens[len(tokens)-1]
 		request := ec2.DetachVolumeInput{
 			VolumeId: aws.String(awsVolumeID),
 		}
-		_, err := client.DetachVolume(&request)
+		_, err = client.DetachVolume(&request)
 		if err != nil {
 			return fmt.Errorf("error detaching EBS volume: %v", err)
 		}
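The AWS-related hunks replace the deprecated session.New constructor, which silently swallows configuration errors, with session.NewSession, which returns them so the test can fail early. A minimal sketch of building an EC2 client that way; newEC2Client is a hypothetical helper, not part of the test code:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// newEC2Client builds an EC2 client, surfacing session construction errors
// instead of ignoring them as the deprecated session.New did.
func newEC2Client(region string) (*ec2.EC2, error) {
	awsSession, err := session.NewSession()
	if err != nil {
		return nil, fmt.Errorf("error creating session: %v", err)
	}
	if region != "" {
		return ec2.New(awsSession, &aws.Config{Region: aws.String(region)}), nil
	}
	return ec2.New(awsSession), nil
}

func main() {
	client, err := newEC2Client("us-east-1")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("client ready:", client != nil)
}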
@@ -37,13 +37,13 @@ var _ = utils.SIGDescribe("Subpath", func() {
 	ginkgo.BeforeEach(func() {
 		ginkgo.By("Setting up data")
 		secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}}
-		secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
+		_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
 		if err != nil && !apierrors.IsAlreadyExists(err) {
 			framework.ExpectNoError(err, "while creating secret")
 		}

 		configmap := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap"}, Data: map[string]string{"configmap-key": "configmap-value"}}
-		configmap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
+		_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
 		if err != nil && !apierrors.IsAlreadyExists(err) {
 			framework.ExpectNoError(err, "while creating configmap")
 		}
@@ -126,7 +126,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		framework.ExpectNoError(err)

 		err = e2epod.WaitForPodRunningInNamespace(c, pod)
-		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod %s", pod.Name)
+		framework.ExpectNoError(err, "Error starting pod %s", pod.Name)

 		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
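The volume-metrics hunks all fix the same pattern: the test stored the wait result in err, then invoked the wait helper a second time inside the assertion, leaving the first err unread (which staticcheck reports) and potentially blocking for the full timeout twice. A generic sketch; waitForRunning and mustSucceed are hypothetical stand-ins for the framework helpers:

package main

import (
	"errors"
	"fmt"
)

// waitForRunning is a hypothetical stand-in for a blocking wait such as
// e2epod.WaitForPodRunningInNamespace.
func waitForRunning(pod string) error {
	if pod == "" {
		return errors.New("no pod name")
	}
	return nil
}

// mustSucceed is a hypothetical stand-in for framework.ExpectNoError.
func mustSucceed(err error, msg string) {
	if err != nil {
		panic(fmt.Sprintf("%s: %v", msg, err))
	}
}

func main() {
	// Wait once, keep the result, and assert on that stored result;
	// do not invoke the wait helper again inside the assertion.
	err := waitForRunning("test-pod")
	mustSucceed(err, "Error starting pod test-pod")
	fmt.Println("pod running")
}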
@@ -212,7 +212,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		framework.ExpectNoError(err)

 		err = e2epod.WaitForPodRunningInNamespace(c, pod)
-		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
+		framework.ExpectNoError(err, "Error starting pod ", pod.Name)

 		pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
@@ -269,7 +269,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		framework.ExpectNoError(err)

 		err = e2epod.WaitForPodRunningInNamespace(c, pod)
-		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
+		framework.ExpectNoError(err, "Error starting pod ", pod.Name)

 		pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
@@ -300,7 +300,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		framework.ExpectNoError(err)

 		err = e2epod.WaitForPodRunningInNamespace(c, pod)
-		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
+		framework.ExpectNoError(err, "Error starting pod ", pod.Name)

 		pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
@@ -337,7 +337,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		pod, err = c.CoreV1().Pods(ns).Create(pod)
 		framework.ExpectNoError(err)
 		err = e2epod.WaitForPodRunningInNamespace(c, pod)
-		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
+		framework.ExpectNoError(err, "Error starting pod ", pod.Name)
 		pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)

@@ -69,14 +69,20 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
 	volumeID := tokens[len(tokens)-1]

 	zone := framework.TestContext.CloudConfig.Zone
+
+	awsSession, err := session.NewSession()
+	if err != nil {
+		return fmt.Errorf("error creating session: %v", err)
+	}
+
 	if len(zone) > 0 {
 		region := zone[:len(zone)-1]
 		cfg := aws.Config{Region: &region}
 		framework.Logf("using region %s", region)
-		client = ec2.New(session.New(), &cfg)
+		client = ec2.New(awsSession, &cfg)
 	} else {
 		framework.Logf("no region configured")
-		client = ec2.New(session.New())
+		client = ec2.New(awsSession)
 	}

 	request := &ec2.DescribeVolumesInput{
@@ -415,13 +421,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {

 		ginkgo.By("Discovering an unmanaged zone")
 		allZones := sets.NewString() // all zones in the project
-		managedZones := sets.NewString() // subset of allZones

 		gceCloud, err := gce.GetGCECloud()
 		framework.ExpectNoError(err)

 		// Get all k8s managed zones (same as zones with nodes in them for test)
-		managedZones, err = gceCloud.GetAllZonesFromCloudProvider()
+		managedZones, err := gceCloud.GetAllZonesFromCloudProvider()
 		framework.ExpectNoError(err)

 		// Get a list of all zones in the project
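The hunk above removes the up-front managedZones := sets.NewString(), whose value was immediately overwritten, and declares the variable at the call site instead. The short declaration is still legal there even though err already exists, because at least one variable on the left-hand side is new. A tiny sketch of that rule with throwaway values:

package main

import "fmt"

func main() {
	value, err := fmt.Println("first") // declares both value and err
	// Reusing := is fine here: count is new even though err already exists.
	count, err := fmt.Println("second")
	fmt.Println(value, count, err)
}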
@@ -864,7 +869,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr
 		sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = defaultStr
 	}

-	sc, err = c.StorageV1().StorageClasses().Update(sc)
+	_, err = c.StorageV1().StorageClasses().Update(sc)
 	framework.ExpectNoError(err)

 	expectedDefault := false