fix staticcheck failures of e2e/storage/drivers e2e/storage/testsuites

This commit is contained in:
SataQiu 2019-12-10 16:09:29 +08:00
parent 2fbe432d23
commit 3ed535a89a
6 changed files with 7 additions and 21 deletions

View File

@@ -67,8 +67,6 @@ test/e2e/autoscaling
test/e2e/instrumentation/logging/stackdriver test/e2e/instrumentation/logging/stackdriver
test/e2e/instrumentation/monitoring test/e2e/instrumentation/monitoring
test/e2e/manifest test/e2e/manifest
test/e2e/storage/drivers
test/e2e/storage/testsuites
test/e2e/storage/utils test/e2e/storage/utils
test/e2e/storage/vsphere test/e2e/storage/vsphere
test/images/agnhost/dns test/images/agnhost/dns

View File

@@ -576,10 +576,6 @@ func (v *rbdVolume) DeleteVolume() {
// Ceph // Ceph
type cephFSDriver struct { type cephFSDriver struct {
serverIP string
serverPod *v1.Pod
secret *v1.Secret
driverInfo testsuites.DriverInfo driverInfo testsuites.DriverInfo
} }
@@ -690,8 +686,6 @@ func (v *cephVolume) DeleteVolume() {
// Hostpath // Hostpath
type hostPathDriver struct { type hostPathDriver struct {
node v1.Node
driverInfo testsuites.DriverInfo driverInfo testsuites.DriverInfo
} }
@@ -758,8 +752,6 @@ func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType
// HostPathSymlink // HostPathSymlink
type hostPathSymlinkDriver struct { type hostPathSymlinkDriver struct {
node v1.Node
driverInfo testsuites.DriverInfo driverInfo testsuites.DriverInfo
} }

View File

@@ -649,14 +649,6 @@ func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCo
return opCounts{}, opCounts{} return opCounts{}, opCounts{}
} }
func getTotOps(ops opCounts) int64 {
var tot = int64(0)
for _, count := range ops {
tot += count
}
return tot
}
func validateMigrationVolumeOpCounts(cs clientset.Interface, pluginName string, oldInTreeOps, oldMigratedOps opCounts) { func validateMigrationVolumeOpCounts(cs clientset.Interface, pluginName string, oldInTreeOps, oldMigratedOps opCounts) {
if len(pluginName) == 0 { if len(pluginName) == 0 {
// This is a native CSI Driver and we don't check ops // This is a native CSI Driver and we don't check ops

View File

@@ -468,7 +468,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
defer func() { defer func() {
var errors map[string]error errors := map[string]error{}
for _, claim := range createdClaims { for _, claim := range createdClaims {
err := e2epv.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace) err := e2epv.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
if err != nil { if err != nil {

View File

@@ -104,7 +104,6 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
f := framework.NewDefaultFramework("topology") f := framework.NewDefaultFramework("topology")
init := func() topologyTest { init := func() topologyTest {
const numTestTopologies = 2
l := topologyTest{} l := topologyTest{}

View File

@@ -191,12 +191,17 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
e2epod.SetAffinity(&selection, nodeName) e2epod.SetAffinity(&selection, nodeName)
pod.Spec.Affinity = selection.Affinity pod.Spec.Affinity = selection.Affinity
l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit")
ginkgo.By("Waiting for the pod to get unschedulable with the right message") ginkgo.By("Waiting for the pod to get unschedulable with the right message")
err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
if pod.Status.Phase == v1.PodPending { if pod.Status.Phase == v1.PodPending {
reg, err := regexp.Compile(`max.+volume.+count`)
if err != nil {
return false, err
}
for _, cond := range pod.Status.Conditions { for _, cond := range pod.Status.Conditions {
matched, _ := regexp.MatchString("max.+volume.+count", cond.Message) matched := reg.MatchString(cond.Message)
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" && matched { if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" && matched {
return true, nil return true, nil
} }