Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)
Merge pull request #86057 from SataQiu/fix-staticcheck-20191209

Fix staticcheck failures of e2e/storage/drivers and e2e/storage/testsuites

Commit: 30a5db136f
@@ -67,8 +67,6 @@ test/e2e/autoscaling
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/manifest
-test/e2e/storage/drivers
-test/e2e/storage/testsuites
 test/e2e/storage/utils
 test/e2e/storage/vsphere
 test/images/agnhost/dns
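The two removed entries drop test/e2e/storage/drivers and test/e2e/storage/testsuites from the list of packages still exempted from the staticcheck verification, so both packages must now pass the check cleanly.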
@@ -576,10 +576,6 @@ func (v *rbdVolume) DeleteVolume() {
 
 // Ceph
 type cephFSDriver struct {
-	serverIP  string
-	serverPod *v1.Pod
-	secret    *v1.Secret
-
 	driverInfo testsuites.DriverInfo
 }
 
@@ -690,8 +686,6 @@ func (v *cephVolume) DeleteVolume() {
 
 // Hostpath
 type hostPathDriver struct {
-	node v1.Node
-
 	driverInfo testsuites.DriverInfo
 }
 
@@ -758,8 +752,6 @@ func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType
 
 // HostPathSymlink
 type hostPathSymlinkDriver struct {
-	node v1.Node
-
 	driverInfo testsuites.DriverInfo
 }
 
@@ -649,14 +649,6 @@ func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCo
 	return opCounts{}, opCounts{}
 }
 
-func getTotOps(ops opCounts) int64 {
-	var tot = int64(0)
-	for _, count := range ops {
-		tot += count
-	}
-	return tot
-}
-
 func validateMigrationVolumeOpCounts(cs clientset.Interface, pluginName string, oldInTreeOps, oldMigratedOps opCounts) {
 	if len(pluginName) == 0 {
 		// This is a native CSI Driver and we don't check ops
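getTotOps had no remaining callers, which is the kind of dead code staticcheck's unused analysis reports. As a rough, self-contained illustration (not taken from the PR), an unexported, unreferenced helper like the one below produces the same class of finding when staticcheck runs over the package:

package demo

// sumOps mirrors the shape of the removed getTotOps helper: an unexported
// function with no callers anywhere in the package, so a staticcheck run
// over this package reports it as unused.
func sumOps(counts map[string]int64) int64 {
	var total int64
	for _, c := range counts {
		total += c
	}
	return total
}

Deleting such helpers outright, as the hunk above does, is usually the cleanest fix.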
@@ -468,7 +468,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
 		framework.ExpectNoError(err)
 	}
 	defer func() {
-		var errors map[string]error
+		errors := map[string]error{}
 		for _, claim := range createdClaims {
 			err := e2epv.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
 			if err != nil {
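The one-line change above replaces a nil map declaration with a map literal: a map declared with var but never initialized is nil, and any write to it panics at runtime. A minimal standalone sketch of the difference (illustrative only, not code from the PR):

package main

import "fmt"

func main() {
	// Declared but never initialized: the map is nil, so writing to it
	// would panic with "assignment to entry in nil map".
	var broken map[string]error
	fmt.Println(broken == nil) // true

	// Initialized with a literal, as in the fix: safe to write to.
	errs := map[string]error{}
	errs["claim-1"] = fmt.Errorf("delete failed")
	fmt.Println(len(errs)) // 1
}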
@@ -104,7 +104,6 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
	f := framework.NewDefaultFramework("topology")

	init := func() topologyTest {
		const numTestTopologies = 2

		l := topologyTest{}
@@ -191,12 +191,17 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		e2epod.SetAffinity(&selection, nodeName)
 		pod.Spec.Affinity = selection.Affinity
 		l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
 		framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit")
 
 		ginkgo.By("Waiting for the pod to get unschedulable with the right message")
 		err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
 			if pod.Status.Phase == v1.PodPending {
+				reg, err := regexp.Compile(`max.+volume.+count`)
+				if err != nil {
+					return false, err
+				}
 				for _, cond := range pod.Status.Conditions {
-					matched, _ := regexp.MatchString("max.+volume.+count", cond.Message)
+					matched := reg.MatchString(cond.Message)
 					if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" && matched {
 						return true, nil
 					}
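The volumelimits change compiles the pattern once, before the loop over pod conditions, instead of calling regexp.MatchString on every iteration (MatchString recompiles its pattern on each call). A small standalone sketch of the same idea, independent of the e2e framework (the messages below are made up for illustration):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	messages := []string{
		"0/3 nodes are available: 3 node(s) exceed max volume count",
		"some unrelated scheduling message",
	}

	// Compile the pattern once, outside the loop; MustCompile is fine here
	// because the pattern is a constant known to be valid.
	reg := regexp.MustCompile(`max.+volume.+count`)

	for _, msg := range messages {
		fmt.Println(reg.MatchString(msg)) // true, then false
	}
}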