commit 2896fb0be8

Merge pull request #83711 from tanjunchen/fix-staticcheck-test/e2e/network

fix staticcheck failures in test/e2e/common directory
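Read together, the hunks below clear several classes of staticcheck findings from test/e2e/common: redundant break statements at the end of switch cases, capitalized error strings, variable declarations immediately followed by their assignment, blank identifiers in two-value channel receives, values assigned but never read, a no-op fmt.Sprintf, and imports normalized to an explicit v1 alias. The commit itself does not name the checks; the IDs cited in the notes below (S1023, ST1005, S1021, S1005, SA4006, S1025, U1000) are inferred from the patterns, not taken from the source.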
@@ -76,7 +76,6 @@ test/e2e/apimachinery
 test/e2e/apps
 test/e2e/auth
 test/e2e/autoscaling
-test/e2e/common
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/manifest
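This first hunk edits what is evidently the repository's list of packages still exempt from staticcheck. The header's counts (7 lines before, 6 after) show a single deletion, and the commit title pins it to test/e2e/common: with the entry gone, CI enforces a clean staticcheck run for the package, which the code changes below deliver.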
@@ -70,7 +70,6 @@ var (
 	KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
 	KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
 	KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
-	subresource = "scale"
 )

 /*
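Again a single deletion (7 to 6). The unused subresource variable is the natural staticcheck target in this var block, presumably the unused check (U1000); the Kind* variables are all referenced in the switch further down. A hypothetical reduction of the pattern:

package demo

// staticcheck's unused check (U1000) reports identifiers that are
// declared but never referenced anywhere in the package.
var subresource = "scale" // hypothetical reduction, not the actual k8s file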
@@ -472,7 +471,6 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 	switch kind {
 	case KindRC:
 		framework.ExpectNoError(framework.RunRC(rcConfig))
-		break
 	case KindDeployment:
 		dpConfig := testutils.DeploymentConfig{
 			RCConfig: rcConfig,
@@ -481,14 +479,12 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 		dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo
 		dpConfig.ContainerDumpFunc = framework.LogFailedContainers
 		framework.ExpectNoError(testutils.RunDeployment(dpConfig))
-		break
 	case KindReplicaSet:
 		rsConfig := testutils.ReplicaSetConfig{
 			RCConfig: rcConfig,
 		}
 		ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
 		framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig))
-		break
 	default:
 		framework.Failf(invalidKind)
 	}
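Both hunks above delete a break sitting at the end of a case body. Go switch cases never fall through, so the statement is dead weight, which staticcheck reports as S1023 (omit redundant control flow). A minimal sketch with hypothetical names:

package demo

import "fmt"

// describe reduces the pattern fixed above: the return (or simply the
// end of the case body) already leaves the switch, so an explicit
// break adds nothing.
func describe(kind string) string {
	switch kind {
	case "ReplicationController":
		return "rc" // before the fix, a redundant break followed here
	case "Deployment":
		return "deploy"
	default:
		return fmt.Sprintf("unknown kind %q", kind)
	}
}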
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
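This import hunk, and two like it further down, rewrites "k8s.io/api/core/v1" as v1 "k8s.io/api/core/v1". Both forms bind the package to the same v1 identifier, since v1 is already its package name; the aliased spelling only makes that binding explicit at the import site, the form the Kubernetes tooling settled on. A sketch, assuming the k8s.io/api dependency is available:

package demo

import (
	v1 "k8s.io/api/core/v1" // explicit alias; the package is named v1 anyway
)

// Code referring to v1.Pod is untouched by the aliasing.
var _ v1.Pod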
@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 			p, err := podClient.Get(p.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)

-			isReady, err := testutils.PodRunningReady(p)
+			isReady, _ := testutils.PodRunningReady(p)
 			gomega.Expect(isReady).NotTo(gomega.BeTrue(), "pod should be not ready")

 			restartCount := getRestartCount(p)
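Here the second err from testutils.PodRunningReady was assigned but never read again (the earlier err had already been checked), an unread assignment staticcheck flags, most likely SA4006. The blank identifier documents that the error is deliberately ignored. A reduced sketch with a stand-in for PodRunningReady:

package demo

import "errors"

// podRunningReady is a hypothetical stand-in for testutils.PodRunningReady.
func podRunningReady() (bool, error) { return false, errors.New("not ready") }

func isPodReady() bool {
	// Before: isReady, err := podRunningReady(), with err never read
	// afterwards (SA4006). The blank identifier states the intent.
	isReady, _ := podRunningReady()
	return isReady
}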
@@ -269,7 +269,7 @@ func GetContainerStartedTime(p *v1.Pod, containerName string) (time.Time, error)
 			continue
 		}
 		if status.State.Running == nil {
-			return time.Time{}, fmt.Errorf("Container is not running")
+			return time.Time{}, fmt.Errorf("container is not running")
 		}
 		return status.State.Running.StartedAt.Time, nil
 	}
@@ -282,7 +282,7 @@ func GetTransitionTimeForReadyCondition(p *v1.Pod) (time.Time, error) {
 			return cond.LastTransitionTime.Time, nil
 		}
 	}
-	return time.Time{}, fmt.Errorf("No ready condition can be found for pod")
+	return time.Time{}, fmt.Errorf("no ready condition can be found for pod")
 }

 func getRestartCount(p *v1.Pod) int {
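These two hunks lowercase the first word of error strings, staticcheck's ST1005: error messages are routinely wrapped mid-sentence by callers, as in "getting start time: container is not running", where a capital letter reads wrong. Sketch:

package demo

import "fmt"

// ST1005: error strings should not be capitalized and should not end
// with punctuation, because they compose into larger messages.
func startedTime() error {
	return fmt.Errorf("container is not running")
}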
@@ -377,8 +377,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
 	}

 	ginkgo.By("creating the pod with failed condition")
-	var podClient *framework.PodClient
-	podClient = f.PodClient()
+	var podClient *framework.PodClient = f.PodClient()
 	pod = podClient.Create(pod)

 	err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
@@ -469,8 +468,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
 	}

 	ginkgo.By("creating the pod")
-	var podClient *framework.PodClient
-	podClient = f.PodClient()
+	var podClient *framework.PodClient = f.PodClient()
 	pod = podClient.Create(pod)

 	ginkgo.By("waiting for pod running")
@@ -607,8 +605,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {

 	// Start pod
 	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
-	var podClient *framework.PodClient
-	podClient = f.PodClient()
+	var podClient *framework.PodClient = f.PodClient()
 	pod = podClient.Create(pod)
 	defer func() {
 		e2epod.DeletePodWithWait(f.ClientSet, pod)
@@ -640,9 +637,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
 	})

 func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
-
-	var podClient *framework.PodClient
-	podClient = f.PodClient()
+	var podClient *framework.PodClient = f.PodClient()
 	pod = podClient.Create(pod)

 	defer func() {
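The four hunks above collapse a var declaration plus a separate assignment into one statement, the pattern staticcheck reports as S1021 (merge variable declaration with assignment). A reduced sketch with hypothetical types; podClient := newPodClient() would be the more idiomatic spelling, but the commit kept the explicit type:

package demo

type podClientT struct{}

func newPodClient() *podClientT { return &podClientT{} }

func setup() *podClientT {
	// Before (S1021):
	//   var podClient *podClientT
	//   podClient = newPodClient()
	var podClient *podClientT = newPodClient()
	return podClient
}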
@@ -21,7 +21,7 @@ import (
 	"time"

 	"github.com/onsi/ginkgo"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -26,7 +26,7 @@ import (

 	"golang.org/x/net/websocket"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -263,7 +263,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		select {
 		case <-listCompleted:
 			select {
-			case event, _ := <-w.ResultChan():
+			case event := <-w.ResultChan():
 				if event.Type != watch.Added {
 					framework.Failf("Failed to observe pod creation: %v", event)
 				}
@@ -313,7 +313,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		timer := time.After(framework.DefaultPodDeletionTimeout)
 		for !deleted {
 			select {
-			case event, _ := <-w.ResultChan():
+			case event := <-w.ResultChan():
 				switch event.Type {
 				case watch.Deleted:
 					lastPod = event.Object.(*v1.Pod)
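In a two-value channel receive the second value only reports whether the channel is closed; when it goes straight into the blank identifier, staticcheck (S1005, drop unnecessary use of the blank identifier) prefers the one-value form used after the fix. Sketch with hypothetical channels:

package demo

// nextEvent reduces the watch-loop pattern fixed above.
func nextEvent(events <-chan int, done <-chan struct{}) int {
	select {
	case event := <-events: // before: case event, _ := <-events:
		return event
	case <-done:
		return 0
	}
}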
@@ -604,10 +604,10 @@ var _ = framework.KubeDescribe("Pods", func() {
 				buf.Write(msg[1:])
 			}
 			if buf.Len() == 0 {
-				return fmt.Errorf("Unexpected output from server")
+				return fmt.Errorf("unexpected output from server")
 			}
 			if !strings.Contains(buf.String(), "remote execution test") {
-				return fmt.Errorf("Expected to find 'remote execution test' in %q", buf.String())
+				return fmt.Errorf("expected to find 'remote execution test' in %q", buf.String())
 			}
 			return nil
 		}, time.Minute, 10*time.Second).Should(gomega.BeNil())
@@ -86,7 +86,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected combined", func() {
 			},
 		}
 		f.TestContainerOutput("Check all projections for projected volume plugin", pod, 0, []string{
-			fmt.Sprintf("%s", podName),
+			podName,
 			"secret-value-1",
 			"configmap-value-1",
 		})
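fmt.Sprintf("%s", podName) formats a string as a string, a no-op conversion staticcheck reports as S1025; the fix passes the value through directly. Sketch:

package demo

import "fmt"

func expectedOutputs(podName string) []string {
	return []string{
		podName, // before (S1025): fmt.Sprintf("%s", podName)
		fmt.Sprintf("pod=%s", podName), // Sprintf still earns its keep when it formats
	}
}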
@@ -124,7 +124,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			framework.SkipIfNodeOSDistroIs("windows")
 			name := "explicit-nonroot-uid"
 			pod := makeNonRootPod(name, rootImage, pointer.Int64Ptr(1234))
-			pod = podClient.Create(pod)
+			podClient.Create(pod)

 			podClient.WaitForSuccess(name, framework.PodStartTimeout)
 			framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234"))
@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 		ginkgo.It("should run with an image specified user ID", func() {
 			name := "implicit-nonroot-uid"
 			pod := makeNonRootPod(name, nonRootImage, nil)
-			pod = podClient.Create(pod)
+			podClient.Create(pod)

 			podClient.WaitForSuccess(name, framework.PodStartTimeout)
 			framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234"))
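In both Security Context hunks the pod returned by podClient.Create was assigned back to pod and never read again, another unread-assignment finding (SA4006); the fix simply drops the assignment and discards the return value. Reduced sketch with hypothetical types mirroring the PodClient shape:

package demo

type pod struct{ name string }

type client struct{}

// create mirrors the e2e PodClient: it returns the pod the server created.
func (c *client) create(p *pod) *pod { return p }

func run(c *client) {
	p := &pod{name: "explicit-nonroot-uid"}
	// Before (SA4006): p = c.create(p), with p never read afterwards.
	c.create(p)
}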