Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 20:24:09 +00:00
Merge pull request #81715 from SataQiu/fix-test-e2e-framework

Fix static check for test/e2e/framework

Commit 81c0febcd0
@@ -98,17 +98,6 @@ test/e2e/apps
 test/e2e/auth
 test/e2e/autoscaling
 test/e2e/common
-test/e2e/framework
-test/e2e/framework/ingress
-test/e2e/framework/kubelet
-test/e2e/framework/node
-test/e2e/framework/pod
-test/e2e/framework/podlogs
-test/e2e/framework/providers/aws
-test/e2e/framework/providers/gce
-test/e2e/framework/psp
-test/e2e/framework/service
-test/e2e/framework/volume
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/lifecycle
@@ -95,12 +95,6 @@ const (
 	// IngressReqTimeout is the timeout on a single http request.
 	IngressReqTimeout = 10 * time.Second
 
-	// healthz port used to verify glbc restarted correctly on the master.
-	glbcHealthzPort = 8086
-
-	// General cloud resource poll timeout (eg: create static ip, firewall etc)
-	cloudResourcePollTimeout = 5 * time.Minute
-
 	// NEGAnnotation is NEG annotation.
 	NEGAnnotation = "cloud.google.com/neg"
 
@@ -304,14 +304,12 @@ func GetOneTimeResourceUsageOnNode(
 	// Process container infos that are relevant to us.
 	containers := containerNames()
 	usageMap := make(ResourceUsagePerContainer, len(containers))
-	observedContainers := []string{}
 	for _, pod := range summary.Pods {
 		for _, container := range pod.Containers {
 			isInteresting := false
 			for _, interestingContainerName := range containers {
 				if container.Name == interestingContainerName {
 					isInteresting = true
-					observedContainers = append(observedContainers, container.Name)
 					break
 				}
 			}
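For context: the dropped slice here appears to be a value that was only ever written, never read, which is the kind of finding staticcheck reports (result of append never observed). A tiny self-contained sketch of the same situation, with illustrative names that are not from this file:

package main

import "fmt"

func main() {
	containers := []string{"kubelet", "kube-proxy"}
	observed := []string{} // appended to below but never read again: staticcheck flags this
	usage := map[string]int{}
	for _, name := range containers {
		observed = append(observed, name)
		usage[name] = len(name)
	}
	fmt.Println(usage) // only usage is consumed; deleting observed silences the warning
}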
@@ -621,18 +621,6 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 	}
 }
 
-func (config *NetworkingTestConfig) cleanup() {
-	nsClient := config.getNamespacesClient()
-	nsList, err := nsClient.List(metav1.ListOptions{})
-	if err == nil {
-		for _, ns := range nsList.Items {
-			if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
-				nsClient.Delete(ns.Name, nil)
-			}
-		}
-	}
-}
-
 // shuffleNodes copies nodes from the specified slice into a copy in random
 // order. It returns a new slice.
 func shuffleNodes(nodes []v1.Node) []v1.Node {
@@ -713,10 +701,6 @@ func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInte
 	return config.f.ClientSet.CoreV1().Services(config.Namespace)
 }
 
-func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface {
-	return config.f.ClientSet.CoreV1().Namespaces()
-}
-
 // CheckReachabilityFromPod checks reachability from the specified pod.
 func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) {
 	cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
@@ -96,9 +96,10 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
 			if !hasNodeControllerTaints {
 				msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
 					conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
+			} else {
+				msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
+					conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
 			}
-			msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
-				conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
 			if !silent {
 				e2elog.Logf(msg)
 			}
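The old code overwrote msg unconditionally right after the if block, so the first Sprintf result was never used (the SA4006-style "value never used" finding); moving the second message into an else branch keeps both paths meaningful. A minimal stand-alone sketch of the pattern, with made-up values rather than the real node condition data:

package main

import "fmt"

func conditionMessage(conditionTrue, wantTrue, tainted bool) string {
	var msg string
	if !tainted {
		msg = fmt.Sprintf("condition is %v instead of %v", conditionTrue, wantTrue)
	} else {
		// Previously this assignment followed the if block unconditionally,
		// making the branch above dead; the else branch keeps both messages reachable.
		msg = fmt.Sprintf("condition is %v, but node is tainted", conditionTrue)
	}
	return msg
}

func main() {
	fmt.Println(conditionMessage(false, true, false))
	fmt.Println(conditionMessage(false, true, true))
}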
@@ -58,20 +58,6 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
 	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
 }
 
-// TODO: Move to its own subpkg.
-// expectNoErrorWithRetries checks if an error occurs with the given retry count.
-func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
-	var err error
-	for i := 0; i < maxRetries; i++ {
-		err = fn()
-		if err == nil {
-			return
-		}
-		e2elog.Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
-	}
-	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
-}
-
 func isElementOf(podUID types.UID, pods *v1.PodList) bool {
 	for _, pod := range pods.Items {
 		if pod.UID == podUID {
@@ -251,10 +251,10 @@ func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Wri
 				)
 			} else if cst.State.Running != nil {
 				fmt.Fprintf(buffer, "RUNNING")
-			} else if cst.State.Waiting != nil {
+			} else if cst.State.Terminated != nil {
 				fmt.Fprintf(buffer, "TERMINATED: %s - %s",
-					cst.State.Waiting.Reason,
-					cst.State.Waiting.Message,
+					cst.State.Terminated.Reason,
+					cst.State.Terminated.Message,
 				)
 			}
 			fmt.Fprintf(buffer, "\n")
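The branch that prints TERMINATED previously checked and dereferenced the Waiting state instead of the Terminated one; the fix reads fields only from the state it just checked for non-nil. A stripped-down sketch of the corrected shape, using local stand-in types rather than the real corev1.ContainerState:

package main

import "fmt"

// Stand-ins for the container state structs (illustration only).
type stateRunning struct{}
type stateTerminated struct{ Reason, Message string }
type containerState struct {
	Running    *stateRunning
	Terminated *stateTerminated
}

func describe(st containerState) string {
	switch {
	case st.Running != nil:
		return "RUNNING"
	case st.Terminated != nil:
		// Only read Terminated fields inside the branch that checked them.
		return fmt.Sprintf("TERMINATED: %s - %s", st.Terminated.Reason, st.Terminated.Message)
	default:
		return "UNKNOWN"
	}
}

func main() {
	fmt.Println(describe(containerState{Terminated: &stateTerminated{Reason: "Completed", Message: "done"}}))
}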
@@ -50,13 +50,21 @@ type Provider struct {
 
 // ResizeGroup resizes an instance group
 func (p *Provider) ResizeGroup(group string, size int32) error {
-	client := autoscaling.New(session.New())
+	awsSession, err := session.NewSession()
+	if err != nil {
+		return err
+	}
+	client := autoscaling.New(awsSession)
 	return awscloud.ResizeInstanceGroup(client, group, int(size))
 }
 
 // GroupSize returns the size of an instance group
 func (p *Provider) GroupSize(group string) (int, error) {
-	client := autoscaling.New(session.New())
+	awsSession, err := session.NewSession()
+	if err != nil {
+		return -1, err
+	}
+	client := autoscaling.New(awsSession)
 	instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
 	if err != nil {
 		return -1, fmt.Errorf("error describing instance group: %v", err)
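session.New in aws-sdk-go is deprecated because it hides configuration errors; session.NewSession returns the error so the caller can act on it, which is the pattern applied here and in the newAWSClient hunk below. A rough, hedged sketch of the pattern, assuming aws-sdk-go v1; the newAutoscalingClient helper is hypothetical and not part of this PR:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// newAutoscalingClient creates the session explicitly and surfaces its error
// instead of relying on the deprecated session.New.
func newAutoscalingClient() (*autoscaling.AutoScaling, error) {
	sess, err := session.NewSession()
	if err != nil {
		return nil, err
	}
	return autoscaling.New(sess), nil
}

func main() {
	client, err := newAutoscalingClient()
	if err != nil {
		log.Fatalf("failed to create AWS session: %v", err)
	}
	_ = client // use the client for ResizeInstanceGroup / DescribeInstanceGroup etc.
}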
@@ -151,5 +159,9 @@ func newAWSClient(zone string) *ec2.EC2 {
 		region := zone[:len(zone)-1]
 		cfg = &aws.Config{Region: aws.String(region)}
 	}
-	return ec2.New(session.New(), cfg)
+	session, err := session.NewSession()
+	if err != nil {
+		e2elog.Logf("Warning: failed to create aws session")
+	}
+	return ec2.New(session, cfg)
 }
@@ -70,11 +70,8 @@ type backendType string
 // IngressController manages implementation details of Ingress on GCE/GKE.
 type IngressController struct {
 	Ns           string
-	rcPath       string
 	UID          string
 	staticIPName string
-	rc           *v1.ReplicationController
-	svc          *v1.Service
 	Client       clientset.Interface
 	Cloud        framework.CloudConfig
 }
@@ -58,6 +58,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
 		e2elog.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
 
 		ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything())
+		framework.ExpectNoError(err)
 		allPods := ps.List()
 		originalPods := e2epod.FilterNonRestartablePods(allPods)
 		originalPodNames = make([]string, len(originalPods))
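The added line consumes an error that was previously assigned and then ignored, which is the general rule the static check enforces; the RestartKubelet hunk further down applies the same idea. A generic sketch of the rule outside the test framework (the file name is hypothetical):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Assigning err and never inspecting it is what the linter flags;
	// every returned error should be checked or explicitly discarded.
	f, err := os.Open("example.txt")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}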
@@ -118,7 +118,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
 	}
 
 	psp := privilegedPSP(podSecurityPolicyPrivileged)
-	psp, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
+	_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
 	if !apierrs.IsAlreadyExists(err) {
 		ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
 	}
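The Create result was assigned back to psp but never read again, so the fix discards it with the blank identifier while still keeping the error; the Glusterfs endpoints hunk at the end of the diff is the same change. A short sketch of the idiom, using a hypothetical create function rather than the Kubernetes client:

package main

import (
	"errors"
	"fmt"
)

var errAlreadyExists = errors.New("already exists")

// create stands in for a client call that returns the created object and an error.
func create(name string) (string, error) {
	return name, errAlreadyExists
}

func main() {
	// The returned object is not needed afterwards, so assign it to _ instead of
	// overwriting a variable that is never read again.
	_, err := create("privileged-psp")
	if !errors.Is(err, errAlreadyExists) && err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("created or already present")
}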
@@ -229,7 +229,7 @@ func (j *TestJig) CreateOnlyLocalNodePortService(namespace, serviceName string,
 func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool,
 	tweak func(svc *v1.Service)) *v1.Service {
 	ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and ExternalTrafficPolicy=Local")
-	svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
+	j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
 		svc.Spec.Type = v1.ServiceTypeLoadBalancer
 		// We need to turn affinity off for our LB distribution tests
 		svc.Spec.SessionAffinity = v1.ServiceAffinityNone
@@ -244,7 +244,7 @@ func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName stri
 		j.RunOrFail(namespace, nil)
 	}
 	ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
-	svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
+	svc := j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
 	j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
 	return svc
 }
@@ -253,7 +253,7 @@ func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName stri
 // for it to acquire an ingress IP.
 func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeout time.Duration, tweak func(svc *v1.Service)) *v1.Service {
 	ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer")
-	svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
+	j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
 		svc.Spec.Type = v1.ServiceTypeLoadBalancer
 		// We need to turn affinity off for our LB distribution tests
 		svc.Spec.SessionAffinity = v1.ServiceAffinityNone
@@ -263,7 +263,7 @@ func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeo
 	})
 
 	ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
-	svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
+	svc := j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
 	j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
 	return svc
 }
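The four service-jig hunks above are all the same finding: the value CreateTCPServiceOrFail assigned to svc was never read, so the first assignment is dropped and svc is declared at its first real use, the WaitForLoadBalancerOrFail call. A compact sketch of the shape of the fix, with stub functions standing in for the real jig API:

package main

import "fmt"

type service struct{ name, kind string }

// createService and waitForLoadBalancer are stubs standing in for the jig methods.
func createService(name string) *service { return &service{name: name, kind: "LoadBalancer"} }

func waitForLoadBalancer(name string) *service { return &service{name: name, kind: "LoadBalancer"} }

func main() {
	// Before: svc := createService(...) followed by svc = waitForLoadBalancer(...)
	// meant the first value was never used. After: discard the first result and
	// declare svc at its first real use.
	createService("demo")
	svc := waitForLoadBalancer("demo")
	fmt.Printf("%s is a %s service\n", svc.name, svc.kind)
}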
@@ -31,10 +31,6 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
-var (
-	cloudConfig = &TestContext.CloudConfig
-)
-
 // SetupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
 // There are certain operations we only want to run once per overall test invocation
 // (such as deleting old namespaces, or verifying that all system pods are running.
@@ -38,7 +38,6 @@ import (
 	"strings"
 	"sync"
 	"syscall"
-	"text/tabwriter"
 	"time"
 
 	"golang.org/x/net/websocket"
@@ -121,9 +120,6 @@ const (
 	// failures caused by leaked resources from a previous test run.
 	NamespaceCleanupTimeout = 15 * time.Minute
 
-	// Some pods can take much longer to get ready due to volume attach/detach latency.
-	slowPodStartTimeout = 15 * time.Minute
-
 	// ServiceStartTimeout is how long to wait for a service endpoint to be resolvable.
 	ServiceStartTimeout = 3 * time.Minute
 
@@ -150,10 +146,6 @@ const (
 	// PodReadyBeforeTimeout is how long pods have to be "ready" when a test begins.
 	PodReadyBeforeTimeout = 5 * time.Minute
 
-	// How long pods have to become scheduled onto nodes
-	podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
-
-	podRespondingTimeout = 15 * time.Minute
 	// ClaimProvisionTimeout is how long claims have to become dynamically provisioned.
 	ClaimProvisionTimeout = 5 * time.Minute
 
@@ -215,13 +207,6 @@ var (
 	// For parsing Kubectl version for version-skewed testing.
 	gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
 
-	// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
-	requiredPerNodePods = []*regexp.Regexp{
-		regexp.MustCompile(".*kube-proxy.*"),
-		regexp.MustCompile(".*fluentd-elasticsearch.*"),
-		regexp.MustCompile(".*node-problem-detector.*"),
-	}
-
 	// ServeHostnameImage is a serve hostname image name.
 	ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
 )
@@ -439,7 +424,7 @@ func getDefaultClusterIPFamily(c clientset.Interface) string {
 // ProviderIs returns true if the provider is included is the providers. Otherwise false.
 func ProviderIs(providers ...string) bool {
 	for _, provider := range providers {
-		if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) {
+		if strings.EqualFold(provider, TestContext.Provider) {
 			return true
 		}
 	}
@@ -449,7 +434,7 @@ func ProviderIs(providers ...string) bool {
 // MasterOSDistroIs returns true if the master OS distro is included in the supportedMasterOsDistros. Otherwise false.
 func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
 	for _, distro := range supportedMasterOsDistros {
-		if strings.ToLower(distro) == strings.ToLower(TestContext.MasterOSDistro) {
+		if strings.EqualFold(distro, TestContext.MasterOSDistro) {
 			return true
 		}
 	}
@@ -459,7 +444,7 @@ func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
 // NodeOSDistroIs returns true if the node OS distro is included in the supportedNodeOsDistros. Otherwise false.
 func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
 	for _, distro := range supportedNodeOsDistros {
-		if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) {
+		if strings.EqualFold(distro, TestContext.NodeOSDistro) {
 			return true
 		}
 	}
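The three hunks above replace strings.ToLower(a) == strings.ToLower(b) with strings.EqualFold(a, b), the substitution staticcheck suggests (SA6005): EqualFold performs the case-insensitive comparison without allocating two lowered copies and uses Unicode case folding. A quick runnable comparison:

package main

import (
	"fmt"
	"strings"
)

func main() {
	provider, configured := "GCE", "gce"

	// Equivalent results for ASCII input, but EqualFold avoids building
	// two temporary lowercase strings on every comparison.
	fmt.Println(strings.ToLower(provider) == strings.ToLower(configured)) // true
	fmt.Println(strings.EqualFold(provider, configured))                  // true

	// Unicode case folding is also handled, e.g. the Kelvin sign vs. 'k'.
	fmt.Println(strings.EqualFold("\u212A", "k")) // true
}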
@@ -523,32 +508,6 @@ func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVers
 // ProvidersWithSSH are those providers where each node is accessible with SSH
 var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
 
-type podCondition func(pod *v1.Pod) (bool, error)
-
-// errorBadPodsStates create error message of basic info of bad pods for debugging.
-func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
-	errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
-	// Print bad pods info only if there are fewer than 10 bad pods
-	if len(badPods) > 10 {
-		return errStr + "There are too many bad pods. Please check log for details."
-	}
-
-	buf := bytes.NewBuffer(nil)
-	w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
-	fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
-	for _, badPod := range badPods {
-		grace := ""
-		if badPod.DeletionGracePeriodSeconds != nil {
-			grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
-		}
-		podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
-			badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
-		fmt.Fprintln(w, podInfo)
-	}
-	w.Flush()
-	return errStr + buf.String()
-}
-
 // WaitForDaemonSets for all daemonsets in the given namespace to be ready
 // (defined as all but 'allowedNotReadyNodes' pods associated with that
 // daemonset are ready).
@@ -1574,14 +1533,14 @@ func (b KubectlBuilder) ExecOrDie() string {
 
 func isTimeout(err error) bool {
 	switch err := err.(type) {
-	case net.Error:
-		if err.Timeout() {
-			return true
-		}
 	case *url.Error:
 		if err, ok := err.Err.(net.Error); ok && err.Timeout() {
 			return true
 		}
+	case net.Error:
+		if err.Timeout() {
+			return true
+		}
 	}
 	return false
 }
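*url.Error satisfies the net.Error interface, so with the case net.Error arm first the *url.Error arm could never be reached (staticcheck's unreachable type-switch case warning); reordering puts the more specific type first. A self-contained illustration of why case order matters in a type switch, with made-up types:

package main

import "fmt"

type animal interface{ Sound() string }

type dog struct{}

func (dog) Sound() string { return "woof" }

func classify(v interface{}) string {
	switch v.(type) {
	case dog: // concrete type first: reachable
		return "a dog"
	case animal: // interface afterwards: still reachable for other implementations
		return "some animal"
	default:
		// If the two cases above were swapped, `case dog` could never match,
		// because every dog already satisfies the animal interface.
		return "not an animal"
	}
}

func main() {
	fmt.Println(classify(dog{}))
	fmt.Println(classify(42))
}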
@@ -2489,6 +2448,9 @@ func RestartKubelet(host string) error {
 		sudoPresent = true
 	}
 	sshResult, err = e2essh.SSH("systemctl --version", host, TestContext.Provider)
+	if err != nil {
+		return fmt.Errorf("Failed to execute command 'systemctl' on host %s with error %v", host, err)
+	}
 	if !strings.Contains(sshResult.Stderr, "command not found") {
 		cmd = "systemctl restart kubelet"
 	} else {
@@ -185,7 +185,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestCo
 			},
 		},
 	}
-	endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
+	_, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
 	framework.ExpectNoError(err, "failed to create endpoints for Gluster server")
 
 	return config, pod, ip