Merge pull request #81973 from oomichi/replace-e2elog-framework-r-p
Use log functions of core framework on sub [p-s]
This commit is contained in commit fd6c380047.
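The change is mechanical: the helper packages under test/e2e/framework whose names fall in the [p-s] range stop routing log output through the separate e2elog package and call the equivalent helpers on the core framework package directly, so the extra log import can be dropped. A minimal sketch of the call-site pattern, assuming the core framework package is imported as framework (the reportServiceError helper and its messages are illustrative, not taken from the diff):

package service

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// reportServiceError is an illustrative helper, not part of this PR; it only
// shows the before/after shape of the call sites that the diff rewrites.
func reportServiceError(name string, err error) {
	if err != nil {
		// was: e2elog.Failf("Failed to create Service %q: %v", name, err)
		framework.Failf("Failed to create Service %q: %v", name, err)
		return
	}
	// was: e2elog.Logf("Service %q created", name)
	framework.Logf("Service %q created", name)
}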
@@ -26,7 +26,6 @@ import (
 "sync"
 "time"
 
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )
 
@@ -182,7 +181,7 @@ func GatherCPUProfileForSeconds(componentName string, profileBaseName string, se
 defer wg.Done()
 }
 if err := gatherProfile(componentName, profileBaseName, fmt.Sprintf("profile?seconds=%v", seconds)); err != nil {
-e2elog.Logf("Failed to gather %v CPU profile: %v", componentName, err)
+Logf("Failed to gather %v CPU profile: %v", componentName, err)
 }
 }
 
@@ -192,7 +191,7 @@ func GatherMemoryProfile(componentName string, profileBaseName string, wg *sync.
 defer wg.Done()
 }
 if err := gatherProfile(componentName, profileBaseName, "heap"); err != nil {
-e2elog.Logf("Failed to gather %v memory profile: %v", componentName, err)
+Logf("Failed to gather %v memory profile: %v", componentName, err)
 }
 }
 
@@ -18,7 +18,6 @@ go_library(
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
 "//test/e2e/framework:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/utils:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
 ],
@@ -24,13 +24,12 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 )
 
 // UpdateReplicaSetWithRetries updates replicaset template with retries.
 func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*appsv1.ReplicaSet, error) {
-return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, framework.Poll, framework.PollShortTimeout)
+return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, framework.Logf, framework.Poll, framework.PollShortTimeout)
 }
 
 // CheckNewRSAnnotations check if the new RS's annotation is as expected
@@ -36,7 +36,6 @@ go_library(
 "//staging/src/k8s.io/client-go/util/retry:go_default_library",
 "//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library",
 "//test/e2e/framework:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
@@ -17,7 +17,7 @@ limitations under the License.
 package service
 
 import (
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+"k8s.io/kubernetes/test/e2e/framework"
 )
 
 // affinityTracker tracks the destination of a request for the affinity tests.
@@ -28,7 +28,7 @@ type affinityTracker struct {
 // Record the response going to a given host.
 func (at *affinityTracker) recordHost(host string) {
 at.hostTrace = append(at.hostTrace, host)
-e2elog.Logf("Received response from host: %s", host)
+framework.Logf("Received response from host: %s", host)
 }
 
 // Check that we got a constant count requests going to the same host.
@@ -51,6 +51,6 @@ func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds b
 }
 
 func checkAffinityFailed(tracker affinityTracker, err string) {
-e2elog.Logf("%v", tracker.hostTrace)
-e2elog.Failf(err)
+framework.Logf("%v", tracker.hostTrace)
+framework.Failf(err)
 }
@@ -30,7 +30,6 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 testutils "k8s.io/kubernetes/test/utils"
@@ -118,22 +117,22 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
 // verify service from node
 func() string {
 cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
-e2elog.Logf("Executing cmd %q on host %v", cmd, host)
+framework.Logf("Executing cmd %q on host %v", cmd, host)
 result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
 if err != nil || result.Code != 0 {
 e2essh.LogResult(result)
-e2elog.Logf("error while SSH-ing to node: %v", err)
+framework.Logf("error while SSH-ing to node: %v", err)
 }
 return result.Stdout
 },
 // verify service from pod
 func() string {
 cmd := buildCommand("wget -q -T 1 -O -")
-e2elog.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPod.Name)
+framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPod.Name)
 // TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
 output, err := framework.RunHostCmd(ns, execPod.Name, cmd)
 if err != nil {
-e2elog.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPod.Name, err, output)
+framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPod.Name, err, output)
 }
 return output
 },
@@ -159,12 +158,12 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
 // and we need a better way to track how often it occurs.
 if gotEndpoints.IsSuperset(expectedEndpoints) {
 if !gotEndpoints.Equal(expectedEndpoints) {
-e2elog.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints))
+framework.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints))
 }
 passed = true
 break
 }
-e2elog.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints))
+framework.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints))
 }
 if !passed {
 // Sort the lists so they're easier to visually diff.
@@ -191,12 +190,12 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
 result, err := e2essh.SSH(command, host, framework.TestContext.Provider)
 if err != nil {
 e2essh.LogResult(result)
-e2elog.Logf("error while SSH-ing to node: %v", err)
+framework.Logf("error while SSH-ing to node: %v", err)
 }
 if result.Code != 99 {
 return nil
 }
-e2elog.Logf("service still alive - still waiting")
+framework.Logf("service still alive - still waiting")
 }
 return fmt.Errorf("waiting for service to be down timed out")
 }
@@ -46,7 +46,6 @@ import (
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/registry/core/service/portallocator"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 testutils "k8s.io/kubernetes/test/utils"
@@ -111,7 +110,7 @@ func (j *TestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *v1.
 }
 result, err := j.Client.CoreV1().Services(namespace).Create(svc)
 if err != nil {
-e2elog.Failf("Failed to create TCP Service %q: %v", svc.Name, err)
+framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err)
 }
 return result
 }
@@ -126,7 +125,7 @@ func (j *TestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *v1.Se
 }
 result, err := j.Client.CoreV1().Services(namespace).Create(svc)
 if err != nil {
-e2elog.Failf("Failed to create TCP Service %q: %v", svc.Name, err)
+framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err)
 }
 return result
 }
@@ -141,7 +140,7 @@ func (j *TestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *v1.Se
 }
 result, err := j.Client.CoreV1().Services(namespace).Create(svc)
 if err != nil {
-e2elog.Failf("Failed to create UDP Service %q: %v", svc.Name, err)
+framework.Failf("Failed to create UDP Service %q: %v", svc.Name, err)
 }
 return result
 }
@@ -166,7 +165,7 @@ func (j *TestJig) CreateExternalNameServiceOrFail(namespace string, tweak func(s
 }
 result, err := j.Client.CoreV1().Services(namespace).Create(svc)
 if err != nil {
-e2elog.Failf("Failed to create ExternalName Service %q: %v", svc.Name, err)
+framework.Failf("Failed to create ExternalName Service %q: %v", svc.Name, err)
 }
 return result
 }
@@ -274,10 +273,10 @@ func (j *TestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {
 nodes := j.GetNodes(MaxNodesForEndpointsTests)
 endpoints, err := j.Client.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
 if err != nil {
-e2elog.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
+framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
 }
 if len(endpoints.Subsets) == 0 {
-e2elog.Failf("Endpoint has no subsets, cannot determine node addresses.")
+framework.Failf("Endpoint has no subsets, cannot determine node addresses.")
 }
 epNodes := sets.NewString()
 for _, ss := range endpoints.Subsets {
@@ -322,22 +321,22 @@ func (j *TestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string)
 err := wait.PollImmediate(framework.Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) {
 endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
 if err != nil {
-e2elog.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err)
+framework.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err)
 return false, nil
 }
 if len(endpoints.Subsets) == 0 {
-e2elog.Logf("Expect endpoints with subsets, got none.")
+framework.Logf("Expect endpoints with subsets, got none.")
 return false, nil
 }
 // TODO: Handle multiple endpoints
 if len(endpoints.Subsets[0].Addresses) == 0 {
-e2elog.Logf("Expected Ready endpoints - found none")
+framework.Logf("Expected Ready endpoints - found none")
 return false, nil
 }
 epHostName := *endpoints.Subsets[0].Addresses[0].NodeName
-e2elog.Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName)
+framework.Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName)
 if epHostName != nodeName {
-e2elog.Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName)
+framework.Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName)
 return false, nil
 }
 return true, nil
@@ -398,19 +397,19 @@ func (j *TestJig) WaitForAvailableEndpoint(namespace, serviceName string, timeou
 // SanityCheckService performs sanity checks on the given service
 func (j *TestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) {
 if svc.Spec.Type != svcType {
-e2elog.Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
+framework.Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
 }
 
 if svcType != v1.ServiceTypeExternalName {
 if svc.Spec.ExternalName != "" {
-e2elog.Failf("unexpected Spec.ExternalName (%s) for service, expected empty", svc.Spec.ExternalName)
+framework.Failf("unexpected Spec.ExternalName (%s) for service, expected empty", svc.Spec.ExternalName)
 }
 if svc.Spec.ClusterIP != api.ClusterIPNone && svc.Spec.ClusterIP == "" {
-e2elog.Failf("didn't get ClusterIP for non-ExternamName service")
+framework.Failf("didn't get ClusterIP for non-ExternamName service")
 }
 } else {
 if svc.Spec.ClusterIP != "" {
-e2elog.Failf("unexpected Spec.ClusterIP (%s) for ExternamName service, expected empty", svc.Spec.ClusterIP)
+framework.Failf("unexpected Spec.ClusterIP (%s) for ExternamName service, expected empty", svc.Spec.ClusterIP)
 }
 }
 
@@ -421,11 +420,11 @@ func (j *TestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) {
 for i, port := range svc.Spec.Ports {
 hasNodePort := (port.NodePort != 0)
 if hasNodePort != expectNodePorts {
-e2elog.Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort)
+framework.Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort)
 }
 if hasNodePort {
 if !NodePortRange.Contains(int(port.NodePort)) {
-e2elog.Failf("out-of-range nodePort (%d) for service", port.NodePort)
+framework.Failf("out-of-range nodePort (%d) for service", port.NodePort)
 }
 }
 }
@@ -435,12 +434,12 @@ func (j *TestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) {
 }
 hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0
 if hasIngress != expectIngress {
-e2elog.Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress))
+framework.Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress))
 }
 if hasIngress {
 for i, ing := range svc.Status.LoadBalancer.Ingress {
 if ing.IP == "" && ing.Hostname == "" {
-e2elog.Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing)
+framework.Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing)
 }
 }
 }
@@ -473,14 +472,14 @@ func (j *TestJig) UpdateService(namespace, name string, update func(*v1.Service)
 func (j *TestJig) UpdateServiceOrFail(namespace, name string, update func(*v1.Service)) *v1.Service {
 svc, err := j.UpdateService(namespace, name, update)
 if err != nil {
-e2elog.Failf(err.Error())
+framework.Failf(err.Error())
 }
 return svc
 }
 
 // WaitForNewIngressIPOrFail waits for the given service to get a new ingress IP, or fails after the given timeout
 func (j *TestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP string, timeout time.Duration) *v1.Service {
-e2elog.Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name)
+framework.Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name)
 service := j.waitForConditionOrFail(namespace, name, timeout, "have a new ingress IP", func(svc *v1.Service) bool {
 if len(svc.Status.LoadBalancer.Ingress) == 0 {
 return false
@@ -506,21 +505,21 @@ func (j *TestJig) ChangeServiceNodePortOrFail(namespace, name string, initial in
 s.Spec.Ports[0].NodePort = int32(newPort)
 })
 if err != nil && strings.Contains(err.Error(), portallocator.ErrAllocated.Error()) {
-e2elog.Logf("tried nodePort %d, but it is in use, will try another", newPort)
+framework.Logf("tried nodePort %d, but it is in use, will try another", newPort)
 continue
 }
 // Otherwise err was nil or err was a real error
 break
 }
 if err != nil {
-e2elog.Failf("Could not change the nodePort: %v", err)
+framework.Failf("Could not change the nodePort: %v", err)
 }
 return service
 }
 
 // WaitForLoadBalancerOrFail waits the given service to have a LoadBalancer, or fails after the given timeout
 func (j *TestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service {
-e2elog.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
+framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
 service := j.waitForConditionOrFail(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool {
 return len(svc.Status.LoadBalancer.Ingress) > 0
 })
@@ -532,11 +531,11 @@ func (j *TestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip st
 // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
 defer func() {
 if err := framework.EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil {
-e2elog.Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err)
+framework.Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err)
 }
 }()
 
-e2elog.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
+framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
 service := j.waitForConditionOrFail(namespace, name, timeout, "have no load balancer", func(svc *v1.Service) bool {
 return len(svc.Status.LoadBalancer.Ingress) == 0
 })
@@ -557,7 +556,7 @@ func (j *TestJig) waitForConditionOrFail(namespace, name string, timeout time.Du
 return false, nil
 }
 if err := wait.PollImmediate(framework.Poll, timeout, pollFunc); err != nil {
-e2elog.Failf("Timed out waiting for service %q to %s", name, message)
+framework.Failf("Timed out waiting for service %q to %s", name, message)
 }
 return service
 }
@@ -632,10 +631,10 @@ func (j *TestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationController
 pdb := j.newPDBTemplate(namespace, rc)
 newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
 if err != nil {
-e2elog.Failf("Failed to create PDB %q %v", pdb.Name, err)
+framework.Failf("Failed to create PDB %q %v", pdb.Name, err)
 }
 if err := j.waitForPdbReady(namespace); err != nil {
-e2elog.Failf("Failed waiting for PDB to be ready: %v", err)
+framework.Failf("Failed waiting for PDB to be ready: %v", err)
 }
 
 return newPdb
@@ -672,14 +671,14 @@ func (j *TestJig) RunOrFail(namespace string, tweak func(rc *v1.ReplicationContr
 }
 result, err := j.Client.CoreV1().ReplicationControllers(namespace).Create(rc)
 if err != nil {
-e2elog.Failf("Failed to create RC %q: %v", rc.Name, err)
+framework.Failf("Failed to create RC %q: %v", rc.Name, err)
 }
 pods, err := j.waitForPodsCreated(namespace, int(*(rc.Spec.Replicas)))
 if err != nil {
-e2elog.Failf("Failed to create pods: %v", err)
+framework.Failf("Failed to create pods: %v", err)
 }
 if err := j.waitForPodsReady(namespace, pods); err != nil {
-e2elog.Failf("Failed waiting for pods to be running: %v", err)
+framework.Failf("Failed waiting for pods to be running: %v", err)
 }
 return result
 }
@@ -689,20 +688,20 @@ func (j *TestJig) Scale(namespace string, replicas int) {
 rc := j.Name
 scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{})
 if err != nil {
-e2elog.Failf("Failed to get scale for RC %q: %v", rc, err)
+framework.Failf("Failed to get scale for RC %q: %v", rc, err)
 }
 
 scale.Spec.Replicas = int32(replicas)
 _, err = j.Client.CoreV1().ReplicationControllers(namespace).UpdateScale(rc, scale)
 if err != nil {
-e2elog.Failf("Failed to scale RC %q: %v", rc, err)
+framework.Failf("Failed to scale RC %q: %v", rc, err)
 }
 pods, err := j.waitForPodsCreated(namespace, replicas)
 if err != nil {
-e2elog.Failf("Failed waiting for pods: %v", err)
+framework.Failf("Failed waiting for pods: %v", err)
 }
 if err := j.waitForPodsReady(namespace, pods); err != nil {
-e2elog.Failf("Failed waiting for pods to be running: %v", err)
+framework.Failf("Failed waiting for pods to be running: %v", err)
 }
 }
 
@@ -725,7 +724,7 @@ func (j *TestJig) waitForPodsCreated(namespace string, replicas int) ([]string,
 timeout := 2 * time.Minute
 // List the pods, making sure we observe all the replicas.
 label := labels.SelectorFromSet(labels.Set(j.Labels))
-e2elog.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
+framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
 options := metav1.ListOptions{LabelSelector: label.String()}
 pods, err := j.Client.CoreV1().Pods(namespace).List(options)
@@ -741,10 +740,10 @@ func (j *TestJig) waitForPodsCreated(namespace string, replicas int) ([]string,
 found = append(found, pod.Name)
 }
 if len(found) == replicas {
-e2elog.Logf("Found all %d pods", replicas)
+framework.Logf("Found all %d pods", replicas)
 return found, nil
 }
-e2elog.Logf("Found %d/%d pods - will retry", len(found), replicas)
+framework.Logf("Found %d/%d pods - will retry", len(found), replicas)
 }
 return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas)
 }
@@ -777,7 +776,7 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v
 // If the node's internal address points to localhost, then we are not
 // able to test the service reachability via that address
 if isInvalidOrLocalhostAddress(internalAddr) {
-e2elog.Logf("skipping testEndpointReachability() for internal adddress %s", internalAddr)
+framework.Logf("skipping testEndpointReachability() for internal adddress %s", internalAddr)
 continue
 }
 testEndpointReachability(internalAddr, sp.NodePort, sp.Protocol, pod)
@@ -809,12 +808,12 @@ func testEndpointReachability(endpoint string, port int32, protocol v1.Protocol,
 case v1.ProtocolUDP:
 cmd = fmt.Sprintf("nc -zv -u -w 2 %s %v", endpoint, port)
 default:
-e2elog.Failf("Service reachablity check is not supported for %v", protocol)
+framework.Failf("Service reachablity check is not supported for %v", protocol)
 }
 if cmd != "" {
 err := wait.PollImmediate(1*time.Second, ServiceReachabilityShortPollTimeout, func() (bool, error) {
 if _, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd); err != nil {
-e2elog.Logf("Service reachability failing with error: %v\nRetrying...", err)
+framework.Logf("Service reachability failing with error: %v\nRetrying...", err)
 return false, nil
 }
 return true, nil
@@ -881,7 +880,7 @@ func (j *TestJig) CheckServiceReachability(namespace string, svc *v1.Service, po
 case v1.ServiceTypeExternalName:
 j.checkExternalServiceReachability(svc, pod)
 default:
-e2elog.Failf("Unsupported service type \"%s\" to verify service reachability for \"%s\" service. This may due to diverse implementation of the service type.", svcType, svc.Name)
+framework.Failf("Unsupported service type \"%s\" to verify service reachability for \"%s\" service. This may due to diverse implementation of the service type.", svcType, svc.Name)
 }
 }
 
@@ -906,9 +905,9 @@ func (j *TestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int
 
 if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
 if err == wait.ErrWaitTimeout {
-e2elog.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
+framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
 } else {
-e2elog.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)
+framework.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)
 }
 }
 }
@@ -924,7 +923,7 @@ func (j *TestJig) TestNotReachableHTTP(host string, port int, timeout time.Durat
 }
 
 if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
-e2elog.Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err)
+framework.Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err)
 }
 }
 
@@ -939,7 +938,7 @@ func (j *TestJig) TestRejectedHTTP(host string, port int, timeout time.Duration)
 }
 
 if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
-e2elog.Failf("HTTP service %v:%v not rejected: %v", host, port, err)
+framework.Failf("HTTP service %v:%v not rejected: %v", host, port, err)
 }
 }
 
@@ -957,7 +956,7 @@ func (j *TestJig) TestReachableUDP(host string, port int, timeout time.Duration)
 }
 
 if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
-e2elog.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
+framework.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
 }
 }
 
@@ -971,7 +970,7 @@ func (j *TestJig) TestNotReachableUDP(host string, port int, timeout time.Durati
 return false, nil // caller can retry
 }
 if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
-e2elog.Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err)
+framework.Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err)
 }
 }
 
@@ -985,7 +984,7 @@ func (j *TestJig) TestRejectedUDP(host string, port int, timeout time.Duration)
 return false, nil // caller can retry
 }
 if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
-e2elog.Failf("UDP service %v:%v not rejected: %v", host, port, err)
+framework.Failf("UDP service %v:%v not rejected: %v", host, port, err)
 }
 }
 
@@ -1000,7 +999,7 @@ func (j *TestJig) GetHTTPContent(host string, port int, timeout time.Duration, u
 }
 return false, nil
 }); pollErr != nil {
-e2elog.Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, pollErr)
+framework.Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, pollErr)
 }
 return body
 }
@@ -1060,7 +1059,7 @@ func (j *TestJig) CheckAffinity(execPod *v1.Pod, targetIP string, targetPort int
 if execPod != nil {
 stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
 if err != nil {
-e2elog.Logf("Failed to get response from %s. Retry until timeout", targetIPPort)
+framework.Logf("Failed to get response from %s. Retry until timeout", targetIPPort)
 return false, nil
 }
 tracker.recordHost(stdout)
@@ -1099,18 +1098,18 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err
 ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
 url := fmt.Sprintf("http://%s%s", ipPort, request)
 if ip == "" || port == 0 {
-e2elog.Failf("Got empty IP for reachability check (%s)", url)
+framework.Failf("Got empty IP for reachability check (%s)", url)
 return false, fmt.Errorf("invalid input ip or port")
 }
-e2elog.Logf("Testing HTTP health check on %v", url)
+framework.Logf("Testing HTTP health check on %v", url)
 resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second)
 if err != nil {
-e2elog.Logf("Got error testing for reachability of %s: %v", url, err)
+framework.Logf("Got error testing for reachability of %s: %v", url, err)
 return false, err
 }
 defer resp.Body.Close()
 if err != nil {
-e2elog.Logf("Got error reading response from %s: %v", url, err)
+framework.Logf("Got error reading response from %s: %v", url, err)
 return false, err
 }
 // HealthCheck responder returns 503 for no local endpoints
@@ -26,7 +26,6 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 // GetServicesProxyRequest returns a request for a service proxy.
@@ -103,10 +102,10 @@ func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(sv
 
 // DescribeSvc logs the output of kubectl describe svc for the given namespace
 func DescribeSvc(ns string) {
-e2elog.Logf("\nOutput of kubectl describe svc:\n")
+framework.Logf("\nOutput of kubectl describe svc:\n")
 desc, _ := framework.RunKubectl(
 "describe", "svc", fmt.Sprintf("--namespace=%v", ns))
-e2elog.Logf(desc)
+framework.Logf(desc)
 }
 
 // GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service.
@@ -26,7 +26,6 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 servicehelper "k8s.io/cloud-provider/service/helpers"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 
 "github.com/onsi/ginkgo"
 )
@@ -38,7 +37,7 @@ func WaitForServiceResponding(c clientset.Interface, ns, name string) error {
 return wait.PollImmediate(framework.Poll, RespondingTimeout, func() (done bool, err error) {
 proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
 if errProxy != nil {
-e2elog.Logf("Failed to get services proxy request: %v:", errProxy)
+framework.Logf("Failed to get services proxy request: %v:", errProxy)
 return false, nil
 }
 
@@ -52,18 +51,18 @@ func WaitForServiceResponding(c clientset.Interface, ns, name string) error {
 Raw()
 if err != nil {
 if ctx.Err() != nil {
-e2elog.Failf("Failed to GET from service %s: %v", name, err)
+framework.Failf("Failed to GET from service %s: %v", name, err)
 return true, err
 }
-e2elog.Logf("Failed to GET from service %s: %v:", name, err)
+framework.Logf("Failed to GET from service %s: %v:", name, err)
 return false, nil
 }
 got := string(body)
 if len(got) == 0 {
-e2elog.Logf("Service %s: expected non-empty response", name)
+framework.Logf("Service %s: expected non-empty response", name)
 return false, err // stop polling
 }
-e2elog.Logf("Service %s: found nonempty answer: %s", name, got)
+framework.Logf("Service %s: found nonempty answer: %s", name, got)
 return true, nil
 })
 }
@@ -72,7 +71,7 @@ func WaitForServiceResponding(c clientset.Interface, ns, name string) error {
 func WaitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name string) {
 ginkgo.By("Delete service with finalizer")
 if err := cs.CoreV1().Services(namespace).Delete(name, nil); err != nil {
-e2elog.Failf("Failed to delete service %s/%s", namespace, name)
+framework.Failf("Failed to delete service %s/%s", namespace, name)
 }
 
 ginkgo.By("Wait for service to disappear")
@@ -80,15 +79,15 @@ func WaitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name
 svc, err := cs.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
 if err != nil {
 if apierrors.IsNotFound(err) {
-e2elog.Logf("Service %s/%s is gone.", namespace, name)
+framework.Logf("Service %s/%s is gone.", namespace, name)
 return true, nil
 }
 return false, err
 }
-e2elog.Logf("Service %s/%s still exists with finalizers: %v", namespace, name, svc.Finalizers)
+framework.Logf("Service %s/%s still exists with finalizers: %v", namespace, name, svc.Finalizers)
 return false, nil
 }); pollErr != nil {
-e2elog.Failf("Failed to wait for service to disappear: %v", pollErr)
+framework.Failf("Failed to wait for service to disappear: %v", pollErr)
 }
 }
 
@@ -108,11 +107,11 @@ func WaitForServiceUpdatedWithFinalizer(cs clientset.Interface, namespace, name
 }
 }
 if foundFinalizer != hasFinalizer {
-e2elog.Logf("Service %s/%s hasFinalizer=%t, want %t", namespace, name, foundFinalizer, hasFinalizer)
+framework.Logf("Service %s/%s hasFinalizer=%t, want %t", namespace, name, foundFinalizer, hasFinalizer)
 return false, nil
 }
 return true, nil
 }); pollErr != nil {
-e2elog.Failf("Failed to wait for service to hasFinalizer=%t: %v", hasFinalizer, pollErr)
+framework.Failf("Failed to wait for service to hasFinalizer=%t: %v", hasFinalizer, pollErr)
 }
 }
@@ -23,7 +23,6 @@ go_library(
 "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/framework:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/e2e/manifest:go_default_library",
 "//test/utils/image:go_default_library",
 ],
@@ -31,7 +31,6 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 e2efwk "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -157,7 +156,7 @@ func BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
 // Ignore 'mv' errors to make this idempotent.
 cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
 stdout, err := e2efwk.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
-e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
+e2efwk.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
 return err
 }
 
@@ -181,7 +180,7 @@ func RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
 // Ignore 'mv' errors to make this idempotent.
 cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
 stdout, err := e2efwk.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
-e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
+e2efwk.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
 return err
 }
 
@@ -243,17 +242,17 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) {
 resumedPod := ""
 for _, pod := range podList.Items {
 if pod.Status.Phase != v1.PodRunning {
-e2elog.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
+e2efwk.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
 }
 if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) {
 continue
 }
 if resumedPod != "" {
-e2elog.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
+e2efwk.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
 }
 _, err := e2efwk.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
 e2efwk.ExpectNoError(err)
-e2elog.Logf("Resumed pod %v", pod.Name)
+e2efwk.Logf("Resumed pod %v", pod.Name)
 resumedPod = pod.Name
 }
 }
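The statefulset helpers in the surrounding hunks import the core framework under the alias e2efwk, so the same mechanical swap lands there as e2efwk.Logf / e2efwk.Failf rather than framework.Logf. A minimal sketch under that assumption (logResumedPod is an illustrative helper, not from the diff):

package statefulset

import (
	e2efwk "k8s.io/kubernetes/test/e2e/framework"
)

// logResumedPod is an illustrative helper, not part of this PR; it shows the
// aliased form of the same replacement applied in the hunks above and below.
func logResumedPod(podName string) {
	// was: e2elog.Logf("Resumed pod %v", podName)
	e2efwk.Logf("Resumed pod %v", podName)
}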
@@ -32,7 +32,6 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 e2efwk "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/manifest"
 )
 
@@ -42,18 +41,18 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S
 return filepath.Join(manifestPath, file)
 }
 
-e2elog.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
+e2efwk.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
 ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
 e2efwk.ExpectNoError(err)
-e2elog.Logf("Parsing service from %v", mkpath("service.yaml"))
+e2efwk.Logf("Parsing service from %v", mkpath("service.yaml"))
 svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
 e2efwk.ExpectNoError(err)
 
-e2elog.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
+e2efwk.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
 _, err = c.CoreV1().Services(ns).Create(svc)
 e2efwk.ExpectNoError(err)
 
-e2elog.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
+e2efwk.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
 _, err = c.AppsV1().StatefulSets(ns).Create(ss)
 e2efwk.ExpectNoError(err)
 WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
@@ -74,7 +73,7 @@ func DeleteStatefulPodAtIndex(c clientset.Interface, index int, ss *appsv1.State
 name := getStatefulSetPodNameAtIndex(index, ss)
 noGrace := int64(0)
 if err := c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
-e2elog.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
+e2efwk.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
 }
 }
 
@@ -93,7 +92,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 errList = append(errList, fmt.Sprintf("%v", err))
 }
 WaitForStatusReplicas(c, ss, 0)
-e2elog.Logf("Deleting statefulset %v", ss.Name)
+e2efwk.Logf("Deleting statefulset %v", ss.Name)
 // Use OrphanDependents=false so it's deleted synchronously.
 // We already made sure the Pods are gone inside Scale().
 if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
@@ -107,13 +106,13 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
 pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 if err != nil {
-e2elog.Logf("WARNING: Failed to list pvcs, retrying %v", err)
+e2efwk.Logf("WARNING: Failed to list pvcs, retrying %v", err)
 return false, nil
 }
 for _, pvc := range pvcList.Items {
 pvNames.Insert(pvc.Spec.VolumeName)
 // TODO: Double check that there are no pods referencing the pvc
-e2elog.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
+e2efwk.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
 if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
 return false, nil
 }
@@ -127,7 +126,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
 pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 if err != nil {
-e2elog.Logf("WARNING: Failed to list pvs, retrying %v", err)
+e2efwk.Logf("WARNING: Failed to list pvs, retrying %v", err)
 return false, nil
 }
 waitingFor := []string{}
@@ -139,7 +138,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 if len(waitingFor) == 0 {
 return true, nil
 }
-e2elog.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
+e2efwk.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
 return false, nil
 })
 if pollErr != nil {
@@ -161,7 +160,7 @@ func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string,
 // Apply the update, then attempt to push it to the apiserver.
 applyUpdate(statefulSet)
 if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
-e2elog.Logf("Updating stateful set %s", name)
+e2efwk.Logf("Updating stateful set %s", name)
 return true, nil
 }
 updateErr = err
@@ -178,7 +177,7 @@ func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1.
 name := ss.Name
 ns := ss.Namespace
 
-e2elog.Logf("Scaling statefulset %s to %d", name, count)
+e2efwk.Logf("Scaling statefulset %s to %d", name, count)
 ss = update(c, ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
 
 var statefulPodList *v1.PodList
@@ -223,7 +222,7 @@ func Restart(c clientset.Interface, ss *appsv1.StatefulSet) {
 func GetStatefulSet(c clientset.Interface, namespace, name string) *appsv1.StatefulSet {
 ss, err := c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
 if err != nil {
-e2elog.Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
+e2efwk.Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
 }
 return ss
 }
@@ -263,7 +262,7 @@ func CheckMount(c clientset.Interface, ss *appsv1.StatefulSet, mountPath string)
 
 // CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
 func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error {
-e2elog.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
+e2efwk.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
 
 if expectedServiceName != ss.Spec.ServiceName {
 return fmt.Errorf("wrong service name governing statefulset. Expected %s got %s",
@@ -278,7 +277,7 @@ func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd strin
 podList := GetPodList(c, ss)
 for _, statefulPod := range podList.Items {
 stdout, err := e2efwk.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
-e2elog.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
+e2efwk.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
 if err != nil {
 return err
 }
@@ -304,7 +303,7 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.State
 for i := 0; i < 3; i++ {
 ss, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
 if err != nil {
-e2elog.Failf("failed to get statefulset %q: %v", name, err)
+e2efwk.Failf("failed to get statefulset %q: %v", name, err)
 }
 update(ss)
 ss, err = c.AppsV1().StatefulSets(ns).Update(ss)
@@ -312,10 +311,10 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.State
 return ss
 }
 if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
-e2elog.Failf("failed to update statefulset %q: %v", name, err)
+e2efwk.Failf("failed to update statefulset %q: %v", name, err)
 }
 }
-e2elog.Failf("too many retries draining statefulset %q", name)
+e2efwk.Failf("too many retries draining statefulset %q", name)
 return nil
 }
 
@@ -25,7 +25,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+"k8s.io/kubernetes/test/e2e/framework"
 )
 
 // WaitForPartitionedRollingUpdate waits for all Pods in set to exist and have the correct revision. set must have
@@ -35,13 +35,13 @@ import (
 func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
 var pods *v1.PodList
 if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
-e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
+framework.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
 set.Namespace,
 set.Name,
 set.Spec.UpdateStrategy.Type)
 }
 if set.Spec.UpdateStrategy.RollingUpdate == nil || set.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
-e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with nil RollingUpdate or nil Partition",
+framework.Failf("StatefulSet %s/%s attempt to wait for partitioned update with nil RollingUpdate or nil Partition",
 set.Namespace,
 set.Name)
 }
@@ -53,14 +53,14 @@ func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
 return false, nil
 }
 if partition <= 0 && set.Status.UpdateRevision != set.Status.CurrentRevision {
-e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
+framework.Logf("Waiting for StatefulSet %s/%s to complete update",
 set.Namespace,
 set.Name,
 )
 SortStatefulPods(pods)
 for i := range pods.Items {
 if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
-e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
+framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
 pods.Items[i].Namespace,
 pods.Items[i].Name,
 set.Status.UpdateRevision,
@@ -71,7 +71,7 @@ func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
 }
 for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
 if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
-e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
+framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
 pods.Items[i].Namespace,
 pods.Items[i].Name,
 set.Status.UpdateRevision,
@@ -92,7 +92,7 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s
 podList := GetPodList(c, ss)
 SortStatefulPods(podList)
 if int32(len(podList.Items)) < numPodsRunning {
-e2elog.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning)
+framework.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning)
 return false, nil
 }
 if int32(len(podList.Items)) > numPodsRunning {
@@ -102,7 +102,7 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s
 shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady)
 isReady := podutil.IsPodReady(&p)
 desiredReadiness := shouldBeReady == isReady
-e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
+framework.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
 if p.Status.Phase != v1.PodRunning || !desiredReadiness {
 return false, nil
 }
@@ -110,7 +110,7 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s
 return true, nil
 })
 if pollErr != nil {
-e2elog.Failf("Failed waiting for pods to enter running: %v", pollErr)
+framework.Failf("Failed waiting for pods to enter running: %v", pollErr)
 }
 }
 
@@ -126,7 +126,7 @@ func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*app
 return until(ssGet, podList)
 })
 if pollErr != nil {
-e2elog.Failf("Failed waiting for state update: %v", pollErr)
+framework.Failf("Failed waiting for state update: %v", pollErr)
 }
 }
 
@@ -185,7 +185,7 @@ func WaitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName
 func WaitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
 var pods *v1.PodList
 if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
-e2elog.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
+framework.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
 set.Namespace,
 set.Name,
 set.Spec.UpdateStrategy.Type)
@@ -197,14 +197,14 @@ func WaitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
 return false, nil
 }
 if set.Status.UpdateRevision != set.Status.CurrentRevision {
-e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
+framework.Logf("Waiting for StatefulSet %s/%s to complete update",
 set.Namespace,
 set.Name,
 )
 SortStatefulPods(pods)
 for i := range pods.Items {
 if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
-e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
+framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
 pods.Items[i].Namespace,
 pods.Items[i].Name,
 set.Status.UpdateRevision,
@@ -225,7 +225,7 @@ func WaitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss
 
 // WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
 func WaitForStatusReadyReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) {
-e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
+framework.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
 
 ns, name := ss.Namespace, ss.Name
 pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
@@ -238,19 +238,19 @@ func WaitForStatusReadyReplicas(c clientset.Interface, ss *appsv1.StatefulSet, e
 return false, nil
 }
 if ssGet.Status.ReadyReplicas != expectedReplicas {
-e2elog.Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
+framework.Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
 return false, nil
 }
 return true, nil
 })
 if pollErr != nil {
-e2elog.Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
+framework.Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
 }
 }
 
 // WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
 func WaitForStatusReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) {
-e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
+framework.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
 
 ns, name := ss.Namespace, ss.Name
 pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
@@ -263,13 +263,13 @@ func WaitForStatusReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expect
 return false, nil
 }
 if ssGet.Status.Replicas != expectedReplicas {
-e2elog.Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
+framework.Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
 return false, nil
 }
 return true, nil
 })
 if pollErr != nil {
-e2elog.Failf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
+framework.Failf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
 }
 }
 
@@ -277,9 +277,9 @@ func WaitForStatusReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expect
 func Saturate(c clientset.Interface, ss *appsv1.StatefulSet) {
 var i int32
 for i = 0; i < *(ss.Spec.Replicas); i++ {
-e2elog.Logf("Waiting for stateful pod at index %v to enter Running", i)
+framework.Logf("Waiting for stateful pod at index %v to enter Running", i)
 WaitForRunning(c, i+1, i, ss)
-e2elog.Logf("Resuming stateful pod at index %v", i)
+framework.Logf("Resuming stateful pod at index %v", i)
 ResumeNextPod(c, ss)
 }
 }