Merge pull request #79293 from SataQiu/cleanup-test-20190622

e2e: remove framework.Failf
Merged by Kubernetes Prow Robot on 2019-06-22 12:32:12 -07:00, committed by GitHub
commit eb145bef43
9 changed files with 90 additions and 103 deletions
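The change is mechanical: every call site that used the framework package's own Failf helper now calls e2elog.Failf with the same format string and arguments, and the helper itself is deleted from the framework package (last file in this diff). A minimal sketch of the call-site migration follows; the e2elog import path is assumed from the package alias used in the diff and is not stated in this commit view.

// Minimal sketch of the call-site migration applied throughout this PR; the
// e2elog import path is an assumption based on the package alias in the diff.
package sketch

import (
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

func exampleCheck(err error) {
	if err != nil {
		// Before: Failf("unexpected error: %v", err)   // helper local to the framework package
		// After:  failures go through the shared e2elog package with the same format/args signature.
		e2elog.Failf("unexpected error: %v", err)
	}
}

Because Failf and e2elog.Failf share the (format string, args ...interface{}) signature, the substitution needs no other changes at the call sites.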

View File

@@ -79,7 +79,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
for _, fileName := range files {
data, err := testfiles.Read(fileName)
if err != nil {
Failf("reading manifest file: %v", err)
e2elog.Failf("reading manifest file: %v", err)
}
// Split at the "---" separator before working on

View File

@@ -333,7 +333,7 @@ func (f *Framework) AfterEach() {
for namespaceKey, namespaceErr := range nsDeletionErrors {
messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
}
Failf(strings.Join(messages, ","))
e2elog.Failf(strings.Join(messages, ","))
}
}()
@@ -390,7 +390,7 @@ func (f *Framework) AfterEach() {
// This is explicitly done at the very end of the test, to avoid
// e.g. not removing namespace in case of this failure.
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
Failf("All nodes should be ready after test, %v", err)
e2elog.Failf("All nodes should be ready after test, %v", err)
}
}
@@ -847,7 +847,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
pods, err := cl.WaitFor(atLeast, timeout)
if err != nil || len(pods) < atLeast {
Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
e2elog.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
}
}
@@ -860,7 +860,7 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(cl.client, cl.namespace)
if err == nil {
if len(pods) == 0 {
Failf("No pods matched the filter.")
e2elog.Failf("No pods matched the filter.")
}
e2elog.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
for _, p := range pods {

View File

@@ -169,7 +169,7 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor
}
nodes, err := m.client.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
e2elog.Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
}
for _, node := range nodes.Items {
m.nodesRuntimeOps[node.Name] = make(NodeRuntimeOperationErrorRate)
@@ -684,7 +684,7 @@ func (r *ResourceMonitor) Start() {
// It should be OK to monitor unschedulable Nodes
nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Failf("ResourceMonitor: unable to get list of nodes: %v", err)
e2elog.Failf("ResourceMonitor: unable to get list of nodes: %v", err)
}
r.collectors = make(map[string]*resourceCollector, 0)
for _, node := range nodes.Items {

View File

@@ -235,7 +235,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
}
config.diagnoseMissingEndpoints(eps)
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
e2elog.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
}
// GetEndpointsFromTestContainer executes a curl via kubectl exec in a test container.
@@ -347,7 +347,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
}
config.diagnoseMissingEndpoints(eps)
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
e2elog.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
}
// GetSelfURL executes a curl against the given path via kubectl exec into a
@@ -394,7 +394,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
desc, _ := RunKubectl(
"describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
e2elog.Logf("%s", desc)
Failf("Timed out in %v: %v", retryTimeout, msg)
e2elog.Failf("Timed out in %v: %v", retryTimeout, msg)
}
}
@@ -538,12 +538,12 @@ func (config *NetworkingTestConfig) createTestPods() {
var err error
config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{})
if err != nil {
Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
e2elog.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
}
config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{})
if err != nil {
Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
e2elog.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
}
}
@@ -675,12 +675,12 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
// wait for pod being deleted.
err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
if err != nil {
Failf("Failed to delete %s pod: %v", pod.Name, err)
e2elog.Failf("Failed to delete %s pod: %v", pod.Name, err)
}
// wait for endpoint being removed.
err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
if err != nil {
Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
e2elog.Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
}
// wait for kube-proxy to catch up with the pod being deleted.
time.Sleep(5 * time.Second)
@@ -784,11 +784,11 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
// Sanity check inputs, because it has happened. These are the only things
// that should hard fail the test - they are basically ASSERT()s.
if host == "" {
Failf("Got empty host for HTTP poke (%s)", url)
e2elog.Failf("Got empty host for HTTP poke (%s)", url)
return ret
}
if port == 0 {
Failf("Got port==0 for HTTP poke (%s)", url)
e2elog.Failf("Got port==0 for HTTP poke (%s)", url)
return ret
}
@@ -920,11 +920,11 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
// Sanity check inputs, because it has happened. These are the only things
// that should hard fail the test - they are basically ASSERT()s.
if host == "" {
Failf("Got empty host for UDP poke (%s)", url)
e2elog.Failf("Got empty host for UDP poke (%s)", url)
return ret
}
if port == 0 {
Failf("Got port==0 for UDP poke (%s)", url)
e2elog.Failf("Got port==0 for UDP poke (%s)", url)
return ret
}
@@ -1051,7 +1051,7 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
host, err := e2enode.GetExternalIP(node)
if err != nil {
Failf("Error getting node external ip : %v", err)
e2elog.Failf("Error getting node external ip : %v", err)
}
masterAddresses := GetAllMasterAddresses(c)
ginkgo.By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
@@ -1068,7 +1068,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
e2elog.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
for _, masterAddress := range masterAddresses {
BlockNetwork(host, masterAddress)
@@ -1076,7 +1076,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
e2elog.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
e2elog.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
}
testFunc()

View File

@@ -173,7 +173,7 @@ func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeo
func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options *metav1.DeleteOptions, timeout time.Duration) {
err := c.Delete(name, options)
if err != nil && !errors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
e2elog.Failf("Failed to delete pod %q: %v", name, err)
}
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)

View File

@@ -256,5 +256,5 @@ waitLoop:
}
}
// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.
Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
e2elog.Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
}

View File

@@ -168,7 +168,7 @@ func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(s
}
result, err := j.Client.CoreV1().Services(namespace).Create(svc)
if err != nil {
Failf("Failed to create TCP Service %q: %v", svc.Name, err)
e2elog.Failf("Failed to create TCP Service %q: %v", svc.Name, err)
}
return result
}
@@ -183,7 +183,7 @@ func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc
}
result, err := j.Client.CoreV1().Services(namespace).Create(svc)
if err != nil {
Failf("Failed to create TCP Service %q: %v", svc.Name, err)
e2elog.Failf("Failed to create TCP Service %q: %v", svc.Name, err)
}
return result
}
@@ -198,7 +198,7 @@ func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc
}
result, err := j.Client.CoreV1().Services(namespace).Create(svc)
if err != nil {
Failf("Failed to create UDP Service %q: %v", svc.Name, err)
e2elog.Failf("Failed to create UDP Service %q: %v", svc.Name, err)
}
return result
}
@@ -223,7 +223,7 @@ func (j *ServiceTestJig) CreateExternalNameServiceOrFail(namespace string, tweak
}
result, err := j.Client.CoreV1().Services(namespace).Create(svc)
if err != nil {
Failf("Failed to create ExternalName Service %q: %v", svc.Name, err)
e2elog.Failf("Failed to create ExternalName Service %q: %v", svc.Name, err)
}
return result
}
@@ -342,7 +342,7 @@ func PickNodeIP(c clientset.Interface) string {
publicIps, err := GetNodePublicIps(c)
ExpectNoError(err)
if len(publicIps) == 0 {
Failf("got unexpected number (%d) of public IPs", len(publicIps))
e2elog.Failf("got unexpected number (%d) of public IPs", len(publicIps))
}
ip := publicIps[0]
return ip
@@ -373,10 +373,10 @@ func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {
nodes := j.GetNodes(MaxNodesForEndpointsTests)
endpoints, err := j.Client.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
if err != nil {
Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
e2elog.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
}
if len(endpoints.Subsets) == 0 {
Failf("Endpoint has no subsets, cannot determine node addresses.")
e2elog.Failf("Endpoint has no subsets, cannot determine node addresses.")
}
epNodes := sets.NewString()
for _, ss := range endpoints.Subsets {
@@ -447,19 +447,19 @@ func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName
// SanityCheckService performs sanity checks on the given service
func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) {
if svc.Spec.Type != svcType {
Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
e2elog.Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
}
if svcType != v1.ServiceTypeExternalName {
if svc.Spec.ExternalName != "" {
Failf("unexpected Spec.ExternalName (%s) for service, expected empty", svc.Spec.ExternalName)
e2elog.Failf("unexpected Spec.ExternalName (%s) for service, expected empty", svc.Spec.ExternalName)
}
if svc.Spec.ClusterIP != api.ClusterIPNone && svc.Spec.ClusterIP == "" {
Failf("didn't get ClusterIP for non-ExternamName service")
e2elog.Failf("didn't get ClusterIP for non-ExternamName service")
}
} else {
if svc.Spec.ClusterIP != "" {
Failf("unexpected Spec.ClusterIP (%s) for ExternamName service, expected empty", svc.Spec.ClusterIP)
e2elog.Failf("unexpected Spec.ClusterIP (%s) for ExternamName service, expected empty", svc.Spec.ClusterIP)
}
}
@@ -470,11 +470,11 @@ func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceT
for i, port := range svc.Spec.Ports {
hasNodePort := (port.NodePort != 0)
if hasNodePort != expectNodePorts {
Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort)
e2elog.Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort)
}
if hasNodePort {
if !ServiceNodePortRange.Contains(int(port.NodePort)) {
Failf("out-of-range nodePort (%d) for service", port.NodePort)
e2elog.Failf("out-of-range nodePort (%d) for service", port.NodePort)
}
}
}
@@ -484,12 +484,12 @@ func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceT
}
hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0
if hasIngress != expectIngress {
Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress))
e2elog.Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress))
}
if hasIngress {
for i, ing := range svc.Status.LoadBalancer.Ingress {
if ing.IP == "" && ing.Hostname == "" {
Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing)
e2elog.Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing)
}
}
}
@@ -522,7 +522,7 @@ func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.S
func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*v1.Service)) *v1.Service {
svc, err := j.UpdateService(namespace, name, update)
if err != nil {
Failf(err.Error())
e2elog.Failf(err.Error())
}
return svc
}
@@ -562,7 +562,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini
break
}
if err != nil {
Failf("Could not change the nodePort: %v", err)
e2elog.Failf("Could not change the nodePort: %v", err)
}
return service
}
@@ -606,7 +606,7 @@ func (j *ServiceTestJig) waitForConditionOrFail(namespace, name string, timeout
return false, nil
}
if err := wait.PollImmediate(Poll, timeout, pollFunc); err != nil {
Failf("Timed out waiting for service %q to %s", name, message)
e2elog.Failf("Timed out waiting for service %q to %s", name, message)
}
return service
}
@@ -681,10 +681,10 @@ func (j *ServiceTestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationCon
pdb := j.newPDBTemplate(namespace, rc)
newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
if err != nil {
Failf("Failed to create PDB %q %v", pdb.Name, err)
e2elog.Failf("Failed to create PDB %q %v", pdb.Name, err)
}
if err := j.waitForPdbReady(namespace); err != nil {
Failf("Failed waiting for PDB to be ready: %v", err)
e2elog.Failf("Failed waiting for PDB to be ready: %v", err)
}
return newPdb
@@ -721,14 +721,14 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati
}
result, err := j.Client.CoreV1().ReplicationControllers(namespace).Create(rc)
if err != nil {
Failf("Failed to create RC %q: %v", rc.Name, err)
e2elog.Failf("Failed to create RC %q: %v", rc.Name, err)
}
pods, err := j.waitForPodsCreated(namespace, int(*(rc.Spec.Replicas)))
if err != nil {
Failf("Failed to create pods: %v", err)
e2elog.Failf("Failed to create pods: %v", err)
}
if err := j.waitForPodsReady(namespace, pods); err != nil {
Failf("Failed waiting for pods to be running: %v", err)
e2elog.Failf("Failed waiting for pods to be running: %v", err)
}
return result
}
@@ -738,20 +738,20 @@ func (j *ServiceTestJig) Scale(namespace string, replicas int) {
rc := j.Name
scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{})
if err != nil {
Failf("Failed to get scale for RC %q: %v", rc, err)
e2elog.Failf("Failed to get scale for RC %q: %v", rc, err)
}
scale.Spec.Replicas = int32(replicas)
_, err = j.Client.CoreV1().ReplicationControllers(namespace).UpdateScale(rc, scale)
if err != nil {
Failf("Failed to scale RC %q: %v", rc, err)
e2elog.Failf("Failed to scale RC %q: %v", rc, err)
}
pods, err := j.waitForPodsCreated(namespace, replicas)
if err != nil {
Failf("Failed waiting for pods: %v", err)
e2elog.Failf("Failed waiting for pods: %v", err)
}
if err := j.waitForPodsReady(namespace, pods); err != nil {
Failf("Failed waiting for pods to be running: %v", err)
e2elog.Failf("Failed waiting for pods to be running: %v", err)
}
}
@@ -910,9 +910,9 @@ func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, p
if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil {
if err == wait.ErrWaitTimeout {
Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
e2elog.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
} else {
Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)
e2elog.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)
}
}
}
@@ -928,7 +928,7 @@ func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout tim
}
if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil {
Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err)
e2elog.Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err)
}
}
@@ -943,7 +943,7 @@ func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Du
}
if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil {
Failf("HTTP service %v:%v not rejected: %v", host, port, err)
e2elog.Failf("HTTP service %v:%v not rejected: %v", host, port, err)
}
}
@@ -961,7 +961,7 @@ func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Du
}
if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil {
Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
e2elog.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
}
}
@@ -975,7 +975,7 @@ func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time
return false, nil // caller can retry
}
if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil {
Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err)
e2elog.Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err)
}
}
@@ -989,7 +989,7 @@ func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Dur
return false, nil // caller can retry
}
if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil {
Failf("UDP service %v:%v not rejected: %v", host, port, err)
e2elog.Failf("UDP service %v:%v not rejected: %v", host, port, err)
}
}
@@ -1004,7 +1004,7 @@ func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Dura
}
return false, nil
}); pollErr != nil {
Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, pollErr)
e2elog.Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, pollErr)
}
return body
}
@@ -1013,7 +1013,7 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err
ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
url := fmt.Sprintf("http://%s%s", ipPort, request)
if ip == "" || port == 0 {
Failf("Got empty IP for reachability check (%s)", url)
e2elog.Failf("Got empty IP for reachability check (%s)", url)
return false, fmt.Errorf("invalid input ip or port")
}
e2elog.Logf("Testing HTTP health check on %v", url)
@@ -1467,7 +1467,7 @@ func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds b
func checkAffinityFailed(tracker affinityTracker, err string) {
e2elog.Logf("%v", tracker.hostTrace)
Failf(err)
e2elog.Failf(err)
}
// CheckAffinity function tests whether the service affinity works as expected.

View File

@@ -85,7 +85,7 @@ func NewStatefulSetTester(c clientset.Interface) *StatefulSetTester {
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1.StatefulSet {
ss, err := s.c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
e2elog.Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
}
return ss
}
@@ -176,7 +176,7 @@ func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1.State
name := getStatefulSetPodNameAtIndex(index, ss)
noGrace := int64(0)
if err := s.c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
e2elog.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
}
}
@@ -247,7 +247,7 @@ func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1.State
for i := 0; i < 3; i++ {
ss, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
Failf("failed to get statefulset %q: %v", name, err)
e2elog.Failf("failed to get statefulset %q: %v", name, err)
}
update(ss)
ss, err = s.c.AppsV1().StatefulSets(ns).Update(ss)
@@ -255,10 +255,10 @@ func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1.State
return ss
}
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
Failf("failed to update statefulset %q: %v", name, err)
e2elog.Failf("failed to update statefulset %q: %v", name, err)
}
}
Failf("too many retries draining statefulset %q", name)
e2elog.Failf("too many retries draining statefulset %q", name)
return nil
}
@@ -282,7 +282,7 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1.Statef
if statefulPodCount != count {
e2epod.LogPodStates(podList.Items)
if hard {
Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
e2elog.Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
} else {
e2elog.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
}
@@ -320,7 +320,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
return true, nil
})
if pollErr != nil {
Failf("Failed waiting for pods to enter running: %v", pollErr)
e2elog.Failf("Failed waiting for pods to enter running: %v", pollErr)
}
}
@@ -336,7 +336,7 @@ func (s *StatefulSetTester) WaitForState(ss *appsv1.StatefulSet, until func(*app
return until(ssGet, podList)
})
if pollErr != nil {
Failf("Failed waiting for state update: %v", pollErr)
e2elog.Failf("Failed waiting for state update: %v", pollErr)
}
}
@@ -397,7 +397,7 @@ func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1.StatefulSet, podName
func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
e2elog.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
set.Namespace,
set.Name,
set.Spec.UpdateStrategy.Type)
@@ -437,13 +437,13 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1.StatefulSet) (*apps
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
set.Namespace,
set.Name,
set.Spec.UpdateStrategy.Type)
}
if set.Spec.UpdateStrategy.RollingUpdate == nil || set.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
Failf("StatefulSet %s/%s attempt to wait for partitioned update with nil RollingUpdate or nil Partition",
e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with nil RollingUpdate or nil Partition",
set.Namespace,
set.Name)
}
@@ -590,13 +590,13 @@ func (s *StatefulSetTester) ResumeNextPod(ss *appsv1.StatefulSet) {
resumedPod := ""
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
e2elog.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
}
if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) {
continue
}
if resumedPod != "" {
Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
e2elog.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
}
_, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
ExpectNoError(err)
@@ -626,7 +626,7 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1.StatefulSet, e
return true, nil
})
if pollErr != nil {
Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
e2elog.Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
}
}
@@ -651,7 +651,7 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1.StatefulSet, expect
return true, nil
})
if pollErr != nil {
Failf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
e2elog.Failf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
}
}

View File

@@ -249,19 +249,6 @@ func log(level string, format string, args ...interface{}) {
fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
// Failf logs the fail info.
func Failf(format string, args ...interface{}) {
FailfWithOffset(1, format, args...)
}
// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}
func skipInternalf(caller int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
@@ -465,7 +452,7 @@ func ProxyMode(f *Framework) (string, error) {
func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) {
gte, err := ServerVersionGTE(v, c)
if err != nil {
Failf("Failed to get server version: %v", err)
e2elog.Failf("Failed to get server version: %v", err)
}
if !gte {
skipInternalf(1, "Not supported for server versions before %q", v)
@@ -481,7 +468,7 @@ func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVers
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
skipInternalf(1, "Could not find %s resource, skipping test: %#v", gvr, err)
}
Failf("Unexpected error getting %v: %v", gvr, err)
e2elog.Failf("Unexpected error getting %v: %v", gvr, err)
}
}
@@ -1289,7 +1276,7 @@ func ServiceResponding(c clientset.Interface, ns, name string) error {
Raw()
if err != nil {
if ctx.Err() != nil {
Failf("Failed to GET from service %s: %v", name, err)
e2elog.Failf("Failed to GET from service %s: %v", name, err)
return true, err
}
e2elog.Logf("Failed to GET from service %s: %v:", name, err)
@@ -1430,7 +1417,7 @@ func AssertCleanup(ns string, selectors ...string) {
}
err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
if err != nil {
Failf(e.Error())
e2elog.Failf(e.Error())
}
}
@@ -1671,7 +1658,7 @@ func (f *Framework) testContainerOutputMatcher(scenarioName string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
Failf("Invalid container index: %d", containerIndex)
e2elog.Failf("Invalid container index: %d", containerIndex)
}
ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
@@ -1937,7 +1924,7 @@ func isNodeUntainted(node *v1.Node) bool {
nodeInfo.SetNode(node)
fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
if err != nil {
Failf("Can't test predicates for node %s: %v", node.Name, err)
e2elog.Failf("Can't test predicates for node %s: %v", node.Name, err)
return false
}
return fit
@@ -2114,7 +2101,7 @@ func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Tai
nodeUpdated, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
if taintutils.TaintExists(nodeUpdated.Spec.Taints, taint) {
Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
e2elog.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
@@ -2123,7 +2110,7 @@ func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint)
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
ExpectNoError(err)
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
e2elog.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
@@ -2822,7 +2809,7 @@ func BlockNetwork(from string, to string) {
dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
if result, err := e2essh.SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
e2essh.LogResult(result)
Failf("Unexpected error: %v", err)
e2elog.Failf("Unexpected error: %v", err)
}
}
@@ -2849,7 +2836,7 @@ func UnblockNetwork(from string, to string) {
return false, nil
})
if err != nil {
Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
e2elog.Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
"required on host %s: remove rule %s, if exists", from, iptablesRule)
}
}
@@ -3043,7 +3030,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
scheduledPods, currentlyNotScheduledPods = e2epod.GetPodsScheduled(masterNodes, allPods)
if startTime.Add(timeout).Before(time.Now()) {
Failf("Timed out after %v waiting for stable cluster.", timeout)
e2elog.Failf("Timed out after %v waiting for stable cluster.", timeout)
break
}
}
@@ -3149,17 +3136,17 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
// Populate the internal IP.
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
e2elog.Failf("Failed to get kubernetes endpoints: %v", err)
}
if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
Failf("There are more than 1 endpoints for kubernetes service: %+v", eps)
e2elog.Failf("There are more than 1 endpoints for kubernetes service: %+v", eps)
}
internalIP = eps.Subsets[0].Addresses[0].IP
// Populate the external IP/hostname.
url, err := url.Parse(TestContext.Host)
if err != nil {
Failf("Failed to parse hostname: %v", err)
e2elog.Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(url.Host) != nil {
externalIP = url.Host
@@ -3189,7 +3176,7 @@ func GetAllMasterAddresses(c clientset.Interface) []string {
case "aws":
ips.Insert(awsMasterIP)
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
e2elog.Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ips.List()
}