refactor: replace framework.Failf with e2elog.Failf
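This is a mechanical rename across the e2e tests below: every framework.Failf call becomes e2elog.Failf, where e2elog is the alias for k8s.io/kubernetes/test/e2e/framework/log added in the import hunk further down. A minimal sketch of the pattern, assuming nothing beyond what the diff shows (the alias and Failf's printf-style signature); the helper itself is hypothetical:

package example

import (
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// failIfUnequal is a hypothetical helper; only the import alias and the
// Failf call mirror the diff. Failf logs the formatted message and fails
// the current test, with the same printf-style contract as framework.Failf.
func failIfUnequal(got, want int) {
	if got != want {
		// before this commit: framework.Failf("expected %d, got %d", want, got)
		e2elog.Failf("expected %d, got %d", want, got)
	}
}

Only one file in this diff appears to need the new import; the others already alias the package, so only their call sites change.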
@@ -75,10 +75,10 @@ var _ = SIGDescribe("LimitRange", func() {
 		select {
 		case event, _ := <-w.ResultChan():
 			if event.Type != watch.Added {
-				framework.Failf("Failed to observe pod creation: %v", event)
+				e2elog.Failf("Failed to observe pod creation: %v", event)
 			}
 		case <-time.After(framework.ServiceRespondingTimeout):
-			framework.Failf("Timeout while waiting for LimitRange creation")
+			e2elog.Failf("Timeout while waiting for LimitRange creation")
 		}

 		ginkgo.By("Fetching the LimitRange to ensure it has proper values")
@@ -275,7 +275,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
 		}
 	}
 	if successes != completions {
-		framework.Failf("Only got %v completions. Expected %v completions.", successes, completions)
+		e2elog.Failf("Only got %v completions. Expected %v completions.", successes, completions)
 	}
 }

@@ -430,12 +430,12 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
 		var err error
 		node, err = cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		if err != nil {
-			framework.Failf("error getting node %q: %v", nodeName, err)
+			e2elog.Failf("error getting node %q: %v", nodeName, err)
 		}
 		var ok bool
 		nodeHostNameLabel, ok = node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]
 		if !ok {
-			framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
+			e2elog.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
 		}

 		// update Node API object with a fake resource
@@ -581,7 +581,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
 		for i, got := range rsPodsSeen {
 			expected := maxRSPodsSeen[i]
 			if got > expected {
-				framework.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got)
+				e2elog.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got)
 			}
 		}
 	})
@@ -328,7 +328,7 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 	totalRequestedMemResource := resource.Requests.Memory().Value()
 	allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 	if err != nil {
-		framework.Failf("Expect error of invalid, got : %v", err)
+		e2elog.Failf("Expect error of invalid, got : %v", err)
 	}
 	for _, pod := range allpods.Items {
 		if pod.Spec.NodeName == node.Name {
@@ -27,6 +27,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -119,7 +120,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 			nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
 			nodeList, err := cs.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: nodeSelector.String()})
 			if err != nil || len(nodeList.Items) != 1 {
-				framework.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items))
+				e2elog.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items))
 			}
 			node := nodeList.Items[0]

@@ -139,7 +140,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 			}

 			if ginkgo.CurrentGinkgoTestDescription().Failed {
-				framework.Failf("Current e2e test has failed, so return from here.")
+				e2elog.Failf("Current e2e test has failed, so return from here.")
 				return
 			}

@@ -156,7 +157,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {

 			ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
 			if !e2enode.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
-				framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
+				e2elog.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
 			}
 			ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied")
 			err = framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, true, time.Second*30)
@@ -188,7 +189,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 			seconds, err := getTolerationSeconds(livePod1.Spec.Tolerations)
 			framework.ExpectNoError(err)
 			if seconds != 200 {
-				framework.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds)
+				e2elog.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds)
 			}
 		})
 	})
@@ -196,7 +196,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 		timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
 		case <-timeoutChannel:
-			framework.Failf("Failed to evict Pod")
+			e2elog.Failf("Failed to evict Pod")
 		case <-observedDeletions:
 			e2elog.Logf("Noticed Pod eviction. Test successful")
 		}
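For readers skimming the NoExecuteTaintManager hunks: they all share one eviction-watch shape, a timer channel raced against a deletion-events channel in a select. A self-contained sketch of that shape; the channel feed and constants are stand-ins for the test fixture (values illustrative, not from the diff):

package main

import (
	"fmt"
	"time"
)

// Stand-ins for the suite's constants; in the real tests observedDeletions
// is fed by a pod-deletion watch.
const (
	kubeletPodDeletionDelaySeconds = 60
	additionalWaitPerDeleteSeconds = 5
)

func main() {
	observedDeletions := make(chan string, 1)
	observedDeletions <- "taint-eviction-pod" // simulate a noticed deletion

	// Arm the deadline once, then race it against the watch channel.
	timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
	select {
	case <-timeoutChannel:
		fmt.Println("Failed to evict Pod") // the tests fail here via e2elog.Failf
	case podName := <-observedDeletions:
		fmt.Printf("Noticed Pod %q eviction. Test successful\n", podName)
	}
}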
@@ -230,7 +230,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 		case <-timeoutChannel:
 			e2elog.Logf("Pod wasn't evicted. Test successful")
 		case <-observedDeletions:
-			framework.Failf("Pod was evicted despite toleration")
+			e2elog.Failf("Pod was evicted despite toleration")
 		}
 	})

@@ -263,14 +263,14 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 		case <-timeoutChannel:
 			e2elog.Logf("Pod wasn't evicted")
 		case <-observedDeletions:
-			framework.Failf("Pod was evicted despite toleration")
+			e2elog.Failf("Pod was evicted despite toleration")
 			return
 		}
 		ginkgo.By("Waiting for Pod to be deleted")
 		timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
 		case <-timeoutChannel:
-			framework.Failf("Pod wasn't evicted")
+			e2elog.Failf("Pod wasn't evicted")
 		case <-observedDeletions:
 			e2elog.Logf("Pod was evicted after toleration time run out. Test successful")
 			return
@@ -312,7 +312,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 		case <-timeoutChannel:
 			e2elog.Logf("Pod wasn't evicted. Proceeding")
 		case <-observedDeletions:
-			framework.Failf("Pod was evicted despite toleration")
+			e2elog.Failf("Pod was evicted despite toleration")
 			return
 		}
 		e2elog.Logf("Removing taint from Node")
@@ -324,7 +324,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 		case <-timeoutChannel:
 			e2elog.Logf("Pod wasn't evicted. Test successful")
 		case <-observedDeletions:
-			framework.Failf("Pod was evicted despite toleration")
+			e2elog.Failf("Pod was evicted despite toleration")
 		}
 	})
 })
@@ -383,9 +383,9 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 		select {
 		case <-timeoutChannel:
 			if evicted == 0 {
-				framework.Failf("Failed to evict Pod1.")
+				e2elog.Failf("Failed to evict Pod1.")
 			} else if evicted == 2 {
-				framework.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.")
+				e2elog.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.")
 			}
 			return
 		case podName := <-observedDeletions:
@@ -393,7 +393,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 			if podName == podGroup+"1" {
 				e2elog.Logf("Noticed Pod %q gets evicted.", podName)
 			} else if podName == podGroup+"2" {
-				framework.Failf("Unexepected Pod %q gets evicted.", podName)
+				e2elog.Failf("Unexepected Pod %q gets evicted.", podName)
 				return
 			}
 		}
@@ -418,7 +418,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 		framework.ExpectNoError(err)
 		nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]
 		if !ok {
-			framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
+			e2elog.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
 		}
 		framework.ExpectNoError(err)
 		e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName)
@@ -441,7 +441,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 		for evicted != 2 {
 			select {
 			case <-timeoutChannel:
-				framework.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted)
+				e2elog.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted)
 				return
 			case podName := <-observedDeletions:
 				e2elog.Logf("Noticed Pod %q gets evicted.", podName)
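The Multiple Pods hunk above drains observedDeletions until both pods are seen or a single shared deadline fires; the timer is created once, outside the loop, so the deadline bounds the whole wait rather than resetting per event. A runnable sketch of that drain loop; the channel feed, the 30s value, and the evicted++ bookkeeping are assumptions (the diff shows only the select):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed feed: the real test's watch handler pushes evicted pod names.
	observedDeletions := make(chan string, 2)
	observedDeletions <- "taint-eviction-b1"
	observedDeletions <- "taint-eviction-b2"

	// One timer for the whole loop: the deadline bounds the total wait.
	timeoutChannel := time.NewTimer(30 * time.Second).C

	evicted := 0
	for evicted != 2 {
		select {
		case <-timeoutChannel:
			fmt.Printf("Failed to evict all Pods. %d pod(s) is not evicted.\n", 2-evicted)
			return
		case podName := <-observedDeletions:
			fmt.Printf("Noticed Pod %q gets evicted.\n", podName)
			evicted++ // assumed bookkeeping; not visible in the hunk
		}
	}
}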
@@ -145,7 +145,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
 			e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
 			err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
 			if err != nil {
-				framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
+				e2elog.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
 			}
 		}()
 	}