Fix error log issue, remove OrDie suffix in methods naming
This commit is contained in:
parent 0b37152f17
commit e24a962821
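The substance of the change, repeated throughout the diff below, is twofold: node helpers that now return an error drop the misleading OrDie suffix (e.g. GetReadyNodesIncludingTaintedOrDie becomes GetReadyNodesIncludingTainted), and call sites replace the bare framework.ExpectNoError(err) with a pattern that first logs the unexpected error and then asserts via the offset-aware helper. A minimal sketch of the resulting call-site shape is shown here; the wrapper function listReadyNodes and the package name are hypothetical, while the framework calls (e2enode.GetReadyNodesIncludingTainted, e2elog.Logf, framework.ExpectNoErrorWithOffset) are the ones introduced or used in this commit.

package e2esketch

import (
    clientset "k8s.io/client-go/kubernetes"

    "k8s.io/kubernetes/test/e2e/framework"
    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// listReadyNodes is a hypothetical helper illustrating the post-commit pattern:
// the renamed framework function returns an error, the error is logged for
// context, and the test still fails through the offset-aware assertion.
func listReadyNodes(c clientset.Interface) {
    nodes, err := e2enode.GetReadyNodesIncludingTainted(c)
    if err != nil {
        e2elog.Logf("Unexpected error occurred: %v", err)
    }
    // TODO: write a wrapper for ExpectNoErrorWithOffset()
    framework.ExpectNoErrorWithOffset(0, err)
    e2elog.Logf("Found %d ready nodes (including tainted)", len(nodes.Items))
}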
@@ -300,7 +300,11 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
     ginkgo.It("Kubelet should not restart containers across restart", func() {
         nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
-        framework.ExpectNoError(err)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
         preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
         if preRestarts != 0 {
             e2elog.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
@@ -1226,8 +1226,12 @@ func deleteNodePool(name string) {
 
 func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
     nodes := make([]*v1.Node, 0, 1)
-    nodeList, err := e2enode.GetReadyNodesIncludingTaintedOrDie(f.ClientSet)
-    framework.ExpectNoError(err)
+    nodeList, err := e2enode.GetReadyNodesIncludingTainted(f.ClientSet)
+    if err != nil {
+        e2elog.Logf("Unexpected error occurred: %v", err)
+    }
+    // TODO: write a wrapper for ExpectNoErrorWithOffset()
+    framework.ExpectNoErrorWithOffset(0, err)
     for _, node := range nodeList.Items {
         if node.Labels[gkeNodepoolNameKey] == poolName {
             nodes = append(nodes, &node)
@@ -47,8 +47,13 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
         nodeDeleteCandidates := framework.GetReadySchedulableNodesOrDie(c)
         nodeToDelete := nodeDeleteCandidates.Items[0]
 
-        origNodes, err := e2enode.GetReadyNodesIncludingTaintedOrDie(c)
-        framework.ExpectNoError(err)
+        origNodes, err := e2enode.GetReadyNodesIncludingTainted(c)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
+
         e2elog.Logf("Original number of ready nodes: %d", len(origNodes.Items))
 
         err = framework.DeleteNodeOnCloudProvider(&nodeToDelete)
@@ -360,8 +360,9 @@ func GetPublicIps(c clientset.Interface) ([]string, error) {
 // 2) Needs to be ready.
 // If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
 // TODO: remove references in framework/util.go.
+// TODO: remove "OrDie" suffix.
 func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
-    nodes, err = waitListSchedulableNodesOrDie(c)
+    nodes, err = checkWaitListSchedulableNodes(c)
     if err != nil {
         return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
     }
@@ -373,12 +374,12 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
     return nodes, nil
 }
 
-// GetReadyNodesIncludingTaintedOrDie returns all ready nodes, even those which are tainted.
+// GetReadyNodesIncludingTainted returns all ready nodes, even those which are tainted.
 // There are cases when we care about tainted nodes
 // E.g. in tests related to nodes with gpu we care about nodes despite
 // presence of nvidia.com/gpu=present:NoSchedule taint
-func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
-    nodes, err = waitListSchedulableNodesOrDie(c)
+func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, err error) {
+    nodes, err = checkWaitListSchedulableNodes(c)
     if err != nil {
         return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
     }
@@ -388,8 +389,8 @@ func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
     return nodes, nil
 }
 
-// GetMasterAndWorkerNodesOrDie will return a list masters and schedulable worker nodes
-func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList, error) {
+// GetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
+func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
     nodes := &v1.NodeList{}
     masters := sets.NewString()
     all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
@@ -407,6 +408,7 @@ func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList, error) {
 }
 
 // Test whether a fake pod can be scheduled on "node", given its current taints.
+// TODO: need to discuss wether to return bool and error type
 func isNodeUntainted(node *v1.Node) bool {
     fakePod := &v1.Pod{
         TypeMeta: metav1.TypeMeta{
@@ -198,8 +198,8 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
     return nodes, nil
 }
 
-// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
-func waitListSchedulableNodesOrDie(c clientset.Interface) (*v1.NodeList, error) {
+// checkWaitListSchedulableNodes is a wrapper around listing nodes supporting retries.
+func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
     nodes, err := waitListSchedulableNodes(c)
     if err != nil {
         return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for e2e cluster", err)
@@ -499,7 +499,11 @@ var _ = SIGDescribe("Services", func() {
 
         jig := framework.NewServiceTestJig(cs, serviceName)
         nodeIP, err := e2enode.PickIP(jig.Client) // for later
-        framework.ExpectNoError(err)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
 
         ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
         service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
@@ -556,7 +560,11 @@ var _ = SIGDescribe("Services", func() {
 
         jig := framework.NewServiceTestJig(cs, serviceName)
         nodeIP, err := e2enode.PickIP(jig.Client) // for later
-        framework.ExpectNoError(err)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
 
         // Test TCP and UDP Services. Services with the same name in different
         // namespaces should get different node ports and load balancers.
@@ -520,8 +520,12 @@ var _ = SIGDescribe("Density", func() {
             },
         })
 
-        _, nodes, err = e2enode.GetMasterAndWorkerNodesOrDie(c)
-        framework.ExpectNoError(err)
+        _, nodes, err = e2enode.GetMasterAndWorkerNodes(c)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
         nodeCount = len(nodes.Items)
         gomega.Expect(nodeCount).NotTo(gomega.BeZero())
 
@@ -56,8 +56,12 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
         ns = f.Namespace.Name
 
         e2enode.WaitForTotalHealthy(cs, time.Minute)
-        masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodesOrDie(cs)
-        framework.ExpectNoError(err)
+        masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
 
         framework.ExpectNoError(framework.CheckTestingNSDeletedExcept(cs, ns))
 
@@ -86,8 +86,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         var err error
 
         framework.AllNodesReady(cs, time.Minute)
-        masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodesOrDie(cs)
-        framework.ExpectNoError(err)
+        masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
 
         err = framework.CheckTestingNSDeletedExcept(cs, ns)
         framework.ExpectNoError(err)
@@ -84,8 +84,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
         }
 
         e2enode.WaitForTotalHealthy(cs, time.Minute)
-        masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodesOrDie(cs)
-        framework.ExpectNoError(err)
+        masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
 
         err = framework.CheckTestingNSDeletedExcept(cs, ns)
         framework.ExpectNoError(err)
@@ -80,8 +80,12 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
         var err error
 
         e2enode.WaitForTotalHealthy(cs, time.Minute)
-        _, nodeList, err = e2enode.GetMasterAndWorkerNodesOrDie(cs)
-        framework.ExpectNoError(err)
+        _, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
 
         err = framework.CheckTestingNSDeletedExcept(cs, ns)
         framework.ExpectNoError(err)
@@ -111,8 +111,12 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() {
         if !(len(nodeList.Items) > 0) {
             e2elog.Failf("Unable to find ready and schedulable Node")
         }
-        masternodes, _, err := e2enode.GetMasterAndWorkerNodesOrDie(client)
-        framework.ExpectNoError(err)
+        masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
         gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
         masterNode = masternodes.List()[0]
     })
@@ -22,6 +22,7 @@ import (
     v1 "k8s.io/api/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 
     "github.com/onsi/ginkgo"
@@ -43,7 +44,11 @@ var _ = SIGDescribe("Services", func() {
 
         jig := framework.NewServiceTestJig(cs, serviceName)
         nodeIP, err := e2enode.PickIP(jig.Client)
-        framework.ExpectNoError(err)
+        if err != nil {
+            e2elog.Logf("Unexpected error occurred: %v", err)
+        }
+        // TODO: write a wrapper for ExpectNoErrorWithOffset()
+        framework.ExpectNoErrorWithOffset(0, err)
 
         ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
         service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {