Refactor and move node-related methods to framework/node package

Signed-off-by: Jiatong Wang <wangjiatong@vmware.com>
Jiatong Wang 2019-07-07 23:45:10 -07:00
parent 5de41340ce
commit 0b37152f17
19 changed files with 211 additions and 102 deletions
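The pattern repeated across these files: node helpers move out of the monolithic framework package into the new e2enode subpackage (test/e2e/framework/node), and instead of failing internally they now return an error that the call site checks with framework.ExpectNoError. A minimal sketch of a migrated call site, using only names that appear in this diff (the wrapper function itself is illustrative):

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// logNodeIPs shows the call shape after the refactor.
func logNodeIPs(f *framework.Framework) {
	// Before: nodeIPs, err := framework.GetNodePublicIps(f.ClientSet)
	nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
	framework.ExpectNoError(err)
	e2elog.Logf("node IPs: %v", nodeIPs)
}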

View File

@@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -298,7 +299,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
ginkgo.It("Kubelet should not restart containers across restart", func() {
nodeIPs, err := framework.GetNodePublicIps(f.ClientSet)
nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
framework.ExpectNoError(err)
preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
if preRestarts != 0 {

View File

@@ -28,7 +28,7 @@ import (
"strings"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -1226,7 +1226,8 @@ func deleteNodePool(name string) {
func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
nodes := make([]*v1.Node, 0, 1)
nodeList := framework.GetReadyNodesIncludingTaintedOrDie(f.ClientSet)
nodeList, err := e2enode.GetReadyNodesIncludingTaintedOrDie(f.ClientSet)
framework.ExpectNoError(err)
for _, node := range nodeList.Items {
if node.Labels[gkeNodepoolNameKey] == poolName {
nodes = append(nodes, &node)

View File

@@ -47,10 +47,11 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
nodeDeleteCandidates := framework.GetReadySchedulableNodesOrDie(c)
nodeToDelete := nodeDeleteCandidates.Items[0]
origNodes := framework.GetReadyNodesIncludingTaintedOrDie(c)
origNodes, err := e2enode.GetReadyNodesIncludingTaintedOrDie(c)
framework.ExpectNoError(err)
e2elog.Logf("Original number of ready nodes: %d", len(origNodes.Items))
err := framework.DeleteNodeOnCloudProvider(&nodeToDelete)
err = framework.DeleteNodeOnCloudProvider(&nodeToDelete)
if err != nil {
e2elog.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
}

View File

@@ -10,10 +10,13 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/system:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",

View File

@@ -21,13 +21,17 @@ import (
"net"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/system"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
@@ -48,6 +52,14 @@ const (
proxyTimeout = 2 * time.Minute
)
// PodNode is a pod-node pair indicating which node a given pod is running on
type PodNode struct {
// Pod represents pod name
Pod string
// Node represents node name
Node string
}
// FirstAddress returns the first address of the given type of each node.
// TODO: Use return type string instead of []string
func FirstAddress(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
@@ -315,3 +327,141 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string
}
return ips
}
// PickIP picks one public node IP.
func PickIP(c clientset.Interface) (string, error) {
publicIps, err := GetPublicIps(c)
if err != nil {
return "", fmt.Errorf("get node public IPs error: %s", err)
}
if len(publicIps) == 0 {
return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps))
}
ip := publicIps[0]
return ip, nil
}
// GetPublicIps returns a list of public IPs for the nodes in the cluster.
func GetPublicIps(c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodesOrDie(c)
if err != nil {
return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
}
ips := CollectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
// If ExternalIP isn't set, assume the test programs can reach the InternalIP
ips = CollectAddresses(nodes, v1.NodeInternalIP)
}
return ips, nil
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
// TODO: remove references in framework/util.go.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = waitListSchedulableNodesOrDie(c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
}
// previous tests may have caused failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
Filter(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node) && isNodeUntainted(&node)
})
return nodes, nil
}
// GetReadyNodesIncludingTaintedOrDie returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes, e.g. in tests related to nodes with GPUs
// we care about them despite the presence of the nvidia.com/gpu=present:NoSchedule taint.
func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = waitListSchedulableNodesOrDie(c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
}
Filter(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node)
})
return nodes, nil
}
// GetMasterAndWorkerNodesOrDie returns the set of master node names and a list of schedulable worker nodes.
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList, error) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, nil, fmt.Errorf("get nodes error: %s", err)
}
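// Classify every node: masters are identified by name, while any other node
// is returned only if it is schedulable and untainted.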
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes, nil
}
// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
func isNodeUntainted(node *v1.Node) bool {
fakePod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
},
},
},
}
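// Feed the node into the scheduler's NodeInfo and run the taint-toleration
// predicate against the fake pod; fit is true only when the pod tolerates
// all of the node's NoSchedule/NoExecute taints.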
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(node)
fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
if err != nil {
e2elog.Failf("Can't test predicates for node %s: %v", node.Name, err)
return false
}
return fit
}
// Node is schedulable if:
// 1) it doesn't have the "unschedulable" field set
// 2) its Ready condition is set to true
// 3) it doesn't have a NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) bool {
nodeReady := IsConditionSetAsExpected(node, v1.NodeReady, true)
networkReady := IsConditionUnset(node, v1.NodeNetworkUnavailable) ||
IsConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
return !node.Spec.Unschedulable && nodeReady && networkReady
}
// PodNodePairs returns PodNode pairs for all pods in a namespace.
func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
var result []PodNode
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return result, err
}
for _, pod := range podList.Items {
result = append(result, PodNode{
Pod: pod.Name,
Node: pod.Spec.NodeName,
})
}
return result, nil
}
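Taken together, the new e2enode surface can be exercised like this; a sketch, assuming a configured clientset (the function surveyCluster is hypothetical):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// surveyCluster is a hypothetical helper exercising the moved APIs.
func surveyCluster(c clientset.Interface, ns string) error {
	// Pick one public (or, failing that, internal) node IP.
	ip, err := e2enode.PickIP(c)
	if err != nil {
		return err
	}
	e2elog.Logf("picked node IP: %s", ip)
	// Map every pod in the namespace to the node it runs on.
	pairs, err := e2enode.PodNodePairs(c, ns)
	if err != nil {
		return err
	}
	e2elog.Logf("[pod,node] pairs in %s: %+v", ns, pairs)
	return nil
}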

View File

@@ -21,7 +21,7 @@ import (
"regexp"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
@@ -197,3 +197,12 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
}
return nodes, nil
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes that supports retries.
func waitListSchedulableNodesOrDie(c clientset.Interface) (*v1.NodeList, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for e2e cluster", err)
}
return nodes, nil
}

View File

@@ -116,14 +116,6 @@ type ServiceTestJig struct {
Labels map[string]string
}
// PodNode is a pod-node pair indicating which node a given pod is running on
type PodNode struct {
// Pod represents pod name
Pod string
// Node represents node name
Node string
}
// NewServiceTestJig allocates and inits a new ServiceTestJig.
func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig {
j := &ServiceTestJig{}
@@ -325,48 +317,6 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string
return svc
}
// GetNodePublicIps returns a public IP list of nodes.
func GetNodePublicIps(c clientset.Interface) ([]string, error) {
nodes := GetReadySchedulableNodesOrDie(c)
ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
// If ExternalIP isn't set, assume the test programs can reach the InternalIP
ips = e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
}
return ips, nil
}
// PickNodeIP picks one public node IP
func PickNodeIP(c clientset.Interface) string {
publicIps, err := GetNodePublicIps(c)
ExpectNoError(err)
if len(publicIps) == 0 {
e2elog.Failf("got unexpected number (%d) of public IPs", len(publicIps))
}
ip := publicIps[0]
return ip
}
// PodNodePairs return PodNode pairs for all pods in a namespace
func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
var result []PodNode
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return result, err
}
for _, pod := range podList.Items {
result = append(result, PodNode{
Pod: pod.Name,
Node: pod.Spec.NodeName,
})
}
return result, nil
}
// GetEndpointNodes returns a map of nodenames:external-ip on which the
// endpoints of the given Service are running.
func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {

View File

@@ -79,7 +79,6 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -1943,6 +1942,7 @@ func isNodeUntainted(node *v1.Node) bool {
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
// TODO: remove this function once all references point to e2enode.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
// previous tests may have caused failures of some nodes. Let's skip
@@ -1953,18 +1953,6 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
return nodes
}
// GetReadyNodesIncludingTaintedOrDie returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes
// E.g. in tests related to nodes with gpu we care about nodes despite
// presence of nvidia.com/gpu=present:NoSchedule taint
func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
e2enode.Filter(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node)
})
return nodes
}
// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
@@ -3046,22 +3034,6 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
return len(scheduledPods)
}
// GetMasterAndWorkerNodesOrDie will return a list masters and schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
ExpectNoError(err)
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes
}
// ListNamespaceEvents lists the events in the given namespace.
func ListNamespaceEvents(c clientset.Interface, ns string) error {
ls, err := c.CoreV1().Events(ns).List(metav1.ListOptions{})

View File

@@ -498,7 +498,8 @@ var _ = SIGDescribe("Services", func() {
ns := f.Namespace.Name
jig := framework.NewServiceTestJig(cs, serviceName)
nodeIP := framework.PickNodeIP(jig.Client) // for later
nodeIP, err := e2enode.PickIP(jig.Client) // for later
framework.ExpectNoError(err)
ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
@@ -554,7 +555,8 @@ var _ = SIGDescribe("Services", func() {
e2elog.Logf("namespace for UDP test: %s", ns2)
jig := framework.NewServiceTestJig(cs, serviceName)
nodeIP := framework.PickNodeIP(jig.Client) // for later
nodeIP, err := e2enode.PickIP(jig.Client) // for later
framework.ExpectNoError(err)
// Test TCP and UDP Services. Services with the same name in different
// namespaces should get different node ports and load balancers.
@@ -2416,7 +2418,7 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault)
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
defer func() {
podNodePairs, err := framework.PodNodePairs(cs, ns)
podNodePairs, err := e2enode.PodNodePairs(cs, ns)
e2elog.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
framework.StopServeHostnameService(cs, ns, serviceName)
lb := cloudprovider.DefaultLoadBalancerName(svc)

View File

@@ -40,6 +40,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/timer:go_default_library",
"//test/utils:go_default_library",

View File

@@ -53,6 +53,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
@@ -502,6 +503,7 @@ var _ = SIGDescribe("Density", func() {
f.NamespaceDeletionTimeout = time.Hour
ginkgo.BeforeEach(func() {
var err error
c = f.ClientSet
ns = f.Namespace.Name
testPhaseDurations = timer.NewTestPhaseTimer()
@@ -518,7 +520,8 @@
},
})
_, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
_, nodes, err = e2enode.GetMasterAndWorkerNodesOrDie(c)
framework.ExpectNoError(err)
nodeCount = len(nodes.Items)
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
@@ -529,7 +532,7 @@ var _ = SIGDescribe("Density", func() {
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespace to be finally deleted before starting this test.
err := framework.CheckTestingNSDeletedExcept(c, ns)
err = framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
uuid = string(utiluuid.NewUUID())

View File

@@ -48,6 +48,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var masterNodes sets.String
var systemPodsNo int
var ns string
var err error
f := framework.NewDefaultFramework("equivalence-cache")
ginkgo.BeforeEach(func() {
@@ -55,7 +56,8 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
ns = f.Namespace.Name
e2enode.WaitForTotalHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodesOrDie(cs)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.CheckTestingNSDeletedExcept(cs, ns))

View File

@@ -20,7 +20,7 @@ import (
"fmt"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -82,11 +83,13 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
var err error
framework.AllNodesReady(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodesOrDie(cs)
framework.ExpectNoError(err)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
err = framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
for _, node := range nodeList.Items {

View File

@@ -41,6 +41,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
// ensure libs have a chance to initialize
_ "github.com/stretchr/testify/assert"
)
@@ -76,15 +77,17 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
var err error
for _, pair := range priorityPairs {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value})
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
}
e2enode.WaitForTotalHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodesOrDie(cs)
framework.ExpectNoError(err)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
err = framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
})

View File

@@ -27,7 +27,7 @@ import (
// ensure libs have a chance to initialize
_ "github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -77,11 +77,13 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
var err error
e2enode.WaitForTotalHealthy(cs, time.Minute)
_, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
_, nodeList, err = e2enode.GetMasterAndWorkerNodesOrDie(cs)
framework.ExpectNoError(err)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
err = framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
err = e2epod.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err)

View File

@@ -54,6 +54,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",

View File

@@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -110,7 +111,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
if !(len(nodeList.Items) > 0) {
e2elog.Failf("Unable to find ready and schedulable Node")
}
masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client)
masternodes, _, err := e2enode.GetMasterAndWorkerNodesOrDie(client)
framework.ExpectNoError(err)
gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
masterNode = masternodes.List()[0]
})

View File

@@ -33,6 +33,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

View File

@@ -22,6 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
)
@@ -41,7 +42,8 @@ var _ = SIGDescribe("Services", func() {
ns := f.Namespace.Name
jig := framework.NewServiceTestJig(cs, serviceName)
nodeIP := framework.PickNodeIP(jig.Client)
nodeIP, err := e2enode.PickIP(jig.Client)
framework.ExpectNoError(err)
ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {