Use clientset in GetReadySchedulableNodesOrDie

gmarek 2016-10-19 15:55:39 +02:00
parent 8290366a8f
commit f08f751831
27 changed files with 97 additions and 83 deletions

View File

@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 c = f.Client
 framework.SkipUnlessProviderIs("gce", "gke")
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)
 Expect(nodeCount).NotTo(BeZero())
 cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]

View File

@@ -22,6 +22,7 @@ import (
 "strings"
 "k8s.io/kubernetes/pkg/api"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/chaosmonkey"
@@ -59,7 +60,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 v, err := realVersion(framework.TestContext.UpgradeTarget)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
-framework.ExpectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -73,7 +74,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 v, err := realVersion(framework.TestContext.UpgradeTarget)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
-framework.ExpectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -91,7 +92,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 framework.ExpectNoError(framework.MasterUpgrade(v))
 framework.ExpectNoError(checkMasterVersion(f.Client, v))
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
-framework.ExpectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -107,7 +108,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 framework.ExpectNoError(framework.MasterUpgrade(v))
 framework.ExpectNoError(checkMasterVersion(f.Client, v))
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
-framework.ExpectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -146,7 +147,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
 // Setup
 serviceName := "service-test"
-jig := NewServiceTestJig(f.Client, serviceName)
+jig := NewServiceTestJig(f.Client, f.ClientSet, serviceName)
 // nodeIP := pickNodeIP(jig.Client) // for later
 By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name)
@@ -209,8 +210,8 @@ func checkMasterVersion(c *client.Client, want string) error {
 return nil
 }
-func checkNodesVersions(c *client.Client, want string) error {
+func checkNodesVersions(cs clientset.Interface, want string) error {
-l := framework.GetReadySchedulableNodesOrDie(c)
+l := framework.GetReadySchedulableNodesOrDie(cs)
 for _, n := range l.Items {
 // We do prefix trimming and then matching because:
 // want looks like: 0.19.3-815-g50e67d4

View File

@@ -300,7 +300,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 It("Kubelet should not restart containers across restart", func() {
-nodeIPs, err := getNodePublicIps(f.Client)
+nodeIPs, err := getNodePublicIps(f.ClientSet)
 framework.ExpectNoError(err)
 preRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector)
 if preRestarts != 0 {

View File

@@ -27,6 +27,7 @@ import (
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
 "k8s.io/kubernetes/pkg/apis/extensions"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/kubectl"
 "k8s.io/kubernetes/pkg/labels"
@@ -68,7 +69,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 } else {
 framework.Logf("unable to dump pods: %v", err)
 }
-err := clearDaemonSetNodeLabels(f.Client)
+err := clearDaemonSetNodeLabels(f.Client, f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 })
@@ -83,7 +84,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 BeforeEach(func() {
 ns = f.Namespace.Name
 c = f.Client
-err := clearDaemonSetNodeLabels(c)
+err := clearDaemonSetNodeLabels(c, f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 })
@@ -180,7 +181,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
 By("Change label of node, check that daemon pod is launched.")
-nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
 Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -248,7 +249,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
 By("Change label of node, check that daemon pod is launched.")
-nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
 Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -284,8 +285,8 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
 return daemonSetLabels, otherLabels
 }
-func clearDaemonSetNodeLabels(c *client.Client) error {
+func clearDaemonSetNodeLabels(c *client.Client, cs clientset.Interface) error {
-nodeList := framework.GetReadySchedulableNodesOrDie(c)
+nodeList := framework.GetReadySchedulableNodesOrDie(cs)
 for _, node := range nodeList.Items {
 _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
 if err != nil {

View File

@@ -309,7 +309,7 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volume
 func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volumeMounts []api.VolumeMount, podCount int32) {
 rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
-nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 targetNode := nodeList.Items[0]

View File

@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
 f := framework.NewDefaultFramework("petstore")
 It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)
 loadGenerators := nodeCount

View File

@@ -261,7 +261,7 @@ func (f *Framework) BeforeEach() {
 f.logsSizeWaitGroup = sync.WaitGroup{}
 f.logsSizeWaitGroup.Add(1)
 f.logsSizeCloseChannel = make(chan bool)
-f.logsSizeVerifier = NewLogsVerifier(f.Client, f.logsSizeCloseChannel)
+f.logsSizeVerifier = NewLogsVerifier(f.Client, f.ClientSet, f.logsSizeCloseChannel)
 go func() {
 f.logsSizeVerifier.Run()
 f.logsSizeWaitGroup.Done()
@@ -659,7 +659,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 // CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string {
-nodes := GetReadySchedulableNodesOrDie(f.Client)
+nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
 labels := map[string]string{
 "app": appName + "-pod",
 }

View File

@@ -25,6 +25,7 @@ import (
 "text/tabwriter"
 "time"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 )
@@ -65,6 +66,7 @@ type LogSizeGatherer struct {
 // It oversees a <workersNo> workers which do the gathering.
 type LogsSizeVerifier struct {
 client *client.Client
+clientset clientset.Interface
 stopChannel chan bool
 // data stores LogSizeData groupped per IP and log_path
 data *LogsSizeData
@@ -142,8 +144,8 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int
 }
 // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
-func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier {
+func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
-nodeAddresses, err := NodeSSHHosts(c)
+nodeAddresses, err := NodeSSHHosts(cs)
 ExpectNoError(err)
 masterAddress := GetMasterHost() + ":22"
@@ -152,6 +154,7 @@ func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier
 verifier := &LogsSizeVerifier{
 client: c,
+clientset: cs,
 stopChannel: stopChannel,
 data: prepareData(masterAddress, nodeAddresses),
 masterAddress: masterAddress,

View File

@@ -432,7 +432,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 By("Getting node addresses")
 ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
-nodeList := GetReadySchedulableNodesOrDie(config.f.Client)
+nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
 config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
 if len(config.ExternalAddrs) < 2 {
 // fall back to legacy IPs
@@ -483,7 +483,7 @@ func shuffleNodes(nodes []api.Node) []api.Node {
 func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
 ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
-nodeList := GetReadySchedulableNodesOrDie(config.f.Client)
+nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
 // To make this test work reasonably fast in large clusters,
 // we limit the number of NetProxyPods to no more than 100 ones

View File

@@ -2336,11 +2336,11 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
 }
 // waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
-func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+func waitListSchedulableNodesOrDie(c clientset.Interface) *api.NodeList {
 var nodes *api.NodeList
 var err error
 if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
-nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+nodes, err = c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
 "spec.unschedulable": "false",
 }.AsSelector()})
 return err == nil, nil
@@ -2365,7 +2365,7 @@ func isNodeSchedulable(node *api.Node) bool {
 // 1) Needs to be schedulable.
 // 2) Needs to be ready.
 // If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
-func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
+func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *api.NodeList) {
 nodes = waitListSchedulableNodesOrDie(c)
 // previous tests may have cause failures of some nodes. Let's skip
 // 'Not Ready' nodes, just in case (there is no need to fail the test).
@@ -3254,7 +3254,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
 // NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
 // It returns an error if it can't find an external IP for every node, though it still returns all
 // hosts that it found in that case.
-func NodeSSHHosts(c *client.Client) ([]string, error) {
+func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 nodelist := waitListSchedulableNodesOrDie(c)
 // TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
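
In short, the framework's node-listing helpers now take clientset.Interface and list nodes through c.Core().Nodes(), so callers hand them f.ClientSet instead of f.Client. A minimal sketch of the migrated call pattern, assuming the framework import paths shown elsewhere in this diff (exampleListReadyNodes is a hypothetical helper, not part of the commit):

// A sketch only: exampleListReadyNodes is hypothetical and not part of this commit.
package e2e_example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"
)

// exampleListReadyNodes returns the ready, schedulable nodes using the
// clientset-based helper as changed by this commit.
func exampleListReadyNodes(f *framework.Framework) []api.Node {
	// GetReadySchedulableNodesOrDie now expects clientset.Interface,
	// which lists nodes via c.Core().Nodes().List(...).
	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	return nodes.Items
}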

View File

@@ -98,7 +98,7 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
 // label with the given node pool name.
 func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
 nodeCount := 0
-nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 for _, node := range nodeList.Items {
 if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
 nodeCount++

View File

@@ -137,7 +137,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
 BeforeEach(func() {
 c = f.Client
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 numNodes = len(nodes.Items)
 nodeNames = sets.NewString()
 // If there are a lot of nodes, we don't want to use all of them

View File

@@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
 if err := framework.WaitForPodsSuccess(f.Client, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
 framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout)
 }
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeNames = sets.NewString()
 for _, node := range nodes.Items {
 nodeNames.Insert(node.Name)

View File

@@ -107,7 +107,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))
 ns = f.Namespace.Name
-nodes := framework.GetReadySchedulableNodesOrDie(c)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)
 Expect(nodeCount).NotTo(BeZero())

View File

@@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Logging soak [Performance] [Slow] [Disruptive]",
 // was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
 func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 totalPods := len(nodes.Items)
 Expect(totalPods).NotTo(Equal(0))

View File

@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
 client := f.Client
 framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")
-nodelist := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodelist := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 const ns = "static-pods"
 numpods := int32(len(nodelist.Items))

View File

@@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
 It("should grab all metrics from a Kubelet.", func() {
 By("Proxying to Node through the API server")
-nodes := framework.GetReadySchedulableNodesOrDie(c)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(nodes.Items).NotTo(BeEmpty())
 response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
 framework.ExpectNoError(err)

View File

@@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
 maxBandwidthBits := gceBandwidthBitsEstimate
 It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 totalPods := len(nodes.Items)
 // for a single service, we expect to divide bandwidth between the network. Very crude estimate.
 expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
@@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
 // Calculate expected number of clients based on total nodes.
 expectedCli := func() int {
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
 }()

View File

@@ -24,6 +24,7 @@ import (
 cadvisorapi "github.com/google/cadvisor/info/v1"
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/resource"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -67,13 +68,15 @@ const (
 // Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way.
 var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
 var c *client.Client
+var cs clientset.Interface
 var unfilledNodeName, recoveredNodeName string
 f := framework.NewDefaultFramework("node-outofdisk")
 BeforeEach(func() {
 c = f.Client
+cs = f.ClientSet
-nodelist := framework.GetReadySchedulableNodesOrDie(c)
+nodelist := framework.GetReadySchedulableNodesOrDie(cs)
 // Skip this test on small clusters. No need to fail since it is not a use
 // case that any cluster of small size needs to support.
@@ -87,7 +90,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 AfterEach(func() {
-nodelist := framework.GetReadySchedulableNodesOrDie(c)
+nodelist := framework.GetReadySchedulableNodesOrDie(cs)
 Expect(len(nodelist.Items)).ToNot(BeZero())
 for _, node := range nodelist.Items {
 if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,7 +153,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 }
 })
-nodelist := framework.GetReadySchedulableNodesOrDie(c)
+nodelist := framework.GetReadySchedulableNodesOrDie(cs)
 Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
 nodeToRecover := nodelist.Items[1]

View File

@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
 podClient = f.Client.Pods(f.Namespace.Name)
 nodeClient = f.Client.Nodes()
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")

View File

@@ -285,7 +285,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(
 It("should recreate evicted petset", func() {
 By("looking for a node to schedule pet set and pod")
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 node := nodes.Items[0]
 By("creating pod with conflicting port in namespace " + f.Namespace.Name)

View File

@@ -27,7 +27,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/util/intstr"
 "k8s.io/kubernetes/pkg/util/net"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -300,9 +300,9 @@ func truncate(b []byte, maxLen int) []byte {
 return b2
 }
-func pickNode(c *client.Client) (string, error) {
+func pickNode(cs clientset.Interface) (string, error) {
 // TODO: investigate why it doesn't work on master Node.
-nodes := framework.GetReadySchedulableNodesOrDie(c)
+nodes := framework.GetReadySchedulableNodesOrDie(cs)
 if len(nodes.Items) == 0 {
 return "", fmt.Errorf("no nodes exist, can't test node proxy")
 }
@@ -310,7 +310,7 @@ func pickNode(c *client.Client) (string, error) {
 }
 func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
-node, err := pickNode(f.Client)
+node, err := pickNode(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 // TODO: Change it to test whether all requests succeeded when requests
 // not reaching Kubelet issue is debugged.

View File

@@ -22,6 +22,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/labels"
@@ -89,32 +90,32 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 It("each node by ordering clean reboot and ensure they function upon restart", func() {
 // clean shutdown and restart
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &")
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &")
 })
 It("each node by ordering unclean reboot and ensure they function upon restart", func() {
 // unclean shutdown and restart
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
 })
 It("each node by triggering kernel panic and ensure they function upon restart", func() {
 // kernel panic
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
 })
 It("each node by switching off the network interface and ensure they function upon switch on", func() {
 // switch the network interface off for a while to simulate a network outage
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &")
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &")
 })
 It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
 // tell the firewall to drop all inbound packets for a while
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets.
 // We still accept packages send from localhost to prevent monit from restarting kubelet.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+
 " sleep 120 && sudo iptables -D INPUT -j DROP && sudo iptables -D INPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &")
 })
@@ -122,14 +123,14 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 // tell the firewall to drop all outbound packets for a while
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets.
 // We still accept packages send to localhost to prevent monit from restarting kubelet.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+
 " sleep 120 && sudo iptables -D OUTPUT -j DROP && sudo iptables -D OUTPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &")
 })
 })
-func testReboot(c *client.Client, rebootCmd string) {
+func testReboot(c *client.Client, cs clientset.Interface, rebootCmd string) {
 // Get all nodes, and kick off the test on each.
-nodelist := framework.GetReadySchedulableNodesOrDie(c)
+nodelist := framework.GetReadySchedulableNodesOrDie(cs)
 result := make([]bool, len(nodelist.Items))
 wg := sync.WaitGroup{}
 wg.Add(len(nodelist.Items))

View File

@@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
 BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce", "gke")
 ns = f.Namespace.Name
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount := len(nodes.Items)
 Expect(nodeCount).NotTo(BeZero())

View File

@@ -33,7 +33,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/service"
-"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/controller/endpoint"
 "k8s.io/kubernetes/pkg/labels"
@@ -74,9 +74,11 @@ var _ = framework.KubeDescribe("Services", func() {
 f := framework.NewDefaultFramework("services")
 var c *client.Client
+var cs clientset.Interface
 BeforeEach(func() {
 c = f.Client
+cs = f.ClientSet
 })
 // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
@@ -237,7 +239,7 @@ var _ = framework.KubeDescribe("Services", func() {
 ns := f.Namespace.Name
 By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
-jig := NewServiceTestJig(c, serviceName)
+jig := NewServiceTestJig(c, cs, serviceName)
 servicePort := 8080
 tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
 jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP)
@@ -250,7 +252,7 @@ var _ = framework.KubeDescribe("Services", func() {
 framework.Logf("sourceip-test cluster ip: %s", serviceIp)
 By("Picking multiple nodes")
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 if len(nodes.Items) == 1 {
 framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)
@@ -296,7 +298,7 @@ var _ = framework.KubeDescribe("Services", func() {
 podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
 Expect(err).NotTo(HaveOccurred())
-hosts, err := framework.NodeSSHHosts(c)
+hosts, err := framework.NodeSSHHosts(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 if len(hosts) == 0 {
 framework.Failf("No ssh-able nodes")
@@ -356,7 +358,7 @@ var _ = framework.KubeDescribe("Services", func() {
 framework.Failf("VIPs conflict: %v", svc1IP)
 }
-hosts, err := framework.NodeSSHHosts(c)
+hosts, err := framework.NodeSSHHosts(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 if len(hosts) == 0 {
 framework.Failf("No ssh-able nodes")
@@ -397,7 +399,7 @@ var _ = framework.KubeDescribe("Services", func() {
 podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
 Expect(err).NotTo(HaveOccurred())
-hosts, err := framework.NodeSSHHosts(c)
+hosts, err := framework.NodeSSHHosts(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 if len(hosts) == 0 {
 framework.Failf("No ssh-able nodes")
@@ -436,8 +438,8 @@ var _ = framework.KubeDescribe("Services", func() {
 serviceName := "nodeport-test"
 ns := f.Namespace.Name
-jig := NewServiceTestJig(c, serviceName)
+jig := NewServiceTestJig(c, cs, serviceName)
-nodeIP := pickNodeIP(jig.Client) // for later
+nodeIP := pickNodeIP(jig.ClientSet) // for later
 By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
 service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) {
@@ -475,7 +477,7 @@ var _ = framework.KubeDescribe("Services", func() {
 }
 loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault
 largeClusterMinNodesNumber := 100
-if nodes := framework.GetReadySchedulableNodesOrDie(c); len(nodes.Items) > largeClusterMinNodesNumber {
+if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber {
 loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge
 }
@@ -492,8 +494,8 @@ var _ = framework.KubeDescribe("Services", func() {
 ns2 := namespacePtr.Name // LB2 in ns2 on UDP
 framework.Logf("namespace for UDP test: %s", ns2)
-jig := NewServiceTestJig(c, serviceName)
+jig := NewServiceTestJig(c, cs, serviceName)
-nodeIP := pickNodeIP(jig.Client) // for later
+nodeIP := pickNodeIP(jig.ClientSet) // for later
 // Test TCP and UDP Services. Services with the same name in different
 // namespaces should get different node ports and load balancers.
@@ -1078,12 +1080,12 @@ var _ = framework.KubeDescribe("Services", func() {
 loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault
 largeClusterMinNodesNumber := 100
-if nodes := framework.GetReadySchedulableNodesOrDie(c); len(nodes.Items) > largeClusterMinNodesNumber {
+if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber {
 loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge
 }
 namespace := f.Namespace.Name
 serviceName := "external-local"
-jig := NewServiceTestJig(c, serviceName)
+jig := NewServiceTestJig(c, cs, serviceName)
 By("creating a service " + namespace + "/" + namespace + " with type=LoadBalancer and annotation for local-traffic-only")
 svc := jig.CreateTCPServiceOrFail(namespace, func(svc *api.Service) {
 svc.Spec.Type = api.ServiceTypeLoadBalancer
@@ -1132,7 +1134,7 @@ var _ = framework.KubeDescribe("Services", func() {
 framework.Logf("Pod for service %s/%s is on node %s", namespace, serviceName, readyHostName)
 // HealthCheck responder validation - iterate over all node IPs and check their HC responses
 // Collect all node names and their public IPs - the nodes and ips slices parallel each other
-nodes := framework.GetReadySchedulableNodesOrDie(jig.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(jig.ClientSet)
 ips := collectAddresses(nodes, api.NodeExternalIP)
 if len(ips) == 0 {
 ips = collectAddresses(nodes, api.NodeLegacyHostIP)
@@ -1384,8 +1386,8 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
 return ips
 }
-func getNodePublicIps(c *client.Client) ([]string, error) {
+func getNodePublicIps(cs clientset.Interface) ([]string, error) {
-nodes := framework.GetReadySchedulableNodesOrDie(c)
+nodes := framework.GetReadySchedulableNodesOrDie(cs)
 ips := collectAddresses(nodes, api.NodeExternalIP)
 if len(ips) == 0 {
@@ -1394,8 +1396,8 @@ func getNodePublicIps(c *client.Client) ([]string, error) {
 return ips, nil
 }
-func pickNodeIP(c *client.Client) string {
+func pickNodeIP(cs clientset.Interface) string {
-publicIps, err := getNodePublicIps(c)
+publicIps, err := getNodePublicIps(cs)
 Expect(err).NotTo(HaveOccurred())
 if len(publicIps) == 0 {
 framework.Failf("got unexpected number (%d) of public IPs", len(publicIps))
@@ -1641,7 +1643,7 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas
 return podNames, serviceIP, nil
 }
-func stopServeHostnameService(c *client.Client, clientset internalclientset.Interface, ns, name string) error {
+func stopServeHostnameService(c *client.Client, clientset clientset.Interface, ns, name string) error {
 if err := framework.DeleteRCAndPods(c, clientset, ns, name); err != nil {
 return err
 }
@@ -1769,13 +1771,15 @@ type ServiceTestJig struct {
 ID string
 Name string
 Client *client.Client
+ClientSet clientset.Interface
 Labels map[string]string
 }
 // NewServiceTestJig allocates and inits a new ServiceTestJig.
-func NewServiceTestJig(client *client.Client, name string) *ServiceTestJig {
+func NewServiceTestJig(client *client.Client, cs clientset.Interface, name string) *ServiceTestJig {
 j := &ServiceTestJig{}
 j.Client = client
+j.ClientSet = cs
 j.Name = name
 j.ID = j.Name + "-" + string(uuid.NewUUID())
 j.Labels = map[string]string{"testid": j.ID}
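
With the jig now carrying both clients, construction in the service tests takes the clientset as a second argument; a hypothetical caller after this change might look like the fragment below (the service name is illustrative, not from the commit):

// Hypothetical usage, not part of the commit: the jig keeps the unversioned
// client for its existing calls and uses the clientset for node listing
// (e.g. pickNodeIP).
jig := NewServiceTestJig(f.Client, f.ClientSet, "example-svc")
nodeIP := pickNodeIP(jig.ClientSet)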

View File

@@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("SSH", func() {
 It("should SSH to all nodes and run commands", func() {
 // Get all nodes' external IPs.
 By("Getting all nodes' SSH-able IP addresses")
-hosts, err := framework.NodeSSHHosts(f.Client)
+hosts, err := framework.NodeSSHHosts(f.ClientSet)
 if err != nil {
 framework.Failf("Error getting node hostnames: %v", err)
 }

View File

@@ -27,6 +27,7 @@ import (
 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 )
@@ -152,7 +153,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 // The node should have disk pressure condition after the pods are evicted.
 if !nodeDiskPressureCondition {
-if !nodeHasDiskPressure(f.Client) {
+if !nodeHasDiskPressure(f.ClientSet) {
 return fmt.Errorf("expected disk pressure condition is not set")
 }
 nodeDiskPressureCondition = true
@@ -161,7 +162,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 // After eviction happens the pod is evicted so eventually the node disk pressure should be relieved.
 if !podRescheduleable {
-if nodeHasDiskPressure(f.Client) {
+if nodeHasDiskPressure(f.ClientSet) {
 return fmt.Errorf("expected disk pressure condition relief has not happened")
 }
 createIdlePod(verifyPodName, podClient)
@@ -212,8 +213,8 @@ func verifyPodEviction(podData *api.Pod) error {
 return nil
 }
-func nodeHasDiskPressure(c *client.Client) bool {
+func nodeHasDiskPressure(cs clientset.Interface) bool {
-nodeList := framework.GetReadySchedulableNodesOrDie(c)
+nodeList := framework.GetReadySchedulableNodesOrDie(cs)
 for _, condition := range nodeList.Items[0].Status.Conditions {
 if condition.Type == api.NodeDiskPressure {
 return condition.Status == api.ConditionTrue