Merge pull request #14957 from thockin/services-e2e

Services e2e cleanup
Alex Robinson 2015-10-05 14:56:19 -07:00
commit 4856c7c033
3 changed files with 260 additions and 417 deletions


@ -39,9 +39,7 @@ var prom_registered = false
// Reusable function for pushing metrics to prometheus. Handles initialization and so on.
func promPushRunningPending(running, pending int) error {
if testContext.PrometheusPushGateway == "" {
Logf("Ignoring prom push, push gateway unavailable")
return nil
} else {
// Register metrics if necessary
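For readers following along: the elided body lazily registers its gauges once, then pushes the current values to the gateway. Below is a minimal, self-contained sketch of that shape written against the current client_golang push API — the client vendored at this point in the repo likely exposes an older interface, and the metric names here are invented:

package e2eutil

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

var (
	registry     = prometheus.NewRegistry()
	registered   = false
	runningGauge = prometheus.NewGauge(prometheus.GaugeOpts{Name: "e2e_running_pods", Help: "pods observed running"})
	pendingGauge = prometheus.NewGauge(prometheus.GaugeOpts{Name: "e2e_pending_pods", Help: "pods observed pending"})
)

func pushRunningPending(gatewayURL string, running, pending int) error {
	if gatewayURL == "" {
		return nil // no push gateway configured; not an error
	}
	if !registered { // lazy one-time registration, as in the original
		registry.MustRegister(runningGauge, pendingGauge)
		registered = true
	}
	runningGauge.Set(float64(running))
	pendingGauge.Set(float64(pending))
	return push.New(gatewayURL, "e2e").Gatherer(registry).Push()
}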


@ -26,7 +26,6 @@ import (
"strings"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
@ -46,32 +45,26 @@ var _ = Describe("Services", func() {
f := NewFramework("services")
var c *client.Client
// Use these in tests. They're unique for each test to prevent name collisions.
var namespaces [2]string
var extraNamespaces []string
BeforeEach(func() {
var err error
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
By("Building a namespace api objects")
for i := range namespaces {
namespacePtr, err := createTestingNS(fmt.Sprintf("service-%d", i), c)
Expect(err).NotTo(HaveOccurred())
namespaces[i] = namespacePtr.Name
}
})
AfterEach(func() {
for _, ns := range namespaces {
for _, ns := range extraNamespaces {
By(fmt.Sprintf("Destroying namespace %v", ns))
if err := deleteNS(c, ns, 5*time.Minute /* namespace deletion timeout */); err != nil {
Failf("Couldn't delete namespace %s: %s", ns, err)
}
}
extraNamespaces = nil
})
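The restructuring above is the heart of this cleanup: the framework now owns one namespace per test, and only namespaces a test creates beyond that go through the extraNamespaces bookkeeping. Condensed, the pattern (using the names from this file) is:

ns1 := f.Namespace.Name // created and deleted by the framework itself
nsObj, err := createTestingNS("services", c) // extra, test-owned namespace
Expect(err).NotTo(HaveOccurred())
ns2 := nsObj.Name
extraNamespaces = append(extraNamespaces, ns2) // the AfterEach above deletes it
By("running against " + ns1 + " and " + ns2)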
// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
It("should provide secure master service", func() {
_, err := c.Services(api.NamespaceDefault).Get("kubernetes")
Expect(err).NotTo(HaveOccurred())
@ -79,12 +72,13 @@ var _ = Describe("Services", func() {
It("should serve a basic endpoint from pods", func() {
serviceName := "endpoint-test2"
ns := namespaces[0]
ns := f.Namespace.Name
labels := map[string]string{
"foo": "bar",
"baz": "blah",
}
By("creating service " + serviceName + " in namespace " + ns)
defer func() {
err := c.Services(ns).Delete(serviceName)
Expect(err).NotTo(HaveOccurred())
@ -107,43 +101,38 @@ var _ = Describe("Services", func() {
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
var names []string
names := map[string]bool{}
defer func() {
for _, name := range names {
for name := range names {
err := c.Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
}()
name1 := "test1"
addEndpointPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
names = append(names, name1)
name1 := "pod1"
name2 := "pod2"
createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
names[name1] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}})
name2 := "test2"
addEndpointPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
names = append(names, name2)
createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
names[name2] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}})
err = c.Pods(ns).Delete(name1, nil)
Expect(err).NotTo(HaveOccurred())
names = []string{name2}
deletePodOrFail(c, ns, name1)
delete(names, name1)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name2: {80}})
err = c.Pods(ns).Delete(name2, nil)
Expect(err).NotTo(HaveOccurred())
names = []string{}
deletePodOrFail(c, ns, name2)
delete(names, name2)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
})
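The switch from a []string to a map[string]bool for cleanup bookkeeping is worth noting: pods are deleted mid-test, and dropping a key from a set is a one-liner, where the old code had to rebuild the slice after every deletion. Condensed:

names := map[string]bool{}
defer func() {
	for name := range names { // clean up whatever is still tracked
		err := c.Pods(ns).Delete(name, nil)
		Expect(err).NotTo(HaveOccurred())
	}
}()
names["pod1"] = true  // track each pod as it is created
delete(names, "pod1") // untrack once the test deletes it itself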
It("should serve multiport endpoints from pods", func() {
// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
serviceName := "multi-endpoint-test"
ns := namespaces[0]
ns := f.Namespace.Name
defer func() {
err := c.Services(ns).Delete(serviceName)
@ -155,6 +144,7 @@ var _ = Describe("Services", func() {
svc1port := "svc1"
svc2port := "svc2"
By("creating service " + serviceName + " in namespace " + ns)
service := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: serviceName,
@ -181,9 +171,9 @@ var _ = Describe("Services", func() {
port2 := 101
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
var names []string
names := map[string]bool{}
defer func() {
for _, name := range names {
for name := range names {
err := c.Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
@ -202,44 +192,30 @@ var _ = Describe("Services", func() {
},
}
podname1 := "podname1"
addEndpointPodOrFail(c, ns, podname1, labels, containerPorts1)
names = append(names, podname1)
podname1 := "pod1"
podname2 := "pod2"
createPodOrFail(c, ns, podname1, labels, containerPorts1)
names[podname1] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}})
podname2 := "podname2"
addEndpointPodOrFail(c, ns, podname2, labels, containerPorts2)
names = append(names, podname2)
createPodOrFail(c, ns, podname2, labels, containerPorts2)
names[podname2] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}})
podname3 := "podname3"
addEndpointPodOrFail(c, ns, podname3, labels, append(containerPorts1, containerPorts2...))
names = append(names, podname3)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}, podname3: {port1, port2}})
err = c.Pods(ns).Delete(podname1, nil)
Expect(err).NotTo(HaveOccurred())
names = []string{podname2, podname3}
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}, podname3: {port1, port2}})
err = c.Pods(ns).Delete(podname2, nil)
Expect(err).NotTo(HaveOccurred())
names = []string{podname3}
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname3: {port1, port2}})
err = c.Pods(ns).Delete(podname3, nil)
Expect(err).NotTo(HaveOccurred())
names = []string{}
deletePodOrFail(c, ns, podname1)
delete(names, podname1)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}})
deletePodOrFail(c, ns, podname2)
delete(names, podname2)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
})
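PortsByPodName and PortsByPodUID are defined elsewhere in this file; judging by their use here, they are presumably plain maps from a pod's name (or UID) to the container ports expected to appear in the service's endpoints, roughly:

type PortsByPodName map[string][]int
type PortsByPodUID map[types.UID][]int // types: k8s.io/kubernetes/pkg/types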
It("should be able to up and down services", func() {
// this test uses NodeSSHHosts, which does not work if a Node only reports LegacyHostIP
SkipUnlessProviderIs(providersWithSSH...)
ns := namespaces[0]
ns := f.Namespace.Name
numPods, servicePort := 3, 80
podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
@ -281,14 +257,18 @@ var _ = Describe("Services", func() {
It("should work after restarting kube-proxy", func() {
SkipUnlessProviderIs("gce", "gke")
ns := namespaces[0]
ns := f.Namespace.Name
numPods, servicePort := 3, 80
defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }()
podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
svc1 := "service1"
svc2 := "service2"
defer func() { expectNoError(stopServeHostnameService(c, ns, svc1)) }()
podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
defer func() { expectNoError(stopServeHostnameService(c, ns, "service2")) }()
podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
defer func() { expectNoError(stopServeHostnameService(c, ns, svc2)) }()
podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP {
@ -305,18 +285,18 @@ var _ = Describe("Services", func() {
expectNoError(verifyServeHostnameServiceUp(c, host, podNames1, svc1IP, servicePort))
expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))
// Restart kube-proxy and verify that services are still reachable (after some time).
By("Restarting kube-proxy")
if err := restartKubeProxy(host); err != nil {
Failf("error restarting kube-proxy: %v", err)
}
expectNoError(verifyServeHostnameServiceUp(c, host, podNames1, svc1IP, servicePort))
expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))
// Remove iptable rules and make sure they come back.
By("Remove iptable rules and make sure they come back")
By("Removing iptable rules")
_, _, code, err := SSH(`
sudo iptables -t nat -F KUBE-SERVICES || true;
sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, testContext.Provider)
sudo iptables -t nat -F KUBE-SERVICES || true;
sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, testContext.Provider)
if err != nil || code != 0 {
Failf("couldn't remove iptable rules: %v (code %v)", err, code)
}
@ -328,7 +308,7 @@ var _ = Describe("Services", func() {
// TODO: restartApiserver doesn't work in GKE - fix it and reenable this test.
SkipUnlessProviderIs("gce")
ns := namespaces[0]
ns := f.Namespace.Name
numPods, servicePort := 3, 80
defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }()
@ -363,134 +343,9 @@ var _ = Describe("Services", func() {
expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))
})
It("should be able to create a functioning external load balancer", func() {
// requires ExternalLoadBalancer
SkipUnlessProviderIs("gce", "gke", "aws")
serviceName := "external-lb-test"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
inboundPort := 3000
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeLoadBalancer
service.Spec.Ports[0].Port = inboundPort
service.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
By("creating service " + serviceName + " with external load balancer in namespace " + ns)
result, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
// Wait for the load balancer to be created asynchronously, which is
// currently indicated by ingress point(s) being added to the status.
result, err = waitForLoadBalancerIngress(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(result.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected number (%v) of ingress points for externally load balanced service: %v", result.Status.LoadBalancer.Ingress, result)
}
ingress := result.Status.LoadBalancer.Ingress[0]
if len(result.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", result)
}
port := result.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", result)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for LoadBalancer service: %v", result)
}
By("creating pod to be part of service " + serviceName)
t.CreateWebserverRC(1)
By("hitting the pod through the service's NodePort")
testReachable(pickNodeIP(c), port.NodePort)
By("hitting the pod through the service's external load balancer")
testLoadBalancerReachable(ingress, inboundPort)
})
It("should be able to create a functioning external load balancer with user-provided load balancer ip", func() {
// requires ExternalLoadBalancer
SkipUnlessProviderIs("gce", "gke")
serviceName := "lb-test-with-user-ip"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
inboundPort := 3000
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeLoadBalancer
service.Spec.Ports[0].Port = inboundPort
service.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
By("creating an external static ip")
rand.Seed(time.Now().UTC().UnixNano())
staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
glog.Errorf("static ip name is %s", staticIPName)
loadBalancerIP, err := createGCEStaticIP(staticIPName)
Expect(err).NotTo(HaveOccurred())
defer func() {
deleteGCEStaticIP(staticIPName)
}()
service.Spec.LoadBalancerIP = loadBalancerIP
By("creating service " + serviceName + " with external load balancer in namespace " + ns)
result, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
// Wait for the load balancer to be created asynchronously, which is
// currently indicated by ingress point(s) being added to the status.
result, err = waitForLoadBalancerIngress(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(result.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected number (%v) of ingress points for externally load balanced service: %v", result.Status.LoadBalancer.Ingress, result)
}
ingress := result.Status.LoadBalancer.Ingress[0]
Expect(ingress.IP).To(Equal(loadBalancerIP))
if len(result.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", result)
}
port := result.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", result)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for LoadBalancer service: %v", result)
}
By("creating pod to be part of service " + serviceName)
t.CreateWebserverRC(1)
By("hitting the pod through the service's NodePort")
testReachable(pickNodeIP(c), port.NodePort)
By("hitting the pod through the service's external load balancer")
testLoadBalancerReachable(ingress, inboundPort)
})
It("should be able to create a functioning NodePort service", func() {
serviceName := "nodeportservice-test"
ns := namespaces[0]
ns := f.Namespace.Name
t := NewWebserverTest(c, ns, serviceName)
defer func() {
@ -547,7 +402,7 @@ var _ = Describe("Services", func() {
})
It("should be able to change the type and nodeport settings of a service", func() {
// requires ExternalLoadBalancer
// requires cloud load-balancer support
SkipUnlessProviderIs("gce", "gke", "aws")
serviceName := "mutability-service-test"
@ -578,7 +433,7 @@ var _ = Describe("Services", func() {
Failf("got unexpected Spec.Ports[0].nodePort for default service: %v", service)
}
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for default service: %v", service)
Failf("got unexpected len(Status.LoadBalancer.Ingress) for default service: %v", service)
}
By("creating pod to be part of service " + t.ServiceName)
@ -603,17 +458,16 @@ var _ = Describe("Services", func() {
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for NodePort service: %v", service)
}
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
}
By("hitting the pod through the service's NodePort")
ip := pickNodeIP(f.Client)
nodePort1 := port.NodePort // Save for later!
testReachable(ip, nodePort1)
By("changing service " + serviceName + " to type=LoadBalancer")
service.Spec.Type = api.ServiceTypeLoadBalancer
service, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
s.Spec.Type = api.ServiceTypeLoadBalancer
})
@ -634,12 +488,13 @@ var _ = Describe("Services", func() {
Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", service)
}
if len(service.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for LoadBalancer service: %v", service)
Failf("got unexpected len(Status.LoadBalancer.Ingress) for LoadBalancer service: %v", service)
}
ingress1 := service.Status.LoadBalancer.Ingress[0]
if ingress1.IP == "" && ingress1.Hostname == "" {
Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
Failf("got unexpected Status.LoadBalancer.Ingress[0] for LoadBalancer service: %v", service)
}
By("hitting the pod through the service's NodePort")
ip = pickNodeIP(f.Client)
testReachable(ip, nodePort1)
@ -668,25 +523,31 @@ var _ = Describe("Services", func() {
Failf("got unexpected Spec.Ports[0].nodePort for NodePort service: %v", service)
}
if len(service.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
}
// TODO: Make this less of a hack. Watch for events?
Logf("Waiting 2 minutes to give service time to settle after changing configuration")
time.Sleep(time.Second * 120)
service, err = waitForLoadBalancerIngress(f.Client, serviceName, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
ingress2 := service.Status.LoadBalancer.Ingress[0]
Expect(ingress1).To(Equal(ingress2))
By("hitting the pod through the service's updated NodePort")
testReachable(ip, nodePort2)
By("hitting the pod through the service's LoadBalancer")
testLoadBalancerReachable(ingress2, 80)
By("checking the old NodePort is closed")
testNotReachable(ip, nodePort1)
By("hitting the pod through the service's LoadBalancer")
i := 1
for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(3 * time.Second) {
service, err = waitForLoadBalancerIngress(f.Client, serviceName, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
ingress2 := service.Status.LoadBalancer.Ingress[0]
if testLoadBalancerReachableInTime(ingress2, 80, 5*time.Second) {
break
}
if i%5 == 0 {
Logf("Waiting for load-balancer changes (%v elapsed, will retry)", time.Since(start))
}
i++
}
By("changing service " + serviceName + " back to type=ClusterIP")
service, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
s.Spec.Type = api.ServiceTypeClusterIP
@ -694,6 +555,9 @@ var _ = Describe("Services", func() {
})
Expect(err).NotTo(HaveOccurred())
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
}
if service.Spec.Type != api.ServiceTypeClusterIP {
Failf("got unexpected Spec.Type for back-to-ClusterIP service: %v", service)
}
@ -710,109 +574,22 @@ var _ = Describe("Services", func() {
Expect(err).NotTo(HaveOccurred())
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for back-to-ClusterIP service: %v", service)
Failf("got unexpected len(Status.LoadBalancer.Ingress) for back-to-ClusterIP service: %v", service)
}
By("checking the NodePort (original) is closed")
ip = pickNodeIP(f.Client)
testNotReachable(ip, nodePort1)
By("checking the NodePort (updated) is closed")
By("checking the NodePort is closed")
ip = pickNodeIP(f.Client)
testNotReachable(ip, nodePort2)
By("checking the LoadBalancer is closed")
testLoadBalancerNotReachable(ingress2, 80)
})
It("should release the load balancer when Type goes from LoadBalancer -> NodePort", func() {
// requires ExternalLoadBalancer
SkipUnlessProviderIs("gce", "gke", "aws")
serviceName := "service-release-lb"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeLoadBalancer
By("creating service " + serviceName + " with type LoadBalancer")
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
By("creating pod to be part of service " + t.ServiceName)
t.CreateWebserverRC(1)
if service.Spec.Type != api.ServiceTypeLoadBalancer {
Failf("got unexpected Spec.Type for LoadBalancer service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", service)
}
nodePort := service.Spec.Ports[0].NodePort
if nodePort == 0 {
Failf("got unexpected Spec.Ports[0].NodePort for LoadBalancer service: %v", service)
}
// Wait for the load balancer to be created asynchronously
service, err = waitForLoadBalancerIngress(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(service.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for LoadBalancer service: %v", service)
}
ingress := service.Status.LoadBalancer.Ingress[0]
if ingress.IP == "" && ingress.Hostname == "" {
Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
}
By("hitting the pod through the service's NodePort")
ip := pickNodeIP(c)
testReachable(ip, nodePort)
By("hitting the pod through the service's LoadBalancer")
testLoadBalancerReachable(ingress, 80)
By("changing service " + serviceName + " to type=NodePort")
service, err = updateService(c, ns, serviceName, func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
})
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for NodePort service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for NodePort service: %v", service)
}
if service.Spec.Ports[0].NodePort != nodePort {
Failf("got unexpected Spec.Ports[0].NodePort for NodePort service: %v", service)
}
// Wait for the load balancer to be destroyed asynchronously
service, err = waitForLoadBalancerDestroy(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
}
By("hitting the pod through the service's NodePort")
testReachable(ip, nodePort)
By("checking the LoadBalancer is closed")
testLoadBalancerNotReachable(ingress, 80)
testLoadBalancerNotReachable(ingress1, 80)
})
It("should prevent NodePort collisions", func() {
serviceName := "nodeport-collision"
serviceName2 := serviceName + "2"
ns := namespaces[0]
baseName := "nodeport-collision-"
serviceName1 := baseName + "1"
serviceName2 := baseName + "2"
ns := f.Namespace.Name
t := NewWebserverTest(c, ns, serviceName)
t := NewWebserverTest(c, ns, serviceName1)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
@ -821,10 +598,9 @@ var _ = Describe("Services", func() {
}
}()
By("creating service " + serviceName1 + " with type NodePort in namespace " + ns)
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
By("creating service " + serviceName + " with type NodePort in namespace " + ns)
result, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
@ -839,23 +615,21 @@ var _ = Describe("Services", func() {
Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result)
}
By("creating service " + serviceName + " with conflicting NodePort")
By("creating service " + serviceName2 + " with conflicting NodePort")
service2 := t.BuildServiceSpec()
service2.Name = serviceName2
service2.Spec.Type = api.ServiceTypeNodePort
service2.Spec.Ports[0].NodePort = port.NodePort
By("creating service " + serviceName2 + " with conflicting NodePort")
result2, err := t.CreateService(service2)
if err == nil {
Failf("Created service with conflicting NodePort: %v", result2)
}
expectedErr := fmt.Sprintf("Service \"%s\" is invalid: spec.ports[0].nodePort: invalid value '%d', Details: provided port is already allocated", serviceName2, port.NodePort)
expectedErr := fmt.Sprintf("Service \"%s\" is invalid: spec.ports[0].nodePort: invalid value '%d', Details: provided port is already allocated",
serviceName2, port.NodePort)
Expect(fmt.Sprintf("%v", err)).To(Equal(expectedErr))
By("deleting original service " + serviceName + " with type NodePort in namespace " + ns)
err = t.DeleteService(serviceName)
By("deleting service " + serviceName1 + " to release NodePort")
err = t.DeleteService(serviceName1)
Expect(err).NotTo(HaveOccurred())
By("creating service " + serviceName2 + " with no-longer-conflicting NodePort")
@ -865,7 +639,7 @@ var _ = Describe("Services", func() {
It("should check NodePort out-of-range", func() {
serviceName := "nodeport-range-test"
ns := namespaces[0]
ns := f.Namespace.Name
t := NewWebserverTest(c, ns, serviceName)
defer func() {
@ -931,7 +705,7 @@ var _ = Describe("Services", func() {
It("should release NodePorts on delete", func() {
serviceName := "nodeport-reuse"
ns := namespaces[0]
ns := f.Namespace.Name
t := NewWebserverTest(c, ns, serviceName)
defer func() {
@ -989,54 +763,98 @@ var _ = Describe("Services", func() {
Expect(err).NotTo(HaveOccurred())
})
It("should correctly serve identically named services in different namespaces on different external IP addresses", func() {
// This test hits several load-balancer cases because LB turnup is slow.
It("should serve identically named services in different namespaces on different load-balancers", func() {
// requires ExternalLoadBalancer
SkipUnlessProviderIs("gce", "gke", "aws")
serviceNames := []string{"s0"} // Could add more here, but then it takes longer.
labels := map[string]string{
"key0": "value0",
"key1": "value1",
}
service := &api.Service{
ObjectMeta: api.ObjectMeta{},
Spec: api.ServiceSpec{
Selector: labels,
Ports: []api.ServicePort{{
Port: 80,
TargetPort: util.NewIntOrStringFromInt(80),
}},
Type: api.ServiceTypeLoadBalancer,
},
ns1 := f.Namespace.Name
By("Building a second namespace api object")
namespacePtr, err := createTestingNS("services", c)
Expect(err).NotTo(HaveOccurred())
ns2 := namespacePtr.Name
extraNamespaces = append(extraNamespaces, ns2)
serviceName := "test-svc"
servicePort := 9376
By("creating service " + serviceName + " with load balancer in namespace " + ns1)
t1 := NewWebserverTest(c, ns1, serviceName)
svc1 := t1.BuildServiceSpec()
svc1.Spec.Type = api.ServiceTypeLoadBalancer
svc1.Spec.Ports[0].Port = servicePort
svc1.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
_, err = t1.CreateService(svc1)
Expect(err).NotTo(HaveOccurred())
By("creating pod to be part of service " + serviceName + " in namespace " + ns1)
t1.CreateWebserverRC(1)
loadBalancerIP := ""
if providerIs("gce", "gke") {
By("creating a static IP")
rand.Seed(time.Now().UTC().UnixNano())
staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
loadBalancerIP, err = createGCEStaticIP(staticIPName)
Expect(err).NotTo(HaveOccurred())
defer func() {
// Release GCE static IP - this is not kube-managed and will not be automatically released.
deleteGCEStaticIP(staticIPName)
}()
}
By("creating service " + serviceName + " with load balancer in namespace " + ns2)
t2 := NewWebserverTest(c, ns2, serviceName)
svc2 := t2.BuildServiceSpec()
svc2.Spec.Type = api.ServiceTypeLoadBalancer
svc2.Spec.Ports[0].Port = servicePort
svc2.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
svc2.Spec.LoadBalancerIP = loadBalancerIP
_, err = t2.CreateService(svc2)
Expect(err).NotTo(HaveOccurred())
By("creating pod to be part of service " + serviceName + " in namespace " + ns2)
t2.CreateWebserverRC(2)
ingressPoints := []string{}
for _, namespace := range namespaces {
for _, serviceName := range serviceNames {
service.ObjectMeta.Name = serviceName
service.ObjectMeta.Namespace = namespace
By("creating service " + serviceName + " in namespace " + namespace)
_, err := c.Services(namespace).Create(service)
Expect(err).NotTo(HaveOccurred())
defer func(namespace, serviceName string) { // clean up when we're done
By("deleting service " + serviceName + " in namespace " + namespace)
err := c.Services(namespace).Delete(serviceName)
Expect(err).NotTo(HaveOccurred())
}(namespace, serviceName)
svcs := []*api.Service{svc1, svc2}
for _, svc := range svcs {
namespace := svc.Namespace
lbip := svc.Spec.LoadBalancerIP
// Wait for the load balancer to be created asynchronously, which is
// currently indicated by ingress point(s) being added to the status.
result, err := waitForLoadBalancerIngress(c, serviceName, namespace)
Expect(err).NotTo(HaveOccurred())
if len(result.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected number (%v) of ingress points for externally load balanced service: %v", result.Status.LoadBalancer.Ingress, result)
}
}
for _, namespace := range namespaces {
for _, serviceName := range serviceNames {
result, err := waitForLoadBalancerIngress(c, serviceName, namespace)
Expect(err).NotTo(HaveOccurred())
for i := range result.Status.LoadBalancer.Ingress {
ingress := result.Status.LoadBalancer.Ingress[i].IP
if ingress == "" {
ingress = result.Status.LoadBalancer.Ingress[i].Hostname
}
ingressPoints = append(ingressPoints, ingress) // Save 'em to check uniqueness
}
ingress := result.Status.LoadBalancer.Ingress[0]
if len(result.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", result)
}
if lbip != "" {
Expect(ingress.IP).To(Equal(lbip))
}
port := result.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", result)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for LoadBalancer service: %v", result)
}
ing := result.Status.LoadBalancer.Ingress[0].IP
if ing == "" {
ing = result.Status.LoadBalancer.Ingress[0].Hostname
}
ingressPoints = append(ingressPoints, ing) // Save 'em to check uniqueness
By("hitting the pod through the service's NodePort")
testReachable(pickNodeIP(c), port.NodePort)
By("hitting the pod through the service's external load balancer")
testLoadBalancerReachable(ingress, servicePort)
}
validateUniqueOrFail(ingressPoints)
})
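validateUniqueOrFail lives further down in this file, outside the excerpt; from its use here it presumably asserts that no two services received the same ingress point. A minimal version might look like:

func validateUniqueOrFail(s []string) {
	seen := map[string]bool{}
	for _, item := range s {
		if seen[item] {
			Failf("duplicate ingress point found: %s", item)
		}
		seen[item] = true
	}
}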
@ -1070,7 +888,8 @@ func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string)
const timeout = 20 * time.Minute
var service *api.Service
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have a LoadBalancer ingress point", timeout, serviceName, namespace))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
i := 1
for start := time.Now(); time.Since(start) < timeout; time.Sleep(3 * time.Second) {
service, err := c.Services(namespace).Get(serviceName)
if err != nil {
Logf("Get service failed, ignoring for 5s: %v", err)
@ -1079,13 +898,17 @@ func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string)
if len(service.Status.LoadBalancer.Ingress) > 0 {
return service, nil
}
Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start))
if i%5 == 0 {
Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start))
}
i++
}
return service, fmt.Errorf("service %s in namespace %s doesn't have a LoadBalancer ingress point after %.2f seconds", serviceName, namespace, timeout.Seconds())
}
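The i%5 counter here (and again in validateEndpointsOrFail below) is a recurring idiom in this PR: poll often, but log only every fifth attempt so test output stays readable. As a hypothetical standalone helper — not code from the repo — the pattern is:

func pollWithProgress(interval, timeout time.Duration, what string, cond func() bool) bool {
	i := 1
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if cond() {
			return true
		}
		if i%5 == 0 { // throttle progress logs to every 5th attempt
			Logf("waiting for %s (%v elapsed, will retry)", what, time.Since(start))
		}
		i++
	}
	return false
}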
func waitForLoadBalancerDestroy(c *client.Client, serviceName, namespace string) (*api.Service, error) {
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
// TODO: this should actually test that the LB was released at the cloud provider
const timeout = 10 * time.Minute
var service *api.Service
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have no LoadBalancer ingress points", timeout, serviceName, namespace))
@ -1136,7 +959,7 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
}
Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
// Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
if _, ok := m[addr.TargetRef.UID]; !ok {
m[addr.TargetRef.UID] = make([]int, 0)
}
@ -1159,9 +982,8 @@ func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints
Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
}
portsByUID[pod.ObjectMeta.UID] = portList
By(fmt.Sprintf(""))
}
By(fmt.Sprintf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns))
// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
return portsByUID
}
@ -1188,26 +1010,31 @@ func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUI
}
func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) {
By(fmt.Sprintf("Waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(5 * time.Second) {
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
i := 1
for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(1 * time.Second) {
endpoints, err := c.Endpoints(namespace).Get(serviceName)
if err != nil {
Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
continue
}
Logf("Found endpoints %v", endpoints)
// Logf("Found endpoints %v", endpoints)
portsByPodUID := getContainerPortsByPodUID(endpoints)
Logf("Found port by pod UID %v", portsByPodUID)
// Logf("Found port by pod UID %v", portsByPodUID)
expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
if len(portsByPodUID) == len(expectedEndpoints) {
validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
By(fmt.Sprintf("Successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, time.Since(start)))
Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
serviceName, namespace, expectedEndpoints, time.Since(start))
return
}
Logf("Unexpected number of endpoints: found %v, expected %v (%v elapsed, ignoring for 5s)", portsByPodUID, expectedEndpoints, time.Since(start))
if i%5 == 0 {
Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
}
i++
}
if pods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()); err == nil {
@ -1220,8 +1047,8 @@ func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, ex
Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, serviceStartTimeout)
}
func addEndpointPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
By(fmt.Sprintf("Adding pod %v in namespace %v", name, ns))
func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
By(fmt.Sprintf("creating pod %s in namespace %s", name, ns))
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: name,
@ -1241,6 +1068,12 @@ func addEndpointPodOrFail(c *client.Client, ns, name string, labels map[string]s
Expect(err).NotTo(HaveOccurred())
}
func deletePodOrFail(c *client.Client, ns, name string) {
By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns))
err := c.Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
ips := []string{}
for i := range nodes.Items {
@ -1278,13 +1111,17 @@ func pickNodeIP(c *client.Client) string {
return ip
}
func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) {
func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) bool {
return testLoadBalancerReachableInTime(ingress, port, podStartTimeout)
}
func testLoadBalancerReachableInTime(ingress api.LoadBalancerIngress, port int, timeout time.Duration) bool {
ip := ingress.IP
if ip == "" {
ip = ingress.Hostname
}
testReachable(ip, port)
return testReachableInTime(ip, port, timeout)
}
func testLoadBalancerNotReachable(ingress api.LoadBalancerIngress, port int) {
@ -1296,19 +1133,25 @@ func testLoadBalancerNotReachable(ingress api.LoadBalancerIngress, port int) {
testNotReachable(ip, port)
}
func testReachable(ip string, port int) {
func testReachable(ip string, port int) bool {
return testReachableInTime(ip, port, podStartTimeout)
}
func testReachableInTime(ip string, port int, timeout time.Duration) bool {
url := fmt.Sprintf("http://%s:%d", ip, port)
if ip == "" {
Failf("Got empty IP for reachability check (%s)", url)
return false
}
if port == 0 {
Failf("Got port==0 for reachability check (%s)", url)
return false
}
desc := fmt.Sprintf("the url %s to be reachable", url)
By(fmt.Sprintf("Waiting up to %v for %s", podStartTimeout, desc))
By(fmt.Sprintf("Waiting up to %v for %s", timeout, desc))
start := time.Now()
err := wait.Poll(poll, podStartTimeout, func() (bool, error) {
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
resp, err := httpGetNoConnectionPool(url)
if err != nil {
Logf("Got error waiting for reachability of %s: %v (%v)", url, err, time.Since(start))
@ -1329,7 +1172,11 @@ func testReachable(ip string, port int) {
Logf("Successfully reached %v", url)
return true, nil
})
Expect(err).NotTo(HaveOccurred(), "Error waiting for %s", desc)
if err != nil {
Expect(err).NotTo(HaveOccurred(), "Error waiting for %s", desc)
return false
}
return true
}
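Most of the remaining hunks swap wait.Poll for wait.PollImmediate. The distinction is small but shaves real time off tests: Poll sleeps one full interval before the first check, while PollImmediate evaluates the condition immediately and only sleeps between retries. A sketch, assuming this era's k8s.io/kubernetes/pkg/util/wait package and a hypothetical serviceIsReady check:

func waitForMyCondition() error {
	cond := func() (bool, error) {
		return serviceIsReady(), nil // hypothetical readiness check
	}
	// wait.Poll sleeps one full interval *before* the first check:
	//   return wait.Poll(2*time.Second, time.Minute, cond)
	// wait.PollImmediate runs cond right away and only sleeps between
	// retries, so an already-true condition returns with no delay:
	return wait.PollImmediate(2*time.Second, time.Minute, cond)
}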
func testNotReachable(ip string, port int) {
@ -1343,7 +1190,7 @@ func testNotReachable(ip string, port int) {
desc := fmt.Sprintf("the url %s to be *not* reachable", url)
By(fmt.Sprintf("Waiting up to %v for %s", podStartTimeout, desc))
err := wait.Poll(poll, podStartTimeout, func() (bool, error) {
err := wait.PollImmediate(poll, podStartTimeout, func() (bool, error) {
resp, err := httpGetNoConnectionPool(url)
if err != nil {
Logf("Successfully waited for %s", desc)
@ -1365,6 +1212,7 @@ func testNotReachable(ip string, port int) {
func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) {
podNames := make([]string, replicas)
By("creating service " + name + " in namespace " + ns)
_, err := c.Services(ns).Create(&api.Service{
ObjectMeta: api.ObjectMeta{
Name: name,
@ -1447,6 +1295,7 @@ func verifyServeHostnameServiceUp(c *client.Client, host string, expectedPods []
command),
}
By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
for _, cmd := range commands {
passed := false
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
@ -1539,7 +1388,8 @@ func NewWebserverTest(client *client.Client, namespace string, serviceName strin
func (t *WebserverTest) BuildServiceSpec() *api.Service {
service := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: t.ServiceName,
Name: t.ServiceName,
Namespace: t.Namespace,
},
Spec: api.ServiceSpec{
Selector: t.Labels,


@ -66,7 +66,7 @@ const (
nonExist = "NonExist"
// How often to poll pods and nodes.
poll = 5 * time.Second
poll = 2 * time.Second
// service accounts are provisioned after namespace creation
// a service account is required to support pod creation in a namespace as part of admission control
@ -86,10 +86,6 @@ const (
podRespondingTimeout = 2 * time.Minute
serviceRespondingTimeout = 2 * time.Minute
endpointRegisterTimeout = time.Minute
// How wide to print pod names, by default. Useful for aligning printing to
// quickly scan through output.
podPrintWidth = 55
)
type CloudConfig struct {
@ -335,7 +331,7 @@ func waitForPodsRunningReady(ns string, minPods int, timeout time.Duration) erro
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
if wait.Poll(poll, timeout, func() (bool, error) {
if wait.PollImmediate(poll, timeout, func() (bool, error) {
// We get the new list of pods and replication controllers in every
// iteration because more pods come online during startup and we want to
// ensure they are also checked.
@ -411,22 +407,22 @@ func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName s
}
func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %[1]v for pod %-[2]*[3]s status to be %[4]s", timeout, podPrintWidth, podName, desc)
Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pod, err := c.Pods(ns).Get(podName)
if err != nil {
// Aligning this text makes it much more readable
Logf("Get pod %-[1]*[2]s in namespace '%[3]s' failed, ignoring for %[4]v. Error: %[5]v",
podPrintWidth, podName, ns, poll, err)
Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
podName, ns, poll, err)
continue
}
done, err := condition(pod)
if done {
return err
}
Logf("Waiting for pod %-[1]*[2]s in namespace '%[3]s' status to be '%[4]s'"+
"(found phase: %[5]q, readiness: %[6]t) (%[7]v elapsed)",
podPrintWidth, podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
}
return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
}
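A side note on the %[1]v-style verbs edited here, since they are rare: the bracketed index picks which argument a verb consumes, letting a format string reuse or reorder arguments, and %-[1]*[2]s additionally reads its field width from an argument — that width argument (podPrintWidth) is exactly what this PR deletes. For example:

phase, podName := "Running", "pod-abc"
fmt.Printf("pod %[2]s is %[1]q; %[2]s again\n", phase, podName)
// prints: pod pod-abc is "Running"; pod-abc again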
@ -470,7 +466,7 @@ func createTestingNS(baseName string, c *client.Client) (*api.Namespace, error)
}
// Be robust about making the namespace creation call.
var got *api.Namespace
if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) {
if err := wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
var err error
got, err = c.Namespaces().Create(namespaceObj)
if err != nil {
@ -535,7 +531,7 @@ func deleteNS(c *client.Client, namespace string, timeout time.Duration) error {
return err
}
err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
if _, err := c.Namespaces().Get(namespace); err != nil {
if apierrs.IsNotFound(err) {
return true, nil
@ -623,7 +619,7 @@ func waitForPodSuccessInNamespace(c *client.Client, podName string, contName str
func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
var p *api.Pod = nil
err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
Logf("Waiting for pod %s to appear on node %s", rcName, node)
pods, err := c.Pods(ns).List(label, fields.Everything())
if err != nil {
@ -642,7 +638,7 @@ func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, er
}
func waitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
pods, err := c.Pods(ns).List(label, fields.Everything())
if err != nil {
@ -672,7 +668,7 @@ func waitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error
// waitForService waits until the service appears (exist == true), or disappears (exist == false)
func waitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.Poll(interval, timeout, func() (bool, error) {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.Services(namespace).Get(name)
switch {
case err == nil:
@ -727,7 +723,7 @@ func countEndpointsNum(e *api.Endpoints) int {
// waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func waitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.Poll(interval, timeout, func() (bool, error) {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.ReplicationControllers(namespace).Get(name)
if err != nil {
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
@ -822,13 +818,13 @@ func (r podResponseChecker) checkAllResponses() (done bool, err error) {
func podsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.Poll(poll, podRespondingTimeout, podResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses)
return wait.PollImmediate(poll, podRespondingTimeout, podResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses)
}
func serviceResponding(c *client.Client, ns, name string) error {
By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
return wait.Poll(poll, serviceRespondingTimeout, func() (done bool, err error) {
return wait.PollImmediate(poll, serviceRespondingTimeout, func() (done bool, err error) {
body, err := c.Get().
Prefix("proxy").
Namespace(ns).
@ -1214,7 +1210,6 @@ func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling cleanup).
func RunRC(config RCConfig) error {
// Don't force tests to fail if they don't care about containers restarting.
var maxContainerFailures int
if config.MaxContainerFailures == nil {
@ -1225,7 +1220,7 @@ func RunRC(config RCConfig) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
By(fmt.Sprintf("%v Creating replication controller %s", time.Now(), config.Name))
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
@ -1292,7 +1287,7 @@ func RunRC(config RCConfig) error {
if err != nil {
return fmt.Errorf("Error creating replication controller: %v", err)
}
Logf("%v Created replication controller with name: %v, namespace: %v, replica count: %v", time.Now(), rc.Name, config.Namespace, rc.Spec.Replicas)
Logf("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
podStore := newPodStore(config.Client, config.Namespace, label, fields.Everything())
defer podStore.Stop()
@ -1364,13 +1359,13 @@ func RunRC(config RCConfig) error {
*config.CreatedPods = pods
}
Logf("%v %v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
time.Now(), rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)
Logf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)
promPushRunningPending(running, pending)
if config.PodStatusFile != nil {
fmt.Fprintf(config.PodStatusFile, "%s, %d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", time.Now(), running, pending, waiting, inactive, unknown, runningButNotReady)
fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", running, pending, waiting, inactive, unknown, runningButNotReady)
}
if failedContainers > maxContainerFailures {
@ -1482,7 +1477,7 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
}
func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
By(fmt.Sprintf("%v Scaling replication controller %s in namespace %s to %d", time.Now(), name, ns, size))
By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
scaler, err := kubectl.ScalerFor("ReplicationController", c)
if err != nil {
return err
@ -1538,7 +1533,7 @@ func waitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (p
// Delete a Replication Controller and all pods it spawned
func DeleteRC(c *client.Client, ns, name string) error {
By(fmt.Sprintf("%v Deleting replication controller %s in namespace %s", time.Now(), name, ns))
By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
rc, err := c.ReplicationControllers(ns).Get(name)
if err != nil {
if apierrs.IsNotFound(err) {
@ -1574,7 +1569,7 @@ func DeleteRC(c *client.Client, ns, name string) error {
// waitForRCPodsGone waits until there are no pods reported under an RC's selector (because the pods
// have completed termination).
func waitForRCPodsGone(c *client.Client, rc *api.ReplicationController) error {
return wait.Poll(poll, 2*time.Minute, func() (bool, error) {
return wait.PollImmediate(poll, 2*time.Minute, func() (bool, error) {
if pods, err := c.Pods(rc.Namespace).List(labels.SelectorFromSet(rc.Spec.Selector), fields.Everything()); err == nil && len(pods.Items) == 0 {
return true, nil
}
@ -1635,7 +1630,7 @@ func waitForDeploymentStatus(c *client.Client, ns, deploymentName string, desire
func listNodes(c *client.Client, label labels.Selector, field fields.Selector) (*api.NodeList, error) {
var nodes *api.NodeList
var errLast error
if wait.Poll(poll, singleCallTimeout, func() (bool, error) {
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, errLast = c.Nodes().List(label, field)
return errLast == nil, nil
}) != nil {
@ -1815,7 +1810,7 @@ func checkPodsRunningReady(c *client.Client, ns string, podNames []string, timeo
// support only Go >= 1.4.
for _, podName := range podNames {
if !<-result {
Logf("Pod %-[1]*[2]s failed to be %[3]s.", podPrintWidth, podName, desc)
Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
success = false
}
}
@ -1877,7 +1872,7 @@ func allNodesReady(c *client.Client, timeout time.Duration) error {
Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []api.Node
err := wait.Poll(poll, timeout, func() (bool, error) {
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
notReady = nil
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
if err != nil {