Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-11 21:12:07 +00:00)

commit 4856c7c033
Merge pull request #14957 from thockin/services-e2e

    Services e2e cleanup
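From the diff below: the services e2e suite stops pre-building a `namespaces [2]string` pair in `BeforeEach` and instead uses the framework-provided `f.Namespace.Name`, tracking any extra namespaces in `extraNamespaces` for `AfterEach` teardown. `addEndpointPodOrFail` becomes `createPodOrFail` with a matching new `deletePodOrFail` helper, and pod-name bookkeeping moves from a slice to a `map[string]bool`. The standalone external-load-balancer tests are removed, with their coverage folded into the type-mutation test (which now polls for load-balancer reachability instead of sleeping two minutes) and the identically-named-services test. The reachability helpers gain `testReachableInTime`/`testLoadBalancerReachableInTime` variants that return a bool and poll with `wait.PollImmediate`, per-iteration logging is throttled to every fifth attempt, and the global `poll` constant drops from 5s to 2s.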
@@ -39,9 +39,7 @@ var prom_registered = false
 
 // Reusable function for pushing metrics to prometheus. Handles initialization and so on.
 func promPushRunningPending(running, pending int) error {
-
    if testContext.PrometheusPushGateway == "" {
-       Logf("Ignoring prom push, push gateway unavailable")
        return nil
    } else {
        // Register metrics if necessary
@@ -26,7 +26,6 @@ import (
    "strings"
    "time"
 
-   "github.com/golang/glog"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "k8s.io/kubernetes/pkg/api"
@@ -46,32 +45,26 @@ var _ = Describe("Services", func() {
    f := NewFramework("services")
 
    var c *client.Client
-   // Use these in tests. They're unique for each test to prevent name collisions.
-   var namespaces [2]string
+   var extraNamespaces []string
 
    BeforeEach(func() {
        var err error
        c, err = loadClient()
        Expect(err).NotTo(HaveOccurred())
-
-       By("Building a namespace api objects")
-       for i := range namespaces {
-           namespacePtr, err := createTestingNS(fmt.Sprintf("service-%d", i), c)
-           Expect(err).NotTo(HaveOccurred())
-           namespaces[i] = namespacePtr.Name
-       }
    })
 
    AfterEach(func() {
-       for _, ns := range namespaces {
+       for _, ns := range extraNamespaces {
            By(fmt.Sprintf("Destroying namespace %v", ns))
            if err := deleteNS(c, ns, 5*time.Minute /* namespace deletion timeout */); err != nil {
                Failf("Couldn't delete namespace %s: %s", ns, err)
            }
        }
+       extraNamespaces = nil
    })
 
    // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
 
    It("should provide secure master service", func() {
        _, err := c.Services(api.NamespaceDefault).Get("kubernetes")
        Expect(err).NotTo(HaveOccurred())
@@ -79,12 +72,13 @@ var _ = Describe("Services", func() {
 
    It("should serve a basic endpoint from pods", func() {
        serviceName := "endpoint-test2"
-       ns := namespaces[0]
+       ns := f.Namespace.Name
        labels := map[string]string{
            "foo": "bar",
            "baz": "blah",
        }
 
+       By("creating service " + serviceName + " in namespace " + ns)
        defer func() {
            err := c.Services(ns).Delete(serviceName)
            Expect(err).NotTo(HaveOccurred())
@@ -107,43 +101,38 @@ var _ = Describe("Services", func() {
 
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
 
-       var names []string
+       names := map[string]bool{}
        defer func() {
-           for _, name := range names {
+           for name := range names {
                err := c.Pods(ns).Delete(name, nil)
                Expect(err).NotTo(HaveOccurred())
            }
        }()
 
-       name1 := "test1"
-       addEndpointPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
-       names = append(names, name1)
+       name1 := "pod1"
+       name2 := "pod2"
 
+       createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
+       names[name1] = true
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}})
 
-       name2 := "test2"
-       addEndpointPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
-       names = append(names, name2)
+       createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
+       names[name2] = true
 
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}})
 
-       err = c.Pods(ns).Delete(name1, nil)
-       Expect(err).NotTo(HaveOccurred())
-       names = []string{name2}
+       deletePodOrFail(c, ns, name1)
+       delete(names, name1)
 
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name2: {80}})
 
-       err = c.Pods(ns).Delete(name2, nil)
-       Expect(err).NotTo(HaveOccurred())
-       names = []string{}
+       deletePodOrFail(c, ns, name2)
+       delete(names, name2)
 
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
    })
 
    It("should serve multiport endpoints from pods", func() {
        // repacking functionality is intentionally not tested here - it's better to test it in an integration test.
        serviceName := "multi-endpoint-test"
-       ns := namespaces[0]
+       ns := f.Namespace.Name
 
        defer func() {
            err := c.Services(ns).Delete(serviceName)
@@ -155,6 +144,7 @@ var _ = Describe("Services", func() {
        svc1port := "svc1"
        svc2port := "svc2"
 
+       By("creating service " + serviceName + " in namespace " + ns)
        service := &api.Service{
            ObjectMeta: api.ObjectMeta{
                Name: serviceName,
@@ -181,9 +171,9 @@ var _ = Describe("Services", func() {
        port2 := 101
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
 
-       var names []string
+       names := map[string]bool{}
        defer func() {
-           for _, name := range names {
+           for name := range names {
                err := c.Pods(ns).Delete(name, nil)
                Expect(err).NotTo(HaveOccurred())
            }
@@ -202,44 +192,30 @@ var _ = Describe("Services", func() {
            },
        }
 
-       podname1 := "podname1"
-       addEndpointPodOrFail(c, ns, podname1, labels, containerPorts1)
-       names = append(names, podname1)
+       podname1 := "pod1"
+       podname2 := "pod2"
 
+       createPodOrFail(c, ns, podname1, labels, containerPorts1)
+       names[podname1] = true
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}})
 
-       podname2 := "podname2"
-       addEndpointPodOrFail(c, ns, podname2, labels, containerPorts2)
-       names = append(names, podname2)
+       createPodOrFail(c, ns, podname2, labels, containerPorts2)
+       names[podname2] = true
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}})
 
-       podname3 := "podname3"
-       addEndpointPodOrFail(c, ns, podname3, labels, append(containerPorts1, containerPorts2...))
-       names = append(names, podname3)
-       validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}, podname3: {port1, port2}})
-
-       err = c.Pods(ns).Delete(podname1, nil)
-       Expect(err).NotTo(HaveOccurred())
-       names = []string{podname2, podname3}
-
-       validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}, podname3: {port1, port2}})
-
-       err = c.Pods(ns).Delete(podname2, nil)
-       Expect(err).NotTo(HaveOccurred())
-       names = []string{podname3}
-
-       validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname3: {port1, port2}})
-
-       err = c.Pods(ns).Delete(podname3, nil)
-       Expect(err).NotTo(HaveOccurred())
-       names = []string{}
+       deletePodOrFail(c, ns, podname1)
+       delete(names, podname1)
+       validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}})
 
+       deletePodOrFail(c, ns, podname2)
+       delete(names, podname2)
        validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
    })
 
    It("should be able to up and down services", func() {
        // this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP
        SkipUnlessProviderIs(providersWithSSH...)
-       ns := namespaces[0]
+       ns := f.Namespace.Name
        numPods, servicePort := 3, 80
 
        podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
@@ -281,14 +257,18 @@ var _ = Describe("Services", func() {
    It("should work after restarting kube-proxy", func() {
        SkipUnlessProviderIs("gce", "gke")
 
-       ns := namespaces[0]
+       ns := f.Namespace.Name
        numPods, servicePort := 3, 80
 
-       defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }()
-       podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
+       svc1 := "service1"
+       svc2 := "service2"
+
+       defer func() { expectNoError(stopServeHostnameService(c, ns, svc1)) }()
+       podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods)
        Expect(err).NotTo(HaveOccurred())
-       defer func() { expectNoError(stopServeHostnameService(c, ns, "service2")) }()
-       podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
+
+       defer func() { expectNoError(stopServeHostnameService(c, ns, svc2)) }()
+       podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods)
        Expect(err).NotTo(HaveOccurred())
 
        if svc1IP == svc2IP {
@@ -305,14 +285,14 @@ var _ = Describe("Services", func() {
        expectNoError(verifyServeHostnameServiceUp(c, host, podNames1, svc1IP, servicePort))
        expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))
 
-       // Restart kube-proxy and verify that services are still reachable (after some time).
+       By("Restarting kube-proxy")
        if err := restartKubeProxy(host); err != nil {
            Failf("error restarting kube-proxy: %v", err)
        }
        expectNoError(verifyServeHostnameServiceUp(c, host, podNames1, svc1IP, servicePort))
        expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))
-       // Remove iptable rules and make sure they come back.
-       By("Remove iptable rules and make sure they come back")
+
+       By("Removing iptable rules")
        _, _, code, err := SSH(`
            sudo iptables -t nat -F KUBE-SERVICES || true;
            sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
@@ -328,7 +308,7 @@ var _ = Describe("Services", func() {
        // TODO: restartApiserver doesn't work in GKE - fix it and reenable this test.
        SkipUnlessProviderIs("gce")
 
-       ns := namespaces[0]
+       ns := f.Namespace.Name
        numPods, servicePort := 3, 80
 
        defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }()
@@ -363,134 +343,9 @@ var _ = Describe("Services", func() {
        expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))
    })
 
-   It("should be able to create a functioning external load balancer", func() {
-       // requires ExternalLoadBalancer
-       SkipUnlessProviderIs("gce", "gke", "aws")
-
-       serviceName := "external-lb-test"
-       ns := namespaces[0]
-
-       t := NewWebserverTest(c, ns, serviceName)
-       defer func() {
-           defer GinkgoRecover()
-           errs := t.Cleanup()
-           if len(errs) != 0 {
-               Failf("errors in cleanup: %v", errs)
-           }
-       }()
-
-       inboundPort := 3000
-
-       service := t.BuildServiceSpec()
-       service.Spec.Type = api.ServiceTypeLoadBalancer
-       service.Spec.Ports[0].Port = inboundPort
-       service.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
-
-       By("creating service " + serviceName + " with external load balancer in namespace " + ns)
-       result, err := t.CreateService(service)
-       Expect(err).NotTo(HaveOccurred())
-
-       // Wait for the load balancer to be created asynchronously, which is
-       // currently indicated by ingress point(s) being added to the status.
-       result, err = waitForLoadBalancerIngress(c, serviceName, ns)
-       Expect(err).NotTo(HaveOccurred())
-       if len(result.Status.LoadBalancer.Ingress) != 1 {
-           Failf("got unexpected number (%v) of ingress points for externally load balanced service: %v", result.Status.LoadBalancer.Ingress, result)
-       }
-       ingress := result.Status.LoadBalancer.Ingress[0]
-       if len(result.Spec.Ports) != 1 {
-           Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", result)
-       }
-       port := result.Spec.Ports[0]
-       if port.NodePort == 0 {
-           Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", result)
-       }
-       if !ServiceNodePortRange.Contains(port.NodePort) {
-           Failf("got unexpected (out-of-range) port for LoadBalancer service: %v", result)
-       }
-
-       By("creating pod to be part of service " + serviceName)
-       t.CreateWebserverRC(1)
-
-       By("hitting the pod through the service's NodePort")
-       testReachable(pickNodeIP(c), port.NodePort)
-
-       By("hitting the pod through the service's external load balancer")
-       testLoadBalancerReachable(ingress, inboundPort)
-   })
-
-   It("should be able to create a functioning external load balancer with user-provided load balancer ip", func() {
-       // requires ExternalLoadBalancer
-       SkipUnlessProviderIs("gce", "gke")
-
-       serviceName := "lb-test-with-user-ip"
-       ns := namespaces[0]
-
-       t := NewWebserverTest(c, ns, serviceName)
-       defer func() {
-           defer GinkgoRecover()
-           errs := t.Cleanup()
-           if len(errs) != 0 {
-               Failf("errors in cleanup: %v", errs)
-           }
-       }()
-
-       inboundPort := 3000
-
-       service := t.BuildServiceSpec()
-       service.Spec.Type = api.ServiceTypeLoadBalancer
-       service.Spec.Ports[0].Port = inboundPort
-       service.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
-
-       By("creating an external static ip")
-       rand.Seed(time.Now().UTC().UnixNano())
-       staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
-       glog.Errorf("static ip name is %s", staticIPName)
-       loadBalancerIP, err := createGCEStaticIP(staticIPName)
-       Expect(err).NotTo(HaveOccurred())
-       defer func() {
-           deleteGCEStaticIP(staticIPName)
-       }()
-
-       service.Spec.LoadBalancerIP = loadBalancerIP
-
-       By("creating service " + serviceName + " with external load balancer in namespace " + ns)
-       result, err := t.CreateService(service)
-       Expect(err).NotTo(HaveOccurred())
-
-       // Wait for the load balancer to be created asynchronously, which is
-       // currently indicated by ingress point(s) being added to the status.
-       result, err = waitForLoadBalancerIngress(c, serviceName, ns)
-       Expect(err).NotTo(HaveOccurred())
-       if len(result.Status.LoadBalancer.Ingress) != 1 {
-           Failf("got unexpected number (%v) of ingress points for externally load balanced service: %v", result.Status.LoadBalancer.Ingress, result)
-       }
-       ingress := result.Status.LoadBalancer.Ingress[0]
-       Expect(ingress.IP).To(Equal(loadBalancerIP))
-       if len(result.Spec.Ports) != 1 {
-           Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", result)
-       }
-       port := result.Spec.Ports[0]
-       if port.NodePort == 0 {
-           Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", result)
-       }
-       if !ServiceNodePortRange.Contains(port.NodePort) {
-           Failf("got unexpected (out-of-range) port for LoadBalancer service: %v", result)
-       }
-
-       By("creating pod to be part of service " + serviceName)
-       t.CreateWebserverRC(1)
-
-       By("hitting the pod through the service's NodePort")
-       testReachable(pickNodeIP(c), port.NodePort)
-
-       By("hitting the pod through the service's external load balancer")
-       testLoadBalancerReachable(ingress, inboundPort)
-   })
-
    It("should be able to create a functioning NodePort service", func() {
        serviceName := "nodeportservice-test"
-       ns := namespaces[0]
+       ns := f.Namespace.Name
 
        t := NewWebserverTest(c, ns, serviceName)
        defer func() {
@@ -547,7 +402,7 @@ var _ = Describe("Services", func() {
    })
 
    It("should be able to change the type and nodeport settings of a service", func() {
-       // requires ExternalLoadBalancer
+       // requires cloud load-balancer support
        SkipUnlessProviderIs("gce", "gke", "aws")
 
        serviceName := "mutability-service-test"
@@ -578,7 +433,7 @@ var _ = Describe("Services", func() {
            Failf("got unexpected Spec.Ports[0].nodePort for default service: %v", service)
        }
        if len(service.Status.LoadBalancer.Ingress) != 0 {
-           Failf("got unexpected len(Status.LoadBalancer.Ingresss) for default service: %v", service)
+           Failf("got unexpected len(Status.LoadBalancer.Ingress) for default service: %v", service)
        }
 
        By("creating pod to be part of service " + t.ServiceName)
@@ -603,17 +458,16 @@ var _ = Describe("Services", func() {
        if !ServiceNodePortRange.Contains(port.NodePort) {
            Failf("got unexpected (out-of-range) port for NodePort service: %v", service)
        }
 
        if len(service.Status.LoadBalancer.Ingress) != 0 {
-           Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
+           Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
        }
 
        By("hitting the pod through the service's NodePort")
        ip := pickNodeIP(f.Client)
        nodePort1 := port.NodePort // Save for later!
        testReachable(ip, nodePort1)
 
        By("changing service " + serviceName + " to type=LoadBalancer")
-       service.Spec.Type = api.ServiceTypeLoadBalancer
        service, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
            s.Spec.Type = api.ServiceTypeLoadBalancer
        })
@@ -634,12 +488,13 @@ var _ = Describe("Services", func() {
            Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", service)
        }
        if len(service.Status.LoadBalancer.Ingress) != 1 {
-           Failf("got unexpected len(Status.LoadBalancer.Ingresss) for LoadBalancer service: %v", service)
+           Failf("got unexpected len(Status.LoadBalancer.Ingress) for LoadBalancer service: %v", service)
        }
        ingress1 := service.Status.LoadBalancer.Ingress[0]
        if ingress1.IP == "" && ingress1.Hostname == "" {
-           Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
+           Failf("got unexpected Status.LoadBalancer.Ingress[0] for LoadBalancer service: %v", service)
        }
 
        By("hitting the pod through the service's NodePort")
        ip = pickNodeIP(f.Client)
        testReachable(ip, nodePort1)
@@ -668,24 +523,30 @@ var _ = Describe("Services", func() {
            Failf("got unexpected Spec.Ports[0].nodePort for NodePort service: %v", service)
        }
        if len(service.Status.LoadBalancer.Ingress) != 1 {
-           Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
+           Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
        }
 
-       // TODO: Make this less of a hack. Watch for events?
-       Logf("Waiting 2 minutes to give service time to settle after changing configuration")
-       time.Sleep(time.Second * 120)
+       By("hitting the pod through the service's updated NodePort")
+       testReachable(ip, nodePort2)
+       By("checking the old NodePort is closed")
+       testNotReachable(ip, nodePort1)
 
-       service, err = waitForLoadBalancerIngress(f.Client, serviceName, f.Namespace.Name)
-       Expect(err).NotTo(HaveOccurred())
-
-       ingress2 := service.Status.LoadBalancer.Ingress[0]
-       Expect(ingress1).To(Equal(ingress2))
-
-       By("hitting the pod through the service's updated NodePort")
-       testReachable(ip, nodePort2)
-       By("hitting the pod through the service's LoadBalancer")
-       testLoadBalancerReachable(ingress2, 80)
-       By("checking the old NodePort is closed")
-       testNotReachable(ip, nodePort1)
+       By("hitting the pod through the service's LoadBalancer")
+       i := 1
+       for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(3 * time.Second) {
+           service, err = waitForLoadBalancerIngress(f.Client, serviceName, f.Namespace.Name)
+           Expect(err).NotTo(HaveOccurred())
+
+           ingress2 := service.Status.LoadBalancer.Ingress[0]
+           if testLoadBalancerReachableInTime(ingress2, 80, 5*time.Second) {
+               break
+           }
+
+           if i%5 == 0 {
+               Logf("Waiting for load-balancer changes (%v elapsed, will retry)", time.Since(start))
+           }
+           i++
+       }
 
        By("changing service " + serviceName + " back to type=ClusterIP")
        service, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
@@ -694,6 +555,9 @@ var _ = Describe("Services", func() {
        })
        Expect(err).NotTo(HaveOccurred())
 
+       if len(service.Status.LoadBalancer.Ingress) != 0 {
+           Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
+       }
        if service.Spec.Type != api.ServiceTypeClusterIP {
            Failf("got unexpected Spec.Type for back-to-ClusterIP service: %v", service)
        }
@@ -710,109 +574,22 @@ var _ = Describe("Services", func() {
        Expect(err).NotTo(HaveOccurred())
 
        if len(service.Status.LoadBalancer.Ingress) != 0 {
-           Failf("got unexpected len(Status.LoadBalancer.Ingresss) for back-to-ClusterIP service: %v", service)
+           Failf("got unexpected len(Status.LoadBalancer.Ingress) for back-to-ClusterIP service: %v", service)
        }
-       By("checking the NodePort (original) is closed")
-       ip = pickNodeIP(f.Client)
-       testNotReachable(ip, nodePort1)
-       By("checking the NodePort (updated) is closed")
+       By("checking the NodePort is closed")
        ip = pickNodeIP(f.Client)
        testNotReachable(ip, nodePort2)
        By("checking the LoadBalancer is closed")
-       testLoadBalancerNotReachable(ingress2, 80)
-   })
-
-   It("should release the load balancer when Type goes from LoadBalancer -> NodePort", func() {
-       // requires ExternalLoadBalancer
-       SkipUnlessProviderIs("gce", "gke", "aws")
-
-       serviceName := "service-release-lb"
-       ns := namespaces[0]
-
-       t := NewWebserverTest(c, ns, serviceName)
-       defer func() {
-           defer GinkgoRecover()
-           errs := t.Cleanup()
-           if len(errs) != 0 {
-               Failf("errors in cleanup: %v", errs)
-           }
-       }()
-
-       service := t.BuildServiceSpec()
-       service.Spec.Type = api.ServiceTypeLoadBalancer
-
-       By("creating service " + serviceName + " with type LoadBalancer")
-       service, err := t.CreateService(service)
-       Expect(err).NotTo(HaveOccurred())
-
-       By("creating pod to be part of service " + t.ServiceName)
-       t.CreateWebserverRC(1)
-
-       if service.Spec.Type != api.ServiceTypeLoadBalancer {
-           Failf("got unexpected Spec.Type for LoadBalancer service: %v", service)
-       }
-       if len(service.Spec.Ports) != 1 {
-           Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", service)
-       }
-       nodePort := service.Spec.Ports[0].NodePort
-       if nodePort == 0 {
-           Failf("got unexpected Spec.Ports[0].NodePort for LoadBalancer service: %v", service)
-       }
-
-       // Wait for the load balancer to be created asynchronously
-       service, err = waitForLoadBalancerIngress(c, serviceName, ns)
-       Expect(err).NotTo(HaveOccurred())
-
-       if len(service.Status.LoadBalancer.Ingress) != 1 {
-           Failf("got unexpected len(Status.LoadBalancer.Ingresss) for LoadBalancer service: %v", service)
-       }
-       ingress := service.Status.LoadBalancer.Ingress[0]
-       if ingress.IP == "" && ingress.Hostname == "" {
-           Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
-       }
-
-       By("hitting the pod through the service's NodePort")
-       ip := pickNodeIP(c)
-       testReachable(ip, nodePort)
-       By("hitting the pod through the service's LoadBalancer")
-       testLoadBalancerReachable(ingress, 80)
-
-       By("changing service " + serviceName + " to type=NodePort")
-       service, err = updateService(c, ns, serviceName, func(s *api.Service) {
-           s.Spec.Type = api.ServiceTypeNodePort
-       })
-       Expect(err).NotTo(HaveOccurred())
-
-       if service.Spec.Type != api.ServiceTypeNodePort {
-           Failf("got unexpected Spec.Type for NodePort service: %v", service)
-       }
-       if len(service.Spec.Ports) != 1 {
-           Failf("got unexpected len(Spec.Ports) for NodePort service: %v", service)
-       }
-       if service.Spec.Ports[0].NodePort != nodePort {
-           Failf("got unexpected Spec.Ports[0].NodePort for NodePort service: %v", service)
-       }
-
-       // Wait for the load balancer to be created asynchronously
-       service, err = waitForLoadBalancerDestroy(c, serviceName, ns)
-       Expect(err).NotTo(HaveOccurred())
-
-       if len(service.Status.LoadBalancer.Ingress) != 0 {
-           Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
-       }
-
-       By("hitting the pod through the service's NodePort")
-       testReachable(ip, nodePort)
-       By("checking the LoadBalancer is closed")
-       testLoadBalancerNotReachable(ingress, 80)
+       testLoadBalancerNotReachable(ingress1, 80)
    })
 
    It("should prevent NodePort collisions", func() {
-       serviceName := "nodeport-collision"
-       serviceName2 := serviceName + "2"
-       ns := namespaces[0]
+       baseName := "nodeport-collision-"
+       serviceName1 := baseName + "1"
+       serviceName2 := baseName + "2"
+       ns := f.Namespace.Name
 
-       t := NewWebserverTest(c, ns, serviceName)
+       t := NewWebserverTest(c, ns, serviceName1)
        defer func() {
            defer GinkgoRecover()
            errs := t.Cleanup()
@@ -821,10 +598,9 @@ var _ = Describe("Services", func() {
        }
    }()
 
+   By("creating service " + serviceName1 + " with type NodePort in namespace " + ns)
    service := t.BuildServiceSpec()
    service.Spec.Type = api.ServiceTypeNodePort
-
-   By("creating service " + serviceName + " with type NodePort in namespace " + ns)
    result, err := t.CreateService(service)
    Expect(err).NotTo(HaveOccurred())
 
@@ -839,23 +615,21 @@ var _ = Describe("Services", func() {
        Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result)
    }
 
-   By("creating service " + serviceName + " with conflicting NodePort")
+   By("creating service " + serviceName2 + " with conflicting NodePort")
 
    service2 := t.BuildServiceSpec()
    service2.Name = serviceName2
    service2.Spec.Type = api.ServiceTypeNodePort
    service2.Spec.Ports[0].NodePort = port.NodePort
 
-   By("creating service " + serviceName2 + " with conflicting NodePort")
    result2, err := t.CreateService(service2)
    if err == nil {
        Failf("Created service with conflicting NodePort: %v", result2)
    }
-   expectedErr := fmt.Sprintf("Service \"%s\" is invalid: spec.ports[0].nodePort: invalid value '%d', Details: provided port is already allocated", serviceName2, port.NodePort)
+   expectedErr := fmt.Sprintf("Service \"%s\" is invalid: spec.ports[0].nodePort: invalid value '%d', Details: provided port is already allocated",
+       serviceName2, port.NodePort)
    Expect(fmt.Sprintf("%v", err)).To(Equal(expectedErr))
 
-   By("deleting original service " + serviceName + " with type NodePort in namespace " + ns)
-   err = t.DeleteService(serviceName)
+   By("deleting service " + serviceName1 + " to release NodePort")
+   err = t.DeleteService(serviceName1)
    Expect(err).NotTo(HaveOccurred())
 
    By("creating service " + serviceName2 + " with no-longer-conflicting NodePort")
@@ -865,7 +639,7 @@ var _ = Describe("Services", func() {
 
    It("should check NodePort out-of-range", func() {
        serviceName := "nodeport-range-test"
-       ns := namespaces[0]
+       ns := f.Namespace.Name
 
        t := NewWebserverTest(c, ns, serviceName)
        defer func() {
@@ -931,7 +705,7 @@ var _ = Describe("Services", func() {
 
    It("should release NodePorts on delete", func() {
        serviceName := "nodeport-reuse"
-       ns := namespaces[0]
+       ns := f.Namespace.Name
 
        t := NewWebserverTest(c, ns, serviceName)
        defer func() {
@@ -989,54 +763,98 @@ var _ = Describe("Services", func() {
        Expect(err).NotTo(HaveOccurred())
    })
 
-   It("should correctly serve identically named services in different namespaces on different external IP addresses", func() {
+   // This test hits several load-balancer cases because LB turnup is slow.
+   It("should serve identically named services in different namespaces on different load-balancers", func() {
        // requires ExternalLoadBalancer
        SkipUnlessProviderIs("gce", "gke", "aws")
 
-       serviceNames := []string{"s0"} // Could add more here, but then it takes longer.
-       labels := map[string]string{
-           "key0": "value0",
-           "key1": "value1",
-       }
-       service := &api.Service{
-           ObjectMeta: api.ObjectMeta{},
-           Spec: api.ServiceSpec{
-               Selector: labels,
-               Ports: []api.ServicePort{{
-                   Port: 80,
-                   TargetPort: util.NewIntOrStringFromInt(80),
-               }},
-               Type: api.ServiceTypeLoadBalancer,
-           },
+       ns1 := f.Namespace.Name
+
+       By("Building a second namespace api object")
+       namespacePtr, err := createTestingNS("services", c)
+       Expect(err).NotTo(HaveOccurred())
+       ns2 := namespacePtr.Name
+       extraNamespaces = append(extraNamespaces, ns2)
+
+       serviceName := "test-svc"
+       servicePort := 9376
+
+       By("creating service " + serviceName + " with load balancer in namespace " + ns1)
+       t1 := NewWebserverTest(c, ns1, serviceName)
+       svc1 := t1.BuildServiceSpec()
+       svc1.Spec.Type = api.ServiceTypeLoadBalancer
+       svc1.Spec.Ports[0].Port = servicePort
+       svc1.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
+       _, err = t1.CreateService(svc1)
+       Expect(err).NotTo(HaveOccurred())
+
+       By("creating pod to be part of service " + serviceName + " in namespace " + ns1)
+       t1.CreateWebserverRC(1)
+
+       loadBalancerIP := ""
+       if providerIs("gce", "gke") {
+           By("creating a static IP")
+           rand.Seed(time.Now().UTC().UnixNano())
+           staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
+           loadBalancerIP, err = createGCEStaticIP(staticIPName)
+           Expect(err).NotTo(HaveOccurred())
+           defer func() {
+               // Release GCE static IP - this is not kube-managed and will not be automatically released.
+               deleteGCEStaticIP(staticIPName)
+           }()
        }
 
+       By("creating service " + serviceName + " with load balancer in namespace " + ns2)
+       t2 := NewWebserverTest(c, ns2, serviceName)
+       svc2 := t2.BuildServiceSpec()
+       svc2.Spec.Type = api.ServiceTypeLoadBalancer
+       svc2.Spec.Ports[0].Port = servicePort
+       svc2.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
+       svc2.Spec.LoadBalancerIP = loadBalancerIP
+       _, err = t2.CreateService(svc2)
+       Expect(err).NotTo(HaveOccurred())
+
+       By("creating pod to be part of service " + serviceName + " in namespace " + ns2)
+       t2.CreateWebserverRC(2)
+
        ingressPoints := []string{}
-       for _, namespace := range namespaces {
-           for _, serviceName := range serviceNames {
-               service.ObjectMeta.Name = serviceName
-               service.ObjectMeta.Namespace = namespace
-               By("creating service " + serviceName + " in namespace " + namespace)
-               _, err := c.Services(namespace).Create(service)
-               Expect(err).NotTo(HaveOccurred())
-               defer func(namespace, serviceName string) { // clean up when we're done
-                   By("deleting service " + serviceName + " in namespace " + namespace)
-                   err := c.Services(namespace).Delete(serviceName)
-                   Expect(err).NotTo(HaveOccurred())
-               }(namespace, serviceName)
-           }
-       }
-       for _, namespace := range namespaces {
-           for _, serviceName := range serviceNames {
-               result, err := waitForLoadBalancerIngress(c, serviceName, namespace)
-               Expect(err).NotTo(HaveOccurred())
-               for i := range result.Status.LoadBalancer.Ingress {
-                   ingress := result.Status.LoadBalancer.Ingress[i].IP
-                   if ingress == "" {
-                       ingress = result.Status.LoadBalancer.Ingress[i].Hostname
-                   }
-                   ingressPoints = append(ingressPoints, ingress) // Save 'em to check uniqueness
-               }
-           }
-       }
+       svcs := []*api.Service{svc1, svc2}
+       for _, svc := range svcs {
+           namespace := svc.Namespace
+           lbip := svc.Spec.LoadBalancerIP
+
+           // Wait for the load balancer to be created asynchronously, which is
+           // currently indicated by ingress point(s) being added to the status.
+           result, err := waitForLoadBalancerIngress(c, serviceName, namespace)
+           Expect(err).NotTo(HaveOccurred())
+           if len(result.Status.LoadBalancer.Ingress) != 1 {
+               Failf("got unexpected number (%v) of ingress points for externally load balanced service: %v", result.Status.LoadBalancer.Ingress, result)
+           }
+           ingress := result.Status.LoadBalancer.Ingress[0]
+           if len(result.Spec.Ports) != 1 {
+               Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", result)
+           }
+           if lbip != "" {
+               Expect(ingress.IP).To(Equal(lbip))
+           }
+           port := result.Spec.Ports[0]
+           if port.NodePort == 0 {
+               Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", result)
+           }
+           if !ServiceNodePortRange.Contains(port.NodePort) {
+               Failf("got unexpected (out-of-range) port for LoadBalancer service: %v", result)
+           }
+           ing := result.Status.LoadBalancer.Ingress[0].IP
+           if ing == "" {
+               ing = result.Status.LoadBalancer.Ingress[0].Hostname
+           }
+           ingressPoints = append(ingressPoints, ing) // Save 'em to check uniqueness
+
+           By("hitting the pod through the service's NodePort")
+           testReachable(pickNodeIP(c), port.NodePort)
+
+           By("hitting the pod through the service's external load balancer")
+           testLoadBalancerReachable(ingress, servicePort)
+       }
        validateUniqueOrFail(ingressPoints)
    })
@@ -1070,7 +888,8 @@ func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string)
    const timeout = 20 * time.Minute
    var service *api.Service
    By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have a LoadBalancer ingress point", timeout, serviceName, namespace))
-   for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
+   i := 1
+   for start := time.Now(); time.Since(start) < timeout; time.Sleep(3 * time.Second) {
        service, err := c.Services(namespace).Get(serviceName)
        if err != nil {
            Logf("Get service failed, ignoring for 5s: %v", err)
@@ -1079,13 +898,17 @@ func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string)
        if len(service.Status.LoadBalancer.Ingress) > 0 {
            return service, nil
        }
+       if i%5 == 0 {
            Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start))
        }
+       i++
+   }
    return service, fmt.Errorf("service %s in namespace %s doesn't have a LoadBalancer ingress point after %.2f seconds", serviceName, namespace, timeout.Seconds())
 }
 
 func waitForLoadBalancerDestroy(c *client.Client, serviceName, namespace string) (*api.Service, error) {
    // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
+   // TODO: this should actually test that the LB was released at the cloud provider
    const timeout = 10 * time.Minute
    var service *api.Service
    By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have no LoadBalancer ingress points", timeout, serviceName, namespace))
@@ -1136,7 +959,7 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
                Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
            }
 
-           Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
+           // Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
            if _, ok := m[addr.TargetRef.UID]; !ok {
                m[addr.TargetRef.UID] = make([]int, 0)
            }
@@ -1159,9 +982,8 @@ func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints
            Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
        }
        portsByUID[pod.ObjectMeta.UID] = portList
-       By(fmt.Sprintf(""))
    }
-   By(fmt.Sprintf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns))
+   // Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
    return portsByUID
 }
 
@@ -1188,26 +1010,31 @@ func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUI
 }
 
 func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) {
-   By(fmt.Sprintf("Waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
-   for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(5 * time.Second) {
+   By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
+   i := 1
+   for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(1 * time.Second) {
        endpoints, err := c.Endpoints(namespace).Get(serviceName)
        if err != nil {
            Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
            continue
        }
-       Logf("Found endpoints %v", endpoints)
+       // Logf("Found endpoints %v", endpoints)
 
        portsByPodUID := getContainerPortsByPodUID(endpoints)
-       Logf("Found port by pod UID %v", portsByPodUID)
+       // Logf("Found port by pod UID %v", portsByPodUID)
 
        expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
        if len(portsByPodUID) == len(expectedEndpoints) {
            validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
-           By(fmt.Sprintf("Successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, time.Since(start)))
+           Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
+               serviceName, namespace, expectedEndpoints, time.Since(start))
            return
        }
 
-       Logf("Unexpected number of endpoints: found %v, expected %v (%v elapsed, ignoring for 5s)", portsByPodUID, expectedEndpoints, time.Since(start))
+       if i%5 == 0 {
+           Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
+       }
+       i++
    }
 
    if pods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()); err == nil {
@@ -1220,8 +1047,8 @@ func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, ex
    Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, serviceStartTimeout)
 }
 
-func addEndpointPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
-   By(fmt.Sprintf("Adding pod %v in namespace %v", name, ns))
+func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
+   By(fmt.Sprintf("creating pod %s in namespace %s", name, ns))
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: name,
@@ -1241,6 +1068,12 @@ func addEndpointPodOrFail(c *client.Client, ns, name string, labels map[string]s
    Expect(err).NotTo(HaveOccurred())
 }
 
+func deletePodOrFail(c *client.Client, ns, name string) {
+   By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns))
+   err := c.Pods(ns).Delete(name, nil)
+   Expect(err).NotTo(HaveOccurred())
+}
+
 func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
    ips := []string{}
    for i := range nodes.Items {
@@ -1278,13 +1111,17 @@ func pickNodeIP(c *client.Client) string {
    return ip
 }
 
-func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) {
+func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) bool {
+   return testLoadBalancerReachableInTime(ingress, port, podStartTimeout)
+}
+
+func testLoadBalancerReachableInTime(ingress api.LoadBalancerIngress, port int, timeout time.Duration) bool {
    ip := ingress.IP
    if ip == "" {
        ip = ingress.Hostname
    }
 
-   testReachable(ip, port)
+   return testReachableInTime(ip, port, timeout)
 }
 
 func testLoadBalancerNotReachable(ingress api.LoadBalancerIngress, port int) {
@@ -1296,19 +1133,25 @@ func testLoadBalancerNotReachable(ingress api.LoadBalancerIngress, port int) {
    testNotReachable(ip, port)
 }
 
-func testReachable(ip string, port int) {
+func testReachable(ip string, port int) bool {
+   return testReachableInTime(ip, port, podStartTimeout)
+}
+
+func testReachableInTime(ip string, port int, timeout time.Duration) bool {
    url := fmt.Sprintf("http://%s:%d", ip, port)
    if ip == "" {
        Failf("Got empty IP for reachability check (%s)", url)
+       return false
    }
    if port == 0 {
        Failf("Got port==0 for reachability check (%s)", url)
+       return false
    }
 
    desc := fmt.Sprintf("the url %s to be reachable", url)
-   By(fmt.Sprintf("Waiting up to %v for %s", podStartTimeout, desc))
+   By(fmt.Sprintf("Waiting up to %v for %s", timeout, desc))
    start := time.Now()
-   err := wait.Poll(poll, podStartTimeout, func() (bool, error) {
+   err := wait.PollImmediate(poll, timeout, func() (bool, error) {
        resp, err := httpGetNoConnectionPool(url)
        if err != nil {
            Logf("Got error waiting for reachability of %s: %v (%v)", url, err, time.Since(start))
@@ -1329,7 +1172,11 @@ func testReachable(ip string, port int) {
        Logf("Successfully reached %v", url)
        return true, nil
    })
+   if err != nil {
        Expect(err).NotTo(HaveOccurred(), "Error waiting for %s", desc)
+       return false
+   }
+   return true
 }
 
 func testNotReachable(ip string, port int) {
@ -1343,7 +1190,7 @@ func testNotReachable(ip string, port int) {
|
|||||||
|
|
||||||
desc := fmt.Sprintf("the url %s to be *not* reachable", url)
|
desc := fmt.Sprintf("the url %s to be *not* reachable", url)
|
||||||
By(fmt.Sprintf("Waiting up to %v for %s", podStartTimeout, desc))
|
By(fmt.Sprintf("Waiting up to %v for %s", podStartTimeout, desc))
|
||||||
err := wait.Poll(poll, podStartTimeout, func() (bool, error) {
|
err := wait.PollImmediate(poll, podStartTimeout, func() (bool, error) {
|
||||||
resp, err := httpGetNoConnectionPool(url)
|
resp, err := httpGetNoConnectionPool(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Logf("Successfully waited for %s", desc)
|
Logf("Successfully waited for %s", desc)
|
||||||
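Note: both probes go through httpGetNoConnectionPool rather than plain http.Get. Its body is not shown in this diff; the sketch below is an assumption about its intent, namely disabling keep-alive pooling so each probe opens a fresh connection instead of being satisfied by a stale cached one:

package main

import (
    "fmt"
    "net/http"
)

// httpGetNoConnectionPool issues a GET on a throwaway transport with
// keep-alives disabled, so no connection is reused between probes.
func httpGetNoConnectionPool(url string) (*http.Response, error) {
    tr := &http.Transport{
        DisableKeepAlives: true, // one fresh connection per request
    }
    client := &http.Client{Transport: tr}
    return client.Get(url)
}

func main() {
    resp, err := httpGetNoConnectionPool("http://example.com/")
    if err != nil {
        fmt.Println("probe failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}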
@@ -1365,6 +1212,7 @@ func testNotReachable(ip string, port int) {
 func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) {
     podNames := make([]string, replicas)
 
+    By("creating service " + name + " in namespace " + ns)
     _, err := c.Services(ns).Create(&api.Service{
         ObjectMeta: api.ObjectMeta{
             Name: name,
@@ -1447,6 +1295,7 @@ func verifyServeHostnameServiceUp(c *client.Client, host string, expectedPods []
             command),
     }
 
+    By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
     for _, cmd := range commands {
         passed := false
         for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5) {
@@ -1540,6 +1389,7 @@ func (t *WebserverTest) BuildServiceSpec() *api.Service {
     service := &api.Service{
         ObjectMeta: api.ObjectMeta{
             Name: t.ServiceName,
+            Namespace: t.Namespace,
         },
         Spec: api.ServiceSpec{
             Selector: t.Labels,
@@ -66,7 +66,7 @@ const (
     nonExist = "NonExist"
 
     // How often to poll pods and nodes.
-    poll = 5 * time.Second
+    poll = 2 * time.Second
 
     // service accounts are provisioned after namespace creation
     // a service account is required to support pod creation in a namespace as part of admission control
@@ -86,10 +86,6 @@ const (
     podRespondingTimeout = 2 * time.Minute
     serviceRespondingTimeout = 2 * time.Minute
     endpointRegisterTimeout = time.Minute
-
-    // How wide to print pod names, by default. Useful for aligning printing to
-    // quickly scan through output.
-    podPrintWidth = 55
 )
 
 type CloudConfig struct {
@@ -335,7 +331,7 @@ func waitForPodsRunningReady(ns string, minPods int, timeout time.Duration) erro
     start := time.Now()
     Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
         timeout, minPods, ns)
-    if wait.Poll(poll, timeout, func() (bool, error) {
+    if wait.PollImmediate(poll, timeout, func() (bool, error) {
         // We get the new list of pods and replication controllers in every
         // iteration because more pods come online during startup and we want to
         // ensure they are also checked.
@@ -411,22 +407,22 @@ func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName s
 }
 
 func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
-    Logf("Waiting up to %[1]v for pod %-[2]*[3]s status to be %[4]s", timeout, podPrintWidth, podName, desc)
+    Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
     for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
         pod, err := c.Pods(ns).Get(podName)
         if err != nil {
             // Aligning this text makes it much more readable
-            Logf("Get pod %-[1]*[2]s in namespace '%[3]s' failed, ignoring for %[4]v. Error: %[5]v",
-                podPrintWidth, podName, ns, poll, err)
+            Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
+                podName, ns, poll, err)
             continue
         }
         done, err := condition(pod)
         if done {
             return err
         }
-        Logf("Waiting for pod %-[1]*[2]s in namespace '%[3]s' status to be '%[4]s'"+
-            "(found phase: %[5]q, readiness: %[6]t) (%[7]v elapsed)",
-            podPrintWidth, podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
+        Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
+            "(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
+            podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
     }
     return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
 }
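Note: the Logf rewrites above drop Go's dynamic-width indexed format verbs along with the podPrintWidth constant. In %-[2]*[3]s, the - left-aligns, [2]* reads the field width from argument 2, and [3]s prints argument 3 as a string; that is how pod names were padded into aligned columns before this cleanup. A small runnable demonstration of both the old and new styles:

package main

import "fmt"

func main() {
    const podPrintWidth = 20

    // Old style: explicit argument indexes plus a dynamic width.
    // %-[2]*[3]s = left-aligned, width taken from arg 2, string from arg 3.
    fmt.Printf("Waiting up to %[1]v for pod %-[2]*[3]s status to be %[4]s\n",
        "5m", podPrintWidth, "my-pod", "running")

    // New style after this commit: plain indexed verbs, no padding.
    fmt.Printf("Waiting up to %[1]v for pod %[2]s status to be %[3]s\n",
        "5m", "my-pod", "running")
}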
@@ -470,7 +466,7 @@ func createTestingNS(baseName string, c *client.Client) (*api.Namespace, error)
     }
     // Be robust about making the namespace creation call.
     var got *api.Namespace
-    if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) {
+    if err := wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
         var err error
         got, err = c.Namespaces().Create(namespaceObj)
         if err != nil {
@@ -535,7 +531,7 @@ func deleteNS(c *client.Client, namespace string, timeout time.Duration) error {
         return err
     }
 
-    err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
+    err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
         if _, err := c.Namespaces().Get(namespace); err != nil {
             if apierrs.IsNotFound(err) {
                 return true, nil
@@ -623,7 +619,7 @@ func waitForPodSuccessInNamespace(c *client.Client, podName string, contName str
 func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, error) {
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
     var p *api.Pod = nil
-    err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
+    err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
         Logf("Waiting for pod %s to appear on node %s", rcName, node)
         pods, err := c.Pods(ns).List(label, fields.Everything())
         if err != nil {
@@ -642,7 +638,7 @@ func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, er
 }
 
 func waitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
-    return wait.Poll(interval, timeout, func() (bool, error) {
+    return wait.PollImmediate(interval, timeout, func() (bool, error) {
         Logf("Waiting for pod %s to disappear", podName)
         pods, err := c.Pods(ns).List(label, fields.Everything())
         if err != nil {
@@ -672,7 +668,7 @@ func waitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error
 
 // waitForService waits until the service appears (exist == true), or disappears (exist == false)
 func waitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
-    err := wait.Poll(interval, timeout, func() (bool, error) {
+    err := wait.PollImmediate(interval, timeout, func() (bool, error) {
         _, err := c.Services(namespace).Get(name)
         switch {
         case err == nil:
@@ -727,7 +723,7 @@ func countEndpointsNum(e *api.Endpoints) int {
 
 // waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
 func waitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
-    err := wait.Poll(interval, timeout, func() (bool, error) {
+    err := wait.PollImmediate(interval, timeout, func() (bool, error) {
         _, err := c.ReplicationControllers(namespace).Get(name)
         if err != nil {
             Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
@@ -822,13 +818,13 @@ func (r podResponseChecker) checkAllResponses() (done bool, err error) {
 func podsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
     By("trying to dial each unique pod")
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-    return wait.Poll(poll, podRespondingTimeout, podResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses)
+    return wait.PollImmediate(poll, podRespondingTimeout, podResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses)
 }
 
 func serviceResponding(c *client.Client, ns, name string) error {
     By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
 
-    return wait.Poll(poll, serviceRespondingTimeout, func() (done bool, err error) {
+    return wait.PollImmediate(poll, serviceRespondingTimeout, func() (done bool, err error) {
         body, err := c.Get().
             Prefix("proxy").
             Namespace(ns).
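Note: podsResponding hands podResponseChecker{...}.checkAllResponses straight to the poller; a Go method value bound to a struct literal satisfies a condition-function type with no wrapper. An illustrative, self-contained example of the idiom (the checker type below is made up for the demo):

package main

import "fmt"

type conditionFunc func() (bool, error)

type checker struct {
    attempts *int
    passAt   int
}

// checkAll is an ordinary method; checker{...}.checkAll is a method
// value that can be passed wherever a conditionFunc is expected.
func (c checker) checkAll() (bool, error) {
    *c.attempts++
    return *c.attempts >= c.passAt, nil
}

func runUntilDone(cond conditionFunc) int {
    n := 0
    for {
        n++
        if done, _ := cond(); done {
            return n
        }
    }
}

func main() {
    attempts := 0
    // Bind the method to a value constructed in-place, as the e2e code does.
    fmt.Println("done after", runUntilDone(checker{&attempts, 3}.checkAll), "calls")
}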
@@ -1214,7 +1210,6 @@ func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
 // It's the caller's responsibility to clean up externally (i.e. use the
 // namespace lifecycle for handling cleanup).
 func RunRC(config RCConfig) error {
-
     // Don't force tests to fail if they don't care about containers restarting.
     var maxContainerFailures int
     if config.MaxContainerFailures == nil {
@@ -1225,7 +1220,7 @@ func RunRC(config RCConfig) error {
 
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
 
-    By(fmt.Sprintf("%v Creating replication controller %s", time.Now(), config.Name))
+    By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
     rc := &api.ReplicationController{
         ObjectMeta: api.ObjectMeta{
             Name: config.Name,
@@ -1292,7 +1287,7 @@ func RunRC(config RCConfig) error {
     if err != nil {
         return fmt.Errorf("Error creating replication controller: %v", err)
     }
-    Logf("%v Created replication controller with name: %v, namespace: %v, replica count: %v", time.Now(), rc.Name, config.Namespace, rc.Spec.Replicas)
+    Logf("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
     podStore := newPodStore(config.Client, config.Namespace, label, fields.Everything())
     defer podStore.Stop()
 
@@ -1364,13 +1359,13 @@ func RunRC(config RCConfig) error {
             *config.CreatedPods = pods
         }
 
-        Logf("%v %v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
-            time.Now(), rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)
+        Logf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
+            rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)
 
         promPushRunningPending(running, pending)
 
         if config.PodStatusFile != nil {
-            fmt.Fprintf(config.PodStatusFile, "%s, %d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", time.Now(), running, pending, waiting, inactive, unknown, runningButNotReady)
+            fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", running, pending, waiting, inactive, unknown, runningButNotReady)
         }
 
         if failedContainers > maxContainerFailures {
@@ -1482,7 +1477,7 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
 }
 
 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
-    By(fmt.Sprintf("%v Scaling replication controller %s in namespace %s to %d", time.Now(), name, ns, size))
+    By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
     scaler, err := kubectl.ScalerFor("ReplicationController", c)
     if err != nil {
         return err
@@ -1538,7 +1533,7 @@ func waitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (p
 
 // Delete a Replication Controller and all pods it spawned
 func DeleteRC(c *client.Client, ns, name string) error {
-    By(fmt.Sprintf("%v Deleting replication controller %s in namespace %s", time.Now(), name, ns))
+    By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
     rc, err := c.ReplicationControllers(ns).Get(name)
     if err != nil {
         if apierrs.IsNotFound(err) {
@@ -1574,7 +1569,7 @@ func DeleteRC(c *client.Client, ns, name string) error {
 // waitForRCPodsGone waits until there are no pods reported under an RC's selector (because the pods
 // have completed termination).
 func waitForRCPodsGone(c *client.Client, rc *api.ReplicationController) error {
-    return wait.Poll(poll, 2*time.Minute, func() (bool, error) {
+    return wait.PollImmediate(poll, 2*time.Minute, func() (bool, error) {
         if pods, err := c.Pods(rc.Namespace).List(labels.SelectorFromSet(rc.Spec.Selector), fields.Everything()); err == nil && len(pods.Items) == 0 {
             return true, nil
         }
@@ -1635,7 +1630,7 @@ func waitForDeploymentStatus(c *client.Client, ns, deploymentName string, desire
 func listNodes(c *client.Client, label labels.Selector, field fields.Selector) (*api.NodeList, error) {
     var nodes *api.NodeList
     var errLast error
-    if wait.Poll(poll, singleCallTimeout, func() (bool, error) {
+    if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
         nodes, errLast = c.Nodes().List(label, field)
         return errLast == nil, nil
     }) != nil {
@@ -1815,7 +1810,7 @@ func checkPodsRunningReady(c *client.Client, ns string, podNames []string, timeo
     // support only Go >= 1.4.
     for _, podName := range podNames {
         if !<-result {
-            Logf("Pod %-[1]*[2]s failed to be %[3]s.", podPrintWidth, podName, desc)
+            Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
             success = false
         }
     }
@@ -1877,7 +1872,7 @@ func allNodesReady(c *client.Client, timeout time.Duration) error {
     Logf("Waiting up to %v for all nodes to be ready", timeout)
 
     var notReady []api.Node
-    err := wait.Poll(poll, timeout, func() (bool, error) {
+    err := wait.PollImmediate(poll, timeout, func() (bool, error) {
         notReady = nil
         nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
         if err != nil {