Make Service e2e run a bit faster

parent 0aa5c16f38
commit a51ba71a65
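The theme of the diff: reuse the framework's per-test namespace instead of building two suite-level namespaces for every test, drop a redundant LoadBalancer-to-NodePort test, replace a fixed two-minute settle sleep with a poll that exits as soon as the load balancer answers, shorten poll intervals, and throttle per-iteration logging. The recurring poll shape, as a minimal standalone sketch (pollUntil and its parameters are illustrative names, not code from this commit):

package main

import (
	"log"
	"time"
)

// pollUntil retries cond every interval until timeout, logging progress only
// every fifth attempt so test output stays readable.
func pollUntil(cond func() bool, timeout, interval time.Duration) bool {
	i := 1
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if cond() {
			return true
		}
		if i%5 == 0 {
			log.Printf("still waiting (%v elapsed, will retry)", time.Since(start))
		}
		i++
	}
	return false
}

func main() {
	deadline := time.Now().Add(50 * time.Millisecond)
	ok := pollUntil(func() bool { return time.Now().After(deadline) }, time.Second, 10*time.Millisecond)
	log.Printf("condition met: %v", ok)
}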
@@ -39,9 +39,7 @@ var prom_registered = false
 // Reusable function for pushing metrics to prometheus. Handles initialization and so on.
 func promPushRunningPending(running, pending int) error {
 	if testContext.PrometheusPushGateway == "" {
 		Logf("Ignoring prom push, push gateway unavailable")
 		return nil
 	} else {
 		// Register metrics if necessary
@@ -26,7 +26,6 @@ import (
 	"strings"
 	"time"

-	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	"k8s.io/kubernetes/pkg/api"
@@ -46,32 +45,26 @@ var _ = Describe("Services", func() {
 	f := NewFramework("services")

 	var c *client.Client
-	// Use these in tests. They're unique for each test to prevent name collisions.
-	var namespaces [2]string
+	var extraNamespaces []string

 	BeforeEach(func() {
 		var err error
 		c, err = loadClient()
 		Expect(err).NotTo(HaveOccurred())
-
-		By("Building a namespace api objects")
-		for i := range namespaces {
-			namespacePtr, err := createTestingNS(fmt.Sprintf("service-%d", i), c)
-			Expect(err).NotTo(HaveOccurred())
-			namespaces[i] = namespacePtr.Name
-		}
 	})

 	AfterEach(func() {
-		for _, ns := range namespaces {
+		for _, ns := range extraNamespaces {
 			By(fmt.Sprintf("Destroying namespace %v", ns))
 			if err := deleteNS(c, ns, 5*time.Minute /* namespace deletion timeout */); err != nil {
 				Failf("Couldn't delete namespace %s: %s", ns, err)
 			}
 		}
+		extraNamespaces = nil
 	})

 	// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.

 	It("should provide secure master service", func() {
 		_, err := c.Services(api.NamespaceDefault).Get("kubernetes")
 		Expect(err).NotTo(HaveOccurred())
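The hunk above replaces the two namespaces that were built in BeforeEach for every test with a registry for the rare test that needs an extra one: a test appends any namespace it creates to extraNamespaces, and AfterEach reaps exactly those and resets the slice. A minimal sketch of that register-and-reap pattern outside Ginkgo (deleteNamespace is a hypothetical stand-in for the suite's deleteNS helper):

package main

import "fmt"

var extraNamespaces []string

// deleteNamespace stands in for the suite's deleteNS helper.
func deleteNamespace(ns string) error {
	fmt.Println("deleting namespace", ns)
	return nil
}

// afterEach reaps only the namespaces a test registered, then resets
// the registry so the next test starts clean.
func afterEach() {
	for _, ns := range extraNamespaces {
		if err := deleteNamespace(ns); err != nil {
			fmt.Printf("couldn't delete namespace %s: %v\n", ns, err)
		}
	}
	extraNamespaces = nil
}

func main() {
	extraNamespaces = append(extraNamespaces, "services-extra-1")
	afterEach()
}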
@@ -79,12 +72,13 @@ var _ = Describe("Services", func() {
 	It("should serve a basic endpoint from pods", func() {
 		serviceName := "endpoint-test2"
-		ns := namespaces[0]
+		ns := f.Namespace.Name
 		labels := map[string]string{
 			"foo": "bar",
 			"baz": "blah",
 		}

 		By("creating service " + serviceName + " in namespace " + ns)
 		defer func() {
 			err := c.Services(ns).Delete(serviceName)
 			Expect(err).NotTo(HaveOccurred())
@@ -107,43 +101,38 @@ var _ = Describe("Services", func() {
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})

-		var names []string
+		names := map[string]bool{}
 		defer func() {
-			for _, name := range names {
+			for name := range names {
 				err := c.Pods(ns).Delete(name, nil)
 				Expect(err).NotTo(HaveOccurred())
 			}
 		}()

-		name1 := "test1"
-		addEndpointPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
-		names = append(names, name1)
+		name1 := "pod1"
+		name2 := "pod2"
+
+		createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
+		names[name1] = true
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}})

-		name2 := "test2"
-		addEndpointPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
-		names = append(names, name2)
+		createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
+		names[name2] = true
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}})

-		err = c.Pods(ns).Delete(name1, nil)
-		Expect(err).NotTo(HaveOccurred())
-		names = []string{name2}
+		deletePodOrFail(c, ns, name1)
+		delete(names, name1)
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name2: {80}})

-		err = c.Pods(ns).Delete(name2, nil)
-		Expect(err).NotTo(HaveOccurred())
-		names = []string{}
+		deletePodOrFail(c, ns, name2)
+		delete(names, name2)
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
 	})

 	It("should serve multiport endpoints from pods", func() {
 		// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
 		serviceName := "multi-endpoint-test"
-		ns := namespaces[0]
+		ns := f.Namespace.Name

 		defer func() {
 			err := c.Services(ns).Delete(serviceName)
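Switching names from a []string to a map[string]bool turns the pod-name bookkeeping into the usual Go set idiom: insertion, membership, and removal are O(1) one-liners, which keeps the cleanup defer and the delete steps above symmetric. For illustration:

package main

import "fmt"

func main() {
	names := map[string]bool{} // set of pods we still own
	names["pod1"] = true       // add
	names["pod2"] = true
	delete(names, "pod1")     // remove; no slice reshuffling needed
	for name := range names { // iterate survivors for cleanup
		fmt.Println("would delete pod", name)
	}
}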
@@ -155,6 +144,7 @@ var _ = Describe("Services", func() {
 		svc1port := "svc1"
 		svc2port := "svc2"

+		By("creating service " + serviceName + " in namespace " + ns)
 		service := &api.Service{
 			ObjectMeta: api.ObjectMeta{
 				Name: serviceName,
@@ -181,9 +171,9 @@ var _ = Describe("Services", func() {
 		port2 := 101
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})

-		var names []string
+		names := map[string]bool{}
 		defer func() {
-			for _, name := range names {
+			for name := range names {
 				err := c.Pods(ns).Delete(name, nil)
 				Expect(err).NotTo(HaveOccurred())
 			}
@@ -202,44 +192,30 @@ var _ = Describe("Services", func() {
 			},
 		}

-		podname1 := "podname1"
-		addEndpointPodOrFail(c, ns, podname1, labels, containerPorts1)
-		names = append(names, podname1)
+		podname1 := "pod1"
+		podname2 := "pod2"
+
+		createPodOrFail(c, ns, podname1, labels, containerPorts1)
+		names[podname1] = true
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}})

-		podname2 := "podname2"
-		addEndpointPodOrFail(c, ns, podname2, labels, containerPorts2)
-		names = append(names, podname2)
+		createPodOrFail(c, ns, podname2, labels, containerPorts2)
+		names[podname2] = true
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}})

-		podname3 := "podname3"
-		addEndpointPodOrFail(c, ns, podname3, labels, append(containerPorts1, containerPorts2...))
-		names = append(names, podname3)
-		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}, podname3: {port1, port2}})
-
-		err = c.Pods(ns).Delete(podname1, nil)
-		Expect(err).NotTo(HaveOccurred())
-		names = []string{podname2, podname3}
-
-		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}, podname3: {port1, port2}})
-
-		err = c.Pods(ns).Delete(podname2, nil)
-		Expect(err).NotTo(HaveOccurred())
-		names = []string{podname3}
-
-		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname3: {port1, port2}})
-
-		err = c.Pods(ns).Delete(podname3, nil)
-		Expect(err).NotTo(HaveOccurred())
-		names = []string{}
+		deletePodOrFail(c, ns, podname1)
+		delete(names, podname1)
+		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}})
+
+		deletePodOrFail(c, ns, podname2)
+		delete(names, podname2)
 		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
 	})

 	It("should be able to up and down services", func() {
 		// this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP
 		SkipUnlessProviderIs(providersWithSSH...)
-		ns := namespaces[0]
+		ns := f.Namespace.Name
 		numPods, servicePort := 3, 80

 		podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
@@ -281,14 +257,18 @@ var _ = Describe("Services", func() {
 	It("should work after restarting kube-proxy", func() {
 		SkipUnlessProviderIs("gce", "gke")

-		ns := namespaces[0]
+		ns := f.Namespace.Name
 		numPods, servicePort := 3, 80

-		defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }()
-		podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
+		svc1 := "service1"
+		svc2 := "service2"
+
+		defer func() { expectNoError(stopServeHostnameService(c, ns, svc1)) }()
+		podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods)
 		Expect(err).NotTo(HaveOccurred())
-		defer func() { expectNoError(stopServeHostnameService(c, ns, "service2")) }()
-		podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
+
+		defer func() { expectNoError(stopServeHostnameService(c, ns, svc2)) }()
+		podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods)
 		Expect(err).NotTo(HaveOccurred())

 		if svc1IP == svc2IP {
@@ -305,18 +285,18 @@ var _ = Describe("Services", func() {
 		expectNoError(verifyServeHostnameServiceUp(c, host, podNames1, svc1IP, servicePort))
 		expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))

 		// Restart kube-proxy and verify that services are still reachable (after some time).
 		By("Restarting kube-proxy")
 		if err := restartKubeProxy(host); err != nil {
 			Failf("error restarting kube-proxy: %v", err)
 		}
 		expectNoError(verifyServeHostnameServiceUp(c, host, podNames1, svc1IP, servicePort))
 		expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))
-		// Remove iptable rules and make sure they come back.
-		By("Remove iptable rules and make sure they come back")
+
+		By("Removing iptable rules")
 		_, _, code, err := SSH(`
			sudo iptables -t nat -F KUBE-SERVICES || true;
			sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
			sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, testContext.Provider)
 		if err != nil || code != 0 {
 			Failf("couldn't remove iptable rules: %v (code %v)", err, code)
 		}
@@ -328,7 +308,7 @@ var _ = Describe("Services", func() {
 		// TODO: restartApiserver doesn't work in GKE - fix it and reenable this test.
 		SkipUnlessProviderIs("gce")

-		ns := namespaces[0]
+		ns := f.Namespace.Name
 		numPods, servicePort := 3, 80

 		defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }()
@@ -368,7 +348,7 @@ var _ = Describe("Services", func() {
 		SkipUnlessProviderIs("gce", "gke", "aws")

 		serviceName := "external-lb-test"
-		ns := namespaces[0]
+		ns := f.Namespace.Name

 		t := NewWebserverTest(c, ns, serviceName)
 		defer func() {
@@ -381,12 +361,11 @@ var _ = Describe("Services", func() {
 		inboundPort := 3000

+		By("creating service " + serviceName + " with external load balancer in namespace " + ns)
 		service := t.BuildServiceSpec()
 		service.Spec.Type = api.ServiceTypeLoadBalancer
 		service.Spec.Ports[0].Port = inboundPort
 		service.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)

-		By("creating service " + serviceName + " with external load balancer in namespace " + ns)
 		result, err := t.CreateService(service)
 		Expect(err).NotTo(HaveOccurred())
@@ -424,7 +403,7 @@ var _ = Describe("Services", func() {
 		SkipUnlessProviderIs("gce", "gke")

 		serviceName := "lb-test-with-user-ip"
-		ns := namespaces[0]
+		ns := f.Namespace.Name

 		t := NewWebserverTest(c, ns, serviceName)
 		defer func() {
@@ -445,7 +424,6 @@ var _ = Describe("Services", func() {
 		By("creating an external static ip")
 		rand.Seed(time.Now().UTC().UnixNano())
 		staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
-		glog.Errorf("static ip name is %s", staticIPName)
 		loadBalancerIP, err := createGCEStaticIP(staticIPName)
 		Expect(err).NotTo(HaveOccurred())
 		defer func() {
@@ -490,7 +468,7 @@ var _ = Describe("Services", func() {

 	It("should be able to create a functioning NodePort service", func() {
 		serviceName := "nodeportservice-test"
-		ns := namespaces[0]
+		ns := f.Namespace.Name

 		t := NewWebserverTest(c, ns, serviceName)
 		defer func() {
@@ -547,7 +525,7 @@ var _ = Describe("Services", func() {
 	})

 	It("should be able to change the type and nodeport settings of a service", func() {
-		// requires ExternalLoadBalancer
+		// requires cloud load-balancer support
 		SkipUnlessProviderIs("gce", "gke", "aws")

 		serviceName := "mutability-service-test"
@@ -578,7 +556,7 @@ var _ = Describe("Services", func() {
 			Failf("got unexpected Spec.Ports[0].nodePort for default service: %v", service)
 		}
 		if len(service.Status.LoadBalancer.Ingress) != 0 {
-			Failf("got unexpected len(Status.LoadBalancer.Ingresss) for default service: %v", service)
+			Failf("got unexpected len(Status.LoadBalancer.Ingress) for default service: %v", service)
 		}

 		By("creating pod to be part of service " + t.ServiceName)
@@ -603,17 +581,16 @@ var _ = Describe("Services", func() {
 		if !ServiceNodePortRange.Contains(port.NodePort) {
 			Failf("got unexpected (out-of-range) port for NodePort service: %v", service)
 		}

 		if len(service.Status.LoadBalancer.Ingress) != 0 {
-			Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
+			Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
 		}

 		By("hitting the pod through the service's NodePort")
 		ip := pickNodeIP(f.Client)
 		nodePort1 := port.NodePort // Save for later!
 		testReachable(ip, nodePort1)

 		By("changing service " + serviceName + " to type=LoadBalancer")
-		service.Spec.Type = api.ServiceTypeLoadBalancer
 		service, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
 			s.Spec.Type = api.ServiceTypeLoadBalancer
 		})
@@ -634,12 +611,13 @@ var _ = Describe("Services", func() {
 			Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", service)
 		}
 		if len(service.Status.LoadBalancer.Ingress) != 1 {
-			Failf("got unexpected len(Status.LoadBalancer.Ingresss) for LoadBalancer service: %v", service)
+			Failf("got unexpected len(Status.LoadBalancer.Ingress) for LoadBalancer service: %v", service)
 		}
 		ingress1 := service.Status.LoadBalancer.Ingress[0]
 		if ingress1.IP == "" && ingress1.Hostname == "" {
-			Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
+			Failf("got unexpected Status.LoadBalancer.Ingress[0] for LoadBalancer service: %v", service)
 		}

 		By("hitting the pod through the service's NodePort")
 		ip = pickNodeIP(f.Client)
 		testReachable(ip, nodePort1)
@@ -668,25 +646,31 @@ var _ = Describe("Services", func() {
 			Failf("got unexpected Spec.Ports[0].nodePort for NodePort service: %v", service)
 		}
 		if len(service.Status.LoadBalancer.Ingress) != 1 {
-			Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
+			Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
 		}

-		// TODO: Make this less of a hack. Watch for events?
-		Logf("Waiting 2 minutes to give service time to settle after changing configuration")
-		time.Sleep(time.Second * 120)
-		service, err = waitForLoadBalancerIngress(f.Client, serviceName, f.Namespace.Name)
-		Expect(err).NotTo(HaveOccurred())
-
-		ingress2 := service.Status.LoadBalancer.Ingress[0]
-		Expect(ingress1).To(Equal(ingress2))
-
 		By("hitting the pod through the service's updated NodePort")
 		testReachable(ip, nodePort2)
-		By("hitting the pod through the service's LoadBalancer")
-		testLoadBalancerReachable(ingress2, 80)
 		By("checking the old NodePort is closed")
 		testNotReachable(ip, nodePort1)

+		By("hitting the pod through the service's LoadBalancer")
+		i := 1
+		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(3 * time.Second) {
+			service, err = waitForLoadBalancerIngress(f.Client, serviceName, f.Namespace.Name)
+			Expect(err).NotTo(HaveOccurred())
+
+			ingress2 := service.Status.LoadBalancer.Ingress[0]
+			if testLoadBalancerReachableInTime(ingress2, 80, 5*time.Second) {
+				break
+			}
+
+			if i%5 == 0 {
+				Logf("Waiting for load-balancer changes (%v elapsed, will retry)", time.Since(start))
+			}
+			i++
+		}

 		By("changing service " + serviceName + " back to type=ClusterIP")
 		service, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
 			s.Spec.Type = api.ServiceTypeClusterIP
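The hunk above is the heart of the speedup: instead of unconditionally sleeping two minutes after flipping the service to a LoadBalancer, the test now probes every three seconds with a short per-probe budget and proceeds the moment the load balancer answers, paying the old worst case only when the cloud is genuinely slow. A before/after sketch under that assumption (probe is a hypothetical stand-in for testLoadBalancerReachableInTime):

package main

import (
	"log"
	"time"
)

// probe stands in for testLoadBalancerReachableInTime with a short budget.
func probe() bool { return time.Now().UnixNano()%3 == 0 }

func main() {
	// Before: an unconditional, pessimistic wait.
	//     time.Sleep(120 * time.Second)

	// After: poll with early exit; the happy path costs seconds, not minutes.
	timeout, interval := 2*time.Minute, 3*time.Second
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if probe() {
			log.Printf("reachable after %v", time.Since(start))
			return
		}
	}
	log.Print("timed out waiting for the load balancer")
}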
@@ -694,6 +678,9 @@ var _ = Describe("Services", func() {
 		})
 		Expect(err).NotTo(HaveOccurred())

+		if len(service.Status.LoadBalancer.Ingress) != 0 {
+			Failf("got unexpected len(Status.LoadBalancer.Ingress) for NodePort service: %v", service)
+		}
 		if service.Spec.Type != api.ServiceTypeClusterIP {
 			Failf("got unexpected Spec.Type for back-to-ClusterIP service: %v", service)
 		}
@@ -710,109 +697,22 @@ var _ = Describe("Services", func() {
 		Expect(err).NotTo(HaveOccurred())

 		if len(service.Status.LoadBalancer.Ingress) != 0 {
-			Failf("got unexpected len(Status.LoadBalancer.Ingresss) for back-to-ClusterIP service: %v", service)
+			Failf("got unexpected len(Status.LoadBalancer.Ingress) for back-to-ClusterIP service: %v", service)
 		}
-		By("checking the NodePort (original) is closed")
-		ip = pickNodeIP(f.Client)
-		testNotReachable(ip, nodePort1)
-		By("checking the NodePort (updated) is closed")
+		By("checking the NodePort is closed")
 		ip = pickNodeIP(f.Client)
 		testNotReachable(ip, nodePort2)
 		By("checking the LoadBalancer is closed")
-		testLoadBalancerNotReachable(ingress2, 80)
+		testLoadBalancerNotReachable(ingress1, 80)
 	})

-	It("should release the load balancer when Type goes from LoadBalancer -> NodePort", func() {
-		// requires ExternalLoadBalancer
-		SkipUnlessProviderIs("gce", "gke", "aws")
-
-		serviceName := "service-release-lb"
-		ns := namespaces[0]
-
-		t := NewWebserverTest(c, ns, serviceName)
-		defer func() {
-			defer GinkgoRecover()
-			errs := t.Cleanup()
-			if len(errs) != 0 {
-				Failf("errors in cleanup: %v", errs)
-			}
-		}()
-
-		service := t.BuildServiceSpec()
-		service.Spec.Type = api.ServiceTypeLoadBalancer
-
-		By("creating service " + serviceName + " with type LoadBalancer")
-		service, err := t.CreateService(service)
-		Expect(err).NotTo(HaveOccurred())
-
-		By("creating pod to be part of service " + t.ServiceName)
-		t.CreateWebserverRC(1)
-
-		if service.Spec.Type != api.ServiceTypeLoadBalancer {
-			Failf("got unexpected Spec.Type for LoadBalancer service: %v", service)
-		}
-		if len(service.Spec.Ports) != 1 {
-			Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", service)
-		}
-		nodePort := service.Spec.Ports[0].NodePort
-		if nodePort == 0 {
-			Failf("got unexpected Spec.Ports[0].NodePort for LoadBalancer service: %v", service)
-		}
-
-		// Wait for the load balancer to be created asynchronously
-		service, err = waitForLoadBalancerIngress(c, serviceName, ns)
-		Expect(err).NotTo(HaveOccurred())
-
-		if len(service.Status.LoadBalancer.Ingress) != 1 {
-			Failf("got unexpected len(Status.LoadBalancer.Ingresss) for LoadBalancer service: %v", service)
-		}
-		ingress := service.Status.LoadBalancer.Ingress[0]
-		if ingress.IP == "" && ingress.Hostname == "" {
-			Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
-		}
-
-		By("hitting the pod through the service's NodePort")
-		ip := pickNodeIP(c)
-		testReachable(ip, nodePort)
-		By("hitting the pod through the service's LoadBalancer")
-		testLoadBalancerReachable(ingress, 80)
-
-		By("changing service " + serviceName + " to type=NodePort")
-		service, err = updateService(c, ns, serviceName, func(s *api.Service) {
-			s.Spec.Type = api.ServiceTypeNodePort
-		})
-		Expect(err).NotTo(HaveOccurred())
-
-		if service.Spec.Type != api.ServiceTypeNodePort {
-			Failf("got unexpected Spec.Type for NodePort service: %v", service)
-		}
-		if len(service.Spec.Ports) != 1 {
-			Failf("got unexpected len(Spec.Ports) for NodePort service: %v", service)
-		}
-		if service.Spec.Ports[0].NodePort != nodePort {
-			Failf("got unexpected Spec.Ports[0].NodePort for NodePort service: %v", service)
-		}
-
-		// Wait for the load balancer to be created asynchronously
-		service, err = waitForLoadBalancerDestroy(c, serviceName, ns)
-		Expect(err).NotTo(HaveOccurred())
-
-		if len(service.Status.LoadBalancer.Ingress) != 0 {
-			Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
-		}
-
-		By("hitting the pod through the service's NodePort")
-		testReachable(ip, nodePort)
-		By("checking the LoadBalancer is closed")
-		testLoadBalancerNotReachable(ingress, 80)
-	})
-
 	It("should prevent NodePort collisions", func() {
-		serviceName := "nodeport-collision"
-		serviceName2 := serviceName + "2"
-		ns := namespaces[0]
+		baseName := "nodeport-collision-"
+		serviceName1 := baseName + "1"
+		serviceName2 := baseName + "2"
+		ns := f.Namespace.Name

-		t := NewWebserverTest(c, ns, serviceName)
+		t := NewWebserverTest(c, ns, serviceName1)
 		defer func() {
 			defer GinkgoRecover()
 			errs := t.Cleanup()
@@ -821,10 +721,9 @@ var _ = Describe("Services", func() {
 			}
 		}()

+		By("creating service " + serviceName1 + " with type NodePort in namespace " + ns)
 		service := t.BuildServiceSpec()
 		service.Spec.Type = api.ServiceTypeNodePort
-
-		By("creating service " + serviceName + " with type NodePort in namespace " + ns)
 		result, err := t.CreateService(service)
 		Expect(err).NotTo(HaveOccurred())
@@ -839,23 +738,21 @@ var _ = Describe("Services", func() {
 			Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result)
 		}

-		By("creating service " + serviceName + " with conflicting NodePort")
+		By("creating service " + serviceName2 + " with conflicting NodePort")
 		service2 := t.BuildServiceSpec()
 		service2.Name = serviceName2
 		service2.Spec.Type = api.ServiceTypeNodePort
 		service2.Spec.Ports[0].NodePort = port.NodePort
-
-		By("creating service " + serviceName2 + " with conflicting NodePort")
 		result2, err := t.CreateService(service2)
 		if err == nil {
 			Failf("Created service with conflicting NodePort: %v", result2)
 		}
-		expectedErr := fmt.Sprintf("Service \"%s\" is invalid: spec.ports[0].nodePort: invalid value '%d', Details: provided port is already allocated", serviceName2, port.NodePort)
+		expectedErr := fmt.Sprintf("Service \"%s\" is invalid: spec.ports[0].nodePort: invalid value '%d', Details: provided port is already allocated",
+			serviceName2, port.NodePort)
 		Expect(fmt.Sprintf("%v", err)).To(Equal(expectedErr))

-		By("deleting original service " + serviceName + " with type NodePort in namespace " + ns)
-		err = t.DeleteService(serviceName)
+		By("deleting service " + serviceName1 + " to release NodePort")
+		err = t.DeleteService(serviceName1)
 		Expect(err).NotTo(HaveOccurred())

 		By("creating service " + serviceName2 + " with no-longer-conflicting NodePort")
|
||||
|
||||
It("should check NodePort out-of-range", func() {
|
||||
serviceName := "nodeport-range-test"
|
||||
ns := namespaces[0]
|
||||
ns := f.Namespace.Name
|
||||
|
||||
t := NewWebserverTest(c, ns, serviceName)
|
||||
defer func() {
|
||||
@@ -931,7 +828,7 @@ var _ = Describe("Services", func() {

 	It("should release NodePorts on delete", func() {
 		serviceName := "nodeport-reuse"
-		ns := namespaces[0]
+		ns := f.Namespace.Name

 		t := NewWebserverTest(c, ns, serviceName)
 		defer func() {
@@ -993,6 +890,13 @@ var _ = Describe("Services", func() {
 		// requires ExternalLoadBalancer
 		SkipUnlessProviderIs("gce", "gke", "aws")

+		namespaces := []string{f.Namespace.Name}
+		By("Building a second namespace api object")
+		namespacePtr, err := createTestingNS("services", c)
+		Expect(err).NotTo(HaveOccurred())
+		namespaces = append(namespaces, namespacePtr.Name)
+		extraNamespaces = append(extraNamespaces, namespacePtr.Name)
+
 		serviceNames := []string{"s0"} // Could add more here, but then it takes longer.
 		labels := map[string]string{
 			"key0": "value0",
@@ -1070,7 +974,8 @@ func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string)
 	const timeout = 20 * time.Minute
 	var service *api.Service
 	By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have a LoadBalancer ingress point", timeout, serviceName, namespace))
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
+	i := 1
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(3 * time.Second) {
 		service, err := c.Services(namespace).Get(serviceName)
 		if err != nil {
 			Logf("Get service failed, ignoring for 5s: %v", err)
@@ -1079,13 +984,17 @@ func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string)
 		if len(service.Status.LoadBalancer.Ingress) > 0 {
 			return service, nil
 		}
-		Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start))
+		if i%5 == 0 {
+			Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start))
+		}
+		i++
 	}
 	return service, fmt.Errorf("service %s in namespace %s doesn't have a LoadBalancer ingress point after %.2f seconds", serviceName, namespace, timeout.Seconds())
 }

 func waitForLoadBalancerDestroy(c *client.Client, serviceName, namespace string) (*api.Service, error) {
 	// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
 	// TODO: this should actually test that the LB was released at the cloud provider
 	const timeout = 10 * time.Minute
 	var service *api.Service
 	By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have no LoadBalancer ingress points", timeout, serviceName, namespace))
@@ -1136,7 +1045,7 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
 			Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
 		}

-		Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
+		// Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
 		if _, ok := m[addr.TargetRef.UID]; !ok {
 			m[addr.TargetRef.UID] = make([]int, 0)
 		}
@@ -1159,9 +1068,8 @@ func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
 			Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
 		}
 		portsByUID[pod.ObjectMeta.UID] = portList
-		By(fmt.Sprintf(""))
 	}
-	By(fmt.Sprintf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns))
+	// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
 	return portsByUID
 }
@@ -1188,26 +1096,31 @@ func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) {
 }

 func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) {
-	By(fmt.Sprintf("Waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
-	for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(5 * time.Second) {
+	By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
+	i := 1
+	for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(1 * time.Second) {
 		endpoints, err := c.Endpoints(namespace).Get(serviceName)
 		if err != nil {
 			Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
 			continue
 		}
-		Logf("Found endpoints %v", endpoints)
+		// Logf("Found endpoints %v", endpoints)

 		portsByPodUID := getContainerPortsByPodUID(endpoints)
-		Logf("Found port by pod UID %v", portsByPodUID)
+		// Logf("Found port by pod UID %v", portsByPodUID)

 		expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
 		if len(portsByPodUID) == len(expectedEndpoints) {
 			validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
-			By(fmt.Sprintf("Successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, time.Since(start)))
+			Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
+				serviceName, namespace, expectedEndpoints, time.Since(start))
 			return
 		}

-		Logf("Unexpected number of endpoints: found %v, expected %v (%v elapsed, ignoring for 5s)", portsByPodUID, expectedEndpoints, time.Since(start))
+		if i%5 == 0 {
+			Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
+		}
+		i++
 	}

 	if pods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()); err == nil {
@@ -1220,8 +1133,8 @@ func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) {
 	Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, serviceStartTimeout)
 }

-func addEndpointPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
-	By(fmt.Sprintf("Adding pod %v in namespace %v", name, ns))
+func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
+	By(fmt.Sprintf("creating pod %s in namespace %s", name, ns))
 	pod := &api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name: name,
@@ -1241,6 +1154,12 @@ func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
 	Expect(err).NotTo(HaveOccurred())
 }

+func deletePodOrFail(c *client.Client, ns, name string) {
+	By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns))
+	err := c.Pods(ns).Delete(name, nil)
+	Expect(err).NotTo(HaveOccurred())
+}
+
 func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
 	ips := []string{}
 	for i := range nodes.Items {
@@ -1278,13 +1197,17 @@ func pickNodeIP(c *client.Client) string {
 	return ip
 }

-func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) {
+func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) bool {
+	return testLoadBalancerReachableInTime(ingress, port, podStartTimeout)
+}
+
+func testLoadBalancerReachableInTime(ingress api.LoadBalancerIngress, port int, timeout time.Duration) bool {
 	ip := ingress.IP
 	if ip == "" {
 		ip = ingress.Hostname
 	}

-	testReachable(ip, port)
+	return testReachableInTime(ip, port, timeout)
 }

 func testLoadBalancerNotReachable(ingress api.LoadBalancerIngress, port int) {
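Turning testLoadBalancerReachable into a thin wrapper over a new ...InTime variant keeps every existing call site source-compatible while letting the retry loop earlier in the diff pass a tight 5-second budget per probe. The same default-timeout-by-wrapper idiom in isolation (all names here are illustrative):

package main

import (
	"fmt"
	"time"
)

const defaultTimeout = 5 * time.Minute // stand-in for podStartTimeout

// reachable keeps the old one-argument signature...
func reachable(addr string) bool {
	return reachableInTime(addr, defaultTimeout)
}

// ...while reachableInTime exposes the timeout to callers that need a
// tighter budget, e.g. one short probe per iteration of a retry loop.
func reachableInTime(addr string, timeout time.Duration) bool {
	fmt.Printf("probing %s for up to %v\n", addr, timeout)
	return true // real check elided
}

func main() {
	reachable("10.0.0.1:80")
	reachableInTime("10.0.0.1:80", 5*time.Second)
}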
@@ -1296,19 +1219,25 @@ func testLoadBalancerNotReachable(ingress api.LoadBalancerIngress, port int) {
 	testNotReachable(ip, port)
 }

-func testReachable(ip string, port int) {
+func testReachable(ip string, port int) bool {
+	return testReachableInTime(ip, port, podStartTimeout)
+}
+
+func testReachableInTime(ip string, port int, timeout time.Duration) bool {
 	url := fmt.Sprintf("http://%s:%d", ip, port)
 	if ip == "" {
 		Failf("Got empty IP for reachability check (%s)", url)
+		return false
 	}
 	if port == 0 {
 		Failf("Got port==0 for reachability check (%s)", url)
+		return false
 	}

 	desc := fmt.Sprintf("the url %s to be reachable", url)
-	By(fmt.Sprintf("Waiting up to %v for %s", podStartTimeout, desc))
+	By(fmt.Sprintf("Waiting up to %v for %s", timeout, desc))
 	start := time.Now()
-	err := wait.Poll(poll, podStartTimeout, func() (bool, error) {
+	err := wait.Poll(poll, timeout, func() (bool, error) {
 		resp, err := httpGetNoConnectionPool(url)
 		if err != nil {
 			Logf("Got error waiting for reachability of %s: %v (%v)", url, err, time.Since(start))
@@ -1329,7 +1258,11 @@ func testReachableInTime(ip string, port int, timeout time.Duration) bool {
 		Logf("Successfully reached %v", url)
 		return true, nil
 	})
-	Expect(err).NotTo(HaveOccurred(), "Error waiting for %s", desc)
+	if err != nil {
+		Expect(err).NotTo(HaveOccurred(), "Error waiting for %s", desc)
+		return false
+	}
+	return true
 }

 func testNotReachable(ip string, port int) {
@@ -1365,6 +1298,7 @@ func testNotReachable(ip string, port int) {
 func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) {
 	podNames := make([]string, replicas)

+	By("creating service " + name + " in namespace " + ns)
 	_, err := c.Services(ns).Create(&api.Service{
 		ObjectMeta: api.ObjectMeta{
 			Name: name,
@@ -1447,6 +1381,7 @@ func verifyServeHostnameServiceUp(c *client.Client, host string, expectedPods []string, serviceIP string, servicePort int) error {
 			command),
 	}

+	By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
 	for _, cmd := range commands {
 		passed := false
 		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5) {
@@ -86,10 +86,6 @@ const (
 	podRespondingTimeout     = 2 * time.Minute
 	serviceRespondingTimeout = 2 * time.Minute
 	endpointRegisterTimeout  = time.Minute
-
-	// How wide to print pod names, by default. Useful for aligning printing to
-	// quickly scan through output.
-	podPrintWidth = 55
 )

 type CloudConfig struct {
@@ -411,22 +407,22 @@ func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName string) error {
 }

 func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
-	Logf("Waiting up to %[1]v for pod %-[2]*[3]s status to be %[4]s", timeout, podPrintWidth, podName, desc)
+	Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
 		pod, err := c.Pods(ns).Get(podName)
 		if err != nil {
-			// Aligning this text makes it much more readable
-			Logf("Get pod %-[1]*[2]s in namespace '%[3]s' failed, ignoring for %[4]v. Error: %[5]v",
-				podPrintWidth, podName, ns, poll, err)
+			Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
+				podName, ns, poll, err)
 			continue
 		}
 		done, err := condition(pod)
 		if done {
 			return err
 		}
-		Logf("Waiting for pod %-[1]*[2]s in namespace '%[3]s' status to be '%[4]s'"+
-			"(found phase: %[5]q, readiness: %[6]t) (%[7]v elapsed)",
-			podPrintWidth, podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
+		Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
+			"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
+			podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
 	}
 	return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
 }
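The format strings deleted above use two less common fmt features: explicit argument indexes (%[1]v selects the first operand) and a field width taken from an operand (the * in %-[2]*[3]s reads the width from argument 2, here podPrintWidth, and left-justifies argument 3, the pod name). With podPrintWidth gone, the plain indexed forms suffice. A quick demonstration of both verbs:

package main

import "fmt"

func main() {
	// %[n] selects an argument by index; the same value can be reused.
	fmt.Printf("pod %[1]s in ns %[2]s: %[1]s is pending\n", "pod1", "e2e")

	// With explicit indexes, * takes the field width from the indexed
	// argument: %-[1]*[2]s left-justifies argument 2 to the width in argument 1.
	fmt.Printf("|%-[1]*[2]s| aligned\n", 20, "pod1")
}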
@@ -1214,7 +1210,6 @@ func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
 // It's the caller's responsibility to clean up externally (i.e. use the
 // namespace lifecycle for handling cleanup).
 func RunRC(config RCConfig) error {
-
 	// Don't force tests to fail if they don't care about containers restarting.
 	var maxContainerFailures int
 	if config.MaxContainerFailures == nil {
@@ -1225,7 +1220,7 @@ func RunRC(config RCConfig) error {
 	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))

-	By(fmt.Sprintf("%v Creating replication controller %s", time.Now(), config.Name))
+	By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
 	rc := &api.ReplicationController{
 		ObjectMeta: api.ObjectMeta{
 			Name: config.Name,
@@ -1292,7 +1287,7 @@ func RunRC(config RCConfig) error {
 	if err != nil {
 		return fmt.Errorf("Error creating replication controller: %v", err)
 	}
-	Logf("%v Created replication controller with name: %v, namespace: %v, replica count: %v", time.Now(), rc.Name, config.Namespace, rc.Spec.Replicas)
+	Logf("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
 	podStore := newPodStore(config.Client, config.Namespace, label, fields.Everything())
 	defer podStore.Stop()
@@ -1364,13 +1359,13 @@ func RunRC(config RCConfig) error {
 		*config.CreatedPods = pods
 		}

-		Logf("%v %v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
-			time.Now(), rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)
+		Logf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
+			rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)

 		promPushRunningPending(running, pending)

 		if config.PodStatusFile != nil {
-			fmt.Fprintf(config.PodStatusFile, "%s, %d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", time.Now(), running, pending, waiting, inactive, unknown, runningButNotReady)
+			fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", running, pending, waiting, inactive, unknown, runningButNotReady)
 		}

 		if failedContainers > maxContainerFailures {
@@ -1482,7 +1477,7 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
 }

 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
-	By(fmt.Sprintf("%v Scaling replication controller %s in namespace %s to %d", time.Now(), name, ns, size))
+	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
 	scaler, err := kubectl.ScalerFor("ReplicationController", c)
 	if err != nil {
 		return err
@@ -1538,7 +1533,7 @@ func waitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {

 // Delete a Replication Controller and all pods it spawned
 func DeleteRC(c *client.Client, ns, name string) error {
-	By(fmt.Sprintf("%v Deleting replication controller %s in namespace %s", time.Now(), name, ns))
+	By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
 	rc, err := c.ReplicationControllers(ns).Get(name)
 	if err != nil {
 		if apierrs.IsNotFound(err) {
@@ -1815,7 +1810,7 @@ func checkPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
 	// support only Go >= 1.4.
 	for _, podName := range podNames {
 		if !<-result {
-			Logf("Pod %-[1]*[2]s failed to be %[3]s.", podPrintWidth, podName, desc)
+			Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
 			success = false
 		}
 	}