Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Add Namespace to e2eservice.TestJig, make all methods use jig namespace and name
Many TestJig methods made the caller pass a serviceName argument even though the jig already has a Name, and every caller passed the same name to each method that it had passed to NewTestJig(). Likewise, many methods made the caller pass a namespace argument, but only a single test used more than one namespace, and that test is easily rewritten to use two test jigs instead (see the sketch below).
parent a0ad420018
commit 52b366457f
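To make the API change concrete, here is a minimal sketch of a caller before and after this commit. It is illustrative only: the function name exampleUsage, the clientset variable cs, and the service name "my-svc" are assumptions rather than lines from the diff, and the e2eservice import path is the one conventionally used in the Kubernetes e2e tree.

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	// Assumed import path for the e2e service framework package edited below.
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// exampleUsage is a hypothetical caller showing the new calling convention.
func exampleUsage(cs clientset.Interface, ns string) error {
	// Before this commit, callers repeated the namespace and name on almost
	// every call, even though NewTestJig had already been given the name:
	//
	//	jig := e2eservice.NewTestJig(cs, "my-svc")
	//	_, err := jig.CreateTCPService(ns, nil)
	//	err = jig.WaitForAvailableEndpoint(ns, "my-svc", e2eservice.ServiceEndpointsTimeout)

	// After this commit, the jig records the namespace once, at construction
	// time, and every method reads j.Namespace and j.Name internally:
	jig := e2eservice.NewTestJig(cs, ns, "my-svc")
	if _, err := jig.CreateTCPService(nil); err != nil {
		return err
	}
	return jig.WaitForAvailableEndpoint(e2eservice.ServiceEndpointsTimeout)
}

The one test that previously spanned two namespaces follows the same pattern by constructing one jig per namespace (tcpJig and udpJig in the diff below).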
@ -899,9 +899,9 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns)
|
||||
jig := e2eservice.NewTestJig(c, name)
|
||||
jig := e2eservice.NewTestJig(c, ns, name)
|
||||
jig.Labels = podLabels
|
||||
service, err := jig.CreateLoadBalancerService(ns, name, e2eservice.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
|
||||
service, err := jig.CreateLoadBalancerService(e2eservice.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
|
||||
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@ -922,14 +922,14 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
|
||||
defer close(done)
|
||||
go func() {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
expectedNodes, err := jig.GetEndpointNodeNames(service)
|
||||
expectedNodes, err := jig.GetEndpointNodeNames()
|
||||
framework.ExpectNoError(err)
|
||||
// The affinity policy should ensure that before an old pod is
|
||||
// deleted, a new pod will have been created on the same node.
|
||||
// Thus the set of nodes with local endpoints for the service
|
||||
// should remain unchanged.
|
||||
wait.Until(func() {
|
||||
actualNodes, err := jig.GetEndpointNodeNames(service)
|
||||
actualNodes, err := jig.GetEndpointNodeNames()
|
||||
framework.ExpectNoError(err)
|
||||
if !actualNodes.Equal(expectedNodes) {
|
||||
framework.Logf("The set of nodes with local endpoints changed; started with %v, now have %v", expectedNodes.List(), actualNodes.List())
|
||||
|
@ -841,8 +841,8 @@ func (cont *NginxIngressController) Init() {
|
||||
// --publish-service flag (see <IngressManifestPath>/nginx/rc.yaml) to make it work in private
|
||||
// clusters, i.e. clusters where nodes don't have public IPs.
|
||||
framework.Logf("Creating load balancer service for nginx ingress controller")
|
||||
serviceJig := e2eservice.NewTestJig(cont.Client, "nginx-ingress-lb")
|
||||
_, err := serviceJig.CreateTCPService(cont.Ns, func(svc *v1.Service) {
|
||||
serviceJig := e2eservice.NewTestJig(cont.Client, cont.Ns, "nginx-ingress-lb")
|
||||
_, err := serviceJig.CreateTCPService(func(svc *v1.Service) {
|
||||
svc.Spec.Type = v1.ServiceTypeLoadBalancer
|
||||
svc.Spec.Selector = map[string]string{"k8s-app": "nginx-ingress-lb"}
|
||||
svc.Spec.Ports = []v1.ServicePort{
|
||||
@ -851,7 +851,7 @@ func (cont *NginxIngressController) Init() {
|
||||
{Name: "stats", Port: 18080}}
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
cont.lbSvc, err = serviceJig.WaitForLoadBalancer(cont.Ns, "nginx-ingress-lb", e2eservice.GetServiceLoadBalancerCreationTimeout(cont.Client))
|
||||
cont.lbSvc, err = serviceJig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cont.Client))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
read := func(file string) string {
|
||||
|
@ -53,16 +53,18 @@ var NodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}
|
||||
|
||||
// TestJig is a test jig to help service testing.
|
||||
type TestJig struct {
|
||||
ID string
|
||||
Name string
|
||||
Client clientset.Interface
|
||||
Labels map[string]string
|
||||
Client clientset.Interface
|
||||
Namespace string
|
||||
Name string
|
||||
ID string
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// NewTestJig allocates and inits a new TestJig.
|
||||
func NewTestJig(client clientset.Interface, name string) *TestJig {
|
||||
func NewTestJig(client clientset.Interface, namespace, name string) *TestJig {
|
||||
j := &TestJig{}
|
||||
j.Client = client
|
||||
j.Namespace = namespace
|
||||
j.Name = name
|
||||
j.ID = j.Name + "-" + string(uuid.NewUUID())
|
||||
j.Labels = map[string]string{"testid": j.ID}
|
||||
@ -73,10 +75,10 @@ func NewTestJig(client clientset.Interface, name string) *TestJig {
|
||||
// newServiceTemplate returns the default v1.Service template for this j, but
|
||||
// does not actually create the Service. The default Service has the same name
|
||||
// as the j and exposes the given port.
|
||||
func (j *TestJig) newServiceTemplate(namespace string, proto v1.Protocol, port int32) *v1.Service {
|
||||
func (j *TestJig) newServiceTemplate(proto v1.Protocol, port int32) *v1.Service {
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Namespace: j.Namespace,
|
||||
Name: j.Name,
|
||||
Labels: j.Labels,
|
||||
},
|
||||
@ -96,12 +98,12 @@ func (j *TestJig) newServiceTemplate(namespace string, proto v1.Protocol, port i
|
||||
// CreateTCPServiceWithPort creates a new TCP Service with given port based on the
|
||||
// j's defaults. Callers can provide a function to tweak the Service object before
|
||||
// it is created.
|
||||
func (j *TestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *v1.Service), port int32) (*v1.Service, error) {
|
||||
svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, port)
|
||||
func (j *TestJig) CreateTCPServiceWithPort(tweak func(svc *v1.Service), port int32) (*v1.Service, error) {
|
||||
svc := j.newServiceTemplate(v1.ProtocolTCP, port)
|
||||
if tweak != nil {
|
||||
tweak(svc)
|
||||
}
|
||||
result, err := j.Client.CoreV1().Services(namespace).Create(svc)
|
||||
result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err)
|
||||
}
|
||||
@ -111,12 +113,12 @@ func (j *TestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *v1.
|
||||
// CreateTCPService creates a new TCP Service based on the j's
|
||||
// defaults. Callers can provide a function to tweak the Service object before
|
||||
// it is created.
|
||||
func (j *TestJig) CreateTCPService(namespace string, tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, 80)
|
||||
func (j *TestJig) CreateTCPService(tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
svc := j.newServiceTemplate(v1.ProtocolTCP, 80)
|
||||
if tweak != nil {
|
||||
tweak(svc)
|
||||
}
|
||||
result, err := j.Client.CoreV1().Services(namespace).Create(svc)
|
||||
result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err)
|
||||
}
|
||||
@ -126,12 +128,12 @@ func (j *TestJig) CreateTCPService(namespace string, tweak func(svc *v1.Service)
|
||||
// CreateUDPService creates a new UDP Service based on the j's
|
||||
// defaults. Callers can provide a function to tweak the Service object before
|
||||
// it is created.
|
||||
func (j *TestJig) CreateUDPService(namespace string, tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
svc := j.newServiceTemplate(namespace, v1.ProtocolUDP, 80)
|
||||
func (j *TestJig) CreateUDPService(tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
svc := j.newServiceTemplate(v1.ProtocolUDP, 80)
|
||||
if tweak != nil {
|
||||
tweak(svc)
|
||||
}
|
||||
result, err := j.Client.CoreV1().Services(namespace).Create(svc)
|
||||
result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err)
|
||||
}
|
||||
@ -140,10 +142,10 @@ func (j *TestJig) CreateUDPService(namespace string, tweak func(svc *v1.Service)
|
||||
|
||||
// CreateExternalNameService creates a new ExternalName type Service based on the j's defaults.
|
||||
// Callers can provide a function to tweak the Service object before it is created.
|
||||
func (j *TestJig) CreateExternalNameService(namespace string, tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
func (j *TestJig) CreateExternalNameService(tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
svc := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Namespace: j.Namespace,
|
||||
Name: j.Name,
|
||||
Labels: j.Labels,
|
||||
},
|
||||
@ -156,7 +158,7 @@ func (j *TestJig) CreateExternalNameService(namespace string, tweak func(svc *v1
|
||||
if tweak != nil {
|
||||
tweak(svc)
|
||||
}
|
||||
result, err := j.Client.CoreV1().Services(namespace).Create(svc)
|
||||
result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err)
|
||||
}
|
||||
@ -164,9 +166,9 @@ func (j *TestJig) CreateExternalNameService(namespace string, tweak func(svc *v1
|
||||
}
|
||||
|
||||
// ChangeServiceType updates the given service's ServiceType to the given newType.
|
||||
func (j *TestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) error {
|
||||
func (j *TestJig) ChangeServiceType(newType v1.ServiceType, timeout time.Duration) error {
|
||||
ingressIP := ""
|
||||
svc, err := j.UpdateService(namespace, name, func(s *v1.Service) {
|
||||
svc, err := j.UpdateService(func(s *v1.Service) {
|
||||
for _, ing := range s.Status.LoadBalancer.Ingress {
|
||||
if ing.IP != "" {
|
||||
ingressIP = ing.IP
|
||||
@ -179,7 +181,7 @@ func (j *TestJig) ChangeServiceType(namespace, name string, newType v1.ServiceTy
|
||||
return err
|
||||
}
|
||||
if ingressIP != "" {
|
||||
_, err = j.WaitForLoadBalancerDestroy(namespace, svc.Name, ingressIP, int(svc.Spec.Ports[0].Port), timeout)
|
||||
_, err = j.WaitForLoadBalancerDestroy(ingressIP, int(svc.Spec.Ports[0].Port), timeout)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -188,9 +190,9 @@ func (j *TestJig) ChangeServiceType(namespace, name string, newType v1.ServiceTy
|
||||
// ExternalTrafficPolicy set to Local and sanity checks its nodePort.
|
||||
// If createPod is true, it also creates an RC with 1 replica of
|
||||
// the standard netexec container used everywhere in this test.
|
||||
func (j *TestJig) CreateOnlyLocalNodePortService(namespace, serviceName string, createPod bool) (*v1.Service, error) {
|
||||
ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and ExternalTrafficPolicy=Local")
|
||||
svc, err := j.CreateTCPService(namespace, func(svc *v1.Service) {
|
||||
func (j *TestJig) CreateOnlyLocalNodePortService(createPod bool) (*v1.Service, error) {
|
||||
ginkgo.By("creating a service " + j.Namespace + "/" + j.Name + " with type=NodePort and ExternalTrafficPolicy=Local")
|
||||
svc, err := j.CreateTCPService(func(svc *v1.Service) {
|
||||
svc.Spec.Type = v1.ServiceTypeNodePort
|
||||
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
|
||||
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}}
|
||||
@ -200,8 +202,8 @@ func (j *TestJig) CreateOnlyLocalNodePortService(namespace, serviceName string,
|
||||
}
|
||||
|
||||
if createPod {
|
||||
ginkgo.By("creating a pod to be part of the service " + serviceName)
|
||||
_, err = j.Run(namespace, nil)
|
||||
ginkgo.By("creating a pod to be part of the service " + j.Name)
|
||||
_, err = j.Run(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -213,9 +215,9 @@ func (j *TestJig) CreateOnlyLocalNodePortService(namespace, serviceName string,
|
||||
// ExternalTrafficPolicy set to Local and waits for it to acquire an ingress IP.
|
||||
// If createPod is true, it also creates an RC with 1 replica of
|
||||
// the standard netexec container used everywhere in this test.
|
||||
func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool,
|
||||
func (j *TestJig) CreateOnlyLocalLoadBalancerService(timeout time.Duration, createPod bool,
|
||||
tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
_, err := j.CreateLoadBalancerService(namespace, serviceName, timeout, func(svc *v1.Service) {
|
||||
_, err := j.CreateLoadBalancerService(timeout, func(svc *v1.Service) {
|
||||
ginkgo.By("setting ExternalTrafficPolicy=Local")
|
||||
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
|
||||
if tweak != nil {
|
||||
@ -227,44 +229,44 @@ func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName stri
|
||||
}
|
||||
|
||||
if createPod {
|
||||
ginkgo.By("creating a pod to be part of the service " + serviceName)
|
||||
_, err = j.Run(namespace, nil)
|
||||
ginkgo.By("creating a pod to be part of the service " + j.Name)
|
||||
_, err = j.Run(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
|
||||
return j.WaitForLoadBalancer(namespace, serviceName, timeout)
|
||||
ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name)
|
||||
return j.WaitForLoadBalancer(timeout)
|
||||
}
|
||||
|
||||
// CreateLoadBalancerService creates a loadbalancer service and waits
|
||||
// for it to acquire an ingress IP.
|
||||
func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeout time.Duration, tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer")
|
||||
svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, 80)
|
||||
func (j *TestJig) CreateLoadBalancerService(timeout time.Duration, tweak func(svc *v1.Service)) (*v1.Service, error) {
|
||||
ginkgo.By("creating a service " + j.Namespace + "/" + j.Name + " with type=LoadBalancer")
|
||||
svc := j.newServiceTemplate(v1.ProtocolTCP, 80)
|
||||
svc.Spec.Type = v1.ServiceTypeLoadBalancer
|
||||
// We need to turn affinity off for our LB distribution tests
|
||||
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
|
||||
if tweak != nil {
|
||||
tweak(svc)
|
||||
}
|
||||
_, err := j.Client.CoreV1().Services(namespace).Create(svc)
|
||||
_, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
|
||||
}
|
||||
|
||||
ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
|
||||
return j.WaitForLoadBalancer(namespace, serviceName, timeout)
|
||||
ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name)
|
||||
return j.WaitForLoadBalancer(timeout)
|
||||
}
|
||||
|
||||
// GetEndpointNodes returns a map of nodenames:external-ip on which the
|
||||
// endpoints of the given Service are running.
|
||||
func (j *TestJig) GetEndpointNodes(svc *v1.Service) (map[string][]string, error) {
|
||||
// endpoints of the Service are running.
|
||||
func (j *TestJig) GetEndpointNodes() (map[string][]string, error) {
|
||||
nodes, err := e2enode.GetBoundedReadySchedulableNodes(j.Client, MaxNodesForEndpointsTests)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epNodes, err := j.GetEndpointNodeNames(svc)
|
||||
epNodes, err := j.GetEndpointNodeNames()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -279,10 +281,10 @@ func (j *TestJig) GetEndpointNodes(svc *v1.Service) (map[string][]string, error)
|
||||
|
||||
// GetEndpointNodeNames returns a string set of node names on which the
|
||||
// endpoints of the given Service are running.
|
||||
func (j *TestJig) GetEndpointNodeNames(svc *v1.Service) (sets.String, error) {
|
||||
endpoints, err := j.Client.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
|
||||
func (j *TestJig) GetEndpointNodeNames() (sets.String, error) {
|
||||
endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(j.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
|
||||
return nil, fmt.Errorf("get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err)
|
||||
}
|
||||
if len(endpoints.Subsets) == 0 {
|
||||
return nil, fmt.Errorf("endpoint has no subsets, cannot determine node addresses")
|
||||
@ -299,11 +301,11 @@ func (j *TestJig) GetEndpointNodeNames(svc *v1.Service) (sets.String, error) {
|
||||
}
|
||||
|
||||
// WaitForEndpointOnNode waits for a service endpoint on the given node.
|
||||
func (j *TestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) error {
|
||||
func (j *TestJig) WaitForEndpointOnNode(nodeName string) error {
|
||||
return wait.PollImmediate(framework.Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) {
|
||||
endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
|
||||
endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(j.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err)
|
||||
framework.Logf("Get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err)
|
||||
return false, nil
|
||||
}
|
||||
if len(endpoints.Subsets) == 0 {
|
||||
@ -316,7 +318,7 @@ func (j *TestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string)
|
||||
return false, nil
|
||||
}
|
||||
epHostName := *endpoints.Subsets[0].Addresses[0].NodeName
|
||||
framework.Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName)
|
||||
framework.Logf("Pod for service %s/%s is on node %s", j.Namespace, j.Name, epHostName)
|
||||
if epHostName != nodeName {
|
||||
framework.Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName)
|
||||
return false, nil
|
||||
@ -326,9 +328,9 @@ func (j *TestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string)
|
||||
}
|
||||
|
||||
// WaitForAvailableEndpoint waits for at least 1 endpoint to be available till timeout
|
||||
func (j *TestJig) WaitForAvailableEndpoint(namespace, serviceName string, timeout time.Duration) error {
|
||||
func (j *TestJig) WaitForAvailableEndpoint(timeout time.Duration) error {
|
||||
//Wait for endpoints to be created, this may take longer time if service backing pods are taking longer time to run
|
||||
endpointSelector := fields.OneTermEqualSelector("metadata.name", serviceName)
|
||||
endpointSelector := fields.OneTermEqualSelector("metadata.name", j.Name)
|
||||
stopCh := make(chan struct{})
|
||||
endpointAvailable := false
|
||||
var controller cache.Controller
|
||||
@ -336,12 +338,12 @@ func (j *TestJig) WaitForAvailableEndpoint(namespace, serviceName string, timeou
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = endpointSelector.String()
|
||||
obj, err := j.Client.CoreV1().Endpoints(namespace).List(options)
|
||||
obj, err := j.Client.CoreV1().Endpoints(j.Namespace).List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = endpointSelector.String()
|
||||
return j.Client.CoreV1().Endpoints(namespace).Watch(options)
|
||||
return j.Client.CoreV1().Endpoints(j.Namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&v1.Endpoints{},
|
||||
@ -373,7 +375,7 @@ func (j *TestJig) WaitForAvailableEndpoint(namespace, serviceName string, timeou
|
||||
return endpointAvailable, nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("no subset of available IP address found for the endpoint %s within timeout %v", serviceName, timeout)
|
||||
return fmt.Errorf("no subset of available IP address found for the endpoint %s within timeout %v", j.Name, timeout)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -438,28 +440,28 @@ func (j *TestJig) sanityCheckService(svc *v1.Service, svcType v1.ServiceType) (*
|
||||
// UpdateService fetches a service, calls the update function on it, and
|
||||
// then attempts to send the updated service. It tries up to 3 times in the
|
||||
// face of timeouts and conflicts.
|
||||
func (j *TestJig) UpdateService(namespace, name string, update func(*v1.Service)) (*v1.Service, error) {
|
||||
func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) {
|
||||
for i := 0; i < 3; i++ {
|
||||
service, err := j.Client.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
|
||||
service, err := j.Client.CoreV1().Services(j.Namespace).Get(j.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get Service %q: %v", name, err)
|
||||
return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err)
|
||||
}
|
||||
update(service)
|
||||
result, err := j.Client.CoreV1().Services(namespace).Update(service)
|
||||
result, err := j.Client.CoreV1().Services(j.Namespace).Update(service)
|
||||
if err == nil {
|
||||
return j.sanityCheckService(result, service.Spec.Type)
|
||||
}
|
||||
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
|
||||
return nil, fmt.Errorf("failed to update Service %q: %v", name, err)
|
||||
return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("too many retries updating Service %q", name)
|
||||
return nil, fmt.Errorf("too many retries updating Service %q", j.Name)
|
||||
}
|
||||
|
||||
// WaitForNewIngressIP waits for the given service to get a new ingress IP, or returns an error after the given timeout
|
||||
func (j *TestJig) WaitForNewIngressIP(namespace, name, existingIP string, timeout time.Duration) (*v1.Service, error) {
|
||||
framework.Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name)
|
||||
service, err := j.waitForCondition(namespace, name, timeout, "have a new ingress IP", func(svc *v1.Service) bool {
|
||||
func (j *TestJig) WaitForNewIngressIP(existingIP string, timeout time.Duration) (*v1.Service, error) {
|
||||
framework.Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, j.Name)
|
||||
service, err := j.waitForCondition(timeout, "have a new ingress IP", func(svc *v1.Service) bool {
|
||||
if len(svc.Status.LoadBalancer.Ingress) == 0 {
|
||||
return false
|
||||
}
|
||||
@ -476,14 +478,14 @@ func (j *TestJig) WaitForNewIngressIP(namespace, name, existingIP string, timeou
|
||||
}
|
||||
|
||||
// ChangeServiceNodePort changes node ports of the given service.
|
||||
func (j *TestJig) ChangeServiceNodePort(namespace, name string, initial int) (*v1.Service, error) {
|
||||
func (j *TestJig) ChangeServiceNodePort(initial int) (*v1.Service, error) {
|
||||
var err error
|
||||
var service *v1.Service
|
||||
for i := 1; i < NodePortRange.Size; i++ {
|
||||
offs1 := initial - NodePortRange.Base
|
||||
offs2 := (offs1 + i) % NodePortRange.Size
|
||||
newPort := NodePortRange.Base + offs2
|
||||
service, err = j.UpdateService(namespace, name, func(s *v1.Service) {
|
||||
service, err = j.UpdateService(func(s *v1.Service) {
|
||||
s.Spec.Ports[0].NodePort = int32(newPort)
|
||||
})
|
||||
if err != nil && strings.Contains(err.Error(), portallocator.ErrAllocated.Error()) {
|
||||
@ -497,9 +499,9 @@ func (j *TestJig) ChangeServiceNodePort(namespace, name string, initial int) (*v
|
||||
}
|
||||
|
||||
// WaitForLoadBalancer waits the given service to have a LoadBalancer, or returns an error after the given timeout
|
||||
func (j *TestJig) WaitForLoadBalancer(namespace, name string, timeout time.Duration) (*v1.Service, error) {
|
||||
framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
|
||||
service, err := j.waitForCondition(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool {
|
||||
func (j *TestJig) WaitForLoadBalancer(timeout time.Duration) (*v1.Service, error) {
|
||||
framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, j.Name)
|
||||
service, err := j.waitForCondition(timeout, "have a load balancer", func(svc *v1.Service) bool {
|
||||
return len(svc.Status.LoadBalancer.Ingress) > 0
|
||||
})
|
||||
if err != nil {
|
||||
@ -509,7 +511,7 @@ func (j *TestJig) WaitForLoadBalancer(namespace, name string, timeout time.Durat
|
||||
}
|
||||
|
||||
// WaitForLoadBalancerDestroy waits the given service to destroy a LoadBalancer, or returns an error after the given timeout
|
||||
func (j *TestJig) WaitForLoadBalancerDestroy(namespace, name string, ip string, port int, timeout time.Duration) (*v1.Service, error) {
|
||||
func (j *TestJig) WaitForLoadBalancerDestroy(ip string, port int, timeout time.Duration) (*v1.Service, error) {
|
||||
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
|
||||
defer func() {
|
||||
if err := framework.EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil {
|
||||
@ -517,8 +519,8 @@ func (j *TestJig) WaitForLoadBalancerDestroy(namespace, name string, ip string,
|
||||
}
|
||||
}()
|
||||
|
||||
framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
|
||||
service, err := j.waitForCondition(namespace, name, timeout, "have no load balancer", func(svc *v1.Service) bool {
|
||||
framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, j.Name)
|
||||
service, err := j.waitForCondition(timeout, "have no load balancer", func(svc *v1.Service) bool {
|
||||
return len(svc.Status.LoadBalancer.Ingress) == 0
|
||||
})
|
||||
if err != nil {
|
||||
@ -527,10 +529,10 @@ func (j *TestJig) WaitForLoadBalancerDestroy(namespace, name string, ip string,
|
||||
return j.sanityCheckService(service, v1.ServiceTypeLoadBalancer)
|
||||
}
|
||||
|
||||
func (j *TestJig) waitForCondition(namespace, name string, timeout time.Duration, message string, conditionFn func(*v1.Service) bool) (*v1.Service, error) {
|
||||
func (j *TestJig) waitForCondition(timeout time.Duration, message string, conditionFn func(*v1.Service) bool) (*v1.Service, error) {
|
||||
var service *v1.Service
|
||||
pollFunc := func() (bool, error) {
|
||||
svc, err := j.Client.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
|
||||
svc, err := j.Client.CoreV1().Services(j.Namespace).Get(j.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -541,7 +543,7 @@ func (j *TestJig) waitForCondition(namespace, name string, timeout time.Duration
|
||||
return false, nil
|
||||
}
|
||||
if err := wait.PollImmediate(framework.Poll, timeout, pollFunc); err != nil {
|
||||
return nil, fmt.Errorf("timed out waiting for service %q to %s", name, message)
|
||||
return nil, fmt.Errorf("timed out waiting for service %q to %s", j.Name, message)
|
||||
}
|
||||
return service, nil
|
||||
}
|
||||
@ -549,13 +551,13 @@ func (j *TestJig) waitForCondition(namespace, name string, timeout time.Duration
|
||||
// newRCTemplate returns the default v1.ReplicationController object for
|
||||
// this j, but does not actually create the RC. The default RC has the same
|
||||
// name as the j and runs the "netexec" container.
|
||||
func (j *TestJig) newRCTemplate(namespace string) *v1.ReplicationController {
|
||||
func (j *TestJig) newRCTemplate() *v1.ReplicationController {
|
||||
var replicas int32 = 1
|
||||
var grace int64 = 3 // so we don't race with kube-proxy when scaling up/down
|
||||
|
||||
rc := &v1.ReplicationController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Namespace: j.Namespace,
|
||||
Name: j.Name,
|
||||
Labels: j.Labels,
|
||||
},
|
||||
@ -612,13 +614,13 @@ func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
|
||||
}
|
||||
|
||||
// CreatePDB returns a PodDisruptionBudget for the given ReplicationController, or returns an error if a PodDisruptionBudget isn't ready
|
||||
func (j *TestJig) CreatePDB(namespace string, rc *v1.ReplicationController) (*policyv1beta1.PodDisruptionBudget, error) {
|
||||
pdb := j.newPDBTemplate(namespace, rc)
|
||||
newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
|
||||
func (j *TestJig) CreatePDB(rc *v1.ReplicationController) (*policyv1beta1.PodDisruptionBudget, error) {
|
||||
pdb := j.newPDBTemplate(rc)
|
||||
newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Create(pdb)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err)
|
||||
}
|
||||
if err := j.waitForPdbReady(namespace); err != nil {
|
||||
if err := j.waitForPdbReady(); err != nil {
|
||||
return nil, fmt.Errorf("failed waiting for PDB to be ready: %v", err)
|
||||
}
|
||||
|
||||
@ -628,12 +630,12 @@ func (j *TestJig) CreatePDB(namespace string, rc *v1.ReplicationController) (*po
|
||||
// newPDBTemplate returns the default policyv1beta1.PodDisruptionBudget object for
|
||||
// this j, but does not actually create the PDB. The default PDB specifies a
|
||||
// MinAvailable of N-1 and matches the pods created by the RC.
|
||||
func (j *TestJig) newPDBTemplate(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget {
|
||||
func (j *TestJig) newPDBTemplate(rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget {
|
||||
minAvailable := intstr.FromInt(int(*rc.Spec.Replicas) - 1)
|
||||
|
||||
pdb := &policyv1beta1.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Namespace: j.Namespace,
|
||||
Name: j.Name,
|
||||
Labels: j.Labels,
|
||||
},
|
||||
@ -649,52 +651,52 @@ func (j *TestJig) newPDBTemplate(namespace string, rc *v1.ReplicationController)
|
||||
// Run creates a ReplicationController and Pod(s) and waits for the
|
||||
// Pod(s) to be running. Callers can provide a function to tweak the RC object
|
||||
// before it is created.
|
||||
func (j *TestJig) Run(namespace string, tweak func(rc *v1.ReplicationController)) (*v1.ReplicationController, error) {
|
||||
rc := j.newRCTemplate(namespace)
|
||||
func (j *TestJig) Run(tweak func(rc *v1.ReplicationController)) (*v1.ReplicationController, error) {
|
||||
rc := j.newRCTemplate()
|
||||
if tweak != nil {
|
||||
tweak(rc)
|
||||
}
|
||||
result, err := j.Client.CoreV1().ReplicationControllers(namespace).Create(rc)
|
||||
result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(rc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err)
|
||||
}
|
||||
pods, err := j.waitForPodsCreated(namespace, int(*(rc.Spec.Replicas)))
|
||||
pods, err := j.waitForPodsCreated(int(*(rc.Spec.Replicas)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create pods: %v", err)
|
||||
}
|
||||
if err := j.waitForPodsReady(namespace, pods); err != nil {
|
||||
if err := j.waitForPodsReady(pods); err != nil {
|
||||
return nil, fmt.Errorf("failed waiting for pods to be running: %v", err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Scale scales pods to the given replicas
|
||||
func (j *TestJig) Scale(namespace string, replicas int) error {
|
||||
func (j *TestJig) Scale(replicas int) error {
|
||||
rc := j.Name
|
||||
scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{})
|
||||
scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(rc, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get scale for RC %q: %v", rc, err)
|
||||
}
|
||||
|
||||
scale.Spec.Replicas = int32(replicas)
|
||||
_, err = j.Client.CoreV1().ReplicationControllers(namespace).UpdateScale(rc, scale)
|
||||
_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(rc, scale)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to scale RC %q: %v", rc, err)
|
||||
}
|
||||
pods, err := j.waitForPodsCreated(namespace, replicas)
|
||||
pods, err := j.waitForPodsCreated(replicas)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed waiting for pods: %v", err)
|
||||
}
|
||||
if err := j.waitForPodsReady(namespace, pods); err != nil {
|
||||
if err := j.waitForPodsReady(pods); err != nil {
|
||||
return fmt.Errorf("failed waiting for pods to be running: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *TestJig) waitForPdbReady(namespace string) error {
|
||||
func (j *TestJig) waitForPdbReady() error {
|
||||
timeout := 2 * time.Minute
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
|
||||
pdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(j.Name, metav1.GetOptions{})
|
||||
pdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Get(j.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -706,14 +708,14 @@ func (j *TestJig) waitForPdbReady(namespace string) error {
|
||||
return fmt.Errorf("timeout waiting for PDB %q to be ready", j.Name)
|
||||
}
|
||||
|
||||
func (j *TestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) {
|
||||
func (j *TestJig) waitForPodsCreated(replicas int) ([]string, error) {
|
||||
timeout := 2 * time.Minute
|
||||
// List the pods, making sure we observe all the replicas.
|
||||
label := labels.SelectorFromSet(labels.Set(j.Labels))
|
||||
framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := j.Client.CoreV1().Pods(namespace).List(options)
|
||||
pods, err := j.Client.CoreV1().Pods(j.Namespace).List(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -734,9 +736,9 @@ func (j *TestJig) waitForPodsCreated(namespace string, replicas int) ([]string,
|
||||
return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas)
|
||||
}
|
||||
|
||||
func (j *TestJig) waitForPodsReady(namespace string, pods []string) error {
|
||||
func (j *TestJig) waitForPodsReady(pods []string) error {
|
||||
timeout := 2 * time.Minute
|
||||
if !e2epod.CheckPodsRunningReady(j.Client, namespace, pods, timeout) {
|
||||
if !e2epod.CheckPodsRunningReady(j.Client, j.Namespace, pods, timeout) {
|
||||
return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods))
|
||||
}
|
||||
return nil
|
||||
@ -822,11 +824,11 @@ func testEndpointReachability(endpoint string, port int32, protocol v1.Protocol,
|
||||
|
||||
// checkClusterIPServiceReachability ensures that service of type ClusterIP is reachable over
|
||||
// - ServiceName:ServicePort, ClusterIP:ServicePort
|
||||
func (j *TestJig) checkClusterIPServiceReachability(namespace string, svc *v1.Service, pod *v1.Pod) error {
|
||||
func (j *TestJig) checkClusterIPServiceReachability(svc *v1.Service, pod *v1.Pod) error {
|
||||
clusterIP := svc.Spec.ClusterIP
|
||||
servicePorts := svc.Spec.Ports
|
||||
|
||||
err := j.WaitForAvailableEndpoint(namespace, svc.Name, ServiceEndpointsTimeout)
|
||||
err := j.WaitForAvailableEndpoint(ServiceEndpointsTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -849,7 +851,7 @@ func (j *TestJig) checkClusterIPServiceReachability(namespace string, svc *v1.Se
|
||||
// ServiceName:ServicePort, ClusterIP:ServicePort and NodeInternalIPs:NodePort
|
||||
// - External clients should be reachable to service over -
|
||||
// NodePublicIPs:NodePort
|
||||
func (j *TestJig) checkNodePortServiceReachability(namespace string, svc *v1.Service, pod *v1.Pod) error {
|
||||
func (j *TestJig) checkNodePortServiceReachability(svc *v1.Service, pod *v1.Pod) error {
|
||||
clusterIP := svc.Spec.ClusterIP
|
||||
servicePorts := svc.Spec.Ports
|
||||
|
||||
@ -859,7 +861,7 @@ func (j *TestJig) checkNodePortServiceReachability(namespace string, svc *v1.Ser
|
||||
return err
|
||||
}
|
||||
|
||||
err = j.WaitForAvailableEndpoint(namespace, svc.Name, ServiceEndpointsTimeout)
|
||||
err = j.WaitForAvailableEndpoint(ServiceEndpointsTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -895,7 +897,7 @@ func (j *TestJig) checkExternalServiceReachability(svc *v1.Service, pod *v1.Pod)
|
||||
}
|
||||
|
||||
// CheckServiceReachability ensures that request are served by the services. Only supports Services with type ClusterIP, NodePort and ExternalName.
|
||||
func (j *TestJig) CheckServiceReachability(namespace string, svc *v1.Service, pod *v1.Pod) error {
|
||||
func (j *TestJig) CheckServiceReachability(svc *v1.Service, pod *v1.Pod) error {
|
||||
svcType := svc.Spec.Type
|
||||
|
||||
_, err := j.sanityCheckService(svc, svcType)
|
||||
@ -905,9 +907,9 @@ func (j *TestJig) CheckServiceReachability(namespace string, svc *v1.Service, po
|
||||
|
||||
switch svcType {
|
||||
case v1.ServiceTypeClusterIP:
|
||||
return j.checkClusterIPServiceReachability(namespace, svc, pod)
|
||||
return j.checkClusterIPServiceReachability(svc, pod)
|
||||
case v1.ServiceTypeNodePort:
|
||||
return j.checkNodePortServiceReachability(namespace, svc, pod)
|
||||
return j.checkNodePortServiceReachability(svc, pod)
|
||||
case v1.ServiceTypeExternalName:
|
||||
return j.checkExternalServiceReachability(svc, pod)
|
||||
default:
|
||||
@ -916,13 +918,13 @@ func (j *TestJig) CheckServiceReachability(namespace string, svc *v1.Service, po
|
||||
}
|
||||
|
||||
// CreateServicePods creates a replication controller with the label same as service. Service listens to HTTP.
|
||||
func (j *TestJig) CreateServicePods(c clientset.Interface, ns string, replica int) error {
|
||||
func (j *TestJig) CreateServicePods(replica int) error {
|
||||
config := testutils.RCConfig{
|
||||
Client: c,
|
||||
Client: j.Client,
|
||||
Name: j.Name,
|
||||
Image: framework.ServeHostnameImage,
|
||||
Command: []string{"/agnhost", "serve-hostname"},
|
||||
Namespace: ns,
|
||||
Namespace: j.Namespace,
|
||||
Labels: j.Labels,
|
||||
PollInterval: 3 * time.Second,
|
||||
Timeout: framework.PodReadyBeforeTimeout,
|
||||
@ -932,13 +934,13 @@ func (j *TestJig) CreateServicePods(c clientset.Interface, ns string, replica in
|
||||
}
|
||||
|
||||
// CreateTCPUDPServicePods creates a replication controller with the label same as service. Service listens to TCP and UDP.
|
||||
func (j *TestJig) CreateTCPUDPServicePods(c clientset.Interface, ns string, replica int) error {
|
||||
func (j *TestJig) CreateTCPUDPServicePods(replica int) error {
|
||||
config := testutils.RCConfig{
|
||||
Client: c,
|
||||
Client: j.Client,
|
||||
Name: j.Name,
|
||||
Image: framework.ServeHostnameImage,
|
||||
Command: []string{"/agnhost", "serve-hostname", "--http=false", "--tcp", "--udp"},
|
||||
Namespace: ns,
|
||||
Namespace: j.Namespace,
|
||||
Labels: j.Labels,
|
||||
PollInterval: 3 * time.Second,
|
||||
Timeout: framework.PodReadyBeforeTimeout,
|
||||
|
@ -210,7 +210,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
|
||||
ginkgo.It("should create service with cluster ip from primary service range [Feature:IPv6DualStackAlphaFeature:Phase2]", func() {
|
||||
serviceName := "defaultclusterip"
|
||||
ns := f.Namespace.Name
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
|
||||
defaultIPFamily := v1.IPv4Protocol
|
||||
if framework.TestContext.ClusterIsIPv6() {
|
||||
@ -229,7 +229,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
|
||||
service := createService(t.ServiceName, t.Namespace, t.Labels, nil)
|
||||
|
||||
jig.Labels = t.Labels
|
||||
jig.CreateServicePods(cs, ns, 2)
|
||||
err := jig.CreateServicePods(2)
|
||||
framework.ExpectNoError(err)
|
||||
svc, err := t.CreateService(service)
|
||||
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
|
||||
|
||||
@ -251,7 +252,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
|
||||
ns := f.Namespace.Name
|
||||
ipv4 := v1.IPv4Protocol
|
||||
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
|
||||
t := e2eservice.NewServerTest(cs, ns, serviceName)
|
||||
defer func() {
|
||||
@ -265,7 +266,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
|
||||
service := createService(t.ServiceName, t.Namespace, t.Labels, &ipv4)
|
||||
|
||||
jig.Labels = t.Labels
|
||||
jig.CreateServicePods(cs, ns, 2)
|
||||
err := jig.CreateServicePods(2)
|
||||
framework.ExpectNoError(err)
|
||||
svc, err := t.CreateService(service)
|
||||
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
|
||||
|
||||
@ -287,7 +289,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
|
||||
ns := f.Namespace.Name
|
||||
ipv6 := v1.IPv6Protocol
|
||||
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
|
||||
t := e2eservice.NewServerTest(cs, ns, serviceName)
|
||||
defer func() {
|
||||
@ -301,7 +303,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
|
||||
service := createService(t.ServiceName, t.Namespace, t.Labels, &ipv6)
|
||||
|
||||
jig.Labels = t.Labels
|
||||
jig.CreateServicePods(cs, ns, 2)
|
||||
err := jig.CreateServicePods(2)
|
||||
framework.ExpectNoError(err)
|
||||
svc, err := t.CreateService(service)
|
||||
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
|
||||
|
||||
|
@ -71,7 +71,7 @@ var _ = SIGDescribe("Firewall rule", func() {
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("Got cluster ID: %v", clusterID)
|
||||
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
nodeList, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@ -82,13 +82,13 @@ var _ = SIGDescribe("Firewall rule", func() {
|
||||
nodesSet := sets.NewString(nodesNames...)
|
||||
|
||||
ginkgo.By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
|
||||
svc, err := jig.CreateLoadBalancerService(ns, serviceName, e2eservice.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
|
||||
svc, err := jig.CreateLoadBalancerService(e2eservice.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
|
||||
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}}
|
||||
svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
_, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
|
||||
_, err = jig.UpdateService(func(svc *v1.Service) {
|
||||
svc.Spec.Type = v1.ServiceTypeNodePort
|
||||
svc.Spec.LoadBalancerSourceRanges = nil
|
||||
})
|
||||
@ -118,7 +118,7 @@ var _ = SIGDescribe("Firewall rule", func() {
|
||||
|
||||
// OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE
|
||||
ginkgo.By("Updating LoadBalancer service to ExternalTrafficPolicy=Local")
|
||||
svc, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
|
||||
svc, err = jig.UpdateService(func(svc *v1.Service) {
|
||||
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
@ -65,15 +65,15 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
|
||||
svcName := "net-tiers-svc"
|
||||
ns := f.Namespace.Name
|
||||
jig := e2eservice.NewTestJig(cs, svcName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, svcName)
|
||||
|
||||
ginkgo.By("creating a pod to be part of the service " + svcName)
|
||||
_, err := jig.Run(ns, nil)
|
||||
_, err := jig.Run(nil)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Test 1: create a standard tiered LB for the Service.
|
||||
ginkgo.By("creating a Service of type LoadBalancer using the standard network tier")
|
||||
svc, err := jig.CreateTCPService(ns, func(svc *v1.Service) {
|
||||
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
|
||||
svc.Spec.Type = v1.ServiceTypeLoadBalancer
|
||||
setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard))
|
||||
})
|
||||
@ -86,11 +86,11 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
|
||||
|
||||
// Wait and verify the LB.
|
||||
ingressIP := waitAndVerifyLBWithTier(jig, ns, svcName, "", createTimeout, lagTimeout)
|
||||
ingressIP := waitAndVerifyLBWithTier(jig, "", createTimeout, lagTimeout)
|
||||
|
||||
// Test 2: re-create a LB of a different tier for the updated Service.
|
||||
ginkgo.By("updating the Service to use the premium (default) tier")
|
||||
svc, err = jig.UpdateService(ns, svcName, func(svc *v1.Service) {
|
||||
svc, err = jig.UpdateService(func(svc *v1.Service) {
|
||||
clearNetworkTier(svc)
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@ -101,7 +101,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
|
||||
// Wait until the ingress IP changes. Each tier has its own pool of
|
||||
// IPs, so changing tiers implies changing IPs.
|
||||
ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout)
|
||||
ingressIP = waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
|
||||
|
||||
// Test 3: create a standard-tierd LB with a user-requested IP.
|
||||
ginkgo.By("reserving a static IP for the load balancer")
|
||||
@ -122,7 +122,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
framework.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)
|
||||
|
||||
ginkgo.By("updating the Service to use the standard tier with a requested IP")
|
||||
svc, err = jig.UpdateService(ns, svc.Name, func(svc *v1.Service) {
|
||||
svc, err = jig.UpdateService(func(svc *v1.Service) {
|
||||
svc.Spec.LoadBalancerIP = requestedIP
|
||||
setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard))
|
||||
})
|
||||
@ -134,14 +134,14 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
framework.ExpectEqual(svcTier, cloud.NetworkTierStandard)
|
||||
|
||||
// Wait until the ingress IP changes and verifies the LB.
|
||||
ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout)
|
||||
ingressIP = waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
|
||||
})
|
||||
})
|
||||
|
||||
func waitAndVerifyLBWithTier(jig *e2eservice.TestJig, ns, svcName, existingIP string, waitTimeout, checkTimeout time.Duration) string {
|
||||
func waitAndVerifyLBWithTier(jig *e2eservice.TestJig, existingIP string, waitTimeout, checkTimeout time.Duration) string {
|
||||
// If existingIP is "" this will wait for any ingress IP to show up. Otherwise
|
||||
// it will wait for the ingress IP to change to something different.
|
||||
svc, err := jig.WaitForNewIngressIP(ns, svcName, existingIP, waitTimeout)
|
||||
svc, err := jig.WaitForNewIngressIP(existingIP, waitTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
svcPort := int(svc.Spec.Ports[0].Port)
|
||||
|
@ -124,14 +124,14 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.ConformanceIt("should serve a basic endpoint from pods ", func() {
|
||||
serviceName := "endpoint-test2"
|
||||
ns := f.Namespace.Name
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
|
||||
ginkgo.By("creating service " + serviceName + " in namespace " + ns)
|
||||
defer func() {
|
||||
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
|
||||
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
|
||||
}()
|
||||
_, err := jig.CreateTCPServiceWithPort(ns, nil, 80)
|
||||
_, err := jig.CreateTCPServiceWithPort(nil, 80)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
|
||||
@ -178,7 +178,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
|
||||
serviceName := "multi-endpoint-test"
|
||||
ns := f.Namespace.Name
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
|
||||
defer func() {
|
||||
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
|
||||
@ -189,7 +189,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
svc2port := "svc2"
|
||||
|
||||
ginkgo.By("creating service " + serviceName + " in namespace " + ns)
|
||||
_, err := jig.CreateTCPService(ns, func(service *v1.Service) {
|
||||
_, err := jig.CreateTCPService(func(service *v1.Service) {
|
||||
service.Spec.Ports = []v1.ServicePort{
|
||||
{
|
||||
Name: "portname1",
|
||||
@ -273,9 +273,9 @@ var _ = SIGDescribe("Services", func() {
|
||||
ns := f.Namespace.Name
|
||||
|
||||
ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
servicePort := 8080
|
||||
tcpService, err := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
|
||||
tcpService, err := jig.CreateTCPServiceWithPort(nil, int32(servicePort))
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the sourceip test service")
|
||||
@ -344,9 +344,9 @@ var _ = SIGDescribe("Services", func() {
|
||||
ns := f.Namespace.Name
|
||||
|
||||
ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
servicePort := 8080
|
||||
svc, err := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
|
||||
svc, err := jig.CreateTCPServiceWithPort(nil, int32(servicePort))
|
||||
framework.ExpectNoError(err)
|
||||
serviceIP := svc.Spec.ClusterIP
|
||||
framework.Logf("hairpin-test cluster ip: %s", serviceIP)
|
||||
@ -364,7 +364,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
|
||||
|
||||
ginkgo.By("Checking if the pod can reach itself")
|
||||
err = jig.CheckServiceReachability(ns, svc, pod)
|
||||
err = jig.CheckServiceReachability(svc, pod)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
@ -534,20 +534,20 @@ var _ = SIGDescribe("Services", func() {
|
||||
serviceName := "nodeport-test"
|
||||
ns := f.Namespace.Name
|
||||
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
|
||||
ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
|
||||
nodePortService, err := jig.CreateTCPService(ns, func(svc *v1.Service) {
|
||||
nodePortService, err := jig.CreateTCPService(func(svc *v1.Service) {
|
||||
svc.Spec.Type = v1.ServiceTypeNodePort
|
||||
svc.Spec.Ports = []v1.ServicePort{
|
||||
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(9376)},
|
||||
}
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
err = jig.CreateServicePods(cs, ns, 2)
|
||||
err = jig.CreateServicePods(2)
|
||||
framework.ExpectNoError(err)
|
||||
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil)
|
||||
err = jig.CheckServiceReachability(ns, nodePortService, execPod)
|
||||
err = jig.CheckServiceReachability(nodePortService, execPod)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
@ -582,23 +582,20 @@ var _ = SIGDescribe("Services", func() {
|
||||
ns2 := namespacePtr.Name // LB2 in ns2 on UDP
|
||||
framework.Logf("namespace for UDP test: %s", ns2)
|
||||
|
||||
jig := e2eservice.NewTestJig(cs, serviceName)
|
||||
nodeIP, err := e2enode.PickIP(jig.Client) // for later
|
||||
if err != nil {
|
||||
framework.Logf("Unexpected error occurred: %v", err)
|
||||
}
|
||||
// TODO: write a wrapper for ExpectNoErrorWithOffset()
|
||||
framework.ExpectNoErrorWithOffset(0, err)
|
||||
nodeIP, err := e2enode.PickIP(cs) // for later
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Test TCP and UDP Services. Services with the same name in different
|
||||
// namespaces should get different node ports and load balancers.
|
||||
|
||||
ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
|
||||
tcpService, err := jig.CreateTCPService(ns1, nil)
|
||||
tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
|
||||
tcpService, err := tcpJig.CreateTCPService(nil)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
|
||||
udpService, err := jig.CreateUDPService(ns2, nil)
|
||||
udpJig := e2eservice.NewTestJig(cs, ns2, serviceName)
|
||||
udpService, err := udpJig.CreateUDPService(nil)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("verifying that TCP and UDP use the same port")
|
||||
@ -609,17 +606,17 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.Logf("service port (TCP and UDP): %d", svcPort)
|
||||
|
||||
ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
|
||||
_, err = jig.Run(ns1, nil)
|
||||
_, err = tcpJig.Run(nil)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating a pod to be part of the UDP service " + serviceName)
|
||||
_, err = jig.Run(ns2, nil)
|
||||
_, err = udpJig.Run(nil)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Change the services to NodePort.
|
||||
|
||||
ginkgo.By("changing the TCP service to type=NodePort")
|
||||
tcpService, err = jig.UpdateService(ns1, tcpService.Name, func(s *v1.Service) {
|
||||
tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
|
||||
s.Spec.Type = v1.ServiceTypeNodePort
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@ -627,7 +624,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.Logf("TCP node port: %d", tcpNodePort)
|
||||
|
||||
ginkgo.By("changing the UDP service to type=NodePort")
|
||||
udpService, err = jig.UpdateService(ns2, udpService.Name, func(s *v1.Service) {
|
||||
udpService, err = udpJig.UpdateService(func(s *v1.Service) {
|
||||
s.Spec.Type = v1.ServiceTypeNodePort
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@ -670,7 +667,7 @@ var _ = SIGDescribe("Services", func() {
}

ginkgo.By("changing the TCP service to type=LoadBalancer")
tcpService, err = jig.UpdateService(ns1, tcpService.Name, func(s *v1.Service) {
tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
@ -678,7 +675,7 @@ var _ = SIGDescribe("Services", func() {

if loadBalancerSupportsUDP {
ginkgo.By("changing the UDP service to type=LoadBalancer")
udpService, err = jig.UpdateService(ns2, udpService.Name, func(s *v1.Service) {
udpService, err = udpJig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
framework.ExpectNoError(err)
@ -690,7 +687,7 @@ var _ = SIGDescribe("Services", func() {

ginkgo.By("waiting for the TCP service to have a load balancer")
// Wait for the load balancer to be created asynchronously
tcpService, err = jig.WaitForLoadBalancer(ns1, tcpService.Name, loadBalancerCreateTimeout)
tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
framework.ExpectNoError(err)
if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
@ -723,7 +720,7 @@ var _ = SIGDescribe("Services", func() {
if loadBalancerSupportsUDP {
ginkgo.By("waiting for the UDP service to have a load balancer")
// 2nd one should be faster since they ran in parallel.
udpService, err = jig.WaitForLoadBalancer(ns2, udpService.Name, loadBalancerCreateTimeout)
udpService, err = udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
framework.ExpectNoError(err)
if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
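
WaitForLoadBalancer likewise drops its namespace and name arguments; the surrounding assertions are unchanged. A sketch of the wait-and-verify step, under the test's own assumption that flipping a NodePort service to LoadBalancer must not reallocate the node port:

	tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
	framework.ExpectNoError(err)
	// The pre-existing NodePort must survive the type change.
	if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
		framework.Failf("NodePort changed unexpectedly: %d -> %d", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
	}
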
@ -754,7 +751,7 @@ var _ = SIGDescribe("Services", func() {
// Change the services' node ports.

ginkgo.By("changing the TCP service's NodePort")
tcpService, err = jig.ChangeServiceNodePort(ns1, tcpService.Name, tcpNodePort)
tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort)
framework.ExpectNoError(err)
tcpNodePortOld := tcpNodePort
tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
@ -767,7 +764,7 @@ var _ = SIGDescribe("Services", func() {
framework.Logf("TCP node port: %d", tcpNodePort)

ginkgo.By("changing the UDP service's NodePort")
udpService, err = jig.ChangeServiceNodePort(ns2, udpService.Name, udpNodePort)
udpService, err = udpJig.ChangeServiceNodePort(udpNodePort)
framework.ExpectNoError(err)
udpNodePortOld := udpNodePort
udpNodePort = int(udpService.Spec.Ports[0].NodePort)
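
ChangeServiceNodePort keeps its port argument but loses namespace and name. A sketch of how the test rotates a node port and records the newly allocated value (names assumed from context; the framework probes for a free port rather than keeping the one passed in):

	tcpNodePortOld := tcpNodePort
	// Ask the jig to move the service off its current node port.
	tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort)
	framework.ExpectNoError(err)
	tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
	framework.Logf("node port moved: %d -> %d", tcpNodePortOld, tcpNodePort)
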
@ -802,7 +799,7 @@ var _ = SIGDescribe("Services", func() {
// Change the services' main ports.

ginkgo.By("changing the TCP service's port")
tcpService, err = jig.UpdateService(ns1, tcpService.Name, func(s *v1.Service) {
tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
s.Spec.Ports[0].Port++
})
framework.ExpectNoError(err)
@ -819,7 +816,7 @@ var _ = SIGDescribe("Services", func() {
}

ginkgo.By("changing the UDP service's port")
udpService, err = jig.UpdateService(ns2, udpService.Name, func(s *v1.Service) {
udpService, err = udpJig.UpdateService(func(s *v1.Service) {
s.Spec.Ports[0].Port++
})
framework.ExpectNoError(err)
@ -850,9 +847,9 @@ var _ = SIGDescribe("Services", func() {
}

ginkgo.By("Scaling the pods to 0")
err = jig.Scale(ns1, 0)
err = tcpJig.Scale(0)
framework.ExpectNoError(err)
err = jig.Scale(ns2, 0)
err = udpJig.Scale(0)
framework.ExpectNoError(err)

ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
@ -870,9 +867,9 @@ var _ = SIGDescribe("Services", func() {
}

ginkgo.By("Scaling the pods to 1")
err = jig.Scale(ns1, 1)
err = tcpJig.Scale(1)
framework.ExpectNoError(err)
err = jig.Scale(ns2, 1)
err = udpJig.Scale(1)
framework.ExpectNoError(err)

ginkgo.By("hitting the TCP service's NodePort")
@ -892,24 +889,24 @@ var _ = SIGDescribe("Services", func() {
// Change the services back to ClusterIP.

ginkgo.By("changing TCP service back to type=ClusterIP")
tcpService, err = jig.UpdateService(ns1, tcpService.Name, func(s *v1.Service) {
tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports[0].NodePort = 0
})
framework.ExpectNoError(err)
// Wait for the load balancer to be destroyed asynchronously
tcpService, err = jig.WaitForLoadBalancerDestroy(ns1, tcpService.Name, tcpIngressIP, svcPort, loadBalancerCreateTimeout)
tcpService, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
framework.ExpectNoError(err)

ginkgo.By("changing UDP service back to type=ClusterIP")
udpService, err = jig.UpdateService(ns2, udpService.Name, func(s *v1.Service) {
udpService, err = udpJig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports[0].NodePort = 0
})
framework.ExpectNoError(err)
if loadBalancerSupportsUDP {
// Wait for the load balancer to be destroyed asynchronously
udpService, err = jig.WaitForLoadBalancerDestroy(ns2, udpService.Name, udpIngressIP, svcPort, loadBalancerCreateTimeout)
udpService, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
}

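
The teardown path mirrors the creation path: flip the type back and wait for the cloud resource to disappear. A sketch of one side of it (tcpIngressIP, svcPort, and the timeout come from the surrounding test):

	tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
		s.Spec.Type = v1.ServiceTypeClusterIP
		s.Spec.Ports[0].NodePort = 0 // release the node port as well
	})
	framework.ExpectNoError(err)
	// Blocks until the former ingress IP:port is no longer served.
	tcpService, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
	framework.ExpectNoError(err)
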
@ -938,10 +935,10 @@ var _ = SIGDescribe("Services", func() {
ginkgo.It("should be able to update service type to NodePort listening on same port number but different protocols", func() {
serviceName := "nodeport-update-service"
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)

ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
tcpService, err := jig.CreateTCPService(ns, nil)
tcpService, err := jig.CreateTCPService(nil)
framework.ExpectNoError(err)
defer func() {
framework.Logf("Cleaning up the updating NodePorts test service")
@ -951,7 +948,7 @@ var _ = SIGDescribe("Services", func() {
framework.Logf("Service Port TCP: %v", tcpService.Spec.Ports[0].Port)

ginkgo.By("changing the TCP service to type=NodePort")
nodePortService, err := jig.UpdateService(ns, tcpService.Name, func(s *v1.Service) {
nodePortService, err := jig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeNodePort
s.Spec.Ports = []v1.ServicePort{
{
@ -964,14 +961,14 @@ var _ = SIGDescribe("Services", func() {
})
framework.ExpectNoError(err)

err = jig.CreateTCPUDPServicePods(cs, ns, 2)
err = jig.CreateTCPUDPServicePods(2)
framework.ExpectNoError(err)
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil)
err = jig.CheckServiceReachability(ns, nodePortService, execPod)
err = jig.CheckServiceReachability(nodePortService, execPod)
framework.ExpectNoError(err)

ginkgo.By("Updating NodePort service to listen TCP and UDP based requests over same Port")
nodePortService, err = jig.UpdateService(ns, tcpService.Name, func(s *v1.Service) {
nodePortService, err = jig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeNodePort
s.Spec.Ports = []v1.ServicePort{
{
@ -989,7 +986,7 @@ var _ = SIGDescribe("Services", func() {
}
})
framework.ExpectNoError(err)
err = jig.CheckServiceReachability(ns, nodePortService, execPod)
err = jig.CheckServiceReachability(nodePortService, execPod)
framework.ExpectNoError(err)
nodePortCounts := len(nodePortService.Spec.Ports)
framework.ExpectEqual(nodePortCounts, 2, "updated service should have two Ports but found %d Ports", nodePortCounts)
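
The hunk above exercises a NodePort service that serves TCP and UDP on the same port number. A sketch of the two-entry port spec the update installs (port values and names are illustrative, not from the commit):

	s.Spec.Ports = []v1.ServicePort{
		{Name: "tcp-port", Port: 80, Protocol: v1.ProtocolTCP},
		// The same Port number is legal here because the protocol differs.
		{Name: "udp-port", Port: 80, Protocol: v1.ProtocolUDP},
	}
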
@ -1010,10 +1007,10 @@ var _ = SIGDescribe("Services", func() {
framework.ConformanceIt("should be able to change the type from ExternalName to ClusterIP", func() {
serviceName := "externalname-service"
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)

ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
externalNameService, err := jig.CreateExternalNameService(ns, nil)
_, err := jig.CreateExternalNameService(nil)
framework.ExpectNoError(err)
defer func() {
framework.Logf("Cleaning up the ExternalName to ClusterIP test service")
@ -1022,7 +1019,7 @@ var _ = SIGDescribe("Services", func() {
}()

ginkgo.By("changing the ExternalName service to type=ClusterIP")
clusterIPService, err := jig.UpdateService(ns, externalNameService.Name, func(s *v1.Service) {
clusterIPService, err := jig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.ExternalName = ""
s.Spec.Ports = []v1.ServicePort{
@ -1031,10 +1028,10 @@ var _ = SIGDescribe("Services", func() {
})
framework.ExpectNoError(err)

err = jig.CreateServicePods(cs, ns, 2)
err = jig.CreateServicePods(2)
framework.ExpectNoError(err)
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil)
err = jig.CheckServiceReachability(ns, clusterIPService, execPod)
err = jig.CheckServiceReachability(clusterIPService, execPod)
framework.ExpectNoError(err)
})

@ -1049,10 +1046,10 @@ var _ = SIGDescribe("Services", func() {
framework.ConformanceIt("should be able to change the type from ExternalName to NodePort", func() {
serviceName := "externalname-service"
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)

ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
externalNameService, err := jig.CreateExternalNameService(ns, nil)
_, err := jig.CreateExternalNameService(nil)
framework.ExpectNoError(err)
defer func() {
framework.Logf("Cleaning up the ExternalName to NodePort test service")
@ -1061,7 +1058,7 @@ var _ = SIGDescribe("Services", func() {
}()

ginkgo.By("changing the ExternalName service to type=NodePort")
nodePortService, err := jig.UpdateService(ns, externalNameService.Name, func(s *v1.Service) {
nodePortService, err := jig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeNodePort
s.Spec.ExternalName = ""
s.Spec.Ports = []v1.ServicePort{
@ -1069,11 +1066,11 @@ var _ = SIGDescribe("Services", func() {
}
})
framework.ExpectNoError(err)
err = jig.CreateServicePods(cs, ns, 2)
err = jig.CreateServicePods(2)
framework.ExpectNoError(err)

execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil)
err = jig.CheckServiceReachability(ns, nodePortService, execPod)
err = jig.CheckServiceReachability(nodePortService, execPod)
framework.ExpectNoError(err)
})

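
Converting away from ExternalName follows the same recipe in each of these tests: set the new type, clear Spec.ExternalName, and re-add ports. A sketch of the ClusterIP direction (port value illustrative):

	clusterIPService, err := jig.UpdateService(func(s *v1.Service) {
		s.Spec.Type = v1.ServiceTypeClusterIP
		s.Spec.ExternalName = "" // must be empty for non-ExternalName types
		s.Spec.Ports = []v1.ServicePort{{Port: 80, Name: "http", Protocol: v1.ProtocolTCP}}
	})
	framework.ExpectNoError(err)
	framework.Logf("ClusterIP allocated: %s", clusterIPService.Spec.ClusterIP)
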
@ -1087,10 +1084,10 @@ var _ = SIGDescribe("Services", func() {
framework.ConformanceIt("should be able to change the type from ClusterIP to ExternalName", func() {
serviceName := "clusterip-service"
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)

ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns)
clusterIPService, err := jig.CreateTCPService(ns, nil)
_, err := jig.CreateTCPService(nil)
framework.ExpectNoError(err)
defer func() {
framework.Logf("Cleaning up the ClusterIP to ExternalName test service")
@ -1106,14 +1103,14 @@ var _ = SIGDescribe("Services", func() {
}()

ginkgo.By("changing the ClusterIP service to type=ExternalName")
externalNameService, err := jig.UpdateService(ns, clusterIPService.Name, func(s *v1.Service) {
externalNameService, err := jig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeExternalName
s.Spec.ExternalName = externalServiceFQDN
s.Spec.ClusterIP = ""
})
framework.ExpectNoError(err)
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil)
err = jig.CheckServiceReachability(ns, externalNameService, execPod)
err = jig.CheckServiceReachability(externalNameService, execPod)
framework.ExpectNoError(err)
})

@ -1127,10 +1124,10 @@ var _ = SIGDescribe("Services", func() {
framework.ConformanceIt("should be able to change the type from NodePort to ExternalName", func() {
serviceName := "nodeport-service"
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)

ginkgo.By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns)
nodePortService, err := jig.CreateTCPService(ns, func(svc *v1.Service) {
_, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
})
framework.ExpectNoError(err)
@ -1148,7 +1145,7 @@ var _ = SIGDescribe("Services", func() {
}()

ginkgo.By("changing the NodePort service to type=ExternalName")
externalNameService, err := jig.UpdateService(ns, nodePortService.Name, func(s *v1.Service) {
externalNameService, err := jig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeExternalName
s.Spec.ExternalName = externalServiceFQDN
s.Spec.ClusterIP = ""
@ -1156,7 +1153,7 @@ var _ = SIGDescribe("Services", func() {
})
framework.ExpectNoError(err)
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil)
err = jig.CheckServiceReachability(ns, externalNameService, execPod)
err = jig.CheckServiceReachability(externalNameService, execPod)
framework.ExpectNoError(err)
})

@ -1507,7 +1504,7 @@ var _ = SIGDescribe("Services", func() {

namespace := f.Namespace.Name
serviceName := "lb-sourcerange"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)

ginkgo.By("Prepare allow source ips")
// prepare the exec pods
@ -1518,7 +1515,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("creating a pod to be part of the service " + serviceName)
// This container is an nginx container listening on port 80
// See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response
_, err = jig.Run(namespace, nil)
_, err = jig.Run(nil)
framework.ExpectNoError(err)
// Make sure acceptPod is running. There are certain chances that pod might be terminated due to unexpected reasons.
acceptPod, err = cs.CoreV1().Pods(namespace).Get(acceptPod.Name, metav1.GetOptions{})
@ -1527,7 +1524,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNotEqual(acceptPod.Status.PodIP, "")

// Create loadbalancer service with source range from node[0] and podAccept
svc, err := jig.CreateTCPService(namespace, func(svc *v1.Service) {
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.LoadBalancerSourceRanges = []string{acceptPod.Status.PodIP + "/32"}
})
@ -1538,7 +1535,7 @@ var _ = SIGDescribe("Services", func() {
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}()

svc, err = jig.WaitForLoadBalancer(namespace, serviceName, loadBalancerCreateTimeout)
svc, err = jig.WaitForLoadBalancer(loadBalancerCreateTimeout)
framework.ExpectNoError(err)

// timeout when we haven't just created the load balancer
@ -1558,7 +1555,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNotEqual(acceptPod.Status.PodIP, "")

ginkgo.By("Update service LoadBalancerSourceRange and check reachability")
_, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
_, err = jig.UpdateService(func(svc *v1.Service) {
// only allow access from dropPod
svc.Spec.LoadBalancerSourceRanges = []string{dropPod.Status.PodIP + "/32"}
})
@ -1567,7 +1564,7 @@ var _ = SIGDescribe("Services", func() {
framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPod.Name, svcIP)

ginkgo.By("Delete LoadBalancerSourceRange field and check reachability")
_, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.LoadBalancerSourceRanges = nil
})
framework.ExpectNoError(err)
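
LoadBalancerSourceRanges acts as an allowlist on the cloud load balancer; the test above narrows it to a single pod IP and then removes it. A sketch of the two updates (pod variables assumed from the surrounding test):

	// Only dropPod's /32 may reach the load balancer now.
	_, err = jig.UpdateService(func(svc *v1.Service) {
		svc.Spec.LoadBalancerSourceRanges = []string{dropPod.Status.PodIP + "/32"}
	})
	framework.ExpectNoError(err)

	// Clearing the field restores the default allow-all behavior.
	_, err = jig.UpdateService(func(svc *v1.Service) {
		svc.Spec.LoadBalancerSourceRanges = nil
	})
	framework.ExpectNoError(err)
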
@ -1590,10 +1587,10 @@ var _ = SIGDescribe("Services", func() {

namespace := f.Namespace.Name
serviceName := "lb-internal"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)

ginkgo.By("creating pod to be part of service " + serviceName)
_, err = jig.Run(namespace, nil)
_, err = jig.Run(nil)
framework.ExpectNoError(err)

enableILB, disableILB := e2eservice.EnableAndDisableInternalLB()
@ -1605,7 +1602,7 @@ var _ = SIGDescribe("Services", func() {
}

ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled")
svc, err := jig.CreateTCPService(namespace, func(svc *v1.Service) {
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
enableILB(svc)
})
@ -1616,7 +1613,7 @@ var _ = SIGDescribe("Services", func() {
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}()

svc, err = jig.WaitForLoadBalancer(namespace, serviceName, createTimeout)
svc, err = jig.WaitForLoadBalancer(createTimeout)
framework.ExpectNoError(err)
lbIngress := &svc.Status.LoadBalancer.Ingress[0]
svcPort := int(svc.Spec.Ports[0].Port)
@ -1651,13 +1648,13 @@ var _ = SIGDescribe("Services", func() {
}

ginkgo.By("switching to external type LoadBalancer")
svc, err = jig.UpdateService(namespace, serviceName, func(svc *v1.Service) {
svc, err = jig.UpdateService(func(svc *v1.Service) {
disableILB(svc)
})
framework.ExpectNoError(err)
framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
svc, err := cs.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -1679,14 +1676,14 @@ var _ = SIGDescribe("Services", func() {
if framework.ProviderIs("azure") {
ginkgo.By("switching back to internal type LoadBalancer, with static IP specified.")
internalStaticIP := "10.240.11.11"
svc, err = jig.UpdateService(namespace, serviceName, func(svc *v1.Service) {
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.LoadBalancerIP = internalStaticIP
enableILB(svc)
})
framework.ExpectNoError(err)
framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
svc, err := cs.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -1721,11 +1718,11 @@ var _ = SIGDescribe("Services", func() {

namespace := f.Namespace.Name
serviceName := "lb-hc-int"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)

ginkgo.By("create load balancer service")
// Create loadbalancer service with source range from node[0] and podAccept
svc, err := jig.CreateTCPService(namespace, func(svc *v1.Service) {
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
})
framework.ExpectNoError(err)
@ -1735,7 +1732,7 @@ var _ = SIGDescribe("Services", func() {
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}()

svc, err = jig.WaitForLoadBalancer(namespace, serviceName, e2eservice.LoadBalancerCreateTimeoutDefault)
svc, err = jig.WaitForLoadBalancer(e2eservice.LoadBalancerCreateTimeoutDefault)
framework.ExpectNoError(err)

hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
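
EnableAndDisableInternalLB returns a pair of provider-specific tweaks that set or clear the internal-load-balancer annotation, so the same test body runs on any cloud that supports it. A sketch of the toggle as used above:

	enableILB, disableILB := e2eservice.EnableAndDisableInternalLB()

	// Internal LB at creation time...
	svc, err := jig.CreateTCPService(func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeLoadBalancer
		enableILB(svc)
	})
	framework.ExpectNoError(err)

	// ...later switched to an external LB in place.
	svc, err = jig.UpdateService(func(svc *v1.Service) {
		disableILB(svc)
	})
	framework.ExpectNoError(err)
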
@ -1876,7 +1873,7 @@ var _ = SIGDescribe("Services", func() {
podToggledNames, svcToggledIP, err := e2eservice.StartServeHostnameService(cs, svcToggled, ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns)

jig := e2eservice.NewTestJig(cs, svcToggled.ObjectMeta.Name)
jig := e2eservice.NewTestJig(cs, ns, svcToggled.ObjectMeta.Name)

hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
@ -1892,7 +1889,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))

ginkgo.By("adding service-proxy-name label")
_, err = jig.UpdateService(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) {
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.ObjectMeta.Labels = serviceProxyNameLabels
})
framework.ExpectNoError(err)
@ -1901,7 +1898,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort))

ginkgo.By("removing service-proxy-name annotation")
_, err = jig.UpdateService(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) {
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.ObjectMeta.Labels = nil
})
framework.ExpectNoError(err)
@ -1940,7 +1937,7 @@ var _ = SIGDescribe("Services", func() {
podHeadlessToggledNames, svcHeadlessToggledIP, err := e2eservice.StartServeHostnameService(cs, svcHeadlessToggled, ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcHeadlessToggledIP, ns)

jig := e2eservice.NewTestJig(cs, svcHeadlessToggled.ObjectMeta.Name)
jig := e2eservice.NewTestJig(cs, ns, svcHeadlessToggled.ObjectMeta.Name)

hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
@ -1956,7 +1953,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceDown(cs, host, svcHeadlessIP, servicePort))

ginkgo.By("adding service.kubernetes.io/headless label")
_, err = jig.UpdateService(ns, svcHeadlessToggled.ObjectMeta.Name, func(svc *v1.Service) {
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.ObjectMeta.Labels = serviceHeadlessLabels
})
framework.ExpectNoError(err)
@ -1965,7 +1962,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceDown(cs, host, svcHeadlessToggledIP, servicePort))

ginkgo.By("removing service.kubernetes.io/headless annotation")
_, err = jig.UpdateService(ns, svcHeadlessToggled.ObjectMeta.Name, func(svc *v1.Service) {
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.ObjectMeta.Labels = nil
})
framework.ExpectNoError(err)
@ -1980,13 +1977,13 @@ var _ = SIGDescribe("Services", func() {
ginkgo.It("should be rejected when no endpoints exist", func() {
namespace := f.Namespace.Name
serviceName := "no-pods"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)
nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
framework.ExpectNoError(err)
port := 80

ginkgo.By("creating a service with no endpoints")
_, err = jig.CreateTCPServiceWithPort(namespace, nil, int32(port))
_, err = jig.CreateTCPServiceWithPort(nil, int32(port))
framework.ExpectNoError(err)

nodeName := nodes.Items[0].Name
@ -2027,10 +2024,10 @@ var _ = SIGDescribe("Services", func() {
// 3. Update service to type=LoadBalancer. Finalizer should be added.
// 4. Delete service with type=LoadBalancer. Finalizer should be removed.
ginkgo.It("should handle load balancer cleanup finalizer for service [Slow]", func() {
jig := e2eservice.NewTestJig(cs, "lb-finalizer")
jig := e2eservice.NewTestJig(cs, f.Namespace.Name, "lb-finalizer")

ginkgo.By("Create load balancer service")
svc, err := jig.CreateTCPService(f.Namespace.Name, func(svc *v1.Service) {
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
})
framework.ExpectNoError(err)
@ -2041,19 +2038,19 @@ var _ = SIGDescribe("Services", func() {
}()

ginkgo.By("Wait for load balancer to serve traffic")
svc, err = jig.WaitForLoadBalancer(svc.Namespace, svc.Name, e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
framework.ExpectNoError(err)

ginkgo.By("Check if finalizer presents on service with type=LoadBalancer")
e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true)

ginkgo.By("Check if finalizer is removed on service after changed to type=ClusterIP")
err = jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
framework.ExpectNoError(err)
e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, false)

ginkgo.By("Check if finalizer is added back to service after changed to type=LoadBalancer")
err = jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeLoadBalancer, e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
err = jig.ChangeServiceType(v1.ServiceTypeLoadBalancer, e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
framework.ExpectNoError(err)
e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true)
})
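
ChangeServiceType is the jig's convenience wrapper for flipping Spec.Type and waiting out the load-balancer consequences; the finalizer test above drives it both ways. A sketch of one round trip (helper names from the surrounding test):

	timeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
	// Dropping to ClusterIP should release the LB and remove the finalizer...
	err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, timeout)
	framework.ExpectNoError(err)
	e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, false)
	// ...and going back to LoadBalancer re-adds it.
	err = jig.ChangeServiceType(v1.ServiceTypeLoadBalancer, timeout)
	framework.ExpectNoError(err)
	e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true)
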
@ -2094,9 +2091,9 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.It("should work for type=LoadBalancer", func() {
namespace := f.Namespace.Name
serviceName := "external-local-lb"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)

svc, err := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
framework.ExpectNoError(err)
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
@ -2104,12 +2101,12 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.Failf("Service HealthCheck NodePort was not allocated")
}
defer func() {
err = jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err)

// Make sure we didn't leak the health check node port.
threshold := 2
nodes, err := jig.GetEndpointNodes(svc)
nodes, err := jig.GetEndpointNodes()
framework.ExpectNoError(err)
for _, ips := range nodes {
err := e2eservice.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold)
@ -2136,9 +2133,9 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.It("should work for type=NodePort", func() {
namespace := f.Namespace.Name
serviceName := "external-local-nodeport"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)

svc, err := jig.CreateOnlyLocalNodePortService(namespace, serviceName, true)
svc, err := jig.CreateOnlyLocalNodePortService(true)
framework.ExpectNoError(err)
defer func() {
err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
@ -2146,7 +2143,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
}()

tcpNodePort := int(svc.Spec.Ports[0].NodePort)
endpointsNodeMap, err := jig.GetEndpointNodes(svc)
endpointsNodeMap, err := jig.GetEndpointNodes()
framework.ExpectNoError(err)
path := "/clientip"

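
GetEndpointNodes, now argument-free, maps node names to their addresses for the nodes that carry an endpoint of the jig's service; the ESIPP tests probe the health-check node port on each. A sketch of the loop (the expected-success flag is false here because the service was just reverted to ClusterIP, so the port must not be leaked):

	nodes, err := jig.GetEndpointNodes()
	framework.ExpectNoError(err)
	for _, ips := range nodes {
		// Probe /healthz on the health check node port and expect failure.
		err := e2eservice.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz",
			e2eservice.KubeProxyEndpointLagTimeout, false, 2)
		framework.ExpectNoError(err)
	}
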
@ -2165,11 +2162,11 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.It("should only target nodes with endpoints", func() {
namespace := f.Namespace.Name
serviceName := "external-local-nodes"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)
nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
framework.ExpectNoError(err)

svc, err := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false,
svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, false,
func(svc *v1.Service) {
// Change service port to avoid collision with opened hostPorts
// in other tests that run in parallel.
@ -2182,7 +2179,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.ExpectNoError(err)
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
defer func() {
err = jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
framework.ExpectNoError(err)
@ -2204,7 +2201,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
endpointNodeName := nodes.Items[i].Name

ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
_, err = jig.Run(namespace, func(rc *v1.ReplicationController) {
_, err = jig.Run(func(rc *v1.ReplicationController) {
rc.Name = serviceName
if endpointNodeName != "" {
rc.Spec.Template.Spec.NodeName = endpointNodeName
@ -2213,7 +2210,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.ExpectNoError(err)

ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName))
err = jig.WaitForEndpointOnNode(namespace, serviceName, endpointNodeName)
err = jig.WaitForEndpointOnNode(endpointNodeName)
framework.ExpectNoError(err)

// HealthCheck should pass only on the node where num(endpoints) > 0
@ -2237,13 +2234,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
var err error
namespace := f.Namespace.Name
serviceName := "external-local-pods"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)

svc, err := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
framework.ExpectNoError(err)
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
defer func() {
err = jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
framework.ExpectNoError(err)
@ -2294,7 +2291,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.It("should handle updates to ExternalTrafficPolicy field", func() {
namespace := f.Namespace.Name
serviceName := "external-local-update"
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, namespace, serviceName)

nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
framework.ExpectNoError(err)
@ -2302,11 +2299,11 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
}

svc, err := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
framework.ExpectNoError(err)
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
defer func() {
err = jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
framework.ExpectNoError(err)
@ -2316,7 +2313,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)

ginkgo.By("turning ESIPP off")
svc, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
})
framework.ExpectNoError(err)
@ -2324,7 +2321,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.Failf("Service HealthCheck NodePort still present")
}

endpointNodeMap, err := jig.GetEndpointNodes(svc)
endpointNodeMap, err := jig.GetEndpointNodes()
framework.ExpectNoError(err)
noEndpointNodeMap := map[string][]string{}
for _, n := range nodes.Items {
@ -2385,7 +2382,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
// creation will fail.

ginkgo.By("setting ExternalTraffic field back to OnlyLocal")
svc, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
// Request the same healthCheckNodePort as before, to test the user-requested allocation path
svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
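
Toggling ExternalTrafficPolicy is now a plain UpdateService call; switching back to Local additionally lets the test pin the health-check node port it held before. A sketch of both directions (variables from the surrounding test):

	// Local -> Cluster: the health check node port must be deallocated.
	svc, err = jig.UpdateService(func(svc *v1.Service) {
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
	})
	framework.ExpectNoError(err)

	// Cluster -> Local: request the previously allocated port explicitly.
	svc, err = jig.UpdateService(func(svc *v1.Service) {
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
	})
	framework.ExpectNoError(err)
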
@ -2462,7 +2459,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
defer func() {
e2eservice.StopServeHostnameService(cs, ns, serviceName)
}()
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err = jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
var svcIP string
@ -2483,19 +2480,19 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
err := cs.CoreV1().Pods(ns).Delete(execPod.Name, nil)
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns)
}()
err = jig.CheckServiceReachability(ns, svc, execPod)
err = jig.CheckServiceReachability(svc, execPod)
framework.ExpectNoError(err)

if !isTransitionTest {
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
}
if isTransitionTest {
svc, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
framework.ExpectNoError(err)
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())
svc, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
framework.ExpectNoError(err)
@ -2521,9 +2518,9 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
_, _, err := e2eservice.StartServeHostnameService(cs, svc, ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)
ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName)
svc, err = jig.WaitForLoadBalancer(ns, serviceName, e2eservice.LoadBalancerCreateTimeoutDefault)
svc, err = jig.WaitForLoadBalancer(e2eservice.LoadBalancerCreateTimeoutDefault)
framework.ExpectNoError(err)
defer func() {
podNodePairs, err := e2enode.PodNodePairs(cs, ns)
@ -2540,12 +2537,12 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
gomega.Expect(e2eservice.CheckAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
}
if isTransitionTest {
svc, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
framework.ExpectNoError(err)
gomega.Expect(e2eservice.CheckAffinity(nil, ingressIP, port, false)).To(gomega.BeTrue())
svc, err = jig.UpdateService(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
framework.ExpectNoError(err)
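
The affinity helpers follow the same shape for the transition case: flip SessionAffinity off, verify requests spread, flip it back on, verify they stick. A sketch, assuming CheckAffinity's final argument is whether affinity is expected to hold:

	svc, err = jig.UpdateService(func(svc *v1.Service) {
		svc.Spec.SessionAffinity = v1.ServiceAffinityNone
	})
	framework.ExpectNoError(err)
	// With affinity off, responses should no longer stick to one backend.
	gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())

	svc, err = jig.UpdateService(func(svc *v1.Service) {
		svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
	})
	framework.ExpectNoError(err)
	gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
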
@ -43,16 +43,16 @@ func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") }
// Setup creates a service with a load balancer and makes sure it's reachable.
func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
serviceName := "service-test"
jig := e2eservice.NewTestJig(f.ClientSet, serviceName)
jig := e2eservice.NewTestJig(f.ClientSet, f.Namespace.Name, serviceName)

ns := f.Namespace

ginkgo.By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
tcpService, err := jig.CreateTCPService(ns.Name, func(s *v1.Service) {
tcpService, err := jig.CreateTCPService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
framework.ExpectNoError(err)
tcpService, err = jig.WaitForLoadBalancer(ns.Name, tcpService.Name, e2eservice.LoadBalancerCreateTimeoutDefault)
tcpService, err = jig.WaitForLoadBalancer(e2eservice.LoadBalancerCreateTimeoutDefault)
framework.ExpectNoError(err)

// Get info to hit it with
@ -60,12 +60,12 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
svcPort := int(tcpService.Spec.Ports[0].Port)

ginkgo.By("creating pod to be part of service " + serviceName)
rc, err := jig.Run(ns.Name, jig.AddRCAntiAffinity)
rc, err := jig.Run(jig.AddRCAntiAffinity)
framework.ExpectNoError(err)

if shouldTestPDBs() {
ginkgo.By("creating a PodDisruptionBudget to cover the ReplicationController")
_, err = jig.CreatePDB(ns.Name, rc)
_, err = jig.CreatePDB(rc)
framework.ExpectNoError(err)
}

@ -42,12 +42,12 @@ var _ = SIGDescribe("Services", func() {
serviceName := "nodeport-test"
ns := f.Namespace.Name

jig := e2eservice.NewTestJig(cs, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)
nodeIP, err := e2enode.PickIP(jig.Client)
framework.ExpectNoError(err)

ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
svc, err := jig.CreateTCPService(ns, func(svc *v1.Service) {
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
})
framework.ExpectNoError(err)
@ -55,7 +55,7 @@ var _ = SIGDescribe("Services", func() {
nodePort := int(svc.Spec.Ports[0].NodePort)

ginkgo.By("creating Pod to be part of service " + serviceName)
_, err = jig.Run(ns, nil)
_, err = jig.Run(nil)
framework.ExpectNoError(err)

// using hybrid_network methods