Merge pull request #94822 from JornShen/replace_e2essh_e2e_service_tests
Replace e2essh on e2e service tests
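
This change drops the SSH dependency from the serve-hostname service checks in the networking and services e2e tests. verifyServeHostnameServiceUp loses its host parameter: instead of wget-ing the service from an SSH-able node, it launches a host-network exec pod (via launchHostExecPod) alongside the usual pod-network exec pod and runs the probe in both through framework.RunHostCmd. verifyServeHostnameServiceDown likewise takes a namespace instead of a host and curls the service from a host-network exec pod, treating an "echo service-down-failed" marker in the output as "still up". With the SSH path gone, the affected tests no longer call e2essh.NodeSSHHosts and drop their SkipUnlessProviderIs(framework.ProvidersWithSSH...) / SkipUnlessSSHKeyPresent() guards.
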
Commit: 9646d5998c
@@ -491,7 +491,7 @@ var _ = SIGDescribe("Networking", func() {
 		}
 
 		ginkgo.By("verifying that kube-proxy rules are eventually recreated")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(f.ClientSet, ns, host, podNames, svcIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(f.ClientSet, ns, podNames, svcIP, servicePort))
 
 		ginkgo.By("verifying that kubelet rules are eventually recreated")
 		err = utilwait.PollImmediate(framework.Poll, framework.RestartNodeReadyAgainTimeout, func() (bool, error) {
@@ -303,56 +303,44 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er
 }
 
 // verifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the
-// given host and from within a pod. The host is expected to be an SSH-able node
-// in the cluster. Each pod in the service is expected to echo its name. These
-// names are compared with the given expectedPods list after a sort | uniq.
-func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
-	execPod := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil)
+// the host exec pod of host network type and from the exec pod of container network type.
+// Each pod in the service is expected to echo its name. These names are compared with the
+// given expectedPods list after a sort | uniq.
+func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods []string, serviceIP string, servicePort int) error {
+	// to verify from host network
+	hostExecPod := launchHostExecPod(c, ns, "verify-service-up-host-exec-pod")
+
+	// to verify from container's network
+	execPod := e2epod.CreateExecPodOrFail(c, ns, "verify-service-up-exec-pod-", nil)
 	defer func() {
+		e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
 		e2epod.DeletePodOrFail(c, ns, execPod.Name)
 	}()
 
-	// Loop a bunch of times - the proxy is randomized, so we want a good
-	// chance of hitting each backend at least once.
-	buildCommand := func(wget string) string {
-		serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
-		return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
-			50*len(expectedPods), wget, serviceIPPort)
-	}
-	commands := []func() string{
-		// verify service from node
-		func() string {
-			cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
-			framework.Logf("Executing cmd %q on host %v", cmd, host)
-			result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
-			if err != nil || result.Code != 0 {
-				e2essh.LogResult(result)
-				framework.Logf("error while SSH-ing to node: %v", err)
-			}
-			return result.Stdout
-		},
 	// verify service from pod
-		func() string {
-			cmd := buildCommand("wget -q -T 1 -O -")
-			framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPod.Name)
+	cmdFunc := func(podName string) string {
+		wgetCmd := "wget -q -T 1 -O -"
+		serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
+		cmd := fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
+			50*len(expectedPods), wgetCmd, serviceIPPort)
+		framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, podName)
 		// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
-			output, err := framework.RunHostCmd(ns, execPod.Name, cmd)
+		output, err := framework.RunHostCmd(ns, podName, cmd)
 		if err != nil {
-			framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPod.Name, err, output)
+			framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, podName, err, output)
 		}
 		return output
-		},
 	}
 
 	expectedEndpoints := sets.NewString(expectedPods...)
 	ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
-	for _, cmdFunc := range commands {
+	for _, podName := range []string{hostExecPod.Name, execPod.Name} {
 		passed := false
 		gotEndpoints := sets.NewString()
 
 		// Retry cmdFunc for a while
 		for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
-			for _, endpoint := range strings.Split(cmdFunc(), "\n") {
+			for _, endpoint := range strings.Split(cmdFunc(podName), "\n") {
 				trimmedEp := strings.TrimSpace(endpoint)
 				if trimmedEp != "" {
 					gotEndpoints.Insert(trimmedEp)
@@ -384,25 +372,32 @@ func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
 }
 
 // verifyServeHostnameServiceDown verifies that the given service isn't served.
-func verifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error {
+func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP string, servicePort int) error {
+	// verify from host network
+	hostExecPod := launchHostExecPod(c, ns, "verify-service-down-host-exec-pod")
+
+	defer func() {
+		e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
+	}()
+
 	ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
 	// The current versions of curl included in CentOS and RHEL distros
 	// misinterpret square brackets around IPv6 as globbing, so use the -g
 	// argument to disable globbing to handle the IPv6 case.
 	command := fmt.Sprintf(
-		"curl -g -s --connect-timeout 2 http://%s && exit 99", ipPort)
+		"curl -g -s --connect-timeout 2 http://%s && echo service-down-failed", ipPort)
 
-	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
-		result, err := e2essh.SSH(command, host, framework.TestContext.Provider)
+	for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
+		output, err := framework.RunHostCmd(ns, hostExecPod.Name, command)
 		if err != nil {
-			e2essh.LogResult(result)
-			framework.Logf("error while SSH-ing to node: %v", err)
+			framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", command, ns, hostExecPod.Name, err, output)
 		}
-		if result.Code != 99 {
+		if !strings.Contains(output, "service-down-failed") {
 			return nil
 		}
 		framework.Logf("service still alive - still waiting")
 	}
 
 	return fmt.Errorf("waiting for service to be down timed out")
 }
 
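
Aside (not part of the patch): a minimal sketch of the pattern the new helpers follow, written as a standalone function with hypothetical names (checkHostnamesFromExecPod, the "hostname-check-" pod prefix, the package clause). The framework calls themselves — e2epod.CreateExecPodOrFail, e2epod.DeletePodOrFail, framework.RunHostCmd — are the ones used in the diff above: create an exec pod, run the wget loop through kubectl exec, and compare the echoed hostnames against the expected backends.

package network

import (
	"fmt"
	"net"
	"strconv"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// checkHostnamesFromExecPod is an illustrative helper, not part of this change.
// It probes serviceIP:servicePort repeatedly from a freshly created exec pod
// and reports whether every expected backend hostname was seen at least once.
func checkHostnamesFromExecPod(c clientset.Interface, ns string, expected []string, serviceIP string, servicePort int) error {
	execPod := e2epod.CreateExecPodOrFail(c, ns, "hostname-check-", nil)
	defer e2epod.DeletePodOrFail(c, ns, execPod.Name)

	// Many wgets per expected backend so the randomized proxy has a fair
	// chance of hitting every endpoint at least once.
	target := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
	cmd := fmt.Sprintf("for i in $(seq 1 %d); do wget -q -T 1 -O - http://%s 2>&1 || true; echo; done",
		50*len(expected), target)

	output, err := framework.RunHostCmd(ns, execPod.Name, cmd)
	if err != nil {
		return fmt.Errorf("running %q in pod %s/%s: %v", cmd, ns, execPod.Name, err)
	}

	// Collect the distinct hostnames echoed back and compare with the expected set.
	got := sets.NewString()
	for _, line := range strings.Split(output, "\n") {
		if hostname := strings.TrimSpace(line); hostname != "" {
			got.Insert(hostname)
		}
	}
	if !got.Equal(sets.NewString(expected...)) {
		return fmt.Errorf("expected hostnames %v, got %v", expected, got.List())
	}
	return nil
}

The real verifyServeHostnameServiceUp does the same thing twice — once from the host-network exec pod and once from the pod-network exec pod — and keeps retrying for KubeProxyLagTimeout instead of failing on the first mismatch.
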
@@ -1060,12 +1055,6 @@ var _ = SIGDescribe("Services", func() {
 	})
 
 	ginkgo.It("should be able to up and down services", func() {
-		// TODO: use the ServiceTestJig here
-		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		// this test does not work if the Node does not support SSH Key
-		e2eskipper.SkipUnlessSSHKeyPresent()
-
 		ns := f.Namespace.Name
 		numPods, servicePort := 3, defaultServeHostnameServicePort
 
@@ -1080,27 +1069,20 @@ var _ = SIGDescribe("Services", func() {
 		podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
 		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
 
-		hosts, err := e2essh.NodeSSHHosts(cs)
-		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-		if len(hosts) == 0 {
-			framework.Failf("No ssh-able nodes")
-		}
-		host := hosts[0]
-
 		ginkgo.By("verifying service " + svc1 + " is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
 
 		ginkgo.By("verifying service " + svc2 + " is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 
 		// Stop service 1 and make sure it is gone.
 		ginkgo.By("stopping service " + svc1)
 		framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc1))
 
 		ginkgo.By("verifying service " + svc1 + " is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svc1IP, servicePort))
 		ginkgo.By("verifying service " + svc2 + " is still up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 
 		// Start another service and verify both are up.
 		ginkgo.By("creating service " + svc3 + " in namespace " + ns)
@@ -1112,10 +1094,10 @@ var _ = SIGDescribe("Services", func() {
 		}
 
 		ginkgo.By("verifying service " + svc2 + " is still up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 
 		ginkgo.By("verifying service " + svc3 + " is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames3, svc3IP, servicePort))
 	})
 
 	ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() {
@@ -1152,15 +1134,15 @@ var _ = SIGDescribe("Services", func() {
 		}
 		host := hosts[0]
 
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 
 		ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host))
 		if err := restartKubeProxy(host); err != nil {
 			framework.Failf("error restarting kube-proxy: %v", err)
 		}
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 	})
 
 	ginkgo.It("should work after restarting apiserver [Disruptive]", func() {
@@ -1180,14 +1162,7 @@ var _ = SIGDescribe("Services", func() {
 		podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
 		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
 
-		hosts, err := e2essh.NodeSSHHosts(cs)
-		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-		if len(hosts) == 0 {
-			framework.Failf("No ssh-able nodes")
-		}
-		host := hosts[0]
-
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
 
 		// Restart apiserver
 		ginkgo.By("Restarting apiserver")
@@ -1198,7 +1173,7 @@ var _ = SIGDescribe("Services", func() {
 		if err := waitForApiserverUp(cs); err != nil {
 			framework.Failf("error while waiting for apiserver up: %v", err)
 		}
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
 
 		// Create a new service and check if it's not reusing IP.
 		defer func() {
@@ -1210,8 +1185,8 @@ var _ = SIGDescribe("Services", func() {
 		if svc1IP == svc2IP {
 			framework.Failf("VIPs conflict: %v", svc1IP)
 		}
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 	})
 
 	/*
@@ -2590,11 +2565,6 @@ var _ = SIGDescribe("Services", func() {
 	})
 
 	ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func() {
-		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		// this test does not work if the Node does not support SSH Key
-		e2eskipper.SkipUnlessSSHKeyPresent()
-
 		ns := f.Namespace.Name
 		numPods, servicePort := 3, defaultServeHostnameServicePort
 		serviceProxyNameLabels := map[string]string{"service.kubernetes.io/service-proxy-name": "foo-bar"}
@@ -2617,18 +2587,11 @@ var _ = SIGDescribe("Services", func() {
 
 		jig := e2eservice.NewTestJig(cs, ns, svcToggled.ObjectMeta.Name)
 
-		hosts, err := e2essh.NodeSSHHosts(cs)
-		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-		if len(hosts) == 0 {
-			framework.Failf("No ssh-able nodes")
-		}
-		host := hosts[0]
-
 		ginkgo.By("verifying service is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podToggledNames, svcToggledIP, servicePort))
 
 		ginkgo.By("verifying service-disabled is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort))
 
 		ginkgo.By("adding service-proxy-name label")
 		_, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2637,7 +2600,7 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("verifying service is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcToggledIP, servicePort))
 
 		ginkgo.By("removing service-proxy-name annotation")
 		_, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2646,18 +2609,13 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("verifying service is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podToggledNames, svcToggledIP, servicePort))
 
 		ginkgo.By("verifying service-disabled is still not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort))
 	})
 
 	ginkgo.It("should implement service.kubernetes.io/headless", func() {
-		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		// this test does not work if the Node does not support SSH Key
-		e2eskipper.SkipUnlessSSHKeyPresent()
-
 		ns := f.Namespace.Name
 		numPods, servicePort := 3, defaultServeHostnameServicePort
 		serviceHeadlessLabels := map[string]string{v1.IsHeadlessService: ""}
@@ -2681,18 +2639,11 @@ var _ = SIGDescribe("Services", func() {
 
 		jig := e2eservice.NewTestJig(cs, ns, svcHeadlessToggled.ObjectMeta.Name)
 
-		hosts, err := e2essh.NodeSSHHosts(cs)
-		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-		if len(hosts) == 0 {
-			framework.Failf("No ssh-able nodes")
-		}
-		host := hosts[0]
-
 		ginkgo.By("verifying service is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
 
 		ginkgo.By("verifying service-headless is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort))
 
 		ginkgo.By("adding service.kubernetes.io/headless label")
 		_, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2701,7 +2652,7 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("verifying service is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessToggledIP, servicePort))
 
 		ginkgo.By("removing service.kubernetes.io/headless annotation")
 		_, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2710,10 +2661,10 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("verifying service is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
 
 		ginkgo.By("verifying service-headless is still not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort))
 	})
 
 	ginkgo.It("should be rejected when no endpoints exist", func() {
@@ -3621,6 +3572,7 @@ func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]s
 // launchHostExecPod launches a hostexec pod in the given namespace and waits
 // until it's Running
 func launchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
+	framework.Logf("Creating new host exec pod")
 	hostExecPod := e2epod.NewExecPodSpec(ns, name, true)
 	pod, err := client.CoreV1().Pods(ns).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
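
Note: launchHostExecPod is what replaces the SSH hop. Because the exec pod runs on the host network, a curl or wget against the service VIP from inside it goes through the node's own kube-proxy rules, just as the old SSH-based check did, but the command is delivered via kubectl exec (framework.RunHostCmd) instead of e2essh.SSH. That is why the callers above can drop e2essh.NodeSSHHosts and the SkipUnlessProviderIs(framework.ProvidersWithSSH...) / SkipUnlessSSHKeyPresent() guards.
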