[Federation] Separate the cleanup phases of service and service shards so that service shards can be cleaned up even after the service is deleted elsewhere.
This separation is necessary because the "Federated Service DNS should be able to discover a federated service" e2e test recently added a case that deletes the service from the federation control plane but not its shards from the underlying clusters. With cleanup implemented entirely in the AfterEach block keyed on the federated service object, none of the underlying shards were cleaned up in that case. Tracking the shard copy in a separate variable lets AfterEach clean up the shards even when the federated service object is already gone.
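In outline, the pattern this commit introduces is sketched below. This is a minimal illustration, not the real test file: the Service type and the createService, deleteService, and cleanupShards helpers are hypothetical stand-ins for v1.Service and the e2e helpers (createServiceOrFail, deleteServiceOrFail, cleanupServiceShardsAndProviderResources) that appear in the diff.

// Sketch of the two-phase cleanup introduced by this commit.
package federation_test

import (
	. "github.com/onsi/ginkgo"
)

type Service struct{ Name string }

// Hypothetical stand-ins for the real e2e helpers.
func createService(name string) *Service { return &Service{Name: name} }
func deleteService(s *Service)           {}
func cleanupShards(s *Service)           {}

var _ = Describe("DNS", func() {
	var (
		service      *Service // the federated service object
		serviceShard *Service // independent copy used only for shard cleanup
	)

	BeforeEach(func() {
		service = createService("federated-service")
		// Keep a separate copy: even if the test body deletes the
		// federated service and sets `service` to nil, shard cleanup
		// still has the information it needs.
		shardCopy := *service
		serviceShard = &shardCopy
	})

	AfterEach(func() {
		// Phase 1: delete the federated service, unless the test
		// already did so.
		if service != nil {
			deleteService(service)
			service = nil
		}
		// Phase 2: clean up the per-cluster shards independently.
		if serviceShard != nil {
			cleanupShards(serviceShard)
			serviceShard = nil
		}
	})
})

The essential point is that serviceShard survives service = nil, so service deletion and shard cleanup can run, or be skipped, independently of each other.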
parent 5e41d0904f
commit bcddb157e4
@@ -24,6 +24,7 @@ import (
 	"time"

 	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/conversion"
 	"k8s.io/kubernetes/test/e2e/framework"

 	. "github.com/onsi/ginkgo"
@@ -139,7 +140,8 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 	var _ = Describe("DNS", func() {

 		var (
-			service *v1.Service
+			service      *v1.Service
+			serviceShard *v1.Service
 		)

 		BeforeEach(func() {
@@ -168,8 +170,25 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 			}

 			createBackendPodsOrFail(clusters, nsName, FederatedServicePodName)
+
 			service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
-			waitForServiceShardsOrFail(nsName, service, clusters)
+			obj, err := conversion.NewCloner().DeepCopy(service)
+			// Cloning shouldn't fail. On the off-chance it does, we should
+			// shallow copy service to serviceShard before failing.
+			if err != nil {
+				serviceCopy := *service
+				serviceShard = &serviceCopy
+				framework.ExpectNoError(err, fmt.Sprintf("Error in deep copying service %q", service.Name))
+			}
+			var ok bool
+			serviceShard, ok = obj.(*v1.Service)
+			if !ok {
+				serviceCopy := *service
+				serviceShard = &serviceCopy
+				framework.ExpectNoError(err, fmt.Sprintf("Unexpected service object copied %T", obj))
+			}
+
+			waitForServiceShardsOrFail(nsName, serviceShard, clusters)
 		})

 		AfterEach(func() {
@@ -180,14 +199,19 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 			if service != nil {
 				deleteServiceOrFail(f.FederationClientset_1_5, nsName, service.Name)

-				By(fmt.Sprintf("Deleting service shards and their provider resources in underlying clusters for service %q in namespace %q", service.Name, nsName))
-				cleanupServiceShardsAndProviderResources(nsName, service, clusters)
-
 				service = nil
 			} else {
 				By("No service to delete. Service is nil")
 			}

+			if serviceShard != nil {
+				By(fmt.Sprintf("Deleting service shards and their provider resources in underlying clusters for service %q in namespace %q", serviceShard.Name, nsName))
+				cleanupServiceShardsAndProviderResources(nsName, serviceShard, clusters)
+				serviceShard = nil
+			} else {
+				By("No service shards to delete. `serviceShard` is nil")
+			}
+
 			// Delete the kube-dns config map from all clusters.
 			for clusterName, cluster := range clusters {
 				By(fmt.Sprintf("Deleting kube dns config map from cluster: %s", clusterName))
@@ -218,6 +242,9 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 			By("Deleting the service to verify that DNS rules still work")
 			err := f.FederationClientset_1_5.Services(nsName).Delete(FederatedServiceName, &v1.DeleteOptions{})
 			framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, service.Namespace)
+			// Service is deleted, unset the test block-global service variable.
+			service = nil
+
 			for i, DNSName := range svcDNSNames {
 				discoverService(f, DNSName, true, "federated-service-e2e-discovery-pod-"+strconv.Itoa(i))
 			}
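A note on the copy in the BeforeEach hunk: conversion.NewCloner().DeepCopy returns (interface{}, error), which is why the test needs both an error check and a checked type assertion, each with a shallow-copy fallback. Detached from the federation helpers, the shape of that pattern is roughly the sketch below; the Service type and the gob-based deepCopy are hypothetical stand-ins, not the Kubernetes API.

// Sketch of the copy-with-fallback pattern from the BeforeEach hunk.
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

type Service struct {
	Name  string
	Ports []int
}

// deepCopy returns interface{}, like the cloner in the diff, so the
// caller must type-assert the result.
func deepCopy(in *Service) (interface{}, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		return nil, err
	}
	out := &Service{}
	if err := gob.NewDecoder(&buf).Decode(out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	service := &Service{Name: "federated-service", Ports: []int{80}}

	var serviceShard *Service
	obj, err := deepCopy(service)
	if err != nil {
		// In the real test framework.ExpectNoError aborts here; the
		// shallow copy ensures serviceShard is still usable by cleanup.
		serviceCopy := *service
		serviceShard = &serviceCopy
		fmt.Printf("deep copy failed, fell back to shallow copy: %v\n", err)
	}
	var ok bool
	serviceShard, ok = obj.(*Service)
	if !ok {
		serviceCopy := *service
		serviceShard = &serviceCopy
	}

	// A successful deep copy is independent of the original.
	serviceShard.Ports[0] = 443
	fmt.Println(service.Ports[0], serviceShard.Ports[0]) // prints: 80 443
}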