mirror of https://github.com/k3s-io/kubernetes.git
Add Federated Service update test case
This commit is contained in:
parent 2d79d53fb2
commit a152b9919a
@@ -108,7 +108,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 		}
 	})

-	It("should create matching services in underlying clusters", func() {
+	It("should create and update matching services in underlying clusters", func() {
 		fedframework.SkipUnlessFederated(f.ClientSet)
 		service = createServiceOrFail(f.FederationClientset, nsName, FederatedServiceName)
 		defer func() { // Cleanup
@@ -116,7 +116,13 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 			err := f.FederationClientset.Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
 			framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, nsName)
 		}()
 		By(fmt.Sprintf("Wait for service shards to be created in all clusters for service \"%s/%s\"", nsName, service.Name))
 		waitForServiceShardsOrFail(nsName, service, clusters)
+		framework.Logf("Successfully created and synced service \"%s/%s\" to all clusters", nsName, service.Name)
+
+		By(fmt.Sprintf("Update federated service \"%s/%s\"", nsName, service.Name))
+		service = updateServiceOrFail(f.FederationClientset, nsName, FederatedServiceName)
+		waitForServiceShardsOrFail(nsName, service, clusters)
+		framework.Logf("Successfully updated and synced service \"%s/%s\" to clusters", nsName, service.Name)
 	})

 	It("should be deleted from underlying clusters when OrphanDependents is false", func() {
@@ -330,18 +336,31 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
 	}
 }

+func updateServiceOrFail(clientset *fedclientset.Clientset, namespace, name string) *v1.Service {
+	service, err := clientset.Services(namespace).Get(name, metav1.GetOptions{})
+	framework.ExpectNoError(err, "Getting service %q in namespace %q", name, namespace)
+	service.Spec.Selector["name"] = "update-demo"
+	newService, err := clientset.Services(namespace).Update(service)
+	framework.ExpectNoError(err, "Updating service %q in namespace %q", name, namespace)
+	By(fmt.Sprintf("Successfully updated federated service %q in namespace %q", name, namespace))
+	return newService
+}
+
 // equivalent returns true if the two services are equivalent. Fields which are expected to differ between
-// federated services and the underlying cluster services (e.g. ClusterIP, LoadBalancerIP etc) are ignored.
+// federated services and the underlying cluster services (e.g. ClusterIP, NodePort) are ignored.
 func equivalent(federationService, clusterService v1.Service) bool {
 	// TODO: I think that we need a DeepCopy here to avoid clobbering our parameters.
 	clusterService.Spec.ClusterIP = federationService.Spec.ClusterIP
 	clusterService.Spec.ExternalIPs = federationService.Spec.ExternalIPs
 	clusterService.Spec.LoadBalancerIP = federationService.Spec.LoadBalancerIP
 	clusterService.Spec.LoadBalancerSourceRanges = federationService.Spec.LoadBalancerSourceRanges
 	// N.B. We cannot iterate over the port objects directly, as their values
 	// only get copied and our updates will get lost.
 	for i := range clusterService.Spec.Ports {
 		clusterService.Spec.Ports[i].NodePort = federationService.Spec.Ports[i].NodePort
 	}
-	return reflect.DeepEqual(clusterService.Spec, federationService.Spec)
+	if federationService.Name != clusterService.Name || federationService.Namespace != clusterService.Namespace {
+		return false
+	}
+	if !reflect.DeepEqual(federationService.Labels, clusterService.Labels) && (len(federationService.Labels) != 0 || len(clusterService.Labels) != 0) {
+		return false
+	}
+	if !reflect.DeepEqual(federationService.Spec, clusterService.Spec) {
+		return false
+	}
+	return true
 }
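The "N.B." comment in the hunk above reflects a general Go rule: a range loop variable is a copy of the slice element, so writes to it are lost, while indexing into the slice mutates the element in place. A minimal standalone sketch of the difference, using a stand-in ServicePort struct rather than the real v1.ServicePort:

package main

import "fmt"

type ServicePort struct {
	Name     string
	NodePort int32
}

func main() {
	ports := []ServicePort{{Name: "http"}}

	// Ranging by value copies each element; the write lands on the copy and is lost.
	for _, p := range ports {
		p.NodePort = 30080
	}
	fmt.Println(ports[0].NodePort) // 0

	// Indexing writes through to the slice's backing array.
	for i := range ports {
		ports[i].NodePort = 30080
	}
	fmt.Println(ports[0].NodePort) // 30080
}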
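The TODO about needing a DeepCopy points at a real hazard: equivalent takes its arguments by value, so assignments to scalar Spec fields such as ClusterIP stay local, but the Ports slice header still points at the caller's backing array, so the NodePort writes in the loop above do leak back to the caller. A small sketch with stand-in types (not the real v1.Service) demonstrating both behaviors:

package main

import "fmt"

type Spec struct {
	ClusterIP string
	Ports     []int32
}

type Service struct {
	Spec Spec
}

// normalize mimics the pass-by-value parameter in equivalent().
func normalize(svc Service) {
	svc.Spec.ClusterIP = "ignored" // scalar field: only the local copy changes
	svc.Spec.Ports[0] = 30080      // slice element: shared backing array, caller sees this
}

func main() {
	s := Service{Spec: Spec{ClusterIP: "10.0.0.1", Ports: []int32{80}}}
	normalize(s)
	fmt.Println(s.Spec.ClusterIP) // 10.0.0.1 (unchanged)
	fmt.Println(s.Spec.Ports[0])  // 30080 (mutated through the shared backing array)
}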