Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00.
Merge pull request #41364 from perotinus/fix-doc-comments

Automatic merge from submit-queue

[Federation] Modify the comments in Federation E2E tests to use standard Go conventions for documentation comments

```release-note
NONE
```

Commit 8e6643acd4
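For context, the convention this PR applies throughout the diff below: a Go doc comment is a block of `//` line comments placed immediately above the declaration, beginning with the declared name, rather than a detached `/* ... */` block. A minimal before/after sketch (the `frobnicate` helpers are hypothetical, not taken from this diff):

```go
package example

// Old style (what the PR removes): a block comment above the declaration.
/*
frobnicate reports whether two widgets match.
*/
func frobnicateOld(a, b string) bool { return a == b }

// New style (what the PR adds): line comments starting with the name,
// which is the form godoc and golint expect for documentation comments.

// frobnicate reports whether two widgets match.
func frobnicate(a, b string) bool { return a == b }
```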
@@ -207,9 +207,7 @@ func deleteAllIngressesOrFail(clientset *fedclientset.Clientset, nsName string)
 	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Error in deleting ingresses in namespace: %s", nsName))
 }
 
-/*
-equivalent returns true if the two ingress spec are equivalent.
-*/
+// equivalent returns true if the two ingress spec are equivalent.
 func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
 	return reflect.DeepEqual(clusterIngress.Spec, federatedIngress.Spec)
 }
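The equivalence check above leans entirely on reflect.DeepEqual over the two Spec structs. A standalone sketch of that technique, using a hypothetical spec type in place of the real v1beta1.IngressSpec:

```go
package main

import (
	"fmt"
	"reflect"
)

// fakeSpec stands in for a resource spec; hypothetical, for illustration only.
type fakeSpec struct {
	Host  string
	Paths []string
}

func main() {
	a := fakeSpec{Host: "example.com", Paths: []string{"/api"}}
	b := fakeSpec{Host: "example.com", Paths: []string{"/api"}}

	// reflect.DeepEqual compares recursively, including slice contents;
	// plain == is not even allowed on structs containing slices.
	fmt.Println(reflect.DeepEqual(a, b)) // true

	b.Paths = append(b.Paths, "/web")
	fmt.Println(reflect.DeepEqual(a, b)) // false
}
```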
@@ -244,10 +242,8 @@ func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, cluste
 	}
 }
 
-/*
-waitForIngressOrFail waits until a ingress is either present or absent in the cluster specified by clientset.
-If the condition is not met within timout, it fails the calling test.
-*/
+// waitForIngressOrFail waits until a ingress is either present or absent in the cluster specified by clientset.
+// If the condition is not met within timout, it fails the calling test.
 func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string, ingress *v1beta1.Ingress, present bool, timeout time.Duration) {
 	By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
 	var clusterIngress *v1beta1.Ingress
@@ -271,9 +267,7 @@ func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string,
 	}
 }
 
-/*
-waitForIngressShardsOrFail waits for the ingress to appear in all clusters
-*/
+// waitForIngressShardsOrFail waits for the ingress to appear in all clusters
 func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
 	framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 	for _, c := range clusters {
@@ -281,9 +275,7 @@ func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clus
 	}
 }
 
-/*
-waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
-*/
+// waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
 func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
 	framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 	for _, c := range clusters {
@@ -291,10 +283,8 @@ func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingres
 	}
 }
 
-/*
-waitForIngressUpdateOrFail waits until a ingress is updated in the specified cluster with same spec of federated ingress.
-If the condition is not met within timeout, it fails the calling test.
-*/
+// waitForIngressUpdateOrFail waits until a ingress is updated in the specified cluster with same spec of federated ingress.
+// If the condition is not met within timeout, it fails the calling test.
 func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, ingress *v1beta1.Ingress, timeout time.Duration) {
 	By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
 	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
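The waitFor...OrFail helpers in these hunks all share the same shape: wait.PollImmediate runs a condition function once immediately and then on an interval until it returns true, returns an error, or the timeout elapses. A reduced sketch of that pattern, assuming the k8s.io/apimachinery wait package of this era (the lookup it polls is a stand-in, not the real clientset call):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Poll every 100ms, give up after 2s. The helpers in this diff pass
	// framework.Poll and a caller-supplied timeout in the same slots.
	// The condition returns (true, nil) to stop successfully,
	// (false, nil) to keep polling, or a non-nil error to abort early.
	err := wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		attempts++
		// Pretend lookup: succeed on the third try, standing in for a
		// clientset Get that eventually finds the ingress shard.
		return attempts >= 3, nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```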
@@ -313,9 +303,7 @@ func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace st
 	framework.ExpectNoError(err, "Failed to verify ingress %q in namespace %q in cluster", ingress.Name, namespace)
 }
 
-/*
-waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
-*/
+// waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
 func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
 	framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 	for _, c := range clusters {
@@ -358,10 +358,8 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
 	}
 }
 
-/*
-equivalent returns true if the two services are equivalent. Fields which are expected to differ between
-federated services and the underlying cluster services (e.g. ClusterIP, LoadBalancerIP etc) are ignored.
-*/
+// equivalent returns true if the two services are equivalent. Fields which are expected to differ between
+// federated services and the underlying cluster services (e.g. ClusterIP, LoadBalancerIP etc) are ignored.
 func equivalent(federationService, clusterService v1.Service) bool {
 	// TODO: I think that we need a DeepCopy here to avoid clobbering our parameters.
 	clusterService.Spec.ClusterIP = federationService.Spec.ClusterIP
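Note the trick in equivalent: fields that legitimately differ per cluster (ClusterIP, LoadBalancerIP) are overwritten on the local copy before the deep comparison. The parameters are passed by value, so only the copies are clobbered; the TODO about DeepCopy concerns nested slices and pointers that a shallow copy still shares with the caller. A sketch of the idea with a hypothetical spec type in place of v1.ServiceSpec:

```go
package main

import (
	"fmt"
	"reflect"
)

// svcSpec is a hypothetical stand-in for v1.ServiceSpec.
type svcSpec struct {
	Port      int
	ClusterIP string // expected to differ between clusters
}

// specEquivalent neutralizes the per-cluster field, then deep-compares.
// The parameters are copies, so the caller's values are untouched as
// long as the struct holds no shared pointers, slices, or maps.
func specEquivalent(fed, cluster svcSpec) bool {
	cluster.ClusterIP = fed.ClusterIP
	return reflect.DeepEqual(fed, cluster)
}

func main() {
	a := svcSpec{Port: 80, ClusterIP: "10.0.0.1"}
	b := svcSpec{Port: 80, ClusterIP: "10.0.0.99"}
	fmt.Println(specEquivalent(a, b)) // true: ClusterIP is ignored
}
```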
@@ -52,10 +52,8 @@ var (
 
 var FederationSuite common.Suite
 
-/*
-cluster keeps track of the assorted objects and state related to each cluster
-in the federation
-*/
+// cluster keeps track of the assorted objects and state related to each cluster
+// in the federation
 type cluster struct {
 	name string
 	*kubeclientset.Clientset
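The cluster struct embeds *kubeclientset.Clientset anonymously, so every cluster value exposes the full client API directly through method promotion, without a named field. A minimal sketch of that embedding pattern with hypothetical types:

```go
package main

import "fmt"

// client is a hypothetical stand-in for *kubeclientset.Clientset.
type client struct{}

func (c *client) Ping() string { return "pong" }

// node embeds *client anonymously: the client's methods are promoted
// onto node, mirroring how cluster embeds *kubeclientset.Clientset.
type node struct {
	name string
	*client
}

func main() {
	n := node{name: "cluster-1", client: &client{}}
	// Ping is callable on node directly thanks to method promotion.
	fmt.Println(n.name, n.Ping())
}
```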
@@ -210,10 +208,8 @@ func getRegisteredClusters(userAgentName string, f *fedframework.Framework) (map
 	return clusters, primaryClusterName
 }
 
-/*
-waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
-If the condition is not met within timout, it fails the calling test.
-*/
+// waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
+// If the condition is not met within timout, it fails the calling test.
 func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
 	By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
 	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
@@ -235,9 +231,7 @@ func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string,
 	framework.ExpectNoError(err, "Failed to verify service %q in namespace %q in cluster: Present=%v", service.Name, namespace, present)
 }
 
-/*
-waitForServiceShardsOrFail waits for the service to appear in all clusters
-*/
+// waitForServiceShardsOrFail waits for the service to appear in all clusters
 func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters map[string]*cluster) {
 	framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
 	for _, c := range clusters {
@@ -463,10 +457,8 @@ func discoverService(f *fedframework.Framework, name string, exists bool, podNam
 	}
 }
 
-/*
-createBackendPodsOrFail creates one pod in each cluster, and returns the created pods (in the same order as clusterClientSets).
-If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
-*/
+// createBackendPodsOrFail creates one pod in each cluster, and returns the created pods (in the same order as clusterClientSets).
+// If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
 func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, name string) {
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -493,10 +485,8 @@ func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, nam
 	}
 }
 
-/*
-deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
-The test fails if there are any errors.
-*/
+// deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
+// The test fails if there are any errors.
 func deleteOneBackendPodOrFail(c *cluster) {
 	pod := c.backendPod
 	Expect(pod).ToNot(BeNil())
@@ -509,10 +499,8 @@ func deleteOneBackendPodOrFail(c *cluster) {
 	By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %q deleted or does not exist", pod.Name, pod.Namespace, c.name))
 }
 
-/*
-deleteBackendPodsOrFail deletes one pod from each cluster that has one.
-If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
-*/
+// deleteBackendPodsOrFail deletes one pod from each cluster that has one.
+// If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
 func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
 	for name, c := range clusters {
		if c.backendPod != nil {
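All of these helpers follow the Ginkgo/Gomega "OrFail" convention seen throughout the diff: rather than returning errors, they log a step with By, assert inline, and abort the running spec on failure. A reduced sketch of the convention, assuming the Ginkgo v1 and Gomega packages these e2e tests import (createWidgetOrFail and doSomething are hypothetical):

```go
package example

import (
	"fmt"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// doSomething is a hypothetical operation under test.
func doSomething() error { return nil }

// createWidgetOrFail follows the OrFail convention: it asserts inline and
// aborts the current spec instead of returning an error to the caller.
func createWidgetOrFail(name string) {
	By(fmt.Sprintf("Creating widget %q", name))
	err := doSomething()
	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Error creating widget %q", name))
}

var _ = Describe("Widgets", func() {
	It("creates a widget", func() {
		createWidgetOrFail("w1")
	})
})

// TestWidgets wires Gomega failures into Ginkgo and runs the suite.
func TestWidgets(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Widgets")
}
```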