Merge pull request #41364 from perotinus/fix-doc-comments

Automatic merge from submit-queue

[Federation] Modify the comments in Federation E2E tests to use standard Go conventions for documentation comments

```release-note
NONE
```
Authored by Kubernetes Submit Queue on 2017-02-21 07:06:55 -08:00; committed by GitHub
commit 8e6643acd4
3 changed files with 21 additions and 47 deletions
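
For reference, the Go documentation-comment convention this PR adopts is a block of `//` line comments placed directly above the declaration, with the first sentence beginning with the declared name. Both forms are picked up by godoc; the diff simply standardizes on the more common line-comment form. A minimal sketch (hypothetical package and helper names, not taken from the diff):

```go
// Package sketch illustrates the doc-comment style adopted in this PR:
// line comments directly above a declaration, beginning with its name.
package sketch

import "reflect"

// equivalentSpecs returns true if the two spec values are deeply equal.
// Tools such as godoc render this block as the function's documentation.
func equivalentSpecs(a, b interface{}) bool {
	return reflect.DeepEqual(a, b)
}
```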

@@ -207,9 +207,7 @@ func deleteAllIngressesOrFail(clientset *fedclientset.Clientset, nsName string)
 Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Error in deleting ingresses in namespace: %s", nsName))
 }
-/*
-equivalent returns true if the two ingress spec are equivalent.
-*/
+// equivalent returns true if the two ingress spec are equivalent.
 func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
 return reflect.DeepEqual(clusterIngress.Spec, federatedIngress.Spec)
 }
@@ -244,10 +242,8 @@ func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, cluste
 }
 }
-/*
-waitForIngressOrFail waits until a ingress is either present or absent in the cluster specified by clientset.
-If the condition is not met within timout, it fails the calling test.
-*/
+// waitForIngressOrFail waits until a ingress is either present or absent in the cluster specified by clientset.
+// If the condition is not met within timout, it fails the calling test.
 func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string, ingress *v1beta1.Ingress, present bool, timeout time.Duration) {
 By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
 var clusterIngress *v1beta1.Ingress
@@ -271,9 +267,7 @@ func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string,
 }
 }
-/*
-waitForIngressShardsOrFail waits for the ingress to appear in all clusters
-*/
+// waitForIngressShardsOrFail waits for the ingress to appear in all clusters
 func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
 framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 for _, c := range clusters {
@@ -281,9 +275,7 @@ func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clus
 }
 }
-/*
-waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
-*/
+// waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
 func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
 framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 for _, c := range clusters {
@@ -291,10 +283,8 @@ func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingres
 }
 }
-/*
-waitForIngressUpdateOrFail waits until a ingress is updated in the specified cluster with same spec of federated ingress.
-If the condition is not met within timeout, it fails the calling test.
-*/
+// waitForIngressUpdateOrFail waits until a ingress is updated in the specified cluster with same spec of federated ingress.
+// If the condition is not met within timeout, it fails the calling test.
 func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, ingress *v1beta1.Ingress, timeout time.Duration) {
 By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
@@ -313,9 +303,7 @@ func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace st
 framework.ExpectNoError(err, "Failed to verify ingress %q in namespace %q in cluster", ingress.Name, namespace)
 }
-/*
-waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
-*/
+// waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
 func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
 framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 for _, c := range clusters {

@@ -358,10 +358,8 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
 }
 }
-/*
-equivalent returns true if the two services are equivalent. Fields which are expected to differ between
-federated services and the underlying cluster services (e.g. ClusterIP, LoadBalancerIP etc) are ignored.
-*/
+// equivalent returns true if the two services are equivalent. Fields which are expected to differ between
+// federated services and the underlying cluster services (e.g. ClusterIP, LoadBalancerIP etc) are ignored.
 func equivalent(federationService, clusterService v1.Service) bool {
 // TODO: I think that we need a DeepCopy here to avoid clobbering our parameters.
 clusterService.Spec.ClusterIP = federationService.Spec.ClusterIP

@@ -52,10 +52,8 @@ var (
 var FederationSuite common.Suite
-/*
-cluster keeps track of the assorted objects and state related to each cluster
-in the federation
-*/
+// cluster keeps track of the assorted objects and state related to each cluster
+// in the federation
 type cluster struct {
 name string
 *kubeclientset.Clientset
@@ -210,10 +208,8 @@ func getRegisteredClusters(userAgentName string, f *fedframework.Framework) (map
 return clusters, primaryClusterName
 }
-/*
-waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
-If the condition is not met within timout, it fails the calling test.
-*/
+// waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
+// If the condition is not met within timout, it fails the calling test.
 func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
 By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
@@ -235,9 +231,7 @@ func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string,
 framework.ExpectNoError(err, "Failed to verify service %q in namespace %q in cluster: Present=%v", service.Name, namespace, present)
 }
-/*
-waitForServiceShardsOrFail waits for the service to appear in all clusters
-*/
+// waitForServiceShardsOrFail waits for the service to appear in all clusters
 func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters map[string]*cluster) {
 framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
 for _, c := range clusters {
@@ -463,10 +457,8 @@ func discoverService(f *fedframework.Framework, name string, exists bool, podNam
 }
 }
-/*
-createBackendPodsOrFail creates one pod in each cluster, and returns the created pods (in the same order as clusterClientSets).
-If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
-*/
+// createBackendPodsOrFail creates one pod in each cluster, and returns the created pods (in the same order as clusterClientSets).
+// If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
 func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, name string) {
 pod := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
@@ -493,10 +485,8 @@ func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, nam
 }
 }
-/*
-deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
-The test fails if there are any errors.
-*/
+// deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
+// The test fails if there are any errors.
 func deleteOneBackendPodOrFail(c *cluster) {
 pod := c.backendPod
 Expect(pod).ToNot(BeNil())
@@ -509,10 +499,8 @@ func deleteOneBackendPodOrFail(c *cluster) {
 By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %q deleted or does not exist", pod.Name, pod.Namespace, c.name))
 }
-/*
-deleteBackendPodsOrFail deletes one pod from each cluster that has one.
-If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
-*/
+// deleteBackendPodsOrFail deletes one pod from each cluster that has one.
+// If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
 func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
 for name, c := range clusters {
 if c.backendPod != nil {