From 1fa57d0a856140714c656b1ae6d81d2983d75b19 Mon Sep 17 00:00:00 2001
From: Kenichi Omichi
Date: Sat, 23 Mar 2019 00:27:21 +0000
Subject: [PATCH] Fix golint failures of e2e/framework/[g-j]*.go

---
 test/e2e/framework/get-kubemark-resource-usage.go | 2 ++
 test/e2e/framework/google_compute.go              | 4 ++++
 test/e2e/framework/gpu_util.go                    | 9 +++++----
 test/e2e/framework/jobs_util.go                   | 8 +++++---
 4 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/test/e2e/framework/get-kubemark-resource-usage.go b/test/e2e/framework/get-kubemark-resource-usage.go
index 2d13cf929d1..5c26d272d00 100644
--- a/test/e2e/framework/get-kubemark-resource-usage.go
+++ b/test/e2e/framework/get-kubemark-resource-usage.go
@@ -22,6 +22,7 @@ import (
 	"strings"
 )
 
+// KubemarkResourceUsage tracks the CPU and memory usage of a kubemark master component.
 type KubemarkResourceUsage struct {
 	Name                    string
 	MemoryWorkingSetInBytes uint64
@@ -36,6 +37,7 @@ func getMasterUsageByPrefix(prefix string) (string, error) {
 	return sshResult.Stdout, nil
 }
 
+// GetKubemarkMasterComponentsResourceUsage returns the CPU and memory usage of each kubemark master component, keyed by component name.
 // TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
 func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
 	result := make(map[string]*KubemarkResourceUsage)
diff --git a/test/e2e/framework/google_compute.go b/test/e2e/framework/google_compute.go
index 3c1ed0d5cdb..25f7564b4cb 100644
--- a/test/e2e/framework/google_compute.go
+++ b/test/e2e/framework/google_compute.go
@@ -108,6 +108,7 @@ func lookupClusterImageSources() (string, string, error) {
 	return masterImg, nodeImg, nil
 }
 
+// LogClusterImageSources writes out the cluster's master and node image sources.
 func LogClusterImageSources() {
 	masterImg, nodeImg, err := lookupClusterImageSources()
 	if err != nil {
@@ -129,6 +130,7 @@
 	}
 }
 
+// CreateManagedInstanceGroup creates a Google Compute Engine managed instance group of the given size in the given zone, based on the given instance template.
 func CreateManagedInstanceGroup(size int64, zone, template string) error {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud.
@@ -145,6 +147,7 @@ func CreateManagedInstanceGroup(size int64, zone, template string) error {
 	return nil
 }
 
+// GetManagedInstanceGroupTemplateName returns the name of the instance template used by the Google Compute Engine managed instance group in the given zone.
 func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud. Use InstanceGroupManager to get Instance Template name.
@@ -167,6 +170,7 @@ func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
 	return templateName, nil
 }
 
+// DeleteManagedInstanceGroup deletes the Google Compute Engine managed instance group in the given zone.
 func DeleteManagedInstanceGroup(zone string) error {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud.
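Illustration (not part of the patch): a minimal sketch of how a test outside the framework package might consume the map documented above, assuming a running kubemark cluster. The `logKubemarkMasterUsage` wrapper and its log format are hypothetical; `GetKubemarkMasterComponentsResourceUsage`, the `KubemarkResourceUsage` fields, and `framework.Logf` come from the framework itself.

```go
package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// logKubemarkMasterUsage logs the memory working set of every kubemark
// master component returned by the framework helper. The helper shells
// out over SSH to the kubemark master, so it only works against a
// running kubemark cluster.
func logKubemarkMasterUsage() {
	usage := framework.GetKubemarkMasterComponentsResourceUsage()
	for component, u := range usage {
		if u == nil {
			// Map values are pointers; skip any nil entries defensively.
			continue
		}
		framework.Logf("%s: memory working set = %d bytes", component, u.MemoryWorkingSetInBytes)
	}
}
```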
diff --git a/test/e2e/framework/gpu_util.go b/test/e2e/framework/gpu_util.go
index 15d78443330..dc873f3ac2b 100644
--- a/test/e2e/framework/gpu_util.go
+++ b/test/e2e/framework/gpu_util.go
@@ -24,20 +24,20 @@ import (
 )
 
 const (
-	// GPUResourceName is the extended name of the GPU resource since v1.8
+	// NVIDIAGPUResourceName is the extended name of the GPU resource since v1.8
 	// this uses the device plugin mechanism
 	NVIDIAGPUResourceName = "nvidia.com/gpu"
 
+	// GPUDevicePluginDSYAML is the official Google Device Plugin DaemonSet manifest for NVIDIA GPUs on GKE
 	// TODO: Parametrize it by making it a feature in TestFramework.
 	// so we can override the daemonset in other setups (non COS).
-	// GPUDevicePluginDSYAML is the official Google Device Plugin Daemonset NVIDIA GPU manifest for GKE
 	GPUDevicePluginDSYAML = "https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml"
 )
 
-// TODO make this generic and not linked to COS only
-// NumberOfGPUs returs the number of GPUs advertised by a node
+// NumberOfNVIDIAGPUs returns the number of GPUs advertised by a node
 // This is based on the Device Plugin system and expected to run on a COS based node
 // After the NVIDIA drivers were installed
+// TODO make this generic and not linked to COS only
 func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
 	val, ok := node.Status.Capacity[NVIDIAGPUResourceName]
 
@@ -66,6 +66,7 @@ func NVIDIADevicePlugin() *v1.Pod {
 	return p
 }
 
+// GetGPUDevicePluginImage returns the image of the NVIDIA GPU device plugin.
 func GetGPUDevicePluginImage() string {
 	ds, err := DsFromManifest(GPUDevicePluginDSYAML)
 	if err != nil {
diff --git a/test/e2e/framework/jobs_util.go b/test/e2e/framework/jobs_util.go
index a5839dc36f8..13fbb7db8e3 100644
--- a/test/e2e/framework/jobs_util.go
+++ b/test/e2e/framework/jobs_util.go
@@ -31,10 +31,10 @@ import (
 )
 
 const (
-	// How long to wait for a job to finish.
+	// JobTimeout is how long to wait for a job to finish.
 	JobTimeout = 15 * time.Minute
 
-	// Job selector name
+	// JobSelectorKey is the key of the job selector
 	JobSelectorKey = "job"
 )
 
@@ -251,7 +251,7 @@ func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parall
 	return count == parallelism, nil
 }
 
-// WaitForAllJobPodsRunning wait for all pods for the Job named jobName in namespace ns
+// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
 // to be deleted.
 func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
 	return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
@@ -271,6 +271,7 @@ func newBool(val bool) *bool {
 
 type updateJobFunc func(*batch.Job)
 
+// UpdateJobWithRetries updates the specified job by applying applyUpdate to the latest copy, retrying until the update succeeds or times out.
 func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
 	jobs := c.BatchV1().Jobs(namespace)
 	var updateErr error
@@ -305,6 +306,7 @@ func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
 	})
 }
 
+// JobFinishTime returns the finish time of the specified job.
 func JobFinishTime(finishedJob *batch.Job) metav1.Time {
 	var finishTime metav1.Time
 	for _, c := range finishedJob.Status.Conditions {
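Illustration (not part of the patch): a sketch of `UpdateJobWithRetries` in use, bumping a Job's parallelism. The `scaleJob` wrapper and its arguments are hypothetical placeholders; `UpdateJobWithRetries`, `framework.Failf`, and `framework.Logf` are the framework's own. The update function receives the freshly fetched Job and mutates it in place before each update attempt.

```go
package e2e

import (
	batch "k8s.io/api/batch/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

// scaleJob changes the parallelism of an existing Job using the
// retrying update helper documented in the patch above. Failf aborts
// the test if the update never succeeds within the helper's timeout.
func scaleJob(c clientset.Interface, ns, name string, parallelism int32) *batch.Job {
	job, err := framework.UpdateJobWithRetries(c, ns, name, func(j *batch.Job) {
		// Mutate the fetched Job in place; the helper pushes it to the
		// apiserver and retries with a fresh copy on failure.
		j.Spec.Parallelism = &parallelism
	})
	if err != nil {
		framework.Failf("failed to update job %q: %v", name, err)
	}
	framework.Logf("job %q updated, parallelism=%d", job.Name, *job.Spec.Parallelism)
	return job
}
```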