Fix golint failures of e2e/framework/[g-j]*.go

Kenichi Omichi 2019-03-23 00:27:21 +00:00
parent 733f2478d3
commit 1fa57d0a85
4 changed files with 16 additions and 7 deletions

View File

@@ -22,6 +22,7 @@ import (
 	"strings"
 )
 
+// KubemarkResourceUsage is a struct for tracking the resource usage of kubemark.
 type KubemarkResourceUsage struct {
 	Name                    string
 	MemoryWorkingSetInBytes uint64
@@ -36,6 +37,7 @@ func getMasterUsageByPrefix(prefix string) (string, error) {
 	return sshResult.Stdout, nil
 }
 
+// GetKubemarkMasterComponentsResourceUsage returns the CPU and memory usage of each kubemark master component, keyed by pod name.
 // TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
 func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
 	result := make(map[string]*KubemarkResourceUsage)
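
A minimal usage sketch for the helper documented above, assuming it is called from a test that imports the e2e framework as framework (framework.Logf is that package's logger):

usage := framework.GetKubemarkMasterComponentsResourceUsage()
for podName, u := range usage {
	// Each entry is keyed by pod name and carries that component's usage.
	framework.Logf("%s: memory working set = %d bytes", podName, u.MemoryWorkingSetInBytes)
}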

View File

@@ -108,6 +108,7 @@ func lookupClusterImageSources() (string, string, error) {
 	return masterImg, nodeImg, nil
 }
 
+// LogClusterImageSources writes out cluster image sources.
 func LogClusterImageSources() {
 	masterImg, nodeImg, err := lookupClusterImageSources()
 	if err != nil {
@@ -129,6 +130,7 @@ func LogClusterImageSources() {
 	}
 }
 
+// CreateManagedInstanceGroup creates a Compute Engine managed instance group.
 func CreateManagedInstanceGroup(size int64, zone, template string) error {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud.
@@ -145,6 +147,7 @@ func CreateManagedInstanceGroup(size int64, zone, template string) error {
 	return nil
 }
 
+// GetManagedInstanceGroupTemplateName returns the name of the instance template used by the Google Compute Engine managed instance group in the given zone.
 func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud. Use InstanceGroupManager to get Instance Template name.
@@ -167,6 +170,7 @@ func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
 	return templateName, nil
 }
 
+// DeleteManagedInstanceGroup deletes a Google Compute Engine managed instance group.
 func DeleteManagedInstanceGroup(zone string) error {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud.
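
A sketch of how the three managed-instance-group helpers might compose in a test; the zone and template values are placeholders, and the framework.Failf/framework.Logf calls assume the e2e framework package:

zone, template := "us-central1-b", "example-template" // placeholders, not values from the commit
if err := framework.CreateManagedInstanceGroup(3, zone, template); err != nil {
	framework.Failf("creating managed instance group: %v", err)
}
defer func() {
	// Best-effort cleanup of the group created above.
	if err := framework.DeleteManagedInstanceGroup(zone); err != nil {
		framework.Logf("deleting managed instance group: %v", err)
	}
}()
name, err := framework.GetManagedInstanceGroupTemplateName(zone)
if err != nil {
	framework.Failf("looking up instance template name: %v", err)
}
framework.Logf("managed instance group uses template %q", name)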

View File

@@ -24,20 +24,20 @@ import (
 )
 
 const (
-	// GPUResourceName is the extended name of the GPU resource since v1.8
+	// NVIDIAGPUResourceName is the extended name of the GPU resource since v1.8
 	// this uses the device plugin mechanism
 	NVIDIAGPUResourceName = "nvidia.com/gpu"
 
+	// GPUDevicePluginDSYAML is the official Google Device Plugin Daemonset NVIDIA GPU manifest for GKE
 	// TODO: Parametrize it by making it a feature in TestFramework.
 	// so we can override the daemonset in other setups (non COS).
-	// GPUDevicePluginDSYAML is the official Google Device Plugin Daemonset NVIDIA GPU manifest for GKE
 	GPUDevicePluginDSYAML = "https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml"
 )
 
-// TODO make this generic and not linked to COS only
-// NumberOfGPUs returs the number of GPUs advertised by a node
+// NumberOfNVIDIAGPUs returns the number of GPUs advertised by a node
 // This is based on the Device Plugin system and expected to run on a COS based node
 // After the NVIDIA drivers were installed
+// TODO make this generic and not linked to COS only
 func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
 	val, ok := node.Status.Capacity[NVIDIAGPUResourceName]
@@ -66,6 +66,7 @@ func NVIDIADevicePlugin() *v1.Pod {
 	return p
 }
 
+// GetGPUDevicePluginImage returns the image of the GPU device plugin.
 func GetGPUDevicePluginImage() string {
 	ds, err := DsFromManifest(GPUDevicePluginDSYAML)
 	if err != nil {
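
A sketch of the renamed NumberOfNVIDIAGPUs in use; it assumes a clientset c in scope, and that the framework's GetReadySchedulableNodesOrDie and Skipf helpers are available:

nodes := framework.GetReadySchedulableNodesOrDie(c)
var gpus int64
for i := range nodes.Items {
	// Sum the nvidia.com/gpu capacity advertised by each ready node.
	gpus += framework.NumberOfNVIDIAGPUs(&nodes.Items[i])
}
if gpus == 0 {
	framework.Skipf("no node advertises %s capacity", framework.NVIDIAGPUResourceName)
}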

View File

@@ -31,10 +31,10 @@ import (
 )
 
 const (
-	// How long to wait for a job to finish.
+	// JobTimeout is how long to wait for a job to finish.
 	JobTimeout = 15 * time.Minute
 
-	// Job selector name
+	// JobSelectorKey is a job selector name
 	JobSelectorKey = "job"
 )
@@ -251,7 +251,7 @@ func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
 	return count == parallelism, nil
 }
 
-// WaitForAllJobPodsRunning wait for all pods for the Job named jobName in namespace ns
+// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
 // to be deleted.
 func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
 	return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
@@ -271,6 +271,7 @@ func newBool(val bool) *bool {
 type updateJobFunc func(*batch.Job)
 
+// UpdateJobWithRetries updates jobs with retries.
 func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
 	jobs := c.BatchV1().Jobs(namespace)
 	var updateErr error
@@ -305,6 +306,7 @@ func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
 	})
 }
 
+// JobFinishTime returns the finish time of the specified job.
 func JobFinishTime(finishedJob *batch.Job) metav1.Time {
 	var finishTime metav1.Time
 	for _, c := range finishedJob.Status.Conditions {
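
To close, a minimal sketch tying the job helpers together; c (a clientset.Interface), ns, and the Job name "example-job" are assumptions of this example, not values from the commit:

parallelism := int32(2)
job, err := framework.UpdateJobWithRetries(c, ns, "example-job", func(j *batch.Job) {
	j.Spec.Parallelism = &parallelism // re-applied on conflict until the write succeeds
})
if err != nil {
	framework.Failf("updating job: %v", err)
}
framework.Logf("job %q parallelism is now %d", job.Name, *job.Spec.Parallelism)

// ...later, after the Job has been deleted with cascading deletion,
// WaitForAllJobPodsGone polls (up to JobTimeout) until its pods disappear.
if err := framework.WaitForAllJobPodsGone(c, ns, "example-job"); err != nil {
	framework.Failf("pods of job were not cleaned up: %v", err)
}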