Merge pull request #75621 from oomichi/golint-e2e-framework-g-j

Fix golint failures of e2e/framework/[g-j]*.go
Kubernetes Prow Robot 2019-03-29 21:11:08 -07:00 committed by GitHub
commit a9db137737
4 changed files with 16 additions and 7 deletions
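All of the golint failures fixed below are of the same kind: an exported identifier must carry a doc comment, and that comment must begin with the identifier's name. A minimal sketch of the rule, using made-up names that are not part of this change:

package example

// DefaultTimeout is how long to wait before giving up.
// golint accepts this because the comment starts with the constant's name.
const DefaultTimeout = 30

// WaitForThing waits for the thing to become ready. Without a comment like
// this, golint reports "exported function WaitForThing should have comment
// or be unexported".
func WaitForThing() {}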

View File

@@ -22,6 +22,7 @@ import (
 	"strings"
 )
+// KubemarkResourceUsage is a struct for tracking the resource usage of kubemark.
 type KubemarkResourceUsage struct {
 	Name                    string
 	MemoryWorkingSetInBytes uint64
@@ -36,6 +37,7 @@ func getMasterUsageByPrefix(prefix string) (string, error) {
 	return sshResult.Stdout, nil
 }
+// GetKubemarkMasterComponentsResourceUsage returns the resource usage of kubemark which contains multiple combinations of cpu and memory usage for each pod name.
 // TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
 func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
 	result := make(map[string]*KubemarkResourceUsage)

View File

@@ -108,6 +108,7 @@ func lookupClusterImageSources() (string, string, error) {
 	return masterImg, nodeImg, nil
 }
+// LogClusterImageSources writes out cluster image sources.
 func LogClusterImageSources() {
 	masterImg, nodeImg, err := lookupClusterImageSources()
 	if err != nil {
@@ -129,6 +130,7 @@ func LogClusterImageSources() {
 	}
 }
+// CreateManagedInstanceGroup creates a Compute Engine managed instance group.
 func CreateManagedInstanceGroup(size int64, zone, template string) error {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud.
@@ -145,6 +147,7 @@ func CreateManagedInstanceGroup(size int64, zone, template string) error {
 	return nil
 }
+// GetManagedInstanceGroupTemplateName returns the list of Google Compute Engine managed instance groups.
 func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud. Use InstanceGroupManager to get Instance Template name.
@@ -167,6 +170,7 @@ func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
 	return templateName, nil
 }
+// DeleteManagedInstanceGroup deletes Google Compute Engine managed instance group.
 func DeleteManagedInstanceGroup(zone string) error {
 	// TODO(verult): make this hit the compute API directly instead of
 	// shelling out to gcloud.

View File

@@ -24,20 +24,20 @@ import (
 )
 const (
-	// GPUResourceName is the extended name of the GPU resource since v1.8
+	// NVIDIAGPUResourceName is the extended name of the GPU resource since v1.8
 	// this uses the device plugin mechanism
 	NVIDIAGPUResourceName = "nvidia.com/gpu"
+	// GPUDevicePluginDSYAML is the official Google Device Plugin Daemonset NVIDIA GPU manifest for GKE
 	// TODO: Parametrize it by making it a feature in TestFramework.
 	// so we can override the daemonset in other setups (non COS).
-	// GPUDevicePluginDSYAML is the official Google Device Plugin Daemonset NVIDIA GPU manifest for GKE
 	GPUDevicePluginDSYAML = "https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml"
 )
-// TODO make this generic and not linked to COS only
-// NumberOfGPUs returs the number of GPUs advertised by a node
+// NumberOfNVIDIAGPUs returns the number of GPUs advertised by a node
 // This is based on the Device Plugin system and expected to run on a COS based node
 // After the NVIDIA drivers were installed
+// TODO make this generic and not linked to COS only
 func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
 	val, ok := node.Status.Capacity[NVIDIAGPUResourceName]
@@ -66,6 +66,7 @@ func NVIDIADevicePlugin() *v1.Pod {
 	return p
 }
+// GetGPUDevicePluginImage returns the image of GPU device plugin.
 func GetGPUDevicePluginImage() string {
 	ds, err := DsFromManifest(GPUDevicePluginDSYAML)
 	if err != nil {
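For context, NumberOfNVIDIAGPUs works by reading the extended resource that the device plugin advertises in the node's capacity. A self-contained sketch of that lookup (gpuCapacity is a made-up name, not part of the framework):

package example

import (
	v1 "k8s.io/api/core/v1"
)

// gpuCapacity mirrors the node.Status.Capacity lookup used by
// NumberOfNVIDIAGPUs: a missing key means no GPUs are advertised.
func gpuCapacity(node *v1.Node) int64 {
	val, ok := node.Status.Capacity[v1.ResourceName("nvidia.com/gpu")]
	if !ok {
		return 0
	}
	return val.Value()
}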

View File

@@ -31,10 +31,10 @@ import (
 )
 const (
-	// How long to wait for a job to finish.
+	// JobTimeout is how long to wait for a job to finish.
 	JobTimeout = 15 * time.Minute
-	// Job selector name
+	// JobSelectorKey is a job selector name
 	JobSelectorKey = "job"
 )
@@ -251,7 +251,7 @@ func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parall
 	return count == parallelism, nil
 }
-// WaitForAllJobPodsRunning wait for all pods for the Job named jobName in namespace ns
+// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
 // to be deleted.
 func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
 	return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
@@ -271,6 +271,7 @@ func newBool(val bool) *bool {
 type updateJobFunc func(*batch.Job)
+// UpdateJobWithRetries updates jobs with retries.
 func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
 	jobs := c.BatchV1().Jobs(namespace)
 	var updateErr error
@@ -305,6 +306,7 @@ func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
 	})
 }
+// JobFinishTime returns finish time of the specified job.
 func JobFinishTime(finishedJob *batch.Job) metav1.Time {
 	var finishTime metav1.Time
 	for _, c := range finishedJob.Status.Conditions {
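The job helpers in this file all follow the same polling pattern: wait.PollImmediate runs a condition function at a fixed interval until it returns true, returns an error, or the timeout expires. A minimal sketch of that shape (the helper name is made up, and the two-second interval stands in for the framework's Poll constant):

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCondition polls check every two seconds for up to fifteen minutes,
// the same shape used by WaitForAllJobPodsGone and WaitForJobDeleting.
func waitForCondition(check wait.ConditionFunc) error {
	return wait.PollImmediate(2*time.Second, 15*time.Minute, check)
}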