Merge pull request #60599 from cblecker/gofmt-1.10

Automatic merge from submit-queue (batch tested with PRs 60599, 61819). If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.

Update gofmt for go1.10, and fix go vet errors

**What this PR does / why we need it**:
Update gofmt for go1.10, and fix go vet errors

**Release note**:

```release-note
NONE
```
This commit is contained in:
Kubernetes Submit Queue 2018-04-02 20:03:01 -07:00 committed by GitHub
commit b17afaa29e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 27 additions and 27 deletions

View File

@ -474,7 +474,7 @@ func TestTokenCreation(t *testing.T) {
AddedSecret: serviceAccountTokenSecretWithNamespaceData([]byte("custom")),
ExpectedActions: []core.Action{
// no update is performed... the custom namespace is preserved
// no update is performed... the custom namespace is preserved
},
},
@ -539,7 +539,7 @@ func TestTokenCreation(t *testing.T) {
UpdatedSecret: serviceAccountTokenSecretWithNamespaceData([]byte("custom")),
ExpectedActions: []core.Action{
// no update is performed... the custom namespace is preserved
// no update is performed... the custom namespace is preserved
},
},

View File

@ -142,7 +142,7 @@ func TestCreateAuthInfoOptions(t *testing.T) {
},
{
flags: []string{
// No name for authinfo provided.
// No name for authinfo provided.
},
wantCompleteErr: true,
},

View File

@ -1017,7 +1017,7 @@ func defaultPod() *api.Pod {
},
Spec: api.PodSpec{
SecurityContext: &api.PodSecurityContext{
// fill in for test cases
// fill in for test cases
},
Containers: []api.Container{
{
@ -1041,7 +1041,7 @@ func defaultV1Pod() *v1.Pod {
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
// fill in for test cases
// fill in for test cases
},
Containers: []v1.Container{
{

View File

@ -44,10 +44,10 @@ func NameSystems() namer.NameSystems {
publicNamer := &ExceptionNamer{
Exceptions: map[string]string{
// these exceptions are used to deconflict the generated code
// you can put your fully qualified package like
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "EventResource"
// these exceptions are used to deconflict the generated code
// you can put your fully qualified package like
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "EventResource"
},
KeyFunc: func(t *types.Type) string {
return t.Name.Package + "." + t.Name.Name
@ -56,10 +56,10 @@ func NameSystems() namer.NameSystems {
}
privateNamer := &ExceptionNamer{
Exceptions: map[string]string{
// these exceptions are used to deconflict the generated code
// you can put your fully qualified package like
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "eventResource"
// these exceptions are used to deconflict the generated code
// you can put your fully qualified package like
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "eventResource"
},
KeyFunc: func(t *types.Type) string {
return t.Name.Package + "." + t.Name.Name
@ -68,10 +68,10 @@ func NameSystems() namer.NameSystems {
}
publicPluralNamer := &ExceptionNamer{
Exceptions: map[string]string{
// these exceptions are used to deconflict the generated code
// you can put your fully qualified package like
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "EventResource"
// these exceptions are used to deconflict the generated code
// you can put your fully qualified package like
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "EventResource"
},
KeyFunc: func(t *types.Type) string {
return t.Name.Package + "." + t.Name.Name

View File

@ -199,7 +199,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
var rsgather *framework.ContainerResourceGatherer
if setupResourceGatherer {
framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{false, false, 2 * time.Second, 2 * time.Second, true}, pods)
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, MasterOnly: false, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
go rsgather.StartGatheringData()
}

View File

@ -134,7 +134,7 @@ type ImageConfig struct {
type Accelerator struct {
Type string `json:"type,omitempty"`
Count int64 `json:"count, omitempty"`
Count int64 `json:"count,omitempty"`
}
type Resources struct {
@ -142,19 +142,19 @@ type Resources struct {
}
type GCEImage struct {
Image string `json:"image, omitempty"`
ImageDesc string `json:"image_description, omitempty"`
Image string `json:"image,omitempty"`
ImageDesc string `json:"image_description,omitempty"`
Project string `json:"project"`
Metadata string `json:"metadata"`
ImageRegex string `json:"image_regex, omitempty"`
ImageRegex string `json:"image_regex,omitempty"`
// Defaults to using only the latest image. Acceptable values are [0, # of images that match the regex).
// If the number of existing previous images is less than what is desired, the test will use what is available.
PreviousImages int `json:"previous_images, omitempty"`
PreviousImages int `json:"previous_images,omitempty"`
Machine string `json:"machine, omitempty"`
Resources Resources `json:"resources, omitempty"`
Machine string `json:"machine,omitempty"`
Resources Resources `json:"resources,omitempty"`
// This test is for benchmark (no limit verification, more result log, node name has format 'machine-image-uuid') if 'Tests' is non-empty.
Tests []string `json:"tests, omitempty"`
Tests []string `json:"tests,omitempty"`
}
type internalImageConfig struct {

View File

@ -666,7 +666,7 @@ func TestUpdateNodeObjects(t *testing.T) {
go func(lister int) {
w, err := c.Nodes().Watch(metav1.ListOptions{})
if err != nil {
fmt.Printf("[watch:%d] error: %v", k, err)
fmt.Printf("[watch:%d] error: %v", lister, err)
return
}
i := 0