diff --git a/pkg/kubelet/container/cache_test.go b/pkg/kubelet/container/cache_test.go
index 5362bc57158..04d1c2e32eb 100644
--- a/pkg/kubelet/container/cache_test.go
+++ b/pkg/kubelet/container/cache_test.go
@@ -49,7 +49,7 @@ func getTestPodIDAndStatus(numContainers int) (types.UID, *PodStatus) {
         status = &PodStatus{ID: id}
     }
     for i := 0; i < numContainers; i++ {
-        status.ContainerStatuses = append(status.ContainerStatuses, &Status{Name: string(i)})
+        status.ContainerStatuses = append(status.ContainerStatuses, &Status{Name: strconv.Itoa(i)})
     }
     return id, status
 }
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 2416f495ac1..73ac185b38c 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -21,6 +21,7 @@ import (
     "io/ioutil"
     "os"
     "sort"
+    "strconv"
     "testing"
     "time"
 
@@ -355,7 +356,7 @@ func newTestPods(count int) []*v1.Pod {
                 HostNetwork: true,
             },
             ObjectMeta: metav1.ObjectMeta{
-                UID:  types.UID(10000 + i),
+                UID:  types.UID(strconv.Itoa(10000 + i)),
                 Name: fmt.Sprintf("pod%d", i),
             },
         }
diff --git a/pkg/kubelet/network/dns/dns_test.go b/pkg/kubelet/network/dns/dns_test.go
index de4bf620041..9a001968627 100644
--- a/pkg/kubelet/network/dns/dns_test.go
+++ b/pkg/kubelet/network/dns/dns_test.go
@@ -21,10 +21,11 @@ import (
     "io/ioutil"
     "net"
     "os"
+    "strconv"
     "strings"
     "testing"
 
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/sets"
@@ -632,7 +633,7 @@ func newTestPods(count int) []*v1.Pod {
                 HostNetwork: true,
             },
             ObjectMeta: metav1.ObjectMeta{
-                UID:  types.UID(10000 + i),
+                UID:  types.UID(strconv.Itoa(10000 + i)),
                 Name: fmt.Sprintf("pod%d", i),
             },
         }
diff --git a/pkg/kubelet/pod_workers_test.go b/pkg/kubelet/pod_workers_test.go
index 3584b54fa49..f13761203ec 100644
--- a/pkg/kubelet/pod_workers_test.go
+++ b/pkg/kubelet/pod_workers_test.go
@@ -18,11 +18,12 @@ package kubelet
 
 import (
     "reflect"
+    "strconv"
     "sync"
     "testing"
     "time"
 
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/clock"
@@ -114,7 +115,7 @@ func drainWorkers(podWorkers *podWorkers, numPods int) {
         stillWorking := false
         podWorkers.podLock.Lock()
         for i := 0; i < numPods; i++ {
-            if podWorkers.isWorking[types.UID(string(i))] {
+            if podWorkers.isWorking[types.UID(strconv.Itoa(i))] {
                 stillWorking = true
             }
         }
@@ -133,7 +134,7 @@ func TestUpdatePod(t *testing.T) {
     for i := 0; i < numPods; i++ {
         for j := i; j < numPods; j++ {
             podWorkers.UpdatePod(&UpdatePodOptions{
-                Pod:        newPod(string(j), string(i)),
+                Pod:        newPod(strconv.Itoa(j), strconv.Itoa(i)),
                 UpdateType: kubetypes.SyncPodCreate,
             })
         }
@@ -145,7 +146,7 @@ func TestUpdatePod(t *testing.T) {
         return
     }
     for i := 0; i < numPods; i++ {
-        uid := types.UID(i)
+        uid := types.UID(strconv.Itoa(i))
         if len(processed[uid]) < 1 || len(processed[uid]) > i+1 {
             t.Errorf("Pod %v processed %v times", i, len(processed[uid]))
             continue
         }
@@ -154,11 +155,11 @@ func TestUpdatePod(t *testing.T) {
         // PodWorker guarantees the first and the last event will be processed
         first := 0
         last := len(processed[uid]) - 1
-        if processed[uid][first].name != string(0) {
+        if processed[uid][first].name != "0" {
             t.Errorf("Pod %v: incorrect order %v, %v", i, first, processed[uid][first])
         }
 
-        if processed[uid][last].name != string(i) {
+        if processed[uid][last].name != strconv.Itoa(i) {
             t.Errorf("Pod %v: incorrect order %v, %v", i, last, processed[uid][last])
         }
     }
@@ -168,7 +169,7 @@ func TestUpdatePodDoesNotForgetSyncPodKill(t *testing.T) {
     podWorkers, processed := createPodWorkers()
     numPods := 20
     for i := 0; i < numPods; i++ {
-        pod := newPod(string(i), string(i))
+        pod := newPod(strconv.Itoa(i), strconv.Itoa(i))
         podWorkers.UpdatePod(&UpdatePodOptions{
             Pod:        pod,
             UpdateType: kubetypes.SyncPodCreate,
@@ -188,7 +189,7 @@ func TestUpdatePodDoesNotForgetSyncPodKill(t *testing.T) {
         return
     }
     for i := 0; i < numPods; i++ {
-        uid := types.UID(i)
+        uid := types.UID(strconv.Itoa(i))
         // each pod should be processed two times (create, kill, but not update)
         syncPodRecords := processed[uid]
         if len(syncPodRecords) < 2 {
@@ -210,7 +211,7 @@ func TestForgetNonExistingPodWorkers(t *testing.T) {
     numPods := 20
     for i := 0; i < numPods; i++ {
         podWorkers.UpdatePod(&UpdatePodOptions{
-            Pod:        newPod(string(i), "name"),
+            Pod:        newPod(strconv.Itoa(i), "name"),
             UpdateType: kubetypes.SyncPodUpdate,
         })
     }
@@ -221,16 +222,16 @@ func TestForgetNonExistingPodWorkers(t *testing.T) {
     }
 
     desiredPods := map[types.UID]sets.Empty{}
-    desiredPods[types.UID(2)] = sets.Empty{}
-    desiredPods[types.UID(14)] = sets.Empty{}
+    desiredPods[types.UID("2")] = sets.Empty{}
+    desiredPods[types.UID("14")] = sets.Empty{}
     podWorkers.ForgetNonExistingPodWorkers(desiredPods)
     if len(podWorkers.podUpdates) != 2 {
         t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
     }
-    if _, exists := podWorkers.podUpdates[types.UID(2)]; !exists {
+    if _, exists := podWorkers.podUpdates[types.UID("2")]; !exists {
         t.Errorf("No updates channel for pod 2")
     }
-    if _, exists := podWorkers.podUpdates[types.UID(14)]; !exists {
+    if _, exists := podWorkers.podUpdates[types.UID("14")]; !exists {
         t.Errorf("No updates channel for pod 14")
     }
 
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 8af21c774a7..6ec68fbb549 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -22,6 +22,7 @@ import (
     "io/ioutil"
     "math/rand"
     "os"
+    "strconv"
     "time"
 
     v1 "k8s.io/api/core/v1"
@@ -613,7 +614,7 @@ func getAttemptsLabel(p *framework.QueuedPodInfo) string {
     if p.Attempts >= 15 {
         return "15+"
     }
-    return string(p.Attempts)
+    return strconv.Itoa(p.Attempts)
 }
 
 func (sched *Scheduler) profileForPod(pod *v1.Pod) (*profile.Profile, error) {
diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure_test.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure_test.go
index eb186d88451..5f9aab76943 100644
--- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure_test.go
+++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure_test.go
@@ -185,7 +185,7 @@ func TestAzureTokenSource(t *testing.T) {
         expiresOn = "foo"
     )
     cfg := map[string]string{
-        cfgConfigMode:  string(configMode),
+        cfgConfigMode:  strconv.Itoa(int(configMode)),
         cfgApiserverID: serverID,
         cfgClientID:    clientID,
         cfgTenantID:    tenantID,
@@ -365,7 +365,7 @@ func TestAzureTokenSourceScenarios(t *testing.T) {
             persister := newFakePersister()
 
             cfg := map[string]string{
-                cfgConfigMode: string(configMode),
+                cfgConfigMode: strconv.Itoa(int(configMode)),
             }
             if tc.configToken != nil {
                 cfg = token2Cfg(tc.configToken)
diff --git a/staging/src/k8s.io/client-go/tools/record/event_test.go b/staging/src/k8s.io/client-go/tools/record/event_test.go
index eadb1331c9e..67b033d5f1c 100644
--- a/staging/src/k8s.io/client-go/tools/record/event_test.go
+++ b/staging/src/k8s.io/client-go/tools/record/event_test.go
@@ -24,7 +24,7 @@ import (
     "testing"
     "time"
 
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     k8sruntime "k8s.io/apimachinery/pkg/runtime"
@@ -509,7 +509,7 @@ func TestLotsOfEvents(t *testing.T) {
             APIVersion: "version",
         }
         // we need to vary the reason to prevent aggregation
-        go recorder.Eventf(ref, v1.EventTypeNormal, "Reason-"+string(i), strconv.Itoa(i))
+        go recorder.Eventf(ref, v1.EventTypeNormal, "Reason-"+strconv.Itoa(i), strconv.Itoa(i))
     }
     // Make sure no events were dropped by either of the listeners.
     for i := 0; i < maxQueuedEvents; i++ {
diff --git a/staging/src/k8s.io/client-go/tools/record/events_cache_test.go b/staging/src/k8s.io/client-go/tools/record/events_cache_test.go
index 8cb4a39e8ec..7eb4d34a96b 100644
--- a/staging/src/k8s.io/client-go/tools/record/events_cache_test.go
+++ b/staging/src/k8s.io/client-go/tools/record/events_cache_test.go
@@ -18,6 +18,7 @@ package record
 
 import (
     "reflect"
+    "strconv"
     "strings"
     "testing"
     "time"
@@ -69,10 +70,10 @@ func makeUniqueEvents(num int) []v1.Event {
     events := []v1.Event{}
     kind := "Pod"
     for i := 0; i < num; i++ {
-        reason := strings.Join([]string{"reason", string(i)}, "-")
-        message := strings.Join([]string{"message", string(i)}, "-")
-        name := strings.Join([]string{"pod", string(i)}, "-")
-        namespace := strings.Join([]string{"ns", string(i)}, "-")
+        reason := strings.Join([]string{"reason", strconv.Itoa(i)}, "-")
+        message := strings.Join([]string{"message", strconv.Itoa(i)}, "-")
+        name := strings.Join([]string{"pod", strconv.Itoa(i)}, "-")
+        namespace := strings.Join([]string{"ns", strconv.Itoa(i)}, "-")
         involvedObject := makeObjectReference(kind, name, namespace)
         events = append(events, makeEvent(reason, message, involvedObject))
     }
@@ -82,7 +83,7 @@ func makeUniqueEvents(num int) []v1.Event {
 func makeSimilarEvents(num int, template v1.Event, messagePrefix string) []v1.Event {
     events := makeEvents(num, template)
     for i := range events {
-        events[i].Message = strings.Join([]string{messagePrefix, string(i), events[i].Message}, "-")
+        events[i].Message = strings.Join([]string{messagePrefix, strconv.Itoa(i), events[i].Message}, "-")
     }
     return events
 }
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding_test.go
index 78b26c029f1..43834128906 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding_test.go
@@ -17,11 +17,12 @@ limitations under the License.
 package create
 
 import (
+    "strconv"
     "testing"
 
     rbac "k8s.io/api/rbac/v1"
     apiequality "k8s.io/apimachinery/pkg/api/equality"
-    "k8s.io/apimachinery/pkg/apis/meta/v1"
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 func TestCreateClusterRoleBinding(t *testing.T) {
@@ -72,7 +73,7 @@ func TestCreateClusterRoleBinding(t *testing.T) {
     }
 
     for i, tc := range tests {
-        t.Run(string(i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             clusterRoleBinding, err := tc.options.createClusterRoleBinding()
             if err != nil {
                 t.Errorf("unexpected error:\n%#v\n", err)
diff --git a/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go b/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go
index ce0c0b63ea5..46c5344aa3e 100644
--- a/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go
@@ -196,7 +196,7 @@ func createPods(ifCreateNewPods bool) (map[string]corev1.Pod, []corev1.Pod) {
     for i := 0; i < 8; i++ {
         var uid types.UID
         if ifCreateNewPods {
-            uid = types.UID(i)
+            uid = types.UID(strconv.Itoa(i))
         } else {
             uid = types.UID(strconv.Itoa(i) + strconv.Itoa(i))
         }
diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_metrics_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_metrics_test.go
index 106b8273251..808a642704d 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_metrics_test.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_metrics_test.go
@@ -19,6 +19,7 @@ limitations under the License.
 package gce
 
 import (
+    "strconv"
     "testing"
 
     "github.com/google/go-cmp/cmp"
@@ -149,7 +150,7 @@ func TestComputeL4ILBMetrics(t *testing.T) {
             l4ILBServiceMap: make(map[string]L4ILBServiceState),
         }
         for i, serviceState := range tc.serviceStates {
-            newMetrics.SetL4ILBService(string(i), serviceState)
+            newMetrics.SetL4ILBService(strconv.Itoa(i), serviceState)
         }
         got := newMetrics.computeL4ILBMetrics()
         if diff := cmp.Diff(tc.expectL4ILBCount, got); diff != "" {
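
Note (not part of the patch): every hunk above fixes the same Go pitfall. Converting an integer to string with string(n) produces the UTF-8 encoding of code point n, not its decimal form; strconv.Itoa produces the text the tests actually intend, and recent go vet releases flag the old pattern. The standalone sketch below, with illustrative values only, shows the difference; string(rune(i)) is written instead of the pre-fix string(i) so the example itself passes vet.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	i := 7

	// Equivalent to the old string(i): the int is treated as a Unicode
	// code point, so 7 becomes the bell character, not the text "7".
	asRune := string(rune(i))
	// What the patch switches to: decimal formatting.
	asDecimal := strconv.Itoa(i)

	fmt.Printf("string(rune(%d)) = %q\n", i, asRune)     // "\a"
	fmt.Printf("strconv.Itoa(%d) = %q\n", i, asDecimal) // "7"

	// The kubelet tests built UIDs from 10000 + i; as a code-point
	// conversion that is a single non-ASCII character rather than "10000".
	fmt.Printf("string(rune(10000)) = %q\n", string(rune(10000)))
	fmt.Printf("strconv.Itoa(10000) = %q\n", strconv.Itoa(10000))
}

The same reasoning explains the literal-string replacements in the patch: t.Run(strconv.Itoa(i), ...) gives subtests readable decimal names, and map keys such as types.UID("2") now match the UIDs the tests construct with strconv.Itoa.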