diff --git a/pkg/controller/replication_controller.go b/pkg/controller/replication_controller.go
index bdf77eedacf..b74578261e9 100644
--- a/pkg/controller/replication_controller.go
+++ b/pkg/controller/replication_controller.go
@@ -187,9 +187,10 @@ func (rm *ReplicationManager) watchControllers(resourceVersion *string) {
 }
 
 // Helper function. Also used in pkg/registry/controller, for now.
-func FilterActivePods(pods []api.Pod) []api.Pod {
-	var result []api.Pod
-	for _, value := range pods {
+func FilterActivePods(pods []api.Pod) []*api.Pod {
+	var result []*api.Pod
+	for i := range pods {
+		value := &pods[i]
 		if api.PodSucceeded != value.Status.Phase &&
 			api.PodFailed != value.Status.Phase {
 			result = append(result, value)
diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go
index 60fd22adb58..c01e7a52e9e 100644
--- a/pkg/kubectl/describe.go
+++ b/pkg/kubectl/describe.go
@@ -478,12 +478,13 @@ func (d *NodeDescriber) Describe(namespace, name string) (string, error) {
 		return "", err
 	}
 
-	var pods []api.Pod
+	var pods []*api.Pod
 	allPods, err := d.Pods(namespace).List(labels.Everything())
 	if err != nil {
 		return "", err
 	}
-	for _, pod := range allPods.Items {
+	for i := range allPods.Items {
+		pod := &allPods.Items[i]
 		if pod.Spec.Host != name {
 			continue
 		}
@@ -502,7 +503,7 @@ func (d *NodeDescriber) Describe(namespace, name string) (string, error) {
 	return describeNode(node, pods, events)
 }
 
-func describeNode(node *api.Node, pods []api.Pod, events *api.EventList) (string, error) {
+func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		fmt.Fprintf(out, "Name:\t%s\n", node.Name)
 		fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(node.Labels))
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 0e4e6ccea61..b643d068617 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -91,7 +91,7 @@ type SyncHandler interface {
 	// Syncs current state to match the specified pods. SyncPodType specified what
 	// type of sync is occuring per pod. StartTime specifies the time at which
 	// syncing began (for use in monitoring).
-	SyncPods(pods []*api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, mirrorPods map[string]api.Pod,
+	SyncPods(pods []*api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, mirrorPods map[string]*api.Pod,
 		startTime time.Time) error
 }
 
@@ -1315,7 +1315,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	}
 	if mirrorPod == nil {
 		glog.V(3).Infof("Creating a mirror pod %q", podFullName)
-		if err := kl.podManager.CreateMirrorPod(*pod); err != nil {
+		if err := kl.podManager.CreateMirrorPod(pod); err != nil {
 			glog.Errorf("Failed creating a mirror pod %q: %v", podFullName, err)
 		}
 		// Pod status update is edge-triggered. If there is any update of the
@@ -1399,7 +1399,7 @@ func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, running []*docker.Con
 
 // SyncPods synchronizes the configured list of pods (desired state) with the host current state.
 func (kl *Kubelet) SyncPods(allPods []*api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType,
-	mirrorPods map[string]api.Pod, start time.Time) error {
+	mirrorPods map[string]*api.Pod, start time.Time) error {
 	defer func() {
 		metrics.SyncPodsLatency.Observe(metrics.SinceInMicroseconds(start))
 	}()
@@ -1439,11 +1439,7 @@ func (kl *Kubelet) SyncPods(allPods []*api.Pod, podSyncTypes map[types.UID]metri
 		desiredPods[uid] = empty{}
 
 		// Run the sync in an async manifest worker.
-		var mirrorPod *api.Pod = nil
-		if m, ok := mirrorPods[podFullName]; ok {
-			mirrorPod = &m
-		}
-		kl.podWorkers.UpdatePod(pod, mirrorPod, func() {
+		kl.podWorkers.UpdatePod(pod, mirrorPods[podFullName], func() {
 			metrics.SyncPodLatency.WithLabelValues(podSyncTypes[pod.UID].String()).Observe(metrics.SinceInMicroseconds(start))
 		})
 
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 103a6e6223b..63c630ae692 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -503,7 +503,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
 
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -545,7 +545,7 @@ func TestSyncPodsWithTerminationLog(t *testing.T) {
 	}
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -604,7 +604,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
 	}
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -667,7 +667,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
 	}
 	waitGroup.Add(1)
 	kubelet.podManager.SetPods(pods)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -734,7 +734,7 @@ func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) {
 	}
 	waitGroup.Add(1)
 	kubelet.podManager.SetPods(pods)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -808,7 +808,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
 	}
 	waitGroup.Add(1)
 	kubelet.podManager.SetPods(pods)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -906,7 +906,7 @@ func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) {
 
 	waitGroup.Add(2)
 	kubelet.podManager.SetPods(pods)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -968,7 +968,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
 			ID: "9876",
 		},
 	}
-	if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()); err != nil {
+	if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 	// Validate nothing happened.
@@ -976,7 +976,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
 	fakeDocker.ClearCalls()
 
 	ready = true
-	if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()); err != nil {
+	if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 	verifyCalls(t, fakeDocker, []string{"list", "stop", "stop", "inspect_container", "inspect_container"})
@@ -1015,7 +1015,7 @@ func TestSyncPodsDeletes(t *testing.T) {
 			ID: "4567",
 		},
 	}
-	err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -1094,7 +1094,7 @@ func TestSyncPodsDeletesDuplicate(t *testing.T) {
 
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -1165,7 +1165,7 @@ func TestSyncPodsBadHash(t *testing.T) {
 
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -1239,7 +1239,7 @@ func TestSyncPodsUnhealthy(t *testing.T) {
 	}
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -1851,7 +1851,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
 	}
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -1914,7 +1914,7 @@ func TestSyncPodsWithPullPolicy(t *testing.T) {
 	}
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -3203,7 +3203,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
 		t.Fatalf("expected to have status cached for %q: %v", "pod2", err)
 	}
 	// Sync with empty pods so that the entry in status map will be removed.
-	kl.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	kl.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if _, err := kl.GetPodStatus(kubecontainer.BuildPodFullName("pod2", "")); err == nil {
 		t.Fatalf("expected to not have status cached for %q: %v", "pod2", err)
 	}
@@ -3706,7 +3706,7 @@ func TestDoNotCacheStatusForStaticPods(t *testing.T) {
 	}
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -3912,7 +3912,7 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) {
 
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("%d: unexpected error: %v", i, err)
 	}
@@ -4043,7 +4043,7 @@ func TestGetPodStatusWithLastTermination(t *testing.T) {
 	}
 	kubelet.podManager.SetPods(pods)
 	waitGroup.Add(1)
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now())
+	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
 	if err != nil {
 		t.Errorf("%d: unexpected error: %v", i, err)
 	}
diff --git a/pkg/kubelet/mirror_client.go b/pkg/kubelet/mirror_client.go
index 4cad840abb5..caa12de3007 100644
--- a/pkg/kubelet/mirror_client.go
+++ b/pkg/kubelet/mirror_client.go
@@ -28,7 +28,7 @@ import (
 
 // Mirror client is used to create/delete a mirror pod.
 type mirrorClient interface {
-	CreateMirrorPod(api.Pod) error
+	CreateMirrorPod(*api.Pod) error
 	DeleteMirrorPod(string) error
 }
 
@@ -43,13 +43,13 @@ func newBasicMirrorClient(apiserverClient client.Interface) *basicMirrorClient {
 }
 
 // Creates a mirror pod.
-func (mc *basicMirrorClient) CreateMirrorPod(pod api.Pod) error {
+func (mc *basicMirrorClient) CreateMirrorPod(pod *api.Pod) error {
 	if mc.apiserverClient == nil {
 		return nil
 	}
 	pod.Annotations[ConfigMirrorAnnotationKey] = MirrorType
-	_, err := mc.apiserverClient.Pods(NamespaceDefault).Create(&pod)
+	_, err := mc.apiserverClient.Pods(NamespaceDefault).Create(pod)
 	return err
 }
diff --git a/pkg/kubelet/mirror_client_test.go b/pkg/kubelet/mirror_client_test.go
index 64b5318c880..78fd607eb8f 100644
--- a/pkg/kubelet/mirror_client_test.go
+++ b/pkg/kubelet/mirror_client_test.go
@@ -34,10 +34,10 @@ type fakeMirrorClient struct {
 	deleteCounts map[string]int
 }
 
-func (fmc *fakeMirrorClient) CreateMirrorPod(pod api.Pod) error {
+func (fmc *fakeMirrorClient) CreateMirrorPod(pod *api.Pod) error {
 	fmc.mirrorPodLock.Lock()
 	defer fmc.mirrorPodLock.Unlock()
-	podFullName := kubecontainer.GetPodFullName(&pod)
+	podFullName := kubecontainer.GetPodFullName(pod)
 	fmc.mirrorPods.Insert(podFullName)
 	fmc.createCounts[podFullName]++
 	return nil
diff --git a/pkg/kubelet/pod_manager.go b/pkg/kubelet/pod_manager.go
index c8a894a82e3..1b0b1e6dc1c 100644
--- a/pkg/kubelet/pod_manager.go
+++ b/pkg/kubelet/pod_manager.go
@@ -46,7 +46,7 @@ type podManager interface {
 	GetPods() []*api.Pod
 	GetPodByFullName(podFullName string) (*api.Pod, bool)
 	GetPodByName(namespace, name string) (*api.Pod, bool)
-	GetPodsAndMirrorMap() ([]*api.Pod, map[string]api.Pod)
+	GetPodsAndMirrorMap() ([]*api.Pod, map[string]*api.Pod)
 	SetPods(pods []*api.Pod)
 	UpdatePods(u PodUpdate, podSyncTypes map[types.UID]metrics.SyncPodType)
 	DeleteOrphanedMirrorPods()
@@ -190,12 +190,12 @@ func (pm *basicPodManager) getAllPods() []*api.Pod {
 
 // GetPodsAndMirrorMap returns the a copy of the regular pods and the mirror
 // pods indexed by full name.
-func (pm *basicPodManager) GetPodsAndMirrorMap() ([]*api.Pod, map[string]api.Pod) {
+func (pm *basicPodManager) GetPodsAndMirrorMap() ([]*api.Pod, map[string]*api.Pod) {
 	pm.lock.RLock()
 	defer pm.lock.RUnlock()
-	mirrorPods := make(map[string]api.Pod)
+	mirrorPods := make(map[string]*api.Pod)
 	for key, pod := range pm.mirrorPodByFullName {
-		mirrorPods[key] = *pod
+		mirrorPods[key] = pod
 	}
 	return podsMapToPods(pm.podByUID), mirrorPods
 }
diff --git a/pkg/registry/registrytest/scheduler.go b/pkg/registry/registrytest/scheduler.go
index 2e44ccbaa32..6a463775320 100644
--- a/pkg/registry/registrytest/scheduler.go
+++ b/pkg/registry/registrytest/scheduler.go
@@ -23,11 +23,11 @@ import (
 
 type Scheduler struct {
 	Err     error
-	Pod     api.Pod
+	Pod     *api.Pod
 	Machine string
 }
 
-func (s *Scheduler) Schedule(pod api.Pod, lister scheduler.MinionLister) (string, error) {
+func (s *Scheduler) Schedule(pod *api.Pod, lister scheduler.MinionLister) (string, error) {
 	s.Pod = pod
 	return s.Machine, s.Err
 }
diff --git a/pkg/resourcequota/resource_quota_controller.go b/pkg/resourcequota/resource_quota_controller.go
index 3d651c12fc8..fb4189848e8 100644
--- a/pkg/resourcequota/resource_quota_controller.go
+++ b/pkg/resourcequota/resource_quota_controller.go
@@ -81,9 +81,10 @@ func (rm *ResourceQuotaManager) synchronize() {
 // pods that have a restart policy of always are always returned
 // pods that are in a failed state, but have a restart policy of on failure are always returned
 // pods that are not in a success state or a failure state are included in quota
-func FilterQuotaPods(pods []api.Pod) []api.Pod {
-	var result []api.Pod
-	for _, value := range pods {
+func FilterQuotaPods(pods []api.Pod) []*api.Pod {
+	var result []*api.Pod
+	for i := range pods {
+		value := &pods[i]
 		// a pod that has a restart policy always no matter its state counts against usage
 		if value.Spec.RestartPolicy == api.RestartPolicyAlways {
 			result = append(result, value)
@@ -170,14 +171,14 @@ func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err
 		value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
 	case api.ResourceMemory:
 		val := int64(0)
-		for i := range filteredPods {
-			val = val + PodMemory(&filteredPods[i]).Value()
+		for _, pod := range filteredPods {
+			val = val + PodMemory(pod).Value()
 		}
 		value = resource.NewQuantity(int64(val), resource.DecimalSI)
 	case api.ResourceCPU:
 		val := int64(0)
-		for i := range filteredPods {
-			val = val + PodCPU(&filteredPods[i]).MilliValue()
+		for _, pod := range filteredPods {
+			val = val + PodCPU(pod).MilliValue()
 		}
 		value = resource.NewMilliQuantity(int64(val), resource.DecimalSI)
 	case api.ResourceServices:
diff --git a/pkg/scheduler/listers.go b/pkg/scheduler/listers.go
index 73e5b58c37e..470e3aeebb5 100644
--- a/pkg/scheduler/listers.go
+++ b/pkg/scheduler/listers.go
@@ -45,7 +45,7 @@ type PodLister interface {
 // FakePodLister implements PodLister on an []api.Pods for test purposes.
 type FakePodLister []*api.Pod
 
-// List returns []api.Pod matching a query.
+// List returns []*api.Pod matching a query.
 func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error) {
 	for _, pod := range f {
 		if s.Matches(labels.Set(pod.Labels)) {
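
Note (illustration only, not part of the patch): FilterActivePods and FilterQuotaPods above switch from copying api.Pod values to collecting *api.Pod pointers, taking each pointer by indexing the backing slice (value := &pods[i]) rather than by taking the address of a range variable. A minimal, self-contained Go sketch of that pattern follows; the Pod type and field names here are stand-ins, not the real api.Pod.

package main

import "fmt"

// Pod is a stand-in for api.Pod with only the fields this example needs.
type Pod struct {
	Name  string
	Phase string
}

// filterActive collects pointers into the caller's slice. Indexing with
// &pods[i] yields a distinct pointer per element; taking the address of a
// range variable would instead alias one reused loop variable.
func filterActive(pods []Pod) []*Pod {
	var result []*Pod
	for i := range pods {
		value := &pods[i]
		if value.Phase != "Succeeded" && value.Phase != "Failed" {
			result = append(result, value)
		}
	}
	return result
}

func main() {
	pods := []Pod{
		{Name: "a", Phase: "Running"},
		{Name: "b", Phase: "Failed"},
		{Name: "c", Phase: "Pending"},
	}
	for _, p := range filterActive(pods) {
		fmt.Println(p.Name) // prints a and c
	}
}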