Merge pull request #17572 from yujuhong/kubeutil

Auto commit by PR queue bot
This commit is contained in:
k8s-merge-robot
2015-12-04 03:15:54 -08:00
7 changed files with 55 additions and 43 deletions

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
kubeletutil "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/util/config" "k8s.io/kubernetes/pkg/util/config"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
utilvalidation "k8s.io/kubernetes/pkg/util/validation" utilvalidation "k8s.io/kubernetes/pkg/util/validation"
@@ -380,7 +380,7 @@ func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool {
// recordFirstSeenTime records the first seen time of this pod. // recordFirstSeenTime records the first seen time of this pod.
func recordFirstSeenTime(pod *api.Pod) { func recordFirstSeenTime(pod *api.Pod) {
glog.V(4).Infof("Receiving a new pod %q", kubeletutil.FormatPodName(pod)) glog.V(4).Infof("Receiving a new pod %q", format.Pod(pod))
pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString() pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString()
} }

View File

@@ -60,7 +60,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/rkt" "k8s.io/kubernetes/pkg/kubelet/rkt"
"k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
kubeletutil "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/util/queue" "k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/securitycontext"
@@ -2295,13 +2295,13 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubetypes.PodUpdate, handler
switch u.Op { switch u.Op {
case kubetypes.ADD: case kubetypes.ADD:
glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods)) glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods))
handler.HandlePodAdditions(u.Pods) handler.HandlePodAdditions(u.Pods)
case kubetypes.UPDATE: case kubetypes.UPDATE:
glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods)) glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.Pods(u.Pods))
handler.HandlePodUpdates(u.Pods) handler.HandlePodUpdates(u.Pods)
case kubetypes.REMOVE: case kubetypes.REMOVE:
glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods)) glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods))
handler.HandlePodDeletions(u.Pods) handler.HandlePodDeletions(u.Pods)
case kubetypes.SET: case kubetypes.SET:
// TODO: Do we want to support this? // TODO: Do we want to support this?
@@ -2318,7 +2318,7 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubetypes.PodUpdate, handler
glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e) glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e)
break break
} }
glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", kubeletutil.FormatPodName(pod), e) glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e)
// Force the container runtime cache to update. // Force the container runtime cache to update.
if err := kl.runtimeCache.ForceUpdateIfOlder(time.Now()); err != nil { if err := kl.runtimeCache.ForceUpdateIfOlder(time.Now()); err != nil {
glog.Errorf("SyncLoop: unable to update runtime cache") glog.Errorf("SyncLoop: unable to update runtime cache")
@@ -2331,12 +2331,12 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubetypes.PodUpdate, handler
if len(podsToSync) == 0 { if len(podsToSync) == 0 {
break break
} }
glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), kubeletutil.FormatPodNames(podsToSync)) glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync))
kl.HandlePodSyncs(podsToSync) kl.HandlePodSyncs(podsToSync)
case update := <-kl.livenessManager.Updates(): case update := <-kl.livenessManager.Updates():
// We only care about failures (signalling container death) here. // We only care about failures (signalling container death) here.
if update.Result == proberesults.Failure { if update.Result == proberesults.Failure {
glog.V(1).Infof("SyncLoop (container unhealthy): %q", kubeletutil.FormatPodName(update.Pod)) glog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(update.Pod))
handler.HandlePodSyncs([]*api.Pod{update.Pod}) handler.HandlePodSyncs([]*api.Pod{update.Pod})
} }
case <-housekeepingCh: case <-housekeepingCh:
@@ -2431,7 +2431,7 @@ func (kl *Kubelet) HandlePodDeletions(pods []*api.Pod) {
// Deletion is allowed to fail because the periodic cleanup routine // Deletion is allowed to fail because the periodic cleanup routine
// will trigger deletion again. // will trigger deletion again.
if err := kl.deletePod(pod.UID); err != nil { if err := kl.deletePod(pod.UID); err != nil {
glog.V(2).Infof("Failed to delete pod %q, err: %v", kubeletutil.FormatPodName(pod), err) glog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err)
} }
kl.probeManager.RemovePod(pod) kl.probeManager.RemovePod(pod)
} }

View File

@@ -25,7 +25,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/status"
kubeutil "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@@ -134,7 +134,7 @@ func (m *manager) AddPod(pod *api.Pod) {
key.probeType = readiness key.probeType = readiness
if _, ok := m.workers[key]; ok { if _, ok := m.workers[key]; ok {
glog.Errorf("Readiness probe already exists! %v - %v", glog.Errorf("Readiness probe already exists! %v - %v",
kubeutil.FormatPodName(pod), c.Name) format.Pod(pod), c.Name)
return return
} }
w := newWorker(m, readiness, pod, c) w := newWorker(m, readiness, pod, c)
@@ -146,7 +146,7 @@ func (m *manager) AddPod(pod *api.Pod) {
key.probeType = liveness key.probeType = liveness
if _, ok := m.workers[key]; ok { if _, ok := m.workers[key]; ok {
glog.Errorf("Liveness probe already exists! %v - %v", glog.Errorf("Liveness probe already exists! %v - %v",
kubeutil.FormatPodName(pod), c.Name) format.Pod(pod), c.Name)
return return
} }
w := newWorker(m, liveness, pod, c) w := newWorker(m, liveness, pod, c)

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/kubelet/prober/results"
kubeletutil "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
) )
@@ -125,14 +125,14 @@ func (w *worker) doProbe() (keepGoing bool) {
status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID) status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
if !ok { if !ok {
// Either the pod has not been created yet, or it was already deleted. // Either the pod has not been created yet, or it was already deleted.
glog.V(3).Infof("No status for pod: %v", kubeletutil.FormatPodName(w.pod)) glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod))
return true return true
} }
// Worker should terminate if pod is terminated. // Worker should terminate if pod is terminated.
if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded { if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
glog.V(3).Infof("Pod %v %v, exiting probe worker", glog.V(3).Infof("Pod %v %v, exiting probe worker",
kubeletutil.FormatPodName(w.pod), status.Phase) format.Pod(w.pod), status.Phase)
return false return false
} }
@@ -140,7 +140,7 @@ func (w *worker) doProbe() (keepGoing bool) {
if !ok { if !ok {
// Either the container has not been created yet, or it was deleted. // Either the container has not been created yet, or it was deleted.
glog.V(3).Infof("Non-existant container probed: %v - %v", glog.V(3).Infof("Non-existant container probed: %v - %v",
kubeletutil.FormatPodName(w.pod), w.container.Name) format.Pod(w.pod), w.container.Name)
return true // Wait for more information. return true // Wait for more information.
} }
@@ -154,7 +154,7 @@ func (w *worker) doProbe() (keepGoing bool) {
if c.State.Running == nil { if c.State.Running == nil {
glog.V(3).Infof("Non-running container probed: %v - %v", glog.V(3).Infof("Non-running container probed: %v - %v",
kubeletutil.FormatPodName(w.pod), w.container.Name) format.Pod(w.pod), w.container.Name)
if !w.containerID.IsEmpty() { if !w.containerID.IsEmpty() {
w.resultsManager.Set(w.containerID, results.Failure, w.pod) w.resultsManager.Set(w.containerID, results.Failure, w.pod)
} }

View File

@@ -44,7 +44,7 @@ import (
"k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
kubeletutil "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
@@ -473,7 +473,7 @@ func (r *Runtime) makePodManifest(pod *api.Pod, pullSecrets []api.Secret) (*appc
volumeMap, ok := r.volumeGetter.GetVolumes(pod.UID) volumeMap, ok := r.volumeGetter.GetVolumes(pod.UID)
if !ok { if !ok {
return nil, fmt.Errorf("cannot get the volumes for pod %q", kubeletutil.FormatPodName(pod)) return nil, fmt.Errorf("cannot get the volumes for pod %q", format.Pod(pod))
} }
// Set global volumes. // Set global volumes.
@@ -629,7 +629,7 @@ func (r *Runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k
} }
units = append(units, newUnitOption(unitKubernetesSection, unitRestartCount, strconv.Itoa(restartCount))) units = append(units, newUnitOption(unitKubernetesSection, unitRestartCount, strconv.Itoa(restartCount)))
glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, kubeletutil.FormatPodName(pod)) glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, format.Pod(pod))
serviceFile, err := os.Create(serviceFilePath(serviceName)) serviceFile, err := os.Create(serviceFilePath(serviceName))
if err != nil { if err != nil {
return "", nil, err return "", nil, err
@@ -688,7 +688,7 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
// RunPod first creates the unit file for a pod, and then // RunPod first creates the unit file for a pod, and then
// starts the unit over d-bus. // starts the unit over d-bus.
func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error { func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
glog.V(4).Infof("Rkt starts to run pod: name %q.", kubeletutil.FormatPodName(pod)) glog.V(4).Infof("Rkt starts to run pod: name %q.", format.Pod(pod))
name, runtimePod, prepareErr := r.preparePod(pod, pullSecrets) name, runtimePod, prepareErr := r.preparePod(pod, pullSecrets)
@@ -698,7 +698,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
for i, c := range pod.Spec.Containers { for i, c := range pod.Spec.Containers {
ref, err := kubecontainer.GenerateContainerRef(pod, &c) ref, err := kubecontainer.GenerateContainerRef(pod, &c)
if err != nil { if err != nil {
glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", kubeletutil.FormatPodName(pod), c.Name, err) glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", format.Pod(pod), c.Name, err)
continue continue
} }
if prepareErr != nil { if prepareErr != nil {
@@ -989,7 +989,7 @@ func (r *Runtime) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {
// SyncPod syncs the running pod to match the specified desired pod. // SyncPod syncs the running pod to match the specified desired pod.
func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error { func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
podFullName := kubeletutil.FormatPodName(pod) podFullName := format.Pod(pod)
// Add references to all containers. // Add references to all containers.
unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container) unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)

View File

@@ -30,7 +30,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod" kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
kubeletutil "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
) )
@@ -186,7 +186,7 @@ func (m *manager) SetContainerReadiness(pod *api.Pod, containerID kubecontainer.
oldStatus, found := m.podStatuses[pod.UID] oldStatus, found := m.podStatuses[pod.UID]
if !found { if !found {
glog.Warningf("Container readiness changed before pod has synced: %q - %q", glog.Warningf("Container readiness changed before pod has synced: %q - %q",
kubeletutil.FormatPodName(pod), containerID.String()) format.Pod(pod), containerID.String())
return return
} }
status := oldStatus.status status := oldStatus.status
@@ -201,13 +201,13 @@ func (m *manager) SetContainerReadiness(pod *api.Pod, containerID kubecontainer.
} }
if containerIndex == -1 { if containerIndex == -1 {
glog.Warningf("Container readiness changed for unknown container: %q - %q", glog.Warningf("Container readiness changed for unknown container: %q - %q",
kubeletutil.FormatPodName(pod), containerID.String()) format.Pod(pod), containerID.String())
return return
} }
if status.ContainerStatuses[containerIndex].Ready == ready { if status.ContainerStatuses[containerIndex].Ready == ready {
glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready, glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready,
kubeletutil.FormatPodName(pod), containerID.String()) format.Pod(pod), containerID.String())
return return
} }
@@ -229,7 +229,7 @@ func (m *manager) TerminatePods(pods []*api.Pod) bool {
} }
} }
if sent := m.updateStatusInternal(pod, pod.Status); !sent { if sent := m.updateStatusInternal(pod, pod.Status); !sent {
glog.V(4).Infof("Termination notice for %q was dropped because the status channel is full", kubeletutil.FormatPodName(pod)) glog.V(4).Infof("Termination notice for %q was dropped because the status channel is full", format.Pod(pod))
allSent = false allSent = false
} }
} }
@@ -244,7 +244,7 @@ func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus) bool
// clobbering each other so the phase of a pod progresses monotonically. // clobbering each other so the phase of a pod progresses monotonically.
oldStatus, found := m.podStatuses[pod.UID] oldStatus, found := m.podStatuses[pod.UID]
if found && isStatusEqual(&oldStatus.status, &status) && pod.DeletionTimestamp == nil { if found && isStatusEqual(&oldStatus.status, &status) && pod.DeletionTimestamp == nil {
glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", kubeletutil.FormatPodName(pod), status) glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
return false // No new status. return false // No new status.
} }
@@ -330,30 +330,30 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
if err == nil { if err == nil {
translatedUID := m.podManager.TranslatePodUID(pod.UID) translatedUID := m.podManager.TranslatePodUID(pod.UID)
if len(translatedUID) > 0 && translatedUID != uid { if len(translatedUID) > 0 && translatedUID != uid {
glog.V(3).Infof("Pod %q was deleted and then recreated, skipping status update", kubeletutil.FormatPodName(pod)) glog.V(3).Infof("Pod %q was deleted and then recreated, skipping status update", format.Pod(pod))
m.deletePodStatus(uid) m.deletePodStatus(uid)
return return
} }
if !m.needsUpdate(pod.UID, status) { if !m.needsUpdate(pod.UID, status) {
glog.V(1).Infof("Status for pod %q is up-to-date; skipping", kubeletutil.FormatPodName(pod)) glog.V(1).Infof("Status for pod %q is up-to-date; skipping", format.Pod(pod))
return return
} }
pod.Status = status.status pod.Status = status.status
// TODO: handle conflict as a retry, make that easier too. // TODO: handle conflict as a retry, make that easier too.
pod, err = m.kubeClient.Pods(pod.Namespace).UpdateStatus(pod) pod, err = m.kubeClient.Pods(pod.Namespace).UpdateStatus(pod)
if err == nil { if err == nil {
glog.V(3).Infof("Status for pod %q updated successfully: %+v", kubeletutil.FormatPodName(pod), status) glog.V(3).Infof("Status for pod %q updated successfully: %+v", format.Pod(pod), status)
m.apiStatusVersions[pod.UID] = status.version m.apiStatusVersions[pod.UID] = status.version
if pod.DeletionTimestamp == nil { if pod.DeletionTimestamp == nil {
return return
} }
if !notRunning(pod.Status.ContainerStatuses) { if !notRunning(pod.Status.ContainerStatuses) {
glog.V(3).Infof("Pod %q is terminated, but some containers are still running", kubeletutil.FormatPodName(pod)) glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
return return
} }
if err := m.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err == nil { if err := m.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err == nil {
glog.V(3).Infof("Pod %q fully terminated and removed from etcd", kubeletutil.FormatPodName(pod)) glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod))
m.deletePodStatus(uid) m.deletePodStatus(uid)
return return
} }
@@ -361,7 +361,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
} }
// We failed to update status, wait for periodic sync to retry. // We failed to update status, wait for periodic sync to retry.
glog.Warningf("Failed to updated status for pod %q: %v", kubeletutil.FormatPodName(pod), err) glog.Warningf("Failed to updated status for pod %q: %v", format.Pod(pod), err)
} }
// needsUpdate returns whether the status is stale for the given pod UID. // needsUpdate returns whether the status is stale for the given pod UID.

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package format
import ( import (
"fmt" "fmt"
@@ -23,22 +23,34 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
) )
// FormatPodName returns a string representing a pod in a human readable type podHandler func(*api.Pod) string
// Pod returns a string representing a pod in a human readable
// format. This function currently is the same as GetPodFullName in // format. This function currently is the same as GetPodFullName in
// kubelet/containers, but may differ in the future. As opposed to // kubelet/containers, but may differ in the future. As opposed to
// GetPodFullName, FormatPodName is mainly used for logging. // GetPodFullName, this function is mainly used for logging.
func FormatPodName(pod *api.Pod) string { func Pod(pod *api.Pod) string {
// Use underscore as the delimiter because it is not allowed in pod name // Use underscore as the delimiter because it is not allowed in pod name
// (DNS subdomain format), while allowed in the container name format. // (DNS subdomain format), while allowed in the container name format.
return fmt.Sprintf("%s_%s", pod.Name, pod.Namespace) return fmt.Sprintf("%s_%s", pod.Name, pod.Namespace)
} }
// FormatPodNames returns a string representing a list of pods in a human // PodWithUID returns a string representing a pod in a human readable format,
// with pod UID as part of the string.
func PodWithUID(pod *api.Pod) string {
return fmt.Sprintf("%s(%s)", Pod(pod), pod.UID)
}
// Pods returns a string representating a list of pods in a human
// readable format. // readable format.
func FormatPodNames(pods []*api.Pod) string { func Pods(pods []*api.Pod) string {
return aggregatePods(pods, PodWithUID)
}
func aggregatePods(pods []*api.Pod, handler podHandler) string {
podStrings := make([]string, 0, len(pods)) podStrings := make([]string, 0, len(pods))
for _, pod := range pods { for _, pod := range pods {
podStrings = append(podStrings, FormatPodName(pod)) podStrings = append(podStrings, handler(pod))
} }
return fmt.Sprintf(strings.Join(podStrings, ", ")) return fmt.Sprintf(strings.Join(podStrings, ", "))
} }