diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go
index c851adcaccf..53a4ebead01 100644
--- a/pkg/kubelet/config/config.go
+++ b/pkg/kubelet/config/config.go
@@ -27,7 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/util/config"
 	"k8s.io/kubernetes/pkg/util/sets"
 	utilvalidation "k8s.io/kubernetes/pkg/util/validation"
@@ -379,7 +379,7 @@ func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool {
 
 // recordFirstSeenTime records the first seen time of this pod.
 func recordFirstSeenTime(pod *api.Pod) {
-	glog.V(4).Infof("Receiving a new pod %q", kubeletutil.FormatPodName(pod))
+	glog.V(4).Infof("Receiving a new pod %q", format.Pod(pod))
 	pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString()
 }
 
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index c6fdb39e3e0..537e2c540de 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -60,7 +60,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/rkt"
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/securitycontext"
@@ -2294,13 +2294,13 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubetypes.PodUpdate, handler
 		}
 		switch u.Op {
 		case kubetypes.ADD:
-			glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods))
+			glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods))
 			handler.HandlePodAdditions(u.Pods)
 		case kubetypes.UPDATE:
-			glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods))
+			glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.Pods(u.Pods))
 			handler.HandlePodUpdates(u.Pods)
 		case kubetypes.REMOVE:
-			glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods))
+			glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods))
 			handler.HandlePodDeletions(u.Pods)
 		case kubetypes.SET:
 			// TODO: Do we want to support this?
@@ -2317,7 +2317,7 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubetypes.PodUpdate, handler
 			glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e)
 			break
 		}
-		glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", kubeletutil.FormatPodName(pod), e)
+		glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e)
 		// Force the container runtime cache to update.
 		if err := kl.runtimeCache.ForceUpdateIfOlder(time.Now()); err != nil {
 			glog.Errorf("SyncLoop: unable to update runtime cache")
@@ -2330,12 +2330,12 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubetypes.PodUpdate, handler
 		if len(podsToSync) == 0 {
 			break
 		}
-		glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), kubeletutil.FormatPodNames(podsToSync))
+		glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync))
 		kl.HandlePodSyncs(podsToSync)
 	case update := <-kl.livenessManager.Updates():
 		// We only care about failures (signalling container death) here.
 		if update.Result == proberesults.Failure {
-			glog.V(1).Infof("SyncLoop (container unhealthy): %q", kubeletutil.FormatPodName(update.Pod))
+			glog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(update.Pod))
 			handler.HandlePodSyncs([]*api.Pod{update.Pod})
 		}
 	case <-housekeepingCh:
@@ -2430,7 +2430,7 @@ func (kl *Kubelet) HandlePodDeletions(pods []*api.Pod) {
 		// Deletion is allowed to fail because the periodic cleanup routine
 		// will trigger deletion again.
 		if err := kl.deletePod(pod.UID); err != nil {
-			glog.V(2).Infof("Failed to delete pod %q, err: %v", kubeletutil.FormatPodName(pod), err)
+			glog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err)
 		}
 		kl.probeManager.RemovePod(pod)
 	}
diff --git a/pkg/kubelet/prober/manager.go b/pkg/kubelet/prober/manager.go
index 98cd088ee23..e6fa47dd2e1 100644
--- a/pkg/kubelet/prober/manager.go
+++ b/pkg/kubelet/prober/manager.go
@@ -25,7 +25,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	kubeutil "k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -134,7 +134,7 @@ func (m *manager) AddPod(pod *api.Pod) {
 			key.probeType = readiness
 			if _, ok := m.workers[key]; ok {
 				glog.Errorf("Readiness probe already exists! %v - %v",
-					kubeutil.FormatPodName(pod), c.Name)
+					format.Pod(pod), c.Name)
 				return
 			}
 			w := newWorker(m, readiness, pod, c)
@@ -146,7 +146,7 @@ func (m *manager) AddPod(pod *api.Pod) {
 			key.probeType = liveness
 			if _, ok := m.workers[key]; ok {
 				glog.Errorf("Liveness probe already exists! %v - %v",
-					kubeutil.FormatPodName(pod), c.Name)
+					format.Pod(pod), c.Name)
 				return
 			}
 			w := newWorker(m, liveness, pod, c)
diff --git a/pkg/kubelet/prober/worker.go b/pkg/kubelet/prober/worker.go
index ee217c4eee8..5880ac7c420 100644
--- a/pkg/kubelet/prober/worker.go
+++ b/pkg/kubelet/prober/worker.go
@@ -23,7 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
-	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/util"
 )
 
@@ -125,14 +125,14 @@ func (w *worker) doProbe() (keepGoing bool) {
 	status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
 	if !ok {
 		// Either the pod has not been created yet, or it was already deleted.
-		glog.V(3).Infof("No status for pod: %v", kubeletutil.FormatPodName(w.pod))
+		glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod))
 		return true
 	}
 
 	// Worker should terminate if pod is terminated.
 	if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
 		glog.V(3).Infof("Pod %v %v, exiting probe worker",
-			kubeletutil.FormatPodName(w.pod), status.Phase)
+			format.Pod(w.pod), status.Phase)
 		return false
 	}
 
@@ -140,7 +140,7 @@ func (w *worker) doProbe() (keepGoing bool) {
 	if !ok {
 		// Either the container has not been created yet, or it was deleted.
 		glog.V(3).Infof("Non-existant container probed: %v - %v",
-			kubeletutil.FormatPodName(w.pod), w.container.Name)
+			format.Pod(w.pod), w.container.Name)
 		return true // Wait for more information.
 	}
 
@@ -154,7 +154,7 @@ func (w *worker) doProbe() (keepGoing bool) {
 
 	if c.State.Running == nil {
 		glog.V(3).Infof("Non-running container probed: %v - %v",
-			kubeletutil.FormatPodName(w.pod), w.container.Name)
+			format.Pod(w.pod), w.container.Name)
 		if !w.containerID.IsEmpty() {
 			w.resultsManager.Set(w.containerID, results.Failure, w.pod)
 		}
diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go
index 5bfe3fa8e8b..074f323eb94 100644
--- a/pkg/kubelet/rkt/rkt.go
+++ b/pkg/kubelet/rkt/rkt.go
@@ -44,7 +44,7 @@ import (
 	"k8s.io/kubernetes/pkg/credentialprovider"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
-	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
@@ -473,7 +473,7 @@ func (r *Runtime) makePodManifest(pod *api.Pod, pullSecrets []api.Secret) (*appc
 
 	volumeMap, ok := r.volumeGetter.GetVolumes(pod.UID)
 	if !ok {
-		return nil, fmt.Errorf("cannot get the volumes for pod %q", kubeletutil.FormatPodName(pod))
+		return nil, fmt.Errorf("cannot get the volumes for pod %q", format.Pod(pod))
 	}
 
 	// Set global volumes.
@@ -629,7 +629,7 @@ func (r *Runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k
 	}
 	units = append(units, newUnitOption(unitKubernetesSection, unitRestartCount, strconv.Itoa(restartCount)))
 
-	glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, kubeletutil.FormatPodName(pod))
+	glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, format.Pod(pod))
 	serviceFile, err := os.Create(serviceFilePath(serviceName))
 	if err != nil {
 		return "", nil, err
@@ -688,7 +688,7 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
 // RunPod first creates the unit file for a pod, and then
 // starts the unit over d-bus.
 func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
-	glog.V(4).Infof("Rkt starts to run pod: name %q.", kubeletutil.FormatPodName(pod))
+	glog.V(4).Infof("Rkt starts to run pod: name %q.", format.Pod(pod))
 
 	name, runtimePod, prepareErr := r.preparePod(pod, pullSecrets)
 
@@ -698,7 +698,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
 	for i, c := range pod.Spec.Containers {
 		ref, err := kubecontainer.GenerateContainerRef(pod, &c)
 		if err != nil {
-			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", kubeletutil.FormatPodName(pod), c.Name, err)
+			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", format.Pod(pod), c.Name, err)
 			continue
 		}
 		if prepareErr != nil {
@@ -989,7 +989,7 @@ func (r *Runtime) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {
 
 // SyncPod syncs the running pod to match the specified desired pod.
 func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
-	podFullName := kubeletutil.FormatPodName(pod)
+	podFullName := format.Pod(pod)
 
 	// Add references to all containers.
 	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
diff --git a/pkg/kubelet/status/manager.go b/pkg/kubelet/status/manager.go
index 920890c8f93..ce639ed9f23 100644
--- a/pkg/kubelet/status/manager.go
+++ b/pkg/kubelet/status/manager.go
@@ -30,7 +30,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 )
@@ -186,7 +186,7 @@ func (m *manager) SetContainerReadiness(pod *api.Pod, containerID kubecontainer.
 	oldStatus, found := m.podStatuses[pod.UID]
 	if !found {
 		glog.Warningf("Container readiness changed before pod has synced: %q - %q",
-			kubeletutil.FormatPodName(pod), containerID.String())
+			format.Pod(pod), containerID.String())
 		return
 	}
 	status := oldStatus.status
@@ -201,13 +201,13 @@ func (m *manager) SetContainerReadiness(pod *api.Pod, containerID kubecontainer.
 	}
 	if containerIndex == -1 {
 		glog.Warningf("Container readiness changed for unknown container: %q - %q",
-			kubeletutil.FormatPodName(pod), containerID.String())
+			format.Pod(pod), containerID.String())
 		return
 	}
 
 	if status.ContainerStatuses[containerIndex].Ready == ready {
 		glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready,
-			kubeletutil.FormatPodName(pod), containerID.String())
+			format.Pod(pod), containerID.String())
 		return
 	}
 
@@ -229,7 +229,7 @@ func (m *manager) TerminatePods(pods []*api.Pod) bool {
 			}
 		}
 		if sent := m.updateStatusInternal(pod, pod.Status); !sent {
-			glog.V(4).Infof("Termination notice for %q was dropped because the status channel is full", kubeletutil.FormatPodName(pod))
+			glog.V(4).Infof("Termination notice for %q was dropped because the status channel is full", format.Pod(pod))
 			allSent = false
 		}
 	}
@@ -244,7 +244,7 @@ func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus) bool
 	// clobbering each other so the phase of a pod progresses monotonically.
 	oldStatus, found := m.podStatuses[pod.UID]
 	if found && isStatusEqual(&oldStatus.status, &status) && pod.DeletionTimestamp == nil {
-		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", kubeletutil.FormatPodName(pod), status)
+		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
 		return false // No new status.
 	}
 
@@ -330,30 +330,30 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 	if err == nil {
 		translatedUID := m.podManager.TranslatePodUID(pod.UID)
 		if len(translatedUID) > 0 && translatedUID != uid {
-			glog.V(3).Infof("Pod %q was deleted and then recreated, skipping status update", kubeletutil.FormatPodName(pod))
+			glog.V(3).Infof("Pod %q was deleted and then recreated, skipping status update", format.Pod(pod))
 			m.deletePodStatus(uid)
 			return
 		}
 		if !m.needsUpdate(pod.UID, status) {
-			glog.V(1).Infof("Status for pod %q is up-to-date; skipping", kubeletutil.FormatPodName(pod))
+			glog.V(1).Infof("Status for pod %q is up-to-date; skipping", format.Pod(pod))
 			return
 		}
 		pod.Status = status.status
 		// TODO: handle conflict as a retry, make that easier too.
 		pod, err = m.kubeClient.Pods(pod.Namespace).UpdateStatus(pod)
 		if err == nil {
-			glog.V(3).Infof("Status for pod %q updated successfully: %+v", kubeletutil.FormatPodName(pod), status)
+			glog.V(3).Infof("Status for pod %q updated successfully: %+v", format.Pod(pod), status)
 			m.apiStatusVersions[pod.UID] = status.version
 
 			if pod.DeletionTimestamp == nil {
 				return
 			}
 			if !notRunning(pod.Status.ContainerStatuses) {
-				glog.V(3).Infof("Pod %q is terminated, but some containers are still running", kubeletutil.FormatPodName(pod))
+				glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
 				return
 			}
 			if err := m.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err == nil {
-				glog.V(3).Infof("Pod %q fully terminated and removed from etcd", kubeletutil.FormatPodName(pod))
+				glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod))
 				m.deletePodStatus(uid)
 				return
 			}
@@ -361,7 +361,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 	}
 
 	// We failed to update status, wait for periodic sync to retry.
-	glog.Warningf("Failed to updated status for pod %q: %v", kubeletutil.FormatPodName(pod), err)
+	glog.Warningf("Failed to updated status for pod %q: %v", format.Pod(pod), err)
 }
 
 // needsUpdate returns whether the status is stale for the given pod UID.
diff --git a/pkg/kubelet/util/format.go b/pkg/kubelet/util/format/pod.go
similarity index 62%
rename from pkg/kubelet/util/format.go
rename to pkg/kubelet/util/format/pod.go
index 9c1ce16ff27..a37d6e6acec 100644
--- a/pkg/kubelet/util/format.go
+++ b/pkg/kubelet/util/format/pod.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package util
+package format
 
 import (
 	"fmt"
@@ -23,22 +23,34 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 )
 
-// FormatPodName returns a string representating a pod in a human readable
+type podHandler func(*api.Pod) string
+
+// Pod returns a string representing a pod in a human readable
 // format. This function currently is the same as GetPodFullName in
 // kubelet/containers, but may differ in the future. As opposed to
-// GetPodFullName, FormatPodName is mainly used for logging.
-func FormatPodName(pod *api.Pod) string {
+// GetPodFullName, this function is mainly used for logging.
+func Pod(pod *api.Pod) string {
 	// Use underscore as the delimiter because it is not allowed in pod name
 	// (DNS subdomain format), while allowed in the container name format.
 	return fmt.Sprintf("%s_%s", pod.Name, pod.Namespace)
 }
 
-// FormatPodNames returns a string representating a list of pods in a human
+// PodWithUID returns a string representing a pod in a human readable format,
+// with pod UID as part of the string.
+func PodWithUID(pod *api.Pod) string {
+	return fmt.Sprintf("%s(%s)", Pod(pod), pod.UID)
+}
+
+// Pods returns a string representing a list of pods in a human
+// readable format.
-func FormatPodNames(pods []*api.Pod) string {
+func Pods(pods []*api.Pod) string {
+	return aggregatePods(pods, PodWithUID)
+}
+
+func aggregatePods(pods []*api.Pod, handler podHandler) string {
 	podStrings := make([]string, 0, len(pods))
 	for _, pod := range pods {
-		podStrings = append(podStrings, FormatPodName(pod))
+		podStrings = append(podStrings, handler(pod))
 	}
 	return fmt.Sprintf(strings.Join(podStrings, ", "))
 }
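
Usage sketch (not part of the patch): the new helpers are drop-in replacements for the old kubeletutil.FormatPodName/FormatPodNames calls. The import paths and function signatures come from the diff above; the example program, the pod literal, and the log messages are illustrative assumptions only.

package main

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
)

func main() {
	// Hypothetical pod used only for illustration.
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "nginx", Namespace: "default", UID: "1234"}}

	// format.Pod renders "<name>_<namespace>", e.g. "nginx_default".
	glog.V(2).Infof("Syncing pod %q", format.Pod(pod))

	// format.PodWithUID appends the UID, e.g. "nginx_default(1234)".
	glog.V(2).Infof("Syncing pod %q", format.PodWithUID(pod))

	// format.Pods joins the pods with ", ", rendering each via PodWithUID.
	glog.V(2).Infof("SyncLoop (SYNC): %s", format.Pods([]*api.Pod{pod}))
}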