Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
Merge pull request #15595 from zhengguoyong/del_capatical_packagename_for_kubeletUtil
Del capatical local packagename for kubeletUtil
This commit is contained in:
commit
6248cd71b6
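
This change renames the local import alias for k8s.io/kubernetes/pkg/kubelet/util from the mixed-case kubeletUtil (and the prober's inconsistent kubeutil) to the all-lowercase kubeletutil. Go style treats import aliases like package names: all lowercase, no MixedCaps or underscores, so linters such as golint flag mixed-case aliases. A minimal, self-contained sketch of the convention, using a hypothetical standard-library alias rather than the PR's actual files:

package main

import (
	// A mixed-case alias such as `cryptoRand "crypto/rand"` compiles,
	// but Go style (and golint) calls for all-lowercase aliases:
	cryptorand "crypto/rand"
	"fmt"
)

func main() {
	// Use the lowercase alias at every call site, exactly as the
	// renamed kubeletutil is used throughout the hunks below.
	buf := make([]byte, 4)
	if _, err := cryptorand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf)
}
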
@@ -27,7 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-	kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util"
+	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
 	"k8s.io/kubernetes/pkg/util/config"
 	utilerrors "k8s.io/kubernetes/pkg/util/errors"
 	"k8s.io/kubernetes/pkg/util/fielderrors"
@@ -378,7 +378,7 @@ func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool {
 
 // recordFirstSeenTime records the first seen time of this pod.
 func recordFirstSeenTime(pod *api.Pod) {
-	glog.V(4).Infof("Receiving a new pod %q", kubeletUtil.FormatPodName(pod))
+	glog.V(4).Infof("Receiving a new pod %q", kubeletutil.FormatPodName(pod))
 	pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString()
 }
 
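
Nearly every hunk in this PR touches a call to FormatPodName, which renders a pod as a single loggable string. That helper lives in pkg/kubelet/util and is unchanged here; the stand-in below is only a sketch of its shape (the real formatting may differ):

package main

import "fmt"

// pod is an illustrative stand-in for *api.Pod; only the fields
// needed for formatting are modeled.
type pod struct {
	Name      string
	Namespace string
}

// formatPodName is a hypothetical stand-in for kubeletutil.FormatPodName:
// it joins the pod's name and namespace into one identifier for logs.
func formatPodName(p *pod) string {
	return fmt.Sprintf("%s_%s", p.Name, p.Namespace)
}

func main() {
	p := &pod{Name: "nginx", Namespace: "default"}
	fmt.Printf("Receiving a new pod %q\n", formatPodName(p))
}
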
@@ -57,7 +57,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/rkt"
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-	kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util"
+	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/probe"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -1966,13 +1966,13 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan kubetypes.PodUpdate, handler
 		kl.addSource(u.Source)
 		switch u.Op {
 		case kubetypes.ADD:
-			glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, kubeletUtil.FormatPodNames(u.Pods))
+			glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods))
 			handler.HandlePodAdditions(u.Pods)
 		case kubetypes.UPDATE:
-			glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, kubeletUtil.FormatPodNames(u.Pods))
+			glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods))
 			handler.HandlePodUpdates(u.Pods)
 		case kubetypes.REMOVE:
-			glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, kubeletUtil.FormatPodNames(u.Pods))
+			glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, kubeletutil.FormatPodNames(u.Pods))
 			handler.HandlePodDeletions(u.Pods)
 		case kubetypes.SET:
 			// TODO: Do we want to support this?
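
For context, syncLoopIteration drains a channel of pod updates and dispatches on the operation type; the rename only touches its log lines. A stripped-down sketch of that dispatch loop, with illustrative stand-ins for kubetypes.PodUpdate and its op constants:

package main

import "fmt"

// Illustrative stand-ins; the real types live in pkg/kubelet/types.
type podOp int

const (
	opAdd podOp = iota
	opUpdate
	opRemove
)

type podUpdate struct {
	Op     podOp
	Source string
	Pods   []string
}

// syncLoopIteration mirrors the dispatch shape of the kubelet's loop:
// read an update, switch on its op, and hand the pods to a handler.
func syncLoopIteration(updates <-chan podUpdate) {
	for u := range updates {
		switch u.Op {
		case opAdd:
			fmt.Printf("SyncLoop (ADD, %q): %q\n", u.Source, u.Pods)
		case opUpdate:
			fmt.Printf("SyncLoop (UPDATE, %q): %q\n", u.Source, u.Pods)
		case opRemove:
			fmt.Printf("SyncLoop (REMOVE, %q): %q\n", u.Source, u.Pods)
		}
	}
}

func main() {
	ch := make(chan podUpdate, 1)
	ch <- podUpdate{Op: opAdd, Source: "api", Pods: []string{"nginx_default"}}
	close(ch)
	syncLoopIteration(ch)
}
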
@@ -2063,7 +2063,7 @@ func (kl *Kubelet) HandlePodDeletions(pods []*api.Pod) {
 		// Deletion is allowed to fail because the periodic cleanup routine
 		// will trigger deletion again.
 		if err := kl.deletePod(pod.UID); err != nil {
-			glog.V(2).Infof("Failed to delete pod %q, err: %v", kubeletUtil.FormatPodName(pod), err)
+			glog.V(2).Infof("Failed to delete pod %q, err: %v", kubeletutil.FormatPodName(pod), err)
 		}
 		kl.probeManager.RemovePod(pod)
 	}
@@ -23,7 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
-	kubeutil "k8s.io/kubernetes/pkg/kubelet/util"
+	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
 	"k8s.io/kubernetes/pkg/probe"
 	"k8s.io/kubernetes/pkg/util"
 )
@@ -102,14 +102,14 @@ func doProbe(m *manager, w *worker) (keepGoing bool) {
 	status, ok := m.statusManager.GetPodStatus(w.pod.UID)
 	if !ok {
 		// Either the pod has not been created yet, or it was already deleted.
-		glog.V(3).Infof("No status for pod: %v", kubeutil.FormatPodName(w.pod))
+		glog.V(3).Infof("No status for pod: %v", kubeletutil.FormatPodName(w.pod))
 		return true
 	}
 
 	// Worker should terminate if pod is terminated.
 	if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
 		glog.V(3).Infof("Pod %v %v, exiting probe worker",
-			kubeutil.FormatPodName(w.pod), status.Phase)
+			kubeletutil.FormatPodName(w.pod), status.Phase)
 		return false
 	}
 
@@ -117,7 +117,7 @@ func doProbe(m *manager, w *worker) (keepGoing bool) {
 	if !ok {
 		// Either the container has not been created yet, or it was deleted.
 		glog.V(3).Infof("Non-existant container probed: %v - %v",
-			kubeutil.FormatPodName(w.pod), w.container.Name)
+			kubeletutil.FormatPodName(w.pod), w.container.Name)
 		return true // Wait for more information.
 	}
 
@@ -130,7 +130,7 @@ func doProbe(m *manager, w *worker) (keepGoing bool) {
 
 	if c.State.Running == nil {
 		glog.V(3).Infof("Non-running container probed: %v - %v",
-			kubeutil.FormatPodName(w.pod), w.container.Name)
+			kubeletutil.FormatPodName(w.pod), w.container.Name)
 		m.readinessCache.Set(w.containerID, results.Failure)
 		// Abort if the container will not be restarted.
 		return c.State.Terminated == nil ||
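
The doProbe hunks above all share one contract: the function returns keepGoing, and the probe worker's loop exits as soon as it returns false (pod terminated, pod deleted, and so on). A self-contained sketch of a loop driven by that contract, with a dummy probe standing in for the real one:

package main

import (
	"fmt"
	"time"
)

// doProbe is an illustrative stand-in: it returns false when the worker
// should exit, mirroring the keepGoing contract in the prober's worker.
func doProbe(attempt int) (keepGoing bool) {
	fmt.Println("probing, attempt", attempt)
	return attempt < 3 // pretend the pod terminates after three probes
}

func main() {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for attempt := 1; ; attempt++ {
		if !doProbe(attempt) {
			fmt.Println("exiting probe worker")
			return
		}
		<-ticker.C // probe again on the next tick
	}
}
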
@@ -42,7 +42,7 @@ import (
 	"k8s.io/kubernetes/pkg/credentialprovider"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober"
-	kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util"
+	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
 	"k8s.io/kubernetes/pkg/probe"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/types"
@@ -459,7 +459,7 @@ func (r *Runtime) makePodManifest(pod *api.Pod, pullSecrets []api.Secret) (*appc
 
 	volumeMap, ok := r.volumeGetter.GetVolumes(pod.UID)
 	if !ok {
-		return nil, fmt.Errorf("cannot get the volumes for pod %q", kubeletUtil.FormatPodName(pod))
+		return nil, fmt.Errorf("cannot get the volumes for pod %q", kubeletutil.FormatPodName(pod))
 	}
 
 	// Set global volumes.
@@ -615,7 +615,7 @@ func (r *Runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k
 	}
 	units = append(units, newUnitOption(unitKubernetesSection, unitRestartCount, strconv.Itoa(restartCount)))
 
-	glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, kubeletUtil.FormatPodName(pod))
+	glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, kubeletutil.FormatPodName(pod))
 	serviceFile, err := os.Create(serviceFilePath(serviceName))
 	if err != nil {
 		return "", nil, err
@@ -674,7 +674,7 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
 // RunPod first creates the unit file for a pod, and then
 // starts the unit over d-bus.
 func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
-	glog.V(4).Infof("Rkt starts to run pod: name %q.", kubeletUtil.FormatPodName(pod))
+	glog.V(4).Infof("Rkt starts to run pod: name %q.", kubeletutil.FormatPodName(pod))
 
 	name, runtimePod, prepareErr := r.preparePod(pod, pullSecrets)
 
@@ -684,7 +684,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
 	for i, c := range pod.Spec.Containers {
 		ref, err := kubecontainer.GenerateContainerRef(pod, &c)
 		if err != nil {
-			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", kubeletUtil.FormatPodName(pod), c.Name, err)
+			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", kubeletutil.FormatPodName(pod), c.Name, err)
 			continue
 		}
 		if prepareErr != nil {
@@ -979,7 +979,7 @@ func (r *Runtime) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {
 
 // SyncPod syncs the running pod to match the specified desired pod.
 func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
-	podFullName := kubeletUtil.FormatPodName(pod)
+	podFullName := kubeletutil.FormatPodName(pod)
 
 	// Add references to all containers.
 	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
@@ -28,7 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-	kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util"
+	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 )
@@ -168,7 +168,7 @@ func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
 		m.podStatuses[pod.UID] = status
 		m.podStatusChannel <- podStatusSyncRequest{pod, status}
 	} else {
-		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", kubeletUtil.FormatPodName(pod), status)
+		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", kubeletutil.FormatPodName(pod), status)
 	}
 }
 
@@ -186,7 +186,7 @@ func (m *manager) TerminatePods(pods []*api.Pod) bool {
 		case m.podStatusChannel <- podStatusSyncRequest{pod, pod.Status}:
 		default:
 			sent = false
-			glog.V(4).Infof("Termination notice for %q was dropped because the status channel is full", kubeletUtil.FormatPodName(pod))
+			glog.V(4).Infof("Termination notice for %q was dropped because the status channel is full", kubeletutil.FormatPodName(pod))
 		}
 	}
 	return sent
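
The TerminatePods hunk above relies on a select with a default case, Go's idiom for a non-blocking channel send: when podStatusChannel's buffer is full, the notice is dropped and logged rather than blocking the caller. A self-contained sketch of that idiom (types and names here are illustrative):

package main

import "fmt"

type statusUpdate struct{ podName string }

func main() {
	ch := make(chan statusUpdate, 1) // small buffer, like podStatusChannel
	sent := true
	for _, name := range []string{"pod-a", "pod-b"} {
		select {
		case ch <- statusUpdate{podName: name}:
			// Buffer had room; the update was enqueued.
		default:
			// Buffer full: drop instead of blocking, as TerminatePods does.
			sent = false
			fmt.Printf("Termination notice for %q was dropped\n", name)
		}
	}
	fmt.Println("all sent:", sent)
}
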
@@ -228,14 +228,14 @@ func (m *manager) syncBatch() error {
 	}
 	if err == nil {
 		if len(pod.UID) > 0 && statusPod.UID != pod.UID {
-			glog.V(3).Infof("Pod %q was deleted and then recreated, skipping status update", kubeletUtil.FormatPodName(pod))
+			glog.V(3).Infof("Pod %q was deleted and then recreated, skipping status update", kubeletutil.FormatPodName(pod))
 			return nil
 		}
 		statusPod.Status = status
 		// TODO: handle conflict as a retry, make that easier too.
 		statusPod, err = m.kubeClient.Pods(pod.Namespace).UpdateStatus(statusPod)
 		if err == nil {
-			glog.V(3).Infof("Status for pod %q updated successfully", kubeletUtil.FormatPodName(pod))
+			glog.V(3).Infof("Status for pod %q updated successfully", kubeletutil.FormatPodName(pod))
 
 			if pod.DeletionTimestamp == nil {
 				return nil
@@ -260,7 +260,7 @@ func (m *manager) syncBatch() error {
 	// to clear the channel. Even if this delete never runs subsequent container
 	// changes on the node should trigger updates.
 	go m.DeletePodStatus(pod.UID)
-	return fmt.Errorf("error updating status for pod %q: %v", kubeletUtil.FormatPodName(pod), err)
+	return fmt.Errorf("error updating status for pod %q: %v", kubeletutil.FormatPodName(pod), err)
 }
 
 // notRunning returns true if every status is terminated or waiting, or the status list