Mirror of https://github.com/k3s-io/kubernetes.git (synced 2026-01-06 07:57:35 +00:00)
Merge pull request #23567 from smarterclayton/init_containers
Automatic merge from submit-queue

Add init containers to pods

This implements #1589 as per proposal #23666. It incorporates feedback on #1589, creates a parallel structure for InitContainers and Containers, adds validation for InitContainers that requires name uniqueness, and comments on a number of implications of init containers. This is a complete alpha implementation.
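For illustration only (not part of the commit): a minimal sketch of the parallel structure on the internal API, assuming the example container names "setup" and "app" and the same internal api package the diff below uses. Init containers are listed separately from regular containers, are validated for name uniqueness, and run to completion, in order, before the regular containers start.

    // Sketch only; "setup" and "app" are assumed example names.
    func examplePodWithInitContainers() *api.Pod {
    	return &api.Pod{
    		Spec: api.PodSpec{
    			// InitContainers run to completion, in order, before Containers start.
    			InitContainers: []api.Container{
    				{Name: "setup", Image: "busybox", Command: []string{"sh", "-c", "echo init"}},
    			},
    			Containers: []api.Container{
    				{Name: "app", Image: "nginx"},
    			},
    			RestartPolicy: api.RestartPolicyAlways,
    		},
    	}
    }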
@@ -1784,6 +1784,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		!firstSeenTime.IsZero() {
 		metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
 	}
 
 	// Update status in the status manager
 	kl.statusManager.SetPodStatus(pod, apiPodStatus)
 
@@ -2370,6 +2371,10 @@ func hasHostPortConflicts(pods []*api.Pod) bool {
 			glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
 			return true
 		}
+		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 {
+			glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
+			return true
+		}
 	}
 	return false
 }
@@ -3392,12 +3397,46 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 // This func is exported to simplify integration with 3rd party kubelet
 // integrations like kubernetes-mesos.
 func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
+	initialized := 0
+	pendingInitialization := 0
+	failedInitialization := 0
+	for _, container := range spec.InitContainers {
+		containerStatus, ok := api.GetContainerStatus(info, container.Name)
+		if !ok {
+			pendingInitialization++
+			continue
+		}
+
+		switch {
+		case containerStatus.State.Running != nil:
+			pendingInitialization++
+		case containerStatus.State.Terminated != nil:
+			if containerStatus.State.Terminated.ExitCode == 0 {
+				initialized++
+			} else {
+				failedInitialization++
+			}
+		case containerStatus.State.Waiting != nil:
+			if containerStatus.LastTerminationState.Terminated != nil {
+				if containerStatus.LastTerminationState.Terminated.ExitCode == 0 {
+					initialized++
+				} else {
+					failedInitialization++
+				}
+			} else {
+				pendingInitialization++
+			}
+		default:
+			pendingInitialization++
+		}
+	}
+
+	unknown := 0
 	running := 0
 	waiting := 0
 	stopped := 0
 	failed := 0
 	succeeded := 0
-	unknown := 0
 	for _, container := range spec.Containers {
 		containerStatus, ok := api.GetContainerStatus(info, container.Name)
 		if !ok {
@@ -3426,7 +3465,13 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
 		}
 	}
 
+	if failedInitialization > 0 && spec.RestartPolicy == api.RestartPolicyNever {
+		return api.PodFailed
+	}
+
 	switch {
+	case pendingInitialization > 0:
+		fallthrough
 	case waiting > 0:
 		glog.V(5).Infof("pod waiting > 0, pending")
 		// One or more containers has not been started
@@ -3491,8 +3536,10 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P
 
 	// Assume info is ready to process
 	spec := &pod.Spec
-	s.Phase = GetPhase(spec, s.ContainerStatuses)
+	allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
+	s.Phase = GetPhase(spec, allStatus)
 	kl.probeManager.UpdatePodStatus(pod.UID, s)
+	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
 	// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus()
 	// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
@@ -3525,9 +3572,27 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P
 // alter the kubelet state at all.
 func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus {
 	var apiPodStatus api.PodStatus
-	uid := pod.UID
 	apiPodStatus.PodIP = podStatus.IP
 
+	apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
+		pod, podStatus,
+		pod.Status.ContainerStatuses,
+		pod.Spec.Containers,
+		len(pod.Spec.InitContainers) > 0,
+		false,
+	)
+	apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
+		pod, podStatus,
+		pod.Status.InitContainerStatuses,
+		pod.Spec.InitContainers,
+		len(pod.Spec.InitContainers) > 0,
+		true,
+	)
+
+	return &apiPodStatus
+}
+
+func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubecontainer.PodStatus, previousStatus []api.ContainerStatus, containers []api.Container, hasInitContainers, isInitContainer bool) []api.ContainerStatus {
 	convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus {
 		cid := cs.ID.String()
 		status := &api.ContainerStatus{
@@ -3556,15 +3621,19 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
 	}
 
 	// Fetch old containers statuses from old pod status.
-	oldStatuses := make(map[string]api.ContainerStatus, len(pod.Spec.Containers))
-	for _, status := range pod.Status.ContainerStatuses {
+	oldStatuses := make(map[string]api.ContainerStatus, len(containers))
+	for _, status := range previousStatus {
 		oldStatuses[status.Name] = status
 	}
 
 	// Set all container statuses to default waiting state
-	statuses := make(map[string]*api.ContainerStatus, len(pod.Spec.Containers))
+	statuses := make(map[string]*api.ContainerStatus, len(containers))
 	defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}}
-	for _, container := range pod.Spec.Containers {
+	if hasInitContainers {
+		defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}}
+	}
+
+	for _, container := range containers {
 		status := &api.ContainerStatus{
 			Name:  container.Name,
 			Image: container.Image,
@@ -3580,7 +3649,6 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
 
 	// Make the latest container status comes first.
 	sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses)))
-
 	// Set container statuses according to the statuses seen in pod status
 	containerSeen := map[string]int{}
 	for _, cStatus := range podStatus.ContainerStatuses {
@@ -3602,13 +3670,13 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
 	}
 
 	// Handle the containers failed to be started, which should be in Waiting state.
-	for _, container := range pod.Spec.Containers {
+	for _, container := range containers {
 		// If a container should be restarted in next syncpod, it is *Waiting*.
 		if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
 			continue
 		}
 		status := statuses[container.Name]
-		reason, message, ok := kl.reasonCache.Get(uid, container.Name)
+		reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name)
 		if !ok {
 			// In fact, we could also apply Waiting state here, but it is less informative,
 			// and the container will be restarted soon, so we prefer the original state here.
@@ -3630,15 +3698,15 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
 		statuses[container.Name] = status
 	}
 
-	apiPodStatus.ContainerStatuses = make([]api.ContainerStatus, 0)
+	var containerStatuses []api.ContainerStatus
 	for _, status := range statuses {
-		apiPodStatus.ContainerStatuses = append(apiPodStatus.ContainerStatuses, *status)
+		containerStatuses = append(containerStatuses, *status)
 	}
 
 	// Sort the container statuses since clients of this interface expect the list
 	// of containers in a pod has a deterministic order.
-	sort.Sort(kubetypes.SortedContainerStatuses(apiPodStatus.ContainerStatuses))
-	return &apiPodStatus
+	sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
+	return containerStatuses
 }
 
 // Returns logs of current machine.
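A minimal usage sketch (not part of the commit) of the GetPhase change above, assuming the same internal api package and an assumed example init container named "setup": a failed init container under RestartPolicyNever returns api.PodFailed before the phase switch runs.

    // Sketch only: exercises the failedInitialization branch added in this diff.
    func examplePhaseForFailedInit() api.PodPhase {
    	spec := &api.PodSpec{
    		RestartPolicy:  api.RestartPolicyNever,
    		InitContainers: []api.Container{{Name: "setup"}}, // assumed example name
    		Containers:     []api.Container{{Name: "app"}},
    	}
    	statuses := []api.ContainerStatus{{
    		Name:  "setup",
    		State: api.ContainerState{Terminated: &api.ContainerStateTerminated{ExitCode: 1}},
    	}}
    	return GetPhase(spec, statuses) // expected: api.PodFailed
    }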