From 6685715c4ca1f143108273edd038806b2bb5fd72 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Mon, 28 Mar 2016 22:13:16 -0400 Subject: [PATCH 1/6] API for adding init containers --- pkg/api/resource_helpers.go | 11 +++ pkg/api/serialization_proto_test.go | 5 +- pkg/api/types.go | 11 ++- pkg/api/v1/conversion.go | 135 ++++++++++++++++++++++++++++ pkg/api/v1/types.go | 33 ++++++- pkg/api/validation/validation.go | 32 +++++++ 6 files changed, 224 insertions(+), 3 deletions(-) diff --git a/pkg/api/resource_helpers.go b/pkg/api/resource_helpers.go index 1e3b3698b54..65bc1b85f08 100644 --- a/pkg/api/resource_helpers.go +++ b/pkg/api/resource_helpers.go @@ -131,6 +131,17 @@ func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { } } +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodCondition(status PodStatus, t PodConditionType) *PodCondition { + for i, c := range status.Conditions { + if c.Type == t { + return &status.Conditions[i] + } + } + return nil +} + // IsNodeReady returns true if a node is ready; false otherwise. func IsNodeReady(node *Node) bool { for _, c := range node.Status.Conditions { diff --git a/pkg/api/serialization_proto_test.go b/pkg/api/serialization_proto_test.go index e35708340b5..a108a98308c 100644 --- a/pkg/api/serialization_proto_test.go +++ b/pkg/api/serialization_proto_test.go @@ -67,6 +67,9 @@ func TestUniversalDeserializer(t *testing.T) { func TestProtobufRoundTrip(t *testing.T) { obj := &v1.Pod{} apitesting.FuzzerFor(t, v1.SchemeGroupVersion, rand.NewSource(benchmarkSeed)).Fuzz(obj) + // InitContainers are turned into annotations by conversion. + obj.Spec.InitContainers = nil + obj.Status.InitContainerStatuses = nil data, err := obj.Marshal() if err != nil { t.Fatal(err) @@ -77,7 +80,7 @@ func TestProtobufRoundTrip(t *testing.T) { } if !api.Semantic.Equalities.DeepEqual(out, obj) { t.Logf("marshal\n%s", hex.Dump(data)) - t.Fatalf("Unmarshal is unequal\n%s", diff.ObjectGoPrintSideBySide(out, obj)) + t.Fatalf("Unmarshal is unequal\n%s", diff.ObjectGoPrintDiff(out, obj)) } } diff --git a/pkg/api/types.go b/pkg/api/types.go index 911ad13ad71..c694cc09a45 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1076,6 +1076,8 @@ const ( // PodReady means the pod is able to service requests and should be added to the // load balancing pools of all matching services. PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" ) type PodCondition struct { @@ -1309,7 +1311,9 @@ type PreferredSchedulingTerm struct { // PodSpec is a description of a pod type PodSpec struct { Volumes []Volume `json:"volumes"` - // Required: there must be at least one container in a pod. + // List of initialization containers belonging to the pod. + InitContainers []Container `json:"-"` + // List of containers belonging to the pod. Containers []Container `json:"containers"` RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. @@ -1416,6 +1420,11 @@ type PodStatus struct { // This is before the Kubelet pulled the container image(s) for the pod. StartTime *unversioned.Time `json:"startTime,omitempty"` + // The list has one entry per init container in the manifest. 
The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses + InitContainerStatuses []ContainerStatus `json:"-"` // The list has one entry per container in the manifest. Each entry is // currently the output of `docker inspect`. This output format is *not* // final and should not be relied upon. diff --git a/pkg/api/v1/conversion.go b/pkg/api/v1/conversion.go index a3631d00c75..7dc8f1e84da 100644 --- a/pkg/api/v1/conversion.go +++ b/pkg/api/v1/conversion.go @@ -17,6 +17,7 @@ limitations under the License. package v1 import ( + "encoding/json" "fmt" inf "gopkg.in/inf.v0" @@ -258,6 +259,75 @@ func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *R return nil } +func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil { + return err + } + + if len(out.Status.InitContainerStatuses) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + } else { + delete(in.Annotations, PodInitContainerStatusesAnnotationKey) + } + return nil +} + +func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + // TODO: when we move init container to beta, remove these conversions + if value := in.Annotations[PodInitContainerStatusesAnnotationKey]; len(value) > 0 { + delete(in.Annotations, PodInitContainerStatusesAnnotationKey) + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + in.Status.InitContainerStatuses = values + } + + return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s) +} + +func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { + return err + } + + // TODO: when we move init container to beta, remove these conversions + if len(out.Spec.InitContainers) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainersAnnotationKey) + } + return nil +} + +func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { + // TODO: when we move init container to beta, remove these conversions + if value := in.Annotations[PodInitContainersAnnotationKey]; len(value) > 0 { + delete(in.Annotations, PodInitContainersAnnotationKey) + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + in.Spec.InitContainers = values + } + + return autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s) +} + // The following two PodSpec conversions are done here to support ServiceAccount // as an alias for ServiceAccountName. 
func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { @@ -271,6 +341,16 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi } else { out.Volumes = nil } + if in.InitContainers != nil { + out.InitContainers = make([]Container, len(in.InitContainers)) + for i := range in.InitContainers { + if err := Convert_api_Container_To_v1_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { out.Containers = make([]Container, len(in.Containers)) for i := range in.Containers { @@ -346,6 +426,16 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversi } else { out.Volumes = nil } + if in.InitContainers != nil { + out.InitContainers = make([]api.Container, len(in.InitContainers)) + for i := range in.InitContainers { + if err := Convert_v1_Container_To_api_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { out.Containers = make([]api.Container, len(in.Containers)) for i := range in.Containers { @@ -419,6 +509,33 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { return err } + + // TODO: when we move init container to beta, remove these conversions + if len(out.Spec.InitContainers) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainersAnnotationKey) + } + if len(out.Status.InitContainerStatuses) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + } else { + delete(in.Annotations, PodInitContainerStatusesAnnotationKey) + } + // We need to reset certain fields for mirror pods from pre-v1.1 kubelet // (#15960). // TODO: Remove this code after we drop support for v1.0 kubelets. @@ -434,6 +551,24 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error } func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { + // TODO: when we move init container to beta, remove these conversions + if value := in.Annotations[PodInitContainersAnnotationKey]; len(value) > 0 { + delete(in.Annotations, PodInitContainersAnnotationKey) + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + in.Spec.InitContainers = values + } + if value := in.Annotations[PodInitContainerStatusesAnnotationKey]; len(value) > 0 { + delete(in.Annotations, PodInitContainerStatusesAnnotationKey) + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + in.Status.InitContainerStatuses = values + } + return autoConvert_v1_Pod_To_api_Pod(in, out, s) } diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index b1fa56f0578..315c9193fd9 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -1106,7 +1106,7 @@ type Container struct { // Cannot be updated. 
// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - // Pod volumes to mount into the container's filesyste. + // Pod volumes to mount into the container's filesystem. // Cannot be updated. VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeMounts"` // Periodic probe of container liveness. @@ -1541,11 +1541,36 @@ type PreferredSchedulingTerm struct { Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` } +const ( + // This annotation key will be used to contain an array of v1 JSON encoded Containers + // for init containers. The annotation will be placed into the internal type and cleared. + PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. + PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" +) + // PodSpec is a description of a pod. type PodSpec struct { // List of volumes that can be mounted by containers belonging to the pod. // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Init containers are in alpha state and may change without notice. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md + InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. @@ -1679,6 +1704,12 @@ type PodStatus struct { // This is before the Kubelet pulled the container image(s) for the pod. StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // Init containers are in alpha state and may change without notice. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses + InitContainerStatuses []ContainerStatus `json:"-"` // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. 
// More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index ad55b55c128..ce5ba70d90f 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -1326,6 +1326,37 @@ func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorL return allErrors } +func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if len(containers) > 0 { + allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...) + } + + allNames := sets.String{} + for _, ctr := range otherContainers { + allNames.Insert(ctr.Name) + } + for i, ctr := range containers { + idxPath := fldPath.Index(i) + if allNames.Has(ctr.Name) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) + } + if len(ctr.Name) > 0 { + allNames.Insert(ctr.Name) + } + if ctr.Lifecycle != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers")) + } + if ctr.LivenessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers")) + } + if ctr.ReadinessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers")) + } + } + return allErrs +} + func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1451,6 +1482,7 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { allVolumes, vErrs := validateVolumes(spec.Volumes, fldPath.Child("volumes")) allErrs = append(allErrs, vErrs...) allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...) + allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) 
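
Because the new v1 InitContainers field is serialized as json:"-", the only external representation of init containers at this alpha stage is the pod.alpha.kubernetes.io/init-containers annotation written and consumed by the conversions above (pod.alpha.kubernetes.io/init-container-statuses plays the same role for status). The standalone sketch below illustrates that round trip; the container type and the example name and image are simplified stand-ins for illustration, not the real v1.Container.

package main

import (
	"encoding/json"
	"fmt"
)

// container is an illustrative stand-in for v1.Container with only two fields.
type container struct {
	Name  string `json:"name"`
	Image string `json:"image"`
}

const initContainersKey = "pod.alpha.kubernetes.io/init-containers"

func main() {
	annotations := map[string]string{}

	// Outbound (api -> v1): JSON-encode the init containers into the annotation,
	// mirroring Convert_api_Pod_To_v1_Pod above.
	initContainers := []container{{Name: "wait-for-db", Image: "busybox"}}
	value, err := json.Marshal(initContainers)
	if err != nil {
		panic(err)
	}
	annotations[initContainersKey] = string(value)
	fmt.Println(annotations[initContainersKey])

	// Inbound (v1 -> api): decode the annotation back into typed containers and
	// clear it, mirroring Convert_v1_Pod_To_api_Pod above.
	var decoded []container
	if v := annotations[initContainersKey]; len(v) > 0 {
		delete(annotations, initContainersKey)
		if err := json.Unmarshal([]byte(v), &decoded); err != nil {
			panic(err)
		}
	}
	fmt.Printf("%+v\n", decoded)
}

The same encode-then-clear and decode-then-clear pattern is applied to PodTemplateSpec and PodStatusResult so that controllers and the kubelet observe the annotation consistently.
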
From 6cc6d293398af79d7305c0fa8282a0ace77c4890 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Mon, 28 Mar 2016 23:27:33 -0400 Subject: [PATCH 2/6] Generated code for init containers --- CHANGELOG.md | 1 + api/swagger-spec/apps_v1alpha1.json | 2 +- api/swagger-spec/batch_v1.json | 2 +- api/swagger-spec/extensions_v1beta1.json | 2 +- api/swagger-spec/v1.json | 2 +- docs/api-reference/batch/v1/definitions.html | 4 +- .../extensions/v1beta1/definitions.html | 2 +- docs/api-reference/v1/definitions.html | 2 +- pkg/api/deep_copy_generated.go | 22 +++++++++ pkg/api/types.generated.go | 4 +- pkg/api/v1/conversion_generated.go | 49 +++++++++++++------ pkg/api/v1/deep_copy_generated.go | 22 +++++++++ pkg/api/v1/generated.proto | 2 +- pkg/api/v1/types.generated.go | 4 +- pkg/api/v1/types_swagger_doc_generated.go | 2 +- pkg/apis/apps/types.generated.go | 2 +- pkg/apis/apps/v1alpha1/types.generated.go | 2 +- pkg/apis/batch/types.generated.go | 4 +- pkg/apis/batch/v1/conversion_generated.go | 6 +-- pkg/apis/batch/v1/types.generated.go | 2 +- .../batch/v2alpha1/conversion_generated.go | 6 +-- pkg/apis/batch/v2alpha1/types.generated.go | 2 +- pkg/apis/extensions/types.generated.go | 6 +-- .../v1beta1/conversion_generated.go | 6 +-- .../extensions/v1beta1/types.generated.go | 8 +-- 25 files changed, 111 insertions(+), 55 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aec64c37e66..fd4c5394507 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [Other notable changes](#other-notable-changes) - [v1.3.0-alpha.3](#v130-alpha3) - [Downloads](#downloads) +- [v1.3.0-alpha.3](#v130-alpha3) - [Changes since v1.3.0-alpha.2](#changes-since-v130-alpha2) - [Action Required](#action-required) - [Other notable changes](#other-notable-changes) diff --git a/api/swagger-spec/apps_v1alpha1.json b/api/swagger-spec/apps_v1alpha1.json index 912d2f9ab9a..1abfb9a44e2 100644 --- a/api/swagger-spec/apps_v1alpha1.json +++ b/api/swagger-spec/apps_v1alpha1.json @@ -1854,7 +1854,7 @@ "items": { "$ref": "v1.VolumeMount" }, - "description": "Pod volumes to mount into the container's filesyste. Cannot be updated." + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated." }, "livenessProbe": { "$ref": "v1.Probe", diff --git a/api/swagger-spec/batch_v1.json b/api/swagger-spec/batch_v1.json index 2d33824ef8f..de114104c82 100644 --- a/api/swagger-spec/batch_v1.json +++ b/api/swagger-spec/batch_v1.json @@ -1859,7 +1859,7 @@ "items": { "$ref": "v1.VolumeMount" }, - "description": "Pod volumes to mount into the container's filesyste. Cannot be updated." + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated." }, "livenessProbe": { "$ref": "v1.Probe", diff --git a/api/swagger-spec/extensions_v1beta1.json b/api/swagger-spec/extensions_v1beta1.json index caed694ac9e..7a6ab88f53b 100644 --- a/api/swagger-spec/extensions_v1beta1.json +++ b/api/swagger-spec/extensions_v1beta1.json @@ -7166,7 +7166,7 @@ "items": { "$ref": "v1.VolumeMount" }, - "description": "Pod volumes to mount into the container's filesyste. Cannot be updated." + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated." }, "livenessProbe": { "$ref": "v1.Probe", diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 4da13cc55ea..3c04fcdde75 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -17177,7 +17177,7 @@ "items": { "$ref": "v1.VolumeMount" }, - "description": "Pod volumes to mount into the container's filesyste. 
Cannot be updated." + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated." }, "livenessProbe": { "$ref": "v1.Probe", diff --git a/docs/api-reference/batch/v1/definitions.html b/docs/api-reference/batch/v1/definitions.html index c43fa8403a1..45d52586215 100755 --- a/docs/api-reference/batch/v1/definitions.html +++ b/docs/api-reference/batch/v1/definitions.html @@ -1375,7 +1375,7 @@ Examples:

volumeMounts
-Pod volumes to mount into the container’s filesyste. Cannot be updated.
+Pod volumes to mount into the container’s filesystem. Cannot be updated.
false
v1.VolumeMount array

@@ -3979,7 +3979,7 @@ Populated by the system when a graceful deletion is requested. Read-only. More i diff --git a/docs/api-reference/extensions/v1beta1/definitions.html b/docs/api-reference/extensions/v1beta1/definitions.html index 60006901ef9..c5e4b2dc72e 100755 --- a/docs/api-reference/extensions/v1beta1/definitions.html +++ b/docs/api-reference/extensions/v1beta1/definitions.html @@ -4706,7 +4706,7 @@ Both these may change in the future. Incoming requests are matched against the h

volumeMounts
-Pod volumes to mount into the container’s filesyste. Cannot be updated.
+Pod volumes to mount into the container’s filesystem. Cannot be updated.
false
v1.VolumeMount array

diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index b54d1aacf5b..5ba4bbbe0ca 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -5440,7 +5440,7 @@ The resulting set of endpoints can be viewed as:

volumeMounts
-Pod volumes to mount into the container’s filesyste. Cannot be updated.
+Pod volumes to mount into the container’s filesystem. Cannot be updated.
false
v1.VolumeMount array

diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index 48ffccdec0b..e7bceba9013 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -2232,6 +2232,17 @@ func DeepCopy_api_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error } else { out.Volumes = nil } + if in.InitContainers != nil { + in, out := in.InitContainers, &out.InitContainers + *out = make([]Container, len(in)) + for i := range in { + if err := DeepCopy_api_Container(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { in, out := in.Containers, &out.Containers *out = make([]Container, len(in)) @@ -2321,6 +2332,17 @@ func DeepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) } else { out.StartTime = nil } + if in.InitContainerStatuses != nil { + in, out := in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(in)) + for i := range in { + if err := DeepCopy_api_ContainerStatus(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.InitContainerStatuses = nil + } if in.ContainerStatuses != nil { in, out := in.ContainerStatuses, &out.ContainerStatuses *out = make([]ContainerStatus, len(in)) diff --git a/pkg/api/types.generated.go b/pkg/api/types.generated.go index 7e85c765585..3f7b9e55173 100644 --- a/pkg/api/types.generated.go +++ b/pkg/api/types.generated.go @@ -52540,7 +52540,7 @@ func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 576) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 624) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53849,7 +53849,7 @@ func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Deco yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 648) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index dd2fcb933e1..6f7c708d35e 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -5012,6 +5012,17 @@ func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conv } else { out.Volumes = nil } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { in, out := &in.Containers, &out.Containers *out = make([]Container, len(*in)) @@ -5101,6 +5112,17 @@ func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus } else { out.StartTime = nil } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]api.ContainerStatus, len(*in)) + for i := range *in { + if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainerStatuses = nil + } if in.ContainerStatuses != nil { in, out := &in.ContainerStatuses, &out.ContainerStatuses *out = make([]api.ContainerStatus, len(*in)) @@ -5145,6 +5167,17 @@ func 
autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus } else { out.StartTime = nil } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainerStatuses = nil + } if in.ContainerStatuses != nil { in, out := &in.ContainerStatuses, &out.ContainerStatuses *out = make([]ContainerStatus, len(*in)) @@ -5176,10 +5209,6 @@ func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, return nil } -func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s) -} - func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -5193,10 +5222,6 @@ func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResu return nil } -func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { - return autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s) -} - func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -5291,10 +5316,6 @@ func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, return nil } -func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - return autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s) -} - func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -5305,10 +5326,6 @@ func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSp return nil } -func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { - return autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s) -} - func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { if in.UID != nil { in, out := &in.UID, &out.UID diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index 618b6f6c689..4fef3f5f23d 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -2176,6 +2176,17 @@ func DeepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error { } else { out.Volumes = nil } + if in.InitContainers != nil { + in, out := in.InitContainers, &out.InitContainers + *out = make([]Container, len(in)) + for i := range in { + if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { in, out := in.Containers, &out.Containers *out = make([]Container, len(in)) @@ -2269,6 +2280,17 @@ func 
DeepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) e } else { out.StartTime = nil } + if in.InitContainerStatuses != nil { + in, out := in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(in)) + for i := range in { + if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.InitContainerStatuses = nil + } if in.ContainerStatuses != nil { in, out := in.ContainerStatuses, &out.ContainerStatuses *out = make([]ContainerStatus, len(in)) diff --git a/pkg/api/v1/generated.proto b/pkg/api/v1/generated.proto index c6ad9c61f44..875bea9af33 100644 --- a/pkg/api/v1/generated.proto +++ b/pkg/api/v1/generated.proto @@ -295,7 +295,7 @@ message Container { // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources optional ResourceRequirements resources = 8; - // Pod volumes to mount into the container's filesyste. + // Pod volumes to mount into the container's filesystem. // Cannot be updated. repeated VolumeMount volumeMounts = 9; diff --git a/pkg/api/v1/types.generated.go b/pkg/api/v1/types.generated.go index 4fc28fca499..bdc5f75238c 100644 --- a/pkg/api/v1/types.generated.go +++ b/pkg/api/v1/types.generated.go @@ -53783,7 +53783,7 @@ func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 600) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 648) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53902,7 +53902,7 @@ func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Deco yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/pkg/api/v1/types_swagger_doc_generated.go b/pkg/api/v1/types_swagger_doc_generated.go index b4b5df2a1e3..39a60368c1a 100644 --- a/pkg/api/v1/types_swagger_doc_generated.go +++ b/pkg/api/v1/types_swagger_doc_generated.go @@ -186,7 +186,7 @@ var map_Container = map[string]string{ "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources", - "volumeMounts": "Pod volumes to mount into the container's filesyste. Cannot be updated.", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", diff --git a/pkg/apis/apps/types.generated.go b/pkg/apis/apps/types.generated.go index 2675f23deea..50418724784 100644 --- a/pkg/apis/apps/types.generated.go +++ b/pkg/apis/apps/types.generated.go @@ -1555,7 +1555,7 @@ func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 720) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/pkg/apis/apps/v1alpha1/types.generated.go b/pkg/apis/apps/v1alpha1/types.generated.go index 5125beaf2fe..38edac9e59b 100644 --- a/pkg/apis/apps/v1alpha1/types.generated.go +++ b/pkg/apis/apps/v1alpha1/types.generated.go @@ -1585,7 +1585,7 @@ func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/pkg/apis/batch/types.generated.go b/pkg/apis/batch/types.generated.go index 4e3d1472315..01589cda732 100644 --- a/pkg/apis/batch/types.generated.go +++ b/pkg/apis/batch/types.generated.go @@ -4205,7 +4205,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -4443,7 +4443,7 @@ func (x codecSelfer1234) decSliceScheduledJob(v *[]ScheduledJob, d *codec1978.De yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 976) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1000) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/pkg/apis/batch/v1/conversion_generated.go b/pkg/apis/batch/v1/conversion_generated.go index ef7a0133803..9fc08b6d1ce 100644 --- a/pkg/apis/batch/v1/conversion_generated.go +++ b/pkg/apis/batch/v1/conversion_generated.go @@ -217,8 +217,7 @@ func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s } else { out.ManualSelector = nil } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -262,8 +261,7 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s } else { out.ManualSelector = nil } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil diff --git a/pkg/apis/batch/v1/types.generated.go b/pkg/apis/batch/v1/types.generated.go index 5e7ab0e3c57..b305bb42bb9 100644 --- a/pkg/apis/batch/v1/types.generated.go +++ b/pkg/apis/batch/v1/types.generated.go @@ -2867,7 +2867,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/pkg/apis/batch/v2alpha1/conversion_generated.go b/pkg/apis/batch/v2alpha1/conversion_generated.go index 7ab274c2c88..cecbfe4d6e5 100644 --- a/pkg/apis/batch/v2alpha1/conversion_generated.go +++ b/pkg/apis/batch/v2alpha1/conversion_generated.go @@ -223,8 +223,7 @@ func autoConvert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSp } else { out.ManualSelector = nil } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -268,8 +267,7 @@ func autoConvert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSp } else { out.ManualSelector = nil } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil diff --git a/pkg/apis/batch/v2alpha1/types.generated.go b/pkg/apis/batch/v2alpha1/types.generated.go index b85c9a718e3..f1369d6893b 100644 --- a/pkg/apis/batch/v2alpha1/types.generated.go +++ b/pkg/apis/batch/v2alpha1/types.generated.go @@ -4739,7 +4739,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/pkg/apis/extensions/types.generated.go b/pkg/apis/extensions/types.generated.go index ffdba4306f9..f5b42e377f4 100644 --- a/pkg/apis/extensions/types.generated.go +++ b/pkg/apis/extensions/types.generated.go @@ -14325,7 +14325,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -14444,7 +14444,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -15158,7 +15158,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 680) + yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 704) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/pkg/apis/extensions/v1beta1/conversion_generated.go b/pkg/apis/extensions/v1beta1/conversion_generated.go index 4b8506a9d52..9f965829c1a 100644 --- a/pkg/apis/extensions/v1beta1/conversion_generated.go +++ b/pkg/apis/extensions/v1beta1/conversion_generated.go @@ -401,8 +401,7 @@ func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSet } else { out.Selector = nil } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -422,8 +421,7 @@ func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extension } else { out.Selector = nil } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil diff --git a/pkg/apis/extensions/v1beta1/types.generated.go b/pkg/apis/extensions/v1beta1/types.generated.go index 44e78dd202e..84e3d95850d 100644 --- a/pkg/apis/extensions/v1beta1/types.generated.go +++ b/pkg/apis/extensions/v1beta1/types.generated.go @@ -19988,7 +19988,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -20107,7 +20107,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 720) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -20345,7 +20345,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -21178,7 +21178,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 704) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 728) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] From 205a8b4574c4d3cf35ac7a430b19a426c835dda3 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Mon, 28 Mar 2016 23:08:54 -0400 Subject: [PATCH 3/6] Add init container loading to the kubelet --- pkg/api/resource_helpers.go | 13 +- pkg/kubelet/container/ref.go | 10 + pkg/kubelet/container/sync_result.go | 8 +- pkg/kubelet/dockertools/manager.go | 322 ++++++++++++++++++++---- pkg/kubelet/kubelet.go | 96 +++++-- pkg/kubelet/prober/manager.go | 9 + pkg/kubelet/server/server.go | 7 + pkg/kubelet/status/generate.go | 55 ++++ pkg/kubelet/status/manager.go | 71 +++++- pkg/kubelet/util.go | 5 + test/e2e/framework/util.go | 109 +++++++- test/e2e/pods.go | 358 +++++++++++++++++++++++++++ 12 files changed, 975 insertions(+), 88 deletions(-) diff --git a/pkg/api/resource_helpers.go 
b/pkg/api/resource_helpers.go index 65bc1b85f08..af1189b4fb3 100644 --- a/pkg/api/resource_helpers.go +++ b/pkg/api/resource_helpers.go @@ -92,6 +92,8 @@ func GetPodReadyCondition(status PodStatus) *PodCondition { return condition } +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the the index of the located condition. func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { for i, c := range status.Conditions { if c.Type == conditionType { @@ -131,17 +133,6 @@ func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { } } -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetPodCondition(status PodStatus, t PodConditionType) *PodCondition { - for i, c := range status.Conditions { - if c.Type == t { - return &status.Conditions[i] - } - } - return nil -} - // IsNodeReady returns true if a node is ready; false otherwise. func IsNodeReady(node *Node) bool { for _, c := range node.Status.Conditions { diff --git a/pkg/kubelet/container/ref.go b/pkg/kubelet/container/ref.go index 55e4d546575..ebfff2ebf70 100644 --- a/pkg/kubelet/container/ref.go +++ b/pkg/kubelet/container/ref.go @@ -57,5 +57,15 @@ func fieldPath(pod *api.Pod, container *api.Container) (string, error) { } } } + for i := range pod.Spec.InitContainers { + here := &pod.Spec.InitContainers[i] + if here.Name == container.Name { + if here.Name == "" { + return fmt.Sprintf("spec.initContainers[%d]", i), nil + } else { + return fmt.Sprintf("spec.initContainers{%s}", here.Name), nil + } + } + } return "", fmt.Errorf("container %#v not found in pod %#v", container, pod) } diff --git a/pkg/kubelet/container/sync_result.go b/pkg/kubelet/container/sync_result.go index 1c3aa9eea90..6a196f602b2 100644 --- a/pkg/kubelet/container/sync_result.go +++ b/pkg/kubelet/container/sync_result.go @@ -50,9 +50,10 @@ var ( ) var ( - ErrRunContainer = errors.New("RunContainerError") - ErrKillContainer = errors.New("KillContainerError") - ErrVerifyNonRoot = errors.New("VerifyNonRootError") + ErrRunContainer = errors.New("RunContainerError") + ErrKillContainer = errors.New("KillContainerError") + ErrVerifyNonRoot = errors.New("VerifyNonRootError") + ErrRunInitContainer = errors.New("RunInitContainerError") ) var ( @@ -69,6 +70,7 @@ const ( KillContainer SyncAction = "KillContainer" SetupNetwork SyncAction = "SetupNetwork" TeardownNetwork SyncAction = "TeardownNetwork" + InitContainer SyncAction = "InitContainer" ) // SyncResult is the result of sync action. 
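
The reworked GetPodCondition now returns the index of the located condition along with the condition itself, which pairs naturally with the PodInitialized condition type introduced in the first patch. A hypothetical caller, not part of this series and assuming the k8s.io/kubernetes/pkg/api import, might check initialization like this:

// podInitialized is an illustrative helper, not code from this series: it reports
// whether all init containers in the pod have completed successfully, as recorded
// by the Initialized pod condition.
func podInitialized(status *api.PodStatus) bool {
	_, c := api.GetPodCondition(status, api.PodInitialized)
	return c != nil && c.Status == api.ConditionTrue
}
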
diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index e9409818747..10ac9e281f3 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -37,6 +37,7 @@ import ( dockernat "github.com/docker/go-connections/nat" "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/record" @@ -57,6 +58,7 @@ import ( "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/sets" utilstrings "k8s.io/kubernetes/pkg/util/strings" ) @@ -876,6 +878,9 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine } else if dm.networkPlugin.Name() != "cni" && dm.networkPlugin.Name() != "kubenet" { // Docker only exports ports from the pod infra container. Let's // collect all of the relevant ports and export them. + for _, container := range pod.Spec.InitContainers { + ports = append(ports, container.Ports...) + } for _, container := range pod.Spec.Containers { ports = append(ports, container.Ports...) } @@ -1179,6 +1184,14 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont break } } + if containerSpec == nil { + for i, c := range pod.Spec.InitContainers { + if c.Name == container.Name { + containerSpec = &pod.Spec.InitContainers[i] + break + } + } + } } // TODO: Handle this without signaling the pod infra container to @@ -1369,6 +1382,14 @@ func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod break } } + if container == nil { + for ix := range pod.Spec.InitContainers { + if pod.Spec.InitContainers[ix].Name == name { + container = &pod.Spec.InitContainers[ix] + break + } + } + } if container == nil { err = fmt.Errorf("unable to find container %s in pod %v", name, pod) } @@ -1425,6 +1446,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe if err != nil { glog.Errorf("Can't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err) } + glog.Infof("Generating ref for container %s: %#v", container.Name, ref) opts, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP) if err != nil { @@ -1603,6 +1625,9 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do } else { // Docker only exports ports from the pod infra container. Let's // collect all of the relevant ports and export them. + for _, container := range pod.Spec.InitContainers { + ports = append(ports, container.Ports...) + } for _, container := range pod.Spec.Containers { ports = append(ports, container.Ports...) } @@ -1640,13 +1665,16 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do // should be kept running. If startInfraContainer is false then it contains an entry for infraContainerId (mapped to -1). // It shouldn't be the case where containersToStart is empty and containersToKeep contains only infraContainerId. In such case // Infra Container should be killed, hence it's removed from this map. -// - all running containers which are NOT contained in containersToKeep should be killed. +// - all init containers are stored in initContainersToKeep +// - all running containers which are NOT contained in containersToKeep and initContainersToKeep should be killed. 
type podContainerChangesSpec struct { - StartInfraContainer bool - InfraChanged bool - InfraContainerId kubecontainer.DockerID - ContainersToStart map[int]string - ContainersToKeep map[kubecontainer.DockerID]int + StartInfraContainer bool + InfraChanged bool + InfraContainerId kubecontainer.DockerID + InitFailed bool + InitContainersToKeep map[kubecontainer.DockerID]int + ContainersToStart map[int]string + ContainersToKeep map[kubecontainer.DockerID]int } func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) { @@ -1683,6 +1711,35 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub containersToKeep[podInfraContainerID] = -1 } + // check the status of the init containers + initFailed := false + initContainersToKeep := make(map[kubecontainer.DockerID]int) + // always reset the init containers if the pod is reset + if !createPodInfraContainer { + // keep all successfully completed containers up to and including the first failing container + Containers: + for i, container := range pod.Spec.InitContainers { + containerStatus := podStatus.FindContainerStatusByName(container.Name) + if containerStatus == nil { + continue + } + switch { + case containerStatus == nil: + continue + case containerStatus.State == kubecontainer.ContainerStateRunning: + initContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] = i + case containerStatus.State == kubecontainer.ContainerStateExited: + initContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] = i + // TODO: should we abstract the "did the init container fail" check? + if containerStatus.ExitCode != 0 { + initFailed = true + break Containers + } + } + } + } + + // check the status of the containers for index, container := range pod.Spec.Containers { expectedHash := kubecontainer.HashContainer(&container) @@ -1716,6 +1773,19 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub continue } + if initFailed { + // initialization failed and Container exists + // If we have an initialization failure everything will be killed anyway + // If RestartPolicy is Always or OnFailure we restart containers that were running before we + // killed them when re-running initialization + if pod.Spec.RestartPolicy != api.RestartPolicyNever { + message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name) + glog.V(1).Info(message) + containersToStart[index] = message + } + continue + } + // At this point, the container is running and pod infra container is good. // We will look for changes and check healthiness for the container. containerChanged := hash != 0 && hash != expectedHash @@ -1743,17 +1813,21 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub // (In fact, when createPodInfraContainer is false, containersToKeep will not be touched). // - createPodInfraContainer is false and containersToKeep contains at least ID of Infra Container - // If Infra container is the last running one, we don't want to keep it. + // If Infra container is the last running one, we don't want to keep it, and we don't want to + // keep any init containers. 
if !createPodInfraContainer && len(containersToStart) == 0 && len(containersToKeep) == 1 { containersToKeep = make(map[kubecontainer.DockerID]int) + initContainersToKeep = make(map[kubecontainer.DockerID]int) } return podContainerChangesSpec{ - StartInfraContainer: createPodInfraContainer, - InfraChanged: changed, - InfraContainerId: podInfraContainerID, - ContainersToStart: containersToStart, - ContainersToKeep: containersToKeep, + StartInfraContainer: createPodInfraContainer, + InfraChanged: changed, + InfraContainerId: podInfraContainerID, + InitFailed: initFailed, + InitContainersToKeep: initContainersToKeep, + ContainersToStart: containersToStart, + ContainersToKeep: containersToKeep, }, nil } @@ -1797,7 +1871,8 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec runningContainerStatues := podStatus.GetRunningContainerStatuses() for _, containerStatus := range runningContainerStatues { _, keep := containerChanges.ContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] - if !keep { + _, keepInit := containerChanges.InitContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] + if !keep && !keepInit { glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerStatus.Name, containerStatus.ID, format.Pod(pod)) // attempt to find the appropriate container policy var podContainer *api.Container @@ -1820,6 +1895,9 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec } } + // Keep terminated init containers fairly aggressively controlled + dm.pruneInitContainersBeforeStart(pod, podStatus, containerChanges.InitContainersToKeep) + // We pass the value of the podIP down to runContainerInPod, which in turn // passes it to various other functions, in order to facilitate // functionality that requires this value (hosts file and downward API) @@ -1889,14 +1967,78 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec } } - // Start everything + next, status, done := findActiveInitContainer(pod, podStatus) + if status != nil { + if status.ExitCode != 0 { + // container initialization has failed, flag the pod as failed + initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name) + initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode)) + result.AddSyncResult(initContainerResult) + if pod.Spec.RestartPolicy == api.RestartPolicyNever { + utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %+v", format.Pod(pod), status.Name, status)) + return + } + utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %+v", format.Pod(pod), status.Name, status)) + } + } + + // Note: when configuring the pod's containers anything that can be configured by pointing + // to the namespace of the infra container should use namespaceMode. This includes things like the net namespace + // and IPC namespace. PID mode cannot point to another container right now. + // See createPodInfraContainer for infra container setup. 
+ namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID) + pidMode := getPidMode(pod) + + if next != nil { + if len(containerChanges.ContainersToStart) == 0 { + glog.V(4).Infof("No containers to start, stopping at init container %+v in pod %v", next.Name, format.Pod(pod)) + return + } + + // If we need to start the next container, do so now then exit + container := next + startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name) + result.AddSyncResult(startContainerResult) + + // containerChanges.StartInfraContainer causes the containers to be restarted for config reasons + if !containerChanges.StartInfraContainer { + isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff) + if isInBackOff { + startContainerResult.Fail(err, msg) + glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod)) + return + } + } + + glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod)) + if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil { + startContainerResult.Fail(err, msg) + utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg)) + return + } + + // Successfully started the container; clear the entry in the failure + glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod)) + return + } + if !done { + // init container still running + glog.V(4).Infof("An init container is still running in pod %v", format.Pod(pod)) + return + } + if containerChanges.InitFailed { + // init container still running + glog.V(4).Infof("Not all init containers have succeeded for pod %v", format.Pod(pod)) + return + } + + // Start regular containers for idx := range containerChanges.ContainersToStart { container := &pod.Spec.Containers[idx] startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name) result.AddSyncResult(startContainerResult) // containerChanges.StartInfraContainer causes the containers to be restarted for config reasons - // ignore backoff if !containerChanges.StartInfraContainer { isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff) if isInBackOff { @@ -1905,46 +2047,131 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec continue } } + glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod)) - err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets) - if err != nil { + if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil { startContainerResult.Fail(err, msg) + utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg)) continue } - - if container.SecurityContext != nil && container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot { - err := dm.verifyNonRoot(container) - if err != nil { - startContainerResult.Fail(kubecontainer.ErrVerifyNonRoot, err.Error()) - glog.Errorf("Error running pod %q container %q: %v", format.Pod(pod), container.Name, err) - continue - } - } - // For a new container, the RestartCount should be 0 - restartCount := 0 - containerStatus := podStatus.FindContainerStatusByName(container.Name) - if containerStatus != nil { - restartCount = containerStatus.RestartCount + 1 - } - - // TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container - // Note: when configuring the pod's 
containers anything that can be configured by pointing - // to the namespace of the infra container should use namespaceMode. This includes things like the net namespace - // and IPC namespace. PID mode cannot point to another container right now. - // See createPodInfraContainer for infra container setup. - namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID) - _, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, getPidMode(pod), podIP, restartCount) - if err != nil { - startContainerResult.Fail(kubecontainer.ErrRunContainer, err.Error()) - // TODO(bburns) : Perhaps blacklist a container after N failures? - glog.Errorf("Error running pod %q container %q: %v", format.Pod(pod), container.Name, err) - continue - } - // Successfully started the container; clear the entry in the failure } return } +// tryContainerStart attempts to pull and start the container, returning an error and a reason string if the start +// was not successful. +func (dm *DockerManager) tryContainerStart(container *api.Container, pod *api.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) { + err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets) + if err != nil { + return err, msg + } + + if container.SecurityContext != nil && container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot { + err := dm.verifyNonRoot(container) + if err != nil { + return kubecontainer.ErrVerifyNonRoot, err.Error() + } + } + + // For a new container, the RestartCount should be 0 + restartCount := 0 + containerStatus := podStatus.FindContainerStatusByName(container.Name) + if containerStatus != nil { + restartCount = containerStatus.RestartCount + 1 + } + + // TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container + _, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, pidMode, podIP, restartCount) + if err != nil { + // TODO(bburns) : Perhaps blacklist a container after N failures? + return kubecontainer.ErrRunContainer, err.Error() + } + return nil, "" +} + +// pruneInitContainers ensures that before we begin creating init containers, we have reduced the number +// of outstanding init containers still present. This reduces load on the container garbage collector +// by only preserving the most recent terminated init container. +func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.DockerID]int) { + // only the last execution of an init container should be preserved, and only preserve it if it is in the + // list of init containers to keep. 
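+ // For each init container name, the most recently exited instance is kept; older
+ // instances are removed unless they appear in initContainersToKeep.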
+ initContainerNames := sets.NewString() + for _, container := range pod.Spec.InitContainers { + initContainerNames.Insert(container.Name) + } + for name := range initContainerNames { + count := 0 + for _, status := range podStatus.ContainerStatuses { + if !initContainerNames.Has(status.Name) || status.State != kubecontainer.ContainerStateExited { + continue + } + count++ + // keep the first init container we see + if count == 1 { + continue + } + // if there is a reason to preserve the older container, do so + if _, ok := initContainersToKeep[kubecontainer.DockerID(status.ID.ID)]; ok { + continue + } + + // prune all other init containers that match this container name + // TODO: we may not need aggressive pruning + glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) + if err := dm.client.RemoveContainer(status.ID.ID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}); err != nil { + if _, ok := err.(containerNotFoundError); ok { + count-- + continue + } + utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", name, err, format.Pod(pod))) + // TODO: report serious errors + continue + } + + // remove any references to this container + if _, ok := dm.containerRefManager.GetRef(status.ID); ok { + dm.containerRefManager.ClearRef(status.ID) + } else { + glog.Warningf("No ref for pod '%q'", pod.Name) + } + } + } +} + +// findActiveInitContainer returns the status of the last failed container, the next init container to +// start, or done if there are no further init containers. Status is only returned if an init container +// failed, in which case next will point to the current container. +func findActiveInitContainer(pod *api.Pod, podStatus *kubecontainer.PodStatus) (next *api.Container, status *kubecontainer.ContainerStatus, done bool) { + if len(pod.Spec.InitContainers) == 0 { + return nil, nil, true + } + + for i := len(pod.Spec.InitContainers) - 1; i >= 0; i-- { + container := &pod.Spec.InitContainers[i] + status := podStatus.FindContainerStatusByName(container.Name) + switch { + case status == nil: + continue + case status.State == kubecontainer.ContainerStateRunning: + return nil, nil, false + case status.State == kubecontainer.ContainerStateExited: + switch { + // the container has failed, we'll have to retry + case status.ExitCode != 0: + return &pod.Spec.InitContainers[i], status, false + // all init containers successful + case i == (len(pod.Spec.InitContainers) - 1): + return nil, nil, true + // all containers up to i successful, go to i+1 + default: + return &pod.Spec.InitContainers[i+1], nil, false + } + } + } + + return &pod.Spec.InitContainers[0], nil, false +} + // verifyNonRoot returns an error if the container or image will run as the root user. 
func (dm *DockerManager) verifyNonRoot(container *api.Container) error { if securitycontext.HasRunAsUser(container) { @@ -2018,6 +2245,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt } } if cStatus != nil { + glog.Infof("checking backoff for container %q in pod %q", container.Name, pod.Name) ts := cStatus.FinishedAt // found a container that requires backoff dockerName := KubeletContainerName{ diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 82e49538cb6..2163a3189d7 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -1761,6 +1761,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { !firstSeenTime.IsZero() { metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime)) } + // Update status in the status manager kl.statusManager.SetPodStatus(pod, apiPodStatus) @@ -2347,6 +2348,10 @@ func hasHostPortConflicts(pods []*api.Pod) bool { glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs) return true } + if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 { + glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs) + return true + } } return false } @@ -3310,12 +3315,46 @@ func (kl *Kubelet) tryUpdateNodeStatus() error { // This func is exported to simplify integration with 3rd party kubelet // integrations like kubernetes-mesos. func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { + initialized := 0 + pendingInitialization := 0 + failedInitialization := 0 + for _, container := range spec.InitContainers { + containerStatus, ok := api.GetContainerStatus(info, container.Name) + if !ok { + pendingInitialization++ + continue + } + + switch { + case containerStatus.State.Running != nil: + pendingInitialization++ + case containerStatus.State.Terminated != nil: + if containerStatus.State.Terminated.ExitCode == 0 { + initialized++ + } else { + failedInitialization++ + } + case containerStatus.State.Waiting != nil: + if containerStatus.LastTerminationState.Terminated != nil { + if containerStatus.LastTerminationState.Terminated.ExitCode == 0 { + initialized++ + } else { + failedInitialization++ + } + } else { + pendingInitialization++ + } + default: + pendingInitialization++ + } + } + + unknown := 0 running := 0 waiting := 0 stopped := 0 failed := 0 succeeded := 0 - unknown := 0 for _, container := range spec.Containers { containerStatus, ok := api.GetContainerStatus(info, container.Name) if !ok { @@ -3344,7 +3383,13 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { } } + if failedInitialization > 0 && spec.RestartPolicy == api.RestartPolicyNever { + return api.PodFailed + } + switch { + case pendingInitialization > 0: + fallthrough case waiting > 0: glog.V(5).Infof("pod waiting > 0, pending") // One or more containers has not been started @@ -3409,8 +3454,10 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P // Assume info is ready to process spec := &pod.Spec - s.Phase = GetPhase(spec, s.ContainerStatuses) + allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) 
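+ // GetPhase now takes init container statuses into account (pending initialization keeps
+ // the pod Pending, failed initialization can fail it), so both lists are passed in.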
+ s.Phase = GetPhase(spec, allStatus) kl.probeManager.UpdatePodStatus(pod.UID, s) + s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase)) s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase)) // s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus() // does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure @@ -3443,9 +3490,27 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P // alter the kubelet state at all. func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus { var apiPodStatus api.PodStatus - uid := pod.UID apiPodStatus.PodIP = podStatus.IP + apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses( + pod, podStatus, + pod.Status.ContainerStatuses, + pod.Spec.Containers, + len(pod.Spec.InitContainers) > 0, + false, + ) + apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses( + pod, podStatus, + pod.Status.InitContainerStatuses, + pod.Spec.InitContainers, + len(pod.Spec.InitContainers) > 0, + true, + ) + + return &apiPodStatus +} + +func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubecontainer.PodStatus, previousStatus []api.ContainerStatus, containers []api.Container, hasInitContainers, isInitContainer bool) []api.ContainerStatus { convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus { cid := cs.ID.String() status := &api.ContainerStatus{ @@ -3474,15 +3539,19 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain } // Fetch old containers statuses from old pod status. - oldStatuses := make(map[string]api.ContainerStatus, len(pod.Spec.Containers)) - for _, status := range pod.Status.ContainerStatuses { + oldStatuses := make(map[string]api.ContainerStatus, len(containers)) + for _, status := range previousStatus { oldStatuses[status.Name] = status } // Set all container statuses to default waiting state - statuses := make(map[string]*api.ContainerStatus, len(pod.Spec.Containers)) + statuses := make(map[string]*api.ContainerStatus, len(containers)) defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}} - for _, container := range pod.Spec.Containers { + if hasInitContainers { + defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}} + } + + for _, container := range containers { status := &api.ContainerStatus{ Name: container.Name, Image: container.Image, @@ -3498,7 +3567,6 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain // Make the latest container status comes first. sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses))) - // Set container statuses according to the statuses seen in pod status containerSeen := map[string]int{} for _, cStatus := range podStatus.ContainerStatuses { @@ -3520,13 +3588,13 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain } // Handle the containers failed to be started, which should be in Waiting state. - for _, container := range pod.Spec.Containers { + for _, container := range containers { // If a container should be restarted in next syncpod, it is *Waiting*. 
if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) { continue } status := statuses[container.Name] - reason, message, ok := kl.reasonCache.Get(uid, container.Name) + reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name) if !ok { // In fact, we could also apply Waiting state here, but it is less informative, // and the container will be restarted soon, so we prefer the original state here. @@ -3548,15 +3616,15 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain statuses[container.Name] = status } - apiPodStatus.ContainerStatuses = make([]api.ContainerStatus, 0) + var containerStatuses []api.ContainerStatus for _, status := range statuses { - apiPodStatus.ContainerStatuses = append(apiPodStatus.ContainerStatuses, *status) + containerStatuses = append(containerStatuses, *status) } // Sort the container statuses since clients of this interface expect the list // of containers in a pod has a deterministic order. - sort.Sort(kubetypes.SortedContainerStatuses(apiPodStatus.ContainerStatuses)) - return &apiPodStatus + sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses)) + return containerStatuses } // Returns logs of current machine. diff --git a/pkg/kubelet/prober/manager.go b/pkg/kubelet/prober/manager.go index 9e46f0be3f7..01218a75df1 100644 --- a/pkg/kubelet/prober/manager.go +++ b/pkg/kubelet/prober/manager.go @@ -207,6 +207,15 @@ func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *api.PodStatus) { } podStatus.ContainerStatuses[i].Ready = ready } + // init containers are ready if they have exited with success or if a readiness probe has + // succeeded. + for i, c := range podStatus.InitContainerStatuses { + var ready bool + if c.State.Terminated != nil && c.State.Terminated.ExitCode == 0 { + ready = true + } + podStatus.InitContainerStatuses[i].Ready = ready + } } func (m *manager) getWorker(podUID types.UID, containerName string, probeType probeType) (*worker, bool) { diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 163ecf57b64..e8b06be494f 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -465,6 +465,13 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re containerExists = true } } + if !containerExists { + for _, container := range pod.Spec.InitContainers { + if container.Name == containerName { + containerExists = true + } + } + } if !containerExists { response.WriteError(http.StatusNotFound, fmt.Errorf("container %q not found in pod %q\n", containerName, podID)) return diff --git a/pkg/kubelet/status/generate.go b/pkg/kubelet/status/generate.go index 05d845470b4..cc000929a07 100644 --- a/pkg/kubelet/status/generate.go +++ b/pkg/kubelet/status/generate.go @@ -77,3 +77,58 @@ func GeneratePodReadyCondition(spec *api.PodSpec, containerStatuses []api.Contai Status: api.ConditionTrue, } } + +// GeneratePodInitializedCondition returns initialized condition if all init containers in a pod are ready, else it +// returns an uninitialized condition. +func GeneratePodInitializedCondition(spec *api.PodSpec, containerStatuses []api.ContainerStatus, podPhase api.PodPhase) api.PodCondition { + // Find if all containers are ready or not. 
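+ // ("containers" here means the init containers; regular containers are covered by
+ // GeneratePodReadyCondition above.)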
+ if containerStatuses == nil && len(spec.InitContainers) > 0 { + return api.PodCondition{ + Type: api.PodInitialized, + Status: api.ConditionFalse, + Reason: "UnknownContainerStatuses", + } + } + unknownContainers := []string{} + unreadyContainers := []string{} + for _, container := range spec.InitContainers { + if containerStatus, ok := api.GetContainerStatus(containerStatuses, container.Name); ok { + if !containerStatus.Ready { + unreadyContainers = append(unreadyContainers, container.Name) + } + } else { + unknownContainers = append(unknownContainers, container.Name) + } + } + + // If all init containers are known and succeeded, just return PodCompleted. + if podPhase == api.PodSucceeded && len(unknownContainers) == 0 { + return api.PodCondition{ + Type: api.PodInitialized, + Status: api.ConditionTrue, + Reason: "PodCompleted", + } + } + + unreadyMessages := []string{} + if len(unknownContainers) > 0 { + unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers)) + } + if len(unreadyContainers) > 0 { + unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with incomplete status: %s", unreadyContainers)) + } + unreadyMessage := strings.Join(unreadyMessages, ", ") + if unreadyMessage != "" { + return api.PodCondition{ + Type: api.PodInitialized, + Status: api.ConditionFalse, + Reason: "ContainersNotInitialized", + Message: unreadyMessage, + } + } + + return api.PodCondition{ + Type: api.PodInitialized, + Status: api.ConditionTrue, + } +} diff --git a/pkg/kubelet/status/manager.go b/pkg/kubelet/status/manager.go index 815cc78091e..c9b5b1e4e2c 100644 --- a/pkg/kubelet/status/manager.go +++ b/pkg/kubelet/status/manager.go @@ -172,20 +172,14 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai } // Find the container to update. - containerIndex := -1 - for i, c := range oldStatus.status.ContainerStatuses { - if c.ContainerID == containerID.String() { - containerIndex = i - break - } - } - if containerIndex == -1 { + containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String()) + if !ok { glog.Warningf("Container readiness changed for unknown container: %q - %q", format.Pod(pod), containerID.String()) return } - if oldStatus.status.ContainerStatuses[containerIndex].Ready == ready { + if containerStatus.Ready == ready { glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready, format.Pod(pod), containerID.String()) return @@ -196,7 +190,8 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai if err != nil { return } - status.ContainerStatuses[containerIndex].Ready = ready + containerStatus, _, _ = findContainerStatus(&status, containerID.String()) + containerStatus.Ready = ready // Update pod condition. readyConditionIndex := -1 @@ -217,6 +212,31 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai m.updateStatusInternal(pod, status, false) } +func findContainerStatus(status *api.PodStatus, containerID string) (containerStatus *api.ContainerStatus, init bool, ok bool) { + // Find the container to update. 
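+ // Regular container statuses are searched first, then init container statuses; the second
+ // return value reports whether the match came from the init container list.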
+ containerIndex := -1 + for i, c := range status.ContainerStatuses { + if c.ContainerID == containerID { + containerIndex = i + break + } + } + if containerIndex != -1 { + return &status.ContainerStatuses[containerIndex], false, true + } + + for i, c := range status.InitContainerStatuses { + if c.ContainerID == containerID { + containerIndex = i + break + } + } + if containerIndex != -1 { + return &status.InitContainerStatuses[containerIndex], true, true + } + return nil, false, false +} + func (m *manager) TerminatePod(pod *api.Pod) { m.podStatusesLock.Lock() defer m.podStatusesLock.Unlock() @@ -233,6 +253,11 @@ func (m *manager) TerminatePod(pod *api.Pod) { Terminated: &api.ContainerStateTerminated{}, } } + for i := range status.InitContainerStatuses { + status.InitContainerStatuses[i].State = api.ContainerState{ + Terminated: &api.ContainerStateTerminated{}, + } + } m.updateStatusInternal(pod, pod.Status, true) } @@ -251,16 +276,27 @@ func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, force } // Set ReadyCondition.LastTransitionTime. - if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil { + if _, readyCondition := api.GetPodCondition(&status, api.PodReady); readyCondition != nil { // Need to set LastTransitionTime. lastTransitionTime := unversioned.Now() - oldReadyCondition := api.GetPodReadyCondition(oldStatus) + _, oldReadyCondition := api.GetPodCondition(&oldStatus, api.PodReady) if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status { lastTransitionTime = oldReadyCondition.LastTransitionTime } readyCondition.LastTransitionTime = lastTransitionTime } + // Set InitializedCondition.LastTransitionTime. + if _, initCondition := api.GetPodCondition(&status, api.PodInitialized); initCondition != nil { + // Need to set LastTransitionTime. + lastTransitionTime := unversioned.Now() + _, oldInitCondition := api.GetPodCondition(&oldStatus, api.PodInitialized) + if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status { + lastTransitionTime = oldInitCondition.LastTransitionTime + } + initCondition.LastTransitionTime = lastTransitionTime + } + // ensure that the start time does not change across updates. 
if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() { status.StartTime = oldStatus.StartTime @@ -490,6 +526,8 @@ func normalizeStatus(status *api.PodStatus) *api.PodStatus { normalizeTimeStamp(&condition.LastProbeTime) normalizeTimeStamp(&condition.LastTransitionTime) } + + // update container statuses for i := range status.ContainerStatuses { cstatus := &status.ContainerStatuses[i] normalizeContainerState(&cstatus.State) @@ -497,6 +535,15 @@ func normalizeStatus(status *api.PodStatus) *api.PodStatus { } // Sort the container statuses, so that the order won't affect the result of comparison sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses)) + + // update init container statuses + for i := range status.InitContainerStatuses { + cstatus := &status.InitContainerStatuses[i] + normalizeContainerState(&cstatus.State) + normalizeContainerState(&cstatus.LastTerminationState) + } + // Sort the container statuses, so that the order won't affect the result of comparison + sort.Sort(kubetypes.SortedContainerStatuses(status.InitContainerStatuses)) return status } diff --git a/pkg/kubelet/util.go b/pkg/kubelet/util.go index a06a57ce566..ae2d94bfa12 100644 --- a/pkg/kubelet/util.go +++ b/pkg/kubelet/util.go @@ -63,6 +63,11 @@ func canRunPod(pod *api.Pod) error { return fmt.Errorf("pod with UID %q specified privileged container, but is disallowed", pod.UID) } } + for _, container := range pod.Spec.InitContainers { + if securitycontext.HasPrivilegedRequest(&container) { + return fmt.Errorf("pod with UID %q specified privileged container, but is disallowed", pod.UID) + } + } } return nil } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 8a1b6348514..e137dbd0457 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -879,6 +879,109 @@ func deleteNS(c *client.Client, namespace string, timeout time.Duration) error { return nil } +func ContainerInitInvariant(older, newer runtime.Object) error { + oldPod := older.(*api.Pod) + newPod := newer.(*api.Pod) + if len(oldPod.Spec.InitContainers) == 0 { + return nil + } + if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) { + return fmt.Errorf("init container list changed") + } + if oldPod.UID != newPod.UID { + return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID) + } + if err := initContainersInvariants(oldPod); err != nil { + return err + } + if err := initContainersInvariants(newPod); err != nil { + return err + } + oldInit, _, _ := podInitialized(oldPod) + newInit, _, _ := podInitialized(newPod) + if oldInit && !newInit { + // TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it + // from scratch + return fmt.Errorf("pod cannot be initialized and then regress to not being initialized") + } + return nil +} + +func podInitialized(pod *api.Pod) (ok bool, failed bool, err error) { + allInit := true + initFailed := false + for _, s := range pod.Status.InitContainerStatuses { + switch { + case initFailed && s.State.Waiting == nil: + return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name) + case allInit && s.State.Waiting == nil: + return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name) + case s.State.Terminated == nil: + allInit = false + case s.State.Terminated.ExitCode != 0: + allInit = false + initFailed = true + case !s.Ready: + return allInit, initFailed, 
fmt.Errorf("container %s initialized but isn't marked as ready", s.Name) + } + } + return allInit, initFailed, nil +} + +func initContainersInvariants(pod *api.Pod) error { + allInit, initFailed, err := podInitialized(pod) + if err != nil { + return err + } + if !allInit || initFailed { + for _, s := range pod.Status.ContainerStatuses { + if s.State.Waiting == nil || s.RestartCount != 0 { + return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name) + } + if s.State.Waiting.Reason != "PodInitializing" { + return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason) + } + } + } + _, c := api.GetPodCondition(&pod.Status, api.PodInitialized) + if c == nil { + return fmt.Errorf("pod does not have initialized condition") + } + if c.LastTransitionTime.IsZero() { + return fmt.Errorf("PodInitialized condition should always have a transition time") + } + switch { + case c.Status == api.ConditionUnknown: + return fmt.Errorf("PodInitialized condition should never be Unknown") + case c.Status == api.ConditionTrue && (initFailed || !allInit): + return fmt.Errorf("PodInitialized condition was True but all not all containers initialized") + case c.Status == api.ConditionFalse && (!initFailed && allInit): + return fmt.Errorf("PodInitialized condition was False but all containers initialized") + } + return nil +} + +type InvariantFunc func(older, newer runtime.Object) error + +func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error { + errs := sets.NewString() + for i := range events { + j := i + 1 + if j >= len(events) { + continue + } + for _, fn := range fns { + if err := fn(events[i].Object, events[j].Object); err != nil { + errs.Insert(err.Error()) + } + } + } + if errs.Len() > 0 { + return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* ")) + } + return nil +} + // Waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. 
func WaitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error { @@ -2218,7 +2321,11 @@ func DumpNodeDebugInfo(c *client.Client, nodeNames []string) { continue } for _, p := range podList.Items { - Logf("%v started at %v (%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.ContainerStatuses)) + Logf("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses)) + for _, c := range p.Status.InitContainerStatuses { + Logf("\tInit container %v ready: %v, restart count %v", + c.Name, c.Ready, c.RestartCount) + } for _, c := range p.Status.ContainerStatuses { Logf("\tContainer %v ready: %v, restart count %v", c.Name, c.Ready, c.RestartCount) diff --git a/test/e2e/pods.go b/test/e2e/pods.go index 47f5868b80a..da3d0821e46 100644 --- a/test/e2e/pods.go +++ b/test/e2e/pods.go @@ -659,6 +659,364 @@ var _ = framework.KubeDescribe("Pods", func() { }) }) + It("should invoke init containers on a RestartNever pod", func() { + podClient := f.Client.Pods(f.Namespace.Name) + + By("creating the pod") + name := "pod-init-" + string(util.NewUUID()) + value := strconv.Itoa(time.Now().Nanosecond()) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "name": "foo", + "time": value, + }, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyNever, + InitContainers: []api.Container{ + { + Name: "init1", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/true"}, + }, + { + Name: "init2", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/true"}, + }, + }, + Containers: []api.Container{ + { + Name: "run1", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/true"}, + }, + }, + }, + } + defer podClient.Delete(pod.Name, nil) + startedPod, err := podClient.Create(pod) + if err != nil { + framework.Failf("Error creating a pod: %v", err) + } + w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta)) + if err != nil { + framework.Failf("Error watching a pod: %v", err) + } + wr := watch.NewRecorder(w) + event, err := watch.Until(framework.PodStartTimeout, wr, client.PodCompleted) + Expect(err).To(BeNil()) + framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant) + endPod := event.Object.(*api.Pod) + + Expect(endPod.Status.Phase).To(Equal(api.PodSucceeded)) + _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized) + Expect(init).NotTo(BeNil()) + Expect(init.Status).To(Equal(api.ConditionTrue)) + + Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2)) + for _, status := range endPod.Status.InitContainerStatuses { + Expect(status.Ready).To(BeTrue()) + Expect(status.State.Terminated).NotTo(BeNil()) + Expect(status.State.Terminated.ExitCode).To(BeZero()) + } + }) + + It("should invoke init containers on a RestartAlways pod", func() { + podClient := f.Client.Pods(f.Namespace.Name) + + By("creating the pod") + name := "pod-init-" + string(util.NewUUID()) + value := strconv.Itoa(time.Now().Nanosecond()) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "name": "foo", + "time": value, + }, + }, + Spec: api.PodSpec{ + InitContainers: []api.Container{ + { + Name: "init1", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/true"}, + }, + { + Name: "init2", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/true"}, + }, + }, + Containers: 
[]api.Container{ + { + Name: "run1", + Image: "gcr.io/google_containers/pause:2.0", + Resources: api.ResourceRequirements{ + Limits: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(10*1024*1024, resource.DecimalSI), + }, + }, + }, + }, + }, + } + defer podClient.Delete(pod.Name, nil) + startedPod, err := podClient.Create(pod) + if err != nil { + framework.Failf("Error creating a pod: %v", err) + } + w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta)) + if err != nil { + framework.Failf("Error watching a pod: %v", err) + } + wr := watch.NewRecorder(w) + event, err := watch.Until(framework.PodStartTimeout, wr, client.PodRunning) + Expect(err).To(BeNil()) + framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant) + endPod := event.Object.(*api.Pod) + + Expect(endPod.Status.Phase).To(Equal(api.PodRunning)) + _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized) + Expect(init).NotTo(BeNil()) + Expect(init.Status).To(Equal(api.ConditionTrue)) + + Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2)) + for _, status := range endPod.Status.InitContainerStatuses { + Expect(status.Ready).To(BeTrue()) + Expect(status.State.Terminated).NotTo(BeNil()) + Expect(status.State.Terminated.ExitCode).To(BeZero()) + } + }) + + It("should not start app containers if init containers fail on a RestartAlways pod", func() { + podClient := f.Client.Pods(f.Namespace.Name) + + By("creating the pod") + name := "pod-init-" + string(util.NewUUID()) + value := strconv.Itoa(time.Now().Nanosecond()) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "name": "foo", + "time": value, + }, + }, + Spec: api.PodSpec{ + InitContainers: []api.Container{ + { + Name: "init1", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/false"}, + }, + { + Name: "init2", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/true"}, + }, + }, + Containers: []api.Container{ + { + Name: "run1", + Image: "gcr.io/google_containers/pause:2.0", + Resources: api.ResourceRequirements{ + Limits: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(10*1024*1024, resource.DecimalSI), + }, + }, + }, + }, + }, + } + defer podClient.Delete(pod.Name, nil) + startedPod, err := podClient.Create(pod) + if err != nil { + framework.Failf("Error creating a pod: %v", err) + } + w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta)) + if err != nil { + framework.Failf("Error watching a pod: %v", err) + } + + wr := watch.NewRecorder(w) + event, err := watch.Until( + framework.PodStartTimeout, wr, + // check for the first container to fail at least once + func(evt watch.Event) (bool, error) { + switch t := evt.Object.(type) { + case *api.Pod: + for _, status := range t.Status.ContainerStatuses { + if status.State.Waiting == nil { + return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status) + } + if status.State.Waiting.Reason != "PodInitializing" { + return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status) + } + } + if len(t.Status.InitContainerStatuses) != 2 { + return false, nil + } + status := t.Status.InitContainerStatuses[1] + if status.State.Waiting == nil { + return false, fmt.Errorf("second init container should not be out of waiting: %#v", status) + } + if 
status.State.Waiting.Reason != "PodInitializing" { + return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status) + } + status = t.Status.InitContainerStatuses[0] + if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 { + return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status) + } + // continue until we see an attempt to restart the pod + return status.LastTerminationState.Terminated != nil, nil + default: + return false, fmt.Errorf("unexpected object: %#v", t) + } + }, + // verify we get two restarts + func(evt watch.Event) (bool, error) { + switch t := evt.Object.(type) { + case *api.Pod: + status := t.Status.InitContainerStatuses[0] + if status.RestartCount < 3 { + return false, nil + } + framework.Logf("init container has failed twice: %#v", t) + // TODO: more conditions + return true, nil + default: + return false, fmt.Errorf("unexpected object: %#v", t) + } + }, + ) + Expect(err).To(BeNil()) + framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant) + endPod := event.Object.(*api.Pod) + + Expect(endPod.Status.Phase).To(Equal(api.PodPending)) + _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized) + Expect(init).NotTo(BeNil()) + Expect(init.Status).To(Equal(api.ConditionFalse)) + Expect(init.Reason).To(Equal("ContainersNotInitialized")) + Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]")) + Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2)) + }) + + It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() { + podClient := f.Client.Pods(f.Namespace.Name) + + By("creating the pod") + name := "pod-init-" + string(util.NewUUID()) + value := strconv.Itoa(time.Now().Nanosecond()) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "name": "foo", + "time": value, + }, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyNever, + InitContainers: []api.Container{ + { + Name: "init1", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/true"}, + }, + { + Name: "init2", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/false"}, + }, + }, + Containers: []api.Container{ + { + Name: "run1", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"/bin/true"}, + Resources: api.ResourceRequirements{ + Limits: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(10*1024*1024, resource.DecimalSI), + }, + }, + }, + }, + }, + } + defer podClient.Delete(pod.Name, nil) + startedPod, err := podClient.Create(pod) + if err != nil { + framework.Failf("Error creating a pod: %v", err) + } + w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta)) + if err != nil { + framework.Failf("Error watching a pod: %v", err) + } + + wr := watch.NewRecorder(w) + event, err := watch.Until( + framework.PodStartTimeout, wr, + // check for the second container to fail at least once + func(evt watch.Event) (bool, error) { + switch t := evt.Object.(type) { + case *api.Pod: + for _, status := range t.Status.ContainerStatuses { + if status.State.Waiting == nil { + return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status) + } + if status.State.Waiting.Reason != "PodInitializing" { + return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status) + 
} + } + if len(t.Status.InitContainerStatuses) != 2 { + return false, nil + } + status := t.Status.InitContainerStatuses[0] + if status.State.Terminated == nil { + if status.State.Waiting != nil && status.State.Waiting.Reason != "PodInitializing" { + return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status) + } + return false, nil + } + if status.State.Terminated != nil && status.State.Terminated.ExitCode != 0 { + return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status) + } + status = t.Status.InitContainerStatuses[1] + if status.State.Terminated == nil { + return false, nil + } + if status.State.Terminated.ExitCode == 0 { + return false, fmt.Errorf("second init container should have failed: %#v", status) + } + return true, nil + default: + return false, fmt.Errorf("unexpected object: %#v", t) + } + }, + client.PodCompleted, + ) + Expect(err).To(BeNil()) + framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant) + endPod := event.Object.(*api.Pod) + + Expect(endPod.Status.Phase).To(Equal(api.PodFailed)) + _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized) + Expect(init).NotTo(BeNil()) + Expect(init.Status).To(Equal(api.ConditionFalse)) + Expect(init.Reason).To(Equal("ContainersNotInitialized")) + Expect(init.Message).To(Equal("containers with incomplete status: [init2]")) + Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2)) + Expect(endPod.Status.ContainerStatuses[0].State.Waiting).ToNot(BeNil()) + }) + It("should be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() { runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{ ObjectMeta: api.ObjectMeta{ From 1b6591312df88a1806d734d191ab84056a92c18f Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Fri, 8 Apr 2016 11:20:24 -0400 Subject: [PATCH 4/6] Update the scheduler to handle init containers --- .../scheduler/algorithm/predicates/error.go | 2 +- .../algorithm/predicates/predicates.go | 12 +++- .../algorithm/predicates/predicates_test.go | 71 ++++++++++++++++++- 3 files changed, 82 insertions(+), 3 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/predicates/error.go b/plugin/pkg/scheduler/algorithm/predicates/error.go index 9f6a0d1bb39..b95cc4eb2f7 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/error.go +++ b/plugin/pkg/scheduler/algorithm/predicates/error.go @@ -21,7 +21,7 @@ import "fmt" const ( podCountResourceName string = "PodCount" cpuResourceName string = "CPU" - memoryResoureceName string = "Memory" + memoryResourceName string = "Memory" nvidiaGpuResourceName string = "NvidiaGpu" ) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 4b1c8c9b5d9..a01876c3fca 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -359,6 +359,16 @@ func getResourceRequest(pod *api.Pod) resourceRequest { result.milliCPU += requests.Cpu().MilliValue() result.nvidiaGPU += requests.NvidiaGPU().Value() } + // take max_resource(sum_pod, any_init_container) + for _, container := range pod.Spec.InitContainers { + requests := container.Resources.Requests + if mem := requests.Memory().Value(); mem > result.memory { + result.memory = mem + } + if cpu := requests.Cpu().MilliValue(); cpu > result.milliCPU { + result.milliCPU = cpu + } + } return result } @@ -428,7 +438,7 @@ func PodFitsResources(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) 
(bool, er } if totalMemory < podRequest.memory+nodeInfo.RequestedResource().Memory { return false, - newInsufficientResourceError(memoryResoureceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory) + newInsufficientResourceError(memoryResourceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory) } if totalNvidiaGPU < podRequest.nvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU { return false, diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go index a2e7e691c37..358a483909e 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -111,6 +111,11 @@ func newResourcePod(usage ...resourceRequest) *api.Pod { } } +func newResourceInitPod(pod *api.Pod, usage ...resourceRequest) *api.Pod { + pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers + return pod +} + func TestPodFitsResources(t *testing.T) { enoughPodsTests := []struct { pod *api.Pod @@ -135,6 +140,54 @@ func TestPodFitsResources(t *testing.T) { test: "too many resources fails", wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10), }, + { + pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}), + nodeInfo: schedulercache.NewNodeInfo( + newResourcePod(resourceRequest{milliCPU: 8, memory: 19})), + fits: false, + test: "too many resources fails due to init container cpu", + wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10), + }, + { + pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}, resourceRequest{milliCPU: 2, memory: 1}), + nodeInfo: schedulercache.NewNodeInfo( + newResourcePod(resourceRequest{milliCPU: 8, memory: 19})), + fits: false, + test: "too many resources fails due to highest init container cpu", + wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10), + }, + { + pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}), + nodeInfo: schedulercache.NewNodeInfo( + newResourcePod(resourceRequest{milliCPU: 9, memory: 19})), + fits: false, + test: "too many resources fails due to init container memory", + wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20), + }, + { + pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}, resourceRequest{milliCPU: 1, memory: 2}), + nodeInfo: schedulercache.NewNodeInfo( + newResourcePod(resourceRequest{milliCPU: 9, memory: 19})), + fits: false, + test: "too many resources fails due to highest init container memory", + wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20), + }, + { + pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}), + nodeInfo: schedulercache.NewNodeInfo( + newResourcePod(resourceRequest{milliCPU: 9, memory: 19})), + fits: true, + test: "init container fits because it's the max, not sum, of containers and init containers", + wErr: nil, + }, + { + pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}, resourceRequest{milliCPU: 1, memory: 1}), + nodeInfo: schedulercache.NewNodeInfo( + newResourcePod(resourceRequest{milliCPU: 9, memory: 19})), + fits: true, + test: "multiple init containers fit because it's 
the max, not sum, of containers and init containers", + wErr: nil, + }, { pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), nodeInfo: schedulercache.NewNodeInfo( @@ -149,7 +202,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(resourceRequest{milliCPU: 5, memory: 19})), fits: false, test: "one resources fits", - wErr: newInsufficientResourceError(memoryResoureceName, 2, 19, 20), + wErr: newInsufficientResourceError(memoryResourceName, 2, 19, 20), }, { pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), @@ -159,6 +212,14 @@ func TestPodFitsResources(t *testing.T) { test: "equal edge case", wErr: nil, }, + { + pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 4, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}), + nodeInfo: schedulercache.NewNodeInfo( + newResourcePod(resourceRequest{milliCPU: 5, memory: 19})), + fits: true, + test: "equal edge case for init container", + wErr: nil, + }, } for _, test := range enoughPodsTests { @@ -205,6 +266,14 @@ func TestPodFitsResources(t *testing.T) { test: "even for equal edge case predicate fails when there's no space for additional pod", wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1), }, + { + pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}), + nodeInfo: schedulercache.NewNodeInfo( + newResourcePod(resourceRequest{milliCPU: 5, memory: 19})), + fits: false, + test: "even for equal edge case predicate fails when there's no space for additional pod due to init container", + wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1), + }, } for _, test := range notEnoughPodsTests { node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1)}} From f2008152f4382add2bbc617e46390f9ea91084e9 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Fri, 8 Apr 2016 11:20:33 -0400 Subject: [PATCH 5/6] Update limit ranging to handle init containers --- plugin/pkg/admission/limitranger/admission.go | 99 ++++++++++++------- .../admission/limitranger/admission_test.go | 88 ++++++++++++++++- 2 files changed, 150 insertions(+), 37 deletions(-) diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index c8f3acd0cd0..6b6d8331fe4 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -208,45 +208,54 @@ func defaultContainerResourceRequirements(limitRange *api.LimitRange) api.Resour return requirements } +// mergeContainerResources handles defaulting all of the resources on a container. 
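+// It records which requests and limits were defaulted by appending notes to the provided
+// annotations slice and returns the updated slice; annotationPrefix distinguishes regular
+// containers from init containers in those notes.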
+func mergeContainerResources(container *api.Container, defaultRequirements *api.ResourceRequirements, annotationPrefix string, annotations []string) []string { + setRequests := []string{} + setLimits := []string{} + if container.Resources.Limits == nil { + container.Resources.Limits = api.ResourceList{} + } + if container.Resources.Requests == nil { + container.Resources.Requests = api.ResourceList{} + } + for k, v := range defaultRequirements.Limits { + _, found := container.Resources.Limits[k] + if !found { + container.Resources.Limits[k] = *v.Copy() + setLimits = append(setLimits, string(k)) + } + } + for k, v := range defaultRequirements.Requests { + _, found := container.Resources.Requests[k] + if !found { + container.Resources.Requests[k] = *v.Copy() + setRequests = append(setRequests, string(k)) + } + } + if len(setRequests) > 0 { + sort.Strings(setRequests) + a := strings.Join(setRequests, ", ") + fmt.Sprintf(" request for %s %s", annotationPrefix, container.Name) + annotations = append(annotations, a) + } + if len(setLimits) > 0 { + sort.Strings(setLimits) + a := strings.Join(setLimits, ", ") + fmt.Sprintf(" limit for %s %s", annotationPrefix, container.Name) + annotations = append(annotations, a) + } + return annotations +} + // mergePodResourceRequirements merges enumerated requirements with default requirements // it annotates the pod with information about what requirements were modified func mergePodResourceRequirements(pod *api.Pod, defaultRequirements *api.ResourceRequirements) { annotations := []string{} for i := range pod.Spec.Containers { - container := &pod.Spec.Containers[i] - setRequests := []string{} - setLimits := []string{} - if container.Resources.Limits == nil { - container.Resources.Limits = api.ResourceList{} - } - if container.Resources.Requests == nil { - container.Resources.Requests = api.ResourceList{} - } - for k, v := range defaultRequirements.Limits { - _, found := container.Resources.Limits[k] - if !found { - container.Resources.Limits[k] = *v.Copy() - setLimits = append(setLimits, string(k)) - } - } - for k, v := range defaultRequirements.Requests { - _, found := container.Resources.Requests[k] - if !found { - container.Resources.Requests[k] = *v.Copy() - setRequests = append(setRequests, string(k)) - } - } - if len(setRequests) > 0 { - sort.Strings(setRequests) - a := strings.Join(setRequests, ", ") + " request for container " + container.Name - annotations = append(annotations, a) - } - if len(setLimits) > 0 { - sort.Strings(setLimits) - a := strings.Join(setLimits, ", ") + " limit for container " + container.Name - annotations = append(annotations, a) - } + annotations = mergeContainerResources(&pod.Spec.Containers[i], defaultRequirements, "container", annotations) + } + + for i := range pod.Spec.InitContainers { + annotations = mergeContainerResources(&pod.Spec.InitContainers[i], defaultRequirements, "init container", annotations) } if len(annotations) > 0 { @@ -441,7 +450,7 @@ func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { } } - // enforce pod limits + // enforce pod limits on init containers if limitType == api.LimitTypePod { containerRequests, containerLimits := []api.ResourceList{}, []api.ResourceList{} for j := range pod.Spec.Containers { @@ -451,6 +460,28 @@ func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { } podRequests := sum(containerRequests) podLimits := sum(containerLimits) + for j := range pod.Spec.InitContainers { + container := &pod.Spec.InitContainers[j] + // take max(sum_containers, 
any_init_container) + for k, v := range container.Resources.Requests { + if v2, ok := podRequests[k]; ok { + if v.Cmp(v2) > 0 { + podRequests[k] = v + } + } else { + podRequests[k] = v + } + } + for k, v := range container.Resources.Limits { + if v2, ok := podLimits[k]; ok { + if v.Cmp(v2) > 0 { + podLimits[k] = v + } + } else { + podLimits[k] = v + } + } + } for k, v := range limit.Min { if err := minConstraint(limitType, k, v, podRequests, podLimits); err != nil { errs = append(errs, err) diff --git a/plugin/pkg/admission/limitranger/admission_test.go b/plugin/pkg/admission/limitranger/admission_test.go index 1b36f7342a1..c83edf9d701 100644 --- a/plugin/pkg/admission/limitranger/admission_test.go +++ b/plugin/pkg/admission/limitranger/admission_test.go @@ -134,6 +134,17 @@ func validPod(name string, numContainers int, resources api.ResourceRequirements return pod } +func validPodInit(pod api.Pod, resources ...api.ResourceRequirements) api.Pod { + for i := 0; i < len(resources); i++ { + pod.Spec.InitContainers = append(pod.Spec.InitContainers, api.Container{ + Image: "foo:V" + strconv.Itoa(i), + Resources: resources[i], + Name: "foo-" + strconv.Itoa(i), + }) + } + return pod +} + func TestDefaultContainerResourceRequirements(t *testing.T) { limitRange := validLimitRange() expected := api.ResourceRequirements{ @@ -183,7 +194,7 @@ func TestMergePodResourceRequirements(t *testing.T) { // pod with some resources enumerated should only merge empty input := getResourceRequirements(getResourceList("", "512Mi"), getResourceList("", "")) - pod = validPod("limit-memory", 1, input) + pod = validPodInit(validPod("limit-memory", 1, input), input) expected = api.ResourceRequirements{ Requests: api.ResourceList{ api.ResourceCPU: defaultRequirements.Requests[api.ResourceCPU], @@ -198,11 +209,18 @@ func TestMergePodResourceRequirements(t *testing.T) { t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, expected, actual) } } + for i := range pod.Spec.InitContainers { + actual := pod.Spec.InitContainers[i].Resources + if !api.Semantic.DeepEqual(expected, actual) { + t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, expected, actual) + } + } verifyAnnotation(t, &pod, "LimitRanger plugin set: cpu request for container foo-0; cpu, memory limit for container foo-0") // pod with all resources enumerated should not merge anything input = getResourceRequirements(getResourceList("100m", "512Mi"), getResourceList("200m", "1G")) - pod = validPod("limit-memory", 1, input) + initInputs := []api.ResourceRequirements{getResourceRequirements(getResourceList("200m", "1G"), getResourceList("400m", "2G"))} + pod = validPodInit(validPod("limit-memory", 1, input), initInputs...) 
expected = input mergePodResourceRequirements(&pod, &defaultRequirements) for i := range pod.Spec.Containers { @@ -211,6 +229,12 @@ func TestMergePodResourceRequirements(t *testing.T) { t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, expected, actual) } } + for i := range pod.Spec.InitContainers { + actual := pod.Spec.InitContainers[i].Resources + if !api.Semantic.DeepEqual(initInputs[i], actual) { + t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, initInputs[i], actual) + } + } expectNoAnnotation(t, &pod) } @@ -273,6 +297,20 @@ func TestPodLimitFunc(t *testing.T) { pod: validPod("pod-min-memory-request-limit", 2, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", "100Mi"))), limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, + { + pod: validPodInit( + validPod("pod-init-min-memory-request", 2, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", ""))), + getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", "")), + ), + limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + }, + { + pod: validPodInit( + validPod("pod-init-min-memory-request-limit", 2, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", "100Mi"))), + getResourceRequirements(getResourceList("", "80Mi"), getResourceList("", "100Mi")), + ), + limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + }, { pod: validPod("pod-max-cpu-request-limit", 2, getResourceRequirements(getResourceList("500m", ""), getResourceList("1", ""))), limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), @@ -281,6 +319,22 @@ func TestPodLimitFunc(t *testing.T) { pod: validPod("pod-max-cpu-limit", 2, getResourceRequirements(getResourceList("", ""), getResourceList("1", ""))), limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, + { + pod: validPodInit( + validPod("pod-init-max-cpu-request-limit", 2, getResourceRequirements(getResourceList("500m", ""), getResourceList("1", ""))), + getResourceRequirements(getResourceList("1", ""), getResourceList("2", "")), + getResourceRequirements(getResourceList("1", ""), getResourceList("1", "")), + ), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + }, + { + pod: validPodInit( + validPod("pod-init-max-cpu-limit", 2, getResourceRequirements(getResourceList("", ""), getResourceList("1", ""))), + getResourceRequirements(getResourceList("", ""), getResourceList("2", "")), + getResourceRequirements(getResourceList("", ""), getResourceList("2", "")), + ), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + }, { pod: validPod("pod-max-mem-request-limit", 2, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "500Mi"))), limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), 
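Both the scheduler change (getResourceRequest) and the LimitRanger change (PodLimitFunc) apply the same rule for pod-level resources: take max(sum over regular containers, max over any single init container). A minimal standalone sketch of that rule follows; the effectiveValue helper and the bare int64 quantities are illustrative only and are not part of the patch.

package main

import "fmt"

// effectiveValue follows the rule used in this series: regular containers run
// concurrently so their values are summed; init containers run one at a time
// so only the largest single value matters. The pod-level value is the max of
// the two.
func effectiveValue(appValues, initValues []int64) int64 {
	var sum int64
	for _, v := range appValues {
		sum += v
	}
	effective := sum
	for _, v := range initValues {
		if v > effective {
			effective = v
		}
	}
	return effective
}

func main() {
	// App containers requesting 1 milli-CPU in total plus an init container
	// requesting 3 give an effective request of 3, as asserted by the
	// "fails due to init container cpu" predicate test earlier in this series.
	fmt.Println(effectiveValue([]int64{1}, []int64{3})) // prints 3
}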
@@ -387,6 +441,13 @@ func TestPodLimitFunc(t *testing.T) {
 			pod:        validPod("pod-max-mem-limit", 3, getResourceRequirements(getResourceList("", ""), getResourceList("", "500Mi"))),
 			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
+		{
+			pod: validPodInit(
+				validPod("pod-init-max-mem-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "500Mi"))),
+				getResourceRequirements(getResourceList("", ""), getResourceList("", "1.5Gi")),
+			),
+			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+		},
 		{
 			pod:        validPod("pod-max-mem-ratio", 3, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "500Mi"))),
 			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "2Gi"), api.ResourceList{}, api.ResourceList{}, getResourceList("", "1.5")),
@@ -403,7 +464,7 @@ func TestPodLimitFunc(t *testing.T) {
 
 func TestPodLimitFuncApplyDefault(t *testing.T) {
 	limitRange := validLimitRange()
-	testPod := validPod("foo", 1, getResourceRequirements(api.ResourceList{}, api.ResourceList{}))
+	testPod := validPodInit(validPod("foo", 1, getResourceRequirements(api.ResourceList{}, api.ResourceList{})), getResourceRequirements(api.ResourceList{}, api.ResourceList{}))
 	err := PodLimitFunc(&limitRange, &testPod)
 	if err != nil {
 		t.Errorf("Unexpected error for valid pod: %v, %v", testPod.Name, err)
@@ -429,6 +490,27 @@ func TestPodLimitFuncApplyDefault(t *testing.T) {
 			t.Errorf("Unexpected cpu value %s", requestCpu)
 		}
 	}
+
+	for i := range testPod.Spec.InitContainers {
+		container := testPod.Spec.InitContainers[i]
+		limitMemory := container.Resources.Limits.Memory().String()
+		limitCpu := container.Resources.Limits.Cpu().String()
+		requestMemory := container.Resources.Requests.Memory().String()
+		requestCpu := container.Resources.Requests.Cpu().String()
+
+		if limitMemory != "10Mi" {
+			t.Errorf("Unexpected memory value %s", limitMemory)
+		}
+		if limitCpu != "75m" {
+			t.Errorf("Unexpected cpu value %s", limitCpu)
+		}
+		if requestMemory != "5Mi" {
+			t.Errorf("Unexpected memory value %s", requestMemory)
+		}
+		if requestCpu != "50m" {
+			t.Errorf("Unexpected cpu value %s", requestCpu)
+		}
+	}
 }
 
 func TestLimitRangerIgnoresSubresource(t *testing.T) {

From 2a53330700ac39ee61109c748fe665cf38581a5d Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Mon, 2 May 2016 18:08:15 -0400
Subject: [PATCH 6/6] Describe and get should show init containers

---
 pkg/api/resource_helpers.go     | 23 +++++++++++++
 pkg/kubectl/describe.go         | 18 ++++++++---
 pkg/kubectl/describe_test.go    |  2 +-
 pkg/kubectl/resource_printer.go | 57 +++++++++++++++++++++++++--------
 4 files changed, 81 insertions(+), 19 deletions(-)

diff --git a/pkg/api/resource_helpers.go b/pkg/api/resource_helpers.go
index af1189b4fb3..74bc1720c93 100644
--- a/pkg/api/resource_helpers.go
+++ b/pkg/api/resource_helpers.go
@@ -163,5 +163,28 @@ func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, li
 			}
 		}
 	}
+	// init containers define the minimum of any resource
+	for _, container := range pod.Spec.InitContainers {
+		for name, quantity := range container.Resources.Requests {
+			value, ok := reqs[name]
+			if !ok {
+				reqs[name] = *quantity.Copy()
+				continue
+			}
+			if quantity.Cmp(value) > 0 {
+				reqs[name] = *quantity.Copy()
+			}
+		}
+		for name, quantity := range container.Resources.Limits {
+			value, ok := limits[name]
+			if !ok {
+				limits[name] = *quantity.Copy()
+				continue
+			}
+			if quantity.Cmp(value) > 0 {
+				limits[name] = *quantity.Copy()
+			}
+		}
+	}
 	return
 }
diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go
index c0189f56041..2b11114abe6 100644
--- a/pkg/kubectl/describe.go
+++ b/pkg/kubectl/describe.go
@@ -524,7 +524,10 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) {
 	}
 	fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP)
 	fmt.Fprintf(out, "Controllers:\t%s\n", printControllers(pod.Annotations))
-	describeContainers(pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out, "")
+	if len(pod.Spec.InitContainers) > 0 {
+		describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), out, "")
+	}
+	describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out, "")
 	if len(pod.Status.Conditions) > 0 {
 		fmt.Fprint(out, "Conditions:\n Type\tStatus\n")
 		for _, c := range pod.Status.Conditions {
@@ -782,12 +785,16 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri
 }
 
 // TODO: Do a better job at indenting, maybe by using a prefix writer
-func describeContainers(containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer, space string) {
+func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer, space string) {
 	statuses := map[string]api.ContainerStatus{}
 	for _, status := range containerStatuses {
 		statuses[status.Name] = status
 	}
-	fmt.Fprintf(out, "%sContainers:\n", space)
+	if len(containers) == 0 {
+		fmt.Fprintf(out, "%s%s: \n", space, label)
+	} else {
+		fmt.Fprintf(out, "%s%s:\n", space, label)
+	}
 	for _, container := range containers {
 		status, ok := statuses[container.Name]
 		nameIndent := ""
@@ -1037,7 +1044,10 @@ func DescribePodTemplate(template *api.PodTemplateSpec, out io.Writer) {
 	if len(template.Spec.ServiceAccountName) > 0 {
 		fmt.Fprintf(out, " Service Account:\t%s\n", template.Spec.ServiceAccountName)
 	}
-	describeContainers(template.Spec.Containers, nil, nil, out, " ")
+	if len(template.Spec.InitContainers) > 0 {
+		describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, out, " ")
+	}
+	describeContainers("Containers", template.Spec.Containers, nil, nil, out, " ")
 	describeVolumes(template.Spec.Volumes, out, " ")
 }
 
diff --git a/pkg/kubectl/describe_test.go b/pkg/kubectl/describe_test.go
index f073d3050cd..429216da92d 100644
--- a/pkg/kubectl/describe_test.go
+++ b/pkg/kubectl/describe_test.go
@@ -268,7 +268,7 @@ func TestDescribeContainers(t *testing.T) {
 				ContainerStatuses: []api.ContainerStatus{testCase.status},
 			},
 		}
-		describeContainers(pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(&pod), out, "")
+		describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(&pod), out, "")
 		output := out.String()
 		for _, expected := range testCase.expectedElements {
 			if !strings.Contains(output, expected) {
diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go
index e29a952d27c..c0811806c60 100644
--- a/pkg/kubectl/resource_printer.go
+++ b/pkg/kubectl/resource_printer.go
@@ -594,22 +594,51 @@ func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error {
 		reason = pod.Status.Reason
 	}
 
-	for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
-		container := pod.Status.ContainerStatuses[i]
-
-		restarts += int(container.RestartCount)
-		if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
-			reason = container.State.Waiting.Reason
-		} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
-			reason = container.State.Terminated.Reason
-		} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
-			if container.State.Terminated.Signal != 0 {
-				reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
+	initializing := false
+	for i := range pod.Status.InitContainerStatuses {
+		container := pod.Status.InitContainerStatuses[i]
+		switch {
+		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
+			continue
+		case container.State.Terminated != nil:
+			// initialization failed
+			if len(container.State.Terminated.Reason) == 0 {
+				if container.State.Terminated.Signal != 0 {
+					reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
+				} else {
+					reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
+				}
 			} else {
-				reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
+				reason = "Init:" + container.State.Terminated.Reason
+			}
+			initializing = true
+		case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
+			reason = "Init:" + container.State.Waiting.Reason
+			initializing = true
+		default:
+			reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
+			initializing = true
+		}
+		break
+	}
+	if !initializing {
+		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
+			container := pod.Status.ContainerStatuses[i]
+
+			restarts += int(container.RestartCount)
+			if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
+				reason = container.State.Waiting.Reason
+			} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
+				reason = container.State.Terminated.Reason
+			} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
+				if container.State.Terminated.Signal != 0 {
+					reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
+				} else {
+					reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
+				}
+			} else if container.Ready && container.State.Running != nil {
+				readyContainers++
 			}
-		} else if container.Ready && container.State.Running != nil {
-			readyContainers++
 		}
 	}
 	if pod.DeletionTimestamp != nil {