Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 23:15:14 +00:00)
Merge pull request #23567 from smarterclayton/init_containers

Automatic merge from submit-queue.

Add init containers to pods. This implements #1589 as described in proposal #23666. It incorporates feedback on #1589, creates a parallel structure for InitContainers and Containers, adds validation for InitContainers that requires name uniqueness, and comments on a number of implications of init containers. This is a complete alpha implementation.
This commit is contained in commit bf4f84167f.
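Because the v1 `InitContainers` field introduced below is serialized as `json:"-"` and round-tripped through the `pod.alpha.kubernetes.io/init-containers` annotation (see the conversion changes in the diff), a client working against this alpha build would attach init containers by JSON-encoding them into that annotation. The following is a minimal standalone sketch of that encoding step, using a simplified local `container` struct rather than the real `v1.Container` type; the container names and commands are made up for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// container is a simplified stand-in for v1.Container; the real type has many
// more fields. Only the pieces needed to illustrate the annotation are kept.
type container struct {
	Name    string   `json:"name"`
	Image   string   `json:"image"`
	Command []string `json:"command,omitempty"`
}

func main() {
	// JSON-encode the init containers, since the conversion code in this PR
	// expects to find them under the alpha annotation key.
	inits := []container{
		{Name: "wait-for-db", Image: "busybox", Command: []string{"sh", "-c", "until nc -z db 5432; do sleep 1; done"}},
	}
	value, err := json.Marshal(inits)
	if err != nil {
		panic(err)
	}

	annotations := map[string]string{
		"pod.alpha.kubernetes.io/init-containers": string(value),
	}
	fmt.Println(annotations)
}
```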
@@ -11,6 +11,7 @@
 - [Other notable changes](#other-notable-changes)
 - [v1.3.0-alpha.3](#v130-alpha3)
 - [Downloads](#downloads)
+- [v1.3.0-alpha.3](#v130-alpha3)
 - [Changes since v1.3.0-alpha.2](#changes-since-v130-alpha2)
 - [Action Required](#action-required)
 - [Other notable changes](#other-notable-changes)
@@ -1854,7 +1854,7 @@
 "items": {
 "$ref": "v1.VolumeMount"
 },
-"description": "Pod volumes to mount into the container's filesyste. Cannot be updated."
+"description": "Pod volumes to mount into the container's filesystem. Cannot be updated."
 },
 "livenessProbe": {
 "$ref": "v1.Probe",

@@ -1859,7 +1859,7 @@
 "items": {
 "$ref": "v1.VolumeMount"
 },
-"description": "Pod volumes to mount into the container's filesyste. Cannot be updated."
+"description": "Pod volumes to mount into the container's filesystem. Cannot be updated."
 },
 "livenessProbe": {
 "$ref": "v1.Probe",

@@ -7166,7 +7166,7 @@
 "items": {
 "$ref": "v1.VolumeMount"
 },
-"description": "Pod volumes to mount into the container's filesyste. Cannot be updated."
+"description": "Pod volumes to mount into the container's filesystem. Cannot be updated."
 },
 "livenessProbe": {
 "$ref": "v1.Probe",

@@ -17177,7 +17177,7 @@
 "items": {
 "$ref": "v1.VolumeMount"
 },
-"description": "Pod volumes to mount into the container's filesyste. Cannot be updated."
+"description": "Pod volumes to mount into the container's filesystem. Cannot be updated."
 },
 "livenessProbe": {
 "$ref": "v1.Probe",
@@ -1375,7 +1375,7 @@ Examples:<br>
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">volumeMounts</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">Pod volumes to mount into the container’s filesyste. Cannot be updated.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Pod volumes to mount into the container’s filesystem. Cannot be updated.</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_volumemount">v1.VolumeMount</a> array</p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -3979,7 +3979,7 @@ Populated by the system when a graceful deletion is requested. Read-only. More i
 </div>
 <div id="footer">
 <div id="footer-text">
-Last updated 2016-05-06 04:28:34 UTC
+Last updated 2016-05-06 14:18:34 UTC
 </div>
 </div>
 </body>

@@ -4706,7 +4706,7 @@ Both these may change in the future. Incoming requests are matched against the h
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">volumeMounts</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">Pod volumes to mount into the container’s filesyste. Cannot be updated.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Pod volumes to mount into the container’s filesystem. Cannot be updated.</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_volumemount">v1.VolumeMount</a> array</p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -5440,7 +5440,7 @@ The resulting set of endpoints can be viewed as:<br>
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">volumeMounts</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">Pod volumes to mount into the container’s filesyste. Cannot be updated.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Pod volumes to mount into the container’s filesystem. Cannot be updated.</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_volumemount">v1.VolumeMount</a> array</p></td>
 <td class="tableblock halign-left valign-top"></td>
@@ -2232,6 +2232,17 @@ func DeepCopy_api_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error
 } else {
 out.Volumes = nil
 }
+if in.InitContainers != nil {
+in, out := in.InitContainers, &out.InitContainers
+*out = make([]Container, len(in))
+for i := range in {
+if err := DeepCopy_api_Container(in[i], &(*out)[i], c); err != nil {
+return err
+}
+}
+} else {
+out.InitContainers = nil
+}
 if in.Containers != nil {
 in, out := in.Containers, &out.Containers
 *out = make([]Container, len(in))

@@ -2321,6 +2332,17 @@ func DeepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner)
 } else {
 out.StartTime = nil
 }
+if in.InitContainerStatuses != nil {
+in, out := in.InitContainerStatuses, &out.InitContainerStatuses
+*out = make([]ContainerStatus, len(in))
+for i := range in {
+if err := DeepCopy_api_ContainerStatus(in[i], &(*out)[i], c); err != nil {
+return err
+}
+}
+} else {
+out.InitContainerStatuses = nil
+}
 if in.ContainerStatuses != nil {
 in, out := in.ContainerStatuses, &out.ContainerStatuses
 *out = make([]ContainerStatus, len(in))
@@ -92,6 +92,8 @@ func GetPodReadyCondition(status PodStatus) *PodCondition {
 return condition
 }
 
+// GetPodCondition extracts the provided condition from the given status and returns that.
+// Returns nil and -1 if the condition is not present, and the the index of the located condition.
 func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) {
 for i, c := range status.Conditions {
 if c.Type == conditionType {

@@ -161,5 +163,28 @@ func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, li
 }
 }
 }
+// init containers define the minimum of any resource
+for _, container := range pod.Spec.InitContainers {
+for name, quantity := range container.Resources.Requests {
+value, ok := reqs[name]
+if !ok {
+reqs[name] = *quantity.Copy()
+continue
+}
+if quantity.Cmp(value) > 0 {
+reqs[name] = *quantity.Copy()
+}
+}
+for name, quantity := range container.Resources.Limits {
+value, ok := limits[name]
+if !ok {
+limits[name] = *quantity.Copy()
+continue
+}
+if quantity.Cmp(value) > 0 {
+limits[name] = *quantity.Copy()
+}
+}
+}
 return
 }
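The hunk above folds init containers into `PodRequestsAndLimits` by raising each resource, already accumulated as the sum of the regular containers' requests and limits, to the largest value requested by any single init container. The PodSpec documentation later in this diff states the same rule. A standalone sketch of that accounting, using plain integers in place of `resource.Quantity` and hypothetical example values:

```go
package main

import "fmt"

// effectiveRequests mirrors the rule above: start from the sum of the regular
// containers' requests, then raise each resource to the largest single
// init-container request if that is higher.
func effectiveRequests(containers, initContainers []map[string]int64) map[string]int64 {
	reqs := map[string]int64{}
	for _, c := range containers {
		for name, q := range c {
			reqs[name] += q // regular containers run together, so their requests sum
		}
	}
	for _, c := range initContainers {
		for name, q := range c {
			if q > reqs[name] { // init containers run one at a time, so only the max matters
				reqs[name] = q
			}
		}
	}
	return reqs
}

func main() {
	containers := []map[string]int64{{"cpu": 100, "memory": 256}, {"cpu": 200, "memory": 256}}
	initContainers := []map[string]int64{{"cpu": 500}, {"memory": 128}}
	// cpu: max(100+200, 500) = 500; memory: max(256+256, 128) = 512
	fmt.Println(effectiveRequests(containers, initContainers))
}
```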
@@ -67,6 +67,9 @@ func TestUniversalDeserializer(t *testing.T) {
 func TestProtobufRoundTrip(t *testing.T) {
 obj := &v1.Pod{}
 apitesting.FuzzerFor(t, v1.SchemeGroupVersion, rand.NewSource(benchmarkSeed)).Fuzz(obj)
+// InitContainers are turned into annotations by conversion.
+obj.Spec.InitContainers = nil
+obj.Status.InitContainerStatuses = nil
 data, err := obj.Marshal()
 if err != nil {
 t.Fatal(err)

@@ -77,7 +80,7 @@ func TestProtobufRoundTrip(t *testing.T) {
 }
 if !api.Semantic.Equalities.DeepEqual(out, obj) {
 t.Logf("marshal\n%s", hex.Dump(data))
-t.Fatalf("Unmarshal is unequal\n%s", diff.ObjectGoPrintSideBySide(out, obj))
+t.Fatalf("Unmarshal is unequal\n%s", diff.ObjectGoPrintDiff(out, obj))
 }
 }
 
@@ -52540,7 +52540,7 @@ func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) {
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 576)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 624)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -53849,7 +53849,7 @@ func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Deco
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 648)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]
|
@ -1076,6 +1076,8 @@ const (
|
|||||||
// PodReady means the pod is able to service requests and should be added to the
|
// PodReady means the pod is able to service requests and should be added to the
|
||||||
// load balancing pools of all matching services.
|
// load balancing pools of all matching services.
|
||||||
PodReady PodConditionType = "Ready"
|
PodReady PodConditionType = "Ready"
|
||||||
|
// PodInitialized means that all init containers in the pod have started successfully.
|
||||||
|
PodInitialized PodConditionType = "Initialized"
|
||||||
)
|
)
|
||||||
|
|
||||||
type PodCondition struct {
|
type PodCondition struct {
|
||||||
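With `PodInitialized` added as a condition type and the `GetPodCondition` helper exposed earlier in this diff, a caller can check whether all init containers have completed by scanning `Status.Conditions`. A minimal standalone sketch with simplified local types (the real ones live in the `api` package):

```go
package main

import "fmt"

type podConditionType string

const podInitialized podConditionType = "Initialized"

type podCondition struct {
	Type   podConditionType
	Status string // "True", "False", or "Unknown" in the real API
}

// getPodCondition mirrors the helper added above: it returns the index and a
// pointer to the matching condition, or -1 and nil if it is absent.
func getPodCondition(conditions []podCondition, t podConditionType) (int, *podCondition) {
	for i := range conditions {
		if conditions[i].Type == t {
			return i, &conditions[i]
		}
	}
	return -1, nil
}

func main() {
	conditions := []podCondition{
		{Type: podInitialized, Status: "True"},
		{Type: "Ready", Status: "False"},
	}
	if _, c := getPodCondition(conditions, podInitialized); c != nil && c.Status == "True" {
		fmt.Println("all init containers have started successfully")
	}
}
```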
@@ -1309,7 +1311,9 @@ type PreferredSchedulingTerm struct {
 // PodSpec is a description of a pod
 type PodSpec struct {
 Volumes []Volume `json:"volumes"`
-// Required: there must be at least one container in a pod.
+// List of initialization containers belonging to the pod.
+InitContainers []Container `json:"-"`
+// List of containers belonging to the pod.
 Containers []Container `json:"containers"`
 RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"`
 // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.

@@ -1416,6 +1420,11 @@ type PodStatus struct {
 // This is before the Kubelet pulled the container image(s) for the pod.
 StartTime *unversioned.Time `json:"startTime,omitempty"`
 
+// The list has one entry per init container in the manifest. The most recent successful
+// init container will have ready = true, the most recently started container will have
+// startTime set.
+// More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
+InitContainerStatuses []ContainerStatus `json:"-"`
 // The list has one entry per container in the manifest. Each entry is
 // currently the output of `docker inspect`. This output format is *not*
 // final and should not be relied upon.
@@ -17,6 +17,7 @@ limitations under the License.
 package v1
 
 import (
+"encoding/json"
 "fmt"
 
 inf "gopkg.in/inf.v0"

@@ -258,6 +259,75 @@ func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *R
 return nil
 }
 
+func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
+if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil {
+return err
+}
+
+if len(out.Status.InitContainerStatuses) > 0 {
+if out.Annotations == nil {
+out.Annotations = make(map[string]string)
+}
+value, err := json.Marshal(out.Status.InitContainerStatuses)
+if err != nil {
+return err
+}
+out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value)
+} else {
+delete(in.Annotations, PodInitContainerStatusesAnnotationKey)
+}
+return nil
+}
+
+func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error {
+// TODO: when we move init container to beta, remove these conversions
+if value := in.Annotations[PodInitContainerStatusesAnnotationKey]; len(value) > 0 {
+delete(in.Annotations, PodInitContainerStatusesAnnotationKey)
+var values []ContainerStatus
+if err := json.Unmarshal([]byte(value), &values); err != nil {
+return err
+}
+in.Status.InitContainerStatuses = values
+}
+
+return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s)
+}
+
+func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
+if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil {
+return err
+}
+
+// TODO: when we move init container to beta, remove these conversions
+if len(out.Spec.InitContainers) > 0 {
+if out.Annotations == nil {
+out.Annotations = make(map[string]string)
+}
+value, err := json.Marshal(out.Spec.InitContainers)
+if err != nil {
+return err
+}
+out.Annotations[PodInitContainersAnnotationKey] = string(value)
+} else {
+delete(out.Annotations, PodInitContainersAnnotationKey)
+}
+return nil
+}
+
+func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
+// TODO: when we move init container to beta, remove these conversions
+if value := in.Annotations[PodInitContainersAnnotationKey]; len(value) > 0 {
+delete(in.Annotations, PodInitContainersAnnotationKey)
+var values []Container
+if err := json.Unmarshal([]byte(value), &values); err != nil {
+return err
+}
+in.Spec.InitContainers = values
+}
+
+return autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s)
+}
+
 // The following two PodSpec conversions are done here to support ServiceAccount
 // as an alias for ServiceAccountName.
 func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error {
@@ -271,6 +341,16 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi
 } else {
 out.Volumes = nil
 }
+if in.InitContainers != nil {
+out.InitContainers = make([]Container, len(in.InitContainers))
+for i := range in.InitContainers {
+if err := Convert_api_Container_To_v1_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil {
+return err
+}
+}
+} else {
+out.InitContainers = nil
+}
 if in.Containers != nil {
 out.Containers = make([]Container, len(in.Containers))
 for i := range in.Containers {

@@ -346,6 +426,16 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversi
 } else {
 out.Volumes = nil
 }
+if in.InitContainers != nil {
+out.InitContainers = make([]api.Container, len(in.InitContainers))
+for i := range in.InitContainers {
+if err := Convert_v1_Container_To_api_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil {
+return err
+}
+}
+} else {
+out.InitContainers = nil
+}
 if in.Containers != nil {
 out.Containers = make([]api.Container, len(in.Containers))
 for i := range in.Containers {
@@ -419,6 +509,33 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error
 if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil {
 return err
 }
+
+// TODO: when we move init container to beta, remove these conversions
+if len(out.Spec.InitContainers) > 0 {
+if out.Annotations == nil {
+out.Annotations = make(map[string]string)
+}
+value, err := json.Marshal(out.Spec.InitContainers)
+if err != nil {
+return err
+}
+out.Annotations[PodInitContainersAnnotationKey] = string(value)
+} else {
+delete(out.Annotations, PodInitContainersAnnotationKey)
+}
+if len(out.Status.InitContainerStatuses) > 0 {
+if out.Annotations == nil {
+out.Annotations = make(map[string]string)
+}
+value, err := json.Marshal(out.Status.InitContainerStatuses)
+if err != nil {
+return err
+}
+out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value)
+} else {
+delete(in.Annotations, PodInitContainerStatusesAnnotationKey)
+}
+
 // We need to reset certain fields for mirror pods from pre-v1.1 kubelet
 // (#15960).
 // TODO: Remove this code after we drop support for v1.0 kubelets.

@@ -434,6 +551,24 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error
 }
 
 func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error {
+// TODO: when we move init container to beta, remove these conversions
+if value := in.Annotations[PodInitContainersAnnotationKey]; len(value) > 0 {
+delete(in.Annotations, PodInitContainersAnnotationKey)
+var values []Container
+if err := json.Unmarshal([]byte(value), &values); err != nil {
+return err
+}
+in.Spec.InitContainers = values
+}
+if value := in.Annotations[PodInitContainerStatusesAnnotationKey]; len(value) > 0 {
+delete(in.Annotations, PodInitContainerStatusesAnnotationKey)
+var values []ContainerStatus
+if err := json.Unmarshal([]byte(value), &values); err != nil {
+return err
+}
+in.Status.InitContainerStatuses = values
+}
+
 return autoConvert_v1_Pod_To_api_Pod(in, out, s)
 }
 
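When a versioned `Pod` is converted back to the internal type, the conversion functions above pop the alpha annotations, unmarshal them, and fill the real fields. A standalone sketch of that read path with simplified local types (this is not the generated conversion code itself, only an illustration of its shape):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type container struct {
	Name  string `json:"name"`
	Image string `json:"image"`
}

const initContainersKey = "pod.alpha.kubernetes.io/init-containers"

// popInitContainers mirrors the shape of Convert_v1_Pod_To_api_Pod above:
// read the annotation, delete it, and decode it into the typed field.
func popInitContainers(annotations map[string]string) ([]container, error) {
	value := annotations[initContainersKey]
	if len(value) == 0 {
		return nil, nil
	}
	delete(annotations, initContainersKey)
	var out []container
	if err := json.Unmarshal([]byte(value), &out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	annotations := map[string]string{
		initContainersKey: `[{"name":"setup","image":"busybox"}]`,
	}
	inits, err := popInitContainers(annotations)
	if err != nil {
		panic(err)
	}
	fmt.Println(inits, annotations) // the annotation key is removed after decoding
}
```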
@@ -5012,6 +5012,17 @@ func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conv
 } else {
 out.Volumes = nil
 }
+if in.InitContainers != nil {
+in, out := &in.InitContainers, &out.InitContainers
+*out = make([]Container, len(*in))
+for i := range *in {
+if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil {
+return err
+}
+}
+} else {
+out.InitContainers = nil
+}
 if in.Containers != nil {
 in, out := &in.Containers, &out.Containers
 *out = make([]Container, len(*in))

@@ -5101,6 +5112,17 @@ func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus
 } else {
 out.StartTime = nil
 }
+if in.InitContainerStatuses != nil {
+in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+*out = make([]api.ContainerStatus, len(*in))
+for i := range *in {
+if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil {
+return err
+}
+}
+} else {
+out.InitContainerStatuses = nil
+}
 if in.ContainerStatuses != nil {
 in, out := &in.ContainerStatuses, &out.ContainerStatuses
 *out = make([]api.ContainerStatus, len(*in))

@@ -5145,6 +5167,17 @@ func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus
 } else {
 out.StartTime = nil
 }
+if in.InitContainerStatuses != nil {
+in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+*out = make([]ContainerStatus, len(*in))
+for i := range *in {
+if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil {
+return err
+}
+}
+} else {
+out.InitContainerStatuses = nil
+}
 if in.ContainerStatuses != nil {
 in, out := &in.ContainerStatuses, &out.ContainerStatuses
 *out = make([]ContainerStatus, len(*in))
@@ -5176,10 +5209,6 @@ func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult,
 return nil
 }
 
-func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error {
-return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s)
-}
-
 func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
 if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
 return err

@@ -5193,10 +5222,6 @@ func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResu
 return nil
 }
 
-func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
-return autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s)
-}
-
 func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error {
 if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
 return err

@@ -5291,10 +5316,6 @@ func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec,
 return nil
 }
 
-func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
-return autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s)
-}
-
 func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
 if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
 return err

@@ -5305,10 +5326,6 @@ func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSp
 return nil
 }
 
-func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
-return autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s)
-}
-
 func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error {
 if in.UID != nil {
 in, out := &in.UID, &out.UID
@@ -2176,6 +2176,17 @@ func DeepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error {
 } else {
 out.Volumes = nil
 }
+if in.InitContainers != nil {
+in, out := in.InitContainers, &out.InitContainers
+*out = make([]Container, len(in))
+for i := range in {
+if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil {
+return err
+}
+}
+} else {
+out.InitContainers = nil
+}
 if in.Containers != nil {
 in, out := in.Containers, &out.Containers
 *out = make([]Container, len(in))

@@ -2269,6 +2280,17 @@ func DeepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) e
 } else {
 out.StartTime = nil
 }
+if in.InitContainerStatuses != nil {
+in, out := in.InitContainerStatuses, &out.InitContainerStatuses
+*out = make([]ContainerStatus, len(in))
+for i := range in {
+if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil {
+return err
+}
+}
+} else {
+out.InitContainerStatuses = nil
+}
 if in.ContainerStatuses != nil {
 in, out := in.ContainerStatuses, &out.ContainerStatuses
 *out = make([]ContainerStatus, len(in))
|
@ -295,7 +295,7 @@ message Container {
|
|||||||
// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
|
// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
|
||||||
optional ResourceRequirements resources = 8;
|
optional ResourceRequirements resources = 8;
|
||||||
|
|
||||||
// Pod volumes to mount into the container's filesyste.
|
// Pod volumes to mount into the container's filesystem.
|
||||||
// Cannot be updated.
|
// Cannot be updated.
|
||||||
repeated VolumeMount volumeMounts = 9;
|
repeated VolumeMount volumeMounts = 9;
|
||||||
|
|
||||||
|
@@ -53783,7 +53783,7 @@ func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) {
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 600)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 648)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -53902,7 +53902,7 @@ func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Deco
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]
@@ -1106,7 +1106,7 @@ type Container struct {
 // Cannot be updated.
 // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
 Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
-// Pod volumes to mount into the container's filesyste.
+// Pod volumes to mount into the container's filesystem.
 // Cannot be updated.
 VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeMounts"`
 // Periodic probe of container liveness.
@@ -1541,11 +1541,36 @@ type PreferredSchedulingTerm struct {
 Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
 }
 
+const (
+// This annotation key will be used to contain an array of v1 JSON encoded Containers
+// for init containers. The annotation will be placed into the internal type and cleared.
+PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers"
+// This annotation key will be used to contain an array of v1 JSON encoded
+// ContainerStatuses for init containers. The annotation will be placed into the internal
+// type and cleared.
+PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses"
+)
+
 // PodSpec is a description of a pod.
 type PodSpec struct {
 // List of volumes that can be mounted by containers belonging to the pod.
 // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md
 Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
+// List of initialization containers belonging to the pod.
+// Init containers are executed in order prior to containers being started. If any
+// init container fails, the pod is considered to have failed and is handled according
+// to its restartPolicy. The name for an init container or normal container must be
+// unique among all containers.
+// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
+// The resourceRequirements of an init container are taken into account during scheduling
+// by finding the highest request/limit for each resource type, and then using the max of
+// of that value or the sum of the normal containers. Limits are applied to init containers
+// in a similar fashion.
+// Init containers cannot currently be added or removed.
+// Init containers are in alpha state and may change without notice.
+// Cannot be updated.
+// More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md
+InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"`
 // List of containers belonging to the pod.
 // Containers cannot currently be added or removed.
 // There must be at least one container in a Pod.
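The PodSpec documentation above spells out the execution contract: init containers run one at a time, in order, before any regular container starts, and a failing init container fails the whole pod according to its restartPolicy. The following standalone sketch simulates only that ordering; it is purely illustrative, uses made-up step names, and bears no relation to the real kubelet implementation.

```go
package main

import (
	"errors"
	"fmt"
)

type step struct {
	name string
	run  func() error
}

// runPod sketches the documented ordering: init steps run sequentially and a
// failure aborts the pod; app containers only start once every init step has
// succeeded. The real kubelet logic is far more involved.
func runPod(initSteps, appSteps []step) error {
	for _, s := range initSteps {
		fmt.Println("running init container:", s.name)
		if err := s.run(); err != nil {
			return fmt.Errorf("init container %q failed: %w", s.name, err)
		}
	}
	for _, s := range appSteps {
		fmt.Println("starting container:", s.name)
	}
	return nil
}

func main() {
	ok := func() error { return nil }
	fail := func() error { return errors.New("exit code 1") }

	_ = runPod([]step{{"migrate", ok}}, []step{{"web", ok}})
	if err := runPod([]step{{"migrate", fail}}, []step{{"web", ok}}); err != nil {
		fmt.Println(err) // the pod is then handled according to its restartPolicy
	}
}
```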
@@ -1679,6 +1704,12 @@ type PodStatus struct {
 // This is before the Kubelet pulled the container image(s) for the pod.
 StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
 
+// The list has one entry per init container in the manifest. The most recent successful
+// init container will have ready = true, the most recently started container will have
+// startTime set.
+// Init containers are in alpha state and may change without notice.
+// More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
+InitContainerStatuses []ContainerStatus `json:"-"`
 // The list has one entry per container in the manifest. Each entry is currently the output
 // of `docker inspect`.
 // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses

@@ -186,7 +186,7 @@ var map_Container = map[string]string{
 "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
 "env": "List of environment variables to set in the container. Cannot be updated.",
 "resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources",
-"volumeMounts": "Pod volumes to mount into the container's filesyste. Cannot be updated.",
+"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
 "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
 "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
 "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
@@ -1326,6 +1326,37 @@ func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorL
 return allErrors
 }
 
+func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList {
+var allErrs field.ErrorList
+if len(containers) > 0 {
+allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...)
+}
+
+allNames := sets.String{}
+for _, ctr := range otherContainers {
+allNames.Insert(ctr.Name)
+}
+for i, ctr := range containers {
+idxPath := fldPath.Index(i)
+if allNames.Has(ctr.Name) {
+allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name))
+}
+if len(ctr.Name) > 0 {
+allNames.Insert(ctr.Name)
+}
+if ctr.Lifecycle != nil {
+allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers"))
+}
+if ctr.LivenessProbe != nil {
+allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers"))
+}
+if ctr.ReadinessProbe != nil {
+allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers"))
+}
+}
+return allErrs
+}
+
 func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList {
 allErrs := field.ErrorList{}
 
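The validation added above enforces two rules: container names must be unique across init and regular containers, and init containers may not carry lifecycle hooks or liveness/readiness probes. A standalone restatement of those checks with simplified types (the real code returns a `field.ErrorList` and also runs the full per-container validation):

```go
package main

import "fmt"

type probe struct{}

type simpleContainer struct {
	Name           string
	Lifecycle      *struct{}
	LivenessProbe  *probe
	ReadinessProbe *probe
}

// checkInitContainers restates the rules enforced above: every name must be
// unique across init and regular containers, and init containers may not set
// lifecycle hooks, liveness probes, or readiness probes.
func checkInitContainers(inits, others []simpleContainer) []string {
	var errs []string
	seen := map[string]bool{}
	for _, c := range others {
		seen[c.Name] = true
	}
	for i, c := range inits {
		if seen[c.Name] {
			errs = append(errs, fmt.Sprintf("initContainers[%d].name: duplicate %q", i, c.Name))
		}
		seen[c.Name] = true
		if c.Lifecycle != nil {
			errs = append(errs, fmt.Sprintf("initContainers[%d].lifecycle: must not be set", i))
		}
		if c.LivenessProbe != nil {
			errs = append(errs, fmt.Sprintf("initContainers[%d].livenessProbe: must not be set", i))
		}
		if c.ReadinessProbe != nil {
			errs = append(errs, fmt.Sprintf("initContainers[%d].readinessProbe: must not be set", i))
		}
	}
	return errs
}

func main() {
	inits := []simpleContainer{{Name: "web", LivenessProbe: &probe{}}}
	others := []simpleContainer{{Name: "web"}}
	fmt.Println(checkInitContainers(inits, others))
}
```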
@@ -1451,6 +1482,7 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
 allVolumes, vErrs := validateVolumes(spec.Volumes, fldPath.Child("volumes"))
 allErrs = append(allErrs, vErrs...)
 allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...)
+allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...)
 allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
 allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
 allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
@@ -1555,7 +1555,7 @@ func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) {
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 720)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -1585,7 +1585,7 @@ func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) {
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -4205,7 +4205,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -4443,7 +4443,7 @@ func (x codecSelfer1234) decSliceScheduledJob(v *[]ScheduledJob, d *codec1978.De
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 976)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1000)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]
@@ -217,8 +217,7 @@ func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s
 } else {
 out.ManualSelector = nil
 }
-// TODO: Inefficient conversion - can we improve it?
-if err := s.Convert(&in.Template, &out.Template, 0); err != nil {
+if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
 return err
 }
 return nil

@@ -262,8 +261,7 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s
 } else {
 out.ManualSelector = nil
 }
-// TODO: Inefficient conversion - can we improve it?
-if err := s.Convert(&in.Template, &out.Template, 0); err != nil {
+if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
 return err
 }
 return nil

@@ -2867,7 +2867,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]
@@ -223,8 +223,7 @@ func autoConvert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSp
 } else {
 out.ManualSelector = nil
 }
-// TODO: Inefficient conversion - can we improve it?
-if err := s.Convert(&in.Template, &out.Template, 0); err != nil {
+if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
 return err
 }
 return nil

@@ -268,8 +267,7 @@ func autoConvert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSp
 } else {
 out.ManualSelector = nil
 }
-// TODO: Inefficient conversion - can we improve it?
-if err := s.Convert(&in.Template, &out.Template, 0); err != nil {
+if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
 return err
 }
 return nil

@@ -4739,7 +4739,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]
@@ -14325,7 +14325,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -14444,7 +14444,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder)
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -15158,7 +15158,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 680)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 704)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]
@@ -401,8 +401,7 @@ func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSet
 } else {
 out.Selector = nil
 }
-// TODO: Inefficient conversion - can we improve it?
-if err := s.Convert(&in.Template, &out.Template, 0); err != nil {
+if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
 return err
 }
 return nil

@@ -422,8 +421,7 @@ func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extension
 } else {
 out.Selector = nil
 }
-// TODO: Inefficient conversion - can we improve it?
-if err := s.Convert(&in.Template, &out.Template, 0); err != nil {
+if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
 return err
 }
 return nil
@@ -19988,7 +19988,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -20107,7 +20107,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder)
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 720)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -20345,7 +20345,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]

@@ -21178,7 +21178,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 704)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 728)
 if yyrt1 {
 if yyrl1 <= cap(yyv1) {
 yyv1 = yyv1[:yyrl1]
|
@ -524,7 +524,10 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) {
|
|||||||
}
|
}
|
||||||
fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP)
|
fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP)
|
||||||
fmt.Fprintf(out, "Controllers:\t%s\n", printControllers(pod.Annotations))
|
fmt.Fprintf(out, "Controllers:\t%s\n", printControllers(pod.Annotations))
|
||||||
describeContainers(pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out, "")
|
if len(pod.Spec.InitContainers) > 0 {
|
||||||
|
describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), out, "")
|
||||||
|
}
|
||||||
|
describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out, "")
|
||||||
if len(pod.Status.Conditions) > 0 {
|
if len(pod.Status.Conditions) > 0 {
|
||||||
fmt.Fprint(out, "Conditions:\n Type\tStatus\n")
|
fmt.Fprint(out, "Conditions:\n Type\tStatus\n")
|
||||||
for _, c := range pod.Status.Conditions {
|
for _, c := range pod.Status.Conditions {
|
||||||
@@ -782,12 +785,16 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri
 }
 
 // TODO: Do a better job at indenting, maybe by using a prefix writer
-func describeContainers(containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer, space string) {
+func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer, space string) {
 	statuses := map[string]api.ContainerStatus{}
 	for _, status := range containerStatuses {
 		statuses[status.Name] = status
 	}
-	fmt.Fprintf(out, "%sContainers:\n", space)
+	if len(containers) == 0 {
+		fmt.Fprintf(out, "%s%s: <none>\n", space, label)
+	} else {
+		fmt.Fprintf(out, "%s%s:\n", space, label)
+	}
 	for _, container := range containers {
 		status, ok := statuses[container.Name]
 		nameIndent := ""
@@ -1037,7 +1044,10 @@ func DescribePodTemplate(template *api.PodTemplateSpec, out io.Writer) {
 	if len(template.Spec.ServiceAccountName) > 0 {
 		fmt.Fprintf(out, " Service Account:\t%s\n", template.Spec.ServiceAccountName)
 	}
-	describeContainers(template.Spec.Containers, nil, nil, out, " ")
+	if len(template.Spec.InitContainers) > 0 {
+		describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, out, " ")
+	}
+	describeContainers("Containers", template.Spec.Containers, nil, nil, out, " ")
 	describeVolumes(template.Spec.Volumes, out, " ")
 }
 
@@ -268,7 +268,7 @@ func TestDescribeContainers(t *testing.T) {
 			ContainerStatuses: []api.ContainerStatus{testCase.status},
 		},
 	}
-	describeContainers(pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(&pod), out, "")
+	describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(&pod), out, "")
 	output := out.String()
 	for _, expected := range testCase.expectedElements {
 		if !strings.Contains(output, expected) {
@@ -594,22 +594,51 @@ func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error {
 		reason = pod.Status.Reason
 	}
 
-	for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
-		container := pod.Status.ContainerStatuses[i]
-		restarts += int(container.RestartCount)
-		if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
-			reason = container.State.Waiting.Reason
-		} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
-			reason = container.State.Terminated.Reason
-		} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
-			if container.State.Terminated.Signal != 0 {
-				reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
-			} else {
-				reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
-			}
-		} else if container.Ready && container.State.Running != nil {
-			readyContainers++
-		}
-	}
+	initializing := false
+	for i := range pod.Status.InitContainerStatuses {
+		container := pod.Status.InitContainerStatuses[i]
+		switch {
+		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
+			continue
+		case container.State.Terminated != nil:
+			// initialization is failed
+			if len(container.State.Terminated.Reason) == 0 {
+				if container.State.Terminated.Signal != 0 {
+					reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
+				} else {
+					reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
+				}
+			} else {
+				reason = "Init:" + container.State.Terminated.Reason
+			}
+			initializing = true
+		case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
+			reason = "Init:" + container.State.Waiting.Reason
+			initializing = true
+		default:
+			reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
+			initializing = true
+		}
+		break
+	}
+	if !initializing {
+		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
+			container := pod.Status.ContainerStatuses[i]
+
+			restarts += int(container.RestartCount)
+			if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
+				reason = container.State.Waiting.Reason
+			} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
+				reason = container.State.Terminated.Reason
+			} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
+				if container.State.Terminated.Signal != 0 {
+					reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
+				} else {
+					reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
+				}
+			} else if container.Ready && container.State.Running != nil {
+				readyContainers++
+			}
+		}
+	}
 	if pod.DeletionTimestamp != nil {
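For reference, the STATUS column logic above can be exercised in isolation. The following is a minimal, hedged sketch with simplified stand-in types (not the real `api` package): it reproduces the `Init:<done>/<total>` and `Init:ExitCode:<n>`-style reasons a pod would show while init containers are running or failing.

```go
package main

import "fmt"

// Simplified stand-ins for the relevant container state types.
type terminated struct {
	exitCode, signal int
	reason           string
}
type waiting struct{ reason string }
type state struct {
	Terminated *terminated
	Waiting    *waiting
}
type initStatus struct{ State state }

// initReason mirrors the printPodBase logic above for the init container at
// index i out of total; initializing reports whether the pod is still blocked.
func initReason(i, total int, s initStatus) (reason string, initializing bool) {
	switch {
	case s.State.Terminated != nil && s.State.Terminated.exitCode == 0:
		return "", false // this init container succeeded; look at the next one
	case s.State.Terminated != nil:
		t := s.State.Terminated
		if t.reason == "" {
			if t.signal != 0 {
				return fmt.Sprintf("Init:Signal:%d", t.signal), true
			}
			return fmt.Sprintf("Init:ExitCode:%d", t.exitCode), true
		}
		return "Init:" + t.reason, true
	case s.State.Waiting != nil && s.State.Waiting.reason != "" && s.State.Waiting.reason != "PodInitializing":
		return "Init:" + s.State.Waiting.reason, true
	default:
		return fmt.Sprintf("Init:%d/%d", i, total), true
	}
}

func main() {
	fmt.Println(initReason(0, 2, initStatus{state{Waiting: &waiting{"PodInitializing"}}})) // Init:0/2 true
	fmt.Println(initReason(1, 2, initStatus{state{Terminated: &terminated{exitCode: 1}}})) // Init:ExitCode:1 true
	fmt.Println(initReason(1, 2, initStatus{state{Terminated: &terminated{exitCode: 0}}})) // "" false
}
```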
@@ -57,5 +57,15 @@ func fieldPath(pod *api.Pod, container *api.Container) (string, error) {
 			}
 		}
 	}
+	for i := range pod.Spec.InitContainers {
+		here := &pod.Spec.InitContainers[i]
+		if here.Name == container.Name {
+			if here.Name == "" {
+				return fmt.Sprintf("spec.initContainers[%d]", i), nil
+			} else {
+				return fmt.Sprintf("spec.initContainers{%s}", here.Name), nil
+			}
+		}
+	}
 	return "", fmt.Errorf("container %#v not found in pod %#v", container, pod)
 }
@@ -50,9 +50,10 @@ var (
 )
 
 var (
 	ErrRunContainer = errors.New("RunContainerError")
 	ErrKillContainer = errors.New("KillContainerError")
 	ErrVerifyNonRoot = errors.New("VerifyNonRootError")
+	ErrRunInitContainer = errors.New("RunInitContainerError")
 )
 
 var (
@@ -69,6 +70,7 @@ const (
 	KillContainer SyncAction = "KillContainer"
 	SetupNetwork SyncAction = "SetupNetwork"
 	TeardownNetwork SyncAction = "TeardownNetwork"
+	InitContainer SyncAction = "InitContainer"
 )
 
 // SyncResult is the result of sync action.
@@ -37,6 +37,7 @@ import (
 	dockernat "github.com/docker/go-connections/nat"
 	"github.com/golang/glog"
 	cadvisorapi "github.com/google/cadvisor/info/v1"
+
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
@@ -57,6 +58,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/oom"
 	"k8s.io/kubernetes/pkg/util/procfs"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+	"k8s.io/kubernetes/pkg/util/sets"
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 )
 
@@ -876,6 +878,9 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine
 	} else if dm.networkPlugin.Name() != "cni" && dm.networkPlugin.Name() != "kubenet" {
 		// Docker only exports ports from the pod infra container. Let's
 		// collect all of the relevant ports and export them.
+		for _, container := range pod.Spec.InitContainers {
+			ports = append(ports, container.Ports...)
+		}
 		for _, container := range pod.Spec.Containers {
 			ports = append(ports, container.Ports...)
 		}
@@ -1179,6 +1184,14 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont
 				break
 			}
 		}
+		if containerSpec == nil {
+			for i, c := range pod.Spec.InitContainers {
+				if c.Name == container.Name {
+					containerSpec = &pod.Spec.InitContainers[i]
+					break
+				}
+			}
+		}
 	}
 
 	// TODO: Handle this without signaling the pod infra container to
@@ -1369,6 +1382,14 @@ func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod
 				break
 			}
 		}
+		if container == nil {
+			for ix := range pod.Spec.InitContainers {
+				if pod.Spec.InitContainers[ix].Name == name {
+					container = &pod.Spec.InitContainers[ix]
+					break
+				}
+			}
+		}
 		if container == nil {
 			err = fmt.Errorf("unable to find container %s in pod %v", name, pod)
 		}
@@ -1425,6 +1446,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
 	if err != nil {
 		glog.Errorf("Can't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
 	}
+	glog.Infof("Generating ref for container %s: %#v", container.Name, ref)
 
 	opts, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
 	if err != nil {
@@ -1603,6 +1625,9 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do
 	} else {
 		// Docker only exports ports from the pod infra container. Let's
 		// collect all of the relevant ports and export them.
+		for _, container := range pod.Spec.InitContainers {
+			ports = append(ports, container.Ports...)
+		}
 		for _, container := range pod.Spec.Containers {
 			ports = append(ports, container.Ports...)
 		}
@@ -1640,13 +1665,16 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do
 // should be kept running. If startInfraContainer is false then it contains an entry for infraContainerId (mapped to -1).
 // It shouldn't be the case where containersToStart is empty and containersToKeep contains only infraContainerId. In such case
 // Infra Container should be killed, hence it's removed from this map.
-// - all running containers which are NOT contained in containersToKeep should be killed.
+// - all init containers are stored in initContainersToKeep
+// - all running containers which are NOT contained in containersToKeep and initContainersToKeep should be killed.
 type podContainerChangesSpec struct {
 	StartInfraContainer bool
 	InfraChanged bool
 	InfraContainerId kubecontainer.DockerID
-	ContainersToStart map[int]string
-	ContainersToKeep map[kubecontainer.DockerID]int
+	InitFailed bool
+	InitContainersToKeep map[kubecontainer.DockerID]int
+	ContainersToStart map[int]string
+	ContainersToKeep map[kubecontainer.DockerID]int
 }
 
 func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) {
@@ -1683,6 +1711,35 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 		containersToKeep[podInfraContainerID] = -1
 	}
 
+	// check the status of the init containers
+	initFailed := false
+	initContainersToKeep := make(map[kubecontainer.DockerID]int)
+	// always reset the init containers if the pod is reset
+	if !createPodInfraContainer {
+		// keep all successfully completed containers up to and including the first failing container
+	Containers:
+		for i, container := range pod.Spec.InitContainers {
+			containerStatus := podStatus.FindContainerStatusByName(container.Name)
+			if containerStatus == nil {
+				continue
+			}
+			switch {
+			case containerStatus == nil:
+				continue
+			case containerStatus.State == kubecontainer.ContainerStateRunning:
+				initContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] = i
+			case containerStatus.State == kubecontainer.ContainerStateExited:
+				initContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] = i
+				// TODO: should we abstract the "did the init container fail" check?
+				if containerStatus.ExitCode != 0 {
+					initFailed = true
+					break Containers
+				}
+			}
+		}
+	}
+
+	// check the status of the containers
 	for index, container := range pod.Spec.Containers {
 		expectedHash := kubecontainer.HashContainer(&container)
 
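A hedged, standalone illustration of the scan above (simplified types and hypothetical names, not the real kubecontainer API): completed or running init containers are recorded for keeping, and the walk stops at the first init container that exited non-zero.

```go
package main

import "fmt"

// containerState is a simplified stand-in for kubecontainer.ContainerState.
type containerState int

const (
	stateRunning containerState = iota
	stateExited
	stateUnknown
)

type initStatus struct {
	ID       string
	State    containerState
	ExitCode int
}

// scanInitContainers mirrors the loop above: keep running/exited init
// containers (by index) until the first failure, then stop and report it.
func scanInitContainers(statuses []initStatus) (keep map[string]int, failed bool) {
	keep = map[string]int{}
	for i, s := range statuses {
		switch s.State {
		case stateRunning:
			keep[s.ID] = i
		case stateExited:
			keep[s.ID] = i
			if s.ExitCode != 0 {
				return keep, true // initialization failed at container i
			}
		}
	}
	return keep, false
}

func main() {
	keep, failed := scanInitContainers([]initStatus{
		{ID: "a", State: stateExited, ExitCode: 0},
		{ID: "b", State: stateExited, ExitCode: 1},
		{ID: "c", State: stateRunning},
	})
	fmt.Println(keep, failed) // map[a:0 b:1] true — "c" is never reached
}
```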
@@ -1716,6 +1773,19 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 			continue
 		}
 
+		if initFailed {
+			// initialization failed and Container exists
+			// If we have an initialization failure everything will be killed anyway
+			// If RestartPolicy is Always or OnFailure we restart containers that were running before we
+			// killed them when re-running initialization
+			if pod.Spec.RestartPolicy != api.RestartPolicyNever {
+				message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name)
+				glog.V(1).Info(message)
+				containersToStart[index] = message
+			}
+			continue
+		}
+
 		// At this point, the container is running and pod infra container is good.
 		// We will look for changes and check healthiness for the container.
 		containerChanged := hash != 0 && hash != expectedHash
@@ -1743,17 +1813,21 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 	// (In fact, when createPodInfraContainer is false, containersToKeep will not be touched).
 	// - createPodInfraContainer is false and containersToKeep contains at least ID of Infra Container
 
-	// If Infra container is the last running one, we don't want to keep it.
+	// If Infra container is the last running one, we don't want to keep it, and we don't want to
+	// keep any init containers.
 	if !createPodInfraContainer && len(containersToStart) == 0 && len(containersToKeep) == 1 {
 		containersToKeep = make(map[kubecontainer.DockerID]int)
+		initContainersToKeep = make(map[kubecontainer.DockerID]int)
 	}
 
 	return podContainerChangesSpec{
 		StartInfraContainer: createPodInfraContainer,
 		InfraChanged: changed,
 		InfraContainerId: podInfraContainerID,
-		ContainersToStart: containersToStart,
-		ContainersToKeep: containersToKeep,
+		InitFailed: initFailed,
+		InitContainersToKeep: initContainersToKeep,
+		ContainersToStart: containersToStart,
+		ContainersToKeep: containersToKeep,
 	}, nil
 }
 
@@ -1797,7 +1871,8 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
 	runningContainerStatues := podStatus.GetRunningContainerStatuses()
 	for _, containerStatus := range runningContainerStatues {
 		_, keep := containerChanges.ContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)]
-		if !keep {
+		_, keepInit := containerChanges.InitContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)]
+		if !keep && !keepInit {
 			glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerStatus.Name, containerStatus.ID, format.Pod(pod))
 			// attempt to find the appropriate container policy
 			var podContainer *api.Container
@@ -1820,6 +1895,9 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
 		}
 	}
 
+	// Keep terminated init containers fairly aggressively controlled
+	dm.pruneInitContainersBeforeStart(pod, podStatus, containerChanges.InitContainersToKeep)
+
 	// We pass the value of the podIP down to runContainerInPod, which in turn
 	// passes it to various other functions, in order to facilitate
 	// functionality that requires this value (hosts file and downward API)
@ -1889,14 +1967,78 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start everything
|
next, status, done := findActiveInitContainer(pod, podStatus)
|
||||||
|
if status != nil {
|
||||||
|
if status.ExitCode != 0 {
|
||||||
|
// container initialization has failed, flag the pod as failed
|
||||||
|
initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name)
|
||||||
|
initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode))
|
||||||
|
result.AddSyncResult(initContainerResult)
|
||||||
|
if pod.Spec.RestartPolicy == api.RestartPolicyNever {
|
||||||
|
utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %+v", format.Pod(pod), status.Name, status))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %+v", format.Pod(pod), status.Name, status))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: when configuring the pod's containers anything that can be configured by pointing
|
||||||
|
// to the namespace of the infra container should use namespaceMode. This includes things like the net namespace
|
||||||
|
// and IPC namespace. PID mode cannot point to another container right now.
|
||||||
|
// See createPodInfraContainer for infra container setup.
|
||||||
|
namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID)
|
||||||
|
pidMode := getPidMode(pod)
|
||||||
|
|
||||||
|
if next != nil {
|
||||||
|
if len(containerChanges.ContainersToStart) == 0 {
|
||||||
|
glog.V(4).Infof("No containers to start, stopping at init container %+v in pod %v", next.Name, format.Pod(pod))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we need to start the next container, do so now then exit
|
||||||
|
container := next
|
||||||
|
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
|
||||||
|
result.AddSyncResult(startContainerResult)
|
||||||
|
|
||||||
|
// containerChanges.StartInfraContainer causes the containers to be restarted for config reasons
|
||||||
|
if !containerChanges.StartInfraContainer {
|
||||||
|
isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff)
|
||||||
|
if isInBackOff {
|
||||||
|
startContainerResult.Fail(err, msg)
|
||||||
|
glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
|
||||||
|
if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
|
||||||
|
startContainerResult.Fail(err, msg)
|
||||||
|
utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Successfully started the container; clear the entry in the failure
|
||||||
|
glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !done {
|
||||||
|
// init container still running
|
||||||
|
glog.V(4).Infof("An init container is still running in pod %v", format.Pod(pod))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if containerChanges.InitFailed {
|
||||||
|
// init container still running
|
||||||
|
glog.V(4).Infof("Not all init containers have succeeded for pod %v", format.Pod(pod))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start regular containers
|
||||||
for idx := range containerChanges.ContainersToStart {
|
for idx := range containerChanges.ContainersToStart {
|
||||||
container := &pod.Spec.Containers[idx]
|
container := &pod.Spec.Containers[idx]
|
||||||
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
|
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
|
||||||
result.AddSyncResult(startContainerResult)
|
result.AddSyncResult(startContainerResult)
|
||||||
|
|
||||||
// containerChanges.StartInfraContainer causes the containers to be restarted for config reasons
|
// containerChanges.StartInfraContainer causes the containers to be restarted for config reasons
|
||||||
// ignore backoff
|
|
||||||
if !containerChanges.StartInfraContainer {
|
if !containerChanges.StartInfraContainer {
|
||||||
isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff)
|
isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff)
|
||||||
if isInBackOff {
|
if isInBackOff {
|
||||||
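The hunk above makes SyncPod start at most one pending init container per sync and then return; regular containers are only reached once every init container has succeeded. A hedged, simplified sketch of that decision (stand-in types, not the real SyncPod signature):

```go
package main

import "fmt"

// Simplified stand-in for an init container's observed state.
type initState struct {
	name    string
	done    bool // exited with code 0
	running bool
	failed  bool // exited non-zero
}

// syncStep reports what a single sync iteration would do, mirroring the flow
// above: surface a failure, wait on a running init container, start the next
// pending one, or fall through to the regular containers.
func syncStep(inits []initState) string {
	for _, c := range inits {
		switch {
		case c.failed:
			return "init container " + c.name + " failed; restart it per RestartPolicy"
		case c.running:
			return "init container " + c.name + " still running; wait for the next sync"
		case !c.done:
			return "start init container " + c.name + " and return"
		}
	}
	return "all init containers succeeded; start regular containers"
}

func main() {
	fmt.Println(syncStep([]initState{{name: "setup", done: true}, {name: "migrate"}}))
	// start init container migrate and return
	fmt.Println(syncStep([]initState{{name: "setup", done: true}, {name: "migrate", done: true}}))
	// all init containers succeeded; start regular containers
}
```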
@ -1905,46 +2047,131 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
|
glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
|
||||||
err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets)
|
if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
|
||||||
if err != nil {
|
|
||||||
startContainerResult.Fail(err, msg)
|
startContainerResult.Fail(err, msg)
|
||||||
|
utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if container.SecurityContext != nil && container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot {
|
|
||||||
err := dm.verifyNonRoot(container)
|
|
||||||
if err != nil {
|
|
||||||
startContainerResult.Fail(kubecontainer.ErrVerifyNonRoot, err.Error())
|
|
||||||
glog.Errorf("Error running pod %q container %q: %v", format.Pod(pod), container.Name, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// For a new container, the RestartCount should be 0
|
|
||||||
restartCount := 0
|
|
||||||
containerStatus := podStatus.FindContainerStatusByName(container.Name)
|
|
||||||
if containerStatus != nil {
|
|
||||||
restartCount = containerStatus.RestartCount + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container
|
|
||||||
// Note: when configuring the pod's containers anything that can be configured by pointing
|
|
||||||
// to the namespace of the infra container should use namespaceMode. This includes things like the net namespace
|
|
||||||
// and IPC namespace. PID mode cannot point to another container right now.
|
|
||||||
// See createPodInfraContainer for infra container setup.
|
|
||||||
namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID)
|
|
||||||
_, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, getPidMode(pod), podIP, restartCount)
|
|
||||||
if err != nil {
|
|
||||||
startContainerResult.Fail(kubecontainer.ErrRunContainer, err.Error())
|
|
||||||
// TODO(bburns) : Perhaps blacklist a container after N failures?
|
|
||||||
glog.Errorf("Error running pod %q container %q: %v", format.Pod(pod), container.Name, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Successfully started the container; clear the entry in the failure
|
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// tryContainerStart attempts to pull and start the container, returning an error and a reason string if the start
|
||||||
|
// was not successful.
|
||||||
|
func (dm *DockerManager) tryContainerStart(container *api.Container, pod *api.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) {
|
||||||
|
err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets)
|
||||||
|
if err != nil {
|
||||||
|
return err, msg
|
||||||
|
}
|
||||||
|
|
||||||
|
if container.SecurityContext != nil && container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot {
|
||||||
|
err := dm.verifyNonRoot(container)
|
||||||
|
if err != nil {
|
||||||
|
return kubecontainer.ErrVerifyNonRoot, err.Error()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// For a new container, the RestartCount should be 0
|
||||||
|
restartCount := 0
|
||||||
|
containerStatus := podStatus.FindContainerStatusByName(container.Name)
|
||||||
|
if containerStatus != nil {
|
||||||
|
restartCount = containerStatus.RestartCount + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container
|
||||||
|
_, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, pidMode, podIP, restartCount)
|
||||||
|
if err != nil {
|
||||||
|
// TODO(bburns) : Perhaps blacklist a container after N failures?
|
||||||
|
return kubecontainer.ErrRunContainer, err.Error()
|
||||||
|
}
|
||||||
|
return nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// pruneInitContainers ensures that before we begin creating init containers, we have reduced the number
|
||||||
|
// of outstanding init containers still present. This reduces load on the container garbage collector
|
||||||
|
// by only preserving the most recent terminated init container.
|
||||||
|
func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.DockerID]int) {
|
||||||
|
// only the last execution of an init container should be preserved, and only preserve it if it is in the
|
||||||
|
// list of init containers to keep.
|
||||||
|
initContainerNames := sets.NewString()
|
||||||
|
for _, container := range pod.Spec.InitContainers {
|
||||||
|
initContainerNames.Insert(container.Name)
|
||||||
|
}
|
||||||
|
for name := range initContainerNames {
|
||||||
|
count := 0
|
||||||
|
for _, status := range podStatus.ContainerStatuses {
|
||||||
|
if !initContainerNames.Has(status.Name) || status.State != kubecontainer.ContainerStateExited {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
count++
|
||||||
|
// keep the first init container we see
|
||||||
|
if count == 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// if there is a reason to preserve the older container, do so
|
||||||
|
if _, ok := initContainersToKeep[kubecontainer.DockerID(status.ID.ID)]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// prune all other init containers that match this container name
|
||||||
|
// TODO: we may not need aggressive pruning
|
||||||
|
glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count)
|
||||||
|
if err := dm.client.RemoveContainer(status.ID.ID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
|
||||||
|
if _, ok := err.(containerNotFoundError); ok {
|
||||||
|
count--
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", name, err, format.Pod(pod)))
|
||||||
|
// TODO: report serious errors
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove any references to this container
|
||||||
|
if _, ok := dm.containerRefManager.GetRef(status.ID); ok {
|
||||||
|
dm.containerRefManager.ClearRef(status.ID)
|
||||||
|
} else {
|
||||||
|
glog.Warningf("No ref for pod '%q'", pod.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
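The pruning above keeps only the most recent exited instance per init container name, plus anything explicitly marked to keep. A minimal sketch of that selection under simplified, hypothetical types (instances are assumed newest first):

```go
package main

import "fmt"

// exited is a simplified record of a terminated container instance.
type exited struct {
	name string // init container name
	id   string // runtime container id
}

// pruneOld mirrors the intent above: given exited instances sorted newest
// first, return the ids to remove, keeping the first (most recent) instance
// per name and anything listed in keep.
func pruneOld(instances []exited, initNames map[string]bool, keep map[string]bool) []string {
	var remove []string
	count := map[string]int{}
	for _, inst := range instances {
		if !initNames[inst.name] {
			continue // not an init container
		}
		count[inst.name]++
		if count[inst.name] == 1 || keep[inst.id] {
			continue // keep the newest instance and anything explicitly kept
		}
		remove = append(remove, inst.id)
	}
	return remove
}

func main() {
	remove := pruneOld(
		[]exited{{"migrate", "c3"}, {"migrate", "c2"}, {"migrate", "c1"}},
		map[string]bool{"migrate": true},
		map[string]bool{},
	)
	fmt.Println(remove) // [c2 c1]
}
```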
|
// findActiveInitContainer returns the status of the last failed container, the next init container to
|
||||||
|
// start, or done if there are no further init containers. Status is only returned if an init container
|
||||||
|
// failed, in which case next will point to the current container.
|
||||||
|
func findActiveInitContainer(pod *api.Pod, podStatus *kubecontainer.PodStatus) (next *api.Container, status *kubecontainer.ContainerStatus, done bool) {
|
||||||
|
if len(pod.Spec.InitContainers) == 0 {
|
||||||
|
return nil, nil, true
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := len(pod.Spec.InitContainers) - 1; i >= 0; i-- {
|
||||||
|
container := &pod.Spec.InitContainers[i]
|
||||||
|
status := podStatus.FindContainerStatusByName(container.Name)
|
||||||
|
switch {
|
||||||
|
case status == nil:
|
||||||
|
continue
|
||||||
|
case status.State == kubecontainer.ContainerStateRunning:
|
||||||
|
return nil, nil, false
|
||||||
|
case status.State == kubecontainer.ContainerStateExited:
|
||||||
|
switch {
|
||||||
|
// the container has failed, we'll have to retry
|
||||||
|
case status.ExitCode != 0:
|
||||||
|
return &pod.Spec.InitContainers[i], status, false
|
||||||
|
// all init containers successful
|
||||||
|
case i == (len(pod.Spec.InitContainers) - 1):
|
||||||
|
return nil, nil, true
|
||||||
|
// all containers up to i successful, go to i+1
|
||||||
|
default:
|
||||||
|
return &pod.Spec.InitContainers[i+1], nil, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &pod.Spec.InitContainers[0], nil, false
|
||||||
|
}
|
||||||
|
|
||||||
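For reference, findActiveInitContainer above walks the init containers from the last to the first and yields a three-way result: the next container to start, the status of a failed one, or "done". A hedged standalone sketch of the same shape (simplified status type, not the real PodStatus):

```go
package main

import "fmt"

// status is a simplified stand-in for a container status looked up by name.
type status struct {
	running  bool
	exited   bool
	exitCode int
}

// nextInit mirrors the reverse scan above: report which init container to run
// next, whether one failed, and whether initialization is complete.
func nextInit(names []string, statuses map[string]status) (next string, failedName string, done bool) {
	if len(names) == 0 {
		return "", "", true
	}
	for i := len(names) - 1; i >= 0; i-- {
		s, ok := statuses[names[i]]
		switch {
		case !ok:
			continue
		case s.running:
			return "", "", false // an init container is still running
		case s.exited && s.exitCode != 0:
			return names[i], names[i], false // retry the failed container
		case s.exited && i == len(names)-1:
			return "", "", true // the last init container succeeded
		case s.exited:
			return names[i+1], "", false // everything up to i succeeded
		}
	}
	return names[0], "", false // nothing has run yet
}

func main() {
	names := []string{"setup", "migrate"}
	fmt.Println(nextInit(names, map[string]status{"setup": {exited: true}}))
	// migrate  false
}
```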
// verifyNonRoot returns an error if the container or image will run as the root user.
|
// verifyNonRoot returns an error if the container or image will run as the root user.
|
||||||
func (dm *DockerManager) verifyNonRoot(container *api.Container) error {
|
func (dm *DockerManager) verifyNonRoot(container *api.Container) error {
|
||||||
if securitycontext.HasRunAsUser(container) {
|
if securitycontext.HasRunAsUser(container) {
|
||||||
@ -2018,6 +2245,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if cStatus != nil {
|
if cStatus != nil {
|
||||||
|
glog.Infof("checking backoff for container %q in pod %q", container.Name, pod.Name)
|
||||||
ts := cStatus.FinishedAt
|
ts := cStatus.FinishedAt
|
||||||
// found a container that requires backoff
|
// found a container that requires backoff
|
||||||
dockerName := KubeletContainerName{
|
dockerName := KubeletContainerName{
|
||||||
|
@ -1784,6 +1784,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
|||||||
!firstSeenTime.IsZero() {
|
!firstSeenTime.IsZero() {
|
||||||
metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
|
metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update status in the status manager
|
// Update status in the status manager
|
||||||
kl.statusManager.SetPodStatus(pod, apiPodStatus)
|
kl.statusManager.SetPodStatus(pod, apiPodStatus)
|
||||||
|
|
||||||
@ -2370,6 +2371,10 @@ func hasHostPortConflicts(pods []*api.Pod) bool {
|
|||||||
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
|
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 {
|
||||||
|
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
|
||||||
|
return true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -3392,12 +3397,46 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
|
|||||||
// This func is exported to simplify integration with 3rd party kubelet
|
// This func is exported to simplify integration with 3rd party kubelet
|
||||||
// integrations like kubernetes-mesos.
|
// integrations like kubernetes-mesos.
|
||||||
func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
|
func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
|
||||||
|
initialized := 0
|
||||||
|
pendingInitialization := 0
|
||||||
|
failedInitialization := 0
|
||||||
|
for _, container := range spec.InitContainers {
|
||||||
|
containerStatus, ok := api.GetContainerStatus(info, container.Name)
|
||||||
|
if !ok {
|
||||||
|
pendingInitialization++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case containerStatus.State.Running != nil:
|
||||||
|
pendingInitialization++
|
||||||
|
case containerStatus.State.Terminated != nil:
|
||||||
|
if containerStatus.State.Terminated.ExitCode == 0 {
|
||||||
|
initialized++
|
||||||
|
} else {
|
||||||
|
failedInitialization++
|
||||||
|
}
|
||||||
|
case containerStatus.State.Waiting != nil:
|
||||||
|
if containerStatus.LastTerminationState.Terminated != nil {
|
||||||
|
if containerStatus.LastTerminationState.Terminated.ExitCode == 0 {
|
||||||
|
initialized++
|
||||||
|
} else {
|
||||||
|
failedInitialization++
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pendingInitialization++
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
pendingInitialization++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
unknown := 0
|
||||||
running := 0
|
running := 0
|
||||||
waiting := 0
|
waiting := 0
|
||||||
stopped := 0
|
stopped := 0
|
||||||
failed := 0
|
failed := 0
|
||||||
succeeded := 0
|
succeeded := 0
|
||||||
unknown := 0
|
|
||||||
for _, container := range spec.Containers {
|
for _, container := range spec.Containers {
|
||||||
containerStatus, ok := api.GetContainerStatus(info, container.Name)
|
containerStatus, ok := api.GetContainerStatus(info, container.Name)
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -3426,7 +3465,13 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if failedInitialization > 0 && spec.RestartPolicy == api.RestartPolicyNever {
|
||||||
|
return api.PodFailed
|
||||||
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
|
case pendingInitialization > 0:
|
||||||
|
fallthrough
|
||||||
case waiting > 0:
|
case waiting > 0:
|
||||||
glog.V(5).Infof("pod waiting > 0, pending")
|
glog.V(5).Infof("pod waiting > 0, pending")
|
||||||
// One or more containers has not been started
|
// One or more containers has not been started
|
||||||
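The two GetPhase hunks above boil down to: a failed init container with RestartPolicy Never makes the pod Failed, and any pending initialization keeps it Pending before the regular-container counts are considered. A reduced, hedged sketch of that decision (not the full GetPhase logic):

```go
package main

import "fmt"

type phase string

const (
	podPending phase = "Pending"
	podFailed  phase = "Failed"
	podRunning phase = "Running"
)

// initTally is a simplified count of init container outcomes.
type initTally struct {
	pending, failed, initialized int
}

// phaseFor only looks at the init tally and whether the regular containers
// are all running; the real function also weighs waiting/stopped counts.
func phaseFor(t initTally, restartPolicyNever bool, regularRunning bool) phase {
	if t.failed > 0 && restartPolicyNever {
		return podFailed
	}
	if t.pending > 0 {
		return podPending // initialization has not finished
	}
	if regularRunning {
		return podRunning
	}
	return podPending
}

func main() {
	fmt.Println(phaseFor(initTally{failed: 1}, true, false))      // Failed
	fmt.Println(phaseFor(initTally{pending: 1}, false, false))    // Pending
	fmt.Println(phaseFor(initTally{initialized: 2}, false, true)) // Running
}
```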
@ -3491,8 +3536,10 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P
|
|||||||
|
|
||||||
// Assume info is ready to process
|
// Assume info is ready to process
|
||||||
spec := &pod.Spec
|
spec := &pod.Spec
|
||||||
s.Phase = GetPhase(spec, s.ContainerStatuses)
|
allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
|
||||||
|
s.Phase = GetPhase(spec, allStatus)
|
||||||
kl.probeManager.UpdatePodStatus(pod.UID, s)
|
kl.probeManager.UpdatePodStatus(pod.UID, s)
|
||||||
|
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
|
||||||
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
|
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
|
||||||
// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus()
|
// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus()
|
||||||
// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
|
// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
|
||||||
@ -3525,9 +3572,27 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P
|
|||||||
// alter the kubelet state at all.
|
// alter the kubelet state at all.
|
||||||
func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus {
|
func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus {
|
||||||
var apiPodStatus api.PodStatus
|
var apiPodStatus api.PodStatus
|
||||||
uid := pod.UID
|
|
||||||
apiPodStatus.PodIP = podStatus.IP
|
apiPodStatus.PodIP = podStatus.IP
|
||||||
|
|
||||||
|
apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
|
||||||
|
pod, podStatus,
|
||||||
|
pod.Status.ContainerStatuses,
|
||||||
|
pod.Spec.Containers,
|
||||||
|
len(pod.Spec.InitContainers) > 0,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
|
||||||
|
pod, podStatus,
|
||||||
|
pod.Status.InitContainerStatuses,
|
||||||
|
pod.Spec.InitContainers,
|
||||||
|
len(pod.Spec.InitContainers) > 0,
|
||||||
|
true,
|
||||||
|
)
|
||||||
|
|
||||||
|
return &apiPodStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubecontainer.PodStatus, previousStatus []api.ContainerStatus, containers []api.Container, hasInitContainers, isInitContainer bool) []api.ContainerStatus {
|
||||||
convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus {
|
convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus {
|
||||||
cid := cs.ID.String()
|
cid := cs.ID.String()
|
||||||
status := &api.ContainerStatus{
|
status := &api.ContainerStatus{
|
||||||
@ -3556,15 +3621,19 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Fetch old containers statuses from old pod status.
|
// Fetch old containers statuses from old pod status.
|
||||||
oldStatuses := make(map[string]api.ContainerStatus, len(pod.Spec.Containers))
|
oldStatuses := make(map[string]api.ContainerStatus, len(containers))
|
||||||
for _, status := range pod.Status.ContainerStatuses {
|
for _, status := range previousStatus {
|
||||||
oldStatuses[status.Name] = status
|
oldStatuses[status.Name] = status
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set all container statuses to default waiting state
|
// Set all container statuses to default waiting state
|
||||||
statuses := make(map[string]*api.ContainerStatus, len(pod.Spec.Containers))
|
statuses := make(map[string]*api.ContainerStatus, len(containers))
|
||||||
defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}}
|
defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}}
|
||||||
for _, container := range pod.Spec.Containers {
|
if hasInitContainers {
|
||||||
|
defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, container := range containers {
|
||||||
status := &api.ContainerStatus{
|
status := &api.ContainerStatus{
|
||||||
Name: container.Name,
|
Name: container.Name,
|
||||||
Image: container.Image,
|
Image: container.Image,
|
||||||
@ -3580,7 +3649,6 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
|
|||||||
|
|
||||||
// Make the latest container status comes first.
|
// Make the latest container status comes first.
|
||||||
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses)))
|
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses)))
|
||||||
|
|
||||||
// Set container statuses according to the statuses seen in pod status
|
// Set container statuses according to the statuses seen in pod status
|
||||||
containerSeen := map[string]int{}
|
containerSeen := map[string]int{}
|
||||||
for _, cStatus := range podStatus.ContainerStatuses {
|
for _, cStatus := range podStatus.ContainerStatuses {
|
||||||
@ -3602,13 +3670,13 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Handle the containers failed to be started, which should be in Waiting state.
|
// Handle the containers failed to be started, which should be in Waiting state.
|
||||||
for _, container := range pod.Spec.Containers {
|
for _, container := range containers {
|
||||||
// If a container should be restarted in next syncpod, it is *Waiting*.
|
// If a container should be restarted in next syncpod, it is *Waiting*.
|
||||||
if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
|
if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
status := statuses[container.Name]
|
status := statuses[container.Name]
|
||||||
reason, message, ok := kl.reasonCache.Get(uid, container.Name)
|
reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name)
|
||||||
if !ok {
|
if !ok {
|
||||||
// In fact, we could also apply Waiting state here, but it is less informative,
|
// In fact, we could also apply Waiting state here, but it is less informative,
|
||||||
// and the container will be restarted soon, so we prefer the original state here.
|
// and the container will be restarted soon, so we prefer the original state here.
|
||||||
@ -3630,15 +3698,15 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
|
|||||||
statuses[container.Name] = status
|
statuses[container.Name] = status
|
||||||
}
|
}
|
||||||
|
|
||||||
apiPodStatus.ContainerStatuses = make([]api.ContainerStatus, 0)
|
var containerStatuses []api.ContainerStatus
|
||||||
for _, status := range statuses {
|
for _, status := range statuses {
|
||||||
apiPodStatus.ContainerStatuses = append(apiPodStatus.ContainerStatuses, *status)
|
containerStatuses = append(containerStatuses, *status)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sort the container statuses since clients of this interface expect the list
|
// Sort the container statuses since clients of this interface expect the list
|
||||||
// of containers in a pod has a deterministic order.
|
// of containers in a pod has a deterministic order.
|
||||||
sort.Sort(kubetypes.SortedContainerStatuses(apiPodStatus.ContainerStatuses))
|
sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
|
||||||
return &apiPodStatus
|
return containerStatuses
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns logs of current machine.
|
// Returns logs of current machine.
|
||||||
|
@ -207,6 +207,15 @@ func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *api.PodStatus) {
|
|||||||
}
|
}
|
||||||
podStatus.ContainerStatuses[i].Ready = ready
|
podStatus.ContainerStatuses[i].Ready = ready
|
||||||
}
|
}
|
||||||
|
// init containers are ready if they have exited with success or if a readiness probe has
|
||||||
|
// succeeded.
|
||||||
|
for i, c := range podStatus.InitContainerStatuses {
|
||||||
|
var ready bool
|
||||||
|
if c.State.Terminated != nil && c.State.Terminated.ExitCode == 0 {
|
||||||
|
ready = true
|
||||||
|
}
|
||||||
|
podStatus.InitContainerStatuses[i].Ready = ready
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *manager) getWorker(podUID types.UID, containerName string, probeType probeType) (*worker, bool) {
|
func (m *manager) getWorker(podUID types.UID, containerName string, probeType probeType) (*worker, bool) {
|
||||||
|
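Per the probe-manager change above, an init container is treated as ready once it has terminated with exit code 0. A tiny hedged sketch of that rule with a stand-in type:

```go
package main

import "fmt"

// terminated is a simplified stand-in for a terminated container state.
type terminated struct{ exitCode int }

// initContainerReady mirrors the rule above: ready only once terminated with exit code 0.
func initContainerReady(t *terminated) bool {
	return t != nil && t.exitCode == 0
}

func main() {
	fmt.Println(initContainerReady(&terminated{exitCode: 0})) // true
	fmt.Println(initContainerReady(&terminated{exitCode: 1})) // false
	fmt.Println(initContainerReady(nil))                      // false (still waiting or running)
}
```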
@ -465,6 +465,13 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re
|
|||||||
containerExists = true
|
containerExists = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if !containerExists {
|
||||||
|
for _, container := range pod.Spec.InitContainers {
|
||||||
|
if container.Name == containerName {
|
||||||
|
containerExists = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
if !containerExists {
|
if !containerExists {
|
||||||
response.WriteError(http.StatusNotFound, fmt.Errorf("container %q not found in pod %q\n", containerName, podID))
|
response.WriteError(http.StatusNotFound, fmt.Errorf("container %q not found in pod %q\n", containerName, podID))
|
||||||
return
|
return
|
||||||
|
@ -77,3 +77,58 @@ func GeneratePodReadyCondition(spec *api.PodSpec, containerStatuses []api.Contai
|
|||||||
Status: api.ConditionTrue,
|
Status: api.ConditionTrue,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GeneratePodInitializedCondition returns initialized condition if all init containers in a pod are ready, else it
|
||||||
|
// returns an uninitialized condition.
|
||||||
|
func GeneratePodInitializedCondition(spec *api.PodSpec, containerStatuses []api.ContainerStatus, podPhase api.PodPhase) api.PodCondition {
|
||||||
|
// Find if all containers are ready or not.
|
||||||
|
if containerStatuses == nil && len(spec.InitContainers) > 0 {
|
||||||
|
return api.PodCondition{
|
||||||
|
Type: api.PodInitialized,
|
||||||
|
Status: api.ConditionFalse,
|
||||||
|
Reason: "UnknownContainerStatuses",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
unknownContainers := []string{}
|
||||||
|
unreadyContainers := []string{}
|
||||||
|
for _, container := range spec.InitContainers {
|
||||||
|
if containerStatus, ok := api.GetContainerStatus(containerStatuses, container.Name); ok {
|
||||||
|
if !containerStatus.Ready {
|
||||||
|
unreadyContainers = append(unreadyContainers, container.Name)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
unknownContainers = append(unknownContainers, container.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If all init containers are known and succeeded, just return PodCompleted.
|
||||||
|
if podPhase == api.PodSucceeded && len(unknownContainers) == 0 {
|
||||||
|
return api.PodCondition{
|
||||||
|
Type: api.PodInitialized,
|
||||||
|
Status: api.ConditionTrue,
|
||||||
|
Reason: "PodCompleted",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
unreadyMessages := []string{}
|
||||||
|
if len(unknownContainers) > 0 {
|
||||||
|
unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
|
||||||
|
}
|
||||||
|
if len(unreadyContainers) > 0 {
|
||||||
|
unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with incomplete status: %s", unreadyContainers))
|
||||||
|
}
|
||||||
|
unreadyMessage := strings.Join(unreadyMessages, ", ")
|
||||||
|
if unreadyMessage != "" {
|
||||||
|
return api.PodCondition{
|
||||||
|
Type: api.PodInitialized,
|
||||||
|
Status: api.ConditionFalse,
|
||||||
|
Reason: "ContainersNotInitialized",
|
||||||
|
Message: unreadyMessage,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return api.PodCondition{
|
||||||
|
Type: api.PodInitialized,
|
||||||
|
Status: api.ConditionTrue,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
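The condition generator added above reduces to: any unknown or not-ready init container yields a false Initialized condition with reason ContainersNotInitialized, otherwise the pod is Initialized. A hedged sketch of that reduction (reason and message are merged here for brevity; the real type keeps them separate):

```go
package main

import (
	"fmt"
	"strings"
)

// condition is a simplified stand-in for an api.PodCondition.
type condition struct {
	Status string // "True" or "False"
	Reason string
}

// initializedCondition: names are the declared init containers, ready holds
// the ones whose status is Ready.
func initializedCondition(names []string, ready map[string]bool) condition {
	var unready []string
	for _, n := range names {
		if !ready[n] {
			unready = append(unready, n)
		}
	}
	if len(unready) > 0 {
		return condition{
			Status: "False",
			Reason: "ContainersNotInitialized: " + strings.Join(unready, ", "),
		}
	}
	return condition{Status: "True"}
}

func main() {
	fmt.Println(initializedCondition([]string{"setup", "migrate"}, map[string]bool{"setup": true}))
	// {False ContainersNotInitialized: migrate}
	fmt.Println(initializedCondition([]string{"setup"}, map[string]bool{"setup": true}))
	// {True }
}
```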
@ -172,20 +172,14 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Find the container to update.
|
// Find the container to update.
|
||||||
containerIndex := -1
|
containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String())
|
||||||
for i, c := range oldStatus.status.ContainerStatuses {
|
if !ok {
|
||||||
if c.ContainerID == containerID.String() {
|
|
||||||
containerIndex = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if containerIndex == -1 {
|
|
||||||
glog.Warningf("Container readiness changed for unknown container: %q - %q",
|
glog.Warningf("Container readiness changed for unknown container: %q - %q",
|
||||||
format.Pod(pod), containerID.String())
|
format.Pod(pod), containerID.String())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if oldStatus.status.ContainerStatuses[containerIndex].Ready == ready {
|
if containerStatus.Ready == ready {
|
||||||
glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready,
|
glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready,
|
||||||
format.Pod(pod), containerID.String())
|
format.Pod(pod), containerID.String())
|
||||||
return
|
return
|
||||||
@ -196,7 +190,8 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
status.ContainerStatuses[containerIndex].Ready = ready
|
containerStatus, _, _ = findContainerStatus(&status, containerID.String())
|
||||||
|
containerStatus.Ready = ready
|
||||||
|
|
||||||
// Update pod condition.
|
// Update pod condition.
|
||||||
readyConditionIndex := -1
|
readyConditionIndex := -1
|
||||||
@ -217,6 +212,31 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
|
|||||||
m.updateStatusInternal(pod, status, false)
|
m.updateStatusInternal(pod, status, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func findContainerStatus(status *api.PodStatus, containerID string) (containerStatus *api.ContainerStatus, init bool, ok bool) {
|
||||||
|
// Find the container to update.
|
||||||
|
containerIndex := -1
|
||||||
|
for i, c := range status.ContainerStatuses {
|
||||||
|
if c.ContainerID == containerID {
|
||||||
|
containerIndex = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if containerIndex != -1 {
|
||||||
|
return &status.ContainerStatuses[containerIndex], false, true
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, c := range status.InitContainerStatuses {
|
||||||
|
if c.ContainerID == containerID {
|
||||||
|
containerIndex = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if containerIndex != -1 {
|
||||||
|
return &status.InitContainerStatuses[containerIndex], true, true
|
||||||
|
}
|
||||||
|
return nil, false, false
|
||||||
|
}
|
||||||
|
|
||||||
func (m *manager) TerminatePod(pod *api.Pod) {
|
func (m *manager) TerminatePod(pod *api.Pod) {
|
||||||
m.podStatusesLock.Lock()
|
m.podStatusesLock.Lock()
|
||||||
defer m.podStatusesLock.Unlock()
|
defer m.podStatusesLock.Unlock()
|
||||||
@@ -233,6 +253,11 @@ func (m *manager) TerminatePod(pod *api.Pod) {
             Terminated: &api.ContainerStateTerminated{},
         }
     }
+    for i := range status.InitContainerStatuses {
+        status.InitContainerStatuses[i].State = api.ContainerState{
+            Terminated: &api.ContainerStateTerminated{},
+        }
+    }
     m.updateStatusInternal(pod, pod.Status, true)
 }
 
@@ -251,16 +276,27 @@ func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, force
     }
 
     // Set ReadyCondition.LastTransitionTime.
-    if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
+    if _, readyCondition := api.GetPodCondition(&status, api.PodReady); readyCondition != nil {
         // Need to set LastTransitionTime.
         lastTransitionTime := unversioned.Now()
-        oldReadyCondition := api.GetPodReadyCondition(oldStatus)
+        _, oldReadyCondition := api.GetPodCondition(&oldStatus, api.PodReady)
         if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
             lastTransitionTime = oldReadyCondition.LastTransitionTime
         }
         readyCondition.LastTransitionTime = lastTransitionTime
     }
 
+    // Set InitializedCondition.LastTransitionTime.
+    if _, initCondition := api.GetPodCondition(&status, api.PodInitialized); initCondition != nil {
+        // Need to set LastTransitionTime.
+        lastTransitionTime := unversioned.Now()
+        _, oldInitCondition := api.GetPodCondition(&oldStatus, api.PodInitialized)
+        if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status {
+            lastTransitionTime = oldInitCondition.LastTransitionTime
+        }
+        initCondition.LastTransitionTime = lastTransitionTime
+    }
+
     // ensure that the start time does not change across updates.
     if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
         status.StartTime = oldStatus.StartTime
@@ -490,6 +526,8 @@ func normalizeStatus(status *api.PodStatus) *api.PodStatus {
         normalizeTimeStamp(&condition.LastProbeTime)
         normalizeTimeStamp(&condition.LastTransitionTime)
     }
+
+    // update container statuses
     for i := range status.ContainerStatuses {
         cstatus := &status.ContainerStatuses[i]
         normalizeContainerState(&cstatus.State)
@@ -497,6 +535,15 @@ func normalizeStatus(status *api.PodStatus) *api.PodStatus {
     }
     // Sort the container statuses, so that the order won't affect the result of comparison
     sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses))
+
+    // update init container statuses
+    for i := range status.InitContainerStatuses {
+        cstatus := &status.InitContainerStatuses[i]
+        normalizeContainerState(&cstatus.State)
+        normalizeContainerState(&cstatus.LastTerminationState)
+    }
+    // Sort the container statuses, so that the order won't affect the result of comparison
+    sort.Sort(kubetypes.SortedContainerStatuses(status.InitContainerStatuses))
     return status
 }
 
@@ -63,6 +63,11 @@ func canRunPod(pod *api.Pod) error {
                 return fmt.Errorf("pod with UID %q specified privileged container, but is disallowed", pod.UID)
             }
         }
+        for _, container := range pod.Spec.InitContainers {
+            if securitycontext.HasPrivilegedRequest(&container) {
+                return fmt.Errorf("pod with UID %q specified privileged container, but is disallowed", pod.UID)
+            }
+        }
     }
     return nil
 }
 
@@ -208,45 +208,54 @@ func defaultContainerResourceRequirements(limitRange *api.LimitRange) api.Resour
     return requirements
 }
 
+// mergeContainerResources handles defaulting all of the resources on a container.
+func mergeContainerResources(container *api.Container, defaultRequirements *api.ResourceRequirements, annotationPrefix string, annotations []string) []string {
+    setRequests := []string{}
+    setLimits := []string{}
+    if container.Resources.Limits == nil {
+        container.Resources.Limits = api.ResourceList{}
+    }
+    if container.Resources.Requests == nil {
+        container.Resources.Requests = api.ResourceList{}
+    }
+    for k, v := range defaultRequirements.Limits {
+        _, found := container.Resources.Limits[k]
+        if !found {
+            container.Resources.Limits[k] = *v.Copy()
+            setLimits = append(setLimits, string(k))
+        }
+    }
+    for k, v := range defaultRequirements.Requests {
+        _, found := container.Resources.Requests[k]
+        if !found {
+            container.Resources.Requests[k] = *v.Copy()
+            setRequests = append(setRequests, string(k))
+        }
+    }
+    if len(setRequests) > 0 {
+        sort.Strings(setRequests)
+        a := strings.Join(setRequests, ", ") + fmt.Sprintf(" request for %s %s", annotationPrefix, container.Name)
+        annotations = append(annotations, a)
+    }
+    if len(setLimits) > 0 {
+        sort.Strings(setLimits)
+        a := strings.Join(setLimits, ", ") + fmt.Sprintf(" limit for %s %s", annotationPrefix, container.Name)
+        annotations = append(annotations, a)
+    }
+    return annotations
+}
+
 // mergePodResourceRequirements merges enumerated requirements with default requirements
 // it annotates the pod with information about what requirements were modified
 func mergePodResourceRequirements(pod *api.Pod, defaultRequirements *api.ResourceRequirements) {
     annotations := []string{}
 
     for i := range pod.Spec.Containers {
-        container := &pod.Spec.Containers[i]
-        setRequests := []string{}
-        setLimits := []string{}
-        if container.Resources.Limits == nil {
-            container.Resources.Limits = api.ResourceList{}
-        }
-        if container.Resources.Requests == nil {
-            container.Resources.Requests = api.ResourceList{}
-        }
-        for k, v := range defaultRequirements.Limits {
-            _, found := container.Resources.Limits[k]
-            if !found {
-                container.Resources.Limits[k] = *v.Copy()
-                setLimits = append(setLimits, string(k))
-            }
-        }
-        for k, v := range defaultRequirements.Requests {
-            _, found := container.Resources.Requests[k]
-            if !found {
-                container.Resources.Requests[k] = *v.Copy()
-                setRequests = append(setRequests, string(k))
-            }
-        }
-        if len(setRequests) > 0 {
-            sort.Strings(setRequests)
-            a := strings.Join(setRequests, ", ") + " request for container " + container.Name
-            annotations = append(annotations, a)
-        }
-        if len(setLimits) > 0 {
-            sort.Strings(setLimits)
-            a := strings.Join(setLimits, ", ") + " limit for container " + container.Name
-            annotations = append(annotations, a)
-        }
+        annotations = mergeContainerResources(&pod.Spec.Containers[i], defaultRequirements, "container", annotations)
+    }
+
+    for i := range pod.Spec.InitContainers {
+        annotations = mergeContainerResources(&pod.Spec.InitContainers[i], defaultRequirements, "init container", annotations)
     }
 
     if len(annotations) > 0 {
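The refactor above pulls the per-container defaulting into mergeContainerResources so that mergePodResourceRequirements can run the same pass over pod.Spec.Containers and pod.Spec.InitContainers. A standalone sketch of the defaulting rule (fill in only the resources a container did not set, and remember which keys were defaulted for the pod annotation); this is a simplification that uses plain string maps instead of api.ResourceList, and the "50m"/"5Mi" values are simply the request defaults asserted later in TestPodLimitFuncApplyDefault:

```go
package main

import (
	"fmt"
	"sort"
)

// mergeDefaults copies a default value for every resource key the container
// left unset and returns the sorted list of keys that were filled in.
func mergeDefaults(containerValues, defaults map[string]string) []string {
	set := []string{}
	for k, v := range defaults {
		if _, found := containerValues[k]; !found {
			containerValues[k] = v
			set = append(set, k)
		}
	}
	sort.Strings(set)
	return set
}

func main() {
	defaultRequests := map[string]string{"cpu": "50m", "memory": "5Mi"}

	// An init container that only set memory keeps its own value; cpu is defaulted.
	initContainer := map[string]string{"memory": "512Mi"}
	filled := mergeDefaults(initContainer, defaultRequests)

	fmt.Println(initContainer)        // map[cpu:50m memory:512Mi]
	fmt.Println("defaulted:", filled) // defaulted: [cpu]
}
```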
@@ -441,7 +450,7 @@ func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error {
             }
         }
 
-        // enforce pod limits
+        // enforce pod limits on init containers
         if limitType == api.LimitTypePod {
             containerRequests, containerLimits := []api.ResourceList{}, []api.ResourceList{}
             for j := range pod.Spec.Containers {
@@ -451,6 +460,28 @@ func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error {
             }
             podRequests := sum(containerRequests)
             podLimits := sum(containerLimits)
+            for j := range pod.Spec.InitContainers {
+                container := &pod.Spec.InitContainers[j]
+                // take max(sum_containers, any_init_container)
+                for k, v := range container.Resources.Requests {
+                    if v2, ok := podRequests[k]; ok {
+                        if v.Cmp(v2) > 0 {
+                            podRequests[k] = v
+                        }
+                    } else {
+                        podRequests[k] = v
+                    }
+                }
+                for k, v := range container.Resources.Limits {
+                    if v2, ok := podLimits[k]; ok {
+                        if v.Cmp(v2) > 0 {
+                            podLimits[k] = v
+                        }
+                    } else {
+                        podLimits[k] = v
+                    }
+                }
+            }
             for k, v := range limit.Min {
                 if err := minConstraint(limitType, k, v, podRequests, podLimits); err != nil {
                     errs = append(errs, err)
@@ -134,6 +134,17 @@ func validPod(name string, numContainers int, resources api.ResourceRequirements
     return pod
 }
 
+func validPodInit(pod api.Pod, resources ...api.ResourceRequirements) api.Pod {
+    for i := 0; i < len(resources); i++ {
+        pod.Spec.InitContainers = append(pod.Spec.InitContainers, api.Container{
+            Image: "foo:V" + strconv.Itoa(i),
+            Resources: resources[i],
+            Name: "foo-" + strconv.Itoa(i),
+        })
+    }
+    return pod
+}
+
 func TestDefaultContainerResourceRequirements(t *testing.T) {
     limitRange := validLimitRange()
     expected := api.ResourceRequirements{
@@ -183,7 +194,7 @@ func TestMergePodResourceRequirements(t *testing.T) {
 
     // pod with some resources enumerated should only merge empty
     input := getResourceRequirements(getResourceList("", "512Mi"), getResourceList("", ""))
-    pod = validPod("limit-memory", 1, input)
+    pod = validPodInit(validPod("limit-memory", 1, input), input)
     expected = api.ResourceRequirements{
         Requests: api.ResourceList{
             api.ResourceCPU: defaultRequirements.Requests[api.ResourceCPU],
@@ -198,11 +209,18 @@ func TestMergePodResourceRequirements(t *testing.T) {
             t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, expected, actual)
         }
     }
+    for i := range pod.Spec.InitContainers {
+        actual := pod.Spec.InitContainers[i].Resources
+        if !api.Semantic.DeepEqual(expected, actual) {
+            t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, expected, actual)
+        }
+    }
     verifyAnnotation(t, &pod, "LimitRanger plugin set: cpu request for container foo-0; cpu, memory limit for container foo-0")
 
     // pod with all resources enumerated should not merge anything
     input = getResourceRequirements(getResourceList("100m", "512Mi"), getResourceList("200m", "1G"))
-    pod = validPod("limit-memory", 1, input)
+    initInputs := []api.ResourceRequirements{getResourceRequirements(getResourceList("200m", "1G"), getResourceList("400m", "2G"))}
+    pod = validPodInit(validPod("limit-memory", 1, input), initInputs...)
     expected = input
     mergePodResourceRequirements(&pod, &defaultRequirements)
     for i := range pod.Spec.Containers {
@@ -211,6 +229,12 @@ func TestMergePodResourceRequirements(t *testing.T) {
             t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, expected, actual)
         }
     }
+    for i := range pod.Spec.InitContainers {
+        actual := pod.Spec.InitContainers[i].Resources
+        if !api.Semantic.DeepEqual(initInputs[i], actual) {
+            t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, initInputs[i], actual)
+        }
+    }
     expectNoAnnotation(t, &pod)
 }
 
@@ -273,6 +297,20 @@ func TestPodLimitFunc(t *testing.T) {
            pod: validPod("pod-min-memory-request-limit", 2, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", "100Mi"))),
            limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
         },
+        {
+            pod: validPodInit(
+                validPod("pod-init-min-memory-request", 2, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", ""))),
+                getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", "")),
+            ),
+            limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+        },
+        {
+            pod: validPodInit(
+                validPod("pod-init-min-memory-request-limit", 2, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", "100Mi"))),
+                getResourceRequirements(getResourceList("", "80Mi"), getResourceList("", "100Mi")),
+            ),
+            limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+        },
         {
            pod: validPod("pod-max-cpu-request-limit", 2, getResourceRequirements(getResourceList("500m", ""), getResourceList("1", ""))),
            limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
@@ -281,6 +319,22 @@ func TestPodLimitFunc(t *testing.T) {
            pod: validPod("pod-max-cpu-limit", 2, getResourceRequirements(getResourceList("", ""), getResourceList("1", ""))),
            limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
         },
+        {
+            pod: validPodInit(
+                validPod("pod-init-max-cpu-request-limit", 2, getResourceRequirements(getResourceList("500m", ""), getResourceList("1", ""))),
+                getResourceRequirements(getResourceList("1", ""), getResourceList("2", "")),
+                getResourceRequirements(getResourceList("1", ""), getResourceList("1", "")),
+            ),
+            limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+        },
+        {
+            pod: validPodInit(
+                validPod("pod-init-max-cpu-limit", 2, getResourceRequirements(getResourceList("", ""), getResourceList("1", ""))),
+                getResourceRequirements(getResourceList("", ""), getResourceList("2", "")),
+                getResourceRequirements(getResourceList("", ""), getResourceList("2", "")),
+            ),
+            limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+        },
         {
            pod: validPod("pod-max-mem-request-limit", 2, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "500Mi"))),
            limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
@@ -387,6 +441,13 @@ func TestPodLimitFunc(t *testing.T) {
            pod: validPod("pod-max-mem-limit", 3, getResourceRequirements(getResourceList("", ""), getResourceList("", "500Mi"))),
            limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
         },
+        {
+            pod: validPodInit(
+                validPod("pod-init-max-mem-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "500Mi"))),
+                getResourceRequirements(getResourceList("", ""), getResourceList("", "1.5Gi")),
+            ),
+            limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+        },
         {
            pod: validPod("pod-max-mem-ratio", 3, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "500Mi"))),
            limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "2Gi"), api.ResourceList{}, api.ResourceList{}, getResourceList("", "1.5")),
@@ -403,7 +464,7 @@ func TestPodLimitFunc(t *testing.T) {
 
 func TestPodLimitFuncApplyDefault(t *testing.T) {
     limitRange := validLimitRange()
-    testPod := validPod("foo", 1, getResourceRequirements(api.ResourceList{}, api.ResourceList{}))
+    testPod := validPodInit(validPod("foo", 1, getResourceRequirements(api.ResourceList{}, api.ResourceList{})), getResourceRequirements(api.ResourceList{}, api.ResourceList{}))
     err := PodLimitFunc(&limitRange, &testPod)
     if err != nil {
         t.Errorf("Unexpected error for valid pod: %v, %v", testPod.Name, err)
@@ -429,6 +490,27 @@ func TestPodLimitFuncApplyDefault(t *testing.T) {
             t.Errorf("Unexpected cpu value %s", requestCpu)
         }
     }
+
+    for i := range testPod.Spec.InitContainers {
+        container := testPod.Spec.InitContainers[i]
+        limitMemory := container.Resources.Limits.Memory().String()
+        limitCpu := container.Resources.Limits.Cpu().String()
+        requestMemory := container.Resources.Requests.Memory().String()
+        requestCpu := container.Resources.Requests.Cpu().String()
+
+        if limitMemory != "10Mi" {
+            t.Errorf("Unexpected memory value %s", limitMemory)
+        }
+        if limitCpu != "75m" {
+            t.Errorf("Unexpected cpu value %s", limitCpu)
+        }
+        if requestMemory != "5Mi" {
+            t.Errorf("Unexpected memory value %s", requestMemory)
+        }
+        if requestCpu != "50m" {
+            t.Errorf("Unexpected cpu value %s", requestCpu)
+        }
+    }
 }
 
 func TestLimitRangerIgnoresSubresource(t *testing.T) {
@@ -21,7 +21,7 @@ import "fmt"
 const (
     podCountResourceName string = "PodCount"
     cpuResourceName string = "CPU"
-    memoryResoureceName string = "Memory"
+    memoryResourceName string = "Memory"
     nvidiaGpuResourceName string = "NvidiaGpu"
 )
 
@@ -359,6 +359,16 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
         result.milliCPU += requests.Cpu().MilliValue()
         result.nvidiaGPU += requests.NvidiaGPU().Value()
     }
+    // take max_resource(sum_pod, any_init_container)
+    for _, container := range pod.Spec.InitContainers {
+        requests := container.Resources.Requests
+        if mem := requests.Memory().Value(); mem > result.memory {
+            result.memory = mem
+        }
+        if cpu := requests.Cpu().MilliValue(); cpu > result.milliCPU {
+            result.milliCPU = cpu
+        }
+    }
     return result
 }
 
@@ -428,7 +438,7 @@ func PodFitsResources(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, er
     }
     if totalMemory < podRequest.memory+nodeInfo.RequestedResource().Memory {
         return false,
-            newInsufficientResourceError(memoryResoureceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
+            newInsufficientResourceError(memoryResourceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
     }
     if totalNvidiaGPU < podRequest.nvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
         return false,
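The new loop in getResourceRequest applies the scheduling rule for init container resources: sum the app container requests, then raise each resource to the largest value requested by any single init container, since init containers run one at a time before the app containers start. A standalone sketch of that fit check, using a trimmed-down version of the resourceRequest struct and the numbers from the "too many resources fails due to init container cpu" test case below (assumed here only for illustration):

```go
package main

import "fmt"

// Trimmed-down resourceRequest: just milliCPU and memory.
type resourceRequest struct {
	milliCPU int64
	memory   int64
}

// podRequest mirrors getResourceRequest: sum app containers, then take the
// max against each init container individually.
func podRequest(containers, initContainers []resourceRequest) resourceRequest {
	var r resourceRequest
	for _, c := range containers {
		r.milliCPU += c.milliCPU
		r.memory += c.memory
	}
	for _, c := range initContainers {
		if c.milliCPU > r.milliCPU {
			r.milliCPU = c.milliCPU
		}
		if c.memory > r.memory {
			r.memory = c.memory
		}
	}
	return r
}

func main() {
	// Node allocatable 10 milliCPU / 20 memory; 8/19 already requested by other pods.
	allocatable := resourceRequest{milliCPU: 10, memory: 20}
	requested := resourceRequest{milliCPU: 8, memory: 19}

	// One app container {1,1} plus one init container {3,1} gives an effective request of {3,1}.
	req := podRequest(
		[]resourceRequest{{milliCPU: 1, memory: 1}},
		[]resourceRequest{{milliCPU: 3, memory: 1}},
	)

	fitsCPU := requested.milliCPU+req.milliCPU <= allocatable.milliCPU
	fitsMem := requested.memory+req.memory <= allocatable.memory
	fmt.Printf("request=%+v fitsCPU=%v fitsMem=%v\n", req, fitsCPU, fitsMem) // fitsCPU=false, so the pod does not fit
}
```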
@@ -111,6 +111,11 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
        }
    }
 }
 
+func newResourceInitPod(pod *api.Pod, usage ...resourceRequest) *api.Pod {
+    pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
+    return pod
+}
+
 func TestPodFitsResources(t *testing.T) {
     enoughPodsTests := []struct {
         pod *api.Pod
@@ -135,6 +140,54 @@ func TestPodFitsResources(t *testing.T) {
            test: "too many resources fails",
            wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
         },
+        {
+            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}),
+            nodeInfo: schedulercache.NewNodeInfo(
+                newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+            fits: false,
+            test: "too many resources fails due to init container cpu",
+            wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
+        },
+        {
+            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}, resourceRequest{milliCPU: 2, memory: 1}),
+            nodeInfo: schedulercache.NewNodeInfo(
+                newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+            fits: false,
+            test: "too many resources fails due to highest init container cpu",
+            wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
+        },
+        {
+            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}),
+            nodeInfo: schedulercache.NewNodeInfo(
+                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+            fits: false,
+            test: "too many resources fails due to init container memory",
+            wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
+        },
+        {
+            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}, resourceRequest{milliCPU: 1, memory: 2}),
+            nodeInfo: schedulercache.NewNodeInfo(
+                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+            fits: false,
+            test: "too many resources fails due to highest init container memory",
+            wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
+        },
+        {
+            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}),
+            nodeInfo: schedulercache.NewNodeInfo(
+                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+            fits: true,
+            test: "init container fits because it's the max, not sum, of containers and init containers",
+            wErr: nil,
+        },
+        {
+            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}, resourceRequest{milliCPU: 1, memory: 1}),
+            nodeInfo: schedulercache.NewNodeInfo(
+                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+            fits: true,
+            test: "multiple init containers fit because it's the max, not sum, of containers and init containers",
+            wErr: nil,
+        },
         {
            pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
            nodeInfo: schedulercache.NewNodeInfo(
@@ -149,7 +202,7 @@ func TestPodFitsResources(t *testing.T) {
                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
            fits: false,
            test: "one resources fits",
-           wErr: newInsufficientResourceError(memoryResoureceName, 2, 19, 20),
+           wErr: newInsufficientResourceError(memoryResourceName, 2, 19, 20),
         },
         {
            pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
@@ -159,6 +212,14 @@ func TestPodFitsResources(t *testing.T) {
            test: "equal edge case",
            wErr: nil,
         },
+        {
+            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 4, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+            nodeInfo: schedulercache.NewNodeInfo(
+                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+            fits: true,
+            test: "equal edge case for init container",
+            wErr: nil,
+        },
     }
 
     for _, test := range enoughPodsTests {
@@ -205,6 +266,14 @@ func TestPodFitsResources(t *testing.T) {
            test: "even for equal edge case predicate fails when there's no space for additional pod",
            wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
         },
+        {
+            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+            nodeInfo: schedulercache.NewNodeInfo(
+                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+            fits: false,
+            test: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
+            wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
+        },
     }
     for _, test := range notEnoughPodsTests {
         node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1)}}
@@ -879,6 +879,109 @@ func deleteNS(c *client.Client, namespace string, timeout time.Duration) error {
     return nil
 }
 
+func ContainerInitInvariant(older, newer runtime.Object) error {
+    oldPod := older.(*api.Pod)
+    newPod := newer.(*api.Pod)
+    if len(oldPod.Spec.InitContainers) == 0 {
+        return nil
+    }
+    if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) {
+        return fmt.Errorf("init container list changed")
+    }
+    if oldPod.UID != newPod.UID {
+        return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID)
+    }
+    if err := initContainersInvariants(oldPod); err != nil {
+        return err
+    }
+    if err := initContainersInvariants(newPod); err != nil {
+        return err
+    }
+    oldInit, _, _ := podInitialized(oldPod)
+    newInit, _, _ := podInitialized(newPod)
+    if oldInit && !newInit {
+        // TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it
+        // from scratch
+        return fmt.Errorf("pod cannot be initialized and then regress to not being initialized")
+    }
+    return nil
+}
+
+func podInitialized(pod *api.Pod) (ok bool, failed bool, err error) {
+    allInit := true
+    initFailed := false
+    for _, s := range pod.Status.InitContainerStatuses {
+        switch {
+        case initFailed && s.State.Waiting == nil:
+            return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
+        case allInit && s.State.Waiting == nil:
+            return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
+        case s.State.Terminated == nil:
+            allInit = false
+        case s.State.Terminated.ExitCode != 0:
+            allInit = false
+            initFailed = true
+        case !s.Ready:
+            return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
+        }
+    }
+    return allInit, initFailed, nil
+}
+
+func initContainersInvariants(pod *api.Pod) error {
+    allInit, initFailed, err := podInitialized(pod)
+    if err != nil {
+        return err
+    }
+    if !allInit || initFailed {
+        for _, s := range pod.Status.ContainerStatuses {
+            if s.State.Waiting == nil || s.RestartCount != 0 {
+                return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name)
+            }
+            if s.State.Waiting.Reason != "PodInitializing" {
+                return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason)
+            }
+        }
+    }
+    _, c := api.GetPodCondition(&pod.Status, api.PodInitialized)
+    if c == nil {
+        return fmt.Errorf("pod does not have initialized condition")
+    }
+    if c.LastTransitionTime.IsZero() {
+        return fmt.Errorf("PodInitialized condition should always have a transition time")
+    }
+    switch {
+    case c.Status == api.ConditionUnknown:
+        return fmt.Errorf("PodInitialized condition should never be Unknown")
+    case c.Status == api.ConditionTrue && (initFailed || !allInit):
+        return fmt.Errorf("PodInitialized condition was True but all not all containers initialized")
+    case c.Status == api.ConditionFalse && (!initFailed && allInit):
+        return fmt.Errorf("PodInitialized condition was False but all containers initialized")
+    }
+    return nil
+}
+
+type InvariantFunc func(older, newer runtime.Object) error
+
+func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
+    errs := sets.NewString()
+    for i := range events {
+        j := i + 1
+        if j >= len(events) {
+            continue
+        }
+        for _, fn := range fns {
+            if err := fn(events[i].Object, events[j].Object); err != nil {
+                errs.Insert(err.Error())
+            }
+        }
+    }
+    if errs.Len() > 0 {
+        return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* "))
+    }
+    return nil
+}
+
 // Waits default amount of time (PodStartTimeout) for the specified pod to become running.
 // Returns an error if timeout occurs first, or pod goes in to failed state.
 func WaitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error {
@@ -2218,7 +2321,11 @@ func DumpNodeDebugInfo(c *client.Client, nodeNames []string) {
             continue
         }
         for _, p := range podList.Items {
-            Logf("%v started at %v (%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.ContainerStatuses))
+            Logf("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
+            for _, c := range p.Status.InitContainerStatuses {
+                Logf("\tInit container %v ready: %v, restart count %v",
+                    c.Name, c.Ready, c.RestartCount)
+            }
             for _, c := range p.Status.ContainerStatuses {
                 Logf("\tContainer %v ready: %v, restart count %v",
                     c.Name, c.Ready, c.RestartCount)

 test/e2e/pods.go | 358
@@ -659,6 +659,364 @@ var _ = framework.KubeDescribe("Pods", func() {
         })
     })
 
+    It("should invoke init containers on a RestartNever pod", func() {
+        podClient := f.Client.Pods(f.Namespace.Name)
+
+        By("creating the pod")
+        name := "pod-init-" + string(util.NewUUID())
+        value := strconv.Itoa(time.Now().Nanosecond())
+        pod := &api.Pod{
+            ObjectMeta: api.ObjectMeta{
+                Name: name,
+                Labels: map[string]string{
+                    "name": "foo",
+                    "time": value,
+                },
+            },
+            Spec: api.PodSpec{
+                RestartPolicy: api.RestartPolicyNever,
+                InitContainers: []api.Container{
+                    {
+                        Name: "init1",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/true"},
+                    },
+                    {
+                        Name: "init2",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/true"},
+                    },
+                },
+                Containers: []api.Container{
+                    {
+                        Name: "run1",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/true"},
+                    },
+                },
+            },
+        }
+        defer podClient.Delete(pod.Name, nil)
+        startedPod, err := podClient.Create(pod)
+        if err != nil {
+            framework.Failf("Error creating a pod: %v", err)
+        }
+        w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
+        if err != nil {
+            framework.Failf("Error watching a pod: %v", err)
+        }
+        wr := watch.NewRecorder(w)
+        event, err := watch.Until(framework.PodStartTimeout, wr, client.PodCompleted)
+        Expect(err).To(BeNil())
+        framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
+        endPod := event.Object.(*api.Pod)
+
+        Expect(endPod.Status.Phase).To(Equal(api.PodSucceeded))
+        _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
+        Expect(init).NotTo(BeNil())
+        Expect(init.Status).To(Equal(api.ConditionTrue))
+
+        Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
+        for _, status := range endPod.Status.InitContainerStatuses {
+            Expect(status.Ready).To(BeTrue())
+            Expect(status.State.Terminated).NotTo(BeNil())
+            Expect(status.State.Terminated.ExitCode).To(BeZero())
+        }
+    })
+
+    It("should invoke init containers on a RestartAlways pod", func() {
+        podClient := f.Client.Pods(f.Namespace.Name)
+
+        By("creating the pod")
+        name := "pod-init-" + string(util.NewUUID())
+        value := strconv.Itoa(time.Now().Nanosecond())
+        pod := &api.Pod{
+            ObjectMeta: api.ObjectMeta{
+                Name: name,
+                Labels: map[string]string{
+                    "name": "foo",
+                    "time": value,
+                },
+            },
+            Spec: api.PodSpec{
+                InitContainers: []api.Container{
+                    {
+                        Name: "init1",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/true"},
+                    },
+                    {
+                        Name: "init2",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/true"},
+                    },
+                },
+                Containers: []api.Container{
+                    {
+                        Name: "run1",
+                        Image: "gcr.io/google_containers/pause:2.0",
+                        Resources: api.ResourceRequirements{
+                            Limits: api.ResourceList{
+                                api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
+                                api.ResourceMemory: *resource.NewQuantity(10*1024*1024, resource.DecimalSI),
+                            },
+                        },
+                    },
+                },
+            },
+        }
+        defer podClient.Delete(pod.Name, nil)
+        startedPod, err := podClient.Create(pod)
+        if err != nil {
+            framework.Failf("Error creating a pod: %v", err)
+        }
+        w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
+        if err != nil {
+            framework.Failf("Error watching a pod: %v", err)
+        }
+        wr := watch.NewRecorder(w)
+        event, err := watch.Until(framework.PodStartTimeout, wr, client.PodRunning)
+        Expect(err).To(BeNil())
+        framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
+        endPod := event.Object.(*api.Pod)
+
+        Expect(endPod.Status.Phase).To(Equal(api.PodRunning))
+        _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
+        Expect(init).NotTo(BeNil())
+        Expect(init.Status).To(Equal(api.ConditionTrue))
+
+        Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
+        for _, status := range endPod.Status.InitContainerStatuses {
+            Expect(status.Ready).To(BeTrue())
+            Expect(status.State.Terminated).NotTo(BeNil())
+            Expect(status.State.Terminated.ExitCode).To(BeZero())
+        }
+    })
+
+    It("should not start app containers if init containers fail on a RestartAlways pod", func() {
+        podClient := f.Client.Pods(f.Namespace.Name)
+
+        By("creating the pod")
+        name := "pod-init-" + string(util.NewUUID())
+        value := strconv.Itoa(time.Now().Nanosecond())
+        pod := &api.Pod{
+            ObjectMeta: api.ObjectMeta{
+                Name: name,
+                Labels: map[string]string{
+                    "name": "foo",
+                    "time": value,
+                },
+            },
+            Spec: api.PodSpec{
+                InitContainers: []api.Container{
+                    {
+                        Name: "init1",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/false"},
+                    },
+                    {
+                        Name: "init2",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/true"},
+                    },
+                },
+                Containers: []api.Container{
+                    {
+                        Name: "run1",
+                        Image: "gcr.io/google_containers/pause:2.0",
+                        Resources: api.ResourceRequirements{
+                            Limits: api.ResourceList{
+                                api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
+                                api.ResourceMemory: *resource.NewQuantity(10*1024*1024, resource.DecimalSI),
+                            },
+                        },
+                    },
+                },
+            },
+        }
+        defer podClient.Delete(pod.Name, nil)
+        startedPod, err := podClient.Create(pod)
+        if err != nil {
+            framework.Failf("Error creating a pod: %v", err)
+        }
+        w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
+        if err != nil {
+            framework.Failf("Error watching a pod: %v", err)
+        }
+
+        wr := watch.NewRecorder(w)
+        event, err := watch.Until(
+            framework.PodStartTimeout, wr,
+            // check for the first container to fail at least once
+            func(evt watch.Event) (bool, error) {
+                switch t := evt.Object.(type) {
+                case *api.Pod:
+                    for _, status := range t.Status.ContainerStatuses {
+                        if status.State.Waiting == nil {
+                            return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
+                        }
+                        if status.State.Waiting.Reason != "PodInitializing" {
+                            return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
+                        }
+                    }
+                    if len(t.Status.InitContainerStatuses) != 2 {
+                        return false, nil
+                    }
+                    status := t.Status.InitContainerStatuses[1]
+                    if status.State.Waiting == nil {
+                        return false, fmt.Errorf("second init container should not be out of waiting: %#v", status)
+                    }
+                    if status.State.Waiting.Reason != "PodInitializing" {
+                        return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
+                    }
+                    status = t.Status.InitContainerStatuses[0]
+                    if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
+                        return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
+                    }
+                    // continue until we see an attempt to restart the pod
+                    return status.LastTerminationState.Terminated != nil, nil
+                default:
+                    return false, fmt.Errorf("unexpected object: %#v", t)
+                }
+            },
+            // verify we get two restarts
+            func(evt watch.Event) (bool, error) {
+                switch t := evt.Object.(type) {
+                case *api.Pod:
+                    status := t.Status.InitContainerStatuses[0]
+                    if status.RestartCount < 3 {
+                        return false, nil
+                    }
+                    framework.Logf("init container has failed twice: %#v", t)
+                    // TODO: more conditions
+                    return true, nil
+                default:
+                    return false, fmt.Errorf("unexpected object: %#v", t)
+                }
+            },
+        )
+        Expect(err).To(BeNil())
+        framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
+        endPod := event.Object.(*api.Pod)
+
+        Expect(endPod.Status.Phase).To(Equal(api.PodPending))
+        _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
+        Expect(init).NotTo(BeNil())
+        Expect(init.Status).To(Equal(api.ConditionFalse))
+        Expect(init.Reason).To(Equal("ContainersNotInitialized"))
+        Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]"))
+        Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
+    })
+
+    It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
+        podClient := f.Client.Pods(f.Namespace.Name)
+
+        By("creating the pod")
+        name := "pod-init-" + string(util.NewUUID())
+        value := strconv.Itoa(time.Now().Nanosecond())
+        pod := &api.Pod{
+            ObjectMeta: api.ObjectMeta{
+                Name: name,
+                Labels: map[string]string{
+                    "name": "foo",
+                    "time": value,
+                },
+            },
+            Spec: api.PodSpec{
+                RestartPolicy: api.RestartPolicyNever,
+                InitContainers: []api.Container{
+                    {
+                        Name: "init1",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/true"},
+                    },
+                    {
+                        Name: "init2",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/false"},
+                    },
+                },
+                Containers: []api.Container{
+                    {
+                        Name: "run1",
+                        Image: "gcr.io/google_containers/busybox:1.24",
+                        Command: []string{"/bin/true"},
+                        Resources: api.ResourceRequirements{
+                            Limits: api.ResourceList{
+                                api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
+                                api.ResourceMemory: *resource.NewQuantity(10*1024*1024, resource.DecimalSI),
+                            },
+                        },
+                    },
+                },
+            },
+        }
+        defer podClient.Delete(pod.Name, nil)
+        startedPod, err := podClient.Create(pod)
+        if err != nil {
+            framework.Failf("Error creating a pod: %v", err)
+        }
+        w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
+        if err != nil {
+            framework.Failf("Error watching a pod: %v", err)
+        }
+
+        wr := watch.NewRecorder(w)
+        event, err := watch.Until(
+            framework.PodStartTimeout, wr,
+            // check for the second container to fail at least once
+            func(evt watch.Event) (bool, error) {
+                switch t := evt.Object.(type) {
+                case *api.Pod:
+                    for _, status := range t.Status.ContainerStatuses {
+                        if status.State.Waiting == nil {
+                            return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
+                        }
+                        if status.State.Waiting.Reason != "PodInitializing" {
+                            return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
+                        }
+                    }
+                    if len(t.Status.InitContainerStatuses) != 2 {
+                        return false, nil
+                    }
+                    status := t.Status.InitContainerStatuses[0]
+                    if status.State.Terminated == nil {
+                        if status.State.Waiting != nil && status.State.Waiting.Reason != "PodInitializing" {
+                            return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
+                        }
+                        return false, nil
+                    }
+                    if status.State.Terminated != nil && status.State.Terminated.ExitCode != 0 {
+                        return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
+                    }
+                    status = t.Status.InitContainerStatuses[1]
+                    if status.State.Terminated == nil {
+                        return false, nil
+                    }
+                    if status.State.Terminated.ExitCode == 0 {
+                        return false, fmt.Errorf("second init container should have failed: %#v", status)
+                    }
+                    return true, nil
+                default:
+                    return false, fmt.Errorf("unexpected object: %#v", t)
+                }
+            },
+            client.PodCompleted,
+        )
+        Expect(err).To(BeNil())
+        framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
+        endPod := event.Object.(*api.Pod)
+
+        Expect(endPod.Status.Phase).To(Equal(api.PodFailed))
+        _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
+        Expect(init).NotTo(BeNil())
+        Expect(init.Status).To(Equal(api.ConditionFalse))
+        Expect(init.Reason).To(Equal("ContainersNotInitialized"))
+        Expect(init.Message).To(Equal("containers with incomplete status: [init2]"))
+        Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
+        Expect(endPod.Status.ContainerStatuses[0].State.Waiting).ToNot(BeNil())
+    })
+
     It("should be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
         runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
             ObjectMeta: api.ObjectMeta{