Kubelet decides podStatus
parent c1ca07338f
commit f762e062f2
@@ -1036,10 +1036,6 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
	if err != nil {
		glog.Errorf("Unable to get pod with name %q and uid %q info, health checks may be invalid", podFullName, uid)
	}
	netInfo, found := podStatus.Info[dockertools.PodInfraContainerName]
	if found {
		podStatus.PodIP = netInfo.PodIP
	}

	for _, container := range pod.Spec.Containers {
		expectedHash := dockertools.HashContainer(&container)
@@ -1426,20 +1422,90 @@ func (kl *Kubelet) GetPodByName(namespace, name string) (*api.BoundPod, bool) {
	return nil, false
}

// getPhase returns the phase of a pod given its container info.
func getPhase(spec *api.PodSpec, info api.PodInfo) api.PodPhase {
	if info == nil {
		return api.PodPending
	}

	running := 0
	waiting := 0
	stopped := 0
	failed := 0
	succeeded := 0
	unknown := 0
	for _, container := range spec.Containers {
		if containerStatus, ok := info[container.Name]; ok {
			if containerStatus.State.Running != nil {
				running++
			} else if containerStatus.State.Termination != nil {
				stopped++
				if containerStatus.State.Termination.ExitCode == 0 {
					succeeded++
				} else {
					failed++
				}
			} else if containerStatus.State.Waiting != nil {
				waiting++
			} else {
				unknown++
			}
		} else {
			unknown++
		}
	}
	switch {
	case waiting > 0:
		// One or more containers has not been started
		return api.PodPending
	case running > 0 && unknown == 0:
		// All containers have been started, and at least
		// one container is running
		return api.PodRunning
	case running == 0 && stopped > 0 && unknown == 0:
		// All containers are terminated
		if spec.RestartPolicy.Always != nil {
			// All containers are in the process of restarting
			return api.PodRunning
		}
		if stopped == succeeded {
			// RestartPolicy is not Always, and all
			// containers are terminated in success
			return api.PodSucceeded
		}
		if spec.RestartPolicy.Never != nil {
			// RestartPolicy is Never, and all containers are
			// terminated with at least one in failure
			return api.PodFailed
		}
		// RestartPolicy is OnFailure, and at least one in failure
		// and in the process of restarting
		return api.PodRunning
	default:
		return api.PodPending
	}
}

// GetPodStatus returns information from Docker about the containers in a pod
func (kl *Kubelet) GetPodStatus(podFullName string, uid types.UID) (api.PodStatus, error) {
	var manifest api.PodSpec
	var spec api.PodSpec
	for _, pod := range kl.pods {
		if GetPodFullName(&pod) == podFullName {
			manifest = pod.Spec
			spec = pod.Spec
			break
		}
	}

	info, err := dockertools.GetDockerPodInfo(kl.dockerClient, manifest, podFullName, uid)
	info, err := dockertools.GetDockerPodInfo(kl.dockerClient, spec, podFullName, uid)

	// TODO(dchen1107): Determine PodPhase here
	var podStatus api.PodStatus
	podStatus.Phase = getPhase(&spec, info)
	netContainerInfo, found := info[dockertools.PodInfraContainerName]
	if found {
		podStatus.PodIP = netContainerInfo.PodIP
	}

	// TODO(dchen1107): Change Info to list from map
	podStatus.Info = info

	return podStatus, err
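The getPhase decision table above is easiest to see in isolation. The following standalone sketch is not part of the commit: it re-implements the same counting-and-switch logic with locally defined stand-in types (containerState, restartPolicy, and derivePhase are illustrative names, not the real api types), and it counts over the observed statuses directly rather than over spec.Containers, so containers with no reported status are not modeled here.

package main

import "fmt"

// containerState is a stand-in for api.ContainerState: exactly one of the
// flags is expected to be set for a given container.
type containerState struct {
	Running    bool
	Waiting    bool
	Terminated bool
	ExitCode   int
}

type restartPolicy int

const (
	always restartPolicy = iota
	onFailure
	never
)

// derivePhase mirrors getPhase: count container states, then decide.
func derivePhase(policy restartPolicy, containers map[string]containerState) string {
	if containers == nil {
		return "Pending"
	}
	running, waiting, stopped, succeeded, unknown := 0, 0, 0, 0, 0
	for _, c := range containers {
		switch {
		case c.Running:
			running++
		case c.Terminated:
			stopped++
			if c.ExitCode == 0 {
				succeeded++
			}
		case c.Waiting:
			waiting++
		default:
			unknown++
		}
	}
	switch {
	case waiting > 0:
		return "Pending" // something has not started yet
	case running > 0 && unknown == 0:
		return "Running"
	case running == 0 && stopped > 0 && unknown == 0:
		if policy == always {
			return "Running" // everything will be restarted
		}
		if stopped == succeeded {
			return "Succeeded"
		}
		if policy == never {
			return "Failed"
		}
		return "Running" // OnFailure: the failed containers restart
	default:
		return "Pending"
	}
}

func main() {
	// Two terminated containers, one succeeded and one failed.
	all := map[string]containerState{
		"a": {Terminated: true, ExitCode: 0},
		"b": {Terminated: true, ExitCode: 1},
	}
	fmt.Println(derivePhase(always, all))    // Running
	fmt.Println(derivePhase(onFailure, all)) // Running
	fmt.Println(derivePhase(never, all))     // Failed
}

Run as a plain Go program it prints Running, Running, Failed for that mixed terminated state under the three restart policies, which matches the switch in getPhase above.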
@@ -2240,3 +2240,322 @@ func TestMakeEnvironmentVariables(t *testing.T) {
		}
	}
}

func TestPodPhaseWithRestartAlways(t *testing.T) {
	desiredState := api.PodSpec{
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
	}
	currentState := api.PodStatus{
		Host: "machine",
	}
	runningState := api.ContainerStatus{
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	stoppedState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{},
		},
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": runningState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all running",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": stoppedState,
						"containerB": stoppedState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all stopped with restart always",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": stoppedState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"mixed state #1 with restart always",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
					},
					Host: "machine",
				},
			},
			api.PodPending,
			"mixed state #2 with restart always",
		},
	}
	for _, test := range tests {
		if status := getPhase(&test.pod.Spec, test.pod.Status.Info); status != test.status {
			t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
		}
	}
}

func TestPodPhaseWithRestartNever(t *testing.T) {
	desiredState := api.PodSpec{
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicy{Never: &api.RestartPolicyNever{}},
	}
	currentState := api.PodStatus{
		Host: "machine",
	}
	runningState := api.ContainerStatus{
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	succeededState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{
				ExitCode: 0,
			},
		},
	}
	failedState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{
				ExitCode: -1,
			},
		},
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": runningState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all running with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": succeededState,
						"containerB": succeededState,
					},
					Host: "machine",
				},
			},
			api.PodSucceeded,
			"all succeeded with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": failedState,
						"containerB": failedState,
					},
					Host: "machine",
				},
			},
			api.PodFailed,
			"all failed with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": succeededState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"mixed state #1 with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
					},
					Host: "machine",
				},
			},
			api.PodPending,
			"mixed state #2 with restart never",
		},
	}
	for _, test := range tests {
		if status := getPhase(&test.pod.Spec, test.pod.Status.Info); status != test.status {
			t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
		}
	}
}

func TestPodPhaseWithRestartOnFailure(t *testing.T) {
	desiredState := api.PodSpec{
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicy{OnFailure: &api.RestartPolicyOnFailure{}},
	}
	currentState := api.PodStatus{
		Host: "machine",
	}
	runningState := api.ContainerStatus{
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	succeededState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{
				ExitCode: 0,
			},
		},
	}
	failedState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{
				ExitCode: -1,
			},
		},
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": runningState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all running with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": succeededState,
						"containerB": succeededState,
					},
					Host: "machine",
				},
			},
			api.PodSucceeded,
			"all succeeded with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": failedState,
						"containerB": failedState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all failed with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": succeededState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"mixed state #1 with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
					},
					Host: "machine",
				},
			},
			api.PodPending,
			"mixed state #2 with restart onfailure",
		},
	}
	for _, test := range tests {
		if status := getPhase(&test.pod.Spec, test.pod.Status.Info); status != test.status {
			t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
		}
	}
}
@@ -21,7 +21,6 @@ import (

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/leaky"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod"
@@ -187,11 +186,13 @@ func (p *PodCache) computePodStatus(pod *api.Pod) (api.PodStatus, error) {
		newStatus.Phase = api.PodUnknown
	} else {
		newStatus.Info = result.Status.Info
		newStatus.Phase = getPhase(&pod.Spec, newStatus.Info)
		if netContainerInfo, ok := newStatus.Info[leaky.PodInfraContainerName]; ok {
			if netContainerInfo.PodIP != "" {
				newStatus.PodIP = netContainerInfo.PodIP
			}
		newStatus.PodIP = result.Status.PodIP
		if newStatus.Info == nil {
			// There is a small race window, kubelet just has PodSpec, but
			// couldn't retrieve any ContainerStatus.
			newStatus.Phase = api.PodPending
		} else {
			newStatus.Phase = result.Status.Phase
		}
	}
	return newStatus, err
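With this change the pod cache no longer recomputes the phase or digs the pod IP out of the infra container; it copies what the kubelet reported and only falls back to Pending when no container statuses came back at all. Below is a minimal standalone sketch of that fallback, not part of the commit, using a hypothetical kubeletStatus stand-in rather than the real api.PodStatus and PodCache types.

package main

import "fmt"

// kubeletStatus is a hypothetical stand-in for the status the pod cache
// receives back from a kubelet.
type kubeletStatus struct {
	Phase string
	PodIP string
	Info  map[string]struct{} // nil when no container statuses were retrieved
}

// reconcile mirrors the new computePodStatus branch: copy what the kubelet
// reported, but treat a missing Info map as the race window where the
// kubelet knows the PodSpec and has not produced container statuses yet.
func reconcile(result kubeletStatus) (phase, podIP string) {
	podIP = result.PodIP
	if result.Info == nil {
		return "Pending", podIP
	}
	return result.Phase, podIP
}

func main() {
	fmt.Println(reconcile(kubeletStatus{Phase: "Running", PodIP: "10.0.0.3", Info: map[string]struct{}{"net": {}}}))
	fmt.Println(reconcile(kubeletStatus{})) // race window: reported as Pending
}

This reflects the intent of the commit: phase derivation now lives in one place, the kubelet, and the apiserver-side cache only handles the case where the kubelet has the PodSpec but has not yet produced any container statuses.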
@@ -246,67 +247,3 @@ func (p *PodCache) UpdateAllContainers() {
	}
	wg.Wait()
}

// getPhase returns the phase of a pod given its container info.
// TODO(dchen1107): push this all the way down into kubelet.
func getPhase(spec *api.PodSpec, info api.PodInfo) api.PodPhase {
	if info == nil {
		return api.PodPending
	}
	running := 0
	waiting := 0
	stopped := 0
	failed := 0
	succeeded := 0
	unknown := 0
	for _, container := range spec.Containers {
		if containerStatus, ok := info[container.Name]; ok {
			if containerStatus.State.Running != nil {
				running++
			} else if containerStatus.State.Termination != nil {
				stopped++
				if containerStatus.State.Termination.ExitCode == 0 {
					succeeded++
				} else {
					failed++
				}
			} else if containerStatus.State.Waiting != nil {
				waiting++
			} else {
				unknown++
			}
		} else {
			unknown++
		}
	}
	switch {
	case waiting > 0:
		// One or more containers has not been started
		return api.PodPending
	case running > 0 && unknown == 0:
		// All containers have been started, and at least
		// one container is running
		return api.PodRunning
	case running == 0 && stopped > 0 && unknown == 0:
		// All containers are terminated
		if spec.RestartPolicy.Always != nil {
			// All containers are in the process of restarting
			return api.PodRunning
		}
		if stopped == succeeded {
			// RestartPolicy is not Always, and all
			// containers are terminated in success
			return api.PodSucceeded
		}
		if spec.RestartPolicy.Never != nil {
			// RestartPolicy is Never, and all containers are
			// terminated with at least one in failure
			return api.PodFailed
		}
		// RestartPolicy is OnFailure, and at least one in failure
		// and in the process of restarting
		return api.PodRunning
	default:
		return api.PodPending
	}
}
@@ -20,13 +20,11 @@ import (
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/leaky"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/registrytest"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

type podInfoCall struct {
@@ -175,17 +173,6 @@ func TestPodCacheGetMissing(t *testing.T) {
	if status == nil {
		t.Errorf("Unexpected non-status.")
	}
	expected := &api.PodStatus{
		Phase:  "Pending",
		Host:   "machine",
		HostIP: "1.2.3.5",
		Info: api.PodInfo{
			"bar": api.ContainerStatus{},
		},
	}
	if !reflect.DeepEqual(status, expected) {
		t.Errorf("expected:\n%#v\ngot:\n%#v\n", expected, status)
	}
}

type podCacheTestConfig struct {
@@ -337,43 +324,6 @@ func TestFillPodStatusMissingMachine(t *testing.T) {
	}
}

func TestFillPodStatus(t *testing.T) {
	pod := makePod(api.NamespaceDefault, "foo", "machine", "bar")
	expectedIP := "1.2.3.4"
	expectedTime, _ := time.Parse("2013-Feb-03", "2013-Feb-03")
	config := podCacheTestConfig{
		kubeletContainerInfo: api.PodStatus{
			Phase:  api.PodPending,
			Host:   "machine",
			HostIP: "ip of machine",
			PodIP:  expectedIP,
			Info: api.PodInfo{
				leaky.PodInfraContainerName: {
					State: api.ContainerState{
						Running: &api.ContainerStateRunning{
							StartedAt: util.NewTime(expectedTime),
						},
					},
					RestartCount: 1,
					PodIP:        expectedIP,
				},
			},
		},
		nodes: []api.Node{*makeHealthyNode("machine", "ip of machine")},
		pods:  []api.Pod{*pod},
	}
	cache := config.Construct()
	err := cache.updatePodStatus(&config.pods[0])
	if err != nil {
		t.Fatalf("Unexpected error: %+v", err)
	}

	status, err := cache.GetPodStatus(pod.Namespace, pod.Name)
	if e, a := &config.kubeletContainerInfo, status; !reflect.DeepEqual(e, a) {
		t.Errorf("Expected: %+v, Got %+v", e, a)
	}
}

func TestFillPodInfoNoData(t *testing.T) {
	pod := makePod(api.NamespaceDefault, "foo", "machine", "bar")
	expectedIP := ""
@@ -520,325 +470,6 @@ func TestPodPhaseWithBadNode(t *testing.T) {
	}
}

func TestPodPhaseWithRestartAlways(t *testing.T) {
	desiredState := api.PodSpec{
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
	}
	currentState := api.PodStatus{
		Host: "machine",
	}
	runningState := api.ContainerStatus{
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	stoppedState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{},
		},
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": runningState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all running",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": stoppedState,
						"containerB": stoppedState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all stopped with restart always",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": stoppedState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"mixed state #1 with restart always",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
					},
					Host: "machine",
				},
			},
			api.PodPending,
			"mixed state #2 with restart always",
		},
	}
	for _, test := range tests {
		if status := getPhase(&test.pod.Spec, test.pod.Status.Info); status != test.status {
			t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
		}
	}
}

func TestPodPhaseWithRestartNever(t *testing.T) {
	desiredState := api.PodSpec{
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicy{Never: &api.RestartPolicyNever{}},
	}
	currentState := api.PodStatus{
		Host: "machine",
	}
	runningState := api.ContainerStatus{
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	succeededState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{
				ExitCode: 0,
			},
		},
	}
	failedState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{
				ExitCode: -1,
			},
		},
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": runningState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all running with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": succeededState,
						"containerB": succeededState,
					},
					Host: "machine",
				},
			},
			api.PodSucceeded,
			"all succeeded with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": failedState,
						"containerB": failedState,
					},
					Host: "machine",
				},
			},
			api.PodFailed,
			"all failed with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": succeededState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"mixed state #1 with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
					},
					Host: "machine",
				},
			},
			api.PodPending,
			"mixed state #2 with restart never",
		},
	}
	for _, test := range tests {
		if status := getPhase(&test.pod.Spec, test.pod.Status.Info); status != test.status {
			t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
		}
	}
}

func TestPodPhaseWithRestartOnFailure(t *testing.T) {
	desiredState := api.PodSpec{
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicy{OnFailure: &api.RestartPolicyOnFailure{}},
	}
	currentState := api.PodStatus{
		Host: "machine",
	}
	runningState := api.ContainerStatus{
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	succeededState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{
				ExitCode: 0,
			},
		},
	}
	failedState := api.ContainerStatus{
		State: api.ContainerState{
			Termination: &api.ContainerStateTerminated{
				ExitCode: -1,
			},
		},
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": runningState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all running with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": succeededState,
						"containerB": succeededState,
					},
					Host: "machine",
				},
			},
			api.PodSucceeded,
			"all succeeded with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": failedState,
						"containerB": failedState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"all failed with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
						"containerB": succeededState,
					},
					Host: "machine",
				},
			},
			api.PodRunning,
			"mixed state #1 with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					Info: map[string]api.ContainerStatus{
						"containerA": runningState,
					},
					Host: "machine",
				},
			},
			api.PodPending,
			"mixed state #2 with restart onfailure",
		},
	}
	for _, test := range tests {
		if status := getPhase(&test.pod.Spec, test.pod.Status.Info); status != test.status {
			t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
		}
	}
}

func TestGarbageCollection(t *testing.T) {
	pod1 := makePod(api.NamespaceDefault, "foo", "machine", "bar")
	pod2 := makePod(api.NamespaceDefault, "baz", "machine", "qux")