Make pod listing constant-time

* move ip cache out of registry/pod
* combine, rationalize, and move pod status logic
* Fix unit and integration tests
Daniel Smith
2014-12-20 18:49:10 -08:00
parent 9b6aec5e22
commit 5b8e91595a
8 changed files with 1057 additions and 872 deletions
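
For orientation, a minimal sketch (not part of the diff): after this change, pod status is answered by a single PodStatusGetter lookup per pod instead of per-pod kubelet, node, and cloud-provider round trips, so Get and List no longer pay a remote call per pod. The sketch assumes it sits in the same package pod as the code below, so api, client, Registry, REST, RESTConfig, and NewREST are in scope; the cache type and helper name are hypothetical.

// Hypothetical example: an in-memory cache satisfying the new PodStatusGetter
// interface, and the wiring through RESTConfig/NewREST.
type staticStatusCache struct {
	statuses map[string]api.PodStatus // keyed by "namespace/name"
}

func (c *staticStatusCache) GetPodStatus(namespace, name string) (*api.PodStatus, error) {
	if status, ok := c.statuses[namespace+"/"+name]; ok {
		return &status, nil
	}
	// Get and List treat any error as "phase unknown" rather than failing the request.
	return nil, client.ErrPodInfoNotAvailable
}

func newRESTWithCache(registry Registry) *REST {
	return NewREST(&RESTConfig{
		PodCache: &staticStatusCache{statuses: map[string]api.PodStatus{}},
		Registry: registry,
	})
}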

@@ -18,72 +18,37 @@ package pod
import (
"fmt"
"sync"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/golang/glog"
)
type ipCacheEntry struct {
ip string
lastUpdate time.Time
}
type ipCache map[string]ipCacheEntry
type clock interface {
Now() time.Time
}
type realClock struct{}
func (r realClock) Now() time.Time {
return time.Now()
}
type PodStatusGetter interface {
GetPodStatus(namespace, name string) (*api.PodStatus, error)
}
// REST implements the RESTStorage interface in terms of a PodRegistry.
type REST struct {
cloudProvider cloudprovider.Interface
mu sync.Mutex
podCache client.PodInfoGetter
podInfoGetter client.PodInfoGetter
podPollPeriod time.Duration
registry Registry
nodes client.NodeInterface
ipCache ipCache
clock clock
podCache PodStatusGetter
registry Registry
}
type RESTConfig struct {
CloudProvider cloudprovider.Interface
PodCache client.PodInfoGetter
PodInfoGetter client.PodInfoGetter
Registry Registry
Nodes client.NodeInterface
PodCache PodStatusGetter
Registry Registry
}
// NewREST returns a new REST.
func NewREST(config *RESTConfig) *REST {
return &REST{
cloudProvider: config.CloudProvider,
podCache: config.PodCache,
podInfoGetter: config.PodInfoGetter,
podPollPeriod: time.Second * 10,
registry: config.Registry,
nodes: config.Nodes,
ipCache: ipCache{},
clock: realClock{},
podCache: config.PodCache,
registry: config.Registry,
}
}
@@ -123,17 +88,17 @@ func (rs *REST) Get(ctx api.Context, id string) (runtime.Object, error) {
if pod == nil {
return pod, nil
}
if rs.podCache != nil || rs.podInfoGetter != nil {
rs.fillPodInfo(pod)
status, err := getPodStatus(pod, rs.nodes)
if err != nil {
return pod, err
host := pod.Status.Host
if status, err := rs.podCache.GetPodStatus(pod.Namespace, pod.Name); err != nil {
pod.Status = api.PodStatus{
Phase: api.PodUnknown,
}
pod.Status.Phase = status
}
if pod.Status.Host != "" {
pod.Status.HostIP = rs.getInstanceIP(pod.Status.Host)
} else {
pod.Status = *status
}
// Make sure not to hide a recent host with an old one from the cache.
// TODO: move host to spec
pod.Status.Host = host
return pod, err
}
@@ -168,15 +133,18 @@ func (rs *REST) List(ctx api.Context, label, field labels.Selector) (runtime.Obj
if err == nil {
for i := range pods.Items {
pod := &pods.Items[i]
rs.fillPodInfo(pod)
status, err := getPodStatus(pod, rs.nodes)
if err != nil {
status = api.PodUnknown
}
pod.Status.Phase = status
if pod.Status.Host != "" {
pod.Status.HostIP = rs.getInstanceIP(pod.Status.Host)
host := pod.Status.Host
if status, err := rs.podCache.GetPodStatus(pod.Namespace, pod.Name); err != nil {
pod.Status = api.PodStatus{
Phase: api.PodUnknown,
}
} else {
pod.Status = *status
}
// Make sure not to hide a recent host with an old one from the cache.
// This is tested by the integration test.
// TODO: move host to spec
pod.Status.Host = host
}
}
return pods, err
@@ -207,148 +175,3 @@ func (rs *REST) Update(ctx api.Context, obj runtime.Object) (<-chan apiserver.RE
return rs.registry.GetPod(ctx, pod.Name)
}), nil
}
func (rs *REST) fillPodInfo(pod *api.Pod) {
if pod.Status.Host == "" {
return
}
// Get cached info for the list currently.
// TODO: Optionally use fresh info
if rs.podCache != nil {
info, err := rs.podCache.GetPodInfo(pod.Status.Host, pod.Namespace, pod.Name)
if err != nil {
if err != client.ErrPodInfoNotAvailable {
glog.Errorf("Error getting container info from cache: %v", err)
}
if rs.podInfoGetter != nil {
info, err = rs.podInfoGetter.GetPodInfo(pod.Status.Host, pod.Namespace, pod.Name)
}
if err != nil {
if err != client.ErrPodInfoNotAvailable {
glog.Errorf("Error getting fresh container info: %v", err)
}
return
}
}
pod.Status.Info = info.ContainerInfo
netContainerInfo, ok := pod.Status.Info["net"]
if ok {
if netContainerInfo.PodIP != "" {
pod.Status.PodIP = netContainerInfo.PodIP
} else if netContainerInfo.State.Running != nil {
glog.Warningf("No network settings: %#v", netContainerInfo)
}
} else {
glog.Warningf("Couldn't find network container for %s in %v", pod.Name, info)
}
}
}
func (rs *REST) getInstanceIP(host string) string {
data, ok := rs.ipCache[host]
now := rs.clock.Now()
if !ok || now.Sub(data.lastUpdate) > (30*time.Second) {
ip := getInstanceIPFromCloud(rs.cloudProvider, host)
data = ipCacheEntry{
ip: ip,
lastUpdate: now,
}
rs.ipCache[host] = data
}
return data.ip
}
func getInstanceIPFromCloud(cloud cloudprovider.Interface, host string) string {
if cloud == nil {
return ""
}
instances, ok := cloud.Instances()
if instances == nil || !ok {
return ""
}
addr, err := instances.IPAddress(host)
if err != nil {
glog.Errorf("Error getting instance IP for %q: %v", host, err)
return ""
}
return addr.String()
}
func getPodStatus(pod *api.Pod, nodes client.NodeInterface) (api.PodPhase, error) {
if pod.Status.Host == "" {
return api.PodPending, nil
}
if nodes != nil {
_, err := nodes.Get(pod.Status.Host)
if err != nil {
if errors.IsNotFound(err) {
return api.PodFailed, nil
}
glog.Errorf("Error getting pod info: %v", err)
return api.PodUnknown, nil
}
} else {
glog.Errorf("Unexpected missing minion interface, status may be in-accurate")
}
if pod.Status.Info == nil {
return api.PodPending, nil
}
// TODO(dchen1107): move the entire logic to kubelet?
running := 0
waiting := 0
stopped := 0
failed := 0
succeeded := 0
unknown := 0
for _, container := range pod.Spec.Containers {
if containerStatus, ok := pod.Status.Info[container.Name]; ok {
if containerStatus.State.Running != nil {
running++
} else if containerStatus.State.Termination != nil {
stopped++
if containerStatus.State.Termination.ExitCode == 0 {
succeeded++
} else {
failed++
}
} else if containerStatus.State.Waiting != nil {
waiting++
} else {
unknown++
}
} else {
unknown++
}
}
switch {
case waiting > 0:
// One or more containers have not been started
return api.PodPending, nil
case running > 0 && unknown == 0:
// All containers have been started, and at least
// one container is running
return api.PodRunning, nil
case running == 0 && stopped > 0 && unknown == 0:
// All containers are terminated
if pod.Spec.RestartPolicy.Always != nil {
// All containers are in the process of restarting
return api.PodRunning, nil
}
if stopped == succeeded {
// RestartPolicy is not Always, and all
// containers are terminated in success
return api.PodSucceeded, nil
}
if pod.Spec.RestartPolicy.Never != nil {
// RestartPolicy is Never, and all containers are
// terminated with at least one in failure
return api.PodFailed, nil
}
// RestartPolicy is OnFailure, and at least one in failure
// and in the process of restarting
return api.PodRunning, nil
default:
return api.PodPending, nil
}
}

@@ -28,12 +28,25 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
fake_cloud "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/registrytest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
type fakeCache struct {
requestedNamespace string
requestedName string
statusToReturn *api.PodStatus
errorToReturn error
}
func (f *fakeCache) GetPodStatus(namespace, name string) (*api.PodStatus, error) {
f.requestedNamespace = namespace
f.requestedName = name
return f.statusToReturn, f.errorToReturn
}
func expectApiStatusError(t *testing.T, ch <-chan apiserver.RESTResult, msg string) {
out := <-ch
status, ok := out.Object.(*api.Status)
@@ -61,6 +74,7 @@ func TestCreatePodRegistryError(t *testing.T) {
podRegistry.Err = fmt.Errorf("test error")
storage := REST{
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
pod := &api.Pod{}
ctx := api.NewDefaultContext()
@@ -76,6 +90,7 @@ func TestCreatePodSetsIds(t *testing.T) {
podRegistry.Err = fmt.Errorf("test error")
storage := REST{
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
pod := &api.Pod{}
ctx := api.NewDefaultContext()
@@ -98,6 +113,7 @@ func TestCreatePodSetsUID(t *testing.T) {
podRegistry.Err = fmt.Errorf("test error")
storage := REST{
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
pod := &api.Pod{}
ctx := api.NewDefaultContext()
@@ -117,6 +133,7 @@ func TestListPodsError(t *testing.T) {
podRegistry.Err = fmt.Errorf("test error")
storage := REST{
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
ctx := api.NewContext()
pods, err := storage.List(ctx, labels.Everything(), labels.Everything())
@@ -128,10 +145,40 @@ func TestListPodsError(t *testing.T) {
}
}
func TestListPodsCacheError(t *testing.T) {
podRegistry := registrytest.NewPodRegistry(nil)
podRegistry.Pods = &api.PodList{
Items: []api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
},
},
}
storage := REST{
registry: podRegistry,
podCache: &fakeCache{errorToReturn: client.ErrPodInfoNotAvailable},
}
ctx := api.NewContext()
pods, err := storage.List(ctx, labels.Everything(), labels.Everything())
if err != nil {
t.Fatalf("Expected no error, got %#v", err)
}
pl := pods.(*api.PodList)
if len(pl.Items) != 1 {
t.Fatalf("Unexpected 0-len pod list: %+v", pl)
}
if e, a := api.PodUnknown, pl.Items[0].Status.Phase; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
}
func TestListEmptyPodList(t *testing.T) {
podRegistry := registrytest.NewPodRegistry(&api.PodList{ListMeta: api.ListMeta{ResourceVersion: "1"}})
storage := REST{
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
ctx := api.NewContext()
pods, err := storage.List(ctx, labels.Everything(), labels.Everything())
@@ -147,14 +194,6 @@ func TestListEmptyPodList(t *testing.T) {
}
}
type fakeClock struct {
t time.Time
}
func (f *fakeClock) Now() time.Time {
return f.t
}
func TestListPodList(t *testing.T) {
podRegistry := registrytest.NewPodRegistry(nil)
podRegistry.Pods = &api.PodList{
@@ -173,8 +212,7 @@ func TestListPodList(t *testing.T) {
}
storage := REST{
registry: podRegistry,
ipCache: ipCache{},
clock: &fakeClock{},
podCache: &fakeCache{statusToReturn: &api.PodStatus{Phase: api.PodRunning}},
}
ctx := api.NewContext()
podsObj, err := storage.List(ctx, labels.Everything(), labels.Everything())
@@ -186,7 +224,7 @@ func TestListPodList(t *testing.T) {
if len(pods.Items) != 2 {
t.Errorf("Unexpected pod list: %#v", pods)
}
if pods.Items[0].Name != "foo" {
if pods.Items[0].Name != "foo" || pods.Items[0].Status.Phase != api.PodRunning {
t.Errorf("Unexpected pod: %#v", pods.Items[0])
}
if pods.Items[1].Name != "bar" {
@@ -218,8 +256,7 @@ func TestListPodListSelection(t *testing.T) {
}
storage := REST{
registry: podRegistry,
ipCache: ipCache{},
clock: &fakeClock{},
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
ctx := api.NewContext()
@@ -283,6 +320,7 @@ func TestPodDecode(t *testing.T) {
podRegistry := registrytest.NewPodRegistry(nil)
storage := REST{
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
expected := &api.Pod{
ObjectMeta: api.ObjectMeta{
@@ -305,12 +343,37 @@ func TestPodDecode(t *testing.T) {
}
func TestGetPod(t *testing.T) {
podRegistry := registrytest.NewPodRegistry(nil)
podRegistry.Pod = &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Status: api.PodStatus{Host: "machine"},
}
storage := REST{
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{Phase: api.PodRunning}},
}
ctx := api.NewContext()
obj, err := storage.Get(ctx, "foo")
pod := obj.(*api.Pod)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
expect := *podRegistry.Pod
expect.Status.Phase = api.PodRunning
// TODO: when host is moved to spec, remove this line.
expect.Status.Host = "machine"
if e, a := &expect, pod; !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected pod. Expected %#v, Got %#v", e, a)
}
}
func TestGetPodCacheError(t *testing.T) {
podRegistry := registrytest.NewPodRegistry(nil)
podRegistry.Pod = &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
storage := REST{
registry: podRegistry,
ipCache: ipCache{},
clock: &fakeClock{},
podCache: &fakeCache{errorToReturn: client.ErrPodInfoNotAvailable},
}
ctx := api.NewContext()
obj, err := storage.Get(ctx, "foo")
@@ -319,497 +382,19 @@ func TestGetPod(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}
if e, a := podRegistry.Pod, pod; !reflect.DeepEqual(e, a) {
expect := *podRegistry.Pod
expect.Status.Phase = api.PodUnknown
if e, a := &expect, pod; !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected pod. Expected %#v, Got %#v", e, a)
}
}
func TestGetPodCloud(t *testing.T) {
fakeCloud := &fake_cloud.FakeCloud{}
podRegistry := registrytest.NewPodRegistry(nil)
podRegistry.Pod = &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}, Status: api.PodStatus{Host: "machine"}}
clock := &fakeClock{t: time.Now()}
storage := REST{
registry: podRegistry,
cloudProvider: fakeCloud,
ipCache: ipCache{},
clock: clock,
}
ctx := api.NewContext()
obj, err := storage.Get(ctx, "foo")
pod := obj.(*api.Pod)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if e, a := podRegistry.Pod, pod; !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected pod. Expected %#v, Got %#v", e, a)
}
// This call should hit the cache, so we expect no additional calls to the cloud
obj, err = storage.Get(ctx, "foo")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(fakeCloud.Calls) != 1 || fakeCloud.Calls[0] != "ip-address" {
t.Errorf("Unexpected calls: %#v", fakeCloud.Calls)
}
// Advance the clock, this call should miss the cache, so expect one more call.
clock.t = clock.t.Add(60 * time.Second)
obj, err = storage.Get(ctx, "foo")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(fakeCloud.Calls) != 2 || fakeCloud.Calls[1] != "ip-address" {
t.Errorf("Unexpected calls: %#v", fakeCloud.Calls)
}
}
func TestPodStatusWithBadNode(t *testing.T) {
fakeClient := client.Fake{
MinionsList: api.NodeList{
Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: "machine"},
},
},
},
}
desiredState := api.PodSpec{
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
}
runningState := api.ContainerStatus{
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
stoppedState := api.ContainerStatus{
State: api.ContainerState{
Termination: &api.ContainerStateTerminated{},
},
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Host: "machine-2",
},
},
api.PodFailed,
"no info, but bad machine",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
"containerB": runningState,
},
Host: "machine-two",
},
},
api.PodFailed,
"all running but minion is missing",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": stoppedState,
"containerB": stoppedState,
},
Host: "machine-two",
},
},
api.PodFailed,
"all stopped but minion missing",
},
}
for _, test := range tests {
if status, err := getPodStatus(test.pod, fakeClient.Nodes()); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
if err != nil {
t.Errorf("In test %s, unexpected error: %v", test.test, err)
}
}
}
}
func TestPodStatusWithRestartAlways(t *testing.T) {
fakeClient := client.Fake{
MinionsList: api.NodeList{
Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: "machine"},
},
},
},
}
desiredState := api.PodSpec{
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
}
currentState := api.PodStatus{
Host: "machine",
}
runningState := api.ContainerStatus{
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
stoppedState := api.ContainerStatus{
State: api.ContainerState{
Termination: &api.ContainerStateTerminated{},
},
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
"containerB": runningState,
},
Host: "machine",
},
},
api.PodRunning,
"all running",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": stoppedState,
"containerB": stoppedState,
},
Host: "machine",
},
},
api.PodRunning,
"all stopped with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
"containerB": stoppedState,
},
Host: "machine",
},
},
api.PodRunning,
"mixed state #1 with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
},
Host: "machine",
},
},
api.PodPending,
"mixed state #2 with restart always",
},
}
for _, test := range tests {
if status, err := getPodStatus(test.pod, fakeClient.Nodes()); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
if err != nil {
t.Errorf("In test %s, unexpected error: %v", test.test, err)
}
}
}
}
func TestPodStatusWithRestartNever(t *testing.T) {
fakeClient := client.Fake{
MinionsList: api.NodeList{
Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: "machine"},
},
},
},
}
desiredState := api.PodSpec{
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicy{Never: &api.RestartPolicyNever{}},
}
currentState := api.PodStatus{
Host: "machine",
}
runningState := api.ContainerStatus{
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
succeededState := api.ContainerStatus{
State: api.ContainerState{
Termination: &api.ContainerStateTerminated{
ExitCode: 0,
},
},
}
failedState := api.ContainerStatus{
State: api.ContainerState{
Termination: &api.ContainerStateTerminated{
ExitCode: -1,
},
},
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
"containerB": runningState,
},
Host: "machine",
},
},
api.PodRunning,
"all running with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": succeededState,
"containerB": succeededState,
},
Host: "machine",
},
},
api.PodSucceeded,
"all succeeded with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": failedState,
"containerB": failedState,
},
Host: "machine",
},
},
api.PodFailed,
"all failed with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
"containerB": succeededState,
},
Host: "machine",
},
},
api.PodRunning,
"mixed state #1 with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
},
Host: "machine",
},
},
api.PodPending,
"mixed state #2 with restart never",
},
}
for _, test := range tests {
if status, err := getPodStatus(test.pod, fakeClient.Nodes()); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
if err != nil {
t.Errorf("In test %s, unexpected error: %v", test.test, err)
}
}
}
}
func TestPodStatusWithRestartOnFailure(t *testing.T) {
fakeClient := client.Fake{
MinionsList: api.NodeList{
Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: "machine"},
},
},
},
}
desiredState := api.PodSpec{
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicy{OnFailure: &api.RestartPolicyOnFailure{}},
}
currentState := api.PodStatus{
Host: "machine",
}
runningState := api.ContainerStatus{
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
succeededState := api.ContainerStatus{
State: api.ContainerState{
Termination: &api.ContainerStateTerminated{
ExitCode: 0,
},
},
}
failedState := api.ContainerStatus{
State: api.ContainerState{
Termination: &api.ContainerStateTerminated{
ExitCode: -1,
},
},
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: currentState}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
"containerB": runningState,
},
Host: "machine",
},
},
api.PodRunning,
"all running with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": succeededState,
"containerB": succeededState,
},
Host: "machine",
},
},
api.PodSucceeded,
"all succeeded with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": failedState,
"containerB": failedState,
},
Host: "machine",
},
},
api.PodRunning,
"all failed with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
"containerB": succeededState,
},
Host: "machine",
},
},
api.PodRunning,
"mixed state #1 with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
Info: map[string]api.ContainerStatus{
"containerA": runningState,
},
Host: "machine",
},
},
api.PodPending,
"mixed state #2 with restart onfailure",
},
}
for _, test := range tests {
if status, err := getPodStatus(test.pod, fakeClient.Nodes()); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
if err != nil {
t.Errorf("In test %s, unexpected error: %v", test.test, err)
}
}
}
}
func TestPodStorageValidatesCreate(t *testing.T) {
podRegistry := registrytest.NewPodRegistry(nil)
podRegistry.Err = fmt.Errorf("test error")
storage := REST{
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
ctx := api.NewDefaultContext()
pod := &api.Pod{
@@ -837,8 +422,8 @@ func TestCreatePod(t *testing.T) {
},
}
storage := REST{
registry: podRegistry,
podPollPeriod: time.Millisecond * 100,
registry: podRegistry,
podCache: &fakeCache{statusToReturn: &api.PodStatus{}},
}
pod := &api.Pod{}
pod.Name = "foo"
@@ -867,57 +452,6 @@ func (f *FakePodInfoGetter) GetPodInfo(host, podNamespace string, podID string)
return api.PodContainerInfo{ContainerInfo: f.info}, f.err
}
func TestFillPodInfo(t *testing.T) {
expectedIP := "1.2.3.4"
expectedTime, _ := time.Parse("2013-Feb-03", "2013-Feb-03")
fakeGetter := FakePodInfoGetter{
info: map[string]api.ContainerStatus{
"net": {
State: api.ContainerState{
Running: &api.ContainerStateRunning{
StartedAt: util.NewTime(expectedTime),
},
},
RestartCount: 1,
PodIP: expectedIP,
},
},
}
storage := REST{
podCache: &fakeGetter,
}
pod := api.Pod{Status: api.PodStatus{Host: "foo"}}
storage.fillPodInfo(&pod)
if !reflect.DeepEqual(fakeGetter.info, pod.Status.Info) {
t.Errorf("Expected: %#v, Got %#v", fakeGetter.info, pod.Status.Info)
}
if pod.Status.PodIP != expectedIP {
t.Errorf("Expected %s, Got %s", expectedIP, pod.Status.PodIP)
}
}
func TestFillPodInfoNoData(t *testing.T) {
expectedIP := ""
fakeGetter := FakePodInfoGetter{
info: map[string]api.ContainerStatus{
"net": {
State: api.ContainerState{},
},
},
}
storage := REST{
podCache: &fakeGetter,
}
pod := api.Pod{Status: api.PodStatus{Host: "foo"}}
storage.fillPodInfo(&pod)
if !reflect.DeepEqual(fakeGetter.info, pod.Status.Info) {
t.Errorf("Expected %#v, Got %#v", fakeGetter.info, pod.Status.Info)
}
if pod.Status.PodIP != expectedIP {
t.Errorf("Expected %s, Got %s", expectedIP, pod.Status.PodIP)
}
}
func TestCreatePodWithConflictingNamespace(t *testing.T) {
storage := REST{}
pod := &api.Pod{