Merge pull request #5614 from yifan-gu/clean_prober

kubelet: Remove docker container in prober's interface
Victor Marmol 2015-03-18 15:05:24 -07:00
commit 9a3afb7628
3 changed files with 26 additions and 29 deletions
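
In short: the prober's entry points no longer take a *docker.APIContainers; callers pass the container ID and creation timestamp directly, which also lets the prober drop its dockertools and go-dockerclient imports. A condensed before/after of the central signature, excerpted from the hunks below:

-func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error)
+func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error)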

@@ -180,7 +180,7 @@ func NewMainKubelet(
         statusUpdateFrequency: statusUpdateFrequency,
         resyncInterval: resyncInterval,
         podInfraContainerImage: podInfraContainerImage,
-        dockerIDToRef: map[dockertools.DockerID]*api.ObjectReference{},
+        containerIDToRef: map[string]*api.ObjectReference{},
         runner: dockertools.NewDockerContainerCommandRunner(dockerClient),
         httpClient: &http.Client{},
         pullQPS: pullQPS,
@@ -264,8 +264,8 @@ type Kubelet struct {
     // Needed to report events for containers belonging to deleted/modified pods.
     // Tracks references for reporting events
-    dockerIDToRef map[dockertools.DockerID]*api.ObjectReference
-    refLock sync.RWMutex
+    containerIDToRef map[string]*api.ObjectReference
+    refLock sync.RWMutex

     // Optional, defaults to simple Docker implementation
     dockerPuller dockertools.DockerPuller
@@ -673,27 +673,27 @@ func containerRef(pod *api.Pod, container *api.Container) (*api.ObjectReference,
 }
 // setRef stores a reference to a pod's container, associating it with the given docker id.
-func (kl *Kubelet) setRef(id dockertools.DockerID, ref *api.ObjectReference) {
+func (kl *Kubelet) setRef(id string, ref *api.ObjectReference) {
     kl.refLock.Lock()
     defer kl.refLock.Unlock()
-    if kl.dockerIDToRef == nil {
-        kl.dockerIDToRef = map[dockertools.DockerID]*api.ObjectReference{}
+    if kl.containerIDToRef == nil {
+        kl.containerIDToRef = map[string]*api.ObjectReference{}
     }
-    kl.dockerIDToRef[id] = ref
+    kl.containerIDToRef[id] = ref
 }
 // clearRef forgets the given docker id and its associated container reference.
-func (kl *Kubelet) clearRef(id dockertools.DockerID) {
+func (kl *Kubelet) clearRef(id string) {
     kl.refLock.Lock()
     defer kl.refLock.Unlock()
-    delete(kl.dockerIDToRef, id)
+    delete(kl.containerIDToRef, id)
 }
 // getRef returns the container reference of the given id, or (nil, false) if none is stored.
-func (kl *Kubelet) getRef(id dockertools.DockerID) (ref *api.ObjectReference, ok bool) {
+func (kl *Kubelet) getRef(id string) (ref *api.ObjectReference, ok bool) {
     kl.refLock.RLock()
     defer kl.refLock.RUnlock()
-    ref, ok = kl.dockerIDToRef[id]
+    ref, ok = kl.containerIDToRef[id]
     return ref, ok
 }
@@ -733,7 +733,7 @@ func (kl *Kubelet) runContainer(pod *api.Pod, container *api.Container, podVolum
     }
     // Remember this reference so we can report events about this container
     if ref != nil {
-        kl.setRef(dockertools.DockerID(dockerContainer.ID), ref)
+        kl.setRef(dockerContainer.ID, ref)
         kl.recorder.Eventf(ref, "created", "Created with docker id %v", dockerContainer.ID)
     }
@@ -949,7 +949,7 @@ func (kl *Kubelet) killContainerByID(ID string) error {
     kl.readiness.remove(ID)
     err := kl.dockerClient.StopContainer(ID, 10)
-    ref, ok := kl.getRef(dockertools.DockerID(ID))
+    ref, ok := kl.getRef(ID)
     if !ok {
         glog.Warningf("No ref for pod '%v'", ID)
     } else {
@@ -1217,7 +1217,7 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, hasMirrorPod bool, c
         // look for changes in the container.
         containerChanged := hash != 0 && hash != expectedHash
         if !containerChanged {
-            result, err := kl.probeContainer(pod, podStatus, container, dockerContainer)
+            result, err := kl.probeContainer(pod, podStatus, container, dockerContainer.ID, dockerContainer.Created)
            if err != nil {
                // TODO(vmarmol): examine this logic.
                glog.Infof("probe no-error: %s", container.Name)

@@ -24,14 +24,12 @@ import (
     "time"

     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
     execprobe "github.com/GoogleCloudPlatform/kubernetes/pkg/probe/exec"
     httprobe "github.com/GoogleCloudPlatform/kubernetes/pkg/probe/http"
     tcprobe "github.com/GoogleCloudPlatform/kubernetes/pkg/probe/tcp"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
-    "github.com/fsouza/go-dockerclient"
     "github.com/golang/glog"
 )
@@ -41,32 +39,31 @@ const maxProbeRetries = 3
 // probeContainer probes the liveness/readiness of the given container.
 // If the container's liveness probe is unsuccessful, set readiness to false.
 // If liveness is successful, do a readiness check and set readiness accordingly.
-func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
     // Probe liveness.
-    live, err := kl.probeContainerLiveness(pod, status, container, dockerContainer)
+    live, err := kl.probeContainerLiveness(pod, status, container, createdAt)
     if err != nil {
         glog.V(1).Infof("Liveness probe errored: %v", err)
-        kl.readiness.set(dockerContainer.ID, false)
+        kl.readiness.set(containerID, false)
         return probe.Unknown, err
     }
     if live != probe.Success {
         glog.V(1).Infof("Liveness probe unsuccessful: %v", live)
-        kl.readiness.set(dockerContainer.ID, false)
+        kl.readiness.set(containerID, false)
         return live, nil
     }
     // Probe readiness.
-    ready, err := kl.probeContainerReadiness(pod, status, container, dockerContainer)
+    ready, err := kl.probeContainerReadiness(pod, status, container, createdAt)
     if err == nil && ready == probe.Success {
         glog.V(3).Infof("Readiness probe successful: %v", ready)
-        kl.readiness.set(dockerContainer.ID, true)
+        kl.readiness.set(containerID, true)
         return probe.Success, nil
     }
     glog.V(1).Infof("Readiness probe failed/errored: %v, %v", ready, err)
-    kl.readiness.set(dockerContainer.ID, false)
+    kl.readiness.set(containerID, false)
-    containerID := dockertools.DockerID(dockerContainer.ID)
     ref, ok := kl.getRef(containerID)
     if !ok {
         glog.Warningf("No ref for pod '%v' - '%v'", containerID, container.Name)
@@ -78,12 +75,12 @@ func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container
 // probeContainerLiveness probes the liveness of a container.
 // If the initalDelay since container creation on liveness probe has not passed the probe will return probe.Success.
-func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, container api.Container, createdAt int64) (probe.Result, error) {
     p := container.LivenessProbe
     if p == nil {
         return probe.Success, nil
     }
-    if time.Now().Unix()-dockerContainer.Created < p.InitialDelaySeconds {
+    if time.Now().Unix()-createdAt < p.InitialDelaySeconds {
         return probe.Success, nil
     }
     return kl.runProbeWithRetries(p, pod, status, container, maxProbeRetries)
@@ -91,12 +88,12 @@ func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, co
 // probeContainerLiveness probes the readiness of a container.
 // If the initial delay on the readiness probe has not passed the probe will return probe.Failure.
-func (kl *Kubelet) probeContainerReadiness(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainerReadiness(pod *api.Pod, status api.PodStatus, container api.Container, createdAt int64) (probe.Result, error) {
     p := container.ReadinessProbe
     if p == nil {
         return probe.Success, nil
     }
-    if time.Now().Unix()-dockerContainer.Created < p.InitialDelaySeconds {
+    if time.Now().Unix()-createdAt < p.InitialDelaySeconds {
         return probe.Failure, nil
     }
     return kl.runProbeWithRetries(p, pod, status, container, maxProbeRetries)
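
The only Docker-specific datum the liveness/readiness checks actually consumed was the container's creation time, which is why the new signatures take a bare Unix timestamp. Below is a small runnable sketch of that initial-delay gate; the Result placeholder type and the withinInitialDelay helper are illustrative only (the real code returns probe.Success for liveness and probe.Failure for readiness while the window is open, then falls through to runProbeWithRetries).

package main

import (
	"fmt"
	"time"
)

// Result stands in for probe.Result in this sketch.
type Result string

const (
	Success Result = "success"
	Failure Result = "failure"
)

// withinInitialDelay reports whether a container created at createdAt
// (seconds since the Unix epoch, as the prober now receives it) is still
// inside the probe's InitialDelaySeconds window.
func withinInitialDelay(createdAt, initialDelaySeconds int64) bool {
	return time.Now().Unix()-createdAt < initialDelaySeconds
}

func main() {
	createdAt := time.Now().Unix() - 5 // container created 5 seconds ago

	// Liveness: while the window is open the container is assumed live.
	if withinInitialDelay(createdAt, 10) {
		fmt.Println("liveness:", Success)
	}
	// Readiness: while the window is open the container is reported not ready.
	if withinInitialDelay(createdAt, 10) {
		fmt.Println("readiness:", Failure)
	}
}

The test file below only has to swap dc for dc.ID and dc.Created at its single call site.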

@@ -400,7 +400,7 @@ func TestProbeContainer(t *testing.T) {
        } else {
            kl = makeTestKubelet(test.expectedResult, nil)
        }
-        result, err := kl.probeContainer(&api.Pod{}, api.PodStatus{}, test.testContainer, dc)
+        result, err := kl.probeContainer(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created)
        if test.expectError && err == nil {
            t.Error("Expected error but did no error was returned.")
        }