Merge pull request #5614 from yifan-gu/clean_prober

kubelet: Remove docker container in prober's interface

Commit 9a3afb7628
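
This change removes the prober's dependency on Docker-specific types: probeContainer, probeContainerLiveness, and probeContainerReadiness now take a container ID string and a creation time (Unix seconds) instead of a *docker.APIContainers, the kubelet's event-reference map is renamed from dockerIDToRef (keyed by dockertools.DockerID) to containerIDToRef (keyed by string), and the prober drops its imports of the dockertools and go-dockerclient packages.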
@@ -180,7 +180,7 @@ func NewMainKubelet(
         statusUpdateFrequency:  statusUpdateFrequency,
         resyncInterval:         resyncInterval,
         podInfraContainerImage: podInfraContainerImage,
-        dockerIDToRef:          map[dockertools.DockerID]*api.ObjectReference{},
+        containerIDToRef:       map[string]*api.ObjectReference{},
         runner:                 dockertools.NewDockerContainerCommandRunner(dockerClient),
         httpClient:             &http.Client{},
         pullQPS:                pullQPS,
@@ -264,8 +264,8 @@ type Kubelet struct {

     // Needed to report events for containers belonging to deleted/modified pods.
     // Tracks references for reporting events
-    dockerIDToRef map[dockertools.DockerID]*api.ObjectReference
+    containerIDToRef map[string]*api.ObjectReference
     refLock       sync.RWMutex

     // Optional, defaults to simple Docker implementation
     dockerPuller dockertools.DockerPuller
@@ -673,27 +673,27 @@ func containerRef(pod *api.Pod, container *api.Container) (*api.ObjectReference,
 }

 // setRef stores a reference to a pod's container, associating it with the given docker id.
-func (kl *Kubelet) setRef(id dockertools.DockerID, ref *api.ObjectReference) {
+func (kl *Kubelet) setRef(id string, ref *api.ObjectReference) {
     kl.refLock.Lock()
     defer kl.refLock.Unlock()
-    if kl.dockerIDToRef == nil {
-        kl.dockerIDToRef = map[dockertools.DockerID]*api.ObjectReference{}
+    if kl.containerIDToRef == nil {
+        kl.containerIDToRef = map[string]*api.ObjectReference{}
     }
-    kl.dockerIDToRef[id] = ref
+    kl.containerIDToRef[id] = ref
 }

 // clearRef forgets the given docker id and its associated container reference.
-func (kl *Kubelet) clearRef(id dockertools.DockerID) {
+func (kl *Kubelet) clearRef(id string) {
     kl.refLock.Lock()
     defer kl.refLock.Unlock()
-    delete(kl.dockerIDToRef, id)
+    delete(kl.containerIDToRef, id)
 }

 // getRef returns the container reference of the given id, or (nil, false) if none is stored.
-func (kl *Kubelet) getRef(id dockertools.DockerID) (ref *api.ObjectReference, ok bool) {
+func (kl *Kubelet) getRef(id string) (ref *api.ObjectReference, ok bool) {
     kl.refLock.RLock()
     defer kl.refLock.RUnlock()
-    ref, ok = kl.dockerIDToRef[id]
+    ref, ok = kl.containerIDToRef[id]
     return ref, ok
 }

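For illustration only, here is a self-contained sketch of the pattern this hunk introduces: container references keyed by plain string IDs behind a sync.RWMutex. The names refManager and ObjectReference are stand-ins for this sketch; the real code hangs these methods off Kubelet and stores *api.ObjectReference values.

package main

import (
    "fmt"
    "sync"
)

// ObjectReference is a stub standing in for api.ObjectReference.
type ObjectReference struct {
    Kind, Name, FieldPath string
}

// refManager mirrors the containerIDToRef/refLock pair from the diff above.
type refManager struct {
    refLock          sync.RWMutex
    containerIDToRef map[string]*ObjectReference
}

// setRef stores a reference under the given container ID, lazily creating the map.
func (m *refManager) setRef(id string, ref *ObjectReference) {
    m.refLock.Lock()
    defer m.refLock.Unlock()
    if m.containerIDToRef == nil {
        m.containerIDToRef = map[string]*ObjectReference{}
    }
    m.containerIDToRef[id] = ref
}

// clearRef forgets the given container ID.
func (m *refManager) clearRef(id string) {
    m.refLock.Lock()
    defer m.refLock.Unlock()
    delete(m.containerIDToRef, id)
}

// getRef returns the stored reference, or (nil, false) if none exists.
func (m *refManager) getRef(id string) (*ObjectReference, bool) {
    m.refLock.RLock()
    defer m.refLock.RUnlock()
    ref, ok := m.containerIDToRef[id]
    return ref, ok
}

func main() {
    var m refManager
    m.setRef("abc123", &ObjectReference{Kind: "Pod", Name: "nginx", FieldPath: "spec.containers{web}"})
    if ref, ok := m.getRef("abc123"); ok {
        fmt.Println(ref.Kind, ref.Name)
    }
    m.clearRef("abc123")
}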
@@ -733,7 +733,7 @@ func (kl *Kubelet) runContainer(pod *api.Pod, container *api.Container, podVolum
     }
     // Remember this reference so we can report events about this container
     if ref != nil {
-        kl.setRef(dockertools.DockerID(dockerContainer.ID), ref)
+        kl.setRef(dockerContainer.ID, ref)
         kl.recorder.Eventf(ref, "created", "Created with docker id %v", dockerContainer.ID)
     }

@@ -949,7 +949,7 @@ func (kl *Kubelet) killContainerByID(ID string) error {
     kl.readiness.remove(ID)
     err := kl.dockerClient.StopContainer(ID, 10)

-    ref, ok := kl.getRef(dockertools.DockerID(ID))
+    ref, ok := kl.getRef(ID)
     if !ok {
         glog.Warningf("No ref for pod '%v'", ID)
     } else {
@@ -1217,7 +1217,7 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, hasMirrorPod bool, c
         // look for changes in the container.
         containerChanged := hash != 0 && hash != expectedHash
         if !containerChanged {
-            result, err := kl.probeContainer(pod, podStatus, container, dockerContainer)
+            result, err := kl.probeContainer(pod, podStatus, container, dockerContainer.ID, dockerContainer.Created)
             if err != nil {
                 // TODO(vmarmol): examine this logic.
                 glog.Infof("probe no-error: %s", container.Name)
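The hunks that follow are from the prober code and its test, where the import list and the probe function signatures change.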
@@ -24,14 +24,12 @@ import (
     "time"

     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
     execprobe "github.com/GoogleCloudPlatform/kubernetes/pkg/probe/exec"
     httprobe "github.com/GoogleCloudPlatform/kubernetes/pkg/probe/http"
     tcprobe "github.com/GoogleCloudPlatform/kubernetes/pkg/probe/tcp"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
-    "github.com/fsouza/go-dockerclient"

     "github.com/golang/glog"
 )
@@ -41,32 +39,31 @@ const maxProbeRetries = 3
 // probeContainer probes the liveness/readiness of the given container.
 // If the container's liveness probe is unsuccessful, set readiness to false.
 // If liveness is successful, do a readiness check and set readiness accordingly.
-func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
     // Probe liveness.
-    live, err := kl.probeContainerLiveness(pod, status, container, dockerContainer)
+    live, err := kl.probeContainerLiveness(pod, status, container, createdAt)
     if err != nil {
         glog.V(1).Infof("Liveness probe errored: %v", err)
-        kl.readiness.set(dockerContainer.ID, false)
+        kl.readiness.set(containerID, false)
         return probe.Unknown, err
     }
     if live != probe.Success {
         glog.V(1).Infof("Liveness probe unsuccessful: %v", live)
-        kl.readiness.set(dockerContainer.ID, false)
+        kl.readiness.set(containerID, false)
         return live, nil
     }

     // Probe readiness.
-    ready, err := kl.probeContainerReadiness(pod, status, container, dockerContainer)
+    ready, err := kl.probeContainerReadiness(pod, status, container, createdAt)
     if err == nil && ready == probe.Success {
         glog.V(3).Infof("Readiness probe successful: %v", ready)
-        kl.readiness.set(dockerContainer.ID, true)
+        kl.readiness.set(containerID, true)
         return probe.Success, nil
     }

     glog.V(1).Infof("Readiness probe failed/errored: %v, %v", ready, err)
-    kl.readiness.set(dockerContainer.ID, false)
+    kl.readiness.set(containerID, false)

-    containerID := dockertools.DockerID(dockerContainer.ID)
     ref, ok := kl.getRef(containerID)
     if !ok {
         glog.Warningf("No ref for pod '%v' - '%v'", containerID, container.Name)
@@ -78,12 +75,12 @@ func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container

 // probeContainerLiveness probes the liveness of a container.
 // If the initalDelay since container creation on liveness probe has not passed the probe will return probe.Success.
-func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, container api.Container, createdAt int64) (probe.Result, error) {
     p := container.LivenessProbe
     if p == nil {
         return probe.Success, nil
     }
-    if time.Now().Unix()-dockerContainer.Created < p.InitialDelaySeconds {
+    if time.Now().Unix()-createdAt < p.InitialDelaySeconds {
         return probe.Success, nil
     }
     return kl.runProbeWithRetries(p, pod, status, container, maxProbeRetries)
@@ -91,12 +88,12 @@ func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, co

 // probeContainerLiveness probes the readiness of a container.
 // If the initial delay on the readiness probe has not passed the probe will return probe.Failure.
-func (kl *Kubelet) probeContainerReadiness(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainerReadiness(pod *api.Pod, status api.PodStatus, container api.Container, createdAt int64) (probe.Result, error) {
     p := container.ReadinessProbe
     if p == nil {
         return probe.Success, nil
     }
-    if time.Now().Unix()-dockerContainer.Created < p.InitialDelaySeconds {
+    if time.Now().Unix()-createdAt < p.InitialDelaySeconds {
         return probe.Failure, nil
     }
     return kl.runProbeWithRetries(p, pod, status, container, maxProbeRetries)
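As a minimal standalone sketch of the initial-delay gate both probe paths above now apply to a bare Unix creation timestamp (liveness treats the window as Success, readiness as Failure), assuming only the timestamp and the probe's delay are available; withinInitialDelay is an illustrative helper, not a kubelet function:

package main

import (
    "fmt"
    "time"
)

// withinInitialDelay reports whether probing should be skipped because the
// container was created less than initialDelaySeconds ago.
func withinInitialDelay(createdAt, initialDelaySeconds int64) bool {
    return time.Now().Unix()-createdAt < initialDelaySeconds
}

func main() {
    createdAt := time.Now().Add(-5 * time.Second).Unix()
    fmt.Println(withinInitialDelay(createdAt, 10)) // true: still inside the delay window
    fmt.Println(withinInitialDelay(createdAt, 1))  // false: the probe would run
}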
@@ -400,7 +400,7 @@ func TestProbeContainer(t *testing.T) {
         } else {
             kl = makeTestKubelet(test.expectedResult, nil)
         }
-        result, err := kl.probeContainer(&api.Pod{}, api.PodStatus{}, test.testContainer, dc)
+        result, err := kl.probeContainer(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created)
         if test.expectError && err == nil {
             t.Error("Expected error but did no error was returned.")
         }