Merge pull request #7185 from yifan-gu/interface

kubelet/container: Move Prober/HandlerRunner interface to container/help...
This commit is contained in:
Victor Marmol 2015-04-22 12:16:10 -07:00
commit a0cc7c26d7
6 changed files with 56 additions and 28 deletions

View File

@ -0,0 +1,32 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
)
// HandlerRunner runs a lifecycle handler for a container.
type HandlerRunner interface {
// Run executes the given lifecycle handler against the container
// identified by containerID, in the context of the supplied pod and
// container spec. It returns a non-nil error if the handler fails.
// NOTE(review): implementations visible in this commit dispatch on the
// handler type (HTTP get vs. exec) — confirm against the concrete
// handlerRunner in kubelet.
Run(containerID string, pod *api.Pod, container *api.Container, handler *api.Handler) error
}
// Prober checks the healthiness of a container.
type Prober interface {
// Probe checks the liveness/readiness of the given container and
// returns the probe result. status is the current pod status used to
// locate the container, and createdAt is the container's creation
// time, used to honor the probe's initial delay (units presumed to be
// Unix seconds — confirm with callers). If the liveness probe fails,
// implementations are expected to report the container as not ready.
Probe(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error)
}

View File

@ -22,15 +22,12 @@ import (
"strconv" "strconv"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog" "github.com/golang/glog"
) )
type HandlerRunner interface {
Run(containerID string, pod *api.Pod, container *api.Container, handler *api.Handler) error
}
type handlerRunner struct { type handlerRunner struct {
httpGetter httpGetter httpGetter httpGetter
commandRunner dockertools.ContainerCommandRunner commandRunner dockertools.ContainerCommandRunner
@ -38,7 +35,7 @@ type handlerRunner struct {
} }
// TODO(yifan): Merge commandRunner and containerManager once containerManager implements the ContainerCommandRunner interface. // TODO(yifan): Merge commandRunner and containerManager once containerManager implements the ContainerCommandRunner interface.
func NewHandlerRunner(httpGetter httpGetter, commandRunner dockertools.ContainerCommandRunner, containerManager *dockertools.DockerManager) *handlerRunner { func newHandlerRunner(httpGetter httpGetter, commandRunner dockertools.ContainerCommandRunner, containerManager *dockertools.DockerManager) kubecontainer.HandlerRunner {
return &handlerRunner{ return &handlerRunner{
httpGetter: httpGetter, httpGetter: httpGetter,
commandRunner: commandRunner, commandRunner: commandRunner,

View File

@ -232,8 +232,8 @@ func NewMainKubelet(
} }
klet.podManager = newBasicPodManager(klet.kubeClient) klet.podManager = newBasicPodManager(klet.kubeClient)
klet.prober = NewProber(klet.runner, klet.readinessManager, klet.containerRefManager, klet.recorder) klet.prober = newProber(klet.runner, klet.readinessManager, klet.containerRefManager, klet.recorder)
klet.handlerRunner = NewHandlerRunner(klet.httpClient, klet.runner, klet.containerManager) klet.handlerRunner = newHandlerRunner(klet.httpClient, klet.runner, klet.containerManager)
runtimeCache, err := kubecontainer.NewRuntimeCache(containerManager) runtimeCache, err := kubecontainer.NewRuntimeCache(containerManager)
if err != nil { if err != nil {
@ -317,10 +317,10 @@ type Kubelet struct {
networkPlugin network.NetworkPlugin networkPlugin network.NetworkPlugin
// Healthy check prober. // Healthy check prober.
prober *Prober prober kubecontainer.Prober
// Container lifecycle handler runner. // Container lifecycle handler runner.
handlerRunner HandlerRunner handlerRunner kubecontainer.HandlerRunner
// Container readiness state manager. // Container readiness state manager.
readinessManager *kubecontainer.ReadinessManager readinessManager *kubecontainer.ReadinessManager

View File

@ -113,8 +113,8 @@ func newTestKubelet(t *testing.T) *TestKubelet {
}, },
fakeRecorder) fakeRecorder)
kubelet.containerManager.Puller = &dockertools.FakeDockerPuller{} kubelet.containerManager.Puller = &dockertools.FakeDockerPuller{}
kubelet.prober = NewProber(nil, kubelet.readinessManager, kubelet.containerRefManager, kubelet.recorder) kubelet.prober = newProber(nil, kubelet.readinessManager, kubelet.containerRefManager, kubelet.recorder)
kubelet.handlerRunner = NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, kubelet.containerManager)
return &TestKubelet{kubelet, fakeDocker, mockCadvisor, fakeKubeClient, waitGroup, fakeMirrorClient} return &TestKubelet{kubelet, fakeDocker, mockCadvisor, fakeKubeClient, waitGroup, fakeMirrorClient}
} }
@ -768,7 +768,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
waitGroup := testKubelet.waitGroup waitGroup := testKubelet.waitGroup
fakeHttp := fakeHTTP{} fakeHttp := fakeHTTP{}
kubelet.httpClient = &fakeHttp kubelet.httpClient = &fakeHttp
kubelet.handlerRunner = NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager)
pods := []*api.Pod{ pods := []*api.Pod{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
@ -1690,7 +1690,7 @@ func TestRunHandlerExec(t *testing.T) {
kubelet := testKubelet.kubelet kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker fakeDocker := testKubelet.fakeDocker
kubelet.runner = &fakeCommandRunner kubelet.runner = &fakeCommandRunner
kubelet.handlerRunner = NewHandlerRunner(&fakeHTTP{}, kubelet.runner, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(&fakeHTTP{}, kubelet.runner, kubelet.containerManager)
containerID := "abc1234" containerID := "abc1234"
podName := "podFoo" podName := "podFoo"
@ -1745,7 +1745,7 @@ func TestRunHandlerHttp(t *testing.T) {
testKubelet := newTestKubelet(t) testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet kubelet := testKubelet.kubelet
kubelet.httpClient = &fakeHttp kubelet.httpClient = &fakeHttp
kubelet.handlerRunner = NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager)
containerID := "abc1234" containerID := "abc1234"
podName := "podFoo" podName := "podFoo"
@ -1813,7 +1813,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
kubelet.httpClient = &fakeHTTP{ kubelet.httpClient = &fakeHTTP{
err: fmt.Errorf("test error"), err: fmt.Errorf("test error"),
} }
kubelet.handlerRunner = NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager)
pods := []*api.Pod{ pods := []*api.Pod{
{ {

View File

@ -37,9 +37,8 @@ import (
const maxProbeRetries = 3 const maxProbeRetries = 3
// Prober helps to check the liveness/readiness of a container. // prober helps to check the liveness/readiness of a container.
// TODO(yifan): Replace the concrete type with interface later. type prober struct {
type Prober struct {
exec execprobe.ExecProber exec execprobe.ExecProber
http httprobe.HTTPProber http httprobe.HTTPProber
tcp tcprobe.TCPProber tcp tcprobe.TCPProber
@ -52,13 +51,13 @@ type Prober struct {
// NewProber creates a Prober, it takes a command runner and // NewProber creates a Prober, it takes a command runner and
// several container info managers. // several container info managers.
func NewProber( func newProber(
runner dockertools.ContainerCommandRunner, runner dockertools.ContainerCommandRunner,
readinessManager *kubecontainer.ReadinessManager, readinessManager *kubecontainer.ReadinessManager,
refManager *kubecontainer.RefManager, refManager *kubecontainer.RefManager,
recorder record.EventRecorder) *Prober { recorder record.EventRecorder) kubecontainer.Prober {
return &Prober{ return &prober{
exec: execprobe.New(), exec: execprobe.New(),
http: httprobe.New(), http: httprobe.New(),
tcp: tcprobe.New(), tcp: tcprobe.New(),
@ -73,7 +72,7 @@ func NewProber(
// Probe checks the liveness/readiness of the given container. // Probe checks the liveness/readiness of the given container.
// If the container's liveness probe is unsuccessful, set readiness to false. // If the container's liveness probe is unsuccessful, set readiness to false.
// If liveness is successful, do a readiness check and set readiness accordingly. // If liveness is successful, do a readiness check and set readiness accordingly.
func (pb *Prober) Probe(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { func (pb *prober) Probe(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
// Probe liveness. // Probe liveness.
live, err := pb.probeLiveness(pod, status, container, containerID, createdAt) live, err := pb.probeLiveness(pod, status, container, containerID, createdAt)
if err != nil { if err != nil {
@ -113,7 +112,7 @@ func (pb *Prober) Probe(pod *api.Pod, status api.PodStatus, container api.Contai
// probeLiveness probes the liveness of a container. // probeLiveness probes the liveness of a container.
// If the initalDelay since container creation on liveness probe has not passed the probe will return probe.Success. // If the initalDelay since container creation on liveness probe has not passed the probe will return probe.Success.
func (pb *Prober) probeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { func (pb *prober) probeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
p := container.LivenessProbe p := container.LivenessProbe
if p == nil { if p == nil {
return probe.Success, nil return probe.Success, nil
@ -126,7 +125,7 @@ func (pb *Prober) probeLiveness(pod *api.Pod, status api.PodStatus, container ap
// probeReadiness probes the readiness of a container. // probeReadiness probes the readiness of a container.
// If the initial delay on the readiness probe has not passed the probe will return probe.Failure. // If the initial delay on the readiness probe has not passed the probe will return probe.Failure.
func (pb *Prober) probeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { func (pb *prober) probeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
p := container.ReadinessProbe p := container.ReadinessProbe
if p == nil { if p == nil {
return probe.Success, nil return probe.Success, nil
@ -139,7 +138,7 @@ func (pb *Prober) probeReadiness(pod *api.Pod, status api.PodStatus, container a
// runProbeWithRetries tries to probe the container in a finite loop, it returns the last result // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result
// if it never succeeds. // if it never succeeds.
func (pb *Prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string, retires int) (probe.Result, error) { func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string, retires int) (probe.Result, error) {
var err error var err error
var result probe.Result var result probe.Result
for i := 0; i < retires; i++ { for i := 0; i < retires; i++ {
@ -151,7 +150,7 @@ func (pb *Prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.Pod
return result, err return result, err
} }
func (pb *Prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error) { func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error) {
timeout := time.Duration(p.TimeoutSeconds) * time.Second timeout := time.Duration(p.TimeoutSeconds) * time.Second
if p.Exec != nil { if p.Exec != nil {
glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v", pod, container) glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v", pod, container)
@ -228,7 +227,7 @@ type execInContainer struct {
run func() ([]byte, error) run func() ([]byte, error)
} }
func (p *Prober) newExecInContainer(pod *api.Pod, container api.Container, containerID string) exec.Cmd { func (p *prober) newExecInContainer(pod *api.Pod, container api.Container, containerID string) exec.Cmd {
return execInContainer{func() ([]byte, error) { return execInContainer{func() ([]byte, error) {
return p.runner.RunInContainer(containerID, container.LivenessProbe.Exec.Command) return p.runner.RunInContainer(containerID, container.LivenessProbe.Exec.Command)
}} }}

View File

@ -152,7 +152,7 @@ func makeTestKubelet(result probe.Result, err error) *Kubelet {
containerRefManager: kubecontainer.NewRefManager(), containerRefManager: kubecontainer.NewRefManager(),
} }
kl.prober = &Prober{ kl.prober = &prober{
exec: fakeExecProber{ exec: fakeExecProber{
result: result, result: result,
err: err, err: err,