Merge pull request #8864 from ncdc/pluggable-docker-exec

Add support for pluggable Docker exec handlers
This commit is contained in:
Brian Grant 2015-06-02 14:13:06 -07:00
commit 8a9700b2ba
5 changed files with 176 additions and 63 deletions

View File

@ -111,6 +111,7 @@ type KubeletServer struct {
SystemContainer string SystemContainer string
ConfigureCBR0 bool ConfigureCBR0 bool
MaxPods int MaxPods int
DockerExecHandlerName string
// Flags intended for testing // Flags intended for testing
@ -173,6 +174,7 @@ func NewKubeletServer() *KubeletServer {
DockerDaemonContainer: "/docker-daemon", DockerDaemonContainer: "/docker-daemon",
SystemContainer: "", SystemContainer: "",
ConfigureCBR0: false, ConfigureCBR0: false,
DockerExecHandlerName: "native",
} }
} }
@ -233,6 +235,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").") fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.") fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
fs.IntVar(&s.MaxPods, "max-pods", 100, "Number of Pods that can run on this Kubelet.") fs.IntVar(&s.MaxPods, "max-pods", 100, "Number of Pods that can run on this Kubelet.")
fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.")
// Flags intended for testing, not recommended used in production environments. // Flags intended for testing, not recommended used in production environments.
fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.") fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.")
@ -306,6 +309,17 @@ func (s *KubeletServer) Run(_ []string) error {
mounter = &mount.NsenterMounter{} mounter = &mount.NsenterMounter{}
} }
var dockerExecHandler dockertools.ExecHandler
switch s.DockerExecHandlerName {
case "native":
dockerExecHandler = &dockertools.NativeExecHandler{}
case "nsenter":
dockerExecHandler = &dockertools.NsenterExecHandler{}
default:
glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
dockerExecHandler = &dockertools.NativeExecHandler{}
}
kcfg := KubeletConfig{ kcfg := KubeletConfig{
Address: s.Address, Address: s.Address,
AllowPrivileged: s.AllowPrivileged, AllowPrivileged: s.AllowPrivileged,
@ -352,6 +366,7 @@ func (s *KubeletServer) Run(_ []string) error {
SystemContainer: s.SystemContainer, SystemContainer: s.SystemContainer,
ConfigureCBR0: s.ConfigureCBR0, ConfigureCBR0: s.ConfigureCBR0,
MaxPods: s.MaxPods, MaxPods: s.MaxPods,
DockerExecHandler: dockerExecHandler,
} }
if err := RunKubelet(&kcfg, nil); err != nil { if err := RunKubelet(&kcfg, nil); err != nil {
@ -518,6 +533,7 @@ func SimpleKubelet(client *client.Client,
DockerDaemonContainer: "/docker-daemon", DockerDaemonContainer: "/docker-daemon",
SystemContainer: "", SystemContainer: "",
MaxPods: 32, MaxPods: 32,
DockerExecHandler: &dockertools.NativeExecHandler{},
} }
return &kcfg return &kcfg
} }
@ -655,6 +671,7 @@ type KubeletConfig struct {
SystemContainer string SystemContainer string
ConfigureCBR0 bool ConfigureCBR0 bool
MaxPods int MaxPods int
DockerExecHandler dockertools.ExecHandler
} }
func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) { func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@ -708,7 +725,8 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
kc.DockerDaemonContainer, kc.DockerDaemonContainer,
kc.SystemContainer, kc.SystemContainer,
kc.ConfigureCBR0, kc.ConfigureCBR0,
kc.MaxPods) kc.MaxPods,
kc.DockerExecHandler)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err

View File

@ -0,0 +1,141 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"io"
"os"
"os/exec"
"time"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/fsouza/go-dockerclient"
)
// ExecHandler knows how to execute a command in a running Docker container.
// Known implementations are NativeExecHandler (uses Docker's exec API) and
// NsenterExecHandler (enters the container's namespaces with nsenter).
type ExecHandler interface {
// ExecInContainer runs cmd in the already-running container, wiring the
// provided streams to the process. stdin may be nil; stdout/stderr may be
// nil when the corresponding stream is not wanted. When tty is true the
// command is attached to a pseudo-terminal. Blocks until the command exits
// and returns any execution error.
ExecInContainer(client DockerInterface, container *docker.Container, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error
}
// NsenterExecHandler executes commands in Docker containers using nsenter.
type NsenterExecHandler struct{}

// TODO should we support nsenter in a container, running with elevated privs and --pid=host?

// ExecInContainer enters the container's mount, ipc, uts, net and pid
// namespaces on the host via nsenter and runs cmd there, with a sanitized
// environment rebuilt from the container's config. Requires the nsenter
// binary to be present on the host's PATH.
func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *docker.Container, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
	nsenter, err := exec.LookPath("nsenter")
	if err != nil {
		// Surface the underlying LookPath error so operators can diagnose
		// PATH/installation problems instead of a bare "unavailable".
		return fmt.Errorf("exec unavailable - unable to locate nsenter: %v", err)
	}

	containerPid := container.State.Pid

	// TODO what if the container doesn't have `env`???
	// `env -i` clears the host environment; HOSTNAME and the container's own
	// env vars are re-injected so the command sees the container's view.
	args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"}
	args = append(args, fmt.Sprintf("HOSTNAME=%s", container.Config.Hostname))
	args = append(args, container.Config.Env...)
	args = append(args, cmd...)
	command := exec.Command(nsenter, args...)
	if tty {
		p, err := kubecontainer.StartPty(command)
		if err != nil {
			return err
		}
		defer p.Close()

		// make sure to close the stdout stream (only if one was supplied —
		// closing a nil WriteCloser would panic)
		if stdout != nil {
			defer stdout.Close()
		}

		if stdin != nil {
			go io.Copy(p, stdin)
		}

		if stdout != nil {
			go io.Copy(stdout, p)
		}

		return command.Wait()
	}

	if stdin != nil {
		// Use an os.Pipe here as it returns true *os.File objects.
		// This way, if you run 'kubectl exec <pod> -i bash' (no tty) and type 'exit',
		// the call below to command.Run() can unblock because its Stdin is the read half
		// of the pipe.
		r, w, err := os.Pipe()
		if err != nil {
			return err
		}
		go func() {
			io.Copy(w, stdin)
			// Close the write half once the caller's stdin is drained so the
			// child's stdin sees EOF and the command can terminate.
			w.Close()
		}()

		command.Stdin = r
	}
	if stdout != nil {
		command.Stdout = stdout
	}
	if stderr != nil {
		command.Stderr = stderr
	}

	return command.Run()
}
// NativeExecHandler executes commands in Docker containers using Docker's exec API.
type NativeExecHandler struct{}

// ExecInContainer creates a Docker exec instance in the container, starts it
// attached to the supplied streams, then polls the exec's state until it
// finishes. A non-zero exit code is reported as a *dockerExitError.
func (*NativeExecHandler) ExecInContainer(client DockerInterface, container *docker.Container, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
	createOpts := docker.CreateExecOptions{
		Container:    container.ID,
		Cmd:          cmd,
		AttachStdin:  stdin != nil,
		AttachStdout: stdout != nil,
		AttachStderr: stderr != nil,
		Tty:          tty,
	}
	execObj, err := client.CreateExec(createOpts)
	if err != nil {
		return fmt.Errorf("failed to exec in container - Exec setup failed - %v", err)
	}
	startOpts := docker.StartExecOptions{
		Detach:       false,
		InputStream:  stdin,
		OutputStream: stdout,
		ErrorStream:  stderr,
		Tty:          tty,
		RawTerminal:  tty,
	}
	err = client.StartExec(execObj.ID, startOpts)
	if err != nil {
		return err
	}
	// Poll the exec session every 2s until it stops running. Use NewTicker
	// (not time.Tick) so the ticker is stopped on return and does not leak.
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	for {
		inspect, err2 := client.InspectExec(execObj.ID)
		if err2 != nil {
			return err2
		}
		if !inspect.Running {
			if inspect.ExitCode != 0 {
				err = &dockerExitError{inspect}
			}
			break
		}
		<-ticker.C
	}

	return err
}

View File

@ -40,7 +40,7 @@ func NewFakeDockerManager(
runtimeHooks kubecontainer.RuntimeHooks) *DockerManager { runtimeHooks kubecontainer.RuntimeHooks) *DockerManager {
dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, podInfraContainerImage, qps, dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, podInfraContainerImage, qps,
burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, runtimeHooks) burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, runtimeHooks, &NativeExecHandler{})
dm.Puller = &FakeDockerPuller{} dm.Puller = &FakeDockerPuller{}
dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder) dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder)
return dm return dm

View File

@ -115,6 +115,9 @@ type DockerManager struct {
// Hooks injected into the container runtime. // Hooks injected into the container runtime.
runtimeHooks kubecontainer.RuntimeHooks runtimeHooks kubecontainer.RuntimeHooks
// Handler used to execute commands in containers.
execHandler ExecHandler
} }
func NewDockerManager( func NewDockerManager(
@ -130,7 +133,8 @@ func NewDockerManager(
networkPlugin network.NetworkPlugin, networkPlugin network.NetworkPlugin,
generator kubecontainer.RunContainerOptionsGenerator, generator kubecontainer.RunContainerOptionsGenerator,
httpClient kubeletTypes.HttpGetter, httpClient kubeletTypes.HttpGetter,
runtimeHooks kubecontainer.RuntimeHooks) *DockerManager { runtimeHooks kubecontainer.RuntimeHooks,
execHandler ExecHandler) *DockerManager {
// Work out the location of the Docker runtime, defaulting to /var/lib/docker // Work out the location of the Docker runtime, defaulting to /var/lib/docker
// if there are any problems. // if there are any problems.
dockerRoot := "/var/lib/docker" dockerRoot := "/var/lib/docker"
@ -162,6 +166,7 @@ func NewDockerManager(
} }
reasonCache := stringCache{cache: lru.New(maxReasonCacheEntries)} reasonCache := stringCache{cache: lru.New(maxReasonCacheEntries)}
dm := &DockerManager{ dm := &DockerManager{
client: client, client: client,
recorder: recorder, recorder: recorder,
@ -177,6 +182,7 @@ func NewDockerManager(
prober: nil, prober: nil,
generator: generator, generator: generator,
runtimeHooks: runtimeHooks, runtimeHooks: runtimeHooks,
execHandler: execHandler,
} }
dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm) dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)
dm.prober = prober.New(dm, readinessManager, containerRefManager, recorder) dm.prober = prober.New(dm, readinessManager, containerRefManager, recorder)
@ -985,78 +991,24 @@ func (d *dockerExitError) ExitStatus() int {
return d.Inspect.ExitCode return d.Inspect.ExitCode
} }
// ExecInContainer uses nsenter to run the command inside the container identified by containerID. // ExecInContainer runs the command inside the container identified by containerID.
// //
// TODO: // TODO:
// - match cgroups of container
// - should we support `docker exec`?
// - should we support nsenter in a container, running with elevated privs and --pid=host?
// - use strong type for containerId // - use strong type for containerId
func (dm *DockerManager) ExecInContainer(containerId string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { func (dm *DockerManager) ExecInContainer(containerId string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
nsenter, err := exec.LookPath("nsenter") if dm.execHandler == nil {
if err != nil { return errors.New("unable to exec without an exec handler")
return fmt.Errorf("exec unavailable - unable to locate nsenter")
} }
container, err := dm.client.InspectContainer(containerId) container, err := dm.client.InspectContainer(containerId)
if err != nil { if err != nil {
return err return err
} }
if !container.State.Running { if !container.State.Running {
return fmt.Errorf("container not running (%s)", container) return fmt.Errorf("container not running (%s)", container)
} }
containerPid := container.State.Pid return dm.execHandler.ExecInContainer(dm.client, container, cmd, stdin, stdout, stderr, tty)
// TODO what if the container doesn't have `env`???
args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"}
args = append(args, fmt.Sprintf("HOSTNAME=%s", container.Config.Hostname))
args = append(args, container.Config.Env...)
args = append(args, cmd...)
command := exec.Command(nsenter, args...)
if tty {
p, err := kubecontainer.StartPty(command)
if err != nil {
return err
}
defer p.Close()
// make sure to close the stdout stream
defer stdout.Close()
if stdin != nil {
go io.Copy(p, stdin)
}
if stdout != nil {
go io.Copy(stdout, p)
}
return command.Wait()
} else {
if stdin != nil {
// Use an os.Pipe here as it returns true *os.File objects.
// This way, if you run 'kubectl exec <pod> -i bash' (no tty) and type 'exit',
// the call below to command.Run() can unblock because its Stdin is the read half
// of the pipe.
r, w, err := os.Pipe()
if err != nil {
return err
}
go io.Copy(w, stdin)
command.Stdin = r
}
if stdout != nil {
command.Stdout = stdout
}
if stderr != nil {
command.Stderr = stderr
}
return command.Run()
}
} }
// PortForward executes socat in the pod's network namespace and copies // PortForward executes socat in the pod's network namespace and copies

View File

@ -141,7 +141,8 @@ func NewMainKubelet(
dockerDaemonContainer string, dockerDaemonContainer string,
systemContainer string, systemContainer string,
configureCBR0 bool, configureCBR0 bool,
pods int) (*Kubelet, error) { pods int,
dockerExecHandler dockertools.ExecHandler) (*Kubelet, error) {
if rootDirectory == "" { if rootDirectory == "" {
return nil, fmt.Errorf("invalid root directory %q", rootDirectory) return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
} }
@ -278,7 +279,8 @@ func NewMainKubelet(
klet.networkPlugin, klet.networkPlugin,
klet, klet,
klet.httpClient, klet.httpClient,
newKubeletRuntimeHooks(recorder)) newKubeletRuntimeHooks(recorder),
dockerExecHandler)
case "rkt": case "rkt":
conf := &rkt.Config{InsecureSkipVerify: true} conf := &rkt.Config{InsecureSkipVerify: true}
rktRuntime, err := rkt.New( rktRuntime, err := rkt.New(