Merge pull request #8864 from ncdc/pluggable-docker-exec

Add support for pluggable Docker exec handlers
Brian Grant 2015-06-02 14:13:06 -07:00
commit 8a9700b2ba
5 changed files with 176 additions and 63 deletions


@@ -111,6 +111,7 @@ type KubeletServer struct {
SystemContainer string
ConfigureCBR0 bool
MaxPods int
DockerExecHandlerName string
// Flags intended for testing
@@ -173,6 +174,7 @@ func NewKubeletServer() *KubeletServer {
DockerDaemonContainer: "/docker-daemon",
SystemContainer: "",
ConfigureCBR0: false,
DockerExecHandlerName: "native",
}
}
@@ -233,6 +235,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
fs.IntVar(&s.MaxPods, "max-pods", 100, "Number of Pods that can run on this Kubelet.")
fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.")
// Flags intended for testing, not recommended used in production environments.
fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.")
@@ -306,6 +309,17 @@ func (s *KubeletServer) Run(_ []string) error {
mounter = &mount.NsenterMounter{}
}
var dockerExecHandler dockertools.ExecHandler
switch s.DockerExecHandlerName {
case "native":
dockerExecHandler = &dockertools.NativeExecHandler{}
case "nsenter":
dockerExecHandler = &dockertools.NsenterExecHandler{}
default:
glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
dockerExecHandler = &dockertools.NativeExecHandler{}
}
kcfg := KubeletConfig{
Address: s.Address,
AllowPrivileged: s.AllowPrivileged,
@@ -352,6 +366,7 @@ func (s *KubeletServer) Run(_ []string) error {
SystemContainer: s.SystemContainer,
ConfigureCBR0: s.ConfigureCBR0,
MaxPods: s.MaxPods,
DockerExecHandler: dockerExecHandler,
}
if err := RunKubelet(&kcfg, nil); err != nil {
@@ -518,6 +533,7 @@ func SimpleKubelet(client *client.Client,
DockerDaemonContainer: "/docker-daemon",
SystemContainer: "",
MaxPods: 32,
DockerExecHandler: &dockertools.NativeExecHandler{},
}
return &kcfg
}
@@ -655,6 +671,7 @@ type KubeletConfig struct {
SystemContainer string
ConfigureCBR0 bool
MaxPods int
DockerExecHandler dockertools.ExecHandler
}
func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -708,7 +725,8 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
kc.DockerDaemonContainer,
kc.SystemContainer,
kc.ConfigureCBR0,
kc.MaxPods)
kc.MaxPods,
kc.DockerExecHandler)
if err != nil {
return nil, nil, err
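
The handler is chosen once at kubelet startup from the new --docker-exec-handler flag and passed down through KubeletConfig. As a minimal sketch of that wiring (the helper name execHandlerFromName and the standalone package are invented for illustration; in the PR the switch sits inline in (*KubeletServer).Run):

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
	"github.com/golang/glog"
)

// execHandlerFromName is a hypothetical helper mirroring the switch added to Run above:
// it maps the --docker-exec-handler flag value to a handler, and unknown names fall
// back to the native Docker exec API with a warning.
func execHandlerFromName(name string) dockertools.ExecHandler {
	switch name {
	case "nsenter":
		return &dockertools.NsenterExecHandler{}
	case "native":
		return &dockertools.NativeExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", name)
		return &dockertools.NativeExecHandler{}
	}
}

func main() {
	fmt.Printf("%T\n", execHandlerFromName("nsenter")) // *dockertools.NsenterExecHandler
}

In effect the default behavior changes: ExecInContainer previously always shelled out to nsenter, whereas after this change nsenter is opt-in via --docker-exec-handler=nsenter and the default "native" handler goes through Docker's exec API.
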


@@ -0,0 +1,141 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"io"
"os"
"os/exec"
"time"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/fsouza/go-dockerclient"
)
// ExecHandler knows how to execute a command in a running Docker container.
type ExecHandler interface {
ExecInContainer(client DockerInterface, container *docker.Container, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error
}
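
Any type with this single method can be plugged in. As a sketch of that extensibility, a hypothetical wrapper handler (loggingExecHandler is not part of this PR; it assumes it lives in package dockertools alongside the types above and imports glog) that logs each exec before delegating to a real handler could look like:

// loggingExecHandler is a hypothetical example of a pluggable handler: it logs
// every exec request and then delegates to the wrapped handler.
type loggingExecHandler struct {
	delegate ExecHandler
}

func (h *loggingExecHandler) ExecInContainer(client DockerInterface, container *docker.Container, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
	glog.V(2).Infof("executing %v in container %s (tty=%t)", cmd, container.ID, tty)
	return h.delegate.ExecInContainer(client, container, cmd, stdin, stdout, stderr, tty)
}

For example, &loggingExecHandler{delegate: &NativeExecHandler{}} would satisfy the same KubeletConfig.DockerExecHandler field as the two built-in handlers.
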
// NsenterExecHandler executes commands in Docker containers using nsenter.
type NsenterExecHandler struct{}
// TODO should we support nsenter in a container, running with elevated privs and --pid=host?
func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *docker.Container, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
nsenter, err := exec.LookPath("nsenter")
if err != nil {
return fmt.Errorf("exec unavailable - unable to locate nsenter")
}
containerPid := container.State.Pid
// TODO what if the container doesn't have `env`???
args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"}
args = append(args, fmt.Sprintf("HOSTNAME=%s", container.Config.Hostname))
args = append(args, container.Config.Env...)
args = append(args, cmd...)
command := exec.Command(nsenter, args...)
if tty {
p, err := kubecontainer.StartPty(command)
if err != nil {
return err
}
defer p.Close()
// make sure to close the stdout stream
defer stdout.Close()
if stdin != nil {
go io.Copy(p, stdin)
}
if stdout != nil {
go io.Copy(stdout, p)
}
return command.Wait()
} else {
if stdin != nil {
// Use an os.Pipe here as it returns true *os.File objects.
// This way, if you run 'kubectl exec <pod> -i bash' (no tty) and type 'exit',
// the call below to command.Run() can unblock because its Stdin is the read half
// of the pipe.
r, w, err := os.Pipe()
if err != nil {
return err
}
go io.Copy(w, stdin)
command.Stdin = r
}
if stdout != nil {
command.Stdout = stdout
}
if stderr != nil {
command.Stderr = stderr
}
return command.Run()
}
}
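
Concretely, for a container whose init process has PID 1234, hostname web-1, and a single environment variable PATH=/usr/bin (all values invented for illustration), running `ls /tmp` through this handler builds roughly:

nsenter -t 1234 -m -i -u -n -p -- env -i HOSTNAME=web-1 PATH=/usr/bin ls /tmp

That is, nsenter joins the container's mount, IPC, UTS, network, and PID namespaces, and env -i starts the command with a clean environment rebuilt from the container's own config.
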
// NativeExecHandler executes commands in Docker containers using Docker's exec API.
type NativeExecHandler struct{}
func (*NativeExecHandler) ExecInContainer(client DockerInterface, container *docker.Container, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
createOpts := docker.CreateExecOptions{
Container: container.ID,
Cmd: cmd,
AttachStdin: stdin != nil,
AttachStdout: stdout != nil,
AttachStderr: stderr != nil,
Tty: tty,
}
execObj, err := client.CreateExec(createOpts)
if err != nil {
return fmt.Errorf("failed to exec in container - Exec setup failed - %v", err)
}
startOpts := docker.StartExecOptions{
Detach: false,
InputStream: stdin,
OutputStream: stdout,
ErrorStream: stderr,
Tty: tty,
RawTerminal: tty,
}
err = client.StartExec(execObj.ID, startOpts)
if err != nil {
return err
}
tick := time.Tick(2 * time.Second)
for {
inspect, err2 := client.InspectExec(execObj.ID)
if err2 != nil {
return err2
}
if !inspect.Running {
if inspect.ExitCode != 0 {
err = &dockerExitError{inspect}
}
break
}
<-tick
}
return err
}
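
For a sense of how a caller drives the native handler directly, here is a minimal sketch assuming it sits in package dockertools and that client is an existing DockerInterface implementation; the helper name runInContainer is invented:

// runInContainer is a hypothetical helper: it inspects the target container and
// runs cmd in it through the native (docker exec) handler with nothing attached.
func runInContainer(client DockerInterface, containerID string, cmd []string) error {
	container, err := client.InspectContainer(containerID)
	if err != nil {
		return err
	}
	handler := &NativeExecHandler{}
	// nil streams: stdin/stdout/stderr are not attached and no TTY is allocated.
	return handler.ExecInContainer(client, container, cmd, nil, nil, nil, false)
}

Worth noting as a design choice: StartExec is called with Detach set to false, and once it returns the loop above polls InspectExec every two seconds until the exec session is no longer running, surfacing a non-zero exit code as a dockerExitError.
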


@@ -40,7 +40,7 @@ func NewFakeDockerManager(
runtimeHooks kubecontainer.RuntimeHooks) *DockerManager {
dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, podInfraContainerImage, qps,
burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, runtimeHooks)
burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, runtimeHooks, &NativeExecHandler{})
dm.Puller = &FakeDockerPuller{}
dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder)
return dm


@@ -115,6 +115,9 @@ type DockerManager struct {
// Hooks injected into the container runtime.
runtimeHooks kubecontainer.RuntimeHooks
// Handler used to execute commands in containers.
execHandler ExecHandler
}
func NewDockerManager(
@@ -130,7 +133,8 @@ func NewDockerManager(
networkPlugin network.NetworkPlugin,
generator kubecontainer.RunContainerOptionsGenerator,
httpClient kubeletTypes.HttpGetter,
runtimeHooks kubecontainer.RuntimeHooks) *DockerManager {
runtimeHooks kubecontainer.RuntimeHooks,
execHandler ExecHandler) *DockerManager {
// Work out the location of the Docker runtime, defaulting to /var/lib/docker
// if there are any problems.
dockerRoot := "/var/lib/docker"
@@ -162,6 +166,7 @@ func NewDockerManager(
}
reasonCache := stringCache{cache: lru.New(maxReasonCacheEntries)}
dm := &DockerManager{
client: client,
recorder: recorder,
@@ -177,6 +182,7 @@
prober: nil,
generator: generator,
runtimeHooks: runtimeHooks,
execHandler: execHandler,
}
dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)
dm.prober = prober.New(dm, readinessManager, containerRefManager, recorder)
@@ -985,78 +991,24 @@ func (d *dockerExitError) ExitStatus() int {
return d.Inspect.ExitCode
}
// ExecInContainer uses nsenter to run the command inside the container identified by containerID.
// ExecInContainer runs the command inside the container identified by containerID.
//
// TODO:
// - match cgroups of container
// - should we support `docker exec`?
// - should we support nsenter in a container, running with elevated privs and --pid=host?
// - use strong type for containerId
func (dm *DockerManager) ExecInContainer(containerId string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
nsenter, err := exec.LookPath("nsenter")
if err != nil {
return fmt.Errorf("exec unavailable - unable to locate nsenter")
if dm.execHandler == nil {
return errors.New("unable to exec without an exec handler")
}
container, err := dm.client.InspectContainer(containerId)
if err != nil {
return err
}
if !container.State.Running {
return fmt.Errorf("container not running (%s)", container)
}
containerPid := container.State.Pid
// TODO what if the container doesn't have `env`???
args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"}
args = append(args, fmt.Sprintf("HOSTNAME=%s", container.Config.Hostname))
args = append(args, container.Config.Env...)
args = append(args, cmd...)
command := exec.Command(nsenter, args...)
if tty {
p, err := kubecontainer.StartPty(command)
if err != nil {
return err
}
defer p.Close()
// make sure to close the stdout stream
defer stdout.Close()
if stdin != nil {
go io.Copy(p, stdin)
}
if stdout != nil {
go io.Copy(stdout, p)
}
return command.Wait()
} else {
if stdin != nil {
// Use an os.Pipe here as it returns true *os.File objects.
// This way, if you run 'kubectl exec <pod> -i bash' (no tty) and type 'exit',
// the call below to command.Run() can unblock because its Stdin is the read half
// of the pipe.
r, w, err := os.Pipe()
if err != nil {
return err
}
go io.Copy(w, stdin)
command.Stdin = r
}
if stdout != nil {
command.Stdout = stdout
}
if stderr != nil {
command.Stderr = stderr
}
return command.Run()
}
return dm.execHandler.ExecInContainer(dm.client, container, cmd, stdin, stdout, stderr, tty)
}
// PortForward executes socat in the pod's network namespace and copies
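
Because the rendered diff above interleaves the removed nsenter code with the new delegation, the resulting method is easier to read in one piece; after this change ExecInContainer reduces to roughly:

func (dm *DockerManager) ExecInContainer(containerId string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
	if dm.execHandler == nil {
		return errors.New("unable to exec without an exec handler")
	}
	container, err := dm.client.InspectContainer(containerId)
	if err != nil {
		return err
	}
	if !container.State.Running {
		return fmt.Errorf("container not running (%s)", container)
	}
	// All nsenter/docker-exec specifics now live behind the ExecHandler interface.
	return dm.execHandler.ExecInContainer(dm.client, container, cmd, stdin, stdout, stderr, tty)
}
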


@@ -141,7 +141,8 @@ func NewMainKubelet(
dockerDaemonContainer string,
systemContainer string,
configureCBR0 bool,
pods int) (*Kubelet, error) {
pods int,
dockerExecHandler dockertools.ExecHandler) (*Kubelet, error) {
if rootDirectory == "" {
return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
}
@@ -278,7 +279,8 @@ func NewMainKubelet(
klet.networkPlugin,
klet,
klet.httpClient,
newKubeletRuntimeHooks(recorder))
newKubeletRuntimeHooks(recorder),
dockerExecHandler)
case "rkt":
conf := &rkt.Config{InsecureSkipVerify: true}
rktRuntime, err := rkt.New(