Integrating ContainerManager into Kubelet

Victor Marmol 2015-05-12 09:59:02 -07:00
parent a9db69ecfa
commit e1447618f4
2 changed files with 29 additions and 3 deletions


@@ -102,6 +102,7 @@ type KubeletServer struct {
     ResourceContainer string
     CgroupRoot string
     ContainerRuntime string
+    DockerDaemonContainer string
     // Flags intended for testing
@@ -158,6 +159,7 @@ func NewKubeletServer() *KubeletServer {
         ResourceContainer: "/kubelet",
         CgroupRoot: "",
         ContainerRuntime: "docker",
+        DockerDaemonContainer: "/docker-daemon",
     }
 }
@@ -212,6 +214,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
     fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kubelet in (Default: /kubelet).")
     fs.StringVar(&s.CgroupRoot, "cgroup_root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
     fs.StringVar(&s.ContainerRuntime, "container_runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
+    fs.StringVar(&s.DockerDaemonContainer, "docker-daemon-container", s.DockerDaemonContainer, "Optional resource-only container in which to place the Docker Daemon. Empty for no container (Default: /docker-daemon).")
     // Flags intended for testing, not recommended used in production environments.
     fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.")
@@ -321,6 +324,7 @@ func (s *KubeletServer) Run(_ []string) error {
         CgroupRoot: s.CgroupRoot,
         ContainerRuntime: s.ContainerRuntime,
         Mounter: mounter,
+        DockerDaemonContainer: s.DockerDaemonContainer,
     }
     RunKubelet(&kcfg, nil)
@@ -432,6 +436,7 @@ func SimpleKubelet(client *client.Client,
         CgroupRoot: "",
         ContainerRuntime: "docker",
         Mounter: mount.New(),
+        DockerDaemonContainer: "/docker-daemon",
     }
     return &kcfg
 }
@@ -562,6 +567,7 @@ type KubeletConfig struct {
     CgroupRoot string
     ContainerRuntime string
     Mounter mount.Interface
+    DockerDaemonContainer string
 }
 func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -609,7 +615,8 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
         kc.OSInterface,
         kc.CgroupRoot,
         kc.ContainerRuntime,
-        kc.Mounter)
+        kc.Mounter,
+        kc.DockerDaemonContainer)
     if err != nil {
         return nil, nil, err
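
Taken together, these server changes thread one new string, DockerDaemonContainer, from the command line through KubeletConfig down to NewMainKubelet. The standalone Go sketch below only illustrates the flag pattern used in AddFlags (a string flag whose default comes from the pre-populated struct field, so the "/docker-daemon" default from NewKubeletServer shows up in --help); the kubeletServer struct here and the main function are illustrative stand-ins, not code from this commit.

package main

import (
    "fmt"

    "github.com/spf13/pflag"
)

// kubeletServer is a cut-down stand-in for the real KubeletServer; only the
// field and flag name mirror the diff above.
type kubeletServer struct {
    DockerDaemonContainer string
}

func main() {
    // Default mirrors NewKubeletServer in the diff.
    s := &kubeletServer{DockerDaemonContainer: "/docker-daemon"}

    fs := pflag.NewFlagSet("kubelet", pflag.ContinueOnError)
    fs.StringVar(&s.DockerDaemonContainer, "docker-daemon-container", s.DockerDaemonContainer,
        "Optional resource-only container in which to place the Docker Daemon. Empty for no container (Default: /docker-daemon).")

    // Per the flag help text, an empty value opts out of containerizing the daemon.
    if err := fs.Parse([]string{"--docker-daemon-container="}); err != nil {
        fmt.Println("parse error:", err)
        return
    }
    fmt.Printf("DockerDaemonContainer=%q\n", s.DockerDaemonContainer) // prints ""
}

Because the struct field is registered as the flag's default, callers that build the config programmatically (SimpleKubelet, tests) and callers that go through the command line end up feeding the same value into NewMainKubelet.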


@@ -136,7 +136,8 @@ func NewMainKubelet(
     osInterface kubecontainer.OSInterface,
     cgroupRoot string,
     containerRuntime string,
-    mounter mount.Interface) (*Kubelet, error) {
+    mounter mount.Interface,
+    dockerDaemonContainer string) (*Kubelet, error) {
     if rootDirectory == "" {
         return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
     }
@@ -276,10 +277,19 @@ func NewMainKubelet(
             return nil, err
         }
         klet.containerRuntime = rktRuntime
+
+        // No Docker daemon to put in a container.
+        dockerDaemonContainer = ""
     default:
         return nil, fmt.Errorf("unsupported container runtime %q specified", containerRuntime)
     }
+    containerManager, err := newContainerManager(dockerDaemonContainer)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create the Container Manager: %v", err)
+    }
+    klet.containerManager = containerManager
+
     // Wait for the runtime to be up with a timeout.
     if err := waitUntilRuntimeIsUp(klet.containerRuntime, maxWaitForContainerRuntime); err != nil {
         return nil, fmt.Errorf("timed out waiting for %q to come up: %v", containerRuntime, err)
@@ -434,6 +444,9 @@ type Kubelet struct {
     // Mounter to use for volumes.
     mounter mount.Interface
+
+    // Manager of non-Runtime containers.
+    containerManager containerManager
 }
 // getRootDir returns the full path to the directory under which kubelet can
@@ -624,10 +637,16 @@ func (kl *Kubelet) Run(updates <-chan PodUpdate) {
     err := kl.imageManager.Start()
     if err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "imageManagerFailed", "Failed to start ImageManager %v", err)
+        kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start ImageManager %v", err)
         glog.Errorf("Failed to start ImageManager, images may not be garbage collected: %v", err)
     }
+    err = kl.containerManager.Start()
+    if err != nil {
+        kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start ContainerManager %v", err)
+        glog.Errorf("Failed to start ContainerManager, system may not be properly isolated: %v", err)
+    }
+
     go util.Until(kl.updateRuntimeUp, 5*time.Second, util.NeverStop)
     go kl.syncNodeStatus()
     // Run the system oom watcher forever.
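
The container manager implementation itself (newContainerManager and the containerManager type) is not part of this diff; only its call sites appear here. Based purely on those call sites and the flag's help text, its contract reduces to something like the hedged sketch below. Only the names containerManager, newContainerManager, and Start come from the commit; the no-op type and the function bodies are illustrative assumptions, not the real implementation.

package kubelet

// containerManager is inferred from the call sites in this commit: it is
// created once in NewMainKubelet and started once in (*Kubelet).Run.
type containerManager interface {
    // Start is best-effort: per the Run() hunk above, a failure is logged
    // and recorded as a "kubeletSetupFailed" event but does not abort the
    // kubelet. Per the flag help text, a real implementation would place
    // the Docker daemon in the configured resource-only container.
    Start() error
}

// noopContainerManager is a stand-in for the real manager in this sketch.
type noopContainerManager struct {
    dockerDaemonContainer string // "" means "do not containerize the daemon" (e.g. the rkt runtime)
}

func (m *noopContainerManager) Start() error {
    // A real implementation would create or verify the cgroup named by
    // m.dockerDaemonContainer and move the Docker daemon into it.
    return nil
}

// newContainerManager matches the signature used in NewMainKubelet above.
func newContainerManager(dockerDaemonContainer string) (containerManager, error) {
    return &noopContainerManager{dockerDaemonContainer: dockerDaemonContainer}, nil
}

The best-effort contract is visible in the Run() hunk: the kubelet keeps running even if the manager fails to start, at the cost of the system being "not properly isolated" rather than unusable.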