Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 20:24:09 +00:00)

Merge pull request #13468 from yifan-gu/rkt_doc

Auto commit by PR queue bot

This commit is contained in commit bfc60709b1
@@ -119,6 +119,7 @@ type KubeletServer struct {
ResolverConfig string
ResourceContainer string
RktPath string
RktStage1Image string
RootDirectory string
RunOnce bool
StandaloneMode bool
@@ -189,6 +190,7 @@ func NewKubeletServer() *KubeletServer {
RegistryBurst: 10,
ResourceContainer: "/kubelet",
RktPath: "",
RktStage1Image: "",
RootDirectory: defaultRootDir,
SyncFrequency: 10 * time.Second,
SystemContainer: "",
@@ -254,6 +256,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'")
fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used")
fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
fs.IntVar(&s.MaxPods, "max-pods", 40, "Number of Pods that can run on this Kubelet.")
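As a rough sketch of how the new flags fit together on a kubelet command line (the binary and image paths below are placeholders, not defaults introduced by this change):

```console
$ kubelet --container-runtime=rkt \
    --rkt-path=/usr/local/bin/rkt \
    --rkt-stage1-image=/usr/local/bin/stage1.aci \
    --api-servers=127.0.0.1:8080
```

Per the flag help text above, leaving `--rkt-path` empty uses the first `rkt` found in `$PATH`, and leaving `--rkt-stage1-image` empty uses the `stage1.aci` next to the rkt binary.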
@@ -364,6 +367,7 @@ func (s *KubeletServer) KubeletConfig() (*KubeletConfig, error) {
ResolverConfig: s.ResolverConfig,
ResourceContainer: s.ResourceContainer,
RktPath: s.RktPath,
RktStage1Image: s.RktStage1Image,
RootDirectory: s.RootDirectory,
Runonce: s.RunOnce,
StandaloneMode: (len(s.APIServerList) == 0),
@@ -789,6 +793,7 @@ type KubeletConfig struct {
ResolverConfig string
ResourceContainer string
RktPath string
RktStage1Image string
RootDirectory string
Runonce bool
StandaloneMode bool
@@ -851,6 +856,7 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
kc.CgroupRoot,
kc.ContainerRuntime,
kc.RktPath,
kc.RktStage1Image,
kc.Mounter,
kc.DockerDaemonContainer,
kc.SystemContainer,
@@ -325,6 +325,7 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(
kc.CgroupRoot,
kc.ContainerRuntime,
kc.RktPath,
kc.RktStage1Image,
kc.Mounter,
kc.DockerDaemonContainer,
kc.SystemContainer,
@@ -38,36 +38,31 @@ We still have [a bunch of work](http://issue.k8s.io/8262) to do to make the expe

### **Prerequisite**

- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on your machine and should be enabled. The minimum version required at this moment (2015/05/28) is [215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html).
- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on the machine and should be enabled. The minimum version required at this moment (2015/09/01) is 219.
*(Note that systemd is not required by rkt itself; we are using it here to monitor and manage the pods launched by kubelet.)*

- Install the latest rkt release according to the instructions [here](https://github.com/coreos/rkt).
The minimum version required for now is [v0.5.6](https://github.com/coreos/rkt/releases/tag/v0.5.6).

- Make sure the `rkt metadata service` is running, because it is necessary for running pods in private network mode.
More details about the networking of rkt can be found in the [documentation](https://github.com/coreos/rkt/blob/master/Documentation/networking.md).

To start the `rkt metadata service`, you can simply run:

```console
$ sudo rkt metadata-service
```

If you want the service to run as a systemd service, then:

```console
$ sudo systemd-run rkt metadata-service
```

Alternatively, you can use the [rkt-metadata.service](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.service) and [rkt-metadata.socket](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.socket) to start the service.
The minimum version required for now is [v0.8.0](https://github.com/coreos/rkt/releases/tag/v0.8.0).

- Note that for rkt versions later than v0.7.0, the `metadata service` is not required for running pods in private networks, so rkt pods will no longer register with the metadata service by default. (A quick check of the installed versions is sketched after this list.)
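A minimal sketch for confirming that both version requirements are met (the exact output format depends on your distribution and rkt release):

```console
$ systemctl --version | head -n 1   # expect systemd 219 or newer
$ rkt version                       # expect rkt v0.8.0 or newer
```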
### Local cluster

To use rkt as the container runtime, you just need to set the environment variable `CONTAINER_RUNTIME`:
To use rkt as the container runtime, we need to supply `--container-runtime=rkt` and `--rkt-path=$PATH_TO_RKT_BINARY` to the kubelet. Additionally, we can provide the `--rkt-stage1-image` flag
to select which [stage1 image](https://github.com/coreos/rkt/blob/master/Documentation/running-lkvm-stage1.md) we want to use.

If you are using the [hack/local-up-cluster.sh](../../../hack/local-up-cluster.sh) script to launch the local cluster, then you can edit the environment variables `CONTAINER_RUNTIME`, `RKT_PATH`, and `RKT_STAGE1_IMAGE` to
set these flags:

```console
$ export CONTAINER_RUNTIME=rkt
$ export RKT_PATH=$PATH_TO_RKT_BINARY
$ export RKT_STAGE1_IMAGE=$PATH_TO_STAGE1_IMAGE
```

Then we can launch the local cluster using the script:

```console
$ hack/local-up-cluster.sh
```
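Once the cluster is up, a quick sanity check that the kubelet is really driving rkt is to list the pods rkt knows about; the kubelet-launched pods should show up here:

```console
$ sudo rkt list
```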
@@ -85,7 +80,7 @@ $ export KUBE_CONTAINER_RUNTIME=rkt
You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`:

```console
$ export KUBE_RKT_VERSION=0.5.6
$ export KUBE_RKT_VERSION=0.8.0
```

Then you can launch the cluster by:
@@ -109,7 +104,7 @@ $ export KUBE_CONTAINER_RUNTIME=rkt
You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`:

```console
$ export KUBE_RKT_VERSION=0.5.6
$ export KUBE_RKT_VERSION=0.8.0
```

You can optionally choose the CoreOS channel by setting `COREOS_CHANNEL`:
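For example (a sketch; `alpha` is simply one of the CoreOS release channels, pick whichever channel ships the rkt version you need):

```console
$ export COREOS_CHANNEL=alpha
```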
@@ -134,6 +129,46 @@ See [a simple nginx example](../../../docs/user-guide/simple-nginx.md) to try ou

For more complete applications, please look in the [examples directory](../../../examples/).


### Debugging

Here are several tips for when you run into issues.

##### Check logs

By default, the log verbosity level is 2. In order to see more logs related to rkt, we can set the verbosity level to 4.
For a local cluster, we can set the environment variable `LOG_LEVEL=4`.
If the cluster is using salt, we can edit the [logging.sls](../../../cluster/saltbase/pillar/logging.sls) in the saltbase.
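For example, for a local cluster started with hack/local-up-cluster.sh (a sketch):

```console
$ export LOG_LEVEL=4
$ hack/local-up-cluster.sh
```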
##### Check rkt pod status

To check the pods' status, we can use rkt commands such as `rkt list`, `rkt status`, and `rkt image list`.
More information about the rkt command line can be found [here](https://github.com/coreos/rkt/blob/master/Documentation/commands.md).
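A short sketch of what this looks like in practice (`$UUID` stands for a pod UUID taken from the `rkt list` output):

```console
$ sudo rkt list           # list rkt pods with their UUIDs and states
$ sudo rkt status $UUID   # show the detailed status of one pod
$ sudo rkt image list     # list the images rkt has fetched locally
```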
##### Check journal logs

As we use systemd to launch rkt pods (by creating service files that run `rkt run-prepared`), we can check a pod's logs
using `journalctl`:

- Check the running state of the systemd service:

```console
$ sudo journalctl -u $SERVICE_FILE
```

where `$SERVICE_FILE` is the name of the service file created for the pod; you can find it in the kubelet logs.
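In addition to `journalctl`, `systemctl status` gives a quick summary of the unit's state together with its most recent log lines (a sketch; `$SERVICE_FILE` is the same unit name as above):

```console
$ sudo systemctl status $SERVICE_FILE
```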
##### Check the log of the container in the pod

```console
$ sudo journalctl -M rkt-$UUID -u $CONTAINER_NAME
```

where `$UUID` is the rkt pod's UUID, which you can find via `rkt list --full`, and `$CONTAINER_NAME` is the container's name.
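A worked sketch of the two steps, using a hypothetical container named `nginx`:

```console
$ sudo rkt list --full                    # copy the pod's full UUID from this output
$ sudo journalctl -M rkt-$UUID -u nginx   # 'nginx' is a hypothetical container name
```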
##### Check Kubernetes events and logs

Besides the tricks above, Kubernetes also provides handy tools for debugging pods. More information can be found [here](../../../docs/user-guide/application-troubleshooting.md).
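For example (a sketch; `mypod` is a hypothetical pod name):

```console
$ kubectl describe pod mypod   # shows the pod's events, among other details
$ kubectl logs mypod           # prints the logs of the pod's container
```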
@@ -86,6 +86,8 @@ API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-"/127.0.0.1(:[0-9]+)?$,/loc
KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3}
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
RKT_PATH=${RKT_PATH:-""}
RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""}
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}

function test_apiserver_off {
@@ -251,6 +253,8 @@ function start_kubelet {
--v=${LOG_LEVEL} \
--chaos-chance="${CHAOS_CHANCE}" \
--container-runtime="${CONTAINER_RUNTIME}" \
--rkt-path="${RKT_PATH}" \
--rkt-stage1-image="${RKT_STAGE1_IMAGE}" \
--hostname-override="127.0.0.1" \
--address="127.0.0.1" \
--api-servers="${API_HOST}:${API_PORT}" \
@@ -223,6 +223,7 @@ resource-container
resource-quota-sync-period
resource-version
rkt-path
rkt-stage1-image
root-ca-file
root-dir
run-proxy
@@ -164,6 +164,7 @@ func NewMainKubelet(
cgroupRoot string,
containerRuntime string,
rktPath string,
rktStage1Image string,
mounter mount.Interface,
dockerDaemonContainer string,
systemContainer string,
@@ -335,6 +336,7 @@ func NewMainKubelet(
case "rkt":
conf := &rkt.Config{
Path: rktPath,
Stage1Image: rktStage1Image,
InsecureSkipVerify: true,
}
rktRuntime, err := rkt.New(
@@ -23,6 +23,8 @@ import "fmt"
type Config struct {
// The absolute path to the binary, or leave empty to find it in $PATH.
Path string
// The image to use as stage1.
Stage1Image string
// The debug flag for rkt.
Debug bool
// The rkt data directory.
@@ -27,7 +27,6 @@ import (
"path"
"strconv"
"strings"
"syscall"
"time"

appcschema "github.com/appc/spec/schema"
@@ -42,6 +41,7 @@ import (
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober"
kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/types"
@@ -467,7 +467,7 @@ func (r *runtime) makePodManifest(pod *api.Pod, pullSecrets []api.Secret) (*appc

volumeMap, ok := r.volumeGetter.GetVolumes(pod.UID)
if !ok {
return nil, fmt.Errorf("cannot get the volumes for pod %q", kubecontainer.GetPodFullName(pod))
return nil, fmt.Errorf("cannot get the volumes for pod %q", kubeletUtil.FormatPodName(pod))
}

// Set global volumes.
@@ -533,7 +533,7 @@ func serviceFilePath(serviceName string) string {
// preparePod will:
//
// 1. Invoke 'rkt prepare' to prepare the pod, and get the rkt pod uuid.
// 2. Creates the unit file and save it under systemdUnitDir.
// 2. Create the unit file and save it under systemdUnitDir.
//
// On success, it will return a string that represents name of the unit file
// and the runtime pod.
@@ -566,6 +566,9 @@ func (r *runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k

// Run 'rkt prepare' to get the rkt UUID.
cmds := []string{"prepare", "--quiet", "--pod-manifest", manifestFile.Name()}
if r.config.Stage1Image != "" {
cmds = append(cmds, "--stage1-image", r.config.Stage1Image)
}
output, err := r.runCommand(cmds...)
if err != nil {
return "", nil, err
@@ -596,6 +599,8 @@ func (r *runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k
// This makes the service show up for 'systemctl list-units' even if it exits successfully.
newUnitOption("Service", "RemainAfterExit", "true"),
newUnitOption("Service", "ExecStart", runPrepared),
// This enables graceful stop.
newUnitOption("Service", "KillMode", "mixed"),
}

// Check if there's old rkt pod corresponding to the same pod, if so, update the restart count.
@@ -615,7 +620,7 @@ func (r *runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k
}
units = append(units, newUnitOption(unitKubernetesSection, unitRestartCount, strconv.Itoa(restartCount)))

glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, pod.Name)
glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, kubeletUtil.FormatPodName(pod))
serviceFile, err := os.Create(serviceFilePath(serviceName))
if err != nil {
return "", nil, err
@@ -674,7 +679,7 @@ func (r *runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
// RunPod first creates the unit file for a pod, and then
// starts the unit over d-bus.
func (r *runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
glog.V(4).Infof("Rkt starts to run pod: name %q.", pod.Name)
glog.V(4).Infof("Rkt starts to run pod: name %q.", kubeletUtil.FormatPodName(pod))

name, runtimePod, prepareErr := r.preparePod(pod, pullSecrets)

@@ -684,7 +689,7 @@ func (r *runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
for i, c := range pod.Spec.Containers {
ref, err := kubecontainer.GenerateContainerRef(pod, &c)
if err != nil {
glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, c.Name, err)
glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", kubeletUtil.FormatPodName(pod), c.Name, err)
continue
}
if prepareErr != nil {
@@ -800,8 +805,11 @@ func (r *runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
r.containerRefManager.ClearRef(id)
}

// TODO(yifan): More graceful stop. Replace with StopUnit and wait for a timeout.
r.systemd.KillUnit(serviceName, int32(syscall.SIGKILL))
// Since all service file have 'KillMode=mixed', the processes in
// the unit's cgroup will receive a SIGKILL if the normal stop timeouts.
if _, err := r.systemd.StopUnit(serviceName, "replace"); err != nil {
return err
}
// Remove the systemd service file as well.
return os.Remove(serviceFilePath(serviceName))
}
@@ -961,7 +969,7 @@ func (r *runtime) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {

// SyncPod syncs the running pod to match the specified desired pod.
func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
podFullName := kubecontainer.GetPodFullName(pod)
podFullName := kubeletUtil.FormatPodName(pod)
if len(runningPod.Containers) == 0 {
glog.V(4).Infof("Pod %q is not running, will start it", podFullName)
return r.RunPod(pod, pullSecrets)
@@ -1036,6 +1044,8 @@ func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus
//
// In rkt runtime's implementation, per container log is get via 'journalctl -M [rkt-$UUID] -u [APP_NAME]'.
// See https://github.com/coreos/rkt/blob/master/Documentation/commands.md#logging for more details.
//
// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
func (r *runtime) GetContainerLogs(pod *api.Pod, containerID string, tail string, follow bool, stdout, stderr io.Writer) error {
id, err := parseContainerID(containerID)
if err != nil {
@@ -1072,6 +1082,7 @@ func (r *runtime) GarbageCollect() error {

// Note: In rkt, the container ID is in the form of "UUID:appName", where
// appName is the container name.
// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
func (r *runtime) RunInContainer(containerID string, cmd []string) ([]byte, error) {
glog.V(4).Infof("Rkt running in container.")

@@ -1092,6 +1103,7 @@ func (r *runtime) AttachContainer(containerID string, stdin io.Reader, stdout, s

// Note: In rkt, the container ID is in the form of "UUID:appName", where UUID is
// the rkt UUID, and appName is the container name.
// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
func (r *runtime) ExecInContainer(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
glog.V(4).Infof("Rkt execing in container.")

@@ -1150,7 +1162,7 @@ func (r *runtime) findRktID(pod *kubecontainer.Pod) (string, error) {
f, err := os.Open(serviceFilePath(serviceName))
if err != nil {
if os.IsNotExist(err) {
return "", fmt.Errorf("no service file %v for pod %q, UID %q", serviceName, pod.Name, pod.ID)
return "", fmt.Errorf("no service file %v for runtime pod %q, ID %q", serviceName, pod.Name, pod.ID)
}
return "", err
}
@@ -1179,6 +1191,7 @@ func (r *runtime) findRktID(pod *kubecontainer.Pod) (string, error) {
// - should we support nsenter + socat in a container, running with elevated privs and --pid=host?
//
// TODO(yifan): Merge with the same function in dockertools.
// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
func (r *runtime) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
glog.V(4).Infof("Rkt port forwarding in container.")
