kubeadm: do not special case Docker as a container runtime

crictl already works with the current state of dockershim.
Using the docker CLI is not required, and the DockerRuntime
can be removed from kubeadm. This means that crictl
can connect to the dockershim (or cri-dockerd) socket and
be used to list containers, pull images, remove containers, and
perform all other actions that the kubelet can perform with the socket.
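
For illustration, a minimal Go sketch of the crictl call that replaces the
old docker CLI usage; the cri-dockerd socket path is an assumption for the
example, not a value taken from this commit:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Assumed endpoint; cri-dockerd serves the CRI on its own socket.
	socket := "unix:///var/run/cri-dockerd.sock"

	// List Kubernetes pod sandboxes, the same call CRIRuntime makes below
	// via 'crictl -r <socket> pods -q'.
	out, err := exec.Command("crictl", "-r", socket, "pods", "-q").CombinedOutput()
	if err != nil {
		fmt.Printf("crictl failed: %v, output: %s\n", err, out)
		return
	}
	fmt.Printf("pod sandbox IDs:\n%s", out)
}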

Ensure that crictl is now required for all supported container runtimes
in checks.go. In the help text in waitcontrolplane.go, show only
the crictl example.

Remove the check for the docker service from checks.go.
Remove the DockerValidator check from checks.go.
These two checks were special-casing Docker as a container runtime
and compensating for the lack of the same checks in dockershim. With the
extraction of dockershim to cri-dockerd, ideally cri-dockerd
should perform the required checks to determine whether it can support
a given Docker config / version running on a host.
Lubomir I. Ivanov 2022-01-04 22:28:53 +02:00
parent e9fc23e31e
commit 936e12c930
4 changed files with 23 additions and 125 deletions

cmd/kubeadm/app/cmd/phases/init/waitcontrolplane.go

@@ -29,7 +29,6 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
-	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
 	dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
 )
@@ -49,17 +48,10 @@ var (
 	Additionally, a control plane component may have crashed or exited when started by the container runtime.
 	To troubleshoot, list all containers using your preferred container runtimes CLI.
-{{ if .IsDocker }}
-	Here is one example how you may list all Kubernetes containers running in docker:
-		- 'docker ps -a | grep kube | grep -v pause'
-		Once you have found the failing container, you can inspect its logs with:
-		- 'docker logs CONTAINERID'
-{{ else }}
-	Here is one example how you may list all Kubernetes containers running in cri-o/containerd using crictl:
+	Here is one example how you may list all running Kubernetes containers by using crictl:
 		- 'crictl --runtime-endpoint {{ .Socket }} ps -a | grep kube | grep -v pause'
 		Once you have found the failing container, you can inspect its logs with:
 		- 'crictl --runtime-endpoint {{ .Socket }} logs CONTAINERID'
-{{ end }}
 `)))
 )
@@ -105,13 +97,11 @@ func runWaitControlPlanePhase(c workflow.RunData) error {
 	if err := waiter.WaitForKubeletAndFunc(waiter.WaitForAPI); err != nil {
 		context := struct {
-			Error    string
-			Socket   string
-			IsDocker bool
+			Error  string
+			Socket string
 		}{
-			Error:    fmt.Sprintf("%v", err),
-			Socket:   data.Cfg().NodeRegistration.CRISocket,
-			IsDocker: data.Cfg().NodeRegistration.CRISocket == kubeadmconstants.DefaultDockerCRISocket,
+			Error:  fmt.Sprintf("%v", err),
+			Socket: data.Cfg().NodeRegistration.CRISocket,
 		}
 		kubeletFailTempl.Execute(data.OutputWriter(), context)
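
The message above is rendered with Go's text/template. A self-contained
sketch of how the reduced context renders; the template text is an
abbreviated stand-in for kubeletFailTempl and the values are illustrative:

package main

import (
	"os"
	"text/template"
)

func main() {
	// Abbreviated stand-in for kubeletFailTempl; only the two fields that
	// survive this commit (Error and Socket) are referenced.
	tmpl := template.Must(template.New("kubeletFail").Parse(
		"Error: {{ .Error }}\n" +
			"To troubleshoot: 'crictl --runtime-endpoint {{ .Socket }} ps -a | grep kube | grep -v pause'\n"))

	context := struct {
		Error  string
		Socket string
	}{
		Error:  "timed out waiting for the condition",          // illustrative error
		Socket: "unix:///var/run/containerd/containerd.sock", // illustrative socket
	}
	tmpl.Execute(os.Stdout, context)
}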

cmd/kubeadm/app/preflight/checks.go

@@ -507,9 +507,7 @@ func (subnet HTTPProxyCIDRCheck) Check() (warnings, errorList []error) {
 }
 
 // SystemVerificationCheck defines struct used for running the system verification node check in test/e2e_node/system
-type SystemVerificationCheck struct {
-	IsDocker bool
-}
+type SystemVerificationCheck struct{}
 
 // Name will return SystemVerification as name for SystemVerificationCheck
 func (SystemVerificationCheck) Name() string {
@@ -530,11 +528,6 @@ func (sysver SystemVerificationCheck) Check() (warnings, errorList []error) {
 	var validators = []system.Validator{
 		&system.KernelValidator{Reporter: reporter}}
 
-	// run the docker validator only with docker runtime
-	if sysver.IsDocker {
-		validators = append(validators, &system.DockerValidator{Reporter: reporter})
-	}
-
 	if runtime.GOOS == "linux" {
 		//add linux validators
 		validators = append(validators,
@@ -1025,26 +1018,19 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.JoinConfiguration,
 // kubeadm init and join commands
 func addCommonChecks(execer utilsexec.Interface, k8sVersion string, nodeReg *kubeadmapi.NodeRegistrationOptions, checks []Checker) []Checker {
 	containerRuntime, err := utilruntime.NewContainerRuntime(execer, nodeReg.CRISocket)
-	isDocker := false
 	if err != nil {
 		fmt.Printf("[preflight] WARNING: Couldn't create the interface used for talking to the container runtime: %v\n", err)
 	} else {
 		checks = append(checks, ContainerRuntimeCheck{runtime: containerRuntime})
-		if containerRuntime.IsDocker() {
-			isDocker = true
-			checks = append(checks, ServiceCheck{Service: "docker", CheckIfActive: true})
-		}
 	}
 
 	// non-windows checks
 	if runtime.GOOS == "linux" {
-		if !isDocker {
-			checks = append(checks, InPathCheck{executable: "crictl", mandatory: true, exec: execer})
-		}
 		checks = append(checks,
 			FileContentCheck{Path: bridgenf, Content: []byte{'1'}},
 			FileContentCheck{Path: ipv4Forward, Content: []byte{'1'}},
 			SwapCheck{},
+			InPathCheck{executable: "crictl", mandatory: true, exec: execer},
 			InPathCheck{executable: "conntrack", mandatory: true, exec: execer},
 			InPathCheck{executable: "ip", mandatory: true, exec: execer},
 			InPathCheck{executable: "iptables", mandatory: true, exec: execer},
@@ -1057,7 +1043,7 @@ func addCommonChecks(execer utilsexec.Interface, k8sVersion string, nodeReg *kubeadmapi.NodeRegistrationOptions,
 		InPathCheck{executable: "touch", mandatory: false, exec: execer})
 	}
 	checks = append(checks,
-		SystemVerificationCheck{IsDocker: isDocker},
+		SystemVerificationCheck{},
 		HostnameCheck{nodeName: nodeReg.Name},
 		KubeletVersionCheck{KubernetesVersion: k8sVersion, exec: execer},
 		ServiceCheck{Service: "kubelet", CheckIfActive: false},
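
Every entry appended above satisfies kubeadm's preflight Checker contract.
A toy sketch of that shape; crictlCheck here is a hypothetical, simplified
equivalent of the now unconditional InPathCheck for crictl:

package preflight

import "os/exec"

// Checker mirrors the contract used by checks.go: a preflight check
// returns non-fatal warnings and fatal errors.
type Checker interface {
	Check() (warnings, errorList []error)
	Name() string
}

// crictlCheck is a hypothetical stand-in for
// InPathCheck{executable: "crictl", mandatory: true}.
type crictlCheck struct{}

func (crictlCheck) Name() string { return "CrictlInPath" }

func (crictlCheck) Check() (warnings, errorList []error) {
	if _, err := exec.LookPath("crictl"); err != nil {
		return nil, []error{err}
	}
	return nil, nil
}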

cmd/kubeadm/app/util/runtime/runtime.go

@@ -29,7 +29,7 @@ import (
 // ContainerRuntime is an interface for working with container runtimes
 type ContainerRuntime interface {
-	IsDocker() bool
+	Socket() string
 	IsRunning() error
 	ListKubeContainers() ([]string, error)
 	RemoveContainers(containers []string) error
@@ -43,39 +43,19 @@ type CRIRuntime struct {
 	criSocket string
 }
 
-// DockerRuntime is a struct that interfaces with the Docker daemon
-type DockerRuntime struct {
-	exec utilsexec.Interface
-}
-
 // NewContainerRuntime sets up and returns a ContainerRuntime struct
 func NewContainerRuntime(execer utilsexec.Interface, criSocket string) (ContainerRuntime, error) {
-	var toolName string
-	var runtime ContainerRuntime
-
-	if criSocket != constants.DefaultDockerCRISocket {
-		toolName = "crictl"
-		runtime = &CRIRuntime{execer, criSocket}
-	} else {
-		toolName = "docker"
-		runtime = &DockerRuntime{execer}
-	}
-
+	toolName := "crictl"
+	runtime := &CRIRuntime{execer, criSocket}
 	if _, err := execer.LookPath(toolName); err != nil {
-		return nil, errors.Wrapf(err, "%s is required for container runtime", toolName)
+		return nil, errors.Wrapf(err, "%s is required by the container runtime", toolName)
 	}
 	return runtime, nil
 }
 
-// IsDocker returns true if the runtime is docker
-func (runtime *CRIRuntime) IsDocker() bool {
-	return false
-}
-
-// IsDocker returns true if the runtime is docker
-func (runtime *DockerRuntime) IsDocker() bool {
-	return true
+// Socket returns the CRI socket endpoint
+func (runtime *CRIRuntime) Socket() string {
+	return runtime.criSocket
 }
 
 // IsRunning checks if runtime is running
@@ -86,14 +66,6 @@ func (runtime *CRIRuntime) IsRunning() error {
 	return nil
 }
 
-// IsRunning checks if runtime is running
-func (runtime *DockerRuntime) IsRunning() error {
-	if out, err := runtime.exec.Command("docker", "info").CombinedOutput(); err != nil {
-		return errors.Wrapf(err, "container runtime is not running: output: %s, error", string(out))
-	}
-	return nil
-}
-
 // ListKubeContainers lists running k8s CRI pods
 func (runtime *CRIRuntime) ListKubeContainers() ([]string, error) {
 	out, err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "pods", "-q").CombinedOutput()
@@ -105,12 +77,6 @@ func (runtime *CRIRuntime) ListKubeContainers() ([]string, error) {
 	return pods, nil
 }
 
-// ListKubeContainers lists running k8s containers
-func (runtime *DockerRuntime) ListKubeContainers() ([]string, error) {
-	output, err := runtime.exec.Command("docker", "ps", "-a", "--filter", "name=k8s_", "-q").CombinedOutput()
-	return strings.Fields(string(output)), err
-}
-
 // RemoveContainers removes running k8s pods
 func (runtime *CRIRuntime) RemoveContainers(containers []string) error {
 	errs := []error{}
@@ -129,24 +95,6 @@ func (runtime *CRIRuntime) RemoveContainers(containers []string) error {
 	return errorsutil.NewAggregate(errs)
 }
 
-// RemoveContainers removes running containers
-func (runtime *DockerRuntime) RemoveContainers(containers []string) error {
-	errs := []error{}
-	for _, container := range containers {
-		out, err := runtime.exec.Command("docker", "stop", container).CombinedOutput()
-		if err != nil {
-			// don't stop on errors, try to remove as many containers as possible
-			errs = append(errs, errors.Wrapf(err, "failed to stop running container %s: output: %s, error", container, string(out)))
-		} else {
-			out, err = runtime.exec.Command("docker", "rm", "--volumes", container).CombinedOutput()
-			if err != nil {
-				errs = append(errs, errors.Wrapf(err, "failed to remove running container %s: output: %s, error", container, string(out)))
-			}
-		}
-	}
-	return errorsutil.NewAggregate(errs)
-}
-
 // PullImage pulls the image
 func (runtime *CRIRuntime) PullImage(image string) error {
 	var err error
@@ -160,31 +108,12 @@ func (runtime *CRIRuntime) PullImage(image string) error {
 	return errors.Wrapf(err, "output: %s, error", out)
 }
 
-// PullImage pulls the image
-func (runtime *DockerRuntime) PullImage(image string) error {
-	var err error
-	var out []byte
-	for i := 0; i < constants.PullImageRetry; i++ {
-		out, err = runtime.exec.Command("docker", "pull", image).CombinedOutput()
-		if err == nil {
-			return nil
-		}
-	}
-	return errors.Wrapf(err, "output: %s, error", out)
-}
-
 // ImageExists checks to see if the image exists on the system
 func (runtime *CRIRuntime) ImageExists(image string) (bool, error) {
 	err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "inspecti", image).Run()
 	return err == nil, nil
 }
 
-// ImageExists checks to see if the image exists on the system
-func (runtime *DockerRuntime) ImageExists(image string) (bool, error) {
-	err := runtime.exec.Command("docker", "inspect", image).Run()
-	return err == nil, nil
-}
-
 // detectCRISocketImpl is separated out only for test purposes, DON'T call it directly, use DetectCRISocket instead
 func detectCRISocketImpl(isSocket func(string) bool) (string, error) {
 	foundCRISockets := []string{}
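
With the Docker branch gone, every CRI socket takes the same path through
the constructor. A hedged usage sketch of the simplified API; the socket
value is illustrative:

package main

import (
	"fmt"

	utilruntime "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime"
	utilsexec "k8s.io/utils/exec"
)

func main() {
	// Any CRI endpoint is handled by the single CRIRuntime implementation.
	rt, err := utilruntime.NewContainerRuntime(utilsexec.New(), "unix:///var/run/containerd/containerd.sock")
	if err != nil {
		fmt.Println(err) // crictl is missing from PATH
		return
	}
	if err := rt.IsRunning(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("socket in use:", rt.Socket())
}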

cmd/kubeadm/app/util/runtime/runtime_test.go

@@ -40,32 +40,25 @@ func TestNewContainerRuntime(t *testing.T) {
 		LookPathFunc: func(cmd string) (string, error) { return "", errors.Errorf("%s not found", cmd) },
 	}
 	cases := []struct {
-		name      string
-		execer    fakeexec.FakeExec
-		criSocket string
-		isDocker  bool
-		isError   bool
+		name    string
+		execer  fakeexec.FakeExec
+		isError bool
 	}{
-		{"valid: default cri socket", execLookPathOK, constants.DefaultDockerCRISocket, true, false},
-		{"valid: cri-o socket url", execLookPathOK, "unix:///var/run/crio/crio.sock", false, false},
-		{"invalid: no crictl", execLookPathErr, "unix:///var/run/crio/crio.sock", false, true},
+		{"valid: crictl present", execLookPathOK, false},
+		{"invalid: no crictl", execLookPathErr, true},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			runtime, err := NewContainerRuntime(&tc.execer, tc.criSocket)
+			_, err := NewContainerRuntime(&tc.execer, "unix:///some/socket.sock")
 			if err != nil {
 				if !tc.isError {
-					t.Fatalf("unexpected NewContainerRuntime error. criSocket: %s, error: %v", tc.criSocket, err)
+					t.Fatalf("unexpected NewContainerRuntime error. error: %v", err)
 				}
 				return // expected error occurs, impossible to test runtime further
 			}
 			if tc.isError && err == nil {
-				t.Fatalf("unexpected NewContainerRuntime success. criSocket: %s", tc.criSocket)
-			}
-			isDocker := runtime.IsDocker()
-			if tc.isDocker != isDocker {
-				t.Fatalf("unexpected isDocker() result %v for the criSocket %s", isDocker, tc.criSocket)
+				t.Fatal("unexpected NewContainerRuntime success")
 			}
 		})
 	}