Clean up dockershim flags in the kubelet

Signed-off-by: cyclinder <qifeng.guo@daocloud.io>
Co-authored-by: Ciprian Hacman <ciprian@hakman.dev>
Signed-off-by: Ciprian Hacman <ciprian@hakman.dev>
cyclinder 2022-01-14 11:26:21 +08:00 committed by Ciprian Hacman
parent 03bcfab1a6
commit 07999dac70
17 changed files with 26 additions and 371 deletions

View File

@@ -484,7 +484,7 @@ function ensure-local-ssds-ephemeral-storage() {
safe-format-and-mount "${device}" "${ephemeral_mountpoint}"
# mount container runtime root dir on SSD
- local container_runtime="${CONTAINER_RUNTIME:-docker}"
+ local container_runtime="${CONTAINER_RUNTIME:-containerd}"
systemctl stop "$container_runtime"
# Some images remount the container runtime root dir.
umount "/var/lib/${container_runtime}" || true
@@ -2550,9 +2550,9 @@ function start-volumesnapshot-crd-and-controller {
# endpoint.
function update-container-runtime {
local -r file="$1"
- local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}"
+ local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/containerd/containerd.sock}"
sed -i \
- -e "s@{{ *fluentd_container_runtime_service *}}@${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-docker}}@g" \
+ -e "s@{{ *fluentd_container_runtime_service *}}@${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-containerd}}@g" \
-e "s@{{ *container_runtime_endpoint *}}@${container_runtime_endpoint#unix://}@g" \
"${file}"
}
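Note on the sed substitution above: the template expects a bare socket path, so the ${container_runtime_endpoint#unix://} expansion strips the unix:// scheme before templating. A minimal Go sketch of the same trimming, illustrative only and not part of the commit:

package main

import (
	"fmt"
	"strings"
)

// trimUnixScheme mirrors the shell expansion ${endpoint#unix://}: it removes
// a leading "unix://" scheme, leaving only the bare socket path.
func trimUnixScheme(endpoint string) string {
	return strings.TrimPrefix(endpoint, "unix://")
}

func main() {
	fmt.Println(trimUnixScheme("unix:///var/run/containerd/containerd.sock"))
	// Output: /var/run/containerd/containerd.sock
}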
@@ -3446,7 +3446,7 @@ function main() {
fi
log-wrap 'OverrideKubectl' override-kubectl
- container_runtime="${CONTAINER_RUNTIME:-docker}"
+ container_runtime="${CONTAINER_RUNTIME:-containerd}"
# Run the containerized mounter once to pre-cache the container image.
if [[ "${container_runtime}" == "docker" ]]; then
log-wrap 'AssembleDockerFlags' assemble-docker-flags

View File

@@ -340,7 +340,7 @@ function install-crictl {
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
- runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
+ runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/containerd/containerd.sock}
EOF
if is-preloaded "${crictl}" "${crictl_hash}"; then
@@ -584,7 +584,7 @@ function install-containerd-ubuntu {
}
function ensure-container-runtime {
- container_runtime="${CONTAINER_RUNTIME:-docker}"
+ container_runtime="${CONTAINER_RUNTIME:-containerd}"
if [[ "${container_runtime}" == "docker" ]]; then
if ! command -v docker >/dev/null 2>&1; then
log-wrap "InstallDocker" install-docker

View File

@@ -34,7 +34,7 @@ function container_runtime_monitoring {
# will also fail, and docker will be killed. This is undesirable especially when
# docker live restore is disabled.
local healthcheck_command=(docker ps)
- if [[ "${CONTAINER_RUNTIME:-docker}" != "docker" ]]; then
+ if [[ "${CONTAINER_RUNTIME:-containerd}" != "docker" ]]; then
healthcheck_command=("${crictl}" pods)
fi
# Container runtime startup takes time. Make initial attempts before starting

View File

@@ -781,23 +781,6 @@ function construct-linux-kubelet-flags {
flags+=" --resolv-conf=/run/systemd/resolve/resolv.conf"
fi
fi
- # Network plugin
- if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
- flags+=" --cni-bin-dir=/home/kubernetes/bin"
- if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" || "${ENABLE_NETD:-}" == "true" ]]; then
- # Calico uses CNI always.
- # Note that network policy won't work for master node.
- if [[ "${node_type}" == "master" ]]; then
- flags+=" --network-plugin=${NETWORK_PROVIDER}"
- else
- flags+=" --network-plugin=cni"
- fi
- else
- # Otherwise use the configured value.
- flags+=" --network-plugin=${NETWORK_PROVIDER}"
- fi
- fi
if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
fi
@@ -810,12 +793,11 @@ function construct-linux-kubelet-flags {
if [[ -n "${NODE_TAINTS:-}" ]]; then
flags+=" --register-with-taints=${NODE_TAINTS}"
fi
- if [[ "${CONTAINER_RUNTIME:-}" != "docker" ]]; then
- flags+=" --container-runtime=remote"
- if [[ "${CONTAINER_RUNTIME}" == "containerd" ]]; then
- CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-unix:///run/containerd/containerd.sock}
- flags+=" --runtime-cgroups=/system.slice/containerd.service"
- fi
+ flags+=" --container-runtime=remote"
+ if [[ "${CONTAINER_RUNTIME}" == "containerd" ]]; then
+ CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-unix:///run/containerd/containerd.sock}
+ flags+=" --runtime-cgroups=/system.slice/containerd.service"
+ fi
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then
@@ -861,17 +843,8 @@ function construct-windows-kubelet-flags {
# The directory where the TLS certs are located.
flags+=" --cert-dir=${WINDOWS_PKI_DIR}"
- flags+=" --network-plugin=cni"
- flags+=" --cni-bin-dir=${WINDOWS_CNI_DIR}"
- flags+=" --cni-conf-dir=${WINDOWS_CNI_CONFIG_DIR}"
flags+=" --pod-manifest-path=${WINDOWS_MANIFESTS_DIR}"
- # Windows images are large and we don't have gcr mirrors yet. Allow longer
- # pull progress deadline.
- flags+=" --image-pull-progress-deadline=5m"
flags+=" --enable-debugging-handlers=true"
# Configure kubelet to run as a windows service.
flags+=" --windows-service=true"
@@ -888,13 +861,10 @@ function construct-windows-kubelet-flags {
# TODO(#78628): Re-enable KubeletPodResources when the issue is fixed.
# Force disable KubeletPodResources feature on Windows until #78628 is fixed.
flags+=" --feature-gates=KubeletPodResources=false"
- if [[ "${WINDOWS_CONTAINER_RUNTIME:-}" != "docker" ]]; then
- flags+=" --container-runtime=remote"
- if [[ "${WINDOWS_CONTAINER_RUNTIME}" == "containerd" ]]; then
- WINDOWS_CONTAINER_RUNTIME_ENDPOINT=${KUBE_WINDOWS_CONTAINER_RUNTIME_ENDPOINT:-npipe:////./pipe/containerd-containerd}
- flags+=" --container-runtime-endpoint=${WINDOWS_CONTAINER_RUNTIME_ENDPOINT}"
- fi
+ flags+=" --container-runtime=remote"
+ if [[ "${WINDOWS_CONTAINER_RUNTIME}" == "containerd" ]]; then
+ WINDOWS_CONTAINER_RUNTIME_ENDPOINT=${KUBE_WINDOWS_CONTAINER_RUNTIME_ENDPOINT:-npipe:////./pipe/containerd-containerd}
+ flags+=" --container-runtime-endpoint=${WINDOWS_CONTAINER_RUNTIME_ENDPOINT}"
+ fi
KUBELET_ARGS="${flags}"
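Both construct-linux-kubelet-flags and construct-windows-kubelet-flags now pass --container-runtime=remote unconditionally and only branch to fill in containerd defaults when no endpoint override is supplied. A condensed Go sketch of the shared shape, a hypothetical helper rather than kubelet code, with names and defaults taken from the Linux branch of the diff:

package main

import "fmt"

// kubeletRuntimeFlags condenses the shell logic above: the remote flag is
// unconditional, and containerd gets a default endpoint when no override
// is supplied.
func kubeletRuntimeFlags(runtime, endpointOverride string) []string {
	flags := []string{"--container-runtime=remote"}
	if runtime == "containerd" {
		endpoint := endpointOverride
		if endpoint == "" {
			endpoint = "unix:///run/containerd/containerd.sock"
		}
		flags = append(flags,
			"--runtime-cgroups=/system.slice/containerd.service",
			"--container-runtime-endpoint="+endpoint)
	}
	return flags
}

func main() {
	fmt.Println(kubeletRuntimeFlags("containerd", ""))
}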

View File

@@ -17,10 +17,6 @@ limitations under the License.
package options
import (
- "runtime"
- "time"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/config"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
@@ -39,20 +35,8 @@ var (
// NewContainerRuntimeOptions will create a new ContainerRuntimeOptions with
// default values.
func NewContainerRuntimeOptions() *config.ContainerRuntimeOptions {
- dockerEndpoint := ""
- if runtime.GOOS != "windows" {
- dockerEndpoint = "unix:///var/run/docker.sock"
- }
return &config.ContainerRuntimeOptions{
- ContainerRuntime: kubetypes.DockerContainerRuntime,
- DockerEndpoint: dockerEndpoint,
- DockershimRootDirectory: "/var/lib/dockershim",
- PodSandboxImage: defaultPodSandboxImage,
- ImagePullProgressDeadline: metav1.Duration{Duration: 1 * time.Minute},
- CNIBinDir: "/opt/cni/bin",
- CNIConfDir: "/etc/cni/net.d",
- CNICacheDir: "/var/lib/cni/cache",
+ ContainerRuntime: kubetypes.RemoteContainerRuntime,
+ PodSandboxImage: defaultPodSandboxImage,
}
}
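With the docker and CNI fields gone, the constructor no longer needs runtime.GOOS or metav1.Duration; the defaults reduce to the runtime type and the sandbox image. A standalone sketch of the shrunken shape, using a pared-down stand-in for the kubelet struct; the pause image value below is an assumption, not taken from the diff:

package main

import "fmt"

// defaultPodSandboxImage stands in for the kubelet's real default; the
// concrete value here is assumed for illustration.
const defaultPodSandboxImage = "k8s.gcr.io/pause:3.6"

// ContainerRuntimeOptions is a pared-down stand-in with only the two
// surviving defaults.
type ContainerRuntimeOptions struct {
	ContainerRuntime string
	PodSandboxImage  string
}

func NewContainerRuntimeOptions() *ContainerRuntimeOptions {
	return &ContainerRuntimeOptions{
		ContainerRuntime: "remote", // kubetypes.RemoteContainerRuntime
		PodSandboxImage:  defaultPodSandboxImage,
	}
}

func main() {
	fmt.Printf("%+v\n", *NewContainerRuntimeOptions())
}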

View File

@@ -93,7 +93,6 @@ import (
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/server"
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
- kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
"k8s.io/kubernetes/pkg/util/flock"
nodeutil "k8s.io/kubernetes/pkg/util/node"
@@ -387,15 +386,6 @@ func UnsecuredDependencies(s *options.KubeletServer, featureGate featuregate.Fea
hu := hostutil.NewHostUtil()
var pluginRunner = exec.New()
- var dockerOptions *kubelet.DockerOptions
- if s.ContainerRuntime == kubetypes.DockerContainerRuntime {
- dockerOptions = &kubelet.DockerOptions{
- DockerEndpoint: s.DockerEndpoint,
- RuntimeRequestTimeout: s.RuntimeRequestTimeout.Duration,
- ImagePullProgressDeadline: s.ImagePullProgressDeadline.Duration,
- }
- }
plugins, err := ProbeVolumePlugins(featureGate)
if err != nil {
return nil, err
@@ -405,7 +395,6 @@ func UnsecuredDependencies(s *options.KubeletServer, featureGate featuregate.Fea
CAdvisorInterface: nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
Cloud: nil, // cloud provider might start background processes
ContainerManager: nil,
- DockerOptions: dockerOptions,
KubeClient: nil,
HeartbeatClient: nil,
EventClient: nil,

View File

@@ -222,9 +222,6 @@ else
sudo -v || exit 1
fi
- # Do not use any network plugin by default. User could override the flags with
- # test_args.
- test_args='--kubelet-flags="--network-plugin= --cni-bin-dir=" '${test_args}
# Runtime flags
test_args='--kubelet-flags="--container-runtime='${runtime}'" '${test_args}

View File

@@ -37,12 +37,6 @@ func TestImageFsInfoLabel(t *testing.T) {
expectedLabel string
expectedError error
}{{
- description: "LabelDockerImages should be returned",
- runtime: types.DockerContainerRuntime,
- runtimeEndpoint: "",
- expectedLabel: cadvisorfs.LabelDockerImages,
- expectedError: nil,
- }, {
description: "LabelCrioImages should be returned",
runtime: types.RemoteContainerRuntime,
runtimeEndpoint: CrioSocket,

View File

@@ -23,7 +23,6 @@ import (
"fmt"
cadvisorfs "github.com/google/cadvisor/fs"
- "k8s.io/kubernetes/pkg/kubelet/types"
)
// imageFsInfoProvider knows how to translate the configured runtime
@@ -36,16 +35,11 @@ type imageFsInfoProvider struct {
// ImageFsInfoLabel returns the image fs label for the configured runtime.
// For remote runtimes, it handles additional runtimes natively understood by cAdvisor.
func (i *imageFsInfoProvider) ImageFsInfoLabel() (string, error) {
- switch i.runtime {
- case types.DockerContainerRuntime:
- return cadvisorfs.LabelDockerImages, nil
- case types.RemoteContainerRuntime:
- // This is a temporary workaround to get stats for cri-o from cadvisor
- // and should be removed.
- // Related to https://github.com/kubernetes/kubernetes/issues/51798
- if i.runtimeEndpoint == CrioSocket || i.runtimeEndpoint == "unix://"+CrioSocket {
- return cadvisorfs.LabelCrioImages, nil
- }
+ // This is a temporary workaround to get stats for cri-o from cadvisor
+ // and should be removed.
+ // Related to https://github.com/kubernetes/kubernetes/issues/51798
+ if i.runtimeEndpoint == CrioSocket || i.runtimeEndpoint == "unix://"+CrioSocket {
+ return cadvisorfs.LabelCrioImages, nil
+ }
return "", fmt.Errorf("no imagefs label for configured runtime")
}
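With the Docker case gone, ImageFsInfoLabel only special-cases the CRI-O endpoint, matching both the bare socket path and its unix:// form. A self-contained sketch of that matching, assuming a CrioSocket constant and using a plain string to stand in for cadvisorfs.LabelCrioImages:

package main

import (
	"errors"
	"fmt"
)

// CrioSocket is assumed here; in the kubelet it is a shared constant.
const CrioSocket = "/var/run/crio/crio.sock"

// imageFsLabel mirrors the post-cleanup logic: only the CRI-O endpoint gets
// a dedicated cAdvisor label; every other remote runtime returns an error.
func imageFsLabel(runtimeEndpoint string) (string, error) {
	if runtimeEndpoint == CrioSocket || runtimeEndpoint == "unix://"+CrioSocket {
		return "crio-images", nil // stands in for cadvisorfs.LabelCrioImages
	}
	return "", errors.New("no imagefs label for configured runtime")
}

func main() {
	label, _ := imageFsLabel("unix:///var/run/crio/crio.sock")
	fmt.Println(label) // crio-images
}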

View File

@@ -17,14 +17,11 @@ limitations under the License.
package cadvisor
import (
- goruntime "runtime"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapi2 "github.com/google/cadvisor/info/v2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
- kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (
@@ -74,6 +71,5 @@ func EphemeralStorageCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceLis
// be removed. Related issue:
// https://github.com/kubernetes/kubernetes/issues/51798
func UsingLegacyCadvisorStats(runtime, runtimeEndpoint string) bool {
- return (runtime == kubetypes.DockerContainerRuntime && goruntime.GOOS == "linux") ||
- runtimeEndpoint == CrioSocket || runtimeEndpoint == "unix://"+CrioSocket
+ return runtimeEndpoint == CrioSocket || runtimeEndpoint == "unix://"+CrioSocket
}

View File

@@ -20,7 +20,6 @@ import (
"fmt"
"github.com/spf13/pflag"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ContainerRuntimeOptions defines options for the container runtime.
@@ -31,41 +30,9 @@ type ContainerRuntimeOptions struct {
ContainerRuntime string
// RuntimeCgroups that container runtime is expected to be isolated in.
RuntimeCgroups string
- // Docker-specific options.
- // DockershimRootDirectory is the path to the dockershim root directory. Defaults to
- // /var/lib/dockershim if unset. Exposed for integration testing (e.g. in OpenShift).
- DockershimRootDirectory string
// PodSandboxImage is the image whose network/ipc namespaces
// containers in each pod will use.
PodSandboxImage string
- // DockerEndpoint is the path to the docker endpoint to communicate with.
- DockerEndpoint string
- // If no pulling progress is made before the deadline imagePullProgressDeadline,
- // the image pulling will be cancelled. Defaults to 1m0s.
- // +optional
- ImagePullProgressDeadline metav1.Duration
- // Network plugin options.
- // networkPluginName is the name of the network plugin to be invoked for
- // various events in kubelet/pod lifecycle
- NetworkPluginName string
- // NetworkPluginMTU is the MTU to be passed to the network plugin,
- // and overrides the default MTU for cases where it cannot be automatically
- // computed (such as IPSEC).
- NetworkPluginMTU int32
- // CNIConfDir is the full path of the directory in which to search for
- // CNI config files
- CNIConfDir string
- // CNIBinDir is the full path of the directory in which to search for
- // CNI plugin binaries
- CNIBinDir string
- // CNICacheDir is the full path of the directory in which CNI should store
- // cache files
- CNICacheDir string
// Image credential provider plugin options
// ImageCredentialProviderConfigFile is the path to the credential provider plugin config file.
@@ -83,34 +50,14 @@ type ContainerRuntimeOptions struct {
// AddFlags adds flags to the container runtime, according to ContainerRuntimeOptions.
func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) {
- dockerOnlyWarning := "This docker-specific flag only works when container-runtime is set to docker."
// General settings.
+ fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible value: 'remote'.")
+ fs.MarkDeprecated("container-runtime", "will be removed in 1.27 as the only valid value is 'remote'")
fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.")
- // Docker-specific settings.
- fs.StringVar(&s.DockershimRootDirectory, "experimental-dockershim-root-directory", s.DockershimRootDirectory, "Path to the dockershim root directory.")
- fs.MarkHidden("experimental-dockershim-root-directory")
fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, fmt.Sprintf("Specified image will not be pruned by the image garbage collector. "+
"When container-runtime is set to 'docker', all containers in each pod will use the network/ipc namespaces from this image. Other CRI implementations have their own configuration to set this image."))
- fs.StringVar(&s.DockerEndpoint, "docker-endpoint", s.DockerEndpoint, fmt.Sprintf("Use this for the docker endpoint to communicate with. %s", dockerOnlyWarning))
- fs.MarkDeprecated("docker-endpoint", "will be removed along with dockershim.")
- fs.DurationVar(&s.ImagePullProgressDeadline.Duration, "image-pull-progress-deadline", s.ImagePullProgressDeadline.Duration, fmt.Sprintf("If no pulling progress is made before this deadline, the image pulling will be cancelled. %s", dockerOnlyWarning))
- fs.MarkDeprecated("image-pull-progress-deadline", "will be removed along with dockershim.")
- // Network plugin settings for Docker.
- fs.StringVar(&s.NetworkPluginName, "network-plugin", s.NetworkPluginName, fmt.Sprintf("The name of the network plugin to be invoked for various events in kubelet/pod lifecycle. %s", dockerOnlyWarning))
- fs.MarkDeprecated("network-plugin", "will be removed along with dockershim.")
- fs.StringVar(&s.CNIConfDir, "cni-conf-dir", s.CNIConfDir, fmt.Sprintf("The full path of the directory in which to search for CNI config files. %s", dockerOnlyWarning))
- fs.MarkDeprecated("cni-conf-dir", "will be removed along with dockershim.")
- fs.StringVar(&s.CNIBinDir, "cni-bin-dir", s.CNIBinDir, fmt.Sprintf("A comma-separated list of full paths of directories in which to search for CNI plugin binaries. %s", dockerOnlyWarning))
- fs.MarkDeprecated("cni-bin-dir", "will be removed along with dockershim.")
- fs.StringVar(&s.CNICacheDir, "cni-cache-dir", s.CNICacheDir, fmt.Sprintf("The full path of the directory in which CNI should store cache files. %s", dockerOnlyWarning))
- fs.MarkDeprecated("cni-cache-dir", "will be removed along with dockershim.")
- fs.Int32Var(&s.NetworkPluginMTU, "network-plugin-mtu", s.NetworkPluginMTU, fmt.Sprintf("The MTU to be passed to the network plugin, to override the default. Set to 0 to use the default 1460 MTU. %s", dockerOnlyWarning))
- fs.MarkDeprecated("network-plugin-mtu", "will be removed along with dockershim.")
// Image credential provider settings.
fs.StringVar(&s.ImageCredentialProviderConfigFile, "image-credential-provider-config", s.ImageCredentialProviderConfigFile, "The path to the credential provider plugin config file.")
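The dockershim-only flags are dropped outright, while the surviving --container-runtime flag is kept but marked deprecated via pflag's MarkDeprecated, which hides the flag from --help and prints a warning when it is used. A minimal, self-contained sketch of that pattern with github.com/spf13/pflag (the library already imported in this file); the flag set name is arbitrary:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("kubelet-sketch", pflag.ContinueOnError)

	var containerRuntime string
	fs.StringVar(&containerRuntime, "container-runtime", "remote",
		"The container runtime to use. Possible value: 'remote'.")
	// MarkDeprecated hides the flag from --help and warns on use.
	fs.MarkDeprecated("container-runtime",
		"will be removed in 1.27 as the only valid value is 'remote'")

	// Passing the deprecated flag still works but emits a warning on stderr.
	fs.Parse([]string{"--container-runtime=remote"})
	fmt.Println("container runtime:", containerRuntime)
}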

View File

@@ -224,7 +224,6 @@ type Dependencies struct {
CAdvisorInterface cadvisor.Interface
Cloud cloudprovider.Interface
ContainerManager cm.ContainerManager
- DockerOptions *DockerOptions
EventClient v1core.EventsGetter
HeartbeatClient clientset.Interface
OnHeartbeatFailure func()
@@ -246,15 +245,6 @@ type Dependencies struct {
useLegacyCadvisorStats bool
}
- // DockerOptions contains docker specific configuration. Importantly, since it
- // lives outside of `dockershim`, it should not depend on the `docker/docker`
- // client library.
- type DockerOptions struct {
- DockerEndpoint string
- RuntimeRequestTimeout time.Duration
- ImagePullProgressDeadline time.Duration
- }
// makePodSourceConfig creates a config.PodConfig from the given
// KubeletConfiguration or returns an error.
func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, nodeHasSynced func() bool) (*config.PodConfig, error) {
@@ -308,13 +298,7 @@ func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
}
- switch containerRuntime {
- case kubetypes.DockerContainerRuntime:
- return fmt.Errorf("using dockershim is not supported, please consider using a full-fledged CRI implementation")
- case kubetypes.RemoteContainerRuntime:
- // No-op.
- break
- default:
+ if containerRuntime != kubetypes.RemoteContainerRuntime {
return fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
}
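The switch collapses to a single guard because "remote" is the only value the kubelet still accepts; a dockershim request now fails the same way as any other unknown runtime. A self-contained sketch of the collapsed check, with the constant mirroring the one that survives in pkg/kubelet/types:

package main

import "fmt"

// RemoteContainerRuntime mirrors the surviving constant in
// pkg/kubelet/types after this commit.
const RemoteContainerRuntime = "remote"

// validateRuntime reproduces the collapsed check: anything other than
// "remote", including "docker", is rejected with the same error.
func validateRuntime(containerRuntime string) error {
	if containerRuntime != RemoteContainerRuntime {
		return fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
	}
	return nil
}

func main() {
	fmt.Println(validateRuntime("remote")) // <nil>
	fmt.Println(validateRuntime("docker")) // unsupported CRI runtime: "docker"
}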
@@ -837,8 +821,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.appArmorValidator = apparmor.NewValidator()
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
}
- klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime))
- klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewProcMountAdmitHandler(klet.containerRuntime))
leaseDuration := time.Duration(kubeCfg.NodeLeaseDurationSeconds) * time.Second
renewInterval := time.Duration(float64(leaseDuration) * nodeLeaseRenewIntervalFraction)

View File

@@ -21,7 +21,6 @@ import (
"fmt"
"os"
"path/filepath"
- goruntime "runtime"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
@@ -290,12 +289,7 @@ func (m *kubeGenericRuntimeManager) Type() string {
// SupportsSingleFileMapping returns whether the container runtime supports single file mappings or not.
// It is supported on Windows only if the container runtime is containerd.
func (m *kubeGenericRuntimeManager) SupportsSingleFileMapping() bool {
- switch goruntime.GOOS {
- case "windows":
- return m.Type() != types.DockerContainerRuntime
- default:
- return true
- }
+ return true
}
func newRuntimeVersion(version string) (*utilversion.Version, error) {

View File

@@ -173,142 +173,3 @@ func (a *appArmorAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
Message: fmt.Sprintf("Cannot enforce AppArmor: %v", err),
}
}
- // NewNoNewPrivsAdmitHandler returns a PodAdmitHandler which is used to evaluate
- // if a pod can be admitted from the perspective of NoNewPrivs.
- func NewNoNewPrivsAdmitHandler(runtime kubecontainer.Runtime) PodAdmitHandler {
- return &noNewPrivsAdmitHandler{
- Runtime: runtime,
- }
- }
- type noNewPrivsAdmitHandler struct {
- kubecontainer.Runtime
- }
- func (a *noNewPrivsAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
- // If the pod is already running or terminated, no need to recheck NoNewPrivs.
- if attrs.Pod.Status.Phase != v1.PodPending {
- return PodAdmitResult{Admit: true}
- }
- // If the containers in a pod do not require no-new-privs, admit it.
- if !noNewPrivsRequired(attrs.Pod) {
- return PodAdmitResult{Admit: true}
- }
- // Always admit runtimes except docker.
- if a.Runtime.Type() != kubetypes.DockerContainerRuntime {
- return PodAdmitResult{Admit: true}
- }
- // Make sure docker api version is valid.
- rversion, err := a.Runtime.APIVersion()
- if err != nil {
- return PodAdmitResult{
- Admit: false,
- Reason: "NoNewPrivs",
- Message: fmt.Sprintf("Cannot enforce NoNewPrivs: %v", err),
- }
- }
- v, err := rversion.Compare("1.23.0")
- if err != nil {
- return PodAdmitResult{
- Admit: false,
- Reason: "NoNewPrivs",
- Message: fmt.Sprintf("Cannot enforce NoNewPrivs: %v", err),
- }
- }
- // If the version is less than 1.23 it will return -1 above.
- if v == -1 {
- return PodAdmitResult{
- Admit: false,
- Reason: "NoNewPrivs",
- Message: fmt.Sprintf("Cannot enforce NoNewPrivs: docker runtime API version %q must be greater than or equal to 1.23", rversion.String()),
- }
- }
- return PodAdmitResult{Admit: true}
- }
- func noNewPrivsRequired(pod *v1.Pod) bool {
- // Iterate over pod containers and check if we added no-new-privs.
- for _, c := range pod.Spec.Containers {
- if c.SecurityContext != nil && c.SecurityContext.AllowPrivilegeEscalation != nil && !*c.SecurityContext.AllowPrivilegeEscalation {
- return true
- }
- }
- return false
- }
- // NewProcMountAdmitHandler returns a PodAdmitHandler which is used to evaluate
- // if a pod can be admitted from the perspective of ProcMount.
- func NewProcMountAdmitHandler(runtime kubecontainer.Runtime) PodAdmitHandler {
- return &procMountAdmitHandler{
- Runtime: runtime,
- }
- }
- type procMountAdmitHandler struct {
- kubecontainer.Runtime
- }
- func (a *procMountAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
- // If the pod is already running or terminated, no need to recheck NoNewPrivs.
- if attrs.Pod.Status.Phase != v1.PodPending {
- return PodAdmitResult{Admit: true}
- }
- // If the containers in a pod only need the default ProcMountType, admit it.
- if procMountIsDefault(attrs.Pod) {
- return PodAdmitResult{Admit: true}
- }
- // Always admit runtimes except docker.
- if a.Runtime.Type() != kubetypes.DockerContainerRuntime {
- return PodAdmitResult{Admit: true}
- }
- // Make sure docker api version is valid.
- // Merged in https://github.com/moby/moby/pull/36644
- rversion, err := a.Runtime.APIVersion()
- if err != nil {
- return PodAdmitResult{
- Admit: false,
- Reason: "ProcMount",
- Message: fmt.Sprintf("Cannot enforce ProcMount: %v", err),
- }
- }
- v, err := rversion.Compare("1.38.0")
- if err != nil {
- return PodAdmitResult{
- Admit: false,
- Reason: "ProcMount",
- Message: fmt.Sprintf("Cannot enforce ProcMount: %v", err),
- }
- }
- // If the version is less than 1.38 it will return -1 above.
- if v == -1 {
- return PodAdmitResult{
- Admit: false,
- Reason: "ProcMount",
- Message: fmt.Sprintf("Cannot enforce ProcMount: docker runtime API version %q must be greater than or equal to 1.38", rversion.String()),
- }
- }
- return PodAdmitResult{Admit: true}
- }
- func procMountIsDefault(pod *v1.Pod) bool {
- // Iterate over pod containers and check if we are using the DefaultProcMountType
- // for all containers.
- for _, c := range pod.Spec.Containers {
- if c.SecurityContext != nil {
- if c.SecurityContext.ProcMount != nil && *c.SecurityContext.ProcMount != v1.DefaultProcMount {
- return false
- }
- }
- }
- return true
- }

View File

@@ -27,7 +27,6 @@ const (
// Different container runtimes.
const (
- DockerContainerRuntime = "docker"
RemoteContainerRuntime = "remote"
)

View File

@@ -200,9 +200,6 @@ start_kubelet --kubeconfig "${KUBELET_KUBECONFIG}" \
--kubelet-cgroups=/kubelet \
--system-cgroups=/system \
--cgroup-root=/ \
- "--network-plugin=${NETWORK_PLUGIN}" \
- "--cni-conf-dir=${CNI_CONF_DIR}" \
- "--cni-bin-dir=${CNI_BIN_DIR}" \
--v=$log_level \
--logtostderr

View File

@@ -280,28 +280,6 @@ func (e *E2EServices) startKubelet() (*server, error) {
cmdArgs = append(cmdArgs, "--dynamic-config-dir", dynamicConfigDir)
}
- // Enable kubenet by default.
- cniBinDir, err := getCNIBinDirectory()
- if err != nil {
- return nil, err
- }
- cniConfDir, err := getCNIConfDirectory()
- if err != nil {
- return nil, err
- }
- cniCacheDir, err := getCNICacheDirectory()
- if err != nil {
- return nil, err
- }
- cmdArgs = append(cmdArgs,
- "--network-plugin=kubenet",
- "--cni-bin-dir", cniBinDir,
- "--cni-conf-dir", cniConfDir,
- "--cni-cache-dir", cniCacheDir)
// Keep hostname override for convenience.
if framework.TestContext.NodeName != "" { // If node name is specified, set hostname override.
cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
@@ -445,33 +423,6 @@ func createKubeconfigCWD() (string, error) {
return kubeconfigPath, nil
}
- // getCNIBinDirectory returns CNI directory.
- func getCNIBinDirectory() (string, error) {
- cwd, err := os.Getwd()
- if err != nil {
- return "", err
- }
- return filepath.Join(cwd, "cni", "bin"), nil
- }
- // getCNIConfDirectory returns CNI Configuration directory.
- func getCNIConfDirectory() (string, error) {
- cwd, err := os.Getwd()
- if err != nil {
- return "", err
- }
- return filepath.Join(cwd, "cni", "net.d"), nil
- }
- // getCNICacheDirectory returns CNI Cache directory.
- func getCNICacheDirectory() (string, error) {
- cwd, err := os.Getwd()
- if err != nil {
- return "", err
- }
- return filepath.Join(cwd, "cni", "cache"), nil
- }
// getDynamicConfigDir returns the directory for dynamic Kubelet configuration
func getDynamicConfigDir() (string, error) {
cwd, err := os.Getwd()