diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go
index 7bdf842d7bf..02d7f3f9f74 100644
--- a/cmd/kubelet/app/options/options.go
+++ b/cmd/kubelet/app/options/options.go
@@ -37,6 +37,7 @@ import (
 	kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme"
 	kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/config/validation"
 	"k8s.io/kubernetes/pkg/kubelet/config"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	utilflag "k8s.io/kubernetes/pkg/util/flag"
 )
 
@@ -175,6 +176,10 @@ func ValidateKubeletFlags(f *KubeletFlags) error {
 		return fmt.Errorf("the SeccompDefault feature gate must be enabled in order to use the --seccomp-default flag")
 	}
 
+	if f.ContainerRuntime != kubetypes.RemoteContainerRuntime {
+		return fmt.Errorf("unsupported CRI runtime: %q, only %q is currently supported", f.ContainerRuntime, kubetypes.RemoteContainerRuntime)
+	}
+
 	return nil
 }
diff --git a/cmd/kubelet/app/options/options_test.go b/cmd/kubelet/app/options/options_test.go
index 3c23c774b68..b7cabcdf17a 100644
--- a/cmd/kubelet/app/options/options_test.go
+++ b/cmd/kubelet/app/options/options_test.go
@@ -25,6 +25,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/diff"
 	cliflag "k8s.io/component-base/cli/flag"
+	"k8s.io/kubernetes/pkg/kubelet/config"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 func newKubeletServerOrDie() *KubeletServer {
@@ -171,6 +173,9 @@ func TestValidateKubeletFlags(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			err := ValidateKubeletFlags(&KubeletFlags{
+				ContainerRuntimeOptions: config.ContainerRuntimeOptions{
+					ContainerRuntime: kubetypes.RemoteContainerRuntime,
+				},
 				NodeLabels: tt.labels,
 			})
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index 4c03c94d2d9..9d7c65609cc 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -184,8 +184,8 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
 				return fmt.Errorf("failed to validate kubelet flags: %w", err)
 			}
 
-			if kubeletFlags.ContainerRuntime == "remote" && cleanFlagSet.Changed("pod-infra-container-image") {
-				klog.InfoS("Warning: For remote container runtime, --pod-infra-container-image is ignored in kubelet, which should be set in that remote runtime instead")
+			if cleanFlagSet.Changed("pod-infra-container-image") {
+				klog.InfoS("--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime")
 			}
 
 			// load kubelet config file, if provided
@@ -612,12 +612,11 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 		cgroupRoots = append(cgroupRoots, kubeletCgroup)
 	}
 
-	runtimeCgroup, err := cm.GetRuntimeContainer(s.ContainerRuntime, s.RuntimeCgroups)
 	if err != nil {
 		klog.InfoS("Failed to get the container runtime's cgroup. Runtime system container metrics may be missing.", "err", err)
-	} else if runtimeCgroup != "" {
+	} else if s.RuntimeCgroups != "" {
 		// RuntimeCgroups is optional, so ignore if it isn't specified
-		cgroupRoots = append(cgroupRoots, runtimeCgroup)
+		cgroupRoots = append(cgroupRoots, s.RuntimeCgroups)
 	}
 
 	if s.SystemCgroups != "" {
@@ -626,8 +625,8 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 	}
 
 	if kubeDeps.CAdvisorInterface == nil {
-		imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.ContainerRuntime, s.RemoteRuntimeEndpoint)
-		kubeDeps.CAdvisorInterface, err = cadvisor.New(imageFsInfoProvider, s.RootDirectory, cgroupRoots, cadvisor.UsingLegacyCadvisorStats(s.ContainerRuntime, s.RemoteRuntimeEndpoint))
+		imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.RemoteRuntimeEndpoint)
+		kubeDeps.CAdvisorInterface, err = cadvisor.New(imageFsInfoProvider, s.RootDirectory, cgroupRoots, cadvisor.UsingLegacyCadvisorStats(s.RemoteRuntimeEndpoint))
 		if err != nil {
 			return err
 		}
@@ -703,7 +702,6 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 				RuntimeCgroupsName:    s.RuntimeCgroups,
 				SystemCgroupsName:     s.SystemCgroups,
 				KubeletCgroupsName:    s.KubeletCgroups,
-				ContainerRuntime:      s.ContainerRuntime,
 				CgroupsPerQOS:         s.CgroupsPerQOS,
 				CgroupRoot:            s.CgroupRoot,
 				CgroupDriver:          s.CgroupDriver,
@@ -745,12 +743,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 		klog.InfoS("Failed to ApplyOOMScoreAdj", "err", err)
 	}
 
-	err = kubelet.PreInitRuntimeService(&s.KubeletConfiguration,
-		kubeDeps, &s.ContainerRuntimeOptions,
-		s.ContainerRuntime,
-		s.RuntimeCgroups,
-		s.RemoteRuntimeEndpoint,
-		s.RemoteImageEndpoint)
+	err = kubelet.PreInitRuntimeService(&s.KubeletConfiguration, kubeDeps, s.RemoteRuntimeEndpoint, s.RemoteImageEndpoint)
 	if err != nil {
 		return err
 	}
@@ -1114,7 +1107,6 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
 	k, err := createAndInitKubelet(&kubeServer.KubeletConfiguration,
 		kubeDeps,
 		&kubeServer.ContainerRuntimeOptions,
-		kubeServer.ContainerRuntime,
 		hostname,
 		hostnameOverridden,
 		nodeName,
@@ -1189,7 +1181,6 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubele
 func createAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	kubeDeps *kubelet.Dependencies,
 	crOptions *config.ContainerRuntimeOptions,
-	containerRuntime string,
 	hostname string,
 	hostnameOverridden bool,
 	nodeName types.NodeName,
@@ -1223,7 +1214,6 @@ func createAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	k, err = kubelet.NewMainKubelet(kubeCfg,
 		kubeDeps,
 		crOptions,
-		containerRuntime,
 		hostname,
 		hostnameOverridden,
 		nodeName,
diff --git a/pkg/kubelet/cadvisor/cadvisor_linux_test.go b/pkg/kubelet/cadvisor/cadvisor_linux_test.go
index c18d6bf4605..c508dccdaae 100644
--- a/pkg/kubelet/cadvisor/cadvisor_linux_test.go
+++ b/pkg/kubelet/cadvisor/cadvisor_linux_test.go
@@ -26,7 +26,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	cadvisorfs "github.com/google/cadvisor/fs"
-	"k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 func TestImageFsInfoLabel(t *testing.T) {
@@ -38,13 +37,11 @@ func TestImageFsInfoLabel(t *testing.T) {
 		expectedError   error
 	}{{
 		description:     "LabelCrioImages should be returned",
-		runtime:         types.RemoteContainerRuntime,
 		runtimeEndpoint: CrioSocket,
 		expectedLabel:   cadvisorfs.LabelCrioImages,
 		expectedError:   nil,
 	}, {
 		description:     "Cannot find valid imagefs label",
-		runtime:         "invalid-runtime",
 		runtimeEndpoint: "",
 		expectedLabel:   "",
 		expectedError:   fmt.Errorf("no imagefs label for configured runtime"),
@@ -52,7 +49,7 @@ func TestImageFsInfoLabel(t *testing.T) {
 
 	for _, tc := range testcases {
 		t.Run(tc.description, func(t *testing.T) {
-			infoProvider := NewImageFsInfoProvider(tc.runtime, tc.runtimeEndpoint)
+			infoProvider := NewImageFsInfoProvider(tc.runtimeEndpoint)
 			label, err := infoProvider.ImageFsInfoLabel()
 			assert.Equal(t, tc.expectedLabel, label)
 			assert.Equal(t, tc.expectedError, err)
diff --git a/pkg/kubelet/cadvisor/helpers_linux.go b/pkg/kubelet/cadvisor/helpers_linux.go
index 662f9ac5def..c512d3d0510 100644
--- a/pkg/kubelet/cadvisor/helpers_linux.go
+++ b/pkg/kubelet/cadvisor/helpers_linux.go
@@ -28,7 +28,6 @@ import (
 // imageFsInfoProvider knows how to translate the configured runtime
 // to its file system label for images.
 type imageFsInfoProvider struct {
-	runtime         string
 	runtimeEndpoint string
 }
 
@@ -45,6 +44,6 @@ func (i *imageFsInfoProvider) ImageFsInfoLabel() (string, error) {
 }
 
 // NewImageFsInfoProvider returns a provider for the specified runtime configuration.
-func NewImageFsInfoProvider(runtime, runtimeEndpoint string) ImageFsInfoProvider {
-	return &imageFsInfoProvider{runtime: runtime, runtimeEndpoint: runtimeEndpoint}
+func NewImageFsInfoProvider(runtimeEndpoint string) ImageFsInfoProvider {
+	return &imageFsInfoProvider{runtimeEndpoint: runtimeEndpoint}
 }
diff --git a/pkg/kubelet/cadvisor/helpers_unsupported.go b/pkg/kubelet/cadvisor/helpers_unsupported.go
index 26e6bbc122f..2379ef1f188 100644
--- a/pkg/kubelet/cadvisor/helpers_unsupported.go
+++ b/pkg/kubelet/cadvisor/helpers_unsupported.go
@@ -30,6 +30,6 @@ func (i *unsupportedImageFsInfoProvider) ImageFsInfoLabel() (string, error) {
 }
 
 // NewImageFsInfoProvider returns a provider for the specified runtime configuration.
-func NewImageFsInfoProvider(runtime, runtimeEndpoint string) ImageFsInfoProvider {
+func NewImageFsInfoProvider(runtimeEndpoint string) ImageFsInfoProvider {
 	return &unsupportedImageFsInfoProvider{}
 }
diff --git a/pkg/kubelet/cadvisor/util.go b/pkg/kubelet/cadvisor/util.go
index cdb08ed452b..d5ad3a6b580 100644
--- a/pkg/kubelet/cadvisor/util.go
+++ b/pkg/kubelet/cadvisor/util.go
@@ -64,12 +64,10 @@ func EphemeralStorageCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceLis
 }
 
 // UsingLegacyCadvisorStats returns true if container stats are provided by cadvisor instead of through the CRI.
-// CRI integrations should get container metrics via CRI. Docker
-// uses the built-in cadvisor to gather such metrics on Linux for
-// historical reasons.
+// CRI integrations should get container metrics via CRI.
 // TODO: cri-o relies on cadvisor as a temporary workaround. The code should
 // be removed. Related issue:
 // https://github.com/kubernetes/kubernetes/issues/51798
-func UsingLegacyCadvisorStats(runtime, runtimeEndpoint string) bool {
+func UsingLegacyCadvisorStats(runtimeEndpoint string) bool {
 	return runtimeEndpoint == CrioSocket || runtimeEndpoint == "unix://"+CrioSocket
 }
diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go
index 160f35490ba..25d63fd0342 100644
--- a/pkg/kubelet/cm/helpers_linux.go
+++ b/pkg/kubelet/cm/helpers_linux.go
@@ -337,8 +337,3 @@ func GetKubeletContainer(kubeletCgroups string) (string, error) {
 	}
 	return kubeletCgroups, nil
 }
-
-// GetRuntimeContainer returns the cgroup used by the container runtime
-func GetRuntimeContainer(containerRuntime, runtimeCgroups string) (string, error) {
-	return runtimeCgroups, nil
-}
diff --git a/pkg/kubelet/cm/helpers_unsupported.go b/pkg/kubelet/cm/helpers_unsupported.go
index 323f4642b58..c0677c7bfc7 100644
--- a/pkg/kubelet/cm/helpers_unsupported.go
+++ b/pkg/kubelet/cm/helpers_unsupported.go
@@ -70,8 +70,3 @@ func NodeAllocatableRoot(cgroupRoot string, cgroupsPerQOS bool, cgroupDriver str
 func GetKubeletContainer(kubeletCgroups string) (string, error) {
 	return "", nil
 }
-
-// GetRuntimeContainer returns the cgroup used by the container runtime
-func GetRuntimeContainer(containerRuntime, runtimeCgroups string) (string, error) {
-	return "", nil
-}
diff --git a/pkg/kubelet/config/flags.go b/pkg/kubelet/config/flags.go
index d26c0a2d256..d8e451f8740 100644
--- a/pkg/kubelet/config/flags.go
+++ b/pkg/kubelet/config/flags.go
@@ -56,8 +56,7 @@ func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.")
 
 	// Docker-specific settings.
-	fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, fmt.Sprintf("Specified image will not be pruned by the image garbage collector. "+
-		"When container-runtime is set to 'docker', all containers in each pod will use the network/ipc namespaces from this image. Other CRI implementations have their own configuration to set this image."))
+	fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, fmt.Sprintf("Specified image will not be pruned by the image garbage collector. CRI implementations have their own configuration to set this image."))
 
 	// Image credential provider settings.
 	fs.StringVar(&s.ImageCredentialProviderConfigFile, "image-credential-provider-config", s.ImageCredentialProviderConfigFile, "The path to the credential provider plugin config file.")
diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go
index 63179616952..0810e938f26 100644
--- a/pkg/kubelet/container/runtime.go
+++ b/pkg/kubelet/container/runtime.go
@@ -69,9 +69,6 @@ type Runtime interface {
 	// Type returns the type of the container runtime.
 	Type() string
 
-	//SupportsSingleFileMapping returns whether the container runtime supports single file mappings or not.
-	SupportsSingleFileMapping() bool
-
 	// Version returns the version information of the container runtime.
 	Version() (Version, error)
diff --git a/pkg/kubelet/container/testing/fake_runtime.go b/pkg/kubelet/container/testing/fake_runtime.go
index fa02d4b5714..3765e87c8a6 100644
--- a/pkg/kubelet/container/testing/fake_runtime.go
+++ b/pkg/kubelet/container/testing/fake_runtime.go
@@ -179,10 +179,6 @@ func (f *FakeRuntime) Type() string {
 	return f.RuntimeType
 }
 
-func (f *FakeRuntime) SupportsSingleFileMapping() bool {
-	return true
-}
-
 func (f *FakeRuntime) Version() (kubecontainer.Version, error) {
 	f.Lock()
 	defer f.Unlock()
diff --git a/pkg/kubelet/container/testing/runtime_mock.go b/pkg/kubelet/container/testing/runtime_mock.go
index 255b767a745..4eea4b5cdc9 100644
--- a/pkg/kubelet/container/testing/runtime_mock.go
+++ b/pkg/kubelet/container/testing/runtime_mock.go
@@ -124,20 +124,6 @@ func (mr *MockRuntimeMockRecorder) Type() *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Type", reflect.TypeOf((*MockRuntime)(nil).Type))
 }
 
-// SupportsSingleFileMapping mocks base method
-func (m *MockRuntime) SupportsSingleFileMapping() bool {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "SupportsSingleFileMapping")
-	ret0, _ := ret[0].(bool)
-	return ret0
-}
-
-// SupportsSingleFileMapping indicates an expected call of SupportsSingleFileMapping
-func (mr *MockRuntimeMockRecorder) SupportsSingleFileMapping() *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportsSingleFileMapping", reflect.TypeOf((*MockRuntime)(nil).SupportsSingleFileMapping))
-}
-
 // Version mocks base method
 func (m *MockRuntime) Version() (container.Version, error) {
 	m.ctrl.T.Helper()
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 8fafbc7ffa3..c215ea52e10 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -284,20 +284,11 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
 // PreInitRuntimeService will init runtime service before RunKubelet.
 func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	kubeDeps *Dependencies,
-	crOptions *config.ContainerRuntimeOptions,
-	containerRuntime string,
-	runtimeCgroups string,
 	remoteRuntimeEndpoint string,
 	remoteImageEndpoint string) error {
-	if remoteRuntimeEndpoint != "" {
-		// remoteImageEndpoint is same as remoteRuntimeEndpoint if not explicitly specified
-		if remoteImageEndpoint == "" {
-			remoteImageEndpoint = remoteRuntimeEndpoint
-		}
-	}
-
-	if containerRuntime != kubetypes.RemoteContainerRuntime {
-		return fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
+	// remoteImageEndpoint is same as remoteRuntimeEndpoint if not explicitly specified
+	if remoteRuntimeEndpoint != "" && remoteImageEndpoint == "" {
+		remoteImageEndpoint = remoteRuntimeEndpoint
 	}
 
 	var err error
@@ -308,7 +299,7 @@ func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		return err
 	}
 
-	kubeDeps.useLegacyCadvisorStats = cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint)
+	kubeDeps.useLegacyCadvisorStats = cadvisor.UsingLegacyCadvisorStats(remoteRuntimeEndpoint)
 
 	return nil
 }
@@ -318,7 +309,6 @@ func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	kubeDeps *Dependencies,
 	crOptions *config.ContainerRuntimeOptions,
-	containerRuntime string,
 	hostname string,
 	hostnameOverridden bool,
 	nodeName types.NodeName,
@@ -524,7 +514,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		syncLoopMonitor:      atomic.Value{},
 		daemonEndpoints:      daemonEndpoints,
 		containerManager:     kubeDeps.ContainerManager,
-		containerRuntimeName: containerRuntime,
 		nodeIPs:              nodeIPs,
 		nodeIPValidator:      validateNodeIP,
 		clock:                clock.RealClock{},
@@ -597,21 +586,17 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.KubeClient)
 	}
 
-	if containerRuntime == kubetypes.RemoteContainerRuntime {
-		// setup containerLogManager for CRI container runtime
-		containerLogManager, err := logs.NewContainerLogManager(
-			klet.runtimeService,
-			kubeDeps.OSInterface,
-			kubeCfg.ContainerLogMaxSize,
-			int(kubeCfg.ContainerLogMaxFiles),
-		)
-		if err != nil {
-			return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
-		}
-		klet.containerLogManager = containerLogManager
-	} else {
-		klet.containerLogManager = logs.NewStubContainerLogManager()
+	// setup containerLogManager for CRI container runtime
+	containerLogManager, err := logs.NewContainerLogManager(
+		klet.runtimeService,
+		kubeDeps.OSInterface,
+		kubeCfg.ContainerLogMaxSize,
+		int(kubeCfg.ContainerLogMaxFiles),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
 	}
+	klet.containerLogManager = containerLogManager
 
 	klet.reasonCache = NewReasonCache()
 	klet.workQueue = queue.NewBasicWorkQueue(klet.clock)
@@ -670,8 +655,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	klet.runtimeCache = runtimeCache
 
 	// common provider to get host file system usage associated with a pod managed by kubelet
-	hostStatsProvider := stats.NewHostStatsProvider(kubecontainer.RealOS{}, func(podUID types.UID) (string, bool) {
-		return getEtcHostsPath(klet.getPodDir(podUID)), klet.containerRuntime.SupportsSingleFileMapping()
+	hostStatsProvider := stats.NewHostStatsProvider(kubecontainer.RealOS{}, func(podUID types.UID) string {
+		return getEtcHostsPath(klet.getPodDir(podUID))
 	})
 	if kubeDeps.useLegacyCadvisorStats {
 		klet.StatsProvider = stats.NewCadvisorStatsProvider(
@@ -1000,9 +985,6 @@ type Kubelet struct {
 	// Reference to this node.
 	nodeRef *v1.ObjectReference
 
-	// The name of the container runtime
-	containerRuntimeName string
-
 	// Container runtime.
 	containerRuntime kubecontainer.Runtime
diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index 1608719d69b..93f722589e3 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -144,12 +144,11 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol
 // Kubernetes only mounts on /etc/hosts if:
 // - container is not an infrastructure (pause) container
 // - container is not already mounting on /etc/hosts
-// - if it is Windows and ContainerD is used.
 // Kubernetes will not mount /etc/hosts if:
 // - when the Pod sandbox is being created, its IP is still unknown. Hence, PodIP will not have been set.
 // - Windows pod contains a hostProcess container
-func shouldMountHostsFile(pod *v1.Pod, podIPs []string, supportsSingleFileMapping bool) bool {
-	shouldMount := len(podIPs) > 0 && supportsSingleFileMapping
+func shouldMountHostsFile(pod *v1.Pod, podIPs []string) bool {
+	shouldMount := len(podIPs) > 0
 	if runtime.GOOS == "windows" && utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostProcessContainers) {
 		return shouldMount && !kubecontainer.HasWindowsHostProcessContainer(pod)
 	}
@@ -157,8 +156,8 @@ func shouldMountHostsFile(pod *v1.Pod, podIPs []string, supportsSingleFileMappin
 }
 
 // makeMounts determines the mount points for the given container.
-func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar, supportsSingleFileMapping bool) ([]kubecontainer.Mount, func(), error) {
-	mountEtcHostsFile := shouldMountHostsFile(pod, podIPs, supportsSingleFileMapping)
+func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) {
+	mountEtcHostsFile := shouldMountHostsFile(pod, podIPs)
 	klog.V(3).InfoS("Creating hosts mount for container", "pod", klog.KObj(pod), "containerName", container.Name, "podIPs", podIPs, "path", mountEtcHostsFile)
 	mounts := []kubecontainer.Mount{}
 	var cleanupAction func()
@@ -492,10 +491,8 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
 	}
 	opts.Envs = append(opts.Envs, envs...)
 
-	// we can only mount individual files (e.g.: /etc/hosts, termination-log files) on Windows only if we're using Containerd.
-	supportsSingleFileMapping := kl.containerRuntime.SupportsSingleFileMapping()
 	// only podIPs is sent to makeMounts, as podIPs is populated even if dual-stack feature flag is not enabled.
-	mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs, supportsSingleFileMapping)
+	mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs)
 	if err != nil {
 		return nil, cleanupAction, err
 	}
@@ -503,7 +500,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
 
 	// adding TerminationMessagePath on Windows is only allowed if ContainerD is used. Individual files cannot
 	// be mounted as volumes using Docker for Windows.
-	if len(container.TerminationMessagePath) != 0 && supportsSingleFileMapping {
+	if len(container.TerminationMessagePath) != 0 {
 		p := kl.getPodContainerDir(pod.UID, container.Name)
 		if err := os.MkdirAll(p, 0750); err != nil {
 			klog.ErrorS(err, "Error on creating dir", "path", p)
diff --git a/pkg/kubelet/kubelet_pods_linux_test.go b/pkg/kubelet/kubelet_pods_linux_test.go
index f5feba298ac..bcc5a1ff2df 100644
--- a/pkg/kubelet/kubelet_pods_linux_test.go
+++ b/pkg/kubelet/kubelet_pods_linux_test.go
@@ -250,7 +250,7 @@ func TestMakeMounts(t *testing.T) {
 				},
 			}
 
-			mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", []string{""}, tc.podVolumes, fhu, fsp, nil, false)
+			mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", []string{""}, tc.podVolumes, fhu, fsp, nil)
 
 			// validate only the error if we expect an error
 			if tc.expectErr {
diff --git a/pkg/kubelet/kubelet_pods_windows_test.go b/pkg/kubelet/kubelet_pods_windows_test.go
index 71067c47453..cb0bab98bfe 100644
--- a/pkg/kubelet/kubelet_pods_windows_test.go
+++ b/pkg/kubelet/kubelet_pods_windows_test.go
@@ -84,7 +84,7 @@ func TestMakeMountsWindows(t *testing.T) {
 	fhu := hostutil.NewFakeHostUtil(nil)
 	fsp := &subpath.FakeSubpath{}
 
-	mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", []string{""}, podVolumes, fhu, fsp, nil, false)
+	mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", []string{""}, podVolumes, fhu, fsp, nil)
 
 	expectedMounts := []kubecontainer.Mount{
 		{
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index ac5a9ff9e68..68be62d05f4 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -2355,13 +2355,6 @@ func TestSyncTerminatingPodKillPod(t *testing.T) {
 	checkPodStatus(t, kl, pod, v1.PodFailed)
 }
 
-func TestPreInitRuntimeService(t *testing.T) {
-	err := PreInitRuntimeService(nil, nil, nil, "", "", "", "")
-	if err == nil {
-		t.Fatal("PreInitRuntimeService should fail when not configured with a container runtime")
-	}
-}
-
 func TestSyncLabels(t *testing.T) {
 	tests := []struct {
 		name string
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go
index 3de4fcadd17..4aef9baacc7 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_container.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go
@@ -393,9 +393,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
 	// The reason we create and mount the log file in here (not in kubelet) is because
 	// the file's location depends on the ID of the container, and we need to create and
 	// mount the file before actually starting the container.
-	// we can only mount individual files (e.g.: /etc/hosts, termination-log files) on Windows only if we're using Containerd.
-	supportsSingleFileMapping := m.SupportsSingleFileMapping()
-	if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 && supportsSingleFileMapping {
+	if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 {
 		// Because the PodContainerDir contains pod uid and container name which is unique enough,
 		// here we just add a random id to make the path unique for different instances
 		// of the same container.
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go
index 9c1df5f5a96..9052e2d65b4 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go
@@ -286,12 +286,6 @@ func (m *kubeGenericRuntimeManager) Type() string {
 	return m.runtimeName
 }
 
-// SupportsSingleFileMapping returns whether the container runtime supports single file mappings or not.
-// It is supported on Windows only if the container runtime is containerd.
-func (m *kubeGenericRuntimeManager) SupportsSingleFileMapping() bool {
-	return true
-}
-
 func newRuntimeVersion(version string) (*utilversion.Version, error) {
 	if ver, err := utilversion.ParseSemantic(version); err == nil {
 		return ver, err
diff --git a/pkg/kubelet/stats/host_stats_provider.go b/pkg/kubelet/stats/host_stats_provider.go
index 450b832bff5..a9d6717843b 100644
--- a/pkg/kubelet/stats/host_stats_provider.go
+++ b/pkg/kubelet/stats/host_stats_provider.go
@@ -31,7 +31,7 @@ import (
 )
 
 // PodEtcHostsPathFunc is a function to fetch a etc hosts path by pod uid and whether etc host path is supported by the runtime
-type PodEtcHostsPathFunc func(podUID types.UID) (string, bool)
+type PodEtcHostsPathFunc func(podUID types.UID) string
 
 // metricsProviderByPath maps a path to its metrics provider
 type metricsProviderByPath map[string]volume.MetricsProvider
@@ -81,10 +81,7 @@ func (h hostStatsProvider) getPodContainerLogStats(podNamespace, podName string,
 // getPodEtcHostsStats gets status for pod etc hosts usage
 func (h hostStatsProvider) getPodEtcHostsStats(podUID types.UID, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
 	// Runtimes may not support etc hosts file (Windows with docker)
-	podEtcHostsPath, isEtcHostsSupported := h.podEtcHostsPathFunc(podUID)
-	if !isEtcHostsSupported {
-		return nil, nil
-	}
+	podEtcHostsPath := h.podEtcHostsPathFunc(podUID)
 	// Some pods have an explicit /etc/hosts mount and the Kubelet will not create an etc-hosts file for them
 	if _, err := os.Stat(podEtcHostsPath); os.IsNotExist(err) {
 		return nil, nil
diff --git a/pkg/kubelet/stats/host_stats_provider_test.go b/pkg/kubelet/stats/host_stats_provider_test.go
index 72714a1c730..310c9c6c2ad 100644
--- a/pkg/kubelet/stats/host_stats_provider_test.go
+++ b/pkg/kubelet/stats/host_stats_provider_test.go
@@ -39,8 +39,8 @@ func Test_hostStatsProvider_getPodEtcHostsStats(t *testing.T) {
 	}{
 		{
 			name: "Should return nil for runtimes that do not support etc host file",
-			podEtcHostsPathFunc: func(podUID types.UID) (string, bool) {
-				return "", false
+			podEtcHostsPathFunc: func(podUID types.UID) string {
+				return ""
 			},
 			podUID:     "fake0001",
 			rootFsInfo: nil,
diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go
index e3e35cecd00..57c8d6796b1 100644
--- a/pkg/kubemark/hollow_kubelet.go
+++ b/pkg/kubemark/hollow_kubelet.go
@@ -163,7 +163,6 @@ func GetHollowKubeletConfig(opt *HollowKubletOptions) (*options.KubeletFlags, *k
 	f.MaxContainerCount = 100
 	f.MaxPerPodContainerCount = 2
 	f.NodeLabels = opt.NodeLabels
-	f.ContainerRuntimeOptions.ContainerRuntime = kubetypes.RemoteContainerRuntime
 	f.RegisterSchedulable = true
 	f.RemoteImageEndpoint = "unix:///run/containerd/containerd.sock"