Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00
Merge pull request #107594 from hakman/remove_container-runtime_logic
Clean up logic for deprecated flag --container-runtime in kubelet
commit 8580bbf7d7
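Reviewer's note (not part of the commit): with this change the kubelet accepts only the CRI "remote" value for the deprecated --container-runtime flag and drops the dockershim-era plumbing that carried the runtime name through the code. A minimal sketch of the new ValidateKubeletFlags behaviour, assuming the usual k8s.io/kubernetes/cmd/kubelet/app/options import path for the flags package (the other import paths appear in the diff below):

package main

import (
	"fmt"

	"k8s.io/kubernetes/cmd/kubelet/app/options"
	"k8s.io/kubernetes/pkg/kubelet/config"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

func main() {
	// "remote" (kubetypes.RemoteContainerRuntime) is the only value the flag may carry now.
	ok := &options.KubeletFlags{
		ContainerRuntimeOptions: config.ContainerRuntimeOptions{
			ContainerRuntime: kubetypes.RemoteContainerRuntime,
		},
	}
	fmt.Println(options.ValidateKubeletFlags(ok)) // expected: <nil>

	// Any other value is rejected before the kubelet starts.
	bad := &options.KubeletFlags{
		ContainerRuntimeOptions: config.ContainerRuntimeOptions{
			ContainerRuntime: "docker",
		},
	}
	fmt.Println(options.ValidateKubeletFlags(bad)) // expected: unsupported CRI runtime: "docker", only "remote" is currently supported
}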
@@ -37,6 +37,7 @@ import (
 	kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme"
 	kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/config/validation"
 	"k8s.io/kubernetes/pkg/kubelet/config"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	utilflag "k8s.io/kubernetes/pkg/util/flag"
 )
 
@@ -175,6 +176,10 @@ func ValidateKubeletFlags(f *KubeletFlags) error {
 		return fmt.Errorf("the SeccompDefault feature gate must be enabled in order to use the --seccomp-default flag")
 	}
 
+	if f.ContainerRuntime != kubetypes.RemoteContainerRuntime {
+		return fmt.Errorf("unsupported CRI runtime: %q, only %q is currently supported", f.ContainerRuntime, kubetypes.RemoteContainerRuntime)
+	}
+
 	return nil
 }
 
@@ -25,6 +25,8 @@ import (
 
 	"k8s.io/apimachinery/pkg/util/diff"
 	cliflag "k8s.io/component-base/cli/flag"
+	"k8s.io/kubernetes/pkg/kubelet/config"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 func newKubeletServerOrDie() *KubeletServer {
@@ -171,6 +173,9 @@ func TestValidateKubeletFlags(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			err := ValidateKubeletFlags(&KubeletFlags{
+				ContainerRuntimeOptions: config.ContainerRuntimeOptions{
+					ContainerRuntime: kubetypes.RemoteContainerRuntime,
+				},
 				NodeLabels: tt.labels,
 			})
 
@@ -184,8 +184,8 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
 				return fmt.Errorf("failed to validate kubelet flags: %w", err)
 			}
 
-			if kubeletFlags.ContainerRuntime == "remote" && cleanFlagSet.Changed("pod-infra-container-image") {
-				klog.InfoS("Warning: For remote container runtime, --pod-infra-container-image is ignored in kubelet, which should be set in that remote runtime instead")
+			if cleanFlagSet.Changed("pod-infra-container-image") {
+				klog.InfoS("--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime")
 			}
 
 			// load kubelet config file, if provided
@@ -612,12 +612,11 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 		cgroupRoots = append(cgroupRoots, kubeletCgroup)
 	}
 
-	runtimeCgroup, err := cm.GetRuntimeContainer(s.ContainerRuntime, s.RuntimeCgroups)
 	if err != nil {
 		klog.InfoS("Failed to get the container runtime's cgroup. Runtime system container metrics may be missing.", "err", err)
-	} else if runtimeCgroup != "" {
+	} else if s.RuntimeCgroups != "" {
 		// RuntimeCgroups is optional, so ignore if it isn't specified
-		cgroupRoots = append(cgroupRoots, runtimeCgroup)
+		cgroupRoots = append(cgroupRoots, s.RuntimeCgroups)
 	}
 
 	if s.SystemCgroups != "" {
@@ -626,8 +625,8 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 	}
 
 	if kubeDeps.CAdvisorInterface == nil {
-		imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.ContainerRuntime, s.RemoteRuntimeEndpoint)
-		kubeDeps.CAdvisorInterface, err = cadvisor.New(imageFsInfoProvider, s.RootDirectory, cgroupRoots, cadvisor.UsingLegacyCadvisorStats(s.ContainerRuntime, s.RemoteRuntimeEndpoint))
+		imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.RemoteRuntimeEndpoint)
+		kubeDeps.CAdvisorInterface, err = cadvisor.New(imageFsInfoProvider, s.RootDirectory, cgroupRoots, cadvisor.UsingLegacyCadvisorStats(s.RemoteRuntimeEndpoint))
 		if err != nil {
 			return err
 		}
@@ -703,7 +702,6 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 			RuntimeCgroupsName: s.RuntimeCgroups,
 			SystemCgroupsName: s.SystemCgroups,
 			KubeletCgroupsName: s.KubeletCgroups,
-			ContainerRuntime: s.ContainerRuntime,
 			CgroupsPerQOS: s.CgroupsPerQOS,
 			CgroupRoot: s.CgroupRoot,
 			CgroupDriver: s.CgroupDriver,
@@ -745,12 +743,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 		klog.InfoS("Failed to ApplyOOMScoreAdj", "err", err)
 	}
 
-	err = kubelet.PreInitRuntimeService(&s.KubeletConfiguration,
-		kubeDeps, &s.ContainerRuntimeOptions,
-		s.ContainerRuntime,
-		s.RuntimeCgroups,
-		s.RemoteRuntimeEndpoint,
-		s.RemoteImageEndpoint)
+	err = kubelet.PreInitRuntimeService(&s.KubeletConfiguration, kubeDeps, s.RemoteRuntimeEndpoint, s.RemoteImageEndpoint)
 	if err != nil {
 		return err
 	}
@@ -1114,7 +1107,6 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
 	k, err := createAndInitKubelet(&kubeServer.KubeletConfiguration,
 		kubeDeps,
 		&kubeServer.ContainerRuntimeOptions,
-		kubeServer.ContainerRuntime,
 		hostname,
 		hostnameOverridden,
 		nodeName,
@@ -1189,7 +1181,6 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubele
 func createAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	kubeDeps *kubelet.Dependencies,
 	crOptions *config.ContainerRuntimeOptions,
-	containerRuntime string,
 	hostname string,
 	hostnameOverridden bool,
 	nodeName types.NodeName,
@@ -1223,7 +1214,6 @@ func createAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	k, err = kubelet.NewMainKubelet(kubeCfg,
 		kubeDeps,
 		crOptions,
-		containerRuntime,
 		hostname,
 		hostnameOverridden,
 		nodeName,
@@ -26,7 +26,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	cadvisorfs "github.com/google/cadvisor/fs"
-	"k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 func TestImageFsInfoLabel(t *testing.T) {
@@ -38,13 +37,11 @@ func TestImageFsInfoLabel(t *testing.T) {
 		expectedError error
 	}{{
 		description: "LabelCrioImages should be returned",
-		runtime: types.RemoteContainerRuntime,
 		runtimeEndpoint: CrioSocket,
 		expectedLabel: cadvisorfs.LabelCrioImages,
 		expectedError: nil,
 	}, {
 		description: "Cannot find valid imagefs label",
-		runtime: "invalid-runtime",
 		runtimeEndpoint: "",
 		expectedLabel: "",
 		expectedError: fmt.Errorf("no imagefs label for configured runtime"),
@@ -52,7 +49,7 @@ func TestImageFsInfoLabel(t *testing.T) {
 
 	for _, tc := range testcases {
 		t.Run(tc.description, func(t *testing.T) {
-			infoProvider := NewImageFsInfoProvider(tc.runtime, tc.runtimeEndpoint)
+			infoProvider := NewImageFsInfoProvider(tc.runtimeEndpoint)
 			label, err := infoProvider.ImageFsInfoLabel()
 			assert.Equal(t, tc.expectedLabel, label)
 			assert.Equal(t, tc.expectedError, err)
@@ -28,7 +28,6 @@ import (
 // imageFsInfoProvider knows how to translate the configured runtime
 // to its file system label for images.
 type imageFsInfoProvider struct {
-	runtime string
 	runtimeEndpoint string
 }
 
@@ -45,6 +44,6 @@ func (i *imageFsInfoProvider) ImageFsInfoLabel() (string, error) {
 }
 
 // NewImageFsInfoProvider returns a provider for the specified runtime configuration.
-func NewImageFsInfoProvider(runtime, runtimeEndpoint string) ImageFsInfoProvider {
-	return &imageFsInfoProvider{runtime: runtime, runtimeEndpoint: runtimeEndpoint}
+func NewImageFsInfoProvider(runtimeEndpoint string) ImageFsInfoProvider {
+	return &imageFsInfoProvider{runtimeEndpoint: runtimeEndpoint}
 }
@@ -30,6 +30,6 @@ func (i *unsupportedImageFsInfoProvider) ImageFsInfoLabel() (string, error) {
 }
 
 // NewImageFsInfoProvider returns a provider for the specified runtime configuration.
-func NewImageFsInfoProvider(runtime, runtimeEndpoint string) ImageFsInfoProvider {
+func NewImageFsInfoProvider(runtimeEndpoint string) ImageFsInfoProvider {
 	return &unsupportedImageFsInfoProvider{}
 }
@@ -64,12 +64,10 @@ func EphemeralStorageCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceLis
 }
 
 // UsingLegacyCadvisorStats returns true if container stats are provided by cadvisor instead of through the CRI.
-// CRI integrations should get container metrics via CRI. Docker
-// uses the built-in cadvisor to gather such metrics on Linux for
-// historical reasons.
+// CRI integrations should get container metrics via CRI.
 // TODO: cri-o relies on cadvisor as a temporary workaround. The code should
 // be removed. Related issue:
 // https://github.com/kubernetes/kubernetes/issues/51798
-func UsingLegacyCadvisorStats(runtime, runtimeEndpoint string) bool {
+func UsingLegacyCadvisorStats(runtimeEndpoint string) bool {
 	return runtimeEndpoint == CrioSocket || runtimeEndpoint == "unix://"+CrioSocket
 }
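Reviewer's note (a sketch, not part of the diff): after this change both cadvisor helpers above key off the CRI endpoint alone. A minimal usage sketch, assuming the k8s.io/kubernetes/pkg/kubelet/cadvisor import path and a containerd socket:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
)

func main() {
	endpoint := "unix:///run/containerd/containerd.sock"

	// Single-argument constructor: the runtime-name parameter is gone.
	provider := cadvisor.NewImageFsInfoProvider(endpoint)
	label, err := provider.ImageFsInfoLabel()
	fmt.Println(label, err) // for a non-CRI-O endpoint this reports "no imagefs label for configured runtime"

	// Legacy cadvisor stats are now selected purely by endpoint (the CRI-O socket), not by runtime name.
	fmt.Println(cadvisor.UsingLegacyCadvisorStats(endpoint)) // expected: false
}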
@@ -337,8 +337,3 @@ func GetKubeletContainer(kubeletCgroups string) (string, error) {
 	}
 	return kubeletCgroups, nil
 }
-
-// GetRuntimeContainer returns the cgroup used by the container runtime
-func GetRuntimeContainer(containerRuntime, runtimeCgroups string) (string, error) {
-	return runtimeCgroups, nil
-}
@@ -70,8 +70,3 @@ func NodeAllocatableRoot(cgroupRoot string, cgroupsPerQOS bool, cgroupDriver str
 func GetKubeletContainer(kubeletCgroups string) (string, error) {
 	return "", nil
 }
-
-// GetRuntimeContainer returns the cgroup used by the container runtime
-func GetRuntimeContainer(containerRuntime, runtimeCgroups string) (string, error) {
-	return "", nil
-}
@@ -56,8 +56,7 @@ func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.")
 
 	// Docker-specific settings.
-	fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, fmt.Sprintf("Specified image will not be pruned by the image garbage collector. "+
-		"When container-runtime is set to 'docker', all containers in each pod will use the network/ipc namespaces from this image. Other CRI implementations have their own configuration to set this image."))
+	fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, fmt.Sprintf("Specified image will not be pruned by the image garbage collector. CRI implementations have their own configuration to set this image."))
 
 	// Image credential provider settings.
 	fs.StringVar(&s.ImageCredentialProviderConfigFile, "image-credential-provider-config", s.ImageCredentialProviderConfigFile, "The path to the credential provider plugin config file.")
@@ -69,9 +69,6 @@ type Runtime interface {
 	// Type returns the type of the container runtime.
 	Type() string
 
-	//SupportsSingleFileMapping returns whether the container runtime supports single file mappings or not.
-	SupportsSingleFileMapping() bool
-
 	// Version returns the version information of the container runtime.
 	Version() (Version, error)
 
@@ -179,10 +179,6 @@ func (f *FakeRuntime) Type() string {
 	return f.RuntimeType
 }
 
-func (f *FakeRuntime) SupportsSingleFileMapping() bool {
-	return true
-}
-
 func (f *FakeRuntime) Version() (kubecontainer.Version, error) {
 	f.Lock()
 	defer f.Unlock()
@@ -124,20 +124,6 @@ func (mr *MockRuntimeMockRecorder) Type() *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Type", reflect.TypeOf((*MockRuntime)(nil).Type))
 }
 
-// SupportsSingleFileMapping mocks base method
-func (m *MockRuntime) SupportsSingleFileMapping() bool {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "SupportsSingleFileMapping")
-	ret0, _ := ret[0].(bool)
-	return ret0
-}
-
-// SupportsSingleFileMapping indicates an expected call of SupportsSingleFileMapping
-func (mr *MockRuntimeMockRecorder) SupportsSingleFileMapping() *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportsSingleFileMapping", reflect.TypeOf((*MockRuntime)(nil).SupportsSingleFileMapping))
-}
-
 // Version mocks base method
 func (m *MockRuntime) Version() (container.Version, error) {
 	m.ctrl.T.Helper()
@@ -284,20 +284,11 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
 // PreInitRuntimeService will init runtime service before RunKubelet.
 func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	kubeDeps *Dependencies,
-	crOptions *config.ContainerRuntimeOptions,
-	containerRuntime string,
-	runtimeCgroups string,
 	remoteRuntimeEndpoint string,
 	remoteImageEndpoint string) error {
-	if remoteRuntimeEndpoint != "" {
-		// remoteImageEndpoint is same as remoteRuntimeEndpoint if not explicitly specified
-		if remoteImageEndpoint == "" {
-			remoteImageEndpoint = remoteRuntimeEndpoint
-		}
-	}
-
-	if containerRuntime != kubetypes.RemoteContainerRuntime {
-		return fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
+	// remoteImageEndpoint is same as remoteRuntimeEndpoint if not explicitly specified
+	if remoteRuntimeEndpoint != "" && remoteImageEndpoint == "" {
+		remoteImageEndpoint = remoteRuntimeEndpoint
 	}
 
 	var err error
@@ -308,7 +299,7 @@ func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		return err
 	}
 
-	kubeDeps.useLegacyCadvisorStats = cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint)
+	kubeDeps.useLegacyCadvisorStats = cadvisor.UsingLegacyCadvisorStats(remoteRuntimeEndpoint)
 
 	return nil
 }
@@ -318,7 +309,6 @@ func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	kubeDeps *Dependencies,
 	crOptions *config.ContainerRuntimeOptions,
-	containerRuntime string,
 	hostname string,
 	hostnameOverridden bool,
 	nodeName types.NodeName,
@@ -524,7 +514,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		syncLoopMonitor: atomic.Value{},
 		daemonEndpoints: daemonEndpoints,
 		containerManager: kubeDeps.ContainerManager,
-		containerRuntimeName: containerRuntime,
 		nodeIPs: nodeIPs,
 		nodeIPValidator: validateNodeIP,
 		clock: clock.RealClock{},
@@ -597,21 +586,17 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.KubeClient)
 	}
 
-	if containerRuntime == kubetypes.RemoteContainerRuntime {
-		// setup containerLogManager for CRI container runtime
-		containerLogManager, err := logs.NewContainerLogManager(
-			klet.runtimeService,
-			kubeDeps.OSInterface,
-			kubeCfg.ContainerLogMaxSize,
-			int(kubeCfg.ContainerLogMaxFiles),
-		)
-		if err != nil {
-			return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
-		}
-		klet.containerLogManager = containerLogManager
-	} else {
-		klet.containerLogManager = logs.NewStubContainerLogManager()
+	// setup containerLogManager for CRI container runtime
+	containerLogManager, err := logs.NewContainerLogManager(
+		klet.runtimeService,
+		kubeDeps.OSInterface,
+		kubeCfg.ContainerLogMaxSize,
+		int(kubeCfg.ContainerLogMaxFiles),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
 	}
+	klet.containerLogManager = containerLogManager
 
 	klet.reasonCache = NewReasonCache()
 	klet.workQueue = queue.NewBasicWorkQueue(klet.clock)
@@ -670,8 +655,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	klet.runtimeCache = runtimeCache
 
 	// common provider to get host file system usage associated with a pod managed by kubelet
-	hostStatsProvider := stats.NewHostStatsProvider(kubecontainer.RealOS{}, func(podUID types.UID) (string, bool) {
-		return getEtcHostsPath(klet.getPodDir(podUID)), klet.containerRuntime.SupportsSingleFileMapping()
+	hostStatsProvider := stats.NewHostStatsProvider(kubecontainer.RealOS{}, func(podUID types.UID) string {
+		return getEtcHostsPath(klet.getPodDir(podUID))
 	})
 	if kubeDeps.useLegacyCadvisorStats {
 		klet.StatsProvider = stats.NewCadvisorStatsProvider(
@@ -1000,9 +985,6 @@ type Kubelet struct {
 	// Reference to this node.
 	nodeRef *v1.ObjectReference
 
-	// The name of the container runtime
-	containerRuntimeName string
-
 	// Container runtime.
 	containerRuntime kubecontainer.Runtime
 
@@ -144,12 +144,11 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol
 // Kubernetes only mounts on /etc/hosts if:
 // - container is not an infrastructure (pause) container
 // - container is not already mounting on /etc/hosts
-// - if it is Windows and ContainerD is used.
 // Kubernetes will not mount /etc/hosts if:
 // - when the Pod sandbox is being created, its IP is still unknown. Hence, PodIP will not have been set.
 // - Windows pod contains a hostProcess container
-func shouldMountHostsFile(pod *v1.Pod, podIPs []string, supportsSingleFileMapping bool) bool {
-	shouldMount := len(podIPs) > 0 && supportsSingleFileMapping
+func shouldMountHostsFile(pod *v1.Pod, podIPs []string) bool {
+	shouldMount := len(podIPs) > 0
 	if runtime.GOOS == "windows" && utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostProcessContainers) {
 		return shouldMount && !kubecontainer.HasWindowsHostProcessContainer(pod)
 	}
|
|||||||
}
|
}
|
||||||
|
|
||||||
// makeMounts determines the mount points for the given container.
|
// makeMounts determines the mount points for the given container.
|
||||||
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar, supportsSingleFileMapping bool) ([]kubecontainer.Mount, func(), error) {
|
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) {
|
||||||
mountEtcHostsFile := shouldMountHostsFile(pod, podIPs, supportsSingleFileMapping)
|
mountEtcHostsFile := shouldMountHostsFile(pod, podIPs)
|
||||||
klog.V(3).InfoS("Creating hosts mount for container", "pod", klog.KObj(pod), "containerName", container.Name, "podIPs", podIPs, "path", mountEtcHostsFile)
|
klog.V(3).InfoS("Creating hosts mount for container", "pod", klog.KObj(pod), "containerName", container.Name, "podIPs", podIPs, "path", mountEtcHostsFile)
|
||||||
mounts := []kubecontainer.Mount{}
|
mounts := []kubecontainer.Mount{}
|
||||||
var cleanupAction func()
|
var cleanupAction func()
|
||||||
@@ -492,10 +491,8 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
 	}
 	opts.Envs = append(opts.Envs, envs...)
 
-	// we can only mount individual files (e.g.: /etc/hosts, termination-log files) on Windows only if we're using Containerd.
-	supportsSingleFileMapping := kl.containerRuntime.SupportsSingleFileMapping()
 	// only podIPs is sent to makeMounts, as podIPs is populated even if dual-stack feature flag is not enabled.
-	mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs, supportsSingleFileMapping)
+	mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs)
 	if err != nil {
 		return nil, cleanupAction, err
 	}
@@ -503,7 +500,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
 
 	// adding TerminationMessagePath on Windows is only allowed if ContainerD is used. Individual files cannot
 	// be mounted as volumes using Docker for Windows.
-	if len(container.TerminationMessagePath) != 0 && supportsSingleFileMapping {
+	if len(container.TerminationMessagePath) != 0 {
 		p := kl.getPodContainerDir(pod.UID, container.Name)
 		if err := os.MkdirAll(p, 0750); err != nil {
 			klog.ErrorS(err, "Error on creating dir", "path", p)
@@ -250,7 +250,7 @@ func TestMakeMounts(t *testing.T) {
 			},
 			}
 
-			mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", []string{""}, tc.podVolumes, fhu, fsp, nil, false)
+			mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", []string{""}, tc.podVolumes, fhu, fsp, nil)
 
 			// validate only the error if we expect an error
 			if tc.expectErr {
@@ -84,7 +84,7 @@ func TestMakeMountsWindows(t *testing.T) {
 
 	fhu := hostutil.NewFakeHostUtil(nil)
 	fsp := &subpath.FakeSubpath{}
-	mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", []string{""}, podVolumes, fhu, fsp, nil, false)
+	mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", []string{""}, podVolumes, fhu, fsp, nil)
 
 	expectedMounts := []kubecontainer.Mount{
 		{
@@ -2355,13 +2355,6 @@ func TestSyncTerminatingPodKillPod(t *testing.T) {
 	checkPodStatus(t, kl, pod, v1.PodFailed)
 }
 
-func TestPreInitRuntimeService(t *testing.T) {
-	err := PreInitRuntimeService(nil, nil, nil, "", "", "", "")
-	if err == nil {
-		t.Fatal("PreInitRuntimeService should fail when not configured with a container runtime")
-	}
-}
-
 func TestSyncLabels(t *testing.T) {
 	tests := []struct {
 		name string
@@ -393,9 +393,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
 	// The reason we create and mount the log file in here (not in kubelet) is because
 	// the file's location depends on the ID of the container, and we need to create and
 	// mount the file before actually starting the container.
-	// we can only mount individual files (e.g.: /etc/hosts, termination-log files) on Windows only if we're using Containerd.
-	supportsSingleFileMapping := m.SupportsSingleFileMapping()
-	if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 && supportsSingleFileMapping {
+	if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 {
 		// Because the PodContainerDir contains pod uid and container name which is unique enough,
 		// here we just add a random id to make the path unique for different instances
 		// of the same container.
@@ -286,12 +286,6 @@ func (m *kubeGenericRuntimeManager) Type() string {
 	return m.runtimeName
 }
 
-// SupportsSingleFileMapping returns whether the container runtime supports single file mappings or not.
-// It is supported on Windows only if the container runtime is containerd.
-func (m *kubeGenericRuntimeManager) SupportsSingleFileMapping() bool {
-	return true
-}
-
 func newRuntimeVersion(version string) (*utilversion.Version, error) {
 	if ver, err := utilversion.ParseSemantic(version); err == nil {
 		return ver, err
@@ -31,7 +31,7 @@ import (
 )
 
 // PodEtcHostsPathFunc is a function to fetch a etc hosts path by pod uid and whether etc host path is supported by the runtime
-type PodEtcHostsPathFunc func(podUID types.UID) (string, bool)
+type PodEtcHostsPathFunc func(podUID types.UID) string
 
 // metricsProviderByPath maps a path to its metrics provider
 type metricsProviderByPath map[string]volume.MetricsProvider
@@ -81,10 +81,7 @@ func (h hostStatsProvider) getPodContainerLogStats(podNamespace, podName string,
 // getPodEtcHostsStats gets status for pod etc hosts usage
 func (h hostStatsProvider) getPodEtcHostsStats(podUID types.UID, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
 	// Runtimes may not support etc hosts file (Windows with docker)
-	podEtcHostsPath, isEtcHostsSupported := h.podEtcHostsPathFunc(podUID)
-	if !isEtcHostsSupported {
-		return nil, nil
-	}
+	podEtcHostsPath := h.podEtcHostsPathFunc(podUID)
 	// Some pods have an explicit /etc/hosts mount and the Kubelet will not create an etc-hosts file for them
 	if _, err := os.Stat(podEtcHostsPath); os.IsNotExist(err) {
 		return nil, nil
@@ -39,8 +39,8 @@ func Test_hostStatsProvider_getPodEtcHostsStats(t *testing.T) {
 	}{
 		{
 			name: "Should return nil for runtimes that do not support etc host file",
-			podEtcHostsPathFunc: func(podUID types.UID) (string, bool) {
-				return "", false
+			podEtcHostsPathFunc: func(podUID types.UID) string {
+				return ""
 			},
 			podUID: "fake0001",
 			rootFsInfo: nil,
@@ -163,7 +163,6 @@ func GetHollowKubeletConfig(opt *HollowKubletOptions) (*options.KubeletFlags, *k
 	f.MaxContainerCount = 100
 	f.MaxPerPodContainerCount = 2
 	f.NodeLabels = opt.NodeLabels
-	f.ContainerRuntimeOptions.ContainerRuntime = kubetypes.RemoteContainerRuntime
 	f.RegisterSchedulable = true
 	f.RemoteImageEndpoint = "unix:///run/containerd/containerd.sock"
 