Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 10:51:29 +00:00)
Merge pull request #113501 from pacoxu/fix-startReflector
kubelet: fix nil pointer in startReflector for standalone mode
This commit is contained in commit 70263d55b2
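In standalone mode the kubelet has no API server client, so the secret and configmap managers used to be built around a nil client, which could later be dereferenced and panic in startReflector (see the issue linked in the test below). The fix is a nil guard in two places: NewMainKubelet only builds the managers when klet.kubeClient is non-nil, and kubeletVolumeHost's getters fall back to an error-returning func instead of dereferencing a nil manager. A minimal sketch of that pattern follows; the types (Client, Manager, Host) are illustrative stand-ins, not the kubelet's real API.

// Minimal illustrative sketch of the guard this commit introduces.
// Client, Manager, and Host are stand-ins, not the kubelet's real API.
package main

import (
    "errors"
    "fmt"
)

type Client struct{ name string }

// Manager caches objects fetched through a Client and must never hold a nil one.
type Manager struct{ client *Client }

func (m *Manager) Get(name string) (string, error) {
    // Before the fix, code like this could run with a nil client and panic.
    return fmt.Sprintf("%s/%s", m.client.name, name), nil
}

// Host keeps its manager nil when no API client is available (standalone mode).
type Host struct{ manager *Manager }

// GetFunc mirrors the pattern added to kubeletVolumeHost: return an
// error-producing func instead of dereferencing a nil manager.
func (h *Host) GetFunc() func(name string) (string, error) {
    if h.manager != nil {
        return h.manager.Get
    }
    return func(name string) (string, error) {
        return "", errors.New("not supported due to running in standalone mode")
    }
}

func main() {
    var client *Client // nil, as when the kubelet runs without an API server
    host := &Host{}
    if client != nil { // the guard: only build client-backed managers when a client exists
        host.manager = &Manager{client: client}
    }
    if _, err := host.GetFunc()("config-map"); err != nil {
        fmt.Println("standalone mode:", err) // graceful error instead of a nil-pointer panic
    }
}

The test added below (TestNewMainKubeletStandAlone) asserts exactly this behavior: with KubeClient nil, kubelet construction still succeeds and both managers stay nil.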
@@ -128,8 +128,8 @@ const (
    // nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
    nodeStatusUpdateRetry = 5

-   // ContainerLogsDir is the location of container logs.
-   ContainerLogsDir = "/var/log/containers"
+   // DefaultContainerLogsDir is the location of container logs.
+   DefaultContainerLogsDir = "/var/log/containers"

    // MaxContainerBackOff is the max backoff period, exported for the e2e test
    MaxContainerBackOff = 300 * time.Second
@@ -187,7 +187,11 @@ const (
    nodeLeaseRenewIntervalFraction = 0.25
)

-var etcHostsPath = getContainerEtcHostsPath()
+var (
+   // ContainerLogsDir can be overwritten for testing usage
+   ContainerLogsDir = DefaultContainerLogsDir
+   etcHostsPath     = getContainerEtcHostsPath()
+)

func getContainerEtcHostsPath() string {
    if sysruntime.GOOS == "windows" {
@@ -558,24 +562,26 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,

    var secretManager secret.Manager
    var configMapManager configmap.Manager
-   switch kubeCfg.ConfigMapAndSecretChangeDetectionStrategy {
-   case kubeletconfiginternal.WatchChangeDetectionStrategy:
-       secretManager = secret.NewWatchingSecretManager(kubeDeps.KubeClient, klet.resyncInterval)
-       configMapManager = configmap.NewWatchingConfigMapManager(kubeDeps.KubeClient, klet.resyncInterval)
-   case kubeletconfiginternal.TTLCacheChangeDetectionStrategy:
-       secretManager = secret.NewCachingSecretManager(
-           kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
-       configMapManager = configmap.NewCachingConfigMapManager(
-           kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
-   case kubeletconfiginternal.GetChangeDetectionStrategy:
-       secretManager = secret.NewSimpleSecretManager(kubeDeps.KubeClient)
-       configMapManager = configmap.NewSimpleConfigMapManager(kubeDeps.KubeClient)
-   default:
-       return nil, fmt.Errorf("unknown configmap and secret manager mode: %v", kubeCfg.ConfigMapAndSecretChangeDetectionStrategy)
-   }
+   if klet.kubeClient != nil {
+       switch kubeCfg.ConfigMapAndSecretChangeDetectionStrategy {
+       case kubeletconfiginternal.WatchChangeDetectionStrategy:
+           secretManager = secret.NewWatchingSecretManager(klet.kubeClient, klet.resyncInterval)
+           configMapManager = configmap.NewWatchingConfigMapManager(klet.kubeClient, klet.resyncInterval)
+       case kubeletconfiginternal.TTLCacheChangeDetectionStrategy:
+           secretManager = secret.NewCachingSecretManager(
+               klet.kubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
+           configMapManager = configmap.NewCachingConfigMapManager(
+               klet.kubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
+       case kubeletconfiginternal.GetChangeDetectionStrategy:
+           secretManager = secret.NewSimpleSecretManager(klet.kubeClient)
+           configMapManager = configmap.NewSimpleConfigMapManager(klet.kubeClient)
+       default:
+           return nil, fmt.Errorf("unknown configmap and secret manager mode: %v", kubeCfg.ConfigMapAndSecretChangeDetectionStrategy)
+       }

-   klet.secretManager = secretManager
-   klet.configMapManager = configMapManager
+       klet.secretManager = secretManager
+       klet.configMapManager = configMapManager
+   }

    if klet.experimentalHostUserNamespaceDefaulting {
        klog.InfoS("Experimental host user namespace defaulting is enabled")
@@ -18,7 +18,9 @@ package kubelet

import (
    "context"
    "crypto/tls"
    "fmt"
    "net"
    "os"
    "path/filepath"
    "reflect"
@@ -29,7 +31,11 @@ import (
    "testing"
    "time"

    oteltrace "go.opentelemetry.io/otel/trace"

    "github.com/golang/mock/gomock"
    cadvisorapi "github.com/google/cadvisor/info/v1"
    cadvisorapiv2 "github.com/google/cadvisor/info/v2"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    core "k8s.io/client-go/testing"
@@ -45,14 +51,18 @@ import (
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/flowcontrol"
    internalapi "k8s.io/cri-api/pkg/apis"
    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
    "k8s.io/klog/v2/ktesting"
    kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
    cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
    "k8s.io/kubernetes/pkg/kubelet/cm"
    "k8s.io/kubernetes/pkg/kubelet/config"
    "k8s.io/kubernetes/pkg/kubelet/configmap"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
    "k8s.io/kubernetes/pkg/kubelet/cri/remote"
    fakeremote "k8s.io/kubernetes/pkg/kubelet/cri/remote/fake"
    "k8s.io/kubernetes/pkg/kubelet/eviction"
    "k8s.io/kubernetes/pkg/kubelet/images"
    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -66,6 +76,7 @@ import (
    proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
    probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
    "k8s.io/kubernetes/pkg/kubelet/secret"
    "k8s.io/kubernetes/pkg/kubelet/server"
    serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
    "k8s.io/kubernetes/pkg/kubelet/stats"
    "k8s.io/kubernetes/pkg/kubelet/status"
@@ -76,6 +87,7 @@ import (
    "k8s.io/kubernetes/pkg/kubelet/util/queue"
    kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
    schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
    "k8s.io/kubernetes/pkg/util/oom"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/awsebs"
    "k8s.io/kubernetes/pkg/volume/azuredd"
@@ -87,6 +99,7 @@ import (
    "k8s.io/kubernetes/pkg/volume/util/subpath"
    "k8s.io/utils/clock"
    testingclock "k8s.io/utils/clock/testing"
    utilpointer "k8s.io/utils/pointer"
)

func init() {
@@ -2715,3 +2728,140 @@ type podsByUID []*v1.Pod
func (p podsByUID) Len() int { return len(p) }
func (p podsByUID) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }

// createAndStartFakeRemoteRuntime creates and starts fakeremote.RemoteRuntime.
// It returns the RemoteRuntime and its endpoint on success.
// Users should call fakeRuntime.Stop() to clean up the server.
func createAndStartFakeRemoteRuntime(t *testing.T) (*fakeremote.RemoteRuntime, string) {
    endpoint, err := fakeremote.GenerateEndpoint()
    require.NoError(t, err)

    fakeRuntime := fakeremote.NewFakeRemoteRuntime()
    fakeRuntime.Start(endpoint)

    return fakeRuntime, endpoint
}

func createRemoteRuntimeService(endpoint string, t *testing.T) internalapi.RuntimeService {
    runtimeService, err := remote.NewRemoteRuntimeService(endpoint, 15*time.Second, oteltrace.NewNoopTracerProvider())
    require.NoError(t, err)
    return runtimeService
}

func TestNewMainKubeletStandAlone(t *testing.T) {
    tempDir, err := os.MkdirTemp("", "logs")
    ContainerLogsDir = tempDir
    assert.NoError(t, err)
    defer os.RemoveAll(ContainerLogsDir)
    kubeCfg := &kubeletconfiginternal.KubeletConfiguration{
        SyncFrequency: metav1.Duration{Duration: time.Minute},
        ConfigMapAndSecretChangeDetectionStrategy: kubeletconfiginternal.WatchChangeDetectionStrategy,
        ContainerLogMaxSize:    "10Mi",
        ContainerLogMaxFiles:   5,
        MemoryThrottlingFactor: utilpointer.Float64(0),
    }
    var prober volume.DynamicPluginProber
    tp := oteltrace.NewNoopTracerProvider()
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    cadvisor := cadvisortest.NewMockInterface(mockCtrl)
    cadvisor.EXPECT().MachineInfo().Return(&cadvisorapi.MachineInfo{}, nil).AnyTimes()
    cadvisor.EXPECT().ImagesFsInfo().Return(cadvisorapiv2.FsInfo{
        Usage:     400,
        Capacity:  1000,
        Available: 600,
    }, nil).AnyTimes()
    tlsOptions := &server.TLSOptions{
        Config: &tls.Config{
            MinVersion: 0,
        },
    }
    fakeRuntime, endpoint := createAndStartFakeRemoteRuntime(t)
    defer func() {
        fakeRuntime.Stop()
    }()
    fakeRecorder := &record.FakeRecorder{}
    rtSvc := createRemoteRuntimeService(endpoint, t)
    kubeDep := &Dependencies{
        Auth:                 nil,
        CAdvisorInterface:    cadvisor,
        Cloud:                nil,
        ContainerManager:     cm.NewStubContainerManager(),
        KubeClient:           nil, // standalone mode
        HeartbeatClient:      nil,
        EventClient:          nil,
        TracerProvider:       tp,
        HostUtil:             hostutil.NewFakeHostUtil(nil),
        Mounter:              mount.NewFakeMounter(nil),
        Recorder:             fakeRecorder,
        RemoteRuntimeService: rtSvc,
        RemoteImageService:   fakeRuntime.ImageService,
        Subpather:            &subpath.FakeSubpath{},
        OOMAdjuster:          oom.NewOOMAdjuster(),
        OSInterface:          kubecontainer.RealOS{},
        DynamicPluginProber:  prober,
        TLSOptions:           tlsOptions,
    }
    crOptions := &config.ContainerRuntimeOptions{}

    testMainKubelet, err := NewMainKubelet(
        kubeCfg,
        kubeDep,
        crOptions,
        "hostname",
        false,
        "hostname",
        []net.IP{},
        "",
        "external",
        "/tmp/cert",
        "/tmp/rootdir",
        "",
        "",
        false,
        []v1.Taint{},
        []string{},
        "",
        false,
        false,
        metav1.Duration{Duration: time.Minute},
        1024,
        110,
        "default",
        true,
        true,
        map[string]string{},
        1024,
        false,
    )
    assert.NoError(t, err, "NewMainKubelet should succeed")
    assert.NotNil(t, testMainKubelet, "testMainKubelet should not be nil")

    testMainKubelet.BirthCry()
    testMainKubelet.StartGarbageCollection()
    // Nil pointer panic can be reproduced if configmap manager is not nil.
    // See https://github.com/kubernetes/kubernetes/issues/113492
    // pod := &v1.Pod{
    //     ObjectMeta: metav1.ObjectMeta{
    //         UID:       "12345678",
    //         Name:      "bar",
    //         Namespace: "foo",
    //     },
    //     Spec: v1.PodSpec{
    //         Containers: []v1.Container{{
    //             EnvFrom: []v1.EnvFromSource{{
    //                 ConfigMapRef: &v1.ConfigMapEnvSource{
    //                     LocalObjectReference: v1.LocalObjectReference{Name: "config-map"}}},
    //             }}},
    //         Volumes: []v1.Volume{{
    //             VolumeSource: v1.VolumeSource{
    //                 ConfigMap: &v1.ConfigMapVolumeSource{
    //                     LocalObjectReference: v1.LocalObjectReference{
    //                         Name: "config-map"}}}}},
    //     },
    // }
    // testMainKubelet.configMapManager.RegisterPod(pod)
    // testMainKubelet.secretManager.RegisterPod(pod)
    assert.Nil(t, testMainKubelet.configMapManager, "configmap manager should be nil if kubelet is in standalone mode")
    assert.Nil(t, testMainKubelet.secretManager, "secret manager should be nil if kubelet is in standalone mode")
}
@@ -46,6 +46,12 @@ var _ Watcher = &realWatcher{}
// NewWatcher creates and initializes an OOMWatcher backed by Cadvisor as
// the oom streamer.
func NewWatcher(recorder record.EventRecorder) (Watcher, error) {
+   // for test purpose
+   _, ok := recorder.(*record.FakeRecorder)
+   if ok {
+       return nil, nil
+   }
+
    oomStreamer, err := oomparser.New()
    if err != nil {
        return nil, err
@@ -257,11 +257,21 @@ func (kvh *kubeletVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
}

func (kvh *kubeletVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
-   return kvh.secretManager.GetSecret
+   if kvh.secretManager != nil {
+       return kvh.secretManager.GetSecret
+   }
+   return func(namespace, name string) (*v1.Secret, error) {
+       return nil, fmt.Errorf("not supported due to running kubelet in standalone mode")
+   }
}

func (kvh *kubeletVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
-   return kvh.configMapManager.GetConfigMap
+   if kvh.configMapManager != nil {
+       return kvh.configMapManager.GetConfigMap
+   }
+   return func(namespace, name string) (*v1.ConfigMap, error) {
+       return nil, fmt.Errorf("not supported due to running kubelet in standalone mode")
+   }
}

func (kvh *kubeletVolumeHost) GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {