Merge pull request #113501 from pacoxu/fix-startReflector

kubelet: fix nil pointer in startReflector for standalone mode
This commit is contained in:
Kubernetes Prow Robot 2022-11-09 03:50:12 -08:00 committed by GitHub
commit 70263d55b2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 194 additions and 22 deletions

View File

@ -128,8 +128,8 @@ const (
// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed. // nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
nodeStatusUpdateRetry = 5 nodeStatusUpdateRetry = 5
// ContainerLogsDir is the location of container logs. // DefaultContainerLogsDir is the location of container logs.
ContainerLogsDir = "/var/log/containers" DefaultContainerLogsDir = "/var/log/containers"
// MaxContainerBackOff is the max backoff period, exported for the e2e test // MaxContainerBackOff is the max backoff period, exported for the e2e test
MaxContainerBackOff = 300 * time.Second MaxContainerBackOff = 300 * time.Second
@ -187,7 +187,11 @@ const (
nodeLeaseRenewIntervalFraction = 0.25 nodeLeaseRenewIntervalFraction = 0.25
) )
var etcHostsPath = getContainerEtcHostsPath() var (
// ContainerLogsDir can be overwritten for testing usage
ContainerLogsDir = DefaultContainerLogsDir
etcHostsPath = getContainerEtcHostsPath()
)
func getContainerEtcHostsPath() string { func getContainerEtcHostsPath() string {
if sysruntime.GOOS == "windows" { if sysruntime.GOOS == "windows" {
@ -558,24 +562,26 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
var secretManager secret.Manager var secretManager secret.Manager
var configMapManager configmap.Manager var configMapManager configmap.Manager
switch kubeCfg.ConfigMapAndSecretChangeDetectionStrategy { if klet.kubeClient != nil {
case kubeletconfiginternal.WatchChangeDetectionStrategy: switch kubeCfg.ConfigMapAndSecretChangeDetectionStrategy {
secretManager = secret.NewWatchingSecretManager(kubeDeps.KubeClient, klet.resyncInterval) case kubeletconfiginternal.WatchChangeDetectionStrategy:
configMapManager = configmap.NewWatchingConfigMapManager(kubeDeps.KubeClient, klet.resyncInterval) secretManager = secret.NewWatchingSecretManager(klet.kubeClient, klet.resyncInterval)
case kubeletconfiginternal.TTLCacheChangeDetectionStrategy: configMapManager = configmap.NewWatchingConfigMapManager(klet.kubeClient, klet.resyncInterval)
secretManager = secret.NewCachingSecretManager( case kubeletconfiginternal.TTLCacheChangeDetectionStrategy:
kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode)) secretManager = secret.NewCachingSecretManager(
configMapManager = configmap.NewCachingConfigMapManager( klet.kubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode)) configMapManager = configmap.NewCachingConfigMapManager(
case kubeletconfiginternal.GetChangeDetectionStrategy: klet.kubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
secretManager = secret.NewSimpleSecretManager(kubeDeps.KubeClient) case kubeletconfiginternal.GetChangeDetectionStrategy:
configMapManager = configmap.NewSimpleConfigMapManager(kubeDeps.KubeClient) secretManager = secret.NewSimpleSecretManager(klet.kubeClient)
default: configMapManager = configmap.NewSimpleConfigMapManager(klet.kubeClient)
return nil, fmt.Errorf("unknown configmap and secret manager mode: %v", kubeCfg.ConfigMapAndSecretChangeDetectionStrategy) default:
} return nil, fmt.Errorf("unknown configmap and secret manager mode: %v", kubeCfg.ConfigMapAndSecretChangeDetectionStrategy)
}
klet.secretManager = secretManager klet.secretManager = secretManager
klet.configMapManager = configMapManager klet.configMapManager = configMapManager
}
if klet.experimentalHostUserNamespaceDefaulting { if klet.experimentalHostUserNamespaceDefaulting {
klog.InfoS("Experimental host user namespace defaulting is enabled") klog.InfoS("Experimental host user namespace defaulting is enabled")

View File

@ -18,7 +18,9 @@ package kubelet
import ( import (
"context" "context"
"crypto/tls"
"fmt" "fmt"
"net"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
@ -29,7 +31,11 @@ import (
"testing" "testing"
"time" "time"
oteltrace "go.opentelemetry.io/otel/trace"
"github.com/golang/mock/gomock"
cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
@ -45,14 +51,18 @@ import (
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/flowcontrol"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2/ktesting" "k8s.io/klog/v2/ktesting"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap" "k8s.io/kubernetes/pkg/kubelet/configmap"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/cri/remote"
fakeremote "k8s.io/kubernetes/pkg/kubelet/cri/remote/fake"
"k8s.io/kubernetes/pkg/kubelet/eviction" "k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/lifecycle"
@ -66,6 +76,7 @@ import (
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing" probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
"k8s.io/kubernetes/pkg/kubelet/secret" "k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/server"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats" serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/stats" "k8s.io/kubernetes/pkg/kubelet/stats"
"k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/status"
@ -76,6 +87,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/queue" "k8s.io/kubernetes/pkg/kubelet/util/queue"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager" kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/awsebs" "k8s.io/kubernetes/pkg/volume/awsebs"
"k8s.io/kubernetes/pkg/volume/azuredd" "k8s.io/kubernetes/pkg/volume/azuredd"
@ -87,6 +99,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/subpath" "k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/utils/clock" "k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing" testingclock "k8s.io/utils/clock/testing"
utilpointer "k8s.io/utils/pointer"
) )
func init() { func init() {
@ -2715,3 +2728,140 @@ type podsByUID []*v1.Pod
func (p podsByUID) Len() int { return len(p) } func (p podsByUID) Len() int { return len(p) }
func (p podsByUID) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p podsByUID) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID } func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }
// createAndStartFakeRemoteRuntime creates and starts a fakeremote.RemoteRuntime
// CRI server listening on a freshly generated endpoint.
// It returns the running RemoteRuntime together with its endpoint address.
// Callers are responsible for invoking fakeRuntime.Stop() to shut the server down.
func createAndStartFakeRemoteRuntime(t *testing.T) (*fakeremote.RemoteRuntime, string) {
	endpoint, err := fakeremote.GenerateEndpoint()
	require.NoError(t, err)
	fakeRuntime := fakeremote.NewFakeRemoteRuntime()
	// NOTE(review): any result of Start is discarded here; if Start can report
	// a startup failure, confirm whether it should be wrapped in require.NoError.
	fakeRuntime.Start(endpoint)
	return fakeRuntime, endpoint
}
// createRemoteRuntimeService dials a CRI runtime service at the given endpoint
// with a 15s connection timeout and a no-op tracer, failing the test
// immediately if the client cannot be constructed.
func createRemoteRuntimeService(endpoint string, t *testing.T) internalapi.RuntimeService {
	svc, err := remote.NewRemoteRuntimeService(endpoint, 15*time.Second, oteltrace.NewNoopTracerProvider())
	require.NoError(t, err)
	return svc
}
func TestNewMainKubeletStandAlone(t *testing.T) {
tempDir, err := os.MkdirTemp("", "logs")
ContainerLogsDir = tempDir
assert.NoError(t, err)
defer os.RemoveAll(ContainerLogsDir)
kubeCfg := &kubeletconfiginternal.KubeletConfiguration{
SyncFrequency: metav1.Duration{Duration: time.Minute},
ConfigMapAndSecretChangeDetectionStrategy: kubeletconfiginternal.WatchChangeDetectionStrategy,
ContainerLogMaxSize: "10Mi",
ContainerLogMaxFiles: 5,
MemoryThrottlingFactor: utilpointer.Float64(0),
}
var prober volume.DynamicPluginProber
tp := oteltrace.NewNoopTracerProvider()
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
cadvisor := cadvisortest.NewMockInterface(mockCtrl)
cadvisor.EXPECT().MachineInfo().Return(&cadvisorapi.MachineInfo{}, nil).AnyTimes()
cadvisor.EXPECT().ImagesFsInfo().Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 1000,
Available: 600,
}, nil).AnyTimes()
tlsOptions := &server.TLSOptions{
Config: &tls.Config{
MinVersion: 0,
},
}
fakeRuntime, endpoint := createAndStartFakeRemoteRuntime(t)
defer func() {
fakeRuntime.Stop()
}()
fakeRecorder := &record.FakeRecorder{}
rtSvc := createRemoteRuntimeService(endpoint, t)
kubeDep := &Dependencies{
Auth: nil,
CAdvisorInterface: cadvisor,
Cloud: nil,
ContainerManager: cm.NewStubContainerManager(),
KubeClient: nil, // standalone mode
HeartbeatClient: nil,
EventClient: nil,
TracerProvider: tp,
HostUtil: hostutil.NewFakeHostUtil(nil),
Mounter: mount.NewFakeMounter(nil),
Recorder: fakeRecorder,
RemoteRuntimeService: rtSvc,
RemoteImageService: fakeRuntime.ImageService,
Subpather: &subpath.FakeSubpath{},
OOMAdjuster: oom.NewOOMAdjuster(),
OSInterface: kubecontainer.RealOS{},
DynamicPluginProber: prober,
TLSOptions: tlsOptions,
}
crOptions := &config.ContainerRuntimeOptions{}
testMainKubelet, err := NewMainKubelet(
kubeCfg,
kubeDep,
crOptions,
"hostname",
false,
"hostname",
[]net.IP{},
"",
"external",
"/tmp/cert",
"/tmp/rootdir",
"",
"",
false,
[]v1.Taint{},
[]string{},
"",
false,
false,
metav1.Duration{Duration: time.Minute},
1024,
110,
"default",
true,
true,
map[string]string{},
1024,
false,
)
assert.NoError(t, err, "NewMainKubelet should succeed")
assert.NotNil(t, testMainKubelet, "testMainKubelet should not be nil")
testMainKubelet.BirthCry()
testMainKubelet.StartGarbageCollection()
// Nil pointer panic can be reproduced if configmap manager is not nil.
// See https://github.com/kubernetes/kubernetes/issues/113492
// pod := &v1.Pod{
// ObjectMeta: metav1.ObjectMeta{
// UID: "12345678",
// Name: "bar",
// Namespace: "foo",
// },
// Spec: v1.PodSpec{
// Containers: []v1.Container{{
// EnvFrom: []v1.EnvFromSource{{
// ConfigMapRef: &v1.ConfigMapEnvSource{
// LocalObjectReference: v1.LocalObjectReference{Name: "config-map"}}},
// }}},
// Volumes: []v1.Volume{{
// VolumeSource: v1.VolumeSource{
// ConfigMap: &v1.ConfigMapVolumeSource{
// LocalObjectReference: v1.LocalObjectReference{
// Name: "config-map"}}}}},
// },
// }
// testMainKubelet.configMapManager.RegisterPod(pod)
// testMainKubelet.secretManager.RegisterPod(pod)
assert.Nil(t, testMainKubelet.configMapManager, "configmap manager should be nil if kubelet is in standalone mode")
assert.Nil(t, testMainKubelet.secretManager, "secret manager should be nil if kubelet is in standalone mode")
}

View File

@ -46,6 +46,12 @@ var _ Watcher = &realWatcher{}
// NewWatcher creates and initializes a OOMWatcher backed by Cadvisor as // NewWatcher creates and initializes a OOMWatcher backed by Cadvisor as
// the oom streamer. // the oom streamer.
func NewWatcher(recorder record.EventRecorder) (Watcher, error) { func NewWatcher(recorder record.EventRecorder) (Watcher, error) {
// for test purposes: a FakeRecorder indicates a test environment
_, ok := recorder.(*record.FakeRecorder)
if ok {
return nil, nil
}
oomStreamer, err := oomparser.New() oomStreamer, err := oomparser.New()
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -257,11 +257,21 @@ func (kvh *kubeletVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
} }
func (kvh *kubeletVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) { func (kvh *kubeletVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
return kvh.secretManager.GetSecret if kvh.secretManager != nil {
return kvh.secretManager.GetSecret
}
return func(namespace, name string) (*v1.Secret, error) {
return nil, fmt.Errorf("not supported due to running kubelet in standalone mode")
}
} }
func (kvh *kubeletVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) { func (kvh *kubeletVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
return kvh.configMapManager.GetConfigMap if kvh.configMapManager != nil {
return kvh.configMapManager.GetConfigMap
}
return func(namespace, name string) (*v1.ConfigMap, error) {
return nil, fmt.Errorf("not supported due to running kubelet in standalone mode")
}
} }
func (kvh *kubeletVolumeHost) GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { func (kvh *kubeletVolumeHost) GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {