remove unused fields from Kubelet struct

David Ashpole 2017-05-10 16:25:09 -07:00
parent ad9b41dbe2
commit b69dacbd86
3 changed files with 47 additions and 123 deletions
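
The Kubelet struct was carrying fields that are either unused or only needed while NewMainKubelet wires up its sub-components: dockerClient, containerRefManager, httpClient, nodeLister (along with the private nodeLister interface), nodeConfig, nonMasqueradeCIDR, cpuCFSQuota, enableCustomMetrics, hairpinMode, and babysitDaemons. Construction-time values now live in locals (httpClient, hairpinMode) or are read straight from kubeCfg/kubeDeps at the point of use, and the tests stop seeding the deleted fields.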

@@ -377,8 +377,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
         cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0).Run()
     }
-    nodeLister := corelisters.NewNodeLister(nodeIndexer)
-    nodeInfo := &predicates.CachedNodeInfo{NodeLister: nodeLister}
+    nodeInfo := &predicates.CachedNodeInfo{NodeLister: corelisters.NewNodeLister(nodeIndexer)}

     // TODO: get the real node object of ourself,
     // and use the real node name and UID.
@@ -407,16 +406,14 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
             clusterDNS = append(clusterDNS, ip)
         }
     }

+    httpClient := &http.Client{}
     klet := &Kubelet{
         hostname:                       hostname,
         nodeName:                       nodeName,
-        dockerClient:                   kubeDeps.DockerClient,
         kubeClient:                     kubeDeps.KubeClient,
         rootDirectory:                  kubeCfg.RootDirectory,
         resyncInterval:                 kubeCfg.SyncFrequency.Duration,
-        containerRefManager:            containerRefManager,
-        httpClient:                     &http.Client{},
         sourcesReady:                   config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources),
         registerNode:                   kubeCfg.RegisterNode,
         registerSchedulable:            kubeCfg.RegisterSchedulable,
@@ -424,7 +421,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         clusterDomain:                  kubeCfg.ClusterDomain,
         clusterDNS:                     clusterDNS,
         serviceLister:                  serviceLister,
-        nodeLister:                     nodeLister,
         nodeInfo:                       nodeInfo,
         masterServiceNamespace:         kubeCfg.MasterServiceNamespace,
         streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
@@ -438,25 +434,21 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         nodeRef:                        nodeRef,
         nodeLabels:                     kubeCfg.NodeLabels,
         nodeStatusUpdateFrequency:      kubeCfg.NodeStatusUpdateFrequency.Duration,
         os:                             kubeDeps.OSInterface,
         oomWatcher:                     oomWatcher,
         cgroupsPerQOS:                  kubeCfg.CgroupsPerQOS,
         cgroupRoot:                     kubeCfg.CgroupRoot,
         mounter:                        kubeDeps.Mounter,
         writer:                         kubeDeps.Writer,
-        nonMasqueradeCIDR:              kubeCfg.NonMasqueradeCIDR,
         maxPods:                        int(kubeCfg.MaxPods),
         podsPerCore:                    int(kubeCfg.PodsPerCore),
         syncLoopMonitor:                atomic.Value{},
         resolverConfig:                 kubeCfg.ResolverConfig,
-        cpuCFSQuota:                    kubeCfg.CPUCFSQuota,
         daemonEndpoints:                daemonEndpoints,
         containerManager:               kubeDeps.ContainerManager,
         nodeIP:                         net.ParseIP(nodeIP),
         clock:                          clock.RealClock{},
         outOfDiskTransitionFrequency:   kubeCfg.OutOfDiskTransitionFrequency.Duration,
-        enableCustomMetrics:            kubeCfg.EnableCustomMetrics,
-        babysitDaemons:                 kubeCfg.BabysitDaemons,
         enableControllerAttachDetach:   kubeCfg.EnableControllerAttachDetach,
         iptClient:                      utilipt.New(utilexec.New(), utildbus.New(), utilipt.ProtocolIpv4),
         makeIPTablesUtilChains:         kubeCfg.MakeIPTablesUtilChains,
@@ -474,16 +466,15 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         glog.Infof("Experimental host user namespace defaulting is enabled.")
     }

-    if mode, err := effectiveHairpinMode(componentconfig.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, kubeCfg.NetworkPluginName); err != nil {
+    hairpinMode, err := effectiveHairpinMode(componentconfig.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, kubeCfg.NetworkPluginName)
+    if err != nil {
         // This is a non-recoverable error. Returning it up the callstack will just
         // lead to retries of the same failure, so just fail hard.
         glog.Fatalf("Invalid hairpin mode: %v", err)
-    } else {
-        klet.hairpinMode = mode
     }
-    glog.Infof("Hairpin mode set to %q", klet.hairpinMode)
+    glog.Infof("Hairpin mode set to %q", hairpinMode)

-    if plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, kubeCfg.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, klet.hairpinMode, klet.nonMasqueradeCIDR, int(kubeCfg.NetworkPluginMTU)); err != nil {
+    if plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, kubeCfg.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(kubeCfg.NetworkPluginMTU)); err != nil {
         return nil, err
     } else {
         klet.networkPlugin = plug
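
Aside: the reason the assignment moves out of the if statement is Go's scoping of if-statement initializers — a value declared there is invisible after the closing brace, which is what forced the old code to park the mode on the struct. A minimal standalone sketch of the two shapes, using hypothetical names (effectiveMode, the commented-out s.mode field), not kubelet code:

package main

import (
	"errors"
	"fmt"
)

// effectiveMode is a stand-in for a validating helper such as
// effectiveHairpinMode; the name and behavior are hypothetical.
func effectiveMode(requested string) (string, error) {
	if requested == "" {
		return "", errors.New("mode must not be empty")
	}
	return requested, nil
}

func main() {
	// Old shape: mode exists only inside the if/else, so code after the
	// statement can reach it only via a long-lived struct field:
	//
	//	if mode, err := effectiveMode("hairpin-veth"); err != nil {
	//		log.Fatalf("invalid mode: %v", err)
	//	} else {
	//		s.mode = mode // field kept solely to smuggle the value out
	//	}
	//
	// New shape: declare mode in the enclosing scope and it stays a local.
	mode, err := effectiveMode("hairpin-veth")
	if err != nil {
		fmt.Println("invalid mode:", err)
		return
	}
	fmt.Println("mode set to", mode)
}
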
@@ -515,8 +506,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         binDir = kubeCfg.NetworkPluginDir
     }
     pluginSettings := dockershim.NetworkPluginSettings{
-        HairpinMode:       klet.hairpinMode,
-        NonMasqueradeCIDR: klet.nonMasqueradeCIDR,
+        HairpinMode:       hairpinMode,
+        NonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR,
         PluginName:        kubeCfg.NetworkPluginName,
         PluginConfDir:     kubeCfg.CNIConfDir,
         PluginBinDir:      binDir,
@@ -546,7 +537,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
     case "docker":
         // Create and start the CRI shim running as a grpc server.
         streamingConfig := getStreamingConfig(kubeCfg, kubeDeps)
-        ds, err := dockershim.NewDockerService(klet.dockerClient, kubeCfg.SeccompProfileRoot, kubeCfg.PodInfraContainerImage,
+        ds, err := dockershim.NewDockerService(kubeDeps.DockerClient, kubeCfg.SeccompProfileRoot, kubeCfg.PodInfraContainerImage,
             streamingConfig, &pluginSettings, kubeCfg.RuntimeCgroups, kubeCfg.CgroupDriver, kubeCfg.DockerExecHandlerName, dockershimRootDir,
             kubeCfg.DockerDisableSharedPID)
         if err != nil {
@@ -577,12 +568,12 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         }

         // Create dockerLegacyService when the logging driver is not supported.
-        supported, err := dockershim.IsCRISupportedLogDriver(klet.dockerClient)
+        supported, err := dockershim.IsCRISupportedLogDriver(kubeDeps.DockerClient)
         if err != nil {
             return nil, err
         }
         if !supported {
-            klet.dockerLegacyService = dockershim.NewDockerLegacyService(klet.dockerClient)
+            klet.dockerLegacyService = dockershim.NewDockerLegacyService(kubeDeps.DockerClient)
         }
     case "remote":
         // No-op.
@@ -599,12 +590,12 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
             klet.podManager,
             kubeDeps.OSInterface,
             klet,
-            klet.httpClient,
+            httpClient,
             imageBackOff,
             kubeCfg.SerializeImagePulls,
             float32(kubeCfg.RegistryPullQPS),
             int(kubeCfg.RegistryBurst),
-            klet.cpuCFSQuota,
+            kubeCfg.CPUCFSQuota,
             runtimeService,
             imageService,
         )
@@ -628,9 +619,9 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
             containerRefManager,
             klet.podManager,
             klet.livenessManager,
-            klet.httpClient,
+            httpClient,
             klet.networkPlugin,
-            klet.hairpinMode == componentconfig.HairpinVeth,
+            hairpinMode == componentconfig.HairpinVeth,
             utilexec.New(),
             kubecontainer.RealOS{},
             imageBackOff,
@@ -762,7 +753,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
     klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
     if utilfeature.DefaultFeatureGate.Enabled(features.Accelerators) {
         if kubeCfg.ContainerRuntime == "docker" {
-            if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, klet.dockerClient); err != nil {
+            if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClient); err != nil {
                 return nil, err
             }
         } else {
@@ -783,17 +774,12 @@ type serviceLister interface {
     List(labels.Selector) ([]*v1.Service, error)
 }

-type nodeLister interface {
-    List(labels.Selector) ([]*v1.Node, error)
-}
-
 // Kubelet is the main kubelet implementation.
 type Kubelet struct {
     kubeletConfiguration componentconfig.KubeletConfiguration

     hostname      string
     nodeName      types.NodeName
-    dockerClient  libdocker.Interface
     runtimeCache  kubecontainer.RuntimeCache
     kubeClient    clientset.Interface
     iptClient     utilipt.Interface
@@ -816,16 +802,10 @@ type Kubelet struct {
     // Needed to observe and respond to situations that could impact node stability
     evictionManager eviction.Manager

-    // Needed to report events for containers belonging to deleted/modified pods.
-    // Tracks references for reporting events
-    containerRefManager *kubecontainer.RefManager
-
     // Optional, defaults to /logs/ from /var/log
     logServer http.Handler

     // Optional, defaults to simple Docker implementation
     runner kubecontainer.ContainerCommandRunner

-    // Optional, client for http requests, defaults to empty client
-    httpClient kubetypes.HttpGetter
-
     // cAdvisor used for container information.
     cadvisor cadvisor.Interface
@@ -850,8 +830,6 @@ type Kubelet struct {
     masterServiceNamespace string
     // serviceLister knows how to list services
     serviceLister serviceLister
-    // nodeLister knows how to list nodes
-    nodeLister nodeLister
     // nodeInfo knows how to get information about the node for this kubelet.
     nodeInfo predicates.NodeInfo
@@ -960,10 +938,6 @@ type Kubelet struct {
     // Manager of non-Runtime containers.
     containerManager cm.ContainerManager
-    nodeConfig       cm.NodeConfig
-
-    // Traffic to IPs outside this range will use IP masquerade.
-    nonMasqueradeCIDR string

     // Maximum Number of Pods which can be run by this Kubelet
     maxPods int
@@ -986,9 +960,6 @@ type Kubelet struct {
     // TODO: remove when kubenet plugin is ready
     shaper bandwidth.BandwidthShaper

-    // True if container cpu limits should be enforced via cgroup CFS quota
-    cpuCFSQuota bool
-
     // Information about the ports which are opened by daemons on Node running this Kubelet server.
     daemonEndpoints *v1.NodeDaemonEndpoints
@@ -1014,17 +985,6 @@ type Kubelet struct {
     // getting rescheduled onto the node.
     outOfDiskTransitionFrequency time.Duration

-    // support gathering custom metrics.
-    enableCustomMetrics bool
-
-    // How the Kubelet should setup hairpin NAT. Can take the values: "promiscuous-bridge"
-    // (make cbr0 promiscuous), "hairpin-veth" (set the hairpin flag on veth interfaces)
-    // or "none" (do nothing).
-    hairpinMode componentconfig.HairpinMode
-
-    // The node has babysitter process monitoring docker and kubelet
-    babysitDaemons bool
-
     // handlers called during the tryUpdateNodeStatus cycle
     setNodeStatusFuncs []func(*v1.Node) error
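
The struct-field deletions above all follow one pattern: a dependency consumed only during construction should be a local in the constructor (or an argument to the helper that needs it), not state on the long-lived object. A minimal sketch of that pattern under assumed names (Server, prober, NewServer — none of this is kubelet code):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// prober stands in for a sub-component that actually needs the HTTP
// client at runtime, the way the kubelet's runtime manager does.
type prober struct {
	client *http.Client
}

// Server keeps only what it needs after construction; the HTTP client
// is handed to the sub-component and never stored here.
type Server struct {
	timeout time.Duration
	p       *prober
}

func NewServer(timeout time.Duration) *Server {
	httpClient := &http.Client{Timeout: timeout} // local, not a field
	return &Server{
		timeout: timeout,
		p:       &prober{client: httpClient},
	}
}

func main() {
	s := NewServer(5 * time.Second)
	fmt.Println(s.timeout, s.p.client.Timeout)
}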

@@ -154,7 +154,7 @@ func newTestKubeletWithImageList(
     kubelet.nodeName = types.NodeName(testKubeletHostname)
     kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
     kubelet.runtimeState.setNetworkState(nil)
-    kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kubelet.nonMasqueradeCIDR, 1440)
+    kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, "", 1440)
     if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
         t.Fatalf("can't make a temp rootdir: %v", err)
     } else {
@@ -166,7 +166,6 @@ func newTestKubeletWithImageList(
     kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
     kubelet.masterServiceNamespace = metav1.NamespaceDefault
     kubelet.serviceLister = testServiceLister{}
-    kubelet.nodeLister = testNodeLister{}
     kubelet.nodeInfo = testNodeInfo{
         nodes: []*v1.Node{
             {
@@ -206,7 +205,6 @@ func newTestKubeletWithImageList(
     kubelet.secretManager = secretManager
     kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager)
     kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{})
-    kubelet.containerRefManager = kubecontainer.NewRefManager()
     diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
     if err != nil {
         t.Fatalf("can't initialize disk space manager: %v", err)
@@ -451,16 +449,6 @@ func TestHandlePortConflicts(t *testing.T) {
     testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
     testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

-    kl.nodeLister = testNodeLister{nodes: []*v1.Node{
-        {
-            ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
-            Status: v1.NodeStatus{
-                Allocatable: v1.ResourceList{
-                    v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
-                },
-            },
-        },
-    }}
     kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
         {
             ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
@@ -506,16 +494,6 @@ func TestHandleHostNameConflicts(t *testing.T) {
     testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
     testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

-    kl.nodeLister = testNodeLister{nodes: []*v1.Node{
-        {
-            ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
-            Status: v1.NodeStatus{
-                Allocatable: v1.ResourceList{
-                    v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
-                },
-            },
-        },
-    }}
     kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
@@ -564,7 +542,6 @@ func TestHandleNodeSelector(t *testing.T) {
             },
         },
     }
-    kl.nodeLister = testNodeLister{nodes: nodes}
     kl.nodeInfo = testNodeInfo{nodes: nodes}
     testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
     testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
@@ -603,7 +580,6 @@ func TestHandleMemExceeded(t *testing.T) {
             v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
         }}},
     }
-    kl.nodeLister = testNodeLister{nodes: nodes}
     kl.nodeInfo = testNodeInfo{nodes: nodes}
     testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
     testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
@@ -1842,16 +1818,6 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
     testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
     defer testKubelet.Cleanup()
     kl := testKubelet.kubelet
-    kl.nodeLister = testNodeLister{nodes: []*v1.Node{
-        {
-            ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
-            Status: v1.NodeStatus{
-                Allocatable: v1.ResourceList{
-                    v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
-                },
-            },
-        },
-    }}
     kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
         {
             ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},

@@ -73,23 +73,21 @@ func TestRunOnce(t *testing.T) {
     }
     defer os.RemoveAll(basePath)
     kb := &Kubelet{
         rootDirectory:       basePath,
         recorder:            &record.FakeRecorder{},
         cadvisor:            cadvisor,
-        nodeLister:          testNodeLister{},
         nodeInfo:            testNodeInfo{},
         statusManager:       status.NewManager(nil, podManager, &statustest.FakePodDeletionSafetyProvider{}),
-        containerRefManager: kubecontainer.NewRefManager(),
         podManager:          podManager,
         os:                  &containertest.FakeOS{},
         diskSpaceManager:    diskSpaceManager,
         containerRuntime:    fakeRuntime,
         reasonCache:         NewReasonCache(),
         clock:               clock.RealClock{},
         kubeClient:          &fake.Clientset{},
         hostname:            testKubeletHostname,
         nodeName:            testKubeletHostname,
         runtimeState:        newRuntimeState(time.Second),
     }
     kb.containerManager = cm.NewStubContainerManager()
@@ -113,7 +111,7 @@ func TestRunOnce(t *testing.T) {
         false, /* experimentalCheckNodeCapabilitiesBeforeMount */
         false /* keepTerminatedPodVolumes */)
-    kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kb.nonMasqueradeCIDR, network.UseDefaultMTU)
+    kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, "", network.UseDefaultMTU)
     // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
     volumeStatsAggPeriod := time.Second * 10
     kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)