Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 12:15:52 +00:00)
remove unused fields from Kubelet struct
This commit is contained in:
parent ad9b41dbe2
commit b69dacbd86
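The pattern repeated throughout this diff: fields such as dockerClient, httpClient, nodeLister, nonMasqueradeCIDR, cpuCFSQuota, and hairpinMode were only consumed while NewMainKubelet wires up its sub-components, so each becomes a local variable (or a direct kubeCfg/kubeDeps read) and the struct field is deleted. A minimal sketch of the refactor, with hypothetical names rather than the real Kubernetes types:

package main

import "net/http"

type prober struct{ client *http.Client }

func newProber(c *http.Client) *prober { return &prober{client: c} }

// Before: the constructor parks the client on the struct purely so the
// next line can read it back; nothing touches s.client afterwards.
type serverBefore struct {
	client *http.Client
	prober *prober
}

func newServerBefore() *serverBefore {
	s := &serverBefore{client: &http.Client{}}
	s.prober = newProber(s.client)
	return s
}

// After: the single-use value is a local and the field disappears.
type serverAfter struct {
	prober *prober
}

func newServerAfter() *serverAfter {
	httpClient := &http.Client{}
	return &serverAfter{prober: newProber(httpClient)}
}

func main() {
	_ = newServerBefore()
	_ = newServerAfter()
}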
@@ -377,8 +377,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
         cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0).Run()
     }
-    nodeLister := corelisters.NewNodeLister(nodeIndexer)
-    nodeInfo := &predicates.CachedNodeInfo{NodeLister: nodeLister}
+    nodeInfo := &predicates.CachedNodeInfo{NodeLister: corelisters.NewNodeLister(nodeIndexer)}

     // TODO: get the real node object of ourself,
     // and use the real node name and UID.
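For context, the lister wiring in the unchanged lines above, redone as a self-contained sketch against current client-go (Reflector.Run has since gained a stop channel; the helper name and stop-channel plumbing here are illustrative, not the kubelet's actual code):

package nodelister

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
)

// newNodeLister watches a single node and returns a lister backed by the
// reflector-fed indexer, mirroring the NewMainKubelet code above.
func newNodeLister(client kubernetes.Interface, nodeName string, stopCh <-chan struct{}) corelisters.NodeLister {
	fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
	nodeLW := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
	nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	go cache.NewReflector(nodeLW, &corev1.Node{}, nodeIndexer, 0).Run(stopCh)
	return corelisters.NewNodeLister(nodeIndexer)
}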
@@ -407,16 +406,14 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
             clusterDNS = append(clusterDNS, ip)
         }
     }
+    httpClient := &http.Client{}

     klet := &Kubelet{
         hostname:            hostname,
         nodeName:            nodeName,
-        dockerClient:        kubeDeps.DockerClient,
         kubeClient:          kubeDeps.KubeClient,
         rootDirectory:       kubeCfg.RootDirectory,
         resyncInterval:      kubeCfg.SyncFrequency.Duration,
         containerRefManager: containerRefManager,
-        httpClient:          &http.Client{},
         sourcesReady:        config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources),
         registerNode:        kubeCfg.RegisterNode,
         registerSchedulable: kubeCfg.RegisterSchedulable,
@@ -424,7 +421,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         clusterDomain:          kubeCfg.ClusterDomain,
         clusterDNS:             clusterDNS,
         serviceLister:          serviceLister,
-        nodeLister:             nodeLister,
         nodeInfo:               nodeInfo,
         masterServiceNamespace: kubeCfg.MasterServiceNamespace,
         streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
@@ -438,25 +434,21 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         nodeRef:                   nodeRef,
         nodeLabels:                kubeCfg.NodeLabels,
         nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
-        os:                        kubeDeps.OSInterface,
-        oomWatcher:                oomWatcher,
-        cgroupsPerQOS:             kubeCfg.CgroupsPerQOS,
-        cgroupRoot:                kubeCfg.CgroupRoot,
-        mounter:                   kubeDeps.Mounter,
-        writer:                    kubeDeps.Writer,
-        nonMasqueradeCIDR:         kubeCfg.NonMasqueradeCIDR,
-        maxPods:                   int(kubeCfg.MaxPods),
-        podsPerCore:               int(kubeCfg.PodsPerCore),
-        syncLoopMonitor:           atomic.Value{},
-        resolverConfig:            kubeCfg.ResolverConfig,
-        cpuCFSQuota:               kubeCfg.CPUCFSQuota,
-        daemonEndpoints:           daemonEndpoints,
-        containerManager:          kubeDeps.ContainerManager,
-        nodeIP:                    net.ParseIP(nodeIP),
-        clock:                     clock.RealClock{},
+        os:               kubeDeps.OSInterface,
+        oomWatcher:       oomWatcher,
+        cgroupsPerQOS:    kubeCfg.CgroupsPerQOS,
+        cgroupRoot:       kubeCfg.CgroupRoot,
+        mounter:          kubeDeps.Mounter,
+        writer:           kubeDeps.Writer,
+        maxPods:          int(kubeCfg.MaxPods),
+        podsPerCore:      int(kubeCfg.PodsPerCore),
+        syncLoopMonitor:  atomic.Value{},
+        resolverConfig:   kubeCfg.ResolverConfig,
+        daemonEndpoints:  daemonEndpoints,
+        containerManager: kubeDeps.ContainerManager,
+        nodeIP:           net.ParseIP(nodeIP),
+        clock:            clock.RealClock{},
         outOfDiskTransitionFrequency: kubeCfg.OutOfDiskTransitionFrequency.Duration,
-        enableCustomMetrics:          kubeCfg.EnableCustomMetrics,
-        babysitDaemons:               kubeCfg.BabysitDaemons,
         enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
         iptClient:                    utilipt.New(utilexec.New(), utildbus.New(), utilipt.ProtocolIpv4),
         makeIPTablesUtilChains:       kubeCfg.MakeIPTablesUtilChains,
@@ -474,16 +466,15 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         glog.Infof("Experimental host user namespace defaulting is enabled.")
     }

-    if mode, err := effectiveHairpinMode(componentconfig.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, kubeCfg.NetworkPluginName); err != nil {
+    hairpinMode, err := effectiveHairpinMode(componentconfig.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, kubeCfg.NetworkPluginName)
+    if err != nil {
         // This is a non-recoverable error. Returning it up the callstack will just
         // lead to retries of the same failure, so just fail hard.
         glog.Fatalf("Invalid hairpin mode: %v", err)
-    } else {
-        klet.hairpinMode = mode
     }
-    glog.Infof("Hairpin mode set to %q", klet.hairpinMode)
+    glog.Infof("Hairpin mode set to %q", hairpinMode)

-    if plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, kubeCfg.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, klet.hairpinMode, klet.nonMasqueradeCIDR, int(kubeCfg.NetworkPluginMTU)); err != nil {
+    if plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, kubeCfg.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(kubeCfg.NetworkPluginMTU)); err != nil {
         return nil, err
     } else {
         klet.networkPlugin = plug
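The hairpin-mode change is the standard Go move when a value computed in an if-statement initializer is needed later: hoist the call, handle the error separately, and the value stays in function scope instead of needing a struct field. Schematically (computeMode and use are stand-ins, not kubelet functions):

package main

import "log"

func computeMode() (string, error) { return "hairpin-veth", nil }

func use(mode string) { log.Printf("mode: %s", mode) }

func main() {
	// Before: mode is trapped in the if/else scope, so the old code had
	// to copy it onto the Kubelet struct to use it afterwards.
	if mode, err := computeMode(); err != nil {
		log.Fatalf("invalid mode: %v", err)
	} else {
		use(mode)
	}

	// After: a plain assignment keeps mode visible for the rest of the
	// function, and the struct field can be deleted.
	mode, err := computeMode()
	if err != nil {
		log.Fatalf("invalid mode: %v", err)
	}
	use(mode)
}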
@@ -515,8 +506,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
             binDir = kubeCfg.NetworkPluginDir
         }
         pluginSettings := dockershim.NetworkPluginSettings{
-            HairpinMode:       klet.hairpinMode,
-            NonMasqueradeCIDR: klet.nonMasqueradeCIDR,
+            HairpinMode:       hairpinMode,
+            NonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR,
             PluginName:        kubeCfg.NetworkPluginName,
             PluginConfDir:     kubeCfg.CNIConfDir,
             PluginBinDir:      binDir,
@@ -546,7 +537,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
     case "docker":
         // Create and start the CRI shim running as a grpc server.
         streamingConfig := getStreamingConfig(kubeCfg, kubeDeps)
-        ds, err := dockershim.NewDockerService(klet.dockerClient, kubeCfg.SeccompProfileRoot, kubeCfg.PodInfraContainerImage,
+        ds, err := dockershim.NewDockerService(kubeDeps.DockerClient, kubeCfg.SeccompProfileRoot, kubeCfg.PodInfraContainerImage,
             streamingConfig, &pluginSettings, kubeCfg.RuntimeCgroups, kubeCfg.CgroupDriver, kubeCfg.DockerExecHandlerName, dockershimRootDir,
             kubeCfg.DockerDisableSharedPID)
         if err != nil {
@@ -577,12 +568,12 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         }

         // Create dockerLegacyService when the logging driver is not supported.
-        supported, err := dockershim.IsCRISupportedLogDriver(klet.dockerClient)
+        supported, err := dockershim.IsCRISupportedLogDriver(kubeDeps.DockerClient)
         if err != nil {
             return nil, err
         }
         if !supported {
-            klet.dockerLegacyService = dockershim.NewDockerLegacyService(klet.dockerClient)
+            klet.dockerLegacyService = dockershim.NewDockerLegacyService(kubeDeps.DockerClient)
         }
     case "remote":
         // No-op.
@@ -599,12 +590,12 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
         klet.podManager,
         kubeDeps.OSInterface,
         klet,
-        klet.httpClient,
+        httpClient,
         imageBackOff,
         kubeCfg.SerializeImagePulls,
         float32(kubeCfg.RegistryPullQPS),
         int(kubeCfg.RegistryBurst),
-        klet.cpuCFSQuota,
+        kubeCfg.CPUCFSQuota,
         runtimeService,
         imageService,
     )
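Passing the local httpClient (rather than klet.httpClient) also removes an ordering hazard: reading a field off a struct that is still being wired up silently depends on that field having been set earlier, whereas a local that is never assigned is a compile error. A toy illustration with hypothetical names:

package main

import "net/http"

type workers struct{ client *http.Client }

func startWorkers(c *http.Client) *workers { return &workers{client: c} }

type kubeletish struct {
	workers *workers
}

func build() *kubeletish {
	httpClient := &http.Client{} // the compiler guarantees this exists before use
	k := &kubeletish{}
	k.workers = startWorkers(httpClient) // explicit dependency, no field read
	return k
}

func main() { _ = build() }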
@@ -628,9 +619,9 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
             containerRefManager,
             klet.podManager,
             klet.livenessManager,
-            klet.httpClient,
+            httpClient,
             klet.networkPlugin,
-            klet.hairpinMode == componentconfig.HairpinVeth,
+            hairpinMode == componentconfig.HairpinVeth,
             utilexec.New(),
             kubecontainer.RealOS{},
             imageBackOff,
@@ -762,7 +753,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
     klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
     if utilfeature.DefaultFeatureGate.Enabled(features.Accelerators) {
         if kubeCfg.ContainerRuntime == "docker" {
-            if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, klet.dockerClient); err != nil {
+            if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClient); err != nil {
                 return nil, err
             }
         } else {
@@ -783,17 +774,12 @@ type serviceLister interface {
     List(labels.Selector) ([]*v1.Service, error)
 }

-type nodeLister interface {
-    List(labels.Selector) ([]*v1.Node, error)
-}
-
 // Kubelet is the main kubelet implementation.
 type Kubelet struct {
     kubeletConfiguration componentconfig.KubeletConfiguration

     hostname     string
     nodeName     types.NodeName
-    dockerClient libdocker.Interface
     runtimeCache kubecontainer.RuntimeCache
     kubeClient   clientset.Interface
     iptClient    utilipt.Interface
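With the nodeLister field gone, the package-private nodeLister interface above loses its last reference and is deleted with it. Linters catch this class of leftover automatically; staticcheck's unused pass, for example, flags unreferenced unexported identifiers. A toy file it would complain about (hypothetical, not from this repo):

package kubeletish

// staticcheck reports something like: type nodeLister is unused (U1000),
// once the last field or parameter of this type has been deleted.
type nodeLister interface {
	list() error
}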
@@ -816,16 +802,10 @@ type Kubelet struct {
     // Needed to observe and respond to situations that could impact node stability
     evictionManager eviction.Manager

-    // Needed to report events for containers belonging to deleted/modified pods.
+    // Tracks references for reporting events
     containerRefManager *kubecontainer.RefManager

     // Optional, defaults to /logs/ from /var/log
     logServer http.Handler
     // Optional, defaults to simple Docker implementation
     runner kubecontainer.ContainerCommandRunner
-    // Optional, client for http requests, defaults to empty client
-    httpClient kubetypes.HttpGetter

     // cAdvisor used for container information.
     cadvisor cadvisor.Interface
@@ -850,8 +830,6 @@ type Kubelet struct {
     masterServiceNamespace string
     // serviceLister knows how to list services
     serviceLister serviceLister
-    // nodeLister knows how to list nodes
-    nodeLister nodeLister
     // nodeInfo knows how to get information about the node for this kubelet.
     nodeInfo predicates.NodeInfo

@@ -960,10 +938,6 @@ type Kubelet struct {

     // Manager of non-Runtime containers.
     containerManager cm.ContainerManager
     nodeConfig       cm.NodeConfig

-    // Traffic to IPs outside this range will use IP masquerade.
-    nonMasqueradeCIDR string
-
     // Maximum Number of Pods which can be run by this Kubelet
     maxPods int
@@ -986,9 +960,6 @@ type Kubelet struct {
     // TODO: remove when kubenet plugin is ready
     shaper bandwidth.BandwidthShaper

-    // True if container cpu limits should be enforced via cgroup CFS quota
-    cpuCFSQuota bool
-
     // Information about the ports which are opened by daemons on Node running this Kubelet server.
     daemonEndpoints *v1.NodeDaemonEndpoints

@@ -1014,17 +985,6 @@ type Kubelet struct {
     // getting rescheduled onto the node.
     outOfDiskTransitionFrequency time.Duration

-    // support gathering custom metrics.
-    enableCustomMetrics bool
-
-    // How the Kubelet should setup hairpin NAT. Can take the values: "promiscuous-bridge"
-    // (make cbr0 promiscuous), "hairpin-veth" (set the hairpin flag on veth interfaces)
-    // or "none" (do nothing).
-    hairpinMode componentconfig.HairpinMode
-
-    // The node has babysitter process monitoring docker and kubelet
-    babysitDaemons bool
-
     // handlers called during the tryUpdateNodeStatus cycle
     setNodeStatusFuncs []func(*v1.Node) error
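The deleted field carried the one-paragraph description of the three hairpin settings. For reference, the values behind the componentconfig.HairpinMode constants used elsewhere in this diff (componentconfig.HairpinVeth, componentconfig.HairpinNone), restated as a sketch; the real constants live in the componentconfig package:

package kubeletish

// The three modes the removed comment described, as their string values.
const (
	hairpinPromiscuousBridge = "promiscuous-bridge" // make cbr0 promiscuous
	hairpinVeth              = "hairpin-veth"       // set the hairpin flag on veth interfaces
	hairpinNone              = "none"               // do nothing
)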
@@ -154,7 +154,7 @@ func newTestKubeletWithImageList(
     kubelet.nodeName = types.NodeName(testKubeletHostname)
     kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
     kubelet.runtimeState.setNetworkState(nil)
-    kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kubelet.nonMasqueradeCIDR, 1440)
+    kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, "", 1440)
     if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
         t.Fatalf("can't make a temp rootdir: %v", err)
     } else {
@@ -166,7 +166,6 @@ func newTestKubeletWithImageList(
     kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
     kubelet.masterServiceNamespace = metav1.NamespaceDefault
     kubelet.serviceLister = testServiceLister{}
-    kubelet.nodeLister = testNodeLister{}
     kubelet.nodeInfo = testNodeInfo{
         nodes: []*v1.Node{
             {
@@ -206,7 +205,6 @@ func newTestKubeletWithImageList(
     kubelet.secretManager = secretManager
     kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager)
     kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{})
-    kubelet.containerRefManager = kubecontainer.NewRefManager()
     diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
     if err != nil {
         t.Fatalf("can't initialize disk space manager: %v", err)
@@ -451,16 +449,6 @@ func TestHandlePortConflicts(t *testing.T) {
     testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
     testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

-    kl.nodeLister = testNodeLister{nodes: []*v1.Node{
-        {
-            ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
-            Status: v1.NodeStatus{
-                Allocatable: v1.ResourceList{
-                    v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
-                },
-            },
-        },
-    }}
     kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
         {
             ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
@@ -506,16 +494,6 @@ func TestHandleHostNameConflicts(t *testing.T) {
     testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
     testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

-    kl.nodeLister = testNodeLister{nodes: []*v1.Node{
-        {
-            ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
-            Status: v1.NodeStatus{
-                Allocatable: v1.ResourceList{
-                    v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
-                },
-            },
-        },
-    }}
     kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
@@ -564,7 +542,6 @@ func TestHandleNodeSelector(t *testing.T) {
             },
         },
     }
-    kl.nodeLister = testNodeLister{nodes: nodes}
     kl.nodeInfo = testNodeInfo{nodes: nodes}
     testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
     testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
@@ -603,7 +580,6 @@ func TestHandleMemExceeded(t *testing.T) {
             v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
         }}},
     }
-    kl.nodeLister = testNodeLister{nodes: nodes}
     kl.nodeInfo = testNodeInfo{nodes: nodes}
     testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
     testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
@@ -1842,16 +1818,6 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
     testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
     defer testKubelet.Cleanup()
     kl := testKubelet.kubelet
-    kl.nodeLister = testNodeLister{nodes: []*v1.Node{
-        {
-            ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
-            Status: v1.NodeStatus{
-                Allocatable: v1.ResourceList{
-                    v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
-                },
-            },
-        },
-    }}
     kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
         {
             ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
@@ -73,23 +73,21 @@ func TestRunOnce(t *testing.T) {
     }
     defer os.RemoveAll(basePath)
     kb := &Kubelet{
-        rootDirectory:       basePath,
-        recorder:            &record.FakeRecorder{},
-        cadvisor:            cadvisor,
-        nodeLister:          testNodeLister{},
-        nodeInfo:            testNodeInfo{},
-        statusManager:       status.NewManager(nil, podManager, &statustest.FakePodDeletionSafetyProvider{}),
-        containerRefManager: kubecontainer.NewRefManager(),
-        podManager:          podManager,
-        os:                  &containertest.FakeOS{},
-        diskSpaceManager:    diskSpaceManager,
-        containerRuntime:    fakeRuntime,
-        reasonCache:         NewReasonCache(),
-        clock:               clock.RealClock{},
-        kubeClient:          &fake.Clientset{},
-        hostname:            testKubeletHostname,
-        nodeName:            testKubeletHostname,
-        runtimeState:        newRuntimeState(time.Second),
+        rootDirectory:    basePath,
+        recorder:         &record.FakeRecorder{},
+        cadvisor:         cadvisor,
+        nodeInfo:         testNodeInfo{},
+        statusManager:    status.NewManager(nil, podManager, &statustest.FakePodDeletionSafetyProvider{}),
+        podManager:       podManager,
+        os:               &containertest.FakeOS{},
+        diskSpaceManager: diskSpaceManager,
+        containerRuntime: fakeRuntime,
+        reasonCache:      NewReasonCache(),
+        clock:            clock.RealClock{},
+        kubeClient:       &fake.Clientset{},
+        hostname:         testKubeletHostname,
+        nodeName:         testKubeletHostname,
+        runtimeState:     newRuntimeState(time.Second),
     }
     kb.containerManager = cm.NewStubContainerManager()

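A side benefit visible in this test hunk: deleting a field turns every stale fixture into a compile error, which is why the test literals shrink in the same commit. A toy demonstration with a hypothetical type:

package main

type widget struct {
	name string
	// size int — deleted; a fixture still writing widget{size: 3}
	// now fails with "unknown field size in struct literal".
}

func main() {
	_ = widget{name: "w"} // the fixture compiles once it drops the dead field
}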
@@ -113,7 +111,7 @@ func TestRunOnce(t *testing.T) {
         false, /* experimentalCheckNodeCapabilitiesBeforeMount */
         false /* keepTerminatedPodVolumes */)

-    kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kb.nonMasqueradeCIDR, network.UseDefaultMTU)
+    kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, "", network.UseDefaultMTU)
     // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
     volumeStatsAggPeriod := time.Second * 10
     kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)