Revert "proxy startup-time config handling cleanup"

@@ -452,8 +452,6 @@ func (o *Options) ApplyDefaults(in *kubeproxyconfig.KubeProxyConfiguration) (*ku
 
 	out := internal.(*kubeproxyconfig.KubeProxyConfiguration)
 
-	o.platformApplyDefaults(out)
-
 	return out, nil
 }
 
@@ -524,16 +522,21 @@ with the apiserver API to configure the proxy.`,
 // ProxyServer represents all the parameters required to start the Kubernetes proxy server. All
 // fields are required.
 type ProxyServer struct {
-	Config *kubeproxyconfig.KubeProxyConfiguration
-
-	Client        clientset.Interface
-	Broadcaster   events.EventBroadcaster
-	Recorder      events.EventRecorder
-	Conntracker   Conntracker // if nil, ignored
-	NodeRef       *v1.ObjectReference
-	HealthzServer healthcheck.ProxierHealthUpdater
-
-	Proxier proxy.Provider
+	Client                 clientset.Interface
+	Proxier                proxy.Provider
+	Broadcaster            events.EventBroadcaster
+	Recorder               events.EventRecorder
+	ConntrackConfiguration kubeproxyconfig.KubeProxyConntrackConfiguration
+	Conntracker            Conntracker // if nil, ignored
+	ProxyMode              kubeproxyconfig.ProxyMode
+	NodeRef                *v1.ObjectReference
+	MetricsBindAddress     string
+	BindAddressHardFail    bool
+	EnableProfiling        bool
+	OOMScoreAdj            *int32
+	ConfigSyncPeriod       time.Duration
+	HealthzServer          healthcheck.ProxierHealthUpdater
+	localDetectorMode      kubeproxyconfig.LocalMode
 }
 
 // createClient creates a kube client from the given config and masterOverride.
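
Note on the hunk above: the revert swaps the single retained Config pointer for individually copied fields, so later code reads s.Foo instead of s.Config.Foo. A minimal sketch of how a ProxyServer might then be assembled from a decoded config; the helper and its parameters are hypothetical, only the field names come from the diff:

// Sketch only, not the commit's code: field-by-field copy out of the decoded
// KubeProxyConfiguration. The constructor and its parameters are hypothetical.
func buildProxyServer(
	cfg *kubeproxyconfig.KubeProxyConfiguration,
	client clientset.Interface,
	proxier proxy.Provider,
	broadcaster events.EventBroadcaster,
	recorder events.EventRecorder,
	conntracker Conntracker, // nil on Windows
	nodeRef *v1.ObjectReference,
	healthzServer healthcheck.ProxierHealthUpdater,
) *ProxyServer {
	return &ProxyServer{
		Client:                 client,
		Proxier:                proxier,
		Broadcaster:            broadcaster,
		Recorder:               recorder,
		ConntrackConfiguration: cfg.Conntrack,
		Conntracker:            conntracker,
		ProxyMode:              cfg.Mode,
		NodeRef:                nodeRef,
		MetricsBindAddress:     cfg.MetricsBindAddress,
		BindAddressHardFail:    cfg.BindAddressHardFail,
		EnableProfiling:        cfg.EnableProfiling,
		OOMScoreAdj:            cfg.OOMScoreAdj,
		ConfigSyncPeriod:       cfg.ConfigSyncPeriod.Duration,
		HealthzServer:          healthzServer,
		localDetectorMode:      cfg.DetectLocalMode,
	}
}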
@@ -642,9 +645,9 @@ func (s *ProxyServer) Run() error {
 
 	// TODO(vmarmol): Use container config for this.
 	var oomAdjuster *oom.OOMAdjuster
-	if s.Config.OOMScoreAdj != nil {
+	if s.OOMScoreAdj != nil {
 		oomAdjuster = oom.NewOOMAdjuster()
-		if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*s.Config.OOMScoreAdj)); err != nil {
+		if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*s.OOMScoreAdj)); err != nil {
 			klog.V(2).InfoS("Failed to apply OOMScore", "err", err)
 		}
 	}
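
ApplyOOMScoreAdj(0, ...) asks the oom package to adjust the current process (pid 0 is treated as "self" there). On Linux this boils down to a procfs write; the following is a self-contained sketch of that mechanism, not kube-proxy's actual oom package:

package main

import (
	"fmt"
	"os"
)

// Illustrative stand-in for oom.ApplyOOMScoreAdj(0, value): on Linux the
// adjustment is a write to /proc/<pid>/oom_score_adj ("self" for pid 0),
// accepting values in [-1000, 1000].
func setOOMScoreAdj(value int) error {
	return os.WriteFile("/proc/self/oom_score_adj", []byte(fmt.Sprintf("%d", value)), 0644)
}

func main() {
	if err := setOOMScoreAdj(-999); err != nil {
		fmt.Println("failed to apply OOM score:", err) // kube-proxy likewise only logs this
	}
}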
@@ -657,7 +660,7 @@ func (s *ProxyServer) Run() error {
 	// TODO(thockin): make it possible for healthz and metrics to be on the same port.
 
 	var errCh chan error
-	if s.Config.BindAddressHardFail {
+	if s.BindAddressHardFail {
 		errCh = make(chan error)
 	}
 
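
The restored s.BindAddressHardFail only decides whether errCh is allocated. The idiom relies on nil-channel semantics: a receive from a nil channel blocks forever, so the error path is inert in soft-fail mode. A sketch under that assumption (hardFail and stopCh are illustrative names, not the commit's code):

// Illustrative only: a receive from a nil channel blocks forever, so with
// hardFail == false the error case below can never fire and bind failures
// are merely logged inside the serving helpers.
func waitForErrors(hardFail bool, stopCh <-chan struct{}) error {
	var errCh chan error
	if hardFail {
		errCh = make(chan error)
	}
	// ... errCh would be handed to serveHealthz/serveMetrics here ...
	select {
	case err := <-errCh: // never selected while errCh is nil
		return err
	case <-stopCh:
		return nil
	}
}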
@@ -665,12 +668,12 @@ func (s *ProxyServer) Run() error {
 	serveHealthz(s.HealthzServer, errCh)
 
 	// Start up a metrics server if requested
-	serveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, errCh)
+	serveMetrics(s.MetricsBindAddress, s.ProxyMode, s.EnableProfiling, errCh)
 
 	// Tune conntrack, if requested
 	// Conntracker is always nil for windows
 	if s.Conntracker != nil {
-		max, err := getConntrackMax(s.Config.Conntrack)
+		max, err := getConntrackMax(s.ConntrackConfiguration)
 		if err != nil {
 			return err
 		}
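
getConntrackMax turns the conntrack config section into an nf_conntrack_max value. A hedged sketch of the assumed semantics, scaling MaxPerCore by CPU count with Min as a floor (the real helper lives elsewhere in this package; this is orientation, not its code):

import "runtime"

// Hedged sketch of the assumed getConntrackMax semantics: scale MaxPerCore by
// the CPU count but never return less than Min; 0 means "leave the kernel's
// nf_conntrack_max untouched". Parameters mirror the config's *int32 fields.
func conntrackMaxSketch(maxPerCore, min *int32) int {
	if maxPerCore == nil || *maxPerCore == 0 {
		return 0
	}
	floor := 0
	if min != nil {
		floor = int(*min)
	}
	if scaled := int(*maxPerCore) * runtime.NumCPU(); scaled > floor {
		return scaled
	}
	return floor
}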
@@ -693,15 +696,15 @@ func (s *ProxyServer) Run() error {
 		}
 	}
 
-	if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
-		timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
+	if s.ConntrackConfiguration.TCPEstablishedTimeout != nil && s.ConntrackConfiguration.TCPEstablishedTimeout.Duration > 0 {
+		timeout := int(s.ConntrackConfiguration.TCPEstablishedTimeout.Duration / time.Second)
 		if err := s.Conntracker.SetTCPEstablishedTimeout(timeout); err != nil {
 			return err
 		}
 	}
 
-	if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
-		timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
+	if s.ConntrackConfiguration.TCPCloseWaitTimeout != nil && s.ConntrackConfiguration.TCPCloseWaitTimeout.Duration > 0 {
+		timeout := int(s.ConntrackConfiguration.TCPCloseWaitTimeout.Duration / time.Second)
 		if err := s.Conntracker.SetTCPCloseWaitTimeout(timeout); err != nil {
 			return err
 		}
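
Both timeouts are *metav1.Duration, hence the nil-and-positive guards before converting to the whole seconds the conntrack sysctls expect. For instance (24h is the commonly documented default for the established timeout):

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Mirrors the guard-and-convert pattern in the hunk above with a concrete value.
func timeoutSecondsExample() {
	d := &metav1.Duration{Duration: 24 * time.Hour} // e.g. the 24h default
	if d != nil && d.Duration > 0 {
		fmt.Println(int(d.Duration / time.Second)) // prints 86400
	}
}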
@@ -722,7 +725,7 @@ func (s *ProxyServer) Run() error {
 	labelSelector = labelSelector.Add(*noProxyName, *noHeadlessEndpoints)
 
 	// Make informers that filter out objects that want a non-default service proxy.
-	informerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.Config.ConfigSyncPeriod.Duration,
+	informerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.ConfigSyncPeriod,
 		informers.WithTweakListOptions(func(options *metav1.ListOptions) {
 			options.LabelSelector = labelSelector.String()
 		}))
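
The tweak function is applied to every List/Watch the factory issues, so the filtering happens server-side. The selector itself is built earlier in Run(); a sketch assuming the upstream well-known label keys:

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

// Keep only objects carrying neither label; both keys are assumed to be the
// upstream well-known ones. Error handling elided for brevity.
func nonDefaultProxyFilter() labels.Selector {
	noProxyName, _ := labels.NewRequirement(
		"service.kubernetes.io/service-proxy-name", selection.DoesNotExist, nil)
	noHeadlessEndpoints, _ := labels.NewRequirement(
		"service.kubernetes.io/headless", selection.DoesNotExist, nil)
	return labels.NewSelector().Add(*noProxyName, *noHeadlessEndpoints)
}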
@@ -731,11 +734,11 @@ func (s *ProxyServer) Run() error {
 	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
 	// only notify on changes, and the initial update (on process start) may be lost if no handlers
 	// are registered yet.
-	serviceConfig := config.NewServiceConfig(informerFactory.Core().V1().Services(), s.Config.ConfigSyncPeriod.Duration)
+	serviceConfig := config.NewServiceConfig(informerFactory.Core().V1().Services(), s.ConfigSyncPeriod)
 	serviceConfig.RegisterEventHandler(s.Proxier)
 	go serviceConfig.Run(wait.NeverStop)
 
-	endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.Config.ConfigSyncPeriod.Duration)
+	endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.ConfigSyncPeriod)
 	endpointSliceConfig.RegisterEventHandler(s.Proxier)
 	go endpointSliceConfig.Run(wait.NeverStop)
 
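
RegisterEventHandler accepts s.Proxier because proxy.Provider satisfies the config package's handler interfaces, and the comment above explains the registration-before-start ordering. For orientation, the assumed shape of the service-side contract (paraphrased, not copied verbatim from pkg/proxy/config):

import v1 "k8s.io/api/core/v1"

// Assumed shape of the contract behind RegisterEventHandler; OnServiceSynced
// is why handlers must be registered before the informers start delivering.
type ServiceHandler interface {
	OnServiceAdd(service *v1.Service)
	OnServiceUpdate(oldService, service *v1.Service)
	OnServiceDelete(service *v1.Service)
	OnServiceSynced()
}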
@@ -744,13 +747,13 @@ func (s *ProxyServer) Run() error {
 	informerFactory.Start(wait.NeverStop)
 
 	// Make an informer that selects for our nodename.
-	currentNodeInformerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.Config.ConfigSyncPeriod.Duration,
+	currentNodeInformerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.ConfigSyncPeriod,
 		informers.WithTweakListOptions(func(options *metav1.ListOptions) {
 			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", s.NodeRef.Name).String()
 		}))
-	nodeConfig := config.NewNodeConfig(currentNodeInformerFactory.Core().V1().Nodes(), s.Config.ConfigSyncPeriod.Duration)
+	nodeConfig := config.NewNodeConfig(currentNodeInformerFactory.Core().V1().Nodes(), s.ConfigSyncPeriod)
 	// https://issues.k8s.io/111321
-	if s.Config.DetectLocalMode == kubeproxyconfig.LocalModeNodeCIDR {
+	if s.localDetectorMode == kubeproxyconfig.LocalModeNodeCIDR {
 		nodeConfig.RegisterEventHandler(&proxy.NodePodCIDRHandler{})
 	}
 	nodeConfig.RegisterEventHandler(s.Proxier)
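
The second factory watches a single Node by name, and the restored localDetectorMode gate reattaches the NodePodCIDRHandler under LocalModeNodeCIDR (see https://issues.k8s.io/111321). A sketch of such a one-object field selector, with a hypothetical node name:

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
)

// A field selector on metadata.name makes the apiserver stream events for
// exactly one Node. The node name here is hypothetical.
func oneNodeListOptions() metav1.ListOptions {
	return metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("metadata.name", "worker-1").String(),
	}
}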