convert k8s.io/kubernetes/pkg/proxy to contextual logging, part 1

Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Ziqi Zhao 2024-01-26 13:10:53 +08:00
parent c1924df0a8
commit be4535bd34
19 changed files with 542 additions and 420 deletions
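
The pattern applied throughout is Kubernetes contextual logging: instead of storing a klog.Logger in a struct field or passing one as a parameter, each function accepts a context.Context and derives its logger with klog.FromContext(ctx). Below is a minimal, self-contained sketch of the idea; the setMax helper and its values are illustrative, not the real conntracker code:

    package main

    import (
    	"context"

    	"k8s.io/klog/v2"
    )

    // setMax is a hypothetical stand-in for a converted method: the logger
    // comes from the context instead of a receiver field.
    func setMax(ctx context.Context, max int) error {
    	logger := klog.FromContext(ctx)
    	logger.Info("Setting nf_conntrack_max", "nfConntrackMax", max)
    	return nil
    }

    func main() {
    	// Callers attach a logger to the context once, near the entry point;
    	// klog.Background() returns the process-wide default logger.
    	ctx := klog.NewContext(context.Background(), klog.Background())
    	_ = setMax(ctx, 131072)
    }

In tests, ktesting.NewTestContext(t) returns a logger plus a context carrying it, which is why the test hunks below change "logger, _ := ktesting.NewTestContext(t)" to "_, ctx := ktesting.NewTestContext(t)" and pass ctx down instead.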

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package app
 
 import (
+	"context"
 	"errors"
 	"os"
 	"strconv"
@@ -33,30 +34,30 @@ import (
 // https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt
 type Conntracker interface {
 	// SetMax adjusts nf_conntrack_max.
-	SetMax(max int) error
+	SetMax(ctx context.Context, max int) error
 	// SetTCPEstablishedTimeout adjusts nf_conntrack_tcp_timeout_established.
-	SetTCPEstablishedTimeout(seconds int) error
+	SetTCPEstablishedTimeout(ctx context.Context, seconds int) error
 	// SetTCPCloseWaitTimeout adjusts nf_conntrack_tcp_timeout_close_wait.
-	SetTCPCloseWaitTimeout(seconds int) error
+	SetTCPCloseWaitTimeout(ctx context.Context, seconds int) error
 	// SetTCPBeLiberal adjusts nf_conntrack_tcp_be_liberal.
-	SetTCPBeLiberal(value int) error
+	SetTCPBeLiberal(ctx context.Context, value int) error
 	// SetUDPTimeout adjusts nf_conntrack_udp_timeout.
-	SetUDPTimeout(seconds int) error
+	SetUDPTimeout(ctx context.Context, seconds int) error
 	// SetUDPStreamTimeout adjusts nf_conntrack_udp_timeout_stream.
-	SetUDPStreamTimeout(seconds int) error
+	SetUDPStreamTimeout(ctx context.Context, seconds int) error
 }
 
 type realConntracker struct {
-	logger klog.Logger
 }
 
 var errReadOnlySysFS = errors.New("readOnlySysFS")
 
-func (rct realConntracker) SetMax(max int) error {
-	if err := rct.setIntSysCtl("nf_conntrack_max", max); err != nil {
+func (rct realConntracker) SetMax(ctx context.Context, max int) error {
+	logger := klog.FromContext(ctx)
+	if err := rct.setIntSysCtl(ctx, "nf_conntrack_max", max); err != nil {
 		return err
 	}
-	rct.logger.Info("Setting nf_conntrack_max", "nfConntrackMax", max)
+	logger.Info("Setting nf_conntrack_max", "nfConntrackMax", max)
 	// Linux does not support writing to /sys/module/nf_conntrack/parameters/hashsize
 	// when the writer process is not in the initial network namespace
@@ -79,7 +80,7 @@ func (rct realConntracker) SetMax(max int) error {
 	// don't set conntrack hashsize and return a special error
 	// errReadOnlySysFS here. The caller should deal with
 	// errReadOnlySysFS differently.
-	writable, err := rct.isSysFSWritable()
+	writable, err := rct.isSysFSWritable(ctx)
 	if err != nil {
 		return err
 	}
@@ -87,36 +88,37 @@ func (rct realConntracker) SetMax(max int) error {
 		return errReadOnlySysFS
 	}
 	// TODO: generify this and sysctl to a new sysfs.WriteInt()
-	rct.logger.Info("Setting conntrack hashsize", "conntrackHashsize", max/4)
+	logger.Info("Setting conntrack hashsize", "conntrackHashsize", max/4)
 	return writeIntStringFile("/sys/module/nf_conntrack/parameters/hashsize", max/4)
 }
 
-func (rct realConntracker) SetTCPEstablishedTimeout(seconds int) error {
-	return rct.setIntSysCtl("nf_conntrack_tcp_timeout_established", seconds)
+func (rct realConntracker) SetTCPEstablishedTimeout(ctx context.Context, seconds int) error {
+	return rct.setIntSysCtl(ctx, "nf_conntrack_tcp_timeout_established", seconds)
 }
 
-func (rct realConntracker) SetTCPCloseWaitTimeout(seconds int) error {
-	return rct.setIntSysCtl("nf_conntrack_tcp_timeout_close_wait", seconds)
+func (rct realConntracker) SetTCPCloseWaitTimeout(ctx context.Context, seconds int) error {
+	return rct.setIntSysCtl(ctx, "nf_conntrack_tcp_timeout_close_wait", seconds)
 }
 
-func (rct realConntracker) SetTCPBeLiberal(value int) error {
-	return rct.setIntSysCtl("nf_conntrack_tcp_be_liberal", value)
+func (rct realConntracker) SetTCPBeLiberal(ctx context.Context, value int) error {
	return rct.setIntSysCtl(ctx, "nf_conntrack_tcp_be_liberal", value)
 }
 
-func (rct realConntracker) SetUDPTimeout(seconds int) error {
-	return rct.setIntSysCtl("nf_conntrack_udp_timeout", seconds)
+func (rct realConntracker) SetUDPTimeout(ctx context.Context, seconds int) error {
+	return rct.setIntSysCtl(ctx, "nf_conntrack_udp_timeout", seconds)
 }
 
-func (rct realConntracker) SetUDPStreamTimeout(seconds int) error {
-	return rct.setIntSysCtl("nf_conntrack_udp_timeout_stream", seconds)
+func (rct realConntracker) SetUDPStreamTimeout(ctx context.Context, seconds int) error {
+	return rct.setIntSysCtl(ctx, "nf_conntrack_udp_timeout_stream", seconds)
 }
 
-func (rct realConntracker) setIntSysCtl(name string, value int) error {
+func (rct realConntracker) setIntSysCtl(ctx context.Context, name string, value int) error {
+	logger := klog.FromContext(ctx)
 	entry := "net/netfilter/" + name
 	sys := sysctl.New()
 	if val, _ := sys.GetSysctl(entry); val != value {
-		rct.logger.Info("Set sysctl", "entry", entry, "value", value)
+		logger.Info("Set sysctl", "entry", entry, "value", value)
 		if err := sys.SetSysctl(entry, value); err != nil {
 			return err
 		}
@@ -125,13 +127,14 @@ func (rct realConntracker) setIntSysCtl(name string, value int) error {
 }
 
 // isSysFSWritable checks /proc/mounts to see whether sysfs is 'rw' or not.
-func (rct realConntracker) isSysFSWritable() (bool, error) {
+func (rct realConntracker) isSysFSWritable(ctx context.Context) (bool, error) {
+	logger := klog.FromContext(ctx)
 	const permWritable = "rw"
 	const sysfsDevice = "sysfs"
 	m := mount.New("" /* default mount path */)
 	mountPoints, err := m.List()
 	if err != nil {
-		rct.logger.Error(err, "Failed to list mount points")
+		logger.Error(err, "Failed to list mount points")
 		return false, err
 	}
@@ -143,7 +146,7 @@ func (rct realConntracker) isSysFSWritable() (bool, error) {
 		if len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable {
 			return true, nil
 		}
-		rct.logger.Error(nil, "Sysfs is not writable", "mountPoint", mountPoint, "mountOptions", mountPoint.Opts)
+		logger.Error(nil, "Sysfs is not writable", "mountPoint", mountPoint, "mountOptions", mountPoint.Opts)
 		return false, errReadOnlySysFS
 	}

View File

@@ -96,7 +96,7 @@ func init() {
 // proxyRun defines the interface to run a specified ProxyServer
 type proxyRun interface {
-	Run() error
+	Run(ctx context.Context) error
 }
 
 // Options contains everything necessary to create and run a proxy server.
@@ -371,20 +371,20 @@ func (o *Options) Validate() error {
 }
 
 // Run runs the specified ProxyServer.
-func (o *Options) Run() error {
+func (o *Options) Run(ctx context.Context) error {
 	defer close(o.errCh)
 	if len(o.WriteConfigTo) > 0 {
 		return o.writeConfigFile()
 	}
-	err := platformCleanup(o.config.Mode, o.CleanupAndExit)
+	err := platformCleanup(ctx, o.config.Mode, o.CleanupAndExit)
 	if o.CleanupAndExit {
 		return err
 	}
 	// We ignore err otherwise; the cleanup is best-effort, and the backends will have
 	// logged messages if they failed in interesting ways.
-	proxyServer, err := newProxyServer(o.logger, o.config, o.master, o.InitAndExit)
+	proxyServer, err := newProxyServer(ctx, o.config, o.master, o.InitAndExit)
 	if err != nil {
 		return err
 	}
@@ -393,19 +393,19 @@ func (o *Options) Run() error {
 	}
 	o.proxyServer = proxyServer
-	return o.runLoop()
+	return o.runLoop(ctx)
 }
 
 // runLoop will watch on the update change of the proxy server's configuration file.
 // Return an error when updated
-func (o *Options) runLoop() error {
+func (o *Options) runLoop(ctx context.Context) error {
 	if o.watcher != nil {
 		o.watcher.Run()
 	}
 	// run the proxy in goroutine
 	go func() {
-		err := o.proxyServer.Run()
+		err := o.proxyServer.Run(ctx)
 		o.errCh <- err
 	}()
@@ -554,7 +554,7 @@ with the apiserver API to configure the proxy.`,
 		}
 		// add feature enablement metrics
 		utilfeature.DefaultMutableFeatureGate.AddMetrics()
-		if err := opts.Run(); err != nil {
+		if err := opts.Run(context.Background()); err != nil {
 			opts.logger.Error(err, "Error running ProxyServer")
 			return err
 		}
@@ -597,15 +597,14 @@ type ProxyServer struct {
 	podCIDRs []string // only used for LocalModeNodeCIDR
 	Proxier proxy.Provider
-	logger klog.Logger
 }
 
 // newProxyServer creates a ProxyServer based on the given config
-func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfiguration, master string, initOnly bool) (*ProxyServer, error) {
+func newProxyServer(ctx context.Context, config *kubeproxyconfig.KubeProxyConfiguration, master string, initOnly bool) (*ProxyServer, error) {
+	logger := klog.FromContext(ctx)
 	s := &ProxyServer{
 		Config: config,
-		logger: logger,
 	}
 
 	cz, err := configz.New(kubeproxyconfig.GroupName)
@@ -623,13 +622,13 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu
 		return nil, err
 	}
-	s.Client, err = createClient(logger, config.ClientConnection, master)
+	s.Client, err = createClient(ctx, config.ClientConnection, master)
 	if err != nil {
 		return nil, err
 	}
-	rawNodeIPs := getNodeIPs(logger, s.Client, s.Hostname)
-	s.PrimaryIPFamily, s.NodeIPs = detectNodeIPs(logger, rawNodeIPs, config.BindAddress)
+	rawNodeIPs := getNodeIPs(ctx, s.Client, s.Hostname)
+	s.PrimaryIPFamily, s.NodeIPs = detectNodeIPs(ctx, rawNodeIPs, config.BindAddress)
 	if len(config.NodePortAddresses) == 1 && config.NodePortAddresses[0] == kubeproxyconfig.NodePortAddressesPrimary {
 		var nodePortAddresses []string
@@ -656,7 +655,7 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu
 		s.HealthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration)
 	}
-	err = s.platformSetup()
+	err = s.platformSetup(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -666,7 +665,7 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu
 		logger.Error(err, "Kube-proxy configuration may be incomplete or incorrect")
 	}
-	ipv4Supported, ipv6Supported, dualStackSupported, err := s.platformCheckSupported()
+	ipv4Supported, ipv6Supported, dualStackSupported, err := s.platformCheckSupported(ctx)
 	if err != nil {
 		return nil, err
 	} else if (s.PrimaryIPFamily == v1.IPv4Protocol && !ipv4Supported) || (s.PrimaryIPFamily == v1.IPv6Protocol && !ipv6Supported) {
@@ -685,7 +684,7 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu
 		logger.Error(err, "Kube-proxy configuration may be incomplete or incorrect")
 	}
-	s.Proxier, err = s.createProxier(config, dualStackSupported, initOnly)
+	s.Proxier, err = s.createProxier(ctx, config, dualStackSupported, initOnly)
 	if err != nil {
 		return nil, err
 	}
@@ -816,7 +815,8 @@ func badBindAddress(bindAddress string, wrongFamily netutils.IPFamily) bool {
 // createClient creates a kube client from the given config and masterOverride.
 // TODO remove masterOverride when CLI flags are removed.
-func createClient(logger klog.Logger, config componentbaseconfig.ClientConnectionConfiguration, masterOverride string) (clientset.Interface, error) {
+func createClient(ctx context.Context, config componentbaseconfig.ClientConnectionConfiguration, masterOverride string) (clientset.Interface, error) {
+	logger := klog.FromContext(ctx)
 	var kubeConfig *rest.Config
 	var err error
@@ -847,7 +847,8 @@ func createClient(logger klog.Logger, config componentbaseconfig.ClientConnectio
 	return client, nil
 }
 
-func serveHealthz(logger klog.Logger, hz *healthcheck.ProxierHealthServer, errCh chan error) {
+func serveHealthz(ctx context.Context, hz *healthcheck.ProxierHealthServer, errCh chan error) {
+	logger := klog.FromContext(ctx)
 	if hz == nil {
 		return
 	}
@@ -866,7 +867,7 @@ func serveHealthz(logger klog.Logger, hz *healthcheck.ProxierHealthServer, errCh
 			logger.Error(nil, "Healthz server returned without error")
 		}
 	}
-	go wait.Until(fn, 5*time.Second, wait.NeverStop)
+	go wait.Until(fn, 5*time.Second, ctx.Done())
 }
 
 func serveMetrics(bindAddress string, proxyMode kubeproxyconfig.ProxyMode, enableProfiling bool, errCh chan error) {
@@ -911,18 +912,19 @@ func serveMetrics(bindAddress string, proxyMode kubeproxyconfig.ProxyMode, enabl
 // Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
 // TODO: At the moment, Run() cannot return a nil error, otherwise it's caller will never exit. Update callers of Run to handle nil errors.
-func (s *ProxyServer) Run() error {
+func (s *ProxyServer) Run(ctx context.Context) error {
+	logger := klog.FromContext(ctx)
 	// To help debugging, immediately log version
-	s.logger.Info("Version info", "version", version.Get())
-	s.logger.Info("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK"))
+	logger.Info("Version info", "version", version.Get())
+	logger.Info("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK"))
 	// TODO(vmarmol): Use container config for this.
 	var oomAdjuster *oom.OOMAdjuster
 	if s.Config.OOMScoreAdj != nil {
 		oomAdjuster = oom.NewOOMAdjuster()
 		if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*s.Config.OOMScoreAdj)); err != nil {
-			s.logger.V(2).Info("Failed to apply OOMScore", "err", err)
+			logger.V(2).Info("Failed to apply OOMScore", "err", err)
 		}
 	}
@@ -940,7 +942,7 @@ func (s *ProxyServer) Run() error {
 	}
 	// Start up a healthz server if requested
-	serveHealthz(s.logger, s.HealthzServer, healthzErrCh)
+	serveHealthz(ctx, s.HealthzServer, healthzErrCh)
 	// Start up a metrics server if requested
 	serveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, metricsErrCh)
@@ -968,16 +970,16 @@ func (s *ProxyServer) Run() error {
 	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
 	// only notify on changes, and the initial update (on process start) may be lost if no handlers
 	// are registered yet.
-	serviceConfig := config.NewServiceConfig(informerFactory.Core().V1().Services(), s.Config.ConfigSyncPeriod.Duration)
+	serviceConfig := config.NewServiceConfig(ctx, informerFactory.Core().V1().Services(), s.Config.ConfigSyncPeriod.Duration)
 	serviceConfig.RegisterEventHandler(s.Proxier)
-	go serviceConfig.Run(wait.NeverStop)
+	go serviceConfig.Run(ctx.Done())
-	endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.Config.ConfigSyncPeriod.Duration)
+	endpointSliceConfig := config.NewEndpointSliceConfig(ctx, informerFactory.Discovery().V1().EndpointSlices(), s.Config.ConfigSyncPeriod.Duration)
 	endpointSliceConfig.RegisterEventHandler(s.Proxier)
-	go endpointSliceConfig.Run(wait.NeverStop)
+	go endpointSliceConfig.Run(ctx.Done())
 	if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
-		serviceCIDRConfig := config.NewServiceCIDRConfig(informerFactory.Networking().V1alpha1().ServiceCIDRs(), s.Config.ConfigSyncPeriod.Duration)
+		serviceCIDRConfig := config.NewServiceCIDRConfig(ctx, informerFactory.Networking().V1alpha1().ServiceCIDRs(), s.Config.ConfigSyncPeriod.Duration)
 		serviceCIDRConfig.RegisterEventHandler(s.Proxier)
 		go serviceCIDRConfig.Run(wait.NeverStop)
 	}
@@ -990,10 +992,10 @@ func (s *ProxyServer) Run() error {
 		informers.WithTweakListOptions(func(options *metav1.ListOptions) {
 			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", s.NodeRef.Name).String()
 		}))
-	nodeConfig := config.NewNodeConfig(currentNodeInformerFactory.Core().V1().Nodes(), s.Config.ConfigSyncPeriod.Duration)
+	nodeConfig := config.NewNodeConfig(ctx, currentNodeInformerFactory.Core().V1().Nodes(), s.Config.ConfigSyncPeriod.Duration)
 	// https://issues.k8s.io/111321
 	if s.Config.DetectLocalMode == kubeproxyconfig.LocalModeNodeCIDR {
-		nodeConfig.RegisterEventHandler(proxy.NewNodePodCIDRHandler(s.podCIDRs))
+		nodeConfig.RegisterEventHandler(proxy.NewNodePodCIDRHandler(ctx, s.podCIDRs))
 	}
 	if utilfeature.DefaultFeatureGate.Enabled(features.KubeProxyDrainingTerminatingNodes) {
 		nodeConfig.RegisterEventHandler(&proxy.NodeEligibleHandler{
@@ -1039,7 +1041,8 @@ func (s *ProxyServer) birthCry() {
 // 1. if bindAddress is not 0.0.0.0 or ::, then it is used as the primary IP.
 // 2. if rawNodeIPs is not empty, then its address(es) is/are used
 // 3. otherwise the node IPs are 127.0.0.1 and ::1
-func detectNodeIPs(logger klog.Logger, rawNodeIPs []net.IP, bindAddress string) (v1.IPFamily, map[v1.IPFamily]net.IP) {
+func detectNodeIPs(ctx context.Context, rawNodeIPs []net.IP, bindAddress string) (v1.IPFamily, map[v1.IPFamily]net.IP) {
+	logger := klog.FromContext(ctx)
 	primaryFamily := v1.IPv4Protocol
 	nodeIPs := map[v1.IPFamily]net.IP{
 		v1.IPv4Protocol: net.IPv4(127, 0, 0, 1),
@@ -1080,7 +1083,8 @@ func detectNodeIPs(logger klog.Logger, rawNodeIPs []net.IP, bindAddress string)
 // getNodeIP returns IPs for the node with the provided name. If
 // required, it will wait for the node to be created.
-func getNodeIPs(logger klog.Logger, client clientset.Interface, name string) []net.IP {
+func getNodeIPs(ctx context.Context, client clientset.Interface, name string) []net.IP {
+	logger := klog.FromContext(ctx)
 	var nodeIPs []net.IP
 	backoff := wait.Backoff{
 		Steps: 6,
@@ -1090,7 +1094,7 @@ func getNodeIPs(logger klog.Logger, client clientset.Interface, name string) []n
 	}
 	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
-		node, err := client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
+		node, err := client.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			logger.Error(err, "Failed to retrieve node info")
 			return false, nil

View File

@@ -84,18 +84,19 @@ func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfigur
 // platformSetup is called after setting up the ProxyServer, but before creating the
 // Proxier. It should fill in any platform-specific fields and perform other
 // platform-specific setup.
-func (s *ProxyServer) platformSetup() error {
+func (s *ProxyServer) platformSetup(ctx context.Context) error {
+	logger := klog.FromContext(ctx)
 	if s.Config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
-		s.logger.Info("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
-		node, err := waitForPodCIDR(s.Client, s.Hostname)
+		logger.Info("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
+		node, err := waitForPodCIDR(ctx, s.Client, s.Hostname)
 		if err != nil {
 			return err
 		}
 		s.podCIDRs = node.Spec.PodCIDRs
-		s.logger.Info("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
+		logger.Info("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
 	}
-	err := s.setupConntrack()
+	err := s.setupConntrack(ctx)
 	if err != nil {
 		return err
 	}
@@ -133,7 +134,9 @@ func getIPTables(primaryFamily v1.IPFamily) ([2]utiliptables.Interface, utilipta
 // platformCheckSupported is called immediately before creating the Proxier, to check
 // what IP families are supported (and whether the configuration is usable at all).
-func (s *ProxyServer) platformCheckSupported() (ipv4Supported, ipv6Supported, dualStackSupported bool, err error) {
+func (s *ProxyServer) platformCheckSupported(ctx context.Context) (ipv4Supported, ipv6Supported, dualStackSupported bool, err error) {
+	logger := klog.FromContext(ctx)
+
 	if isIPTablesBased(s.Config.Mode) {
 		ipt, _ := getIPTables(v1.IPFamilyUnknown)
 		ipv4Supported = ipt[0].Present()
@@ -142,9 +145,9 @@ func (s *ProxyServer) platformCheckSupported() (ipv4Supported, ipv6Supported, du
 		if !ipv4Supported && !ipv6Supported {
 			err = fmt.Errorf("iptables is not available on this host")
 		} else if !ipv4Supported {
-			s.logger.Info("No iptables support for family", "ipFamily", v1.IPv4Protocol)
+			logger.Info("No iptables support for family", "ipFamily", v1.IPv4Protocol)
 		} else if !ipv6Supported {
-			s.logger.Info("No iptables support for family", "ipFamily", v1.IPv6Protocol)
+			logger.Info("No iptables support for family", "ipFamily", v1.IPv6Protocol)
 		}
 	} else {
 		// Assume support for both families.
@@ -159,25 +162,27 @@ func (s *ProxyServer) platformCheckSupported() (ipv4Supported, ipv6Supported, du
 }
 
 // createProxier creates the proxy.Provider
-func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration, dualStack, initOnly bool) (proxy.Provider, error) {
+func (s *ProxyServer) createProxier(ctx context.Context, config *proxyconfigapi.KubeProxyConfiguration, dualStack, initOnly bool) (proxy.Provider, error) {
+	logger := klog.FromContext(ctx)
 	var proxier proxy.Provider
 	var localDetectors [2]proxyutiliptables.LocalTrafficDetector
 	var localDetector proxyutiliptables.LocalTrafficDetector
 	var err error
 	if config.Mode == proxyconfigapi.ProxyModeIPTables {
-		s.logger.Info("Using iptables Proxier")
+		logger.Info("Using iptables Proxier")
 		if dualStack {
 			ipt, _ := getIPTables(s.PrimaryIPFamily)
-			localDetectors, err = getDualStackLocalDetectorTuple(s.logger, config.DetectLocalMode, config, s.podCIDRs)
+			localDetectors, err = getDualStackLocalDetectorTuple(logger, config.DetectLocalMode, config, s.podCIDRs)
 			if err != nil {
 				return nil, fmt.Errorf("unable to create proxier: %v", err)
 			}
 			// TODO this has side effects that should only happen when Run() is invoked.
 			proxier, err = iptables.NewDualStackProxier(
+				ctx,
 				ipt,
 				utilsysctl.New(),
 				exec.New(),
@@ -197,13 +202,14 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio
 		} else {
 			// Create a single-stack proxier if and only if the node does not support dual-stack (i.e, no iptables support).
 			_, iptInterface := getIPTables(s.PrimaryIPFamily)
-			localDetector, err = getLocalDetector(s.logger, s.PrimaryIPFamily, config.DetectLocalMode, config, s.podCIDRs)
+			localDetector, err = getLocalDetector(logger, s.PrimaryIPFamily, config.DetectLocalMode, config, s.podCIDRs)
 			if err != nil {
 				return nil, fmt.Errorf("unable to create proxier: %v", err)
 			}
 			// TODO this has side effects that should only happen when Run() is invoked.
 			proxier, err = iptables.NewProxier(
+				ctx,
 				s.PrimaryIPFamily,
 				iptInterface,
 				utilsysctl.New(),
@@ -230,21 +236,22 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio
 		execer := exec.New()
 		ipsetInterface := utilipset.New(execer)
 		ipvsInterface := utilipvs.New()
-		if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
+		if err := ipvs.CanUseIPVSProxier(ctx, ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
 			return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
 		}
-		s.logger.Info("Using ipvs Proxier")
+		logger.Info("Using ipvs Proxier")
 		if dualStack {
 			ipt, _ := getIPTables(s.PrimaryIPFamily)
 			// Always ordered to match []ipt
-			localDetectors, err = getDualStackLocalDetectorTuple(s.logger, config.DetectLocalMode, config, s.podCIDRs)
+			localDetectors, err = getDualStackLocalDetectorTuple(logger, config.DetectLocalMode, config, s.podCIDRs)
 			if err != nil {
 				return nil, fmt.Errorf("unable to create proxier: %v", err)
 			}
 			proxier, err = ipvs.NewDualStackProxier(
+				ctx,
 				ipt,
 				ipvsInterface,
 				ipsetInterface,
@@ -270,12 +277,13 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio
 			)
 		} else {
 			_, iptInterface := getIPTables(s.PrimaryIPFamily)
-			localDetector, err = getLocalDetector(s.logger, s.PrimaryIPFamily, config.DetectLocalMode, config, s.podCIDRs)
+			localDetector, err = getLocalDetector(logger, s.PrimaryIPFamily, config.DetectLocalMode, config, s.podCIDRs)
 			if err != nil {
 				return nil, fmt.Errorf("unable to create proxier: %v", err)
 			}
 			proxier, err = ipvs.NewProxier(
+				ctx,
 				s.PrimaryIPFamily,
 				iptInterface,
 				ipvsInterface,
@@ -305,16 +313,17 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio
 			return nil, fmt.Errorf("unable to create proxier: %v", err)
 		}
 	} else if config.Mode == proxyconfigapi.ProxyModeNFTables {
-		s.logger.Info("Using nftables Proxier")
+		logger.Info("Using nftables Proxier")
 		if dualStack {
-			localDetectors, err = getDualStackLocalDetectorTuple(s.logger, config.DetectLocalMode, config, s.podCIDRs)
+			localDetectors, err = getDualStackLocalDetectorTuple(logger, config.DetectLocalMode, config, s.podCIDRs)
 			if err != nil {
 				return nil, fmt.Errorf("unable to create proxier: %v", err)
 			}
 			// TODO this has side effects that should only happen when Run() is invoked.
 			proxier, err = nftables.NewDualStackProxier(
+				ctx,
 				utilsysctl.New(),
 				config.NFTables.SyncPeriod.Duration,
 				config.NFTables.MinSyncPeriod.Duration,
@@ -330,13 +339,14 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio
 			)
 		} else {
 			// Create a single-stack proxier if and only if the node does not support dual-stack
-			localDetector, err = getLocalDetector(s.logger, s.PrimaryIPFamily, config.DetectLocalMode, config, s.podCIDRs)
+			localDetector, err = getLocalDetector(logger, s.PrimaryIPFamily, config.DetectLocalMode, config, s.podCIDRs)
 			if err != nil {
 				return nil, fmt.Errorf("unable to create proxier: %v", err)
 			}
 			// TODO this has side effects that should only happen when Run() is invoked.
 			proxier, err = nftables.NewProxier(
+				ctx,
 				s.PrimaryIPFamily,
 				utilsysctl.New(),
 				config.NFTables.SyncPeriod.Duration,
@@ -361,17 +371,15 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio
 	return proxier, nil
 }
 
-func (s *ProxyServer) setupConntrack() error {
-	ct := &realConntracker{
-		logger: s.logger,
-	}
+func (s *ProxyServer) setupConntrack(ctx context.Context) error {
+	ct := &realConntracker{}
-	max, err := getConntrackMax(s.logger, s.Config.Conntrack)
+	max, err := getConntrackMax(ctx, s.Config.Conntrack)
 	if err != nil {
 		return err
 	}
 	if max > 0 {
-		err := ct.SetMax(max)
+		err := ct.SetMax(ctx, max)
 		if err != nil {
 			if err != errReadOnlySysFS {
 				return err
@@ -391,34 +399,34 @@ func (s *ProxyServer) setupConntrack() error {
 	if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
 		timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
-		if err := ct.SetTCPEstablishedTimeout(timeout); err != nil {
+		if err := ct.SetTCPEstablishedTimeout(ctx, timeout); err != nil {
 			return err
 		}
 	}
 	if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
 		timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
-		if err := ct.SetTCPCloseWaitTimeout(timeout); err != nil {
+		if err := ct.SetTCPCloseWaitTimeout(ctx, timeout); err != nil {
 			return err
 		}
 	}
 	if s.Config.Conntrack.TCPBeLiberal {
-		if err := ct.SetTCPBeLiberal(1); err != nil {
+		if err := ct.SetTCPBeLiberal(ctx, 1); err != nil {
 			return err
 		}
 	}
 	if s.Config.Conntrack.UDPTimeout.Duration > 0 {
 		timeout := int(s.Config.Conntrack.UDPTimeout.Duration / time.Second)
-		if err := ct.SetUDPTimeout(timeout); err != nil {
+		if err := ct.SetUDPTimeout(ctx, timeout); err != nil {
 			return err
 		}
 	}
 	if s.Config.Conntrack.UDPStreamTimeout.Duration > 0 {
 		timeout := int(s.Config.Conntrack.UDPStreamTimeout.Duration / time.Second)
-		if err := ct.SetUDPStreamTimeout(timeout); err != nil {
+		if err := ct.SetUDPStreamTimeout(ctx, timeout); err != nil {
 			return err
 		}
 	}
@@ -426,7 +434,8 @@ func (s *ProxyServer) setupConntrack() error {
 	return nil
 }
 
-func getConntrackMax(logger klog.Logger, config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
+func getConntrackMax(ctx context.Context, config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
+	logger := klog.FromContext(ctx)
 	if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
 		floor := 0
 		if config.Min != nil {
@@ -443,10 +452,10 @@ func getConntrackMax(logger klog.Logger, config proxyconfigapi.KubeProxyConntrac
 	return 0, nil
 }
 
-func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
+func waitForPodCIDR(ctx context.Context, client clientset.Interface, nodeName string) (*v1.Node, error) {
 	// since allocators can assign the podCIDR after the node registers, we do a watch here to wait
 	// for podCIDR to be assigned, instead of assuming that the Get() on startup will have it.
-	ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR)
+	ctx, cancelFunc := context.WithTimeout(ctx, timeoutForNodePodCIDR)
 	defer cancelFunc()
 	fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String()
@@ -552,7 +561,7 @@ func getDualStackLocalDetectorTuple(logger klog.Logger, mode proxyconfigapi.Loca
 // cleanupAndExit is true, it will attempt to remove rules from all known kube-proxy
 // modes. If it is false, it will only remove rules that are definitely not in use by the
 // currently-configured mode.
-func platformCleanup(mode proxyconfigapi.ProxyMode, cleanupAndExit bool) error {
+func platformCleanup(ctx context.Context, mode proxyconfigapi.ProxyMode, cleanupAndExit bool) error {
 	var encounteredError bool
 	// Clean up iptables and ipvs rules if switching to nftables, or if cleanupAndExit
@@ -563,15 +572,15 @@ func platformCleanup(mode proxyconfigapi.ProxyMode, cleanupAndExit bool) error {
 		ipvsInterface := utilipvs.New()
 		for _, ipt := range ipts {
-			encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
-			encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
+			encounteredError = iptables.CleanupLeftovers(ctx, ipt) || encounteredError
+			encounteredError = ipvs.CleanupLeftovers(ctx, ipvsInterface, ipt, ipsetInterface) || encounteredError
 		}
 	}
 	if utilfeature.DefaultFeatureGate.Enabled(features.NFTablesProxyMode) {
 		// Clean up nftables rules when switching to iptables or ipvs, or if cleanupAndExit
 		if isIPTablesBased(mode) || cleanupAndExit {
-			encounteredError = nftables.CleanupLeftovers() || encounteredError
+			encounteredError = nftables.CleanupLeftovers(ctx) || encounteredError
 		}
 	}

View File

@@ -561,6 +561,7 @@ detectLocalMode: "BridgeInterface"`)
 	}
 	for _, tc := range testCases {
+		_, ctx := ktesting.NewTestContext(t)
 		file, tempDir, err := setUp()
 		if err != nil {
 			t.Fatalf("unexpected error when setting up environment: %v", err)
@@ -576,7 +577,7 @@ detectLocalMode: "BridgeInterface"`)
 		errCh := make(chan error, 1)
 		go func() {
-			errCh <- opt.runLoop()
+			errCh <- opt.runLoop(ctx)
 		}()
 		if tc.append {
@@ -598,6 +599,7 @@ detectLocalMode: "BridgeInterface"`)
 }
 
 func Test_waitForPodCIDR(t *testing.T) {
+	_, ctx := ktesting.NewTestContext(t)
 	expected := []string{"192.168.0.0/24", "fd00:1:2::/64"}
 	nodeName := "test-node"
 	oldNode := &v1.Node{
@@ -636,7 +638,7 @@ func Test_waitForPodCIDR(t *testing.T) {
 		// set the PodCIDRs on the new node
 		fakeWatch.Modify(updatedNode)
 	}()
-	got, err := waitForPodCIDR(client, node.Name)
+	got, err := waitForPodCIDR(ctx, client, node.Name)
 	if err != nil {
 		t.Errorf("waitForPodCIDR() unexpected error %v", err)
 		return
@@ -679,8 +681,8 @@ func TestGetConntrackMax(t *testing.T) {
 			Min:        ptr.To(tc.min),
 			MaxPerCore: ptr.To(tc.maxPerCore),
 		}
-		logger, _ := ktesting.NewTestContext(t)
-		x, e := getConntrackMax(logger, cfg)
+		_, ctx := ktesting.NewTestContext(t)
+		x, e := getConntrackMax(ctx, cfg)
 		if e != nil {
 			if tc.err == "" {
 				t.Errorf("[%d] unexpected error: %v", i, e)
@@ -720,6 +722,7 @@ func TestProxyServer_platformSetup(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			_, ctx := ktesting.NewTestContext(t)
 			client := clientsetfake.NewSimpleClientset(tt.node)
 			s := &ProxyServer{
 				Config: tt.config,
@@ -730,7 +733,7 @@ func TestProxyServer_platformSetup(t *testing.T) {
 					v1.IPv6Protocol: net.IPv6zero,
 				},
 			}
-			err := s.platformSetup()
+			err := s.platformSetup(ctx)
 			if err != nil {
 				t.Errorf("ProxyServer.createProxier() error = %v", err)
 				return

View File

@@ -22,6 +22,7 @@ limitations under the License.
 package app
 
 import (
+	"context"
 	"fmt"
 	"runtime"
@@ -39,22 +40,22 @@ var unsupportedError = fmt.Errorf(runtime.GOOS + "/" + runtime.GOARCH + "is unsu
 // platformSetup is called after setting up the ProxyServer, but before creating the
 // Proxier. It should fill in any platform-specific fields and perform other
 // platform-specific setup.
-func (s *ProxyServer) platformSetup() error {
+func (s *ProxyServer) platformSetup(ctx context.Context) error {
 	return unsupportedError
 }
 
 // platformCheckSupported is called immediately before creating the Proxier, to check
 // what IP families are supported (and whether the configuration is usable at all).
-func (s *ProxyServer) platformCheckSupported() (ipv4Supported, ipv6Supported, dualStackSupported bool, err error) {
+func (s *ProxyServer) platformCheckSupported(ctx context.Context) (ipv4Supported, ipv6Supported, dualStackSupported bool, err error) {
 	return false, false, false, unsupportedError
 }
 
 // createProxier creates the proxy.Provider
-func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration, dualStackMode, initOnly bool) (proxy.Provider, error) {
+func (s *ProxyServer) createProxier(ctx context.Context, config *proxyconfigapi.KubeProxyConfiguration, dualStackMode, initOnly bool) (proxy.Provider, error) {
 	return nil, unsupportedError
 }
 
 // platformCleanup removes stale kube-proxy rules that can be safely removed.
-func platformCleanup(mode proxyconfigapi.ProxyMode, cleanupAndExit bool) error {
+func platformCleanup(ctx context.Context, mode proxyconfigapi.ProxyMode, cleanupAndExit bool) error {
 	return unsupportedError
 }

View File

@@ -513,7 +513,7 @@ kind: KubeProxyConfiguration
 type fakeProxyServerLongRun struct{}
 
 // Run runs the specified ProxyServer.
-func (s *fakeProxyServerLongRun) Run() error {
+func (s *fakeProxyServerLongRun) Run(ctx context.Context) error {
 	for {
 		time.Sleep(2 * time.Second)
 	}
@@ -527,7 +527,7 @@ func (s *fakeProxyServerLongRun) CleanupAndExit() error {
 type fakeProxyServerError struct{}
 
 // Run runs the specified ProxyServer.
-func (s *fakeProxyServerError) Run() error {
+func (s *fakeProxyServerError) Run(ctx context.Context) error {
 	for {
 		time.Sleep(2 * time.Second)
 		return fmt.Errorf("mocking error from ProxyServer.Run()")
@@ -654,8 +654,8 @@ func Test_getNodeIPs(t *testing.T) {
 		nodeName := fmt.Sprintf("node%d", i+1)
 		expectIP := fmt.Sprintf("192.168.0.%d", i+1)
 		go func() {
-			logger, _ := ktesting.NewTestContext(t)
-			ips := getNodeIPs(logger, client, nodeName)
+			_, ctx := ktesting.NewTestContext(t)
+			ips := getNodeIPs(ctx, client, nodeName)
 			if len(ips) == 0 {
 				ch <- fmt.Errorf("expected IP %s for %s but got nil", expectIP, nodeName)
 			} else if ips[0].String() != expectIP {
@@ -834,8 +834,8 @@ func Test_detectNodeIPs(t *testing.T) {
 	}
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			logger, _ := ktesting.NewTestContext(t)
-			primaryFamily, ips := detectNodeIPs(logger, c.rawNodeIPs, c.bindAddress)
+			_, ctx := ktesting.NewTestContext(t)
+			primaryFamily, ips := detectNodeIPs(ctx, c.rawNodeIPs, c.bindAddress)
 			if primaryFamily != c.expectedFamily {
 				t.Errorf("Expected family %q got %q", c.expectedFamily, primaryFamily)
 			}

View File

@@ -22,6 +22,7 @@ limitations under the License.
 package app
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"net"
@@ -49,7 +50,7 @@ func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfigur
 // platformSetup is called after setting up the ProxyServer, but before creating the
 // Proxier. It should fill in any platform-specific fields and perform other
 // platform-specific setup.
-func (s *ProxyServer) platformSetup() error {
+func (s *ProxyServer) platformSetup(ctx context.Context) error {
 	winkernel.RegisterMetrics()
 	// Preserve backward-compatibility with the old secondary IP behavior
 	if s.PrimaryIPFamily == v1.IPv4Protocol {
@@ -62,7 +63,7 @@ func (s *ProxyServer) platformSetup() error {
 // platformCheckSupported is called immediately before creating the Proxier, to check
 // what IP families are supported (and whether the configuration is usable at all).
-func (s *ProxyServer) platformCheckSupported() (ipv4Supported, ipv6Supported, dualStackSupported bool, err error) {
+func (s *ProxyServer) platformCheckSupported(ctx context.Context) (ipv4Supported, ipv6Supported, dualStackSupported bool, err error) {
 	// Check if Kernel proxier can be used at all
 	_, err = winkernel.CanUseWinKernelProxier(winkernel.WindowsKernelCompatTester{})
 	if err != nil {
@@ -81,7 +82,7 @@ func (s *ProxyServer) platformCheckSupported() (ipv4Supported, ipv6Supported, du
 }
 
 // createProxier creates the proxy.Provider
-func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration, dualStackMode, initOnly bool) (proxy.Provider, error) {
+func (s *ProxyServer) createProxier(ctx context.Context, config *proxyconfigapi.KubeProxyConfiguration, dualStackMode, initOnly bool) (proxy.Provider, error) {
 	if initOnly {
 		return nil, fmt.Errorf("--init-only is not implemented on Windows")
 	}
@@ -121,7 +122,7 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio
 }
 
 // platformCleanup removes stale kube-proxy rules that can be safely removed.
-func platformCleanup(mode proxyconfigapi.ProxyMode, cleanupAndExit bool) error {
+func platformCleanup(ctx context.Context, mode proxyconfigapi.ProxyMode, cleanupAndExit bool) error {
 	if cleanupAndExit {
 		return errors.New("--cleanup-and-exit is not implemented on Windows")
 	}

View File

@@ -30,10 +30,12 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	ktesting "k8s.io/client-go/testing"
+	klogtesting "k8s.io/klog/v2/ktesting"
 	"k8s.io/utils/ptr"
 )
 
 func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
+	_, ctx := klogtesting.NewTestContext(t)
 	service1v1 := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
 		Spec:       v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 10}}}}
@@ -56,7 +58,7 @@ func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
 	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
-	serviceConfig := NewServiceConfig(sharedInformers.Core().V1().Services(), time.Minute)
+	serviceConfig := NewServiceConfig(ctx, sharedInformers.Core().V1().Services(), time.Minute)
 	serviceConfig.RegisterEventHandler(handler)
 	go sharedInformers.Start(stopCh)
 	go serviceConfig.Run(stopCh)
@@ -83,6 +85,7 @@ func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
 }
 
 func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
+	_, ctx := klogtesting.NewTestContext(t)
 	endpoints1v1 := &discoveryv1.EndpointSlice{
 		ObjectMeta:  metav1.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
 		AddressType: discoveryv1.AddressTypeIPv4,
@@ -136,7 +139,7 @@ func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
 	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
-	endpointsliceConfig := NewEndpointSliceConfig(sharedInformers.Discovery().V1().EndpointSlices(), time.Minute)
+	endpointsliceConfig := NewEndpointSliceConfig(ctx, sharedInformers.Discovery().V1().EndpointSlices(), time.Minute)
 	endpointsliceConfig.RegisterEventHandler(handler)
 	go sharedInformers.Start(stopCh)
 	go endpointsliceConfig.Run(stopCh)
@@ -163,6 +166,7 @@ func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
 }
 
 func TestInitialSync(t *testing.T) {
+	_, ctx := klogtesting.NewTestContext(t)
 	svc1 := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
 		Spec:       v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 10}}},
@@ -191,11 +195,11 @@ func TestInitialSync(t *testing.T) {
 	client := fake.NewSimpleClientset(svc1, svc2, eps2, eps1)
 	sharedInformers := informers.NewSharedInformerFactory(client, 0)
-	svcConfig := NewServiceConfig(sharedInformers.Core().V1().Services(), 0)
+	svcConfig := NewServiceConfig(ctx, sharedInformers.Core().V1().Services(), 0)
 	svcHandler := NewServiceHandlerMock()
 	svcConfig.RegisterEventHandler(svcHandler)
-	epsConfig := NewEndpointSliceConfig(sharedInformers.Discovery().V1().EndpointSlices(), 0)
+	epsConfig := NewEndpointSliceConfig(ctx, sharedInformers.Discovery().V1().EndpointSlices(), 0)
 	epsHandler := NewEndpointSliceHandlerMock()
 	epsConfig.RegisterEventHandler(epsHandler)

View File

@ -17,6 +17,7 @@ limitations under the License.
package config package config
import ( import (
"context"
"fmt" "fmt"
"sync" "sync"
"time" "time"
@ -71,12 +72,14 @@ type EndpointSliceHandler interface {
type EndpointSliceConfig struct { type EndpointSliceConfig struct {
listerSynced cache.InformerSynced listerSynced cache.InformerSynced
eventHandlers []EndpointSliceHandler eventHandlers []EndpointSliceHandler
logger klog.Logger
} }
// NewEndpointSliceConfig creates a new EndpointSliceConfig. // NewEndpointSliceConfig creates a new EndpointSliceConfig.
func NewEndpointSliceConfig(endpointSliceInformer discoveryv1informers.EndpointSliceInformer, resyncPeriod time.Duration) *EndpointSliceConfig { func NewEndpointSliceConfig(ctx context.Context, endpointSliceInformer discoveryv1informers.EndpointSliceInformer, resyncPeriod time.Duration) *EndpointSliceConfig {
result := &EndpointSliceConfig{ result := &EndpointSliceConfig{
listerSynced: endpointSliceInformer.Informer().HasSynced, listerSynced: endpointSliceInformer.Informer().HasSynced,
logger: klog.FromContext(ctx),
} }
_, _ = endpointSliceInformer.Informer().AddEventHandlerWithResyncPeriod( _, _ = endpointSliceInformer.Informer().AddEventHandlerWithResyncPeriod(
@ -98,14 +101,14 @@ func (c *EndpointSliceConfig) RegisterEventHandler(handler EndpointSliceHandler)
// Run waits for cache synced and invokes handlers after syncing. // Run waits for cache synced and invokes handlers after syncing.
func (c *EndpointSliceConfig) Run(stopCh <-chan struct{}) { func (c *EndpointSliceConfig) Run(stopCh <-chan struct{}) {
klog.InfoS("Starting endpoint slice config controller") c.logger.Info("Starting endpoint slice config controller")
if !cache.WaitForNamedCacheSync("endpoint slice config", stopCh, c.listerSynced) { if !cache.WaitForNamedCacheSync("endpoint slice config", stopCh, c.listerSynced) {
return return
} }
for _, h := range c.eventHandlers { for _, h := range c.eventHandlers {
klog.V(3).InfoS("Calling handler.OnEndpointSlicesSynced()") c.logger.V(3).Info("Calling handler.OnEndpointSlicesSynced()")
h.OnEndpointSlicesSynced() h.OnEndpointSlicesSynced()
} }
} }
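
The mechanical rewrite applied to every log statement in these controllers is worth spelling out once: the stored logger replaces the package-level klog call, and InfoS becomes Info (structured logging is the only API on klog.Logger). Before and after, using a line from the diff above:

// Before: package-level call; output cannot be tied to a particular
// controller instance or test.
klog.V(3).InfoS("Calling handler.OnEndpointSlicesSynced()")

// After: the logger captured from ctx at construction time is used, so
// the line inherits any key/value pairs the caller attached upstream.
c.logger.V(3).Info("Calling handler.OnEndpointSlicesSynced()")
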
@ -117,7 +120,7 @@ func (c *EndpointSliceConfig) handleAddEndpointSlice(obj interface{}) {
return return
} }
for _, h := range c.eventHandlers { for _, h := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnEndpointSliceAdd", "endpoints", klog.KObj(endpointSlice)) c.logger.V(4).Info("Calling handler.OnEndpointSliceAdd", "endpoints", klog.KObj(endpointSlice))
h.OnEndpointSliceAdd(endpointSlice) h.OnEndpointSliceAdd(endpointSlice)
} }
} }
@ -134,7 +137,7 @@ func (c *EndpointSliceConfig) handleUpdateEndpointSlice(oldObj, newObj interface
return return
} }
for _, h := range c.eventHandlers { for _, h := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnEndpointSliceUpdate") c.logger.V(4).Info("Calling handler.OnEndpointSliceUpdate")
h.OnEndpointSliceUpdate(oldEndpointSlice, newEndpointSlice) h.OnEndpointSliceUpdate(oldEndpointSlice, newEndpointSlice)
} }
} }
@ -153,7 +156,7 @@ func (c *EndpointSliceConfig) handleDeleteEndpointSlice(obj interface{}) {
} }
} }
for _, h := range c.eventHandlers { for _, h := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnEndpointsDelete") c.logger.V(4).Info("Calling handler.OnEndpointsDelete")
h.OnEndpointSliceDelete(endpointSlice) h.OnEndpointSliceDelete(endpointSlice)
} }
} }
@ -162,12 +165,14 @@ func (c *EndpointSliceConfig) handleDeleteEndpointSlice(obj interface{}) {
type ServiceConfig struct { type ServiceConfig struct {
listerSynced cache.InformerSynced listerSynced cache.InformerSynced
eventHandlers []ServiceHandler eventHandlers []ServiceHandler
logger klog.Logger
} }
// NewServiceConfig creates a new ServiceConfig. // NewServiceConfig creates a new ServiceConfig.
func NewServiceConfig(serviceInformer v1informers.ServiceInformer, resyncPeriod time.Duration) *ServiceConfig { func NewServiceConfig(ctx context.Context, serviceInformer v1informers.ServiceInformer, resyncPeriod time.Duration) *ServiceConfig {
result := &ServiceConfig{ result := &ServiceConfig{
listerSynced: serviceInformer.Informer().HasSynced, listerSynced: serviceInformer.Informer().HasSynced,
logger: klog.FromContext(ctx),
} }
_, _ = serviceInformer.Informer().AddEventHandlerWithResyncPeriod( _, _ = serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
@ -189,14 +194,14 @@ func (c *ServiceConfig) RegisterEventHandler(handler ServiceHandler) {
// Run waits for cache synced and invokes handlers after syncing. // Run waits for cache synced and invokes handlers after syncing.
func (c *ServiceConfig) Run(stopCh <-chan struct{}) { func (c *ServiceConfig) Run(stopCh <-chan struct{}) {
klog.InfoS("Starting service config controller") c.logger.Info("Starting service config controller")
if !cache.WaitForNamedCacheSync("service config", stopCh, c.listerSynced) { if !cache.WaitForNamedCacheSync("service config", stopCh, c.listerSynced) {
return return
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(3).InfoS("Calling handler.OnServiceSynced()") c.logger.V(3).Info("Calling handler.OnServiceSynced()")
c.eventHandlers[i].OnServiceSynced() c.eventHandlers[i].OnServiceSynced()
} }
} }
@ -208,7 +213,7 @@ func (c *ServiceConfig) handleAddService(obj interface{}) {
return return
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnServiceAdd") c.logger.V(4).Info("Calling handler.OnServiceAdd")
c.eventHandlers[i].OnServiceAdd(service) c.eventHandlers[i].OnServiceAdd(service)
} }
} }
@ -225,7 +230,7 @@ func (c *ServiceConfig) handleUpdateService(oldObj, newObj interface{}) {
return return
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnServiceUpdate") c.logger.V(4).Info("Calling handler.OnServiceUpdate")
c.eventHandlers[i].OnServiceUpdate(oldService, service) c.eventHandlers[i].OnServiceUpdate(oldService, service)
} }
} }
@ -244,7 +249,7 @@ func (c *ServiceConfig) handleDeleteService(obj interface{}) {
} }
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnServiceDelete") c.logger.V(4).Info("Calling handler.OnServiceDelete")
c.eventHandlers[i].OnServiceDelete(service) c.eventHandlers[i].OnServiceDelete(service)
} }
} }
@ -289,12 +294,14 @@ var _ NodeHandler = &NoopNodeHandler{}
type NodeConfig struct { type NodeConfig struct {
listerSynced cache.InformerSynced listerSynced cache.InformerSynced
eventHandlers []NodeHandler eventHandlers []NodeHandler
logger klog.Logger
} }
// NewNodeConfig creates a new NodeConfig. // NewNodeConfig creates a new NodeConfig.
func NewNodeConfig(nodeInformer v1informers.NodeInformer, resyncPeriod time.Duration) *NodeConfig { func NewNodeConfig(ctx context.Context, nodeInformer v1informers.NodeInformer, resyncPeriod time.Duration) *NodeConfig {
result := &NodeConfig{ result := &NodeConfig{
listerSynced: nodeInformer.Informer().HasSynced, listerSynced: nodeInformer.Informer().HasSynced,
logger: klog.FromContext(ctx),
} }
_, _ = nodeInformer.Informer().AddEventHandlerWithResyncPeriod( _, _ = nodeInformer.Informer().AddEventHandlerWithResyncPeriod(
@ -316,14 +323,14 @@ func (c *NodeConfig) RegisterEventHandler(handler NodeHandler) {
// Run starts the goroutine responsible for calling registered handlers. // Run starts the goroutine responsible for calling registered handlers.
func (c *NodeConfig) Run(stopCh <-chan struct{}) { func (c *NodeConfig) Run(stopCh <-chan struct{}) {
klog.InfoS("Starting node config controller") c.logger.Info("Starting node config controller")
if !cache.WaitForNamedCacheSync("node config", stopCh, c.listerSynced) { if !cache.WaitForNamedCacheSync("node config", stopCh, c.listerSynced) {
return return
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(3).InfoS("Calling handler.OnNodeSynced()") c.logger.V(3).Info("Calling handler.OnNodeSynced()")
c.eventHandlers[i].OnNodeSynced() c.eventHandlers[i].OnNodeSynced()
} }
} }
@ -335,7 +342,7 @@ func (c *NodeConfig) handleAddNode(obj interface{}) {
return return
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnNodeAdd") c.logger.V(4).Info("Calling handler.OnNodeAdd")
c.eventHandlers[i].OnNodeAdd(node) c.eventHandlers[i].OnNodeAdd(node)
} }
} }
@ -352,7 +359,7 @@ func (c *NodeConfig) handleUpdateNode(oldObj, newObj interface{}) {
return return
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(5).InfoS("Calling handler.OnNodeUpdate") c.logger.V(5).Info("Calling handler.OnNodeUpdate")
c.eventHandlers[i].OnNodeUpdate(oldNode, node) c.eventHandlers[i].OnNodeUpdate(oldNode, node)
} }
} }
@ -371,7 +378,7 @@ func (c *NodeConfig) handleDeleteNode(obj interface{}) {
} }
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnNodeDelete") c.logger.V(4).Info("Calling handler.OnNodeDelete")
c.eventHandlers[i].OnNodeDelete(node) c.eventHandlers[i].OnNodeDelete(node)
} }
} }
@ -390,13 +397,15 @@ type ServiceCIDRConfig struct {
eventHandlers []ServiceCIDRHandler eventHandlers []ServiceCIDRHandler
mu sync.Mutex mu sync.Mutex
cidrs sets.Set[string] cidrs sets.Set[string]
logger klog.Logger
} }
// NewServiceCIDRConfig creates a new ServiceCIDRConfig. // NewServiceCIDRConfig creates a new ServiceCIDRConfig.
func NewServiceCIDRConfig(serviceCIDRInformer networkingv1alpha1informers.ServiceCIDRInformer, resyncPeriod time.Duration) *ServiceCIDRConfig { func NewServiceCIDRConfig(ctx context.Context, serviceCIDRInformer networkingv1alpha1informers.ServiceCIDRInformer, resyncPeriod time.Duration) *ServiceCIDRConfig {
result := &ServiceCIDRConfig{ result := &ServiceCIDRConfig{
listerSynced: serviceCIDRInformer.Informer().HasSynced, listerSynced: serviceCIDRInformer.Informer().HasSynced,
cidrs: sets.New[string](), cidrs: sets.New[string](),
logger: klog.FromContext(ctx),
} }
_, _ = serviceCIDRInformer.Informer().AddEventHandlerWithResyncPeriod( _, _ = serviceCIDRInformer.Informer().AddEventHandlerWithResyncPeriod(
@ -423,7 +432,7 @@ func (c *ServiceCIDRConfig) RegisterEventHandler(handler ServiceCIDRHandler) {
// Run waits for cache synced and invokes handlers after syncing. // Run waits for cache synced and invokes handlers after syncing.
func (c *ServiceCIDRConfig) Run(stopCh <-chan struct{}) { func (c *ServiceCIDRConfig) Run(stopCh <-chan struct{}) {
klog.InfoS("Starting serviceCIDR config controller") c.logger.Info("Starting serviceCIDR config controller")
if !cache.WaitForNamedCacheSync("serviceCIDR config", stopCh, c.listerSynced) { if !cache.WaitForNamedCacheSync("serviceCIDR config", stopCh, c.listerSynced) {
return return
@ -465,7 +474,7 @@ func (c *ServiceCIDRConfig) handleServiceCIDREvent(oldObj, newObj interface{}) {
} }
for i := range c.eventHandlers { for i := range c.eventHandlers {
klog.V(4).InfoS("Calling handler.OnServiceCIDRsChanged") c.logger.V(4).Info("Calling handler.OnServiceCIDRsChanged")
c.eventHandlers[i].OnServiceCIDRsChanged(c.cidrs.UnsortedList()) c.eventHandlers[i].OnServiceCIDRsChanged(c.cidrs.UnsortedList())
} }
} }
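
The payoff of accepting a context in all four constructors is on the caller side: values attached to the logger once are inherited by every line these controllers emit. A hypothetical caller sketch (the "component" key and the informer variables are illustrative, not from this commit):

logger := klog.LoggerWithValues(klog.Background(), "component", "kube-proxy")
ctx := klog.NewContext(context.Background(), logger)

// Each constructor resolves this logger via klog.FromContext(ctx); every
// line the controllers emit now carries component=kube-proxy.
nodeConfig := NewNodeConfig(ctx, sharedInformers.Core().V1().Nodes(), resyncPeriod)
serviceCIDRConfig := NewServiceCIDRConfig(ctx, sharedInformers.Networking().V1alpha1().ServiceCIDRs(), resyncPeriod)
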

View File

@ -32,6 +32,7 @@ import (
informers "k8s.io/client-go/informers" informers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
ktesting "k8s.io/client-go/testing" ktesting "k8s.io/client-go/testing"
klogtesting "k8s.io/klog/v2/ktesting"
"k8s.io/utils/ptr" "k8s.io/utils/ptr"
) )
@ -226,6 +227,7 @@ func (h *EndpointSliceHandlerMock) ValidateEndpointSlices(t *testing.T, expected
} }
func TestNewServiceAddedAndNotified(t *testing.T) { func TestNewServiceAddedAndNotified(t *testing.T) {
_, ctx := klogtesting.NewTestContext(t)
client := fake.NewSimpleClientset() client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil)) client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))
@ -235,7 +237,7 @@ func TestNewServiceAddedAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute) sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().V1().Services(), time.Minute) config := NewServiceConfig(ctx, sharedInformers.Core().V1().Services(), time.Minute)
handler := NewServiceHandlerMock() handler := NewServiceHandlerMock()
config.RegisterEventHandler(handler) config.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh) go sharedInformers.Start(stopCh)
@ -250,6 +252,7 @@ func TestNewServiceAddedAndNotified(t *testing.T) {
} }
func TestServiceAddedRemovedSetAndNotified(t *testing.T) { func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
_, ctx := klogtesting.NewTestContext(t)
client := fake.NewSimpleClientset() client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil)) client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))
@ -259,7 +262,7 @@ func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute) sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().V1().Services(), time.Minute) config := NewServiceConfig(ctx, sharedInformers.Core().V1().Services(), time.Minute)
handler := NewServiceHandlerMock() handler := NewServiceHandlerMock()
config.RegisterEventHandler(handler) config.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh) go sharedInformers.Start(stopCh)
@ -286,6 +289,7 @@ func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
} }
func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) { func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
_, ctx := klogtesting.NewTestContext(t)
client := fake.NewSimpleClientset() client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil)) client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))
@ -295,7 +299,7 @@ func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute) sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().V1().Services(), time.Minute) config := NewServiceConfig(ctx, sharedInformers.Core().V1().Services(), time.Minute)
handler := NewServiceHandlerMock() handler := NewServiceHandlerMock()
handler2 := NewServiceHandlerMock() handler2 := NewServiceHandlerMock()
config.RegisterEventHandler(handler) config.RegisterEventHandler(handler)
@ -320,6 +324,7 @@ func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
} }
func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) { func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
_, ctx := klogtesting.NewTestContext(t)
client := fake.NewSimpleClientset() client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
client.PrependWatchReactor("endpointslices", ktesting.DefaultWatchReactor(fakeWatch, nil)) client.PrependWatchReactor("endpointslices", ktesting.DefaultWatchReactor(fakeWatch, nil))
@ -329,7 +334,7 @@ func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute) sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewEndpointSliceConfig(sharedInformers.Discovery().V1().EndpointSlices(), time.Minute) config := NewEndpointSliceConfig(ctx, sharedInformers.Discovery().V1().EndpointSlices(), time.Minute)
handler := NewEndpointSliceHandlerMock() handler := NewEndpointSliceHandlerMock()
handler2 := NewEndpointSliceHandlerMock() handler2 := NewEndpointSliceHandlerMock()
config.RegisterEventHandler(handler) config.RegisterEventHandler(handler)
@ -366,6 +371,7 @@ func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
} }
func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) { func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
_, ctx := klogtesting.NewTestContext(t)
client := fake.NewSimpleClientset() client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
client.PrependWatchReactor("endpointslices", ktesting.DefaultWatchReactor(fakeWatch, nil)) client.PrependWatchReactor("endpointslices", ktesting.DefaultWatchReactor(fakeWatch, nil))
@ -375,7 +381,7 @@ func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute) sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewEndpointSliceConfig(sharedInformers.Discovery().V1().EndpointSlices(), time.Minute) config := NewEndpointSliceConfig(ctx, sharedInformers.Discovery().V1().EndpointSlices(), time.Minute)
handler := NewEndpointSliceHandlerMock() handler := NewEndpointSliceHandlerMock()
handler2 := NewEndpointSliceHandlerMock() handler2 := NewEndpointSliceHandlerMock()
config.RegisterEventHandler(handler) config.RegisterEventHandler(handler)

View File

@ -25,6 +25,7 @@ package iptables
import ( import (
"bytes" "bytes"
"context"
"crypto/sha256" "crypto/sha256"
"encoding/base32" "encoding/base32"
"fmt" "fmt"
@ -97,6 +98,7 @@ const sysctlNFConntrackTCPBeLiberal = "net/netfilter/nf_conntrack_tcp_be_liberal
// NewDualStackProxier creates a MetaProxier instance, with IPv4 and IPv6 proxies. // NewDualStackProxier creates a MetaProxier instance, with IPv4 and IPv6 proxies.
func NewDualStackProxier( func NewDualStackProxier(
ctx context.Context,
ipt [2]utiliptables.Interface, ipt [2]utiliptables.Interface,
sysctl utilsysctl.Interface, sysctl utilsysctl.Interface,
exec utilexec.Interface, exec utilexec.Interface,
@ -114,14 +116,14 @@ func NewDualStackProxier(
initOnly bool, initOnly bool,
) (proxy.Provider, error) { ) (proxy.Provider, error) {
// Create an ipv4 instance of the single-stack proxier // Create an ipv4 instance of the single-stack proxier
ipv4Proxier, err := NewProxier(v1.IPv4Protocol, ipt[0], sysctl, ipv4Proxier, err := NewProxier(ctx, v1.IPv4Protocol, ipt[0], sysctl,
exec, syncPeriod, minSyncPeriod, masqueradeAll, localhostNodePorts, masqueradeBit, localDetectors[0], hostname, exec, syncPeriod, minSyncPeriod, masqueradeAll, localhostNodePorts, masqueradeBit, localDetectors[0], hostname,
nodeIPs[v1.IPv4Protocol], recorder, healthzServer, nodePortAddresses, initOnly) nodeIPs[v1.IPv4Protocol], recorder, healthzServer, nodePortAddresses, initOnly)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err)
} }
ipv6Proxier, err := NewProxier(v1.IPv6Protocol, ipt[1], sysctl, ipv6Proxier, err := NewProxier(ctx, v1.IPv6Protocol, ipt[1], sysctl,
exec, syncPeriod, minSyncPeriod, masqueradeAll, false, masqueradeBit, localDetectors[1], hostname, exec, syncPeriod, minSyncPeriod, masqueradeAll, false, masqueradeBit, localDetectors[1], hostname,
nodeIPs[v1.IPv6Protocol], recorder, healthzServer, nodePortAddresses, initOnly) nodeIPs[v1.IPv6Protocol], recorder, healthzServer, nodePortAddresses, initOnly)
if err != nil { if err != nil {
@ -205,6 +207,8 @@ type Proxier struct {
// networkInterfacer defines an interface for several net library functions. // networkInterfacer defines an interface for several net library functions.
// Inject for test purpose. // Inject for test purpose.
networkInterfacer proxyutil.NetworkInterfacer networkInterfacer proxyutil.NetworkInterfacer
logger klog.Logger
} }
// Proxier implements proxy.Provider // Proxier implements proxy.Provider
@ -215,7 +219,8 @@ var _ proxy.Provider = &Proxier{}
// An error will be returned if iptables fails to update or acquire the initial lock. // An error will be returned if iptables fails to update or acquire the initial lock.
// Once a proxier is created, it will keep iptables up to date in the background and // Once a proxier is created, it will keep iptables up to date in the background and
// will not terminate if a particular iptables call fails. // will not terminate if a particular iptables call fails.
func NewProxier(ipFamily v1.IPFamily, func NewProxier(ctx context.Context,
ipFamily v1.IPFamily,
ipt utiliptables.Interface, ipt utiliptables.Interface,
sysctl utilsysctl.Interface, sysctl utilsysctl.Interface,
exec utilexec.Interface, exec utilexec.Interface,
@ -232,6 +237,7 @@ func NewProxier(ipFamily v1.IPFamily,
nodePortAddressStrings []string, nodePortAddressStrings []string,
initOnly bool, initOnly bool,
) (*Proxier, error) { ) (*Proxier, error) {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "ipFamily", ipFamily)
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings) nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
if !nodePortAddresses.ContainsIPv4Loopback() { if !nodePortAddresses.ContainsIPv4Loopback() {
@ -240,7 +246,7 @@ func NewProxier(ipFamily v1.IPFamily,
if localhostNodePorts { if localhostNodePorts {
// Set the route_localnet sysctl we need for exposing NodePorts on loopback addresses // Set the route_localnet sysctl we need for exposing NodePorts on loopback addresses
// Refer to https://issues.k8s.io/90259 // Refer to https://issues.k8s.io/90259
klog.InfoS("Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses") logger.Info("Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses")
if err := proxyutil.EnsureSysctl(sysctl, sysctlRouteLocalnet, 1); err != nil { if err := proxyutil.EnsureSysctl(sysctl, sysctlRouteLocalnet, 1); err != nil {
return nil, err return nil, err
} }
@ -252,18 +258,18 @@ func NewProxier(ipFamily v1.IPFamily,
conntrackTCPLiberal := false conntrackTCPLiberal := false
if val, err := sysctl.GetSysctl(sysctlNFConntrackTCPBeLiberal); err == nil && val != 0 { if val, err := sysctl.GetSysctl(sysctlNFConntrackTCPBeLiberal); err == nil && val != 0 {
conntrackTCPLiberal = true conntrackTCPLiberal = true
klog.InfoS("nf_conntrack_tcp_be_liberal set, not installing DROP rules for INVALID packets") logger.Info("nf_conntrack_tcp_be_liberal set, not installing DROP rules for INVALID packets")
} }
if initOnly { if initOnly {
klog.InfoS("System initialized and --init-only specified") logger.Info("System initialized and --init-only specified")
return nil, nil return nil, nil
} }
// Generate the masquerade mark to use for SNAT rules. // Generate the masquerade mark to use for SNAT rules.
masqueradeValue := 1 << uint(masqueradeBit) masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue) masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)
klog.V(2).InfoS("Using iptables mark for masquerade", "ipFamily", ipt.Protocol(), "mark", masqueradeMark) logger.V(2).Info("Using iptables mark for masquerade", "mark", masqueradeMark)
serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)
@ -296,10 +302,11 @@ func NewProxier(ipFamily v1.IPFamily,
nodePortAddresses: nodePortAddresses, nodePortAddresses: nodePortAddresses,
networkInterfacer: proxyutil.RealNetwork{}, networkInterfacer: proxyutil.RealNetwork{},
conntrackTCPLiberal: conntrackTCPLiberal, conntrackTCPLiberal: conntrackTCPLiberal,
logger: logger,
} }
burstSyncs := 2 burstSyncs := 2
klog.V(2).InfoS("Iptables sync params", "ipFamily", ipt.Protocol(), "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs) logger.V(2).Info("Iptables sync params", "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs)
// We pass syncPeriod to ipt.Monitor, which will call us only if it needs to. // We pass syncPeriod to ipt.Monitor, which will call us only if it needs to.
// We need to pass *some* maxInterval to NewBoundedFrequencyRunner anyway though. // We need to pass *some* maxInterval to NewBoundedFrequencyRunner anyway though.
// time.Hour is arbitrary. // time.Hour is arbitrary.
@ -309,9 +316,9 @@ func NewProxier(ipFamily v1.IPFamily,
proxier.forceSyncProxyRules, syncPeriod, wait.NeverStop) proxier.forceSyncProxyRules, syncPeriod, wait.NeverStop)
if ipt.HasRandomFully() { if ipt.HasRandomFully() {
klog.V(2).InfoS("Iptables supports --random-fully", "ipFamily", ipt.Protocol()) logger.V(2).Info("Iptables supports --random-fully")
} else { } else {
klog.V(2).InfoS("Iptables does not support --random-fully", "ipFamily", ipt.Protocol()) logger.V(2).Info("Iptables does not support --random-fully")
} }
return proxier, nil return proxier, nil
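
Deriving the proxier's logger with klog.LoggerWithValues is what lets the explicit "ipFamily" key disappear from the messages above: the family is attached once at construction and rides along on every line. A minimal sketch of the idiom (newFamilyLogger is a hypothetical helper, not part of the commit):

func newFamilyLogger(ctx context.Context, ipFamily v1.IPFamily) klog.Logger {
    // Every message logged through the returned logger automatically
    // carries ipFamily=<family>; call sites stop repeating the key.
    return klog.LoggerWithValues(klog.FromContext(ctx), "ipFamily", ipFamily)
}
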
@ -396,7 +403,8 @@ var iptablesCleanupOnlyChains = []iptablesJumpChain{}
// CleanupLeftovers removes all iptables rules and chains created by the Proxier // CleanupLeftovers removes all iptables rules and chains created by the Proxier
// It returns true if an error was encountered. Errors are logged. // It returns true if an error was encountered. Errors are logged.
func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { func CleanupLeftovers(ctx context.Context, ipt utiliptables.Interface) (encounteredError bool) {
logger := klog.FromContext(ctx)
// Unlink our chains // Unlink our chains
for _, jump := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) { for _, jump := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) {
args := append(jump.extraArgs, args := append(jump.extraArgs,
@ -405,7 +413,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
) )
if err := ipt.DeleteRule(jump.table, jump.srcChain, args...); err != nil { if err := ipt.DeleteRule(jump.table, jump.srcChain, args...); err != nil {
if !utiliptables.IsNotFoundError(err) { if !utiliptables.IsNotFoundError(err) {
klog.ErrorS(err, "Error removing pure-iptables proxy rule") logger.Error(err, "Error removing pure-iptables proxy rule")
encounteredError = true encounteredError = true
} }
} }
@ -414,7 +422,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
// Flush and remove all of our "-t nat" chains. // Flush and remove all of our "-t nat" chains.
iptablesData := bytes.NewBuffer(nil) iptablesData := bytes.NewBuffer(nil)
if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil { if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil {
klog.ErrorS(err, "Failed to execute iptables-save", "table", utiliptables.TableNAT) logger.Error(err, "Failed to execute iptables-save", "table", utiliptables.TableNAT)
encounteredError = true encounteredError = true
} else { } else {
existingNATChains := utiliptables.GetChainsFromTable(iptablesData.Bytes()) existingNATChains := utiliptables.GetChainsFromTable(iptablesData.Bytes())
@ -442,7 +450,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
// Write it. // Write it.
err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to execute iptables-restore", "table", utiliptables.TableNAT) logger.Error(err, "Failed to execute iptables-restore", "table", utiliptables.TableNAT)
metrics.IptablesRestoreFailuresTotal.Inc() metrics.IptablesRestoreFailuresTotal.Inc()
encounteredError = true encounteredError = true
} }
@ -451,7 +459,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
// Flush and remove all of our "-t filter" chains. // Flush and remove all of our "-t filter" chains.
iptablesData.Reset() iptablesData.Reset()
if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil { if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil {
klog.ErrorS(err, "Failed to execute iptables-save", "table", utiliptables.TableFilter) logger.Error(err, "Failed to execute iptables-save", "table", utiliptables.TableFilter)
encounteredError = true encounteredError = true
} else { } else {
existingFilterChains := utiliptables.GetChainsFromTable(iptablesData.Bytes()) existingFilterChains := utiliptables.GetChainsFromTable(iptablesData.Bytes())
@ -469,7 +477,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
filterLines := append(filterChains.Bytes(), filterRules.Bytes()...) filterLines := append(filterChains.Bytes(), filterRules.Bytes()...)
// Write it. // Write it.
if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil {
klog.ErrorS(err, "Failed to execute iptables-restore", "table", utiliptables.TableFilter) logger.Error(err, "Failed to execute iptables-restore", "table", utiliptables.TableFilter)
metrics.IptablesRestoreFailuresTotal.Inc() metrics.IptablesRestoreFailuresTotal.Inc()
encounteredError = true encounteredError = true
} }
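
CleanupLeftovers has no long-lived receiver to store a logger on, so it uses the other standard contextual-logging idiom: accept ctx as the first parameter and resolve the logger at the top with klog.FromContext. The caller decides where output goes; a hypothetical test-side sketch (iptInterface is assumed to be in scope):

// Pass the ktesting context in tests, the component's root context in
// production; CleanupLeftovers logs through whatever logger ctx carries.
_, ctx := klogtesting.NewTestContext(t)
if encounteredError := CleanupLeftovers(ctx, iptInterface); encounteredError {
    t.Error("CleanupLeftovers reported errors while removing iptables rules")
}
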
@ -605,7 +613,7 @@ func (proxier *Proxier) OnEndpointSlicesSynced() {
// is observed. // is observed.
func (proxier *Proxier) OnNodeAdd(node *v1.Node) { func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node",
"eventNode", node.Name, "currentNode", proxier.hostname) "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -621,7 +629,7 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
} }
proxier.needFullSync = true proxier.needFullSync = true
proxier.mu.Unlock() proxier.mu.Unlock()
klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels) proxier.logger.V(4).Info("Updated proxier node labels", "labels", node.Labels)
proxier.Sync() proxier.Sync()
} }
@ -630,7 +638,7 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
// node object is observed. // node object is observed.
func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) { func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node",
"eventNode", node.Name, "currentNode", proxier.hostname) "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -646,7 +654,7 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
} }
proxier.needFullSync = true proxier.needFullSync = true
proxier.mu.Unlock() proxier.mu.Unlock()
klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels) proxier.logger.V(4).Info("Updated proxier node labels", "labels", node.Labels)
proxier.Sync() proxier.Sync()
} }
@ -655,7 +663,7 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
// object is observed. // object is observed.
func (proxier *Proxier) OnNodeDelete(node *v1.Node) { func (proxier *Proxier) OnNodeDelete(node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node",
"eventNode", node.Name, "currentNode", proxier.hostname) "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -779,7 +787,7 @@ func (proxier *Proxier) syncProxyRules() {
// don't sync rules till we've received services and endpoints // don't sync rules till we've received services and endpoints
if !proxier.isInitialized() { if !proxier.isInitialized() {
klog.V(2).InfoS("Not syncing iptables until Services and Endpoints have been received from master") proxier.logger.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master")
return return
} }
@ -796,18 +804,18 @@ func (proxier *Proxier) syncProxyRules() {
} else { } else {
metrics.SyncFullProxyRulesLatency.Observe(metrics.SinceInSeconds(start)) metrics.SyncFullProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
} }
klog.V(2).InfoS("SyncProxyRules complete", "elapsed", time.Since(start)) proxier.logger.V(2).Info("SyncProxyRules complete", "elapsed", time.Since(start))
}() }()
serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges) serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges)
endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges) endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges)
klog.V(2).InfoS("Syncing iptables rules") proxier.logger.V(2).Info("Syncing iptables rules")
success := false success := false
defer func() { defer func() {
if !success { if !success {
klog.InfoS("Sync failed", "retryingTime", proxier.syncPeriod) proxier.logger.Info("Sync failed", "retryingTime", proxier.syncPeriod)
proxier.syncRunner.RetryAfter(proxier.syncPeriod) proxier.syncRunner.RetryAfter(proxier.syncPeriod)
if tryPartialSync { if tryPartialSync {
metrics.IptablesPartialRestoreFailuresTotal.Inc() metrics.IptablesPartialRestoreFailuresTotal.Inc()
@ -833,7 +841,7 @@ func (proxier *Proxier) syncProxyRules() {
// (which will be very slow on hosts with lots of iptables rules). // (which will be very slow on hosts with lots of iptables rules).
for _, jump := range append(iptablesJumpChains, iptablesKubeletJumpChains...) { for _, jump := range append(iptablesJumpChains, iptablesKubeletJumpChains...) {
if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil { if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil {
klog.ErrorS(err, "Failed to ensure chain exists", "table", jump.table, "chain", jump.dstChain) proxier.logger.Error(err, "Failed to ensure chain exists", "table", jump.table, "chain", jump.dstChain)
return return
} }
args := jump.extraArgs args := jump.extraArgs
@ -842,7 +850,7 @@ func (proxier *Proxier) syncProxyRules() {
} }
args = append(args, "-j", string(jump.dstChain)) args = append(args, "-j", string(jump.dstChain))
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil { if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil {
klog.ErrorS(err, "Failed to ensure chain jumps", "table", jump.table, "srcChain", jump.srcChain, "dstChain", jump.dstChain) proxier.logger.Error(err, "Failed to ensure chain jumps", "table", jump.table, "srcChain", jump.srcChain, "dstChain", jump.dstChain)
return return
} }
} }
@ -952,7 +960,7 @@ func (proxier *Proxier) syncProxyRules() {
for svcName, svc := range proxier.svcPortMap { for svcName, svc := range proxier.svcPortMap {
svcInfo, ok := svc.(*servicePortInfo) svcInfo, ok := svc.(*servicePortInfo)
if !ok { if !ok {
klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName) proxier.logger.Error(nil, "Failed to cast serviceInfo", "serviceName", svcName)
continue continue
} }
protocol := strings.ToLower(string(svcInfo.Protocol())) protocol := strings.ToLower(string(svcInfo.Protocol()))
@ -1345,7 +1353,7 @@ func (proxier *Proxier) syncProxyRules() {
for _, ep := range allLocallyReachableEndpoints { for _, ep := range allLocallyReachableEndpoints {
epInfo, ok := ep.(*endpointInfo) epInfo, ok := ep.(*endpointInfo)
if !ok { if !ok {
klog.ErrorS(nil, "Failed to cast endpointInfo", "endpointInfo", ep) proxier.logger.Error(nil, "Failed to cast endpointInfo", "endpointInfo", ep)
continue continue
} }
@ -1396,7 +1404,7 @@ func (proxier *Proxier) syncProxyRules() {
} }
proxier.lastIPTablesCleanup = time.Now() proxier.lastIPTablesCleanup = time.Now()
} else { } else {
klog.ErrorS(err, "Failed to execute iptables-save: stale chains will not be deleted") proxier.logger.Error(err, "Failed to execute iptables-save: stale chains will not be deleted")
} }
} }
@ -1420,15 +1428,15 @@ func (proxier *Proxier) syncProxyRules() {
} else { } else {
nodeIPs, err := proxier.nodePortAddresses.GetNodeIPs(proxier.networkInterfacer) nodeIPs, err := proxier.nodePortAddresses.GetNodeIPs(proxier.networkInterfacer)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get node ip address matching nodeport cidrs, services with nodeport may not work as intended", "CIDRs", proxier.nodePortAddresses) proxier.logger.Error(err, "Failed to get node ip address matching nodeport cidrs, services with nodeport may not work as intended", "CIDRs", proxier.nodePortAddresses)
} }
for _, ip := range nodeIPs { for _, ip := range nodeIPs {
if ip.IsLoopback() { if ip.IsLoopback() {
if isIPv6 { if isIPv6 {
klog.ErrorS(nil, "--nodeport-addresses includes localhost but localhost NodePorts are not supported on IPv6", "address", ip.String()) proxier.logger.Error(nil, "--nodeport-addresses includes localhost but localhost NodePorts are not supported on IPv6", "address", ip.String())
continue continue
} else if !proxier.localhostNodePorts { } else if !proxier.localhostNodePorts {
klog.ErrorS(nil, "--nodeport-addresses includes localhost but --iptables-localhost-nodeports=false was passed", "address", ip.String()) proxier.logger.Error(nil, "--nodeport-addresses includes localhost but --iptables-localhost-nodeports=false was passed", "address", ip.String())
continue continue
} }
} }
@ -1491,7 +1499,7 @@ func (proxier *Proxier) syncProxyRules() {
proxier.iptablesData.Write(proxier.natRules.Bytes()) proxier.iptablesData.Write(proxier.natRules.Bytes())
proxier.iptablesData.WriteString("COMMIT\n") proxier.iptablesData.WriteString("COMMIT\n")
klog.V(2).InfoS("Reloading service iptables data", proxier.logger.V(2).Info("Reloading service iptables data",
"numServices", len(proxier.svcPortMap), "numServices", len(proxier.svcPortMap),
"numEndpoints", totalEndpoints, "numEndpoints", totalEndpoints,
"numFilterChains", proxier.filterChains.Lines(), "numFilterChains", proxier.filterChains.Lines(),
@ -1499,16 +1507,16 @@ func (proxier *Proxier) syncProxyRules() {
"numNATChains", proxier.natChains.Lines(), "numNATChains", proxier.natChains.Lines(),
"numNATRules", proxier.natRules.Lines(), "numNATRules", proxier.natRules.Lines(),
) )
klog.V(9).InfoS("Restoring iptables", "rules", proxier.iptablesData.Bytes()) proxier.logger.V(9).Info("Restoring iptables", "rules", proxier.iptablesData.Bytes())
// NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table // NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table
err := proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) err := proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil { if err != nil {
if pErr, ok := err.(utiliptables.ParseError); ok { if pErr, ok := err.(utiliptables.ParseError); ok {
lines := utiliptables.ExtractLines(proxier.iptablesData.Bytes(), pErr.Line(), 3) lines := utiliptables.ExtractLines(proxier.iptablesData.Bytes(), pErr.Line(), 3)
klog.ErrorS(pErr, "Failed to execute iptables-restore", "rules", lines) proxier.logger.Error(pErr, "Failed to execute iptables-restore", "rules", lines)
} else { } else {
klog.ErrorS(err, "Failed to execute iptables-restore") proxier.logger.Error(err, "Failed to execute iptables-restore")
} }
metrics.IptablesRestoreFailuresTotal.Inc() metrics.IptablesRestoreFailuresTotal.Inc()
return return
@ -1520,7 +1528,7 @@ func (proxier *Proxier) syncProxyRules() {
for _, lastChangeTriggerTime := range lastChangeTriggerTimes { for _, lastChangeTriggerTime := range lastChangeTriggerTimes {
latency := metrics.SinceInSeconds(lastChangeTriggerTime) latency := metrics.SinceInSeconds(lastChangeTriggerTime)
metrics.NetworkProgrammingLatency.Observe(latency) metrics.NetworkProgrammingLatency.Observe(latency)
klog.V(4).InfoS("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency) proxier.logger.V(4).Info("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency)
} }
} }
@ -1535,10 +1543,10 @@ func (proxier *Proxier) syncProxyRules() {
// not "OnlyLocal", but the services list will not, and the serviceHealthServer // not "OnlyLocal", but the services list will not, and the serviceHealthServer
// will just drop those endpoints. // will just drop those endpoints.
if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil { if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil {
klog.ErrorS(err, "Error syncing healthcheck services") proxier.logger.Error(err, "Error syncing healthcheck services")
} }
if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil { if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil {
klog.ErrorS(err, "Error syncing healthcheck endpoints") proxier.logger.Error(err, "Error syncing healthcheck endpoints")
} }
// Finish housekeeping, clear stale conntrack entries for UDP Services // Finish housekeeping, clear stale conntrack entries for UDP Services
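
One conversion detail that recurs throughout this file: klog.ErrorS(nil, ...) maps to logger.Error(nil, ...). Passing nil as the error keeps the message at error severity when there is no error object to attach, as in this line from the diff above:

// Error-level log with structured key/values but no underlying error.
proxier.logger.Error(nil, "Failed to cast serviceInfo", "serviceName", svcName)
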

View File

@ -46,6 +46,7 @@ import (
"k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/metrics/testutil" "k8s.io/component-base/metrics/testutil"
"k8s.io/klog/v2" "k8s.io/klog/v2"
klogtesting "k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/conntrack" "k8s.io/kubernetes/pkg/proxy/conntrack"
@ -387,17 +388,17 @@ func TestParseIPTablesData(t *testing.T) {
} }
} }
func countRules(tableName utiliptables.Table, ruleData string) int { func countRules(logger klog.Logger, tableName utiliptables.Table, ruleData string) int {
dump, err := iptablestest.ParseIPTablesDump(ruleData) dump, err := iptablestest.ParseIPTablesDump(ruleData)
if err != nil { if err != nil {
klog.ErrorS(err, "error parsing iptables rules") logger.Error(err, "error parsing iptables rules")
return -1 return -1
} }
rules := 0 rules := 0
table, err := dump.GetTable(tableName) table, err := dump.GetTable(tableName)
if err != nil { if err != nil {
klog.ErrorS(err, "can't find table", "table", tableName) logger.Error(err, "can't find table", "table", tableName)
return -1 return -1
} }
@ -407,19 +408,19 @@ func countRules(tableName utiliptables.Table, ruleData string) int {
return rules return rules
} }
func countRulesFromMetric(tableName utiliptables.Table) int { func countRulesFromMetric(logger klog.Logger, tableName utiliptables.Table) int {
numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(tableName))) numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(tableName)))
if err != nil { if err != nil {
klog.ErrorS(err, "metrics are not registered?") logger.Error(err, "metrics are not registered?")
return -1 return -1
} }
return int(numRulesFloat) return int(numRulesFloat)
} }
func countRulesFromLastSyncMetric(tableName utiliptables.Table) int { func countRulesFromLastSyncMetric(logger klog.Logger, tableName utiliptables.Table) int {
numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesLastSync.WithLabelValues(string(tableName))) numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesLastSync.WithLabelValues(string(tableName)))
if err != nil { if err != nil {
klog.ErrorS(err, "metrics are not registered?") logger.Error(err, "metrics are not registered?")
return -1 return -1
} }
return int(numRulesFloat) return int(numRulesFloat)
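
The test helpers take the simplest route of all: a klog.Logger parameter replaces the global klog calls, and each test hands them the ktesting logger so helper output stays attached to the right test. Usage, as in the tests below (fp is the fake proxier already used in this file):

logger, _ := klogtesting.NewTestContext(t)
nNatRules := countRulesFromMetric(logger, utiliptables.TableNAT)
expectedNatRules := countRules(logger, utiliptables.TableNAT, fp.iptablesData.String())
if nNatRules != expectedNatRules {
    t.Fatalf("Wrong number of nat rules: expected %d received %d", expectedNatRules, nNatRules)
}
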
@ -1540,6 +1541,7 @@ func TestTracePacket(t *testing.T) {
// TestOverallIPTablesRules creates a variety of services and verifies that the generated // TestOverallIPTablesRules creates a variety of services and verifies that the generated
// rules are exactly as expected. // rules are exactly as expected.
func TestOverallIPTablesRules(t *testing.T) { func TestOverallIPTablesRules(t *testing.T) {
logger, _ := klogtesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt) fp := NewFakeProxier(ipt)
metrics.RegisterMetrics() metrics.RegisterMetrics()
@ -1799,8 +1801,8 @@ func TestOverallIPTablesRules(t *testing.T) {
assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
nNatRules := countRulesFromMetric(utiliptables.TableNAT) nNatRules := countRulesFromMetric(logger, utiliptables.TableNAT)
expectedNatRules := countRules(utiliptables.TableNAT, fp.iptablesData.String()) expectedNatRules := countRules(logger, utiliptables.TableNAT, fp.iptablesData.String())
if nNatRules != expectedNatRules { if nNatRules != expectedNatRules {
t.Fatalf("Wrong number of nat rules: expected %d received %d", expectedNatRules, nNatRules) t.Fatalf("Wrong number of nat rules: expected %d received %d", expectedNatRules, nNatRules)
@ -4142,6 +4144,7 @@ func TestHealthCheckNodePortWhenTerminating(t *testing.T) {
} }
func TestProxierMetricsIptablesTotalRules(t *testing.T) { func TestProxierMetricsIptablesTotalRules(t *testing.T) {
logger, _ := klogtesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt) fp := NewFakeProxier(ipt)
@ -4170,15 +4173,15 @@ func TestProxierMetricsIptablesTotalRules(t *testing.T) {
fp.syncProxyRules() fp.syncProxyRules()
iptablesData := fp.iptablesData.String() iptablesData := fp.iptablesData.String()
nFilterRules := countRulesFromMetric(utiliptables.TableFilter) nFilterRules := countRulesFromMetric(logger, utiliptables.TableFilter)
expectedFilterRules := countRules(utiliptables.TableFilter, iptablesData) expectedFilterRules := countRules(logger, utiliptables.TableFilter, iptablesData)
if nFilterRules != expectedFilterRules { if nFilterRules != expectedFilterRules {
t.Fatalf("Wrong number of filter rule: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData) t.Fatalf("Wrong number of filter rule: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
} }
nNatRules := countRulesFromMetric(utiliptables.TableNAT) nNatRules := countRulesFromMetric(logger, utiliptables.TableNAT)
expectedNatRules := countRules(utiliptables.TableNAT, iptablesData) expectedNatRules := countRules(logger, utiliptables.TableNAT, iptablesData)
if nNatRules != expectedNatRules { if nNatRules != expectedNatRules {
t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData) t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
@ -4203,15 +4206,15 @@ func TestProxierMetricsIptablesTotalRules(t *testing.T) {
fp.syncProxyRules() fp.syncProxyRules()
iptablesData = fp.iptablesData.String() iptablesData = fp.iptablesData.String()
nFilterRules = countRulesFromMetric(utiliptables.TableFilter) nFilterRules = countRulesFromMetric(logger, utiliptables.TableFilter)
expectedFilterRules = countRules(utiliptables.TableFilter, iptablesData) expectedFilterRules = countRules(logger, utiliptables.TableFilter, iptablesData)
if nFilterRules != expectedFilterRules { if nFilterRules != expectedFilterRules {
t.Fatalf("Wrong number of filter rule: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData) t.Fatalf("Wrong number of filter rule: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
} }
nNatRules = countRulesFromMetric(utiliptables.TableNAT) nNatRules = countRulesFromMetric(logger, utiliptables.TableNAT)
expectedNatRules = countRules(utiliptables.TableNAT, iptablesData) expectedNatRules = countRules(logger, utiliptables.TableNAT, iptablesData)
if nNatRules != expectedNatRules { if nNatRules != expectedNatRules {
t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData) t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
@ -5822,6 +5825,7 @@ func TestSyncProxyRulesLargeClusterMode(t *testing.T) {
// Test calling syncProxyRules() multiple times with various changes // Test calling syncProxyRules() multiple times with various changes
func TestSyncProxyRulesRepeated(t *testing.T) { func TestSyncProxyRulesRepeated(t *testing.T) {
logger, _ := klogtesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt) fp := NewFakeProxier(ipt)
metrics.RegisterMetrics() metrics.RegisterMetrics()
@ -5920,14 +5924,14 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
rulesSynced := countRules(utiliptables.TableNAT, expected) rulesSynced := countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric := countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric := countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
rulesTotal := rulesSynced rulesTotal := rulesSynced
rulesTotalMetric := countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric := countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }
@ -5998,8 +6002,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
rulesSynced = countRules(utiliptables.TableNAT, expected) rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
@ -6007,7 +6011,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-X27LE4BHSL4DOUIK rules, and 2 // We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-X27LE4BHSL4DOUIK rules, and 2
// KUBE-SEP-BSWRHOQ77KEXZLNL rules. // KUBE-SEP-BSWRHOQ77KEXZLNL rules.
rulesTotal += 5 rulesTotal += 5
rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }
@ -6049,8 +6053,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
rulesSynced = countRules(utiliptables.TableNAT, expected) rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
@ -6058,7 +6062,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-2VJB64SDSIJUP5T6 rules, and 2 // We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-2VJB64SDSIJUP5T6 rules, and 2
// KUBE-SEP-UHEGFW77JX3KXTOV rules // KUBE-SEP-UHEGFW77JX3KXTOV rules
rulesTotal -= 5 rulesTotal -= 5
rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }
@ -6109,15 +6113,15 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
rulesSynced = countRules(utiliptables.TableNAT, expected) rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
// The REJECT rule is in "filter", not NAT, so the number of NAT rules hasn't // The REJECT rule is in "filter", not NAT, so the number of NAT rules hasn't
// changed. // changed.
rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }
@ -6172,8 +6176,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
rulesSynced = countRules(utiliptables.TableNAT, expected) rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
@ -6181,7 +6185,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and // We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and
// 2 KUBE-SEP-AYCN5HPXMIRJNJXU rules // 2 KUBE-SEP-AYCN5HPXMIRJNJXU rules
rulesTotal += 5 rulesTotal += 5
rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }
@ -6231,14 +6235,14 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
rulesSynced = countRules(utiliptables.TableNAT, expected) rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
// We rewrote existing rules but did not change the overall number of rules. // We rewrote existing rules but did not change the overall number of rules.
rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }
@ -6289,8 +6293,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
rulesSynced = countRules(utiliptables.TableNAT, expected) rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
@ -6299,7 +6303,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
// jumping to the new SEP chain. The other rules related to svc3 got rewritten, // jumping to the new SEP chain. The other rules related to svc3 got rewritten,
// but that does not change the count of rules. // but that does not change the count of rules.
rulesTotal += 3 rulesTotal += 3
rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }
@ -6337,14 +6341,14 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
rulesSynced = countRules(utiliptables.TableNAT, expected) rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
// (No changes) // (No changes)
rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }
@ -6447,8 +6451,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
`) `)
assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String()) assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
rulesSynced = countRules(utiliptables.TableNAT, expected) rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT) rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
if rulesSyncedMetric != rulesSynced { if rulesSyncedMetric != rulesSynced {
t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced) t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
} }
@ -6456,7 +6460,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and 2 // We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and 2
// KUBE-SEP-AYCN5HPXMIRJNJXU rules // KUBE-SEP-AYCN5HPXMIRJNJXU rules
rulesTotal -= 5 rulesTotal -= 5
rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT) rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
if rulesTotalMetric != rulesTotal { if rulesTotalMetric != rulesTotal {
t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal) t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
} }


@ -21,6 +21,7 @@ package ipvs
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -111,6 +112,7 @@ const (
// NewDualStackProxier returns a new Proxier for dual-stack operation // NewDualStackProxier returns a new Proxier for dual-stack operation
func NewDualStackProxier( func NewDualStackProxier(
ctx context.Context,
ipt [2]utiliptables.Interface, ipt [2]utiliptables.Interface,
ipvs utilipvs.Interface, ipvs utilipvs.Interface,
ipset utilipset.Interface, ipset utilipset.Interface,
@ -135,7 +137,7 @@ func NewDualStackProxier(
initOnly bool, initOnly bool,
) (proxy.Provider, error) { ) (proxy.Provider, error) {
// Create an ipv4 instance of the single-stack proxier // Create an ipv4 instance of the single-stack proxier
ipv4Proxier, err := NewProxier(v1.IPv4Protocol, ipt[0], ipvs, ipset, sysctl, ipv4Proxier, err := NewProxier(ctx, v1.IPv4Protocol, ipt[0], ipvs, ipset, sysctl,
exec, syncPeriod, minSyncPeriod, filterCIDRs(false, excludeCIDRs), strictARP, exec, syncPeriod, minSyncPeriod, filterCIDRs(false, excludeCIDRs), strictARP,
tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit, tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit,
localDetectors[0], hostname, nodeIPs[v1.IPv4Protocol], recorder, localDetectors[0], hostname, nodeIPs[v1.IPv4Protocol], recorder,
@ -144,7 +146,7 @@ func NewDualStackProxier(
return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err)
} }
ipv6Proxier, err := NewProxier(v1.IPv6Protocol, ipt[1], ipvs, ipset, sysctl, ipv6Proxier, err := NewProxier(ctx, v1.IPv6Protocol, ipt[1], ipvs, ipset, sysctl,
exec, syncPeriod, minSyncPeriod, filterCIDRs(true, excludeCIDRs), strictARP, exec, syncPeriod, minSyncPeriod, filterCIDRs(true, excludeCIDRs), strictARP,
tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit, tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit,
localDetectors[1], hostname, nodeIPs[v1.IPv6Protocol], recorder, localDetectors[1], hostname, nodeIPs[v1.IPv6Protocol], recorder,
@ -251,6 +253,8 @@ type Proxier struct {
// additional iptables rules. // additional iptables rules.
// (ref: https://github.com/kubernetes/kubernetes/issues/119656) // (ref: https://github.com/kubernetes/kubernetes/issues/119656)
lbNoNodeAccessIPPortProtocolEntries []*utilipset.Entry lbNoNodeAccessIPPortProtocolEntries []*utilipset.Entry
logger klog.Logger
} }
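Storing the logger as a struct field, as the hunk above does, is the standard pattern for long-lived objects whose methods have no ctx parameter of their own. A minimal sketch of the pattern, using a stand-in widget type rather than the real Proxier:

package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

type widget struct {
	logger klog.Logger // captured once at construction time
}

func newWidget(ctx context.Context, family v1.IPFamily) *widget {
	// Attach invariant key/value pairs once; every log line emitted through
	// this instance's logger inherits them.
	logger := klog.LoggerWithValues(klog.FromContext(ctx), "ipFamily", family)
	return &widget{logger: logger}
}

func (w *widget) sync() {
	// No ctx reaches this method, so it falls back to the stored logger.
	w.logger.V(2).Info("syncing")
}

func main() {
	newWidget(context.Background(), v1.IPv4Protocol).sync()
}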
// Proxier implements proxy.Provider // Proxier implements proxy.Provider
@ -261,7 +265,9 @@ var _ proxy.Provider = &Proxier{}
// An error will be returned if it fails to update or acquire the initial lock. // An error will be returned if it fails to update or acquire the initial lock.
// Once a proxier is created, it will keep iptables and ipvs rules up to date in the background and // Once a proxier is created, it will keep iptables and ipvs rules up to date in the background and
// will not terminate if a particular iptables or ipvs call fails. // will not terminate if a particular iptables or ipvs call fails.
func NewProxier(ipFamily v1.IPFamily, func NewProxier(
ctx context.Context,
ipFamily v1.IPFamily,
ipt utiliptables.Interface, ipt utiliptables.Interface,
ipvs utilipvs.Interface, ipvs utilipvs.Interface,
ipset utilipset.Interface, ipset utilipset.Interface,
@ -285,6 +291,7 @@ func NewProxier(ipFamily v1.IPFamily,
nodePortAddressStrings []string, nodePortAddressStrings []string,
initOnly bool, initOnly bool,
) (*Proxier, error) { ) (*Proxier, error) {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "ipFamily", ipFamily)
// Set the conntrack sysctl we need for IPVS // Set the conntrack sysctl we need for IPVS
if err := proxyutil.EnsureSysctl(sysctl, sysctlVSConnTrack, 1); err != nil { if err := proxyutil.EnsureSysctl(sysctl, sysctlVSConnTrack, 1); err != nil {
return nil, err return nil, err
@ -296,10 +303,10 @@ func NewProxier(ipFamily v1.IPFamily,
} }
if kernelVersion.LessThan(version.MustParseGeneric(utilkernel.IPVSConnReuseModeMinSupportedKernelVersion)) { if kernelVersion.LessThan(version.MustParseGeneric(utilkernel.IPVSConnReuseModeMinSupportedKernelVersion)) {
klog.ErrorS(nil, "Can't set sysctl, kernel version doesn't satisfy minimum version requirements", "sysctl", sysctlConnReuse, "minimumKernelVersion", utilkernel.IPVSConnReuseModeMinSupportedKernelVersion) logger.Error(nil, "Can't set sysctl, kernel version doesn't satisfy minimum version requirements", "sysctl", sysctlConnReuse, "minimumKernelVersion", utilkernel.IPVSConnReuseModeMinSupportedKernelVersion)
} else if kernelVersion.AtLeast(version.MustParseGeneric(utilkernel.IPVSConnReuseModeFixedKernelVersion)) { } else if kernelVersion.AtLeast(version.MustParseGeneric(utilkernel.IPVSConnReuseModeFixedKernelVersion)) {
// https://github.com/kubernetes/kubernetes/issues/93297 // https://github.com/kubernetes/kubernetes/issues/93297
klog.V(2).InfoS("Left as-is", "sysctl", sysctlConnReuse) logger.V(2).Info("Left as-is", "sysctl", sysctlConnReuse)
} else { } else {
// Set the connection reuse mode // Set the connection reuse mode
if err := proxyutil.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil { if err := proxyutil.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil {
@ -339,12 +346,12 @@ func NewProxier(ipFamily v1.IPFamily,
// current system timeout should be preserved // current system timeout should be preserved
if tcpTimeout > 0 || tcpFinTimeout > 0 || udpTimeout > 0 { if tcpTimeout > 0 || tcpFinTimeout > 0 || udpTimeout > 0 {
if err := ipvs.ConfigureTimeouts(tcpTimeout, tcpFinTimeout, udpTimeout); err != nil { if err := ipvs.ConfigureTimeouts(tcpTimeout, tcpFinTimeout, udpTimeout); err != nil {
klog.ErrorS(err, "Failed to configure IPVS timeouts") logger.Error(err, "Failed to configure IPVS timeouts")
} }
} }
if initOnly { if initOnly {
klog.InfoS("System initialized and --init-only specified") logger.Info("System initialized and --init-only specified")
return nil, nil return nil, nil
} }
@ -352,10 +359,10 @@ func NewProxier(ipFamily v1.IPFamily,
masqueradeValue := 1 << uint(masqueradeBit) masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue) masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)
klog.V(2).InfoS("Record nodeIP and family", "nodeIP", nodeIP, "family", ipFamily) logger.V(2).Info("Record nodeIP and family", "nodeIP", nodeIP, "family", ipFamily)
if len(scheduler) == 0 { if len(scheduler) == 0 {
klog.InfoS("IPVS scheduler not specified, use rr by default") logger.Info("IPVS scheduler not specified, use rr by default")
scheduler = defaultScheduler scheduler = defaultScheduler
} }
@ -399,6 +406,7 @@ func NewProxier(ipFamily v1.IPFamily,
nodePortAddresses: nodePortAddresses, nodePortAddresses: nodePortAddresses,
networkInterfacer: proxyutil.RealNetwork{}, networkInterfacer: proxyutil.RealNetwork{},
gracefuldeleteManager: NewGracefulTerminationManager(ipvs), gracefuldeleteManager: NewGracefulTerminationManager(ipvs),
logger: logger,
} }
// initialize ipsetList with all the sets we need // initialize ipsetList with all the sets we need
proxier.ipsetList = make(map[string]*IPSet) proxier.ipsetList = make(map[string]*IPSet)
@ -406,7 +414,7 @@ func NewProxier(ipFamily v1.IPFamily,
proxier.ipsetList[is.name] = NewIPSet(ipset, is.name, is.setType, (ipFamily == v1.IPv6Protocol), is.comment) proxier.ipsetList[is.name] = NewIPSet(ipset, is.name, is.setType, (ipFamily == v1.IPv6Protocol), is.comment)
} }
burstSyncs := 2 burstSyncs := 2
klog.V(2).InfoS("ipvs sync params", "ipFamily", ipt.Protocol(), "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs) logger.V(2).Info("ipvs sync params", "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs)
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
proxier.gracefuldeleteManager.Run() proxier.gracefuldeleteManager.Run()
return proxier, nil return proxier, nil
@ -571,7 +579,8 @@ func getFirstColumn(r io.Reader) ([]string, error) {
// already exist with the configured scheduler, we just return. Otherwise // already exist with the configured scheduler, we just return. Otherwise
// we check if a dummy VS can be configured with the configured scheduler. // we check if a dummy VS can be configured with the configured scheduler.
// Kernel modules will be loaded automatically if necessary. // Kernel modules will be loaded automatically if necessary.
func CanUseIPVSProxier(ipvs utilipvs.Interface, ipsetver IPSetVersioner, scheduler string) error { func CanUseIPVSProxier(ctx context.Context, ipvs utilipvs.Interface, ipsetver IPSetVersioner, scheduler string) error {
logger := klog.FromContext(ctx)
// BUG: https://github.com/moby/ipvs/issues/27 // BUG: https://github.com/moby/ipvs/issues/27
// If ipvs is not compiled into the kernel, no error is returned and handle==nil. // If ipvs is not compiled into the kernel, no error is returned and handle==nil.
// This in turn causes ipvs.GetVirtualServers and ipvs.AddVirtualServer // This in turn causes ipvs.GetVirtualServers and ipvs.AddVirtualServer
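Free functions converted in this commit, like CanUseIPVSProxier above, accept ctx and recover the logger on entry; callers can enrich the logger before handing ctx down. A short sketch of that round trip, with illustrative function names:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func caller(ctx context.Context) {
	// Enrich the logger and store it in a derived context before descending.
	logger := klog.LoggerWithValues(klog.FromContext(ctx), "proxier", "ipvs")
	callee(klog.NewContext(ctx, logger))
}

func callee(ctx context.Context) {
	// FromContext recovers the enriched logger; when ctx carries none, it
	// falls back to klog's global logger, so unconverted callers keep working.
	klog.FromContext(ctx).V(5).Info("doing work")
}

func main() {
	caller(context.Background())
}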
@ -597,20 +606,20 @@ func CanUseIPVSProxier(ipvs utilipvs.Interface, ipsetver IPSetVersioner, schedul
// If any virtual server (VS) using the scheduler exists, we skip the checks. // If any virtual server (VS) using the scheduler exists, we skip the checks.
vservers, err := ipvs.GetVirtualServers() vservers, err := ipvs.GetVirtualServers()
if err != nil { if err != nil {
klog.ErrorS(err, "Can't read the ipvs") logger.Error(err, "Can't read the ipvs")
return err return err
} }
klog.V(5).InfoS("Virtual Servers", "count", len(vservers)) logger.V(5).Info("Virtual Servers", "count", len(vservers))
if len(vservers) > 0 { if len(vservers) > 0 {
// This is most likely a kube-proxy restart. We know that ipvs works // This is most likely a kube-proxy restart. We know that ipvs works
// and if any VS uses the configured scheduler, we are done. // and if any VS uses the configured scheduler, we are done.
for _, vs := range vservers { for _, vs := range vservers {
if vs.Scheduler == scheduler { if vs.Scheduler == scheduler {
klog.V(5).InfoS("VS exist, Skipping checks") logger.V(5).Info("VS exist, Skipping checks")
return nil return nil
} }
} }
klog.V(5).InfoS("No existing VS uses the configured scheduler", "scheduler", scheduler) logger.V(5).Info("No existing VS uses the configured scheduler", "scheduler", scheduler)
} }
// Try to insert a dummy VS with the passed scheduler. // Try to insert a dummy VS with the passed scheduler.
@ -631,25 +640,25 @@ func CanUseIPVSProxier(ipvs utilipvs.Interface, ipsetver IPSetVersioner, schedul
Scheduler: scheduler, Scheduler: scheduler,
} }
if err := ipvs.AddVirtualServer(&vs); err != nil { if err := ipvs.AddVirtualServer(&vs); err != nil {
klog.ErrorS(err, "Could not create dummy VS", "scheduler", scheduler) logger.Error(err, "Could not create dummy VS", "scheduler", scheduler)
return err return err
} }
// To overcome the BUG described above we check that the VS is *really* added. // To overcome the BUG described above we check that the VS is *really* added.
vservers, err = ipvs.GetVirtualServers() vservers, err = ipvs.GetVirtualServers()
if err != nil { if err != nil {
klog.ErrorS(err, "ipvs.GetVirtualServers") logger.Error(err, "ipvs.GetVirtualServers")
return err return err
} }
klog.V(5).InfoS("Virtual Servers after adding dummy", "count", len(vservers)) logger.V(5).Info("Virtual Servers after adding dummy", "count", len(vservers))
if len(vservers) == 0 { if len(vservers) == 0 {
klog.InfoS("Dummy VS not created", "scheduler", scheduler) logger.Info("Dummy VS not created", "scheduler", scheduler)
return fmt.Errorf("Ipvs not supported") // This is a BUG work-around return fmt.Errorf("Ipvs not supported") // This is a BUG work-around
} }
klog.V(5).InfoS("Dummy VS created", "vs", vs) logger.V(5).Info("Dummy VS created", "vs", vs)
if err := ipvs.DeleteVirtualServer(&vs); err != nil { if err := ipvs.DeleteVirtualServer(&vs); err != nil {
klog.ErrorS(err, "Could not delete dummy VS") logger.Error(err, "Could not delete dummy VS")
return err return err
} }
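A hypothetical call site for the converted probe (kube-proxy's platform setup does something along these lines, but the actual caller is outside this diff), sketched as if it lived in this package with the usual context, klog, and utilipvs ("k8s.io/kubernetes/pkg/util/ipvs") imports:

// chooseProxyMode is hypothetical; ipvsIface, ipsetVer, and scheduler stand
// in for the caller's real arguments.
func chooseProxyMode(ctx context.Context, ipvsIface utilipvs.Interface, ipsetVer IPSetVersioner, scheduler string) string {
	if err := CanUseIPVSProxier(ctx, ipvsIface, ipsetVer, scheduler); err != nil {
		klog.FromContext(ctx).Error(err, "Can't use the IPVS proxier, falling back to iptables")
		return "iptables"
	}
	return "ipvs"
}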
@ -658,7 +667,8 @@ func CanUseIPVSProxier(ipvs utilipvs.Interface, ipsetver IPSetVersioner, schedul
// cleanupIptablesLeftovers removes all iptables rules and chains created by the Proxier. // cleanupIptablesLeftovers removes all iptables rules and chains created by the Proxier.
// It returns true if an error was encountered. Errors are logged. // It returns true if an error was encountered. Errors are logged.
func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool) { func cleanupIptablesLeftovers(ctx context.Context, ipt utiliptables.Interface) (encounteredError bool) {
logger := klog.FromContext(ctx)
// Unlink the iptables chains created by ipvs Proxier // Unlink the iptables chains created by ipvs Proxier
for _, jc := range iptablesJumpChain { for _, jc := range iptablesJumpChain {
args := []string{ args := []string{
@ -667,7 +677,7 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool
} }
if err := ipt.DeleteRule(jc.table, jc.from, args...); err != nil { if err := ipt.DeleteRule(jc.table, jc.from, args...); err != nil {
if !utiliptables.IsNotFoundError(err) { if !utiliptables.IsNotFoundError(err) {
klog.ErrorS(err, "Error removing iptables rules in ipvs proxier") logger.Error(err, "Error removing iptables rules in ipvs proxier")
encounteredError = true encounteredError = true
} }
} }
@ -677,7 +687,7 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool
for _, ch := range iptablesCleanupChains { for _, ch := range iptablesCleanupChains {
if err := ipt.FlushChain(ch.table, ch.chain); err != nil { if err := ipt.FlushChain(ch.table, ch.chain); err != nil {
if !utiliptables.IsNotFoundError(err) { if !utiliptables.IsNotFoundError(err) {
klog.ErrorS(err, "Error removing iptables rules in ipvs proxier") logger.Error(err, "Error removing iptables rules in ipvs proxier")
encounteredError = true encounteredError = true
} }
} }
@ -687,7 +697,7 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool
for _, ch := range iptablesCleanupChains { for _, ch := range iptablesCleanupChains {
if err := ipt.DeleteChain(ch.table, ch.chain); err != nil { if err := ipt.DeleteChain(ch.table, ch.chain); err != nil {
if !utiliptables.IsNotFoundError(err) { if !utiliptables.IsNotFoundError(err) {
klog.ErrorS(err, "Error removing iptables rules in ipvs proxier") logger.Error(err, "Error removing iptables rules in ipvs proxier")
encounteredError = true encounteredError = true
} }
} }
@ -697,12 +707,13 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool
} }
// CleanupLeftovers cleans up all ipvs and iptables rules created by the ipvs Proxier. // CleanupLeftovers cleans up all ipvs and iptables rules created by the ipvs Proxier.
func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset utilipset.Interface) (encounteredError bool) { func CleanupLeftovers(ctx context.Context, ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset utilipset.Interface) (encounteredError bool) {
logger := klog.FromContext(ctx)
// Clear all ipvs rules // Clear all ipvs rules
if ipvs != nil { if ipvs != nil {
err := ipvs.Flush() err := ipvs.Flush()
if err != nil { if err != nil {
klog.ErrorS(err, "Error flushing ipvs rules") logger.Error(err, "Error flushing ipvs rules")
encounteredError = true encounteredError = true
} }
} }
@ -710,18 +721,18 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset
nl := NewNetLinkHandle(false) nl := NewNetLinkHandle(false)
err := nl.DeleteDummyDevice(defaultDummyDevice) err := nl.DeleteDummyDevice(defaultDummyDevice)
if err != nil { if err != nil {
klog.ErrorS(err, "Error deleting dummy device created by ipvs proxier", "device", defaultDummyDevice) logger.Error(err, "Error deleting dummy device created by ipvs proxier", "device", defaultDummyDevice)
encounteredError = true encounteredError = true
} }
// Clear iptables rules created by the ipvs Proxier. // Clear iptables rules created by the ipvs Proxier.
encounteredError = cleanupIptablesLeftovers(ipt) || encounteredError encounteredError = cleanupIptablesLeftovers(ctx, ipt) || encounteredError
// Destroy ip sets created by the ipvs Proxier. We must do this after cleaning up // Destroy ip sets created by the ipvs Proxier. We must do this after cleaning up
// iptables, since we cannot delete an ip set that is still referenced by iptables. // iptables, since we cannot delete an ip set that is still referenced by iptables.
for _, set := range ipsetInfo { for _, set := range ipsetInfo {
err = ipset.DestroySet(set.name) err = ipset.DestroySet(set.name)
if err != nil { if err != nil {
if !utilipset.IsNotFoundError(err) { if !utilipset.IsNotFoundError(err) {
klog.ErrorS(err, "Error removing ipset", "ipset", set.name) logger.Error(err, "Error removing ipset", "ipset", set.name)
encounteredError = true encounteredError = true
} }
} }
@ -829,7 +840,7 @@ func (proxier *Proxier) OnEndpointSlicesSynced() {
// is observed. // is observed.
func (proxier *Proxier) OnNodeAdd(node *v1.Node) { func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", "eventNode", node.Name, "currentNode", proxier.hostname) proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node", "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -843,7 +854,7 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
proxier.nodeLabels[k] = v proxier.nodeLabels[k] = v
} }
proxier.mu.Unlock() proxier.mu.Unlock()
klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels) proxier.logger.V(4).Info("Updated proxier node labels", "labels", node.Labels)
proxier.Sync() proxier.Sync()
} }
@ -852,7 +863,7 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
// node object is observed. // node object is observed.
func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) { func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", "eventNode", node.Name, "currentNode", proxier.hostname) proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node", "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -866,7 +877,7 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
proxier.nodeLabels[k] = v proxier.nodeLabels[k] = v
} }
proxier.mu.Unlock() proxier.mu.Unlock()
klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels) proxier.logger.V(4).Info("Updated proxier node labels", "labels", node.Labels)
proxier.Sync() proxier.Sync()
} }
@ -875,7 +886,7 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
// object is observed. // object is observed.
func (proxier *Proxier) OnNodeDelete(node *v1.Node) { func (proxier *Proxier) OnNodeDelete(node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", "eventNode", node.Name, "currentNode", proxier.hostname) proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node", "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -902,7 +913,7 @@ func (proxier *Proxier) syncProxyRules() {
// don't sync rules till we've received services and endpoints // don't sync rules till we've received services and endpoints
if !proxier.isInitialized() { if !proxier.isInitialized() {
klog.V(2).InfoS("Not syncing ipvs rules until Services and Endpoints have been received from master") proxier.logger.V(2).Info("Not syncing ipvs rules until Services and Endpoints have been received from master")
return return
} }
@ -916,7 +927,7 @@ func (proxier *Proxier) syncProxyRules() {
start := time.Now() start := time.Now()
defer func() { defer func() {
metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start)) metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
klog.V(4).InfoS("syncProxyRules complete", "elapsed", time.Since(start)) proxier.logger.V(4).Info("syncProxyRules complete", "elapsed", time.Since(start))
}() }()
// We assume that if this was called, we really want to sync them, // We assume that if this was called, we really want to sync them,
@ -925,7 +936,7 @@ func (proxier *Proxier) syncProxyRules() {
serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges) serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges)
endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges) endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges)
klog.V(3).InfoS("Syncing ipvs proxier rules") proxier.logger.V(3).Info("Syncing ipvs proxier rules")
proxier.serviceNoLocalEndpointsInternal = sets.New[string]() proxier.serviceNoLocalEndpointsInternal = sets.New[string]()
proxier.serviceNoLocalEndpointsExternal = sets.New[string]() proxier.serviceNoLocalEndpointsExternal = sets.New[string]()
@ -950,7 +961,7 @@ func (proxier *Proxier) syncProxyRules() {
// make sure the dummy interface exists in the system; the ipvs Proxier will bind service addresses to it // make sure the dummy interface exists in the system; the ipvs Proxier will bind service addresses to it
_, err := proxier.netlinkHandle.EnsureDummyDevice(defaultDummyDevice) _, err := proxier.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to create dummy interface", "interface", defaultDummyDevice) proxier.logger.Error(err, "Failed to create dummy interface", "interface", defaultDummyDevice)
return return
} }
@ -969,12 +980,12 @@ func (proxier *Proxier) syncProxyRules() {
// alreadyBoundAddrs represents the addresses currently assigned to the dummy interface // alreadyBoundAddrs represents the addresses currently assigned to the dummy interface
alreadyBoundAddrs, err := proxier.netlinkHandle.GetLocalAddresses(defaultDummyDevice) alreadyBoundAddrs, err := proxier.netlinkHandle.GetLocalAddresses(defaultDummyDevice)
if err != nil { if err != nil {
klog.ErrorS(err, "Error listing addresses binded to dummy interface") proxier.logger.Error(err, "Error listing addresses binded to dummy interface")
} }
// nodeAddressSet contains all addresses *except* those on the dummy interface // nodeAddressSet contains all addresses *except* those on the dummy interface
nodeAddressSet, err := proxier.netlinkHandle.GetAllLocalAddressesExcept(defaultDummyDevice) nodeAddressSet, err := proxier.netlinkHandle.GetAllLocalAddressesExcept(defaultDummyDevice)
if err != nil { if err != nil {
klog.ErrorS(err, "Error listing node addresses") proxier.logger.Error(err, "Error listing node addresses")
} }
hasNodePort := false hasNodePort := false
@ -997,7 +1008,7 @@ func (proxier *Proxier) syncProxyRules() {
} else { } else {
allNodeIPs, err := proxier.nodePortAddresses.GetNodeIPs(proxier.networkInterfacer) allNodeIPs, err := proxier.nodePortAddresses.GetNodeIPs(proxier.networkInterfacer)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get node IP address matching nodeport cidr") proxier.logger.Error(err, "Failed to get node IP address matching nodeport cidr")
} else { } else {
for _, ip := range allNodeIPs { for _, ip := range allNodeIPs {
if !ip.IsLoopback() { if !ip.IsLoopback() {
@ -1012,7 +1023,7 @@ func (proxier *Proxier) syncProxyRules() {
for svcPortName, svcPort := range proxier.svcPortMap { for svcPortName, svcPort := range proxier.svcPortMap {
svcInfo, ok := svcPort.(*servicePortInfo) svcInfo, ok := svcPort.(*servicePortInfo)
if !ok { if !ok {
klog.ErrorS(nil, "Failed to cast serviceInfo", "servicePortName", svcPortName) proxier.logger.Error(nil, "Failed to cast serviceInfo", "servicePortName", svcPortName)
continue continue
} }
@ -1025,7 +1036,7 @@ func (proxier *Proxier) syncProxyRules() {
for _, e := range proxier.endpointsMap[svcPortName] { for _, e := range proxier.endpointsMap[svcPortName] {
ep, ok := e.(*proxy.BaseEndpointInfo) ep, ok := e.(*proxy.BaseEndpointInfo)
if !ok { if !ok {
klog.ErrorS(nil, "Failed to cast BaseEndpointInfo", "endpoint", e) proxier.logger.Error(nil, "Failed to cast BaseEndpointInfo", "endpoint", e)
continue continue
} }
if !ep.IsLocal() { if !ep.IsLocal() {
@ -1045,7 +1056,7 @@ func (proxier *Proxier) syncProxyRules() {
SetType: utilipset.HashIPPortIP, SetType: utilipset.HashIPPortIP,
} }
if valid := proxier.ipsetList[kubeLoopBackIPSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeLoopBackIPSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoopBackIPSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoopBackIPSet].Name)
continue continue
} }
proxier.ipsetList[kubeLoopBackIPSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeLoopBackIPSet].activeEntries.Insert(entry.String())
@ -1062,7 +1073,7 @@ func (proxier *Proxier) syncProxyRules() {
// add the service Cluster IP:Port to the kubeServiceAccess ip set to handle hairpin traffic. // add the service Cluster IP:Port to the kubeServiceAccess ip set to handle hairpin traffic.
// proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String()) // proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String())
if valid := proxier.ipsetList[kubeClusterIPSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeClusterIPSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeClusterIPSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeClusterIPSet].Name)
continue continue
} }
proxier.ipsetList[kubeClusterIPSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeClusterIPSet].activeEntries.Insert(entry.String())
@ -1093,10 +1104,10 @@ func (proxier *Proxier) syncProxyRules() {
internalNodeLocal = true internalNodeLocal = true
} }
if err := proxier.syncEndpoint(svcPortName, internalNodeLocal, serv); err != nil { if err := proxier.syncEndpoint(svcPortName, internalNodeLocal, serv); err != nil {
klog.ErrorS(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv) proxier.logger.Error(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv)
} }
} else { } else {
klog.ErrorS(err, "Failed to sync service", "servicePortName", svcPortName, "virtualServer", serv) proxier.logger.Error(err, "Failed to sync service", "servicePortName", svcPortName, "virtualServer", serv)
} }
// Capture externalIPs. // Capture externalIPs.
@ -1111,14 +1122,14 @@ func (proxier *Proxier) syncProxyRules() {
if svcInfo.ExternalPolicyLocal() { if svcInfo.ExternalPolicyLocal() {
if valid := proxier.ipsetList[kubeExternalIPLocalSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeExternalIPLocalSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeExternalIPLocalSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeExternalIPLocalSet].Name)
continue continue
} }
proxier.ipsetList[kubeExternalIPLocalSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeExternalIPLocalSet].activeEntries.Insert(entry.String())
} else { } else {
// We have to SNAT packets to external IPs. // We have to SNAT packets to external IPs.
if valid := proxier.ipsetList[kubeExternalIPSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeExternalIPSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeExternalIPSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeExternalIPSet].Name)
continue continue
} }
proxier.ipsetList[kubeExternalIPSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeExternalIPSet].activeEntries.Insert(entry.String())
@ -1147,10 +1158,10 @@ func (proxier *Proxier) syncProxyRules() {
activeBindAddrs.Insert(serv.Address.String()) activeBindAddrs.Insert(serv.Address.String())
} }
if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil { if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil {
klog.ErrorS(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv) proxier.logger.Error(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv)
} }
} else { } else {
klog.ErrorS(err, "Failed to sync service", "servicePortName", svcPortName, "virtualServer", serv) proxier.logger.Error(err, "Failed to sync service", "servicePortName", svcPortName, "virtualServer", serv)
} }
} }
@ -1168,14 +1179,14 @@ func (proxier *Proxier) syncProxyRules() {
// If we are proxying globally, we need to masquerade in case we cross nodes. // If we are proxying globally, we need to masquerade in case we cross nodes.
// If we are proxying only locally, we can retain the source IP. // If we are proxying only locally, we can retain the source IP.
if valid := proxier.ipsetList[kubeLoadBalancerSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeLoadBalancerSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSet].Name)
continue continue
} }
proxier.ipsetList[kubeLoadBalancerSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeLoadBalancerSet].activeEntries.Insert(entry.String())
// insert the load balancer entry into kubeLoadBalancerLocalSet if the service has externalTrafficPolicy=Local // insert the load balancer entry into kubeLoadBalancerLocalSet if the service has externalTrafficPolicy=Local
if svcInfo.ExternalPolicyLocal() { if svcInfo.ExternalPolicyLocal() {
if valid := proxier.ipsetList[kubeLoadBalancerLocalSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeLoadBalancerLocalSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerLocalSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerLocalSet].Name)
continue continue
} }
proxier.ipsetList[kubeLoadBalancerLocalSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeLoadBalancerLocalSet].activeEntries.Insert(entry.String())
@ -1185,7 +1196,7 @@ func (proxier *Proxier) syncProxyRules() {
// This currently works for load balancers that preserve source IPs. // This currently works for load balancers that preserve source IPs.
// For load balancers that direct traffic to the service NodePort, the firewall rules will not apply. // For load balancers that direct traffic to the service NodePort, the firewall rules will not apply.
if valid := proxier.ipsetList[kubeLoadBalancerFWSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeLoadBalancerFWSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerFWSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerFWSet].Name)
continue continue
} }
proxier.ipsetList[kubeLoadBalancerFWSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeLoadBalancerFWSet].activeEntries.Insert(entry.String())
@ -1201,7 +1212,7 @@ func (proxier *Proxier) syncProxyRules() {
} }
// enumerate all whitelisted source CIDRs // enumerate all whitelisted source CIDRs
if valid := proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name)
continue continue
} }
proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String())
@ -1223,7 +1234,7 @@ func (proxier *Proxier) syncProxyRules() {
} }
// enumerate all whitelisted source IPs // enumerate all whitelisted source IPs
if valid := proxier.ipsetList[kubeLoadBalancerSourceIPSet].validateEntry(entry); !valid { if valid := proxier.ipsetList[kubeLoadBalancerSourceIPSet].validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name)
continue continue
} }
proxier.ipsetList[kubeLoadBalancerSourceIPSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeLoadBalancerSourceIPSet].activeEntries.Insert(entry.String())
@ -1256,10 +1267,10 @@ func (proxier *Proxier) syncProxyRules() {
activeBindAddrs.Insert(serv.Address.String()) activeBindAddrs.Insert(serv.Address.String())
} }
if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil { if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil {
klog.ErrorS(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv) proxier.logger.Error(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv)
} }
} else { } else {
klog.ErrorS(err, "Failed to sync service", "servicePortName", svcPortName, "virtualServer", serv) proxier.logger.Error(err, "Failed to sync service", "servicePortName", svcPortName, "virtualServer", serv)
} }
} }
@ -1309,13 +1320,13 @@ func (proxier *Proxier) syncProxyRules() {
} }
default: default:
// This should never happen // This should never happen
klog.ErrorS(nil, "Unsupported protocol type", "protocol", protocol) proxier.logger.Error(nil, "Unsupported protocol type", "protocol", protocol)
} }
if nodePortSet != nil { if nodePortSet != nil {
entryInvalidErr := false entryInvalidErr := false
for _, entry := range entries { for _, entry := range entries {
if valid := nodePortSet.validateEntry(entry); !valid { if valid := nodePortSet.validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", nodePortSet.Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", nodePortSet.Name)
entryInvalidErr = true entryInvalidErr = true
break break
} }
@ -1338,13 +1349,13 @@ func (proxier *Proxier) syncProxyRules() {
nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetSCTP] nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetSCTP]
default: default:
// This should never happen // This should never happen
klog.ErrorS(nil, "Unsupported protocol type", "protocol", protocol) proxier.logger.Error(nil, "Unsupported protocol type", "protocol", protocol)
} }
if nodePortLocalSet != nil { if nodePortLocalSet != nil {
entryInvalidErr := false entryInvalidErr := false
for _, entry := range entries { for _, entry := range entries {
if valid := nodePortLocalSet.validateEntry(entry); !valid { if valid := nodePortLocalSet.validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", nodePortLocalSet.Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", nodePortLocalSet.Name)
entryInvalidErr = true entryInvalidErr = true
break break
} }
@ -1377,10 +1388,10 @@ func (proxier *Proxier) syncProxyRules() {
if err := proxier.syncService(svcPortNameString, serv, false, alreadyBoundAddrs); err == nil { if err := proxier.syncService(svcPortNameString, serv, false, alreadyBoundAddrs); err == nil {
activeIPVSServices.Insert(serv.String()) activeIPVSServices.Insert(serv.String())
if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil { if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil {
klog.ErrorS(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv) proxier.logger.Error(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv)
} }
} else { } else {
klog.ErrorS(err, "Failed to sync service", "servicePortName", svcPortName, "virtualServer", serv) proxier.logger.Error(err, "Failed to sync service", "servicePortName", svcPortName, "virtualServer", serv)
} }
} }
} }
@ -1395,7 +1406,7 @@ func (proxier *Proxier) syncProxyRules() {
} }
if valid := nodePortSet.validateEntry(entry); !valid { if valid := nodePortSet.validateEntry(entry); !valid {
klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", nodePortSet.Name) proxier.logger.Error(nil, "Error adding entry to ipset", "entry", entry, "ipset", nodePortSet.Name)
continue continue
} }
nodePortSet.activeEntries.Insert(entry.String()) nodePortSet.activeEntries.Insert(entry.String())
@ -1422,14 +1433,17 @@ func (proxier *Proxier) syncProxyRules() {
proxier.iptablesData.Write(proxier.filterChains.Bytes()) proxier.iptablesData.Write(proxier.filterChains.Bytes())
proxier.iptablesData.Write(proxier.filterRules.Bytes()) proxier.iptablesData.Write(proxier.filterRules.Bytes())
klog.V(5).InfoS("Restoring iptables", "rules", proxier.iptablesData.Bytes()) proxier.logger.V(5).Info(
"Restoring iptables", "natChains", proxier.natChains,
"natRules", proxier.natRules, "filterChains", proxier.filterChains,
"filterRules", proxier.filterRules)
err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil { if err != nil {
if pErr, ok := err.(utiliptables.ParseError); ok { if pErr, ok := err.(utiliptables.ParseError); ok {
lines := utiliptables.ExtractLines(proxier.iptablesData.Bytes(), pErr.Line(), 3) lines := utiliptables.ExtractLines(proxier.iptablesData.Bytes(), pErr.Line(), 3)
klog.ErrorS(pErr, "Failed to execute iptables-restore", "rules", lines) proxier.logger.Error(pErr, "Failed to execute iptables-restore", "rules", lines)
} else { } else {
klog.ErrorS(err, "Failed to execute iptables-restore", "rules", proxier.iptablesData.Bytes()) proxier.logger.Error(err, "Failed to execute iptables-restore", "rules", proxier.iptablesData.Bytes())
} }
metrics.IptablesRestoreFailuresTotal.Inc() metrics.IptablesRestoreFailuresTotal.Inc()
return return
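One detail of the V(5) restore logging a few lines up: rather than dumping the whole iptablesData buffer under a single "rules" key, the converted call logs the four constituent buffers under separate keys, so each section can be inspected on its own. A toy sketch of the difference, using plain bytes.Buffer stand-ins for the proxier's line buffers:

package main

import (
	"bytes"

	"k8s.io/klog/v2"
)

func main() {
	logger := klog.Background()
	var natChains, natRules bytes.Buffer
	natChains.WriteString(":KUBE-SERVICES - [0:0]\n")
	natRules.WriteString("-A KUBE-SERVICES -j KUBE-NODE-PORT\n")

	// Before: the whole restore payload under one key.
	logger.V(5).Info("Restoring iptables", "rules", natChains.String()+natRules.String())
	// After: one key per section.
	logger.V(5).Info("Restoring iptables", "natChains", natChains.String(), "natRules", natRules.String())
}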
@ -1438,17 +1452,17 @@ func (proxier *Proxier) syncProxyRules() {
for _, lastChangeTriggerTime := range lastChangeTriggerTimes { for _, lastChangeTriggerTime := range lastChangeTriggerTimes {
latency := metrics.SinceInSeconds(lastChangeTriggerTime) latency := metrics.SinceInSeconds(lastChangeTriggerTime)
metrics.NetworkProgrammingLatency.Observe(latency) metrics.NetworkProgrammingLatency.Observe(latency)
klog.V(4).InfoS("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency) proxier.logger.V(4).Info("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency)
} }
} }
// Remove superfluous addresses from the dummy device // Remove superfluous addresses from the dummy device
superfluousAddresses := alreadyBoundAddrs.Difference(activeBindAddrs) superfluousAddresses := alreadyBoundAddrs.Difference(activeBindAddrs)
if superfluousAddresses.Len() > 0 { if superfluousAddresses.Len() > 0 {
klog.V(2).InfoS("Removing addresses", "interface", defaultDummyDevice, "addresses", superfluousAddresses) proxier.logger.V(2).Info("Removing addresses", "interface", defaultDummyDevice, "addresses", superfluousAddresses)
for adr := range superfluousAddresses { for adr := range superfluousAddresses {
if err := proxier.netlinkHandle.UnbindAddress(adr, defaultDummyDevice); err != nil { if err := proxier.netlinkHandle.UnbindAddress(adr, defaultDummyDevice); err != nil {
klog.ErrorS(err, "UnbindAddress", "interface", defaultDummyDevice, "address", adr) proxier.logger.Error(err, "UnbindAddress", "interface", defaultDummyDevice, "address", adr)
} }
} }
} }
@ -1462,7 +1476,7 @@ func (proxier *Proxier) syncProxyRules() {
currentIPVSServices[appliedSvc.String()] = appliedSvc currentIPVSServices[appliedSvc.String()] = appliedSvc
} }
} else { } else {
klog.ErrorS(err, "Failed to get ipvs service") proxier.logger.Error(err, "Failed to get ipvs service")
} }
proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices) proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices)
@ -1475,10 +1489,10 @@ func (proxier *Proxier) syncProxyRules() {
// not "OnlyLocal", but the services list will not, and the serviceHealthServer // not "OnlyLocal", but the services list will not, and the serviceHealthServer
// will just drop those endpoints. // will just drop those endpoints.
if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil { if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil {
klog.ErrorS(err, "Error syncing healthcheck services") proxier.logger.Error(err, "Error syncing healthcheck services")
} }
if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil { if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil {
klog.ErrorS(err, "Error syncing healthcheck endpoints") proxier.logger.Error(err, "Error syncing healthcheck endpoints")
} }
metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal").Set(float64(proxier.serviceNoLocalEndpointsInternal.Len())) metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal").Set(float64(proxier.serviceNoLocalEndpointsInternal.Len()))
@ -1750,7 +1764,7 @@ func (proxier *Proxier) acceptIPVSTraffic() {
func (proxier *Proxier) createAndLinkKubeChain() { func (proxier *Proxier) createAndLinkKubeChain() {
for _, ch := range iptablesChains { for _, ch := range iptablesChains {
if _, err := proxier.iptables.EnsureChain(ch.table, ch.chain); err != nil { if _, err := proxier.iptables.EnsureChain(ch.table, ch.chain); err != nil {
klog.ErrorS(err, "Failed to ensure chain exists", "table", ch.table, "chain", ch.chain) proxier.logger.Error(err, "Failed to ensure chain exists", "table", ch.table, "chain", ch.chain)
return return
} }
if ch.table == utiliptables.TableNAT { if ch.table == utiliptables.TableNAT {
@ -1763,7 +1777,7 @@ func (proxier *Proxier) createAndLinkKubeChain() {
for _, jc := range iptablesJumpChain { for _, jc := range iptablesJumpChain {
args := []string{"-m", "comment", "--comment", jc.comment, "-j", string(jc.to)} args := []string{"-m", "comment", "--comment", jc.comment, "-j", string(jc.to)}
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jc.table, jc.from, args...); err != nil { if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jc.table, jc.from, args...); err != nil {
klog.ErrorS(err, "Failed to ensure chain jumps", "table", jc.table, "srcChain", jc.from, "dstChain", jc.to) proxier.logger.Error(err, "Failed to ensure chain jumps", "table", jc.table, "srcChain", jc.from, "dstChain", jc.to)
} }
} }
@ -1774,17 +1788,17 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer,
if appliedVirtualServer == nil || !appliedVirtualServer.Equal(vs) { if appliedVirtualServer == nil || !appliedVirtualServer.Equal(vs) {
if appliedVirtualServer == nil { if appliedVirtualServer == nil {
// IPVS service is not found, create a new service // IPVS service is not found, create a new service
klog.V(3).InfoS("Adding new service", "serviceName", svcName, "virtualServer", vs) proxier.logger.V(3).Info("Adding new service", "serviceName", svcName, "virtualServer", vs)
if err := proxier.ipvs.AddVirtualServer(vs); err != nil { if err := proxier.ipvs.AddVirtualServer(vs); err != nil {
klog.ErrorS(err, "Failed to add IPVS service", "serviceName", svcName) proxier.logger.Error(err, "Failed to add IPVS service", "serviceName", svcName)
return err return err
} }
} else { } else {
// IPVS service was changed, update the existing one // IPVS service was changed, update the existing one
// During updates, service VIP will not go down // During updates, service VIP will not go down
klog.V(3).InfoS("IPVS service was changed", "serviceName", svcName) proxier.logger.V(3).Info("IPVS service was changed", "serviceName", svcName)
if err := proxier.ipvs.UpdateVirtualServer(vs); err != nil { if err := proxier.ipvs.UpdateVirtualServer(vs); err != nil {
klog.ErrorS(err, "Failed to update IPVS service") proxier.logger.Error(err, "Failed to update IPVS service")
return err return err
} }
} }
@ -1798,10 +1812,10 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer,
return nil return nil
} }
klog.V(4).InfoS("Bind address", "address", vs.Address) proxier.logger.V(4).Info("Bind address", "address", vs.Address)
_, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), defaultDummyDevice) _, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), defaultDummyDevice)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to bind service address to dummy device", "serviceName", svcName) proxier.logger.Error(err, "Failed to bind service address to dummy device", "serviceName", svcName)
return err return err
} }
} }
@ -1812,7 +1826,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer,
func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNodeLocalEndpoints bool, vs *utilipvs.VirtualServer) error { func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNodeLocalEndpoints bool, vs *utilipvs.VirtualServer) error {
appliedVirtualServer, err := proxier.ipvs.GetVirtualServer(vs) appliedVirtualServer, err := proxier.ipvs.GetVirtualServer(vs)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get IPVS service") proxier.logger.Error(err, "Failed to get IPVS service")
return err return err
} }
if appliedVirtualServer == nil { if appliedVirtualServer == nil {
@ -1823,7 +1837,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
curEndpoints := sets.New[string]() curEndpoints := sets.New[string]()
curDests, err := proxier.ipvs.GetRealServers(appliedVirtualServer) curDests, err := proxier.ipvs.GetRealServers(appliedVirtualServer)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to list IPVS destinations") proxier.logger.Error(err, "Failed to list IPVS destinations")
return err return err
} }
for _, des := range curDests { for _, des := range curDests {
@ -1838,7 +1852,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
// externalTrafficPolicy=Local. // externalTrafficPolicy=Local.
svcInfo, ok := proxier.svcPortMap[svcPortName] svcInfo, ok := proxier.svcPortMap[svcPortName]
if !ok { if !ok {
klog.InfoS("Unable to filter endpoints due to missing service info", "servicePortName", svcPortName) proxier.logger.Info("Unable to filter endpoints due to missing service info", "servicePortName", svcPortName)
} else { } else {
clusterEndpoints, localEndpoints, _, hasAnyEndpoints := proxy.CategorizeEndpoints(endpoints, svcInfo, proxier.nodeLabels) clusterEndpoints, localEndpoints, _, hasAnyEndpoints := proxy.CategorizeEndpoints(endpoints, svcInfo, proxier.nodeLabels)
if onlyNodeLocalEndpoints { if onlyNodeLocalEndpoints {
@ -1873,12 +1887,12 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
for _, ep := range newEndpoints.UnsortedList() { for _, ep := range newEndpoints.UnsortedList() {
ip, port, err := net.SplitHostPort(ep) ip, port, err := net.SplitHostPort(ep)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to parse endpoint", "endpoint", ep) proxier.logger.Error(err, "Failed to parse endpoint", "endpoint", ep)
continue continue
} }
portNum, err := strconv.Atoi(port) portNum, err := strconv.Atoi(port)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to parse endpoint port", "port", port) proxier.logger.Error(err, "Failed to parse endpoint port", "port", port)
continue continue
} }
@ -1896,7 +1910,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
if dest.Weight != newDest.Weight { if dest.Weight != newDest.Weight {
err = proxier.ipvs.UpdateRealServer(appliedVirtualServer, newDest) err = proxier.ipvs.UpdateRealServer(appliedVirtualServer, newDest)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to update destination", "newDest", newDest) proxier.logger.Error(err, "Failed to update destination", "newDest", newDest)
continue continue
} }
} }
@ -1907,16 +1921,16 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
if !proxier.gracefuldeleteManager.InTerminationList(uniqueRS) { if !proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
continue continue
} }
klog.V(5).InfoS("new ep is in graceful delete list", "uniqueRealServer", uniqueRS) proxier.logger.V(5).Info("new ep is in graceful delete list", "uniqueRealServer", uniqueRS)
err := proxier.gracefuldeleteManager.MoveRSOutofGracefulDeleteList(uniqueRS) err := proxier.gracefuldeleteManager.MoveRSOutofGracefulDeleteList(uniqueRS)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to delete endpoint in gracefulDeleteQueue", "endpoint", ep) proxier.logger.Error(err, "Failed to delete endpoint in gracefulDeleteQueue", "endpoint", ep)
continue continue
} }
} }
err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest) err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to add destination", "newDest", newDest) proxier.logger.Error(err, "Failed to add destination", "newDest", newDest)
continue continue
} }
} }
@ -1930,12 +1944,12 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
} }
ip, port, err := net.SplitHostPort(ep) ip, port, err := net.SplitHostPort(ep)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to parse endpoint", "endpoint", ep) proxier.logger.Error(err, "Failed to parse endpoint", "endpoint", ep)
continue continue
} }
portNum, err := strconv.Atoi(port) portNum, err := strconv.Atoi(port)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to parse endpoint port", "port", port) proxier.logger.Error(err, "Failed to parse endpoint port", "port", port)
continue continue
} }
@ -1944,10 +1958,10 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
Port: uint16(portNum), Port: uint16(portNum),
} }
klog.V(5).InfoS("Using graceful delete", "uniqueRealServer", uniqueRS) proxier.logger.V(5).Info("Using graceful delete", "uniqueRealServer", uniqueRS)
err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest) err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to delete destination", "uniqueRealServer", uniqueRS) proxier.logger.Error(err, "Failed to delete destination", "uniqueRealServer", uniqueRS)
continue continue
} }
} }
@ -1964,9 +1978,9 @@ func (proxier *Proxier) cleanLegacyService(activeServices sets.Set[string], curr
continue continue
} }
if !activeServices.Has(cs) { if !activeServices.Has(cs) {
klog.V(4).InfoS("Delete service", "virtualServer", svc) proxier.logger.V(4).Info("Delete service", "virtualServer", svc)
if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil { if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil {
klog.ErrorS(err, "Failed to delete service", "virtualServer", svc) proxier.logger.Error(err, "Failed to delete service", "virtualServer", svc)
} }
} }
} }


@ -21,6 +21,7 @@ package ipvs
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"net" "net"
"reflect" "reflect"
@ -55,6 +56,7 @@ import (
"k8s.io/kubernetes/pkg/util/async" "k8s.io/kubernetes/pkg/util/async"
utiliptables "k8s.io/kubernetes/pkg/util/iptables" utiliptables "k8s.io/kubernetes/pkg/util/iptables"
iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing" iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/kubernetes/test/utils/ktesting"
netutils "k8s.io/utils/net" netutils "k8s.io/utils/net"
"k8s.io/utils/ptr" "k8s.io/utils/ptr"
) )
@ -125,7 +127,7 @@ func (fake *fakeIPSetVersioner) GetVersion() (string, error) {
return fake.version, fake.err return fake.version, fake.err
} }
func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []string, excludeCIDRs []*net.IPNet, ipFamily v1.IPFamily) *Proxier { func NewFakeProxier(ctx context.Context, ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []string, excludeCIDRs []*net.IPNet, ipFamily v1.IPFamily) *Proxier {
netlinkHandle := netlinktest.NewFakeNetlinkHandle(ipFamily == v1.IPv6Protocol) netlinkHandle := netlinktest.NewFakeNetlinkHandle(ipFamily == v1.IPv6Protocol)
netlinkHandle.SetLocalAddresses("eth0", nodeIPs...) netlinkHandle.SetLocalAddresses("eth0", nodeIPs...)
@ -224,10 +226,11 @@ func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*d
} }
func TestCleanupLeftovers(t *testing.T) { func TestCleanupLeftovers(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
svcNodePort := 3001 svcNodePort := 3001
@ -266,12 +269,13 @@ func TestCleanupLeftovers(t *testing.T) {
fp.syncProxyRules() fp.syncProxyRules()
// test cleanup left over // test cleanup left over
if CleanupLeftovers(ipvs, ipt, ipset) { if CleanupLeftovers(ctx, ipvs, ipt, ipset) {
t.Errorf("Cleanup leftovers failed") t.Errorf("Cleanup leftovers failed")
} }
} }
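The test conversions all follow one recipe: ktesting.NewTestContext(t) returns a logger wired to t.Log plus a context carrying that logger, and the context is then threaded into NewFakeProxier and package entry points such as CleanupLeftovers. A minimal sketch of the recipe, assuming the same ktesting package imported above; the test name is hypothetical:

package example_test

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestContextualLoggingRecipe(t *testing.T) {
	// Logger output is routed through t.Log, so it only surfaces
	// alongside the failing test that produced it.
	logger, ctx := ktesting.NewTestContext(t)
	logger.Info("fakes would be constructed here")
	// Code under test recovers the same logger from the context:
	klog.FromContext(ctx).V(5).Info("would appear in the test log")
}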
func TestCanUseIPVSProxier(t *testing.T) { func TestCanUseIPVSProxier(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
testCases := []struct { testCases := []struct {
name string name string
scheduler string scheduler string
@ -321,7 +325,7 @@ func TestCanUseIPVSProxier(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
ipvs := &fakeIpvs{tc.ipvsErr, false} ipvs := &fakeIpvs{tc.ipvsErr, false}
versioner := &fakeIPSetVersioner{version: tc.ipsetVersion, err: tc.ipsetErr} versioner := &fakeIPSetVersioner{version: tc.ipsetVersion, err: tc.ipsetErr}
err := CanUseIPVSProxier(ipvs, versioner, tc.scheduler) err := CanUseIPVSProxier(ctx, ipvs, versioner, tc.scheduler)
if (err == nil) != tc.ok { if (err == nil) != tc.ok {
t.Errorf("Case [%s], expect %v, got err: %v", tc.name, tc.ok, err) t.Errorf("Case [%s], expect %v, got err: %v", tc.name, tc.ok, err)
} }
@ -941,10 +945,11 @@ func TestNodePortIPv4(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv4Protocol)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses) fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses)
makeServiceMap(fp, test.services...) makeServiceMap(fp, test.services...)
@ -1283,10 +1288,11 @@ func TestNodePortIPv6(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv6Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv6Protocol)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses) fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses)
makeServiceMap(fp, test.services...) makeServiceMap(fp, test.services...)
@ -1312,10 +1318,11 @@ func TestNodePortIPv6(t *testing.T) {
} }
func Test_syncEndpoint_updateWeightsOnRestart(t *testing.T) { func Test_syncEndpoint_updateWeightsOnRestart(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
svc1 := makeTestService("ns1", "svc1", func(svc *v1.Service) { svc1 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
svc.Spec.ClusterIP = "10.20.30.41" svc.Spec.ClusterIP = "10.20.30.41"
@ -1509,10 +1516,11 @@ func TestIPv4Proxier(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
makeServiceMap(fp, test.services...) makeServiceMap(fp, test.services...)
populateEndpointSlices(fp, test.endpoints...) populateEndpointSlices(fp, test.endpoints...)
@ -1646,10 +1654,11 @@ func TestIPv6Proxier(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv6Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv6Protocol)
makeServiceMap(fp, test.services...) makeServiceMap(fp, test.services...)
populateEndpointSlices(fp, test.endpoints...) populateEndpointSlices(fp, test.endpoints...)
@ -1667,10 +1676,11 @@ func TestIPv6Proxier(t *testing.T) {
func TestMasqueradeRule(t *testing.T) { func TestMasqueradeRule(t *testing.T) {
for _, testcase := range []bool{false, true} { for _, testcase := range []bool{false, true} {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake().SetHasRandomFully(testcase) ipt := iptablestest.NewFake().SetHasRandomFully(testcase)
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
makeServiceMap(fp) makeServiceMap(fp)
fp.syncProxyRules() fp.syncProxyRules()
@ -1700,10 +1710,11 @@ func TestMasqueradeRule(t *testing.T) {
} }
func TestExternalIPsNoEndpoint(t *testing.T) { func TestExternalIPsNoEndpoint(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
svcExternalIPs := "50.60.70.81" svcExternalIPs := "50.60.70.81"
@ -1752,10 +1763,11 @@ func TestExternalIPsNoEndpoint(t *testing.T) {
} }
func TestExternalIPs(t *testing.T) { func TestExternalIPs(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
svcExternalIPs := sets.New[string]("50.60.70.81", "2012::51", "127.0.0.1") svcExternalIPs := sets.New[string]("50.60.70.81", "2012::51", "127.0.0.1")
@ -1822,10 +1834,11 @@ func TestExternalIPs(t *testing.T) {
} }
func TestOnlyLocalExternalIPs(t *testing.T) { func TestOnlyLocalExternalIPs(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
svcExternalIPs := sets.New[string]("50.60.70.81", "2012::51", "127.0.0.1") svcExternalIPs := sets.New[string]("50.60.70.81", "2012::51", "127.0.0.1")
@ -1903,7 +1916,7 @@ func TestOnlyLocalExternalIPs(t *testing.T) {
} }
func TestLoadBalancer(t *testing.T) { func TestLoadBalancer(t *testing.T) {
ipt, fp := buildFakeProxier() ipt, fp := buildFakeProxier(t)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
svcNodePort := 3001 svcNodePort := 3001
@ -1989,7 +2002,7 @@ func TestLoadBalancer(t *testing.T) {
func TestOnlyLocalNodePorts(t *testing.T) { func TestOnlyLocalNodePorts(t *testing.T) {
nodeIP := netutils.ParseIPSloppy("100.101.102.103") nodeIP := netutils.ParseIPSloppy("100.101.102.103")
ipt, fp := buildFakeProxier() ipt, fp := buildFakeProxier(t)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
@ -2087,7 +2100,7 @@ func TestOnlyLocalNodePorts(t *testing.T) {
} }
func TestHealthCheckNodePort(t *testing.T) { func TestHealthCheckNodePort(t *testing.T) {
ipt, fp := buildFakeProxier() ipt, fp := buildFakeProxier(t)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
@ -2160,7 +2173,7 @@ func TestHealthCheckNodePort(t *testing.T) {
} }
func TestLoadBalancerSourceRanges(t *testing.T) { func TestLoadBalancerSourceRanges(t *testing.T) {
ipt, fp := buildFakeProxier() ipt, fp := buildFakeProxier(t)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
@ -2265,7 +2278,7 @@ func TestLoadBalancerSourceRanges(t *testing.T) {
} }
func TestAcceptIPVSTraffic(t *testing.T) { func TestAcceptIPVSTraffic(t *testing.T) {
ipt, fp := buildFakeProxier() ipt, fp := buildFakeProxier(t)
ingressIP := "1.2.3.4" ingressIP := "1.2.3.4"
externalIP := []string{"5.6.7.8"} externalIP := []string{"5.6.7.8"}
@ -2335,7 +2348,7 @@ func TestAcceptIPVSTraffic(t *testing.T) {
} }
func TestOnlyLocalLoadBalancing(t *testing.T) { func TestOnlyLocalLoadBalancing(t *testing.T) {
ipt, fp := buildFakeProxier() ipt, fp := buildFakeProxier(t)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
@ -2449,10 +2462,11 @@ func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port
} }
func TestBuildServiceMapAddRemove(t *testing.T) { func TestBuildServiceMapAddRemove(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
services := []*v1.Service{ services := []*v1.Service{
makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) { makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
@ -2561,10 +2575,11 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
} }
func TestBuildServiceMapServiceHeadless(t *testing.T) { func TestBuildServiceMapServiceHeadless(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
makeServiceMap(fp, makeServiceMap(fp,
makeTestService("somewhere-else", "headless", func(svc *v1.Service) { makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
@ -2601,10 +2616,11 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) {
} }
func TestBuildServiceMapServiceTypeExternalName(t *testing.T) { func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
makeServiceMap(fp, makeServiceMap(fp,
makeTestService("somewhere-else", "external-name", func(svc *v1.Service) { makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
@ -2631,10 +2647,11 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
} }
func TestBuildServiceMapServiceUpdate(t *testing.T) { func TestBuildServiceMapServiceUpdate(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) { servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP svc.Spec.Type = v1.ServiceTypeClusterIP
@ -2722,11 +2739,12 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
} }
func TestSessionAffinity(t *testing.T) { func TestSessionAffinity(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
nodeIP := "100.101.102.103" nodeIP := "100.101.102.103"
fp := NewFakeProxier(ipt, ipvs, ipset, []string{nodeIP}, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, []string{nodeIP}, nil, v1.IPv4Protocol)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
svcNodePort := 3001 svcNodePort := 3001
@ -3537,10 +3555,11 @@ func Test_updateEndpointsMap(t *testing.T) {
for tci, tc := range testCases { for tci, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.hostname = testHostname fp.hostname = testHostname
// First check that after adding all previous versions of endpoints, // First check that after adding all previous versions of endpoints,
@ -3813,10 +3832,11 @@ func Test_syncService(t *testing.T) {
} }
for i := range testCases { for i := range testCases {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
proxier := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) proxier := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
proxier.netlinkHandle.EnsureDummyDevice(defaultDummyDevice) proxier.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
if testCases[i].oldVirtualServer != nil { if testCases[i].oldVirtualServer != nil {
@ -3842,11 +3862,12 @@ func Test_syncService(t *testing.T) {
} }
} }
func buildFakeProxier() (*iptablestest.FakeIPTables, *Proxier) { func buildFakeProxier(t *testing.T) (*iptablestest.FakeIPTables, *Proxier) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
return ipt, NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) return ipt, NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
} }
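Because buildFakeProxier now mints its own test context, every caller hands it a *testing.T, and the constructors it wraps take the context as their first parameter. Passing ctx first (NewFakeProxier(ctx, ...), CleanupLeftovers(ctx, ...)) follows the standard Go convention that a Context leads the argument list. A minimal sketch of that convention with hypothetical names:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// newThing mirrors the ctx-first shape this commit gives NewProxier,
// NewFakeProxier and CleanupLeftovers; the name is illustrative.
func newThing(ctx context.Context, name string) error {
	klog.FromContext(ctx).Info("constructing", "name", name)
	return nil
}

func main() {
	_ = newThing(context.Background(), "demo")
}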
func getRules(ipt *iptablestest.FakeIPTables, chain utiliptables.Chain) []*iptablestest.Rule { func getRules(ipt *iptablestest.FakeIPTables, chain utiliptables.Chain) []*iptablestest.Rule {
@ -3935,11 +3956,12 @@ func checkIPVS(t *testing.T, fp *Proxier, vs *netlinktest.ExpectedVirtualServer)
} }
func TestCleanLegacyService(t *testing.T) { func TestCleanLegacyService(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
excludeCIDRs, _ := netutils.ParseCIDRs([]string{"3.3.3.0/24", "4.4.4.0/24"}) excludeCIDRs, _ := netutils.ParseCIDRs([]string{"3.3.3.0/24", "4.4.4.0/24"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol)
// All ipvs services that were processed in the latest sync loop. // All ipvs services that were processed in the latest sync loop.
activeServices := sets.New("ipvs0", "ipvs1") activeServices := sets.New("ipvs0", "ipvs1")
@ -4016,10 +4038,11 @@ func TestCleanLegacyService(t *testing.T) {
} }
func TestCleanLegacyServiceWithRealServers(t *testing.T) { func TestCleanLegacyServiceWithRealServers(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
// all deleted except ipvs2 // all deleted except ipvs2
activeServices := sets.New("ipvs2") activeServices := sets.New("ipvs2")
@ -4085,12 +4108,13 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
} }
func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) { func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
gtm := NewGracefulTerminationManager(ipvs) gtm := NewGracefulTerminationManager(ipvs)
excludeCIDRs, _ := netutils.ParseCIDRs([]string{"4.4.4.4/32"}) excludeCIDRs, _ := netutils.ParseCIDRs([]string{"4.4.4.4/32"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol)
fp.gracefuldeleteManager = gtm fp.gracefuldeleteManager = gtm
vs := &utilipvs.VirtualServer{ vs := &utilipvs.VirtualServer{
@ -4137,11 +4161,12 @@ func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) {
} }
func TestCleanLegacyService6(t *testing.T) { func TestCleanLegacyService6(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
excludeCIDRs, _ := netutils.ParseCIDRs([]string{"3000::/64", "4000::/64"}) excludeCIDRs, _ := netutils.ParseCIDRs([]string{"3000::/64", "4000::/64"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv6Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv6Protocol)
fp.nodeIP = netutils.ParseIPSloppy("::1") fp.nodeIP = netutils.ParseIPSloppy("::1")
// All ipvs services that were processed in the latest sync loop. // All ipvs services that were processed in the latest sync loop.
@ -4219,10 +4244,11 @@ func TestCleanLegacyService6(t *testing.T) {
} }
func TestMultiPortServiceBindAddr(t *testing.T) { func TestMultiPortServiceBindAddr(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
service1 := makeTestService("ns1", "svc1", func(svc *v1.Service) { service1 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP svc.Spec.Type = v1.ServiceTypeClusterIP
@ -4323,10 +4349,11 @@ raid10 57344 0 - Live 0xffffffffc0597000`,
// the shared EndpointsChangeTracker and EndpointSliceCache. This test ensures that the // the shared EndpointsChangeTracker and EndpointSliceCache. This test ensures that the
// ipvs proxier supports translating EndpointSlices to ipvs output. // ipvs proxier supports translating EndpointSlices to ipvs output.
func TestEndpointSliceE2E(t *testing.T) { func TestEndpointSliceE2E(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -4408,10 +4435,11 @@ func TestEndpointSliceE2E(t *testing.T) {
} }
func TestHealthCheckNodePortE2E(t *testing.T) { func TestHealthCheckNodePortE2E(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -4462,10 +4490,11 @@ func TestHealthCheckNodePortE2E(t *testing.T) {
// Test_HealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating // Test_HealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
func Test_HealthCheckNodePortWhenTerminating(t *testing.T) { func Test_HealthCheckNodePortWhenTerminating(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
// fp.endpointsSynced = true // fp.endpointsSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -4606,10 +4635,11 @@ func TestFilterCIDRs(t *testing.T) {
} }
func TestCreateAndLinkKubeChain(t *testing.T) { func TestCreateAndLinkKubeChain(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.createAndLinkKubeChain() fp.createAndLinkKubeChain()
expectedNATChains := `:KUBE-SERVICES - [0:0] expectedNATChains := `:KUBE-SERVICES - [0:0]
:KUBE-POSTROUTING - [0:0] :KUBE-POSTROUTING - [0:0]
@ -4709,10 +4739,11 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) {
}, },
} }
for _, tc := range testCases { for _, tc := range testCases {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
// fp.endpointsSynced = true // fp.endpointsSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -4805,11 +4836,11 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) {
// Test_EndpointSliceReadyAndTerminatingCluster tests that when there are ready and ready + terminating // Test_EndpointSliceReadyAndTerminatingCluster tests that when there are ready and ready + terminating
// endpoints and the traffic policy is "Cluster", only the ready endpoints are used. // endpoints and the traffic policy is "Cluster", only the ready endpoints are used.
func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) { func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
// fp.endpointsSynced = true // fp.endpointsSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -4978,11 +5009,11 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
// Test_EndpointSliceReadyAndTerminatingLocal tests that when there are local ready and ready + terminating // Test_EndpointSliceReadyAndTerminatingLocal tests that when there are local ready and ready + terminating
// endpoints, only the ready endpoints are used. // endpoints, only the ready endpoints are used.
func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) { func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
// fp.endpointsSynced = true // fp.endpointsSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -5150,11 +5181,11 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
// Test_EndpointSliceOnlyReadyAndTerminatingCluster tests that when there are only ready terminating // Test_EndpointSliceOnlyReadyAndTerminatingCluster tests that when there are only ready terminating
// endpoints and the traffic policy is "Cluster", we fall back to terminating endpoints. // endpoints and the traffic policy is "Cluster", we fall back to terminating endpoints.
func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) { func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
// fp.endpointsSynced = true // fp.endpointsSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -5322,11 +5353,11 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
// Test_EndpointSliceOnlyReadyAndTerminatingLocal tests that when there are only local ready terminating // Test_EndpointSliceOnlyReadyAndTerminatingLocal tests that when there are only local ready terminating
// endpoints, we fall back to those endpoints. // endpoints, we fall back to those endpoints.
func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) { func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
// fp.endpointsSynced = true // fp.endpointsSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -5665,10 +5696,11 @@ func TestNoEndpointsMetric(t *testing.T) {
}, },
} }
for _, tc := range testCases { for _, tc := range testCases {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, []string{"10.0.0.1"}, nil, v1.IPv4Protocol) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, []string{"10.0.0.1"}, nil, v1.IPv4Protocol)
fp.servicesSynced = true fp.servicesSynced = true
// fp.endpointsSynced = true // fp.endpointsSynced = true
fp.endpointSlicesSynced = true fp.endpointSlicesSynced = true
@ -5760,13 +5792,14 @@ func TestDismissLocalhostRuleExist(t *testing.T) {
} }
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
if test.ipFamily == v1.IPv6Protocol { if test.ipFamily == v1.IPv6Protocol {
ipt = iptablestest.NewIPv6Fake() ipt = iptablestest.NewIPv6Fake()
} }
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, test.ipFamily) fp := NewFakeProxier(ctx, ipt, ipvs, ipset, nil, nil, test.ipFamily)
fp.syncProxyRules() fp.syncProxyRules()
@ -5853,7 +5886,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
for _, testCase := range testCases { for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) { t.Run(testCase.name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LoadBalancerIPMode, testCase.ipModeEnabled)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LoadBalancerIPMode, testCase.ipModeEnabled)()
_, fp := buildFakeProxier() _, fp := buildFakeProxier(t)
makeServiceMap(fp, makeServiceMap(fp,
makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
svc.Spec.Type = "LoadBalancer" svc.Spec.Type = "LoadBalancer"

View File

@ -20,13 +20,13 @@ limitations under the License.
package ipvs package ipvs
import ( import (
"errors"
"fmt" "fmt"
"net" "net"
"strings" "strings"
"sync" "sync"
"time" "time"
"errors"
libipvs "github.com/moby/ipvs" libipvs "github.com/moby/ipvs"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"

View File

@ -17,6 +17,7 @@ limitations under the License.
package kubemark package kubemark
import ( import (
"context"
"fmt" "fmt"
"time" "time"
@ -85,7 +86,8 @@ func NewHollowProxy(
} }
func (hp *HollowProxy) Run() error { func (hp *HollowProxy) Run() error {
if err := hp.ProxyServer.Run(); err != nil {
if err := hp.ProxyServer.Run(context.TODO()); err != nil {
return fmt.Errorf("Error while running proxy: %w", err) return fmt.Errorf("Error while running proxy: %w", err)
} }
return nil return nil
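context.TODO() here marks a call chain that has not been converted yet: HollowProxy.Run has no context of its own to forward. Once kubemark is converted, the natural replacement is a context tied to process signals. A hedged sketch of what a converted caller could look like; run and the wiring are assumptions, not what kubemark does today:

package main

import (
	"context"
	"os/signal"
	"syscall"
)

// run stands in for HollowProxy.Run after a full conversion.
func run(ctx context.Context) error { return nil }

func main() {
	// A signal-bound context is one plausible way to retire the TODO:
	// cancellation propagates to everything that accepts ctx.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()
	_ = run(ctx)
}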

View File

@ -105,6 +105,7 @@ const (
// NewDualStackProxier creates a MetaProxier instance, with IPv4 and IPv6 proxies. // NewDualStackProxier creates a MetaProxier instance, with IPv4 and IPv6 proxies.
func NewDualStackProxier( func NewDualStackProxier(
ctx context.Context,
sysctl utilsysctl.Interface, sysctl utilsysctl.Interface,
syncPeriod time.Duration, syncPeriod time.Duration,
minSyncPeriod time.Duration, minSyncPeriod time.Duration,
@ -119,14 +120,14 @@ func NewDualStackProxier(
initOnly bool, initOnly bool,
) (proxy.Provider, error) { ) (proxy.Provider, error) {
// Create an ipv4 instance of the single-stack proxier // Create an ipv4 instance of the single-stack proxier
ipv4Proxier, err := NewProxier(v1.IPv4Protocol, sysctl, ipv4Proxier, err := NewProxier(ctx, v1.IPv4Protocol, sysctl,
syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit, localDetectors[0], hostname, syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit, localDetectors[0], hostname,
nodeIPs[v1.IPv4Protocol], recorder, healthzServer, nodePortAddresses, initOnly) nodeIPs[v1.IPv4Protocol], recorder, healthzServer, nodePortAddresses, initOnly)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err)
} }
ipv6Proxier, err := NewProxier(v1.IPv6Protocol, sysctl, ipv6Proxier, err := NewProxier(ctx, v1.IPv6Protocol, sysctl,
syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit, localDetectors[1], hostname, syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit, localDetectors[1], hostname,
nodeIPs[v1.IPv6Protocol], recorder, healthzServer, nodePortAddresses, initOnly) nodeIPs[v1.IPv6Protocol], recorder, healthzServer, nodePortAddresses, initOnly)
if err != nil { if err != nil {
@ -189,6 +190,8 @@ type Proxier struct {
// serviceCIDRs is a comma separated list of ServiceCIDRs belonging to the IPFamily // serviceCIDRs is a comma separated list of ServiceCIDRs belonging to the IPFamily
// which proxier is operating on, can be directly consumed by knftables. // which proxier is operating on, can be directly consumed by knftables.
serviceCIDRs string serviceCIDRs string
logger klog.Logger
} }
// Proxier implements proxy.Provider // Proxier implements proxy.Provider
@ -197,7 +200,8 @@ var _ proxy.Provider = &Proxier{}
// NewProxier returns a new nftables Proxier. Once a proxier is created, it will keep // NewProxier returns a new nftables Proxier. Once a proxier is created, it will keep
// nftables up to date in the background and will not terminate if a particular nftables // nftables up to date in the background and will not terminate if a particular nftables
// call fails. // call fails.
func NewProxier(ipFamily v1.IPFamily, func NewProxier(ctx context.Context,
ipFamily v1.IPFamily,
sysctl utilsysctl.Interface, sysctl utilsysctl.Interface,
syncPeriod time.Duration, syncPeriod time.Duration,
minSyncPeriod time.Duration, minSyncPeriod time.Duration,
@ -211,15 +215,17 @@ func NewProxier(ipFamily v1.IPFamily,
nodePortAddressStrings []string, nodePortAddressStrings []string,
initOnly bool, initOnly bool,
) (*Proxier, error) { ) (*Proxier, error) {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "ipFamily", ipFamily)
if initOnly { if initOnly {
klog.InfoS("System initialized and --init-only specified") logger.Info("System initialized and --init-only specified")
return nil, nil return nil, nil
} }
// Generate the masquerade mark to use for SNAT rules. // Generate the masquerade mark to use for SNAT rules.
masqueradeValue := 1 << uint(masqueradeBit) masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue) masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)
klog.V(2).InfoS("Using nftables mark for masquerade", "ipFamily", ipFamily, "mark", masqueradeMark) logger.V(2).Info("Using nftables mark for masquerade", "mark", masqueradeMark)
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings) nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
@ -256,10 +262,11 @@ func NewProxier(ipFamily v1.IPFamily,
nodePortAddresses: nodePortAddresses, nodePortAddresses: nodePortAddresses,
networkInterfacer: proxyutil.RealNetwork{}, networkInterfacer: proxyutil.RealNetwork{},
staleChains: make(map[string]time.Time), staleChains: make(map[string]time.Time),
logger: logger,
} }
burstSyncs := 2 burstSyncs := 2
klog.V(2).InfoS("NFTables sync params", "ipFamily", ipFamily, "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs) logger.V(2).Info("NFTables sync params", "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs)
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
return proxier, nil return proxier, nil
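NewProxier derives its logger once with klog.LoggerWithValues, so every later message from that proxier instance carries ipFamily without repeating the key at each call site; that is why the explicit "ipFamily" arguments disappear from the two V(2) lines above. A minimal sketch:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func main() {
	ctx := context.Background()
	// Bind the key/value pair once; it is emitted with every message below.
	logger := klog.LoggerWithValues(klog.FromContext(ctx), "ipFamily", "IPv4")
	logger.V(2).Info("Using nftables mark for masquerade", "mark", "0x00004000")
	logger.V(2).Info("NFTables sync params", "burstSyncs", 2)
}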
@ -516,11 +523,11 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
}) })
nodeIPs, err := proxier.nodePortAddresses.GetNodeIPs(proxier.networkInterfacer) nodeIPs, err := proxier.nodePortAddresses.GetNodeIPs(proxier.networkInterfacer)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get node ip address matching nodeport cidrs, services with nodeport may not work as intended", "CIDRs", proxier.nodePortAddresses) proxier.logger.Error(err, "Failed to get node ip address matching nodeport cidrs, services with nodeport may not work as intended", "CIDRs", proxier.nodePortAddresses)
} }
for _, ip := range nodeIPs { for _, ip := range nodeIPs {
if ip.IsLoopback() { if ip.IsLoopback() {
klog.ErrorS(nil, "--nodeport-addresses includes localhost but localhost NodePorts are not supported", "address", ip.String()) proxier.logger.Error(nil, "--nodeport-addresses includes localhost but localhost NodePorts are not supported", "address", ip.String())
continue continue
} }
tx.Add(&knftables.Element{ tx.Add(&knftables.Element{
@ -642,7 +649,8 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
// CleanupLeftovers removes all nftables rules and chains created by the Proxier // CleanupLeftovers removes all nftables rules and chains created by the Proxier
// It returns true if an error was encountered. Errors are logged. // It returns true if an error was encountered. Errors are logged.
func CleanupLeftovers() bool { func CleanupLeftovers(ctx context.Context) bool {
logger := klog.FromContext(ctx)
var encounteredError bool var encounteredError bool
for _, family := range []knftables.Family{knftables.IPv4Family, knftables.IPv6Family} { for _, family := range []knftables.Family{knftables.IPv4Family, knftables.IPv6Family} {
@ -650,10 +658,10 @@ func CleanupLeftovers() bool {
if err == nil { if err == nil {
tx := nft.NewTransaction() tx := nft.NewTransaction()
tx.Delete(&knftables.Table{}) tx.Delete(&knftables.Table{})
err = nft.Run(context.TODO(), tx) err = nft.Run(ctx, tx)
} }
if err != nil && !knftables.IsNotFound(err) { if err != nil && !knftables.IsNotFound(err) {
klog.ErrorS(err, "Error cleaning up nftables rules") logger.Error(err, "Error cleaning up nftables rules")
encounteredError = true encounteredError = true
} }
} }
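CleanupLeftovers now takes the context for two reasons: nft.Run honors its cancellation, and the logger is recovered from it instead of the global klog calls. A hedged sketch of that shape against a stand-in runner (runTx and cleanup are illustrative names, not the knftables API):

package main

import (
	"context"
	"errors"

	"k8s.io/klog/v2"
)

// runTx stands in for nft.Run: it respects cancellation via ctx.
func runTx(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return errors.New("no such table")
	}
}

func cleanup(ctx context.Context) bool {
	logger := klog.FromContext(ctx)
	if err := runTx(ctx); err != nil {
		logger.Error(err, "Error cleaning up nftables rules")
		return true
	}
	return false
}

func main() { _ = cleanup(context.Background()) }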
@ -767,7 +775,7 @@ func (proxier *Proxier) OnEndpointSlicesSynced() {
// is observed. // is observed.
func (proxier *Proxier) OnNodeAdd(node *v1.Node) { func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node",
"eventNode", node.Name, "currentNode", proxier.hostname) "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -782,7 +790,7 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
proxier.nodeLabels[k] = v proxier.nodeLabels[k] = v
} }
proxier.mu.Unlock() proxier.mu.Unlock()
klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels) proxier.logger.V(4).Info("Updated proxier node labels", "labels", node.Labels)
proxier.Sync() proxier.Sync()
} }
@ -791,7 +799,7 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
// node object is observed. // node object is observed.
func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) { func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node",
"eventNode", node.Name, "currentNode", proxier.hostname) "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -806,7 +814,7 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
proxier.nodeLabels[k] = v proxier.nodeLabels[k] = v
} }
proxier.mu.Unlock() proxier.mu.Unlock()
klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels) proxier.logger.V(4).Info("Updated proxier node labels", "labels", node.Labels)
proxier.Sync() proxier.Sync()
} }
@ -815,7 +823,7 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
// object is observed. // object is observed.
func (proxier *Proxier) OnNodeDelete(node *v1.Node) { func (proxier *Proxier) OnNodeDelete(node *v1.Node) {
if node.Name != proxier.hostname { if node.Name != proxier.hostname {
klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", proxier.logger.Error(nil, "Received a watch event for a node that doesn't match the current node",
"eventNode", node.Name, "currentNode", proxier.hostname) "eventNode", node.Name, "currentNode", proxier.hostname)
return return
} }
@ -974,7 +982,7 @@ func (proxier *Proxier) syncProxyRules() {
// don't sync rules till we've received services and endpoints // don't sync rules till we've received services and endpoints
if !proxier.isInitialized() { if !proxier.isInitialized() {
klog.V(2).InfoS("Not syncing nftables until Services and Endpoints have been received from master") proxier.logger.V(2).Info("Not syncing nftables until Services and Endpoints have been received from master")
return return
} }
@ -986,18 +994,18 @@ func (proxier *Proxier) syncProxyRules() {
start := time.Now() start := time.Now()
defer func() { defer func() {
metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start)) metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
klog.V(2).InfoS("SyncProxyRules complete", "elapsed", time.Since(start)) proxier.logger.V(2).Info("SyncProxyRules complete", "elapsed", time.Since(start))
}() }()
serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges) serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges)
endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges) endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges)
klog.V(2).InfoS("Syncing nftables rules") proxier.logger.V(2).Info("Syncing nftables rules")
success := false success := false
defer func() { defer func() {
if !success { if !success {
klog.InfoS("Sync failed", "retryingTime", proxier.syncPeriod) proxier.logger.Info("Sync failed", "retryingTime", proxier.syncPeriod)
proxier.syncRunner.RetryAfter(proxier.syncPeriod) proxier.syncRunner.RetryAfter(proxier.syncPeriod)
} }
}() }()
@ -1018,13 +1026,13 @@ func (proxier *Proxier) syncProxyRules() {
} }
} }
if deleted > 0 { if deleted > 0 {
klog.InfoS("Deleting stale nftables chains", "numChains", deleted) proxier.logger.Info("Deleting stale nftables chains", "numChains", deleted)
err := proxier.nftables.Run(context.TODO(), tx) err := proxier.nftables.Run(context.TODO(), tx)
if err != nil { if err != nil {
// We already deleted the entries from staleChains, but if // We already deleted the entries from staleChains, but if
// the chains still exist, they'll just get added back // the chains still exist, they'll just get added back
// (with a later timestamp) at the end of the sync. // (with a later timestamp) at the end of the sync.
klog.ErrorS(err, "Unable to delete stale chains; will retry later") proxier.logger.Error(err, "Unable to delete stale chains; will retry later")
// FIXME: metric // FIXME: metric
} }
} }
@ -1082,7 +1090,7 @@ func (proxier *Proxier) syncProxyRules() {
for svcName, svc := range proxier.svcPortMap { for svcName, svc := range proxier.svcPortMap {
svcInfo, ok := svc.(*servicePortInfo) svcInfo, ok := svc.(*servicePortInfo)
if !ok { if !ok {
klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName) proxier.logger.Error(nil, "Failed to cast serviceInfo", "serviceName", svcName)
continue continue
} }
protocol := strings.ToLower(string(svcInfo.Protocol())) protocol := strings.ToLower(string(svcInfo.Protocol()))
@ -1477,7 +1485,7 @@ func (proxier *Proxier) syncProxyRules() {
for _, ep := range allLocallyReachableEndpoints { for _, ep := range allLocallyReachableEndpoints {
epInfo, ok := ep.(*endpointInfo) epInfo, ok := ep.(*endpointInfo)
if !ok { if !ok {
klog.ErrorS(nil, "Failed to cast endpointsInfo", "endpointsInfo", ep) proxier.logger.Error(nil, "Failed to cast endpointsInfo", "endpointsInfo", ep)
continue continue
} }
@ -1525,7 +1533,7 @@ func (proxier *Proxier) syncProxyRules() {
for _, ep := range allLocallyReachableEndpoints { for _, ep := range allLocallyReachableEndpoints {
epInfo, ok := ep.(*endpointInfo) epInfo, ok := ep.(*endpointInfo)
if !ok { if !ok {
klog.ErrorS(nil, "Failed to cast endpointInfo", "endpointInfo", ep) proxier.logger.Error(nil, "Failed to cast endpointInfo", "endpointInfo", ep)
continue continue
} }
@ -1583,7 +1591,7 @@ func (proxier *Proxier) syncProxyRules() {
} }
} }
} else if !knftables.IsNotFound(err) { } else if !knftables.IsNotFound(err) {
klog.ErrorS(err, "Failed to list nftables chains: stale chains will not be deleted") proxier.logger.Error(err, "Failed to list nftables chains: stale chains will not be deleted")
} }
// OTOH, we can immediately delete any stale affinity sets // OTOH, we can immediately delete any stale affinity sets
@ -1597,11 +1605,11 @@ func (proxier *Proxier) syncProxyRules() {
} }
} }
} else if !knftables.IsNotFound(err) { } else if !knftables.IsNotFound(err) {
klog.ErrorS(err, "Failed to list nftables sets: stale affinity sets will not be deleted") proxier.logger.Error(err, "Failed to list nftables sets: stale affinity sets will not be deleted")
} }
// Sync rules. // Sync rules.
klog.V(2).InfoS("Reloading service nftables data", proxier.logger.V(2).Info("Reloading service nftables data",
"numServices", len(proxier.svcPortMap), "numServices", len(proxier.svcPortMap),
"numEndpoints", totalEndpoints, "numEndpoints", totalEndpoints,
) )
@ -1611,7 +1619,7 @@ func (proxier *Proxier) syncProxyRules() {
err = proxier.nftables.Run(context.TODO(), tx) err = proxier.nftables.Run(context.TODO(), tx)
if err != nil { if err != nil {
klog.ErrorS(err, "nftables sync failed") proxier.logger.Error(err, "nftables sync failed")
metrics.IptablesRestoreFailuresTotal.Inc() metrics.IptablesRestoreFailuresTotal.Inc()
return return
} }
@ -1621,7 +1629,7 @@ func (proxier *Proxier) syncProxyRules() {
for _, lastChangeTriggerTime := range lastChangeTriggerTimes { for _, lastChangeTriggerTime := range lastChangeTriggerTimes {
latency := metrics.SinceInSeconds(lastChangeTriggerTime) latency := metrics.SinceInSeconds(lastChangeTriggerTime)
metrics.NetworkProgrammingLatency.Observe(latency) metrics.NetworkProgrammingLatency.Observe(latency)
klog.V(4).InfoS("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency) proxier.logger.V(4).Info("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency)
} }
} }
@ -1636,10 +1644,10 @@ func (proxier *Proxier) syncProxyRules() {
// not "OnlyLocal", but the services list will not, and the serviceHealthServer // not "OnlyLocal", but the services list will not, and the serviceHealthServer
// will just drop those endpoints. // will just drop those endpoints.
if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil { if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil {
klog.ErrorS(err, "Error syncing healthcheck services") proxier.logger.Error(err, "Error syncing healthcheck services")
} }
if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil { if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil {
klog.ErrorS(err, "Error syncing healthcheck endpoints") proxier.logger.Error(err, "Error syncing healthcheck endpoints")
} }
// Finish housekeeping, clear stale conntrack entries for UDP Services // Finish housekeeping, clear stale conntrack entries for UDP Services

View File

@ -17,6 +17,7 @@ limitations under the License.
package proxy package proxy
import ( import (
"context"
"reflect" "reflect"
"sync" "sync"
@ -32,11 +33,13 @@ import (
type NodePodCIDRHandler struct { type NodePodCIDRHandler struct {
mu sync.Mutex mu sync.Mutex
podCIDRs []string podCIDRs []string
logger klog.Logger
} }
func NewNodePodCIDRHandler(podCIDRs []string) *NodePodCIDRHandler { func NewNodePodCIDRHandler(ctx context.Context, podCIDRs []string) *NodePodCIDRHandler {
return &NodePodCIDRHandler{ return &NodePodCIDRHandler{
podCIDRs: podCIDRs, podCIDRs: podCIDRs,
logger: klog.FromContext(ctx),
} }
} }
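The handler captures the logger in its struct rather than pulling it from a context on every event, because the node callbacks (OnNodeAdd and friends) receive no context parameter. A minimal sketch of that capture-at-construction pattern; the struct and method names are illustrative:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

type handler struct {
	logger klog.Logger
}

// newHandler mirrors NewNodePodCIDRHandler: the context is consumed once,
// the logger lives on for callbacks that have no context of their own.
func newHandler(ctx context.Context) *handler {
	return &handler{logger: klog.FromContext(ctx)}
}

func (h *handler) onEvent() {
	h.logger.Info("Setting current PodCIDRs", "podCIDRs", []string{"10.0.0.0/24"})
}

func main() { newHandler(context.Background()).onEvent() }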
@ -50,12 +53,12 @@ func (n *NodePodCIDRHandler) OnNodeAdd(node *v1.Node) {
podCIDRs := node.Spec.PodCIDRs podCIDRs := node.Spec.PodCIDRs
// initialize podCIDRs // initialize podCIDRs
if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 { if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 {
klog.InfoS("Setting current PodCIDRs", "podCIDRs", podCIDRs) n.logger.Info("Setting current PodCIDRs", "podCIDRs", podCIDRs)
n.podCIDRs = podCIDRs n.podCIDRs = podCIDRs
return return
} }
if !reflect.DeepEqual(n.podCIDRs, podCIDRs) { if !reflect.DeepEqual(n.podCIDRs, podCIDRs) {
klog.ErrorS(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting", n.logger.Error(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting",
"node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPodCIDRs", n.podCIDRs) "node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPodCIDRs", n.podCIDRs)
klog.FlushAndExit(klog.ExitFlushTimeout, 1) klog.FlushAndExit(klog.ExitFlushTimeout, 1)
} }
@ -68,12 +71,12 @@ func (n *NodePodCIDRHandler) OnNodeUpdate(_, node *v1.Node) {
podCIDRs := node.Spec.PodCIDRs podCIDRs := node.Spec.PodCIDRs
// initialize podCIDRs // initialize podCIDRs
if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 { if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 {
klog.InfoS("Setting current PodCIDRs", "podCIDRs", podCIDRs) n.logger.Info("Setting current PodCIDRs", "podCIDRs", podCIDRs)
n.podCIDRs = podCIDRs n.podCIDRs = podCIDRs
return return
} }
if !reflect.DeepEqual(n.podCIDRs, podCIDRs) { if !reflect.DeepEqual(n.podCIDRs, podCIDRs) {
klog.ErrorS(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting", n.logger.Error(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting",
"node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPODCIDRs", n.podCIDRs) "node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPODCIDRs", n.podCIDRs)
klog.FlushAndExit(klog.ExitFlushTimeout, 1) klog.FlushAndExit(klog.ExitFlushTimeout, 1)
} }
@ -81,7 +84,7 @@ func (n *NodePodCIDRHandler) OnNodeUpdate(_, node *v1.Node) {
// OnNodeDelete is a handler for Node deletes. // OnNodeDelete is a handler for Node deletes.
func (n *NodePodCIDRHandler) OnNodeDelete(node *v1.Node) { func (n *NodePodCIDRHandler) OnNodeDelete(node *v1.Node) {
klog.ErrorS(nil, "Current Node is being deleted", "node", klog.KObj(node)) n.logger.Error(nil, "Current Node is being deleted", "node", klog.KObj(node))
} }
// OnNodeSynced is a handler for Node syncs. // OnNodeSynced is a handler for Node syncs.

View File

@ -19,6 +19,9 @@ package util
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"strings"
"github.com/go-logr/logr"
) )
// LineBuffer is an interface for writing lines of input to a bytes.Buffer // LineBuffer is an interface for writing lines of input to a bytes.Buffer
@ -46,6 +49,8 @@ type LineBuffer interface {
Lines() int Lines() int
} }
var _ logr.Marshaler = &realLineBuffer{}
type realLineBuffer struct { type realLineBuffer struct {
b bytes.Buffer b bytes.Buffer
lines int lines int
@ -108,6 +113,11 @@ func (buf *realLineBuffer) Lines() int {
return buf.lines return buf.lines
} }
// Implements the logr.Marshaler interface
func (buf *realLineBuffer) MarshalLog() any {
return strings.Split(buf.b.String(), "\n")
}
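Implementing logr.Marshaler (hence the new strings and go-logr imports above) lets structured backends render the buffer as a slice of rule lines instead of one long string with embedded newlines; logr-aware sinks call MarshalLog automatically when the value appears as a key/value argument. A minimal sketch of the hook with a trivial buffer type, assuming only the public logr and klog APIs:

package main

import (
	"strings"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2"
)

type lineBuf struct{ b strings.Builder }

var _ logr.Marshaler = &lineBuf{}

// MarshalLog is invoked by the logging backend; returning []string makes
// JSON sinks emit one array element per rule line.
func (l *lineBuf) MarshalLog() any {
	return strings.Split(l.b.String(), "\n")
}

func main() {
	buf := &lineBuf{}
	buf.b.WriteString("-A KUBE-SERVICES ...\n-A KUBE-NODEPORTS ...")
	klog.Background().Info("rules", "lines", buf)
}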
type discardLineBuffer struct { type discardLineBuffer struct {
lines int lines int
} }