mirror of https://github.com/k3s-io/kubernetes.git
Fixed improperly migrated logs (#105763)
* fixed improperly migrated logs
* small fixes
* small fix
* Update pkg/proxy/iptables/proxier.go
* Update pkg/proxy/healthcheck/service_health.go
* Update pkg/proxy/ipvs/proxier.go
* Update pkg/proxy/winkernel/proxier.go
* refactoring
* reverted some files back to master

Co-authored-by: Marek Siarkowicz <marek.siarkowicz@protonmail.com>
parent d3f81a1be6
commit 81636f2158
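For orientation, here is a minimal sketch (not code from this commit) of the klog/v2 pattern the hunks below converge on: printf-style Errorf/Infof calls become a constant, capitalized message with the data passed as key/value pairs, and the error moves into the first argument of ErrorS. The endpoint IP and port values are invented for illustration.

```go
package main

import (
	"errors"
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	endpointIP := "10.0.0.12" // invented example value
	err := errors.New("connection refused")

	// Before migration: message and data interleaved in a format string.
	klog.Errorf("Failed to delete endpoint connections for %s: %v", endpointIP, err)

	// After migration: constant message, error as the first argument,
	// data attached as key/value pairs.
	klog.ErrorS(err, "Failed to delete endpoint connections", "endpointIP", endpointIP)
	klog.V(2).InfoS("Opened local port", "port", 31234)
}
```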
pkg/proxy/healthcheck/service_health.go

@@ -56,7 +56,7 @@ func newServiceHealthServer(hostname string, recorder events.EventRecorder, list
 nodeAddresses, err := utilproxy.GetNodeAddresses(nodePortAddresses, utilproxy.RealNetwork{})
 if err != nil || nodeAddresses.Len() == 0 {
-klog.ErrorS(err, "Health Check Port:Failed to get node ip address matching node port addresses. Health check port will listen to all node addresses", "nodePortAddresses", nodePortAddresses)
+klog.ErrorS(err, "Failed to get node ip address matching node port addresses, health check port will listen to all node addresses", "nodePortAddresses", nodePortAddresses)
 nodeAddresses = sets.NewString()
 nodeAddresses.Insert(utilproxy.IPv4ZeroCIDR)
 }

@@ -104,23 +104,24 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err
 // Remove any that are not needed any more.
 for nsn, svc := range hcs.services {
 if port, found := newServices[nsn]; !found || port != svc.port {
-klog.V(2).InfoS("Closing healthcheck", "service", nsn.String(), "port", svc.port)
+klog.V(2).InfoS("Closing healthcheck", "service", nsn, "port", svc.port)
 // errors are loged in closeAll()
 _ = svc.closeAll()
 delete(hcs.services, nsn)
 }
 }
 // Add any that are needed.
 for nsn, port := range newServices {
 if hcs.services[nsn] != nil {
-klog.V(3).InfoS("Existing healthcheck", "service", nsn.String(), "port", port)
+klog.V(3).InfoS("Existing healthcheck", "service", nsn, "port", port)
 continue
 }
-klog.V(2).InfoS("Opening healthcheck", "service", nsn.String(), "port", port)
+klog.V(2).InfoS("Opening healthcheck", "service", nsn, "port", port)
 svc := &hcInstance{nsn: nsn, port: port}
 err := svc.listenAndServeAll(hcs)

@@ -137,7 +138,7 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err
 UID: types.UID(nsn.String()),
 }, nil, api.EventTypeWarning, "FailedToStartServiceHealthcheck", "Listen", msg)
 }
-klog.ErrorS(err, "failed to start healthcheck", "node", hcs.hostname, "service", nsn.String(), "port", port)
+klog.ErrorS(err, "Failed to start healthcheck", "node", hcs.hostname, "service", nsn, "port", port)
 continue
 }
 hcs.services[nsn] = svc

@@ -181,12 +182,12 @@ func (hcI *hcInstance) listenAndServeAll(hcs *server) error {
 // start serving
 go func(hcI *hcInstance, listener net.Listener, httpSrv httpServer) {
 // Serve() will exit when the listener is closed.
-klog.V(3).InfoS("Starting goroutine for healthcheck", "service", hcI.nsn.String(), "address", listener.Addr().String())
+klog.V(3).InfoS("Starting goroutine for healthcheck", "service", hcI.nsn, "address", listener.Addr())
 if err := httpSrv.Serve(listener); err != nil {
-klog.ErrorS(err, "Healthcheck closed", "service", hcI.nsn.String())
+klog.ErrorS(err, "Healthcheck closed", "service", hcI.nsn)
 return
 }
-klog.V(3).InfoS("Healthcheck closed", "service", hcI.nsn.String(), "address", listener.Addr().String())
+klog.V(3).InfoS("Healthcheck closed", "service", hcI.nsn, "address", listener.Addr())
 }(hcI, listener, httpSrv)
 hcI.listeners = append(hcI.listeners, listener)

@@ -200,7 +201,7 @@ func (hcI *hcInstance) closeAll() error {
 errors := []error{}
 for _, listener := range hcI.listeners {
 if err := listener.Close(); err != nil {
-klog.Errorf("Service %q -- CloseListener(%v) error:%v", hcI.nsn, listener.Addr(), err)
+klog.ErrorS(err, "Error closing listener for health check service", "service", hcI.nsn, "address", listener.Addr())
 errors = append(errors, err)
 }
 }

@@ -224,7 +225,7 @@ func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
 svc, ok := h.hcs.services[h.name]
 if !ok || svc == nil {
 h.hcs.lock.RUnlock()
-klog.ErrorS(nil, "Received request for closed healthcheck", "service", h.name.String())
+klog.ErrorS(nil, "Received request for closed healthcheck", "service", h.name)
 return
 }
 count := svc.endpoints

@@ -254,10 +255,10 @@ func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) erro
 for nsn, count := range newEndpoints {
 if hcs.services[nsn] == nil {
-klog.V(3).InfoS("Not saving endpoints for unknown healthcheck", "service", nsn.String())
+klog.V(3).InfoS("Not saving endpoints for unknown healthcheck", "service", nsn)
 continue
 }
-klog.V(3).InfoS("Reporting endpoints for healthcheck", "endpointCount", count, "service", nsn.String())
+klog.V(3).InfoS("Reporting endpoints for healthcheck", "endpointCount", count, "service", nsn)
 hcs.services[nsn].endpoints = count
 }
 for nsn, hci := range hcs.services {
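A recurring change in the hunks above is dropping `.String()` from values such as `nsn` and `listener.Addr()`. As a hedged sketch (not from the PR) of why that works: types.NamespacedName implements fmt.Stringer, and klog's key/value serializer renders Stringer values itself, so the text output should stay equivalent while structured sinks receive the value rather than a pre-rendered string; it also defers the String() call until the line is actually emitted. The namespace, name, and port below are invented.

```go
package main

import (
	"flag"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// Invented example; in the proxy code this is the service's NamespacedName.
	nsn := types.NamespacedName{Namespace: "default", Name: "my-svc"}

	// Old style: nsn.String() runs at the call site even when -v is too low
	// for this line to be logged.
	klog.V(3).InfoS("Existing healthcheck", "service", nsn.String(), "port", 30080)

	// New style: klog formats nsn (a fmt.Stringer) only if the line is emitted,
	// and a structured backend (e.g. JSON) keeps the value itself.
	klog.V(3).InfoS("Existing healthcheck", "service", nsn, "port", 30080)
}
```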
pkg/proxy/iptables/proxier.go

@@ -272,7 +272,7 @@ func NewProxier(ipt utiliptables.Interface,
 // are connected to a Linux bridge (but not SDN bridges). Until most
 // plugins handle this, log when config is missing
 if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 {
-klog.InfoS("Missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended")
+klog.InfoS("Missing br-netfilter module or unset sysctl br-nf-call-iptables, proxy may not work as intended")
 }
 // Generate the masquerade mark to use for SNAT rules.

@@ -291,7 +291,7 @@ func NewProxier(ipt utiliptables.Interface,
 nodePortAddresses = ipFamilyMap[ipFamily]
 // Log the IPs not matching the ipFamily
 if ips, ok := ipFamilyMap[utilproxy.OtherIPFamily(ipFamily)]; ok && len(ips) > 0 {
-klog.InfoS("Found node IPs of the wrong family", "ipFamily", ipFamily, "ips", strings.Join(ips, ","))
+klog.InfoS("Found node IPs of the wrong family", "ipFamily", ipFamily, "IPs", strings.Join(ips, ","))
 }
 proxier := &Proxier{

@@ -737,23 +737,23 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceE
 if nodePort != 0 {
 err = conntrack.ClearEntriesForPortNAT(proxier.exec, endpointIP, nodePort, svcProto)
 if err != nil {
-klog.ErrorS(err, "Failed to delete nodeport-related endpoint connections", "servicePortName", epSvcPair.ServicePortName.String())
+klog.ErrorS(err, "Failed to delete nodeport-related endpoint connections", "servicePortName", epSvcPair.ServicePortName)
 }
 }
 err = conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIP().String(), endpointIP, svcProto)
 if err != nil {
-klog.ErrorS(err, "Failed to delete endpoint connections", "servicePortName", epSvcPair.ServicePortName.String())
+klog.ErrorS(err, "Failed to delete endpoint connections", "servicePortName", epSvcPair.ServicePortName)
 }
 for _, extIP := range svcInfo.ExternalIPStrings() {
 err := conntrack.ClearEntriesForNAT(proxier.exec, extIP, endpointIP, svcProto)
 if err != nil {
-klog.ErrorS(err, "Failed to delete endpoint connections for externalIP", "servicePortName", epSvcPair.ServicePortName.String(), "externalIP", extIP)
+klog.ErrorS(err, "Failed to delete endpoint connections for externalIP", "servicePortName", epSvcPair.ServicePortName, "externalIP", extIP)
 }
 }
 for _, lbIP := range svcInfo.LoadBalancerIPStrings() {
 err := conntrack.ClearEntriesForNAT(proxier.exec, lbIP, endpointIP, svcProto)
 if err != nil {
-klog.ErrorS(err, "Failed to delete endpoint connections for LoadBalancerIP", "servicePortName", epSvcPair.ServicePortName.String(), "loadBalancerIP", lbIP)
+klog.ErrorS(err, "Failed to delete endpoint connections for LoadBalancerIP", "servicePortName", epSvcPair.ServicePortName, "loadBalancerIP", lbIP)
 }
 }
 }

@@ -807,7 +807,7 @@ func (proxier *Proxier) syncProxyRules() {
 // an UDP service that changes from 0 to non-0 endpoints is considered stale.
 for _, svcPortName := range endpointUpdateResult.StaleServiceNames {
 if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && conntrack.IsClearConntrackNeeded(svcInfo.Protocol()) {
-klog.V(2).InfoS("Stale service", "protocol", strings.ToLower(string(svcInfo.Protocol())), "svcPortName", svcPortName.String(), "clusterIP", svcInfo.ClusterIP().String())
+klog.V(2).InfoS("Stale service", "protocol", strings.ToLower(string(svcInfo.Protocol())), "servicePortName", svcPortName, "clusterIP", svcInfo.ClusterIP())
 conntrackCleanupServiceIPs.Insert(svcInfo.ClusterIP().String())
 for _, extIP := range svcInfo.ExternalIPStrings() {
 conntrackCleanupServiceIPs.Insert(extIP)

@@ -817,7 +817,7 @@ func (proxier *Proxier) syncProxyRules() {
 }
 nodePort := svcInfo.NodePort()
 if svcInfo.Protocol() == v1.ProtocolUDP && nodePort != 0 {
-klog.V(2).Infof("Stale %s service NodePort %v -> %d", strings.ToLower(string(svcInfo.Protocol())), svcPortName, nodePort)
+klog.V(2).InfoS("Stale service", "protocol", strings.ToLower(string(svcInfo.Protocol())), "servicePortName", svcPortName, "nodePort", nodePort)
 conntrackCleanupServiceNodePorts.Insert(nodePort)
 }
 }

@@ -984,7 +984,7 @@ func (proxier *Proxier) syncProxyRules() {
 for svcName, svc := range proxier.serviceMap {
 svcInfo, ok := svc.(*serviceInfo)
 if !ok {
-klog.ErrorS(nil, "Failed to cast serviceInfo", "svcName", svcName.String())
+klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName)
 continue
 }
 isIPv6 := netutils.IsIPv6(svcInfo.ClusterIP())

@@ -1072,7 +1072,7 @@ func (proxier *Proxier) syncProxyRules() {
 Protocol: netutils.Protocol(svcInfo.Protocol()),
 }
 if proxier.portsMap[lp] != nil {
-klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String())
+klog.V(4).InfoS("Port was open before and is still needed", "port", lp)
 replacementPortsMap[lp] = proxier.portsMap[lp]
 } else {
 socket, err := proxier.portMapper.OpenLocalPort(&lp)

@@ -1086,10 +1086,10 @@ func (proxier *Proxier) syncProxyRules() {
 UID: types.UID(proxier.hostname),
 Namespace: "",
 }, nil, v1.EventTypeWarning, err.Error(), "SyncProxyRules", msg)
-klog.ErrorS(err, "can't open port, skipping it", "port", lp.String())
+klog.ErrorS(err, "Can't open port, skipping it", "port", lp)
 continue
 }
-klog.V(2).InfoS("Opened local port", "port", lp.String())
+klog.V(2).InfoS("Opened local port", "port", lp)
 replacementPortsMap[lp] = socket
 }
 }

@@ -1244,7 +1244,7 @@ func (proxier *Proxier) syncProxyRules() {
 // For ports on node IPs, open the actual port and hold it.
 for _, lp := range lps {
 if proxier.portsMap[lp] != nil {
-klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String())
+klog.V(4).InfoS("Port was open before and is still needed", "port", lp)
 replacementPortsMap[lp] = proxier.portsMap[lp]
 } else if svcInfo.Protocol() != v1.ProtocolSCTP {
 socket, err := proxier.portMapper.OpenLocalPort(&lp)

@@ -1258,10 +1258,10 @@ func (proxier *Proxier) syncProxyRules() {
 UID: types.UID(proxier.hostname),
 Namespace: "",
 }, nil, v1.EventTypeWarning, err.Error(), "SyncProxyRules", msg)
-klog.ErrorS(err, "can't open port, skipping it", "port", lp.String())
+klog.ErrorS(err, "Can't open port, skipping it", "port", lp)
 continue
 }
-klog.V(2).InfoS("Opened local port", "port", lp.String())
+klog.V(2).InfoS("Opened local port", "port", lp)
 replacementPortsMap[lp] = socket
 }
 }

@@ -1328,7 +1328,7 @@ func (proxier *Proxier) syncProxyRules() {
 for _, ep := range allEndpoints {
 epInfo, ok := ep.(*endpointsInfo)
 if !ok {
-klog.ErrorS(err, "Failed to cast endpointsInfo", "endpointsInfo", ep.String())
+klog.ErrorS(err, "Failed to cast endpointsInfo", "endpointsInfo", ep)
 continue
 }

@@ -1546,7 +1546,7 @@ func (proxier *Proxier) syncProxyRules() {
 }
 // Ignore IP addresses with incorrect version
 if isIPv6 && !netutils.IsIPv6String(address) || !isIPv6 && netutils.IsIPv6String(address) {
-klog.ErrorS(nil, "IP has incorrect IP version", "ip", address)
+klog.ErrorS(nil, "IP has incorrect IP version", "IP", address)
 continue
 }
 // create nodeport rules for each IP one by one

@@ -1659,17 +1659,17 @@ func (proxier *Proxier) syncProxyRules() {
 // Finish housekeeping.
 // Clear stale conntrack entries for UDP Services, this has to be done AFTER the iptables rules are programmed.
 // TODO: these could be made more consistent.
-klog.V(4).InfoS("Deleting conntrack stale entries for Services", "ips", conntrackCleanupServiceIPs.UnsortedList())
+klog.V(4).InfoS("Deleting conntrack stale entries for services", "IPs", conntrackCleanupServiceIPs.UnsortedList())
 for _, svcIP := range conntrackCleanupServiceIPs.UnsortedList() {
 if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil {
-klog.ErrorS(err, "Failed to delete stale service connections", "ip", svcIP)
+klog.ErrorS(err, "Failed to delete stale service connections", "IP", svcIP)
 }
 }
-klog.V(4).InfoS("Deleting conntrack stale entries for Services", "nodeports", conntrackCleanupServiceNodePorts.UnsortedList())
+klog.V(4).InfoS("Deleting conntrack stale entries for services", "nodePorts", conntrackCleanupServiceNodePorts.UnsortedList())
 for _, nodePort := range conntrackCleanupServiceNodePorts.UnsortedList() {
 err := conntrack.ClearEntriesForPort(proxier.exec, nodePort, isIPv6, v1.ProtocolUDP)
 if err != nil {
-klog.ErrorS(err, "Failed to clear udp conntrack", "port", nodePort)
+klog.ErrorS(err, "Failed to clear udp conntrack", "nodePort", nodePort)
 }
 }
 klog.V(4).InfoS("Deleting stale endpoint connections", "endpoints", endpointUpdateResult.StaleEndpoints)
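Several hunks above only rename log keys ("svcPortName" to "servicePortName", "ips" to "IPs", "nodeports" to "nodePorts"). A brief sketch with invented values (not from the PR) of why that matters: once logs are consumed as structured data, the key is the query handle, so every call site has to agree on one spelling.

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// Invented stand-ins for proxy.ServicePortName and a node port.
	servicePortName := "default/my-svc:http"
	nodePort := 31234

	// Both lines use the same key for the same concept, so a single filter
	// such as grep 'servicePortName="default/my-svc:http"' over the log
	// (or a jq query on JSON output) catches them all.
	klog.V(2).InfoS("Stale service", "servicePortName", servicePortName, "nodePort", nodePort)
	klog.ErrorS(nil, "Failed to delete endpoint connections", "servicePortName", servicePortName)
}
```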
pkg/proxy/winkernel/hns.go

@@ -39,7 +39,7 @@ var (
 func (hns hnsV2) getNetworkByName(name string) (*hnsNetworkInfo, error) {
 hnsnetwork, err := hcn.GetNetworkByName(name)
 if err != nil {
-klog.Errorf("%v", err)
+klog.ErrorS(err, "Error getting network by name")
 return nil, err
 }

@@ -84,7 +84,7 @@ func (hns hnsV2) getEndpointByID(id string) (*endpointsInfo, error) {
 func (hns hnsV2) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) {
 hnsnetwork, err := hcn.GetNetworkByName(networkName)
 if err != nil {
-klog.Errorf("%v", err)
+klog.ErrorS(err, "Error getting network by name")
 return nil, err
 }

@@ -178,7 +178,7 @@ func (hns hnsV2) deleteEndpoint(hnsID string) error {
 }
 err = hnsendpoint.Delete()
 if err == nil {
-klog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID)
+klog.V(3).InfoS("Remote endpoint resource deleted", "hnsID", hnsID)
 }
 return err
 }