diff --git a/pkg/kubelet/network/kubenet/kubenet_linux.go b/pkg/kubelet/network/kubenet/kubenet_linux.go
index 497f770dc58..e1c707f7659 100644
--- a/pkg/kubelet/network/kubenet/kubenet_linux.go
+++ b/pkg/kubelet/network/kubenet/kubenet_linux.go
@@ -69,9 +69,9 @@ type kubenetNetworkPlugin struct {
 	loConfig    *libcni.NetworkConfig
 	cniConfig   libcni.CNI
 	shaper      bandwidth.BandwidthShaper
-	podCIDRs    map[kubecontainer.ContainerID]string
+	mu          sync.Mutex //Mutex for protecting podIPs map and netConfig
+	podIPs      map[kubecontainer.ContainerID]string
 	MTU         int
-	mu          sync.Mutex //Mutex for protecting podCIDRs map and netConfig
 	execer      utilexec.Interface
 	nsenterPath string
 	hairpinMode componentconfig.HairpinMode
@@ -86,7 +86,7 @@ func NewPlugin() network.NetworkPlugin {
 	iptInterface := utiliptables.New(execer, dbus, protocol)
 	return &kubenetNetworkPlugin{
-		podCIDRs:    make(map[kubecontainer.ContainerID]string),
+		podIPs:      make(map[kubecontainer.ContainerID]string),
 		hostPortMap: make(map[hostport]closeable),
 		MTU:         1460, //TODO: don't hardcode this
 		execer:      utilexec.New(),
@@ -317,10 +317,10 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k
 	if err != nil {
 		return err
 	}
-	if res.IP4 == nil || res.IP4.IP.String() == "" {
+	if res.IP4 == nil || res.IP4.IP.IP.String() == "" {
 		return fmt.Errorf("CNI plugin reported no IPv4 address for container %v.", id)
 	}
-	plugin.podCIDRs[id] = res.IP4.IP.String()
+	plugin.podIPs[id] = res.IP4.IP.IP.String()
 
 	// Put the container bridge into promiscuous mode to force it to accept hairpin packets.
 	// TODO: Remove this once the kernel bug (#20096) is fixed.
@@ -346,8 +346,8 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k
 	}
 
 	if egress != nil || ingress != nil {
-		ipAddr, _, _ := net.ParseCIDR(plugin.podCIDRs[id])
-		if err = plugin.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", ipAddr.String()), egress, ingress); err != nil {
+		ipAddr := plugin.podIPs[id]
+		if err := plugin.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", ipAddr), egress, ingress); err != nil {
 			return fmt.Errorf("Failed to add pod to shaper: %v", err)
 		}
 	}
@@ -369,26 +369,24 @@ func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, i
 		return fmt.Errorf("Kubenet needs a PodCIDR to tear down pods")
 	}
 
-	// no cached CIDR is Ok during teardown
-	cidr, hasCIDR := plugin.podCIDRs[id]
-	if hasCIDR {
-		glog.V(5).Infof("Removing pod CIDR %s from shaper", cidr)
+	// no cached IP is Ok during teardown
+	podIP, hasIP := plugin.podIPs[id]
+	if hasIP {
+		glog.V(5).Infof("Removing pod IP %s from shaper", podIP)
 		// shaper wants /32
-		if addr, _, err := net.ParseCIDR(cidr); err == nil {
-			if err = plugin.shaper.Reset(fmt.Sprintf("%s/32", addr.String())); err != nil {
-				glog.Warningf("Failed to remove pod CIDR %s from shaper: %v", cidr, err)
-			}
+		if err := plugin.shaper.Reset(fmt.Sprintf("%s/32", podIP)); err != nil {
+			glog.Warningf("Failed to remove pod IP %s from shaper: %v", podIP, err)
 		}
 	}
 	if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil {
 		// This is to prevent returning error when TearDownPod is called twice on the same pod. This helps to reduce event pollution.
-		if !hasCIDR {
+		if !hasIP {
 			glog.Warningf("Failed to delete container from kubenet: %v", err)
 			return nil
 		}
 		return err
 	}
-	delete(plugin.podCIDRs, id)
+	delete(plugin.podIPs, id)
 	plugin.syncHostportsRules()
 
 	return nil
@@ -400,11 +398,8 @@ func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name s
 	plugin.mu.Lock()
 	defer plugin.mu.Unlock()
 	// Assuming the ip of pod does not change. Try to retrieve ip from kubenet map first.
-	if cidr, ok := plugin.podCIDRs[id]; ok {
-		ip, _, err := net.ParseCIDR(strings.Trim(cidr, "\n"))
-		if err == nil {
-			return &network.PodNetworkStatus{IP: ip}, nil
-		}
+	if podIP, ok := plugin.podIPs[id]; ok {
+		return &network.PodNetworkStatus{IP: net.ParseIP(podIP)}, nil
 	}
 
 	netnsPath, err := plugin.host.GetRuntime().GetNetNS(id)
@@ -429,7 +424,7 @@ func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name s
 	if err != nil {
 		return nil, fmt.Errorf("Kubenet failed to parse ip from output %s due to %v", output, err)
 	}
-	plugin.podCIDRs[id] = ip.String()
+	plugin.podIPs[id] = ip.String()
 
 	return &network.PodNetworkStatus{IP: ip}, nil
 }
@@ -549,7 +544,7 @@ func (plugin *kubenetNetworkPlugin) openPodHostports(pod *api.Pod) (map[hostport
 //syncHostportMap syncs newly opened hostports to kubenet on successful pod setup. If pod setup failed, then clean up.
 func (plugin *kubenetNetworkPlugin) syncHostportMap(id kubecontainer.ContainerID, hostportMap map[hostport]closeable) {
 	// if pod ip cannot be retrieved from podCIDR, then assume pod setup failed.
-	if _, ok := plugin.podCIDRs[id]; !ok {
+	if _, ok := plugin.podIPs[id]; !ok {
 		for hp, socket := range hostportMap {
 			err := socket.Close()
 			if err != nil {
@@ -580,23 +575,18 @@ func (plugin *kubenetNetworkPlugin) gatherAllHostports() (map[api.ContainerPort]
 			}
 		}
 		// Assuming if kubenet has the pod's ip, the pod is alive and its host port should be presented.
-		cidr, ok := plugin.podCIDRs[podInfraContainerId]
+		podIP, ok := plugin.podIPs[podInfraContainerId]
 		if !ok {
 			// The POD has been delete. Ignore
 			continue
 		}
-		podIP, _, err := net.ParseCIDR(strings.Trim(cidr, "\n"))
-		if err != nil {
-			glog.V(3).Info("Failed to retrieve pod ip for %s-%s: %v", p.Namespace, p.Name, err)
-			continue
-		}
 		// Need the complete api.Pod object
 		pod, ok := plugin.host.GetPodByName(p.Namespace, p.Name)
 		if ok {
 			for _, container := range pod.Spec.Containers {
 				for _, port := range container.Ports {
 					if port.HostPort != 0 {
-						podHostportMap[port] = targetPod{podFullName: kubecontainer.GetPodFullName(pod), podIP: podIP.String()}
+						podHostportMap[port] = targetPod{podFullName: kubecontainer.GetPodFullName(pod), podIP: podIP}
 					}
 				}
 			}
diff --git a/pkg/kubelet/network/kubenet/kubenet_linux_test.go b/pkg/kubelet/network/kubenet/kubenet_linux_test.go
index bd83ad06218..c55a9e576ec 100644
--- a/pkg/kubelet/network/kubenet/kubenet_linux_test.go
+++ b/pkg/kubelet/network/kubenet/kubenet_linux_test.go
@@ -38,17 +38,17 @@ var _ network.NetworkPlugin = &kubenetNetworkPlugin{}
 
 func newFakeKubenetPlugin(initMap map[kubecontainer.ContainerID]string, execer exec.Interface, host network.Host) *kubenetNetworkPlugin {
 	return &kubenetNetworkPlugin{
-		podCIDRs: initMap,
-		execer:   execer,
-		MTU:      1460,
-		host:     host,
+		podIPs: initMap,
+		execer: execer,
+		MTU:    1460,
+		host:   host,
 	}
 }
 
 func TestGetPodNetworkStatus(t *testing.T) {
 	podIPMap := make(map[kubecontainer.ContainerID]string)
-	podIPMap[kubecontainer.ContainerID{ID: "1"}] = "10.245.0.2/32"
-	podIPMap[kubecontainer.ContainerID{ID: "2"}] = "10.245.0.3/32"
+	podIPMap[kubecontainer.ContainerID{ID: "1"}] = "10.245.0.2"
+	podIPMap[kubecontainer.ContainerID{ID: "2"}] = "10.245.0.3"
 
 	testCases := []struct {
 		id        string
@@ -145,7 +145,7 @@ func TestTeardownCallsShaper(t *testing.T) {
 	kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
 
 	existingContainerID := kubecontainer.BuildContainerID("docker", "123")
-	kubenet.podCIDRs[existingContainerID] = "10.0.0.1/24"
+	kubenet.podIPs[existingContainerID] = "10.0.0.1"
 
 	if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
 		t.Fatalf("Unexpected error in TearDownPod: %v", err)
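
Reviewer note (not part of the patch): a minimal standalone sketch of the string handling this change moves to. The podIPs map now caches bare IPv4 addresses rather than CIDR strings, so readers can call net.ParseIP directly and only append "/32" where the shaper expects a CIDR, instead of round-tripping through net.ParseCIDR. The map literal and container key below are illustrative assumptions, not code from the tree.

package main

import (
	"fmt"
	"net"
)

func main() {
	// Illustrative stand-in for plugin.podIPs: container ID -> bare IPv4 address.
	podIPs := map[string]string{
		"container-1": "10.245.0.2",
	}

	ip := podIPs["container-1"]

	// GetPodNetworkStatus-style fast path: the cached string parses directly.
	fmt.Println(net.ParseIP(ip)) // 10.245.0.2

	// Shaper paths (ReconcileCIDR/Reset) still want a /32 CIDR, built with Sprintf.
	fmt.Println(fmt.Sprintf("%s/32", ip)) // 10.245.0.2/32
}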