remove legacy leftovers of the port mapping functionality that was moved to CNI

Sergey Kanzhelev 2020-07-30 23:12:16 +00:00
parent 0c642b6ef0
commit d20fd40884
16 changed files with 41 additions and 987 deletions

View File

@@ -105,7 +105,6 @@ pkg/kubelet/dockershim/libdocker
pkg/kubelet/dockershim/network
pkg/kubelet/dockershim/network/cni/testing
pkg/kubelet/dockershim/network/hostport
pkg/kubelet/dockershim/network/hostport/testing
pkg/kubelet/dockershim/network/kubenet
pkg/kubelet/dockershim/network/testing
pkg/kubelet/pluginmanager/pluginwatcher

View File

@@ -333,22 +333,18 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) {
}
}
// We need to create some default port name if it's not specified, since
// this is necessary for the dockershim CNI driver.
// https://github.com/kubernetes/kubernetes/pull/82374#issuecomment-529496888
if p.Name == "" {
pm.Name = fmt.Sprintf("%s-%s-%s:%d", container.Name, family, p.Protocol, p.ContainerPort)
} else {
pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name)
var name string = p.Name
if name == "" {
name = fmt.Sprintf("%s-%s:%d", family, p.Protocol, p.ContainerPort)
}
// Protect against a port name being used more than once in a container.
if _, ok := names[pm.Name]; ok {
klog.Warningf("Port name conflicted, %q is defined more than once", pm.Name)
if _, ok := names[name]; ok {
klog.Warningf("Port name conflicted, %q is defined more than once", name)
continue
}
ports = append(ports, pm)
names[pm.Name] = struct{}{}
names[name] = struct{}{}
}
return
}
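After this change the computed name is only a deduplication key (the Name field is dropped from the PortMapping struct further down); here is a minimal standalone sketch of how that key is derived — family is "v4", "v6", or "any", as the updated tests below show:

package main

import "fmt"

// dedupKey mirrors the new logic: a user-supplied port name is used
// directly, while an unnamed port falls back to
// "<family>-<protocol>:<containerPort>". The old container-name prefix is
// gone, and the key now only guards against duplicate port declarations;
// it is no longer stored on the mapping.
func dedupKey(userName, family, protocol string, containerPort int) string {
	if userName == "" {
		return fmt.Sprintf("%s-%s:%d", family, protocol, containerPort)
	}
	return userName
}

func main() {
	fmt.Println(dedupKey("", "v4", "TCP", 80))      // v4-TCP:80
	fmt.Println(dedupKey("foo", "any", "UDP", 555)) // foo
}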

View File

@@ -558,9 +558,8 @@ func TestMakePortMappings(t *testing.T) {
HostIP: ip,
}
}
portMapping := func(name string, protocol v1.Protocol, containerPort, hostPort int, ip string) PortMapping {
portMapping := func(protocol v1.Protocol, containerPort, hostPort int, ip string) PortMapping {
return PortMapping{
Name: name,
Protocol: protocol,
ContainerPort: containerPort,
HostPort: hostPort,
@@ -590,11 +589,11 @@ func TestMakePortMappings(t *testing.T) {
},
},
[]PortMapping{
portMapping("fooContainer-v4-TCP:80", v1.ProtocolTCP, 80, 8080, "127.0.0.1"),
portMapping("fooContainer-v4-TCP:443", v1.ProtocolTCP, 443, 4343, "192.168.0.1"),
portMapping("fooContainer-foo", v1.ProtocolUDP, 555, 5555, ""),
portMapping("fooContainer-v6-TCP:80", v1.ProtocolTCP, 80, 8080, "::"),
portMapping("fooContainer-any-TCP:1234", v1.ProtocolTCP, 1234, 5678, ""),
portMapping(v1.ProtocolTCP, 80, 8080, "127.0.0.1"),
portMapping(v1.ProtocolTCP, 443, 4343, "192.168.0.1"),
portMapping(v1.ProtocolUDP, 555, 5555, ""),
portMapping(v1.ProtocolTCP, 80, 8080, "::"),
portMapping(v1.ProtocolTCP, 1234, 5678, ""),
},
},
}

View File

@@ -403,8 +403,6 @@ type Mount struct {
// PortMapping contains information about the port mapping.
type PortMapping struct {
// Name of the port mapping
Name string
// Protocol of the port mapping.
Protocol v1.Protocol
// The port number within the container.
@@ -433,8 +431,6 @@ type RunContainerOptions struct {
Mounts []Mount
// The host devices mapped into the containers.
Devices []DeviceInfo
// The port mappings for the containers.
PortMappings []PortMapping
// The annotations for the container
// These annotations are generated by other components (i.e.,
// not users). Currently, only device plugins populate the annotations.

View File

@@ -34,14 +34,14 @@ import (
types020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
utiltesting "k8s.io/client-go/util/testing"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/testing"
mock_cni "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/testing"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport"
networktest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
"k8s.io/utils/exec"
@@ -247,7 +247,6 @@ func TestCNIPlugin(t *testing.T) {
ports := map[string][]*hostport.PortMapping{
containerID.ID: {
{
Name: "name",
HostPort: 8008,
ContainerPort: 80,
Protocol: "UDP",

View File

@@ -12,7 +12,6 @@ go_library(
"fake_iptables.go",
"hostport.go",
"hostport_manager.go",
"hostport_syncer.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport",
deps = [
@@ -33,7 +32,6 @@ go_test(
srcs = [
"fake_iptables_test.go",
"hostport_manager_test.go",
"hostport_syncer_test.go",
"hostport_test.go",
],
embed = [":go_default_library"],
@@ -54,9 +52,6 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubelet/dockershim/network/hostport/testing:all-srcs",
],
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -38,7 +38,6 @@ const (
// PortMapping represents a network port in a container
type PortMapping struct {
Name string
HostPort int32
ContainerPort int32
Protocol v1.Protocol

View File

@@ -394,3 +394,18 @@ func filterChains(chains map[utiliptables.Chain]string, filterChains []utiliptables.Chain) {
delete(chains, chain)
}
}
func getPodFullName(pod *PodPortMapping) string {
// Use underscore as the delimiter because it is not allowed in a pod name
// (DNS subdomain format), while it is allowed in the container name format.
return pod.Name + "_" + pod.Namespace
}
// Join all words with spaces, terminate with newline and write to buf.
func writeLine(buf *bytes.Buffer, words ...string) {
buf.WriteString(strings.Join(words, " ") + "\n")
}
func (hp *hostport) String() string {
return fmt.Sprintf("%s:%d", hp.protocol, hp.port)
}
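A short standalone demo of what these relocated helpers produce (the type is pared down to the two fields getPodFullName reads):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

// PodPortMapping is reduced to just the fields used here.
type PodPortMapping struct{ Name, Namespace string }

func getPodFullName(pod *PodPortMapping) string {
	return pod.Name + "_" + pod.Namespace
}

func writeLine(buf *bytes.Buffer, words ...string) {
	buf.WriteString(strings.Join(words, " ") + "\n")
}

func main() {
	fmt.Println(getPodFullName(&PodPortMapping{Name: "web", Namespace: "default"})) // web_default

	var buf bytes.Buffer
	writeLine(&buf, "-A", "KUBE-HOSTPORTS", "-j", "RETURN")
	fmt.Print(buf.String()) // -A KUBE-HOSTPORTS -j RETURN
}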

View File

@@ -1,328 +0,0 @@
// +build !dockerless
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"fmt"
"net"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/utils/net"
)
// HostportSyncer takes a list of PodPortMappings and implements hostports all at once
type HostportSyncer interface {
// SyncHostports gathers all hostports on the node and sets up iptables rules to enable them.
// On each invocation existing ports are synced and stale rules are deleted.
SyncHostports(natInterfaceName string, activePodPortMappings []*PodPortMapping) error
// OpenPodHostportsAndSync opens hostports for a new PodPortMapping, gathers all hostports on
// the node, and sets up iptables rules to enable them. On each invocation existing ports are synced and stale rules are deleted.
// 'newPortMapping' must also be present in 'activePodPortMappings'.
OpenPodHostportsAndSync(newPortMapping *PodPortMapping, natInterfaceName string, activePodPortMappings []*PodPortMapping) error
}
type hostportSyncer struct {
hostPortMap map[hostport]closeable
iptables utiliptables.Interface
portOpener hostportOpener
}
func NewHostportSyncer(iptables utiliptables.Interface) HostportSyncer {
return &hostportSyncer{
hostPortMap: make(map[hostport]closeable),
iptables: iptables,
portOpener: openLocalPort,
}
}
type targetPod struct {
podFullName string
podIP string
}
func (hp *hostport) String() string {
return fmt.Sprintf("%s:%d", hp.protocol, hp.port)
}
// openHostports opens all hostports for the pod and records the opened sockets in hostPortMap
func (h *hostportSyncer) openHostports(podHostportMapping *PodPortMapping) error {
var retErr error
ports := make(map[hostport]closeable)
for _, port := range podHostportMapping.PortMappings {
if port.HostPort <= 0 {
// Hostport is not specified in this port mapping, so skip it
continue
}
// We do not open host ports for SCTP ports, as we agreed in the Support of SCTP KEP
if port.Protocol == v1.ProtocolSCTP {
continue
}
hp := hostport{
port: port.HostPort,
protocol: strings.ToLower(string(port.Protocol)),
}
socket, err := h.portOpener(&hp)
if err != nil {
retErr = fmt.Errorf("cannot open hostport %d for pod %s: %v", port.HostPort, getPodFullName(podHostportMapping), err)
break
}
ports[hp] = socket
}
// If we encounter any error, close all hostports that were just opened.
if retErr != nil {
for hp, socket := range ports {
if err := socket.Close(); err != nil {
klog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podHostportMapping), err)
}
}
return retErr
}
for hostPort, socket := range ports {
h.hostPortMap[hostPort] = socket
}
return nil
}
func getPodFullName(pod *PodPortMapping) string {
// Use underscore as the delimiter because it is not allowed in a pod name
// (DNS subdomain format), while it is allowed in the container name format.
return pod.Name + "_" + pod.Namespace
}
// gatherAllHostports returns all hostports that should be present on the node,
// given the list of pods running on that node and ignoring host-network
// pods (which don't need hostport <-> container port mapping).
// It only returns the hostports that match the IP family passed as a parameter.
func gatherAllHostports(activePodPortMappings []*PodPortMapping, isIPv6 bool) (map[*PortMapping]targetPod, error) {
podHostportMap := make(map[*PortMapping]targetPod)
for _, pm := range activePodPortMappings {
// IP.To16() returns nil if IP is not a valid IPv4 or IPv6 address
if pm.IP.To16() == nil {
return nil, fmt.Errorf("Invalid or missing pod %s IP", getPodFullName(pm))
}
// return only entries from the same IP family
if utilnet.IsIPv6(pm.IP) != isIPv6 {
continue
}
// should not handle hostports for hostnetwork pods
if pm.HostNetwork {
continue
}
for _, port := range pm.PortMappings {
if port.HostPort != 0 {
podHostportMap[port] = targetPod{podFullName: getPodFullName(pm), podIP: pm.IP.String()}
}
}
}
return podHostportMap, nil
}
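The family filter above hinges on utilnet.IsIPv6, which reduces to net.IP.To4; a tiny standalone check showing why pods of the mismatched family in the tests further down produce no rules:

package main

import (
	"fmt"
	"net"
)

// isIPv6 matches the behavior of k8s.io/utils/net.IsIPv6: an address is
// IPv6 exactly when it has no 4-byte form.
func isIPv6(ip net.IP) bool { return ip != nil && ip.To4() == nil }

func main() {
	fmt.Println(isIPv6(net.ParseIP("10.1.1.2")))     // false
	fmt.Println(isIPv6(net.ParseIP("2001:beef::2"))) // true
}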
// Join all words with spaces, terminate with newline and write to buf.
func writeLine(buf *bytes.Buffer, words ...string) {
buf.WriteString(strings.Join(words, " ") + "\n")
}
func writeBytesLine(buf *bytes.Buffer, bytes []byte) {
buf.Write(bytes)
buf.WriteByte('\n')
}
// hostportChainName takes the containerPort for a pod and returns the
// associated iptables chain. This is computed by hashing (sha256) the host
// port, protocol, and pod name, encoding to base32, truncating, and adding
// the prefix "KUBE-HP-" (kubeHostportChainPrefix). We do this because
// iptables chain names must be <= 28 chars long, and the longer they are the
// harder they are to read.
func hostportChainName(pm *PortMapping, podFullName string) utiliptables.Chain {
hash := sha256.Sum256([]byte(strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol) + podFullName))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}
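A runnable sketch of that derivation, assuming the "KUBE-HP-" value of kubeHostportChainPrefix (which the legacy chain names in the tests below, e.g. KUBE-HP-5N7UH5JAXCVP5UJR, confirm):

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strconv"
)

// chainName reproduces hostportChainName: sha256 over hostPort+protocol+pod,
// base32-encode, keep 16 chars, prefix — 24 chars total, under the 28-char
// iptables limit, and deterministic so resyncs land on the same chain.
func chainName(hostPort int32, protocol, podFullName string) string {
	hash := sha256.Sum256([]byte(strconv.Itoa(int(hostPort)) + protocol + podFullName))
	encoded := base32.StdEncoding.EncodeToString(hash[:])
	return "KUBE-HP-" + encoded[:16]
}

func main() {
	fmt.Println(chainName(8443, "TCP", "pod3_ns1"))
}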
// OpenPodHostportsAndSync opens hostports for a new PodPortMapping, gathers all hostports on
// the node, sets up iptables rules to enable them, and finally cleans up stale hostports.
// 'newPortMapping' must also be present in 'activePodPortMappings'.
func (h *hostportSyncer) OpenPodHostportsAndSync(newPortMapping *PodPortMapping, natInterfaceName string, activePodPortMappings []*PodPortMapping) error {
// try to open pod host port if specified
if err := h.openHostports(newPortMapping); err != nil {
return err
}
// Add the new pod to active pods if it's not present.
var found bool
for _, pm := range activePodPortMappings {
if pm.Namespace == newPortMapping.Namespace && pm.Name == newPortMapping.Name {
found = true
break
}
}
if !found {
activePodPortMappings = append(activePodPortMappings, newPortMapping)
}
return h.SyncHostports(natInterfaceName, activePodPortMappings)
}
// SyncHostports gathers all hostports on the node, sets up iptables rules to enable them, and finally cleans up stale hostports
func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMappings []*PodPortMapping) error {
start := time.Now()
defer func() {
klog.V(4).Infof("syncHostportsRules took %v", time.Since(start))
}()
hostportPodMap, err := gatherAllHostports(activePodPortMappings, h.iptables.IsIPv6())
if err != nil {
return err
}
// Ensure KUBE-HOSTPORTS chains
ensureKubeHostportChains(h.iptables, natInterfaceName)
// Get iptables-save output so we can check for existing chains and rules.
// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
existingNATChains := make(map[utiliptables.Chain][]byte)
iptablesData := bytes.NewBuffer(nil)
err = h.iptables.SaveInto(utiliptables.TableNAT, iptablesData)
if err != nil { // if we failed to get any rules
klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes())
}
natChains := bytes.NewBuffer(nil)
natRules := bytes.NewBuffer(nil)
writeLine(natChains, "*nat")
// Make sure we keep stats for the top-level chains, if they existed
// (which most should have because we created them above).
if chain, ok := existingNATChains[kubeHostportsChain]; ok {
writeBytesLine(natChains, chain)
} else {
writeLine(natChains, utiliptables.MakeChainLine(kubeHostportsChain))
}
// Accumulate NAT chains to keep.
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
for port, target := range hostportPodMap {
protocol := strings.ToLower(string(port.Protocol))
hostportChain := hostportChainName(port, target.podFullName)
if chain, ok := existingNATChains[hostportChain]; ok {
writeBytesLine(natChains, chain)
} else {
writeLine(natChains, utiliptables.MakeChainLine(hostportChain))
}
activeNATChains[hostportChain] = true
// Redirect to hostport chain
args := []string{
"-A", string(kubeHostportsChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-m", protocol, "-p", protocol,
"--dport", fmt.Sprintf("%d", port.HostPort),
"-j", string(hostportChain),
}
writeLine(natRules, args...)
// Assuming kubelet is syncing iptables KUBE-MARK-MASQ chain
// If the request comes from the pod that is serving the hostport, then SNAT
args = []string{
"-A", string(hostportChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-s", target.podIP, "-j", string(iptablesproxy.KubeMarkMasqChain),
}
writeLine(natRules, args...)
// Create hostport chain to DNAT traffic to final destination
// iptables will maintain the stats for this chain
hostPortBinding := net.JoinHostPort(target.podIP, strconv.Itoa(int(port.ContainerPort)))
args = []string{
"-A", string(hostportChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-m", protocol, "-p", protocol,
"-j", "DNAT", fmt.Sprintf("--to-destination=%s", hostPortBinding),
}
writeLine(natRules, args...)
}
// Delete chains no longer in use.
for chain := range existingNATChains {
if !activeNATChains[chain] {
chainString := string(chain)
if !strings.HasPrefix(chainString, kubeHostportChainPrefix) {
// Ignore chains that aren't ours.
continue
}
// We must (as per iptables) write a chain-line for it, which has
// the nice effect of flushing the chain. Then we can remove the
// chain.
writeBytesLine(natChains, existingNATChains[chain])
writeLine(natRules, "-X", chainString)
}
}
writeLine(natRules, "COMMIT")
natLines := append(natChains.Bytes(), natRules.Bytes()...)
klog.V(3).Infof("Restoring iptables rules: %s", natLines)
err = h.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
return fmt.Errorf("failed to execute iptables-restore: %v", err)
}
h.cleanupHostportMap(hostportPodMap)
return nil
}
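To make the restore step concrete, here is a small runnable sketch that assembles a one-pod version of the payload handed to RestoreAll, using the same writeLine helper (the chain hash is a placeholder, not a real value):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func writeLine(buf *bytes.Buffer, words ...string) {
	buf.WriteString(strings.Join(words, " ") + "\n")
}

func main() {
	chains, rules := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
	// Chain declarations first, then rules, then COMMIT — the shape
	// iptables-restore expects for the *nat table.
	writeLine(chains, "*nat")
	writeLine(chains, ":KUBE-HOSTPORTS - [0:0]")
	writeLine(chains, ":KUBE-HP-XXXXXXXXXXXXXXXX - [0:0]")
	writeLine(rules, "-A", "KUBE-HOSTPORTS",
		"-m", "comment", "--comment", `"pod3_ns1 hostport 8443"`,
		"-m", "tcp", "-p", "tcp", "--dport", "8443",
		"-j", "KUBE-HP-XXXXXXXXXXXXXXXX")
	writeLine(rules, "-A", "KUBE-HP-XXXXXXXXXXXXXXXX",
		"-m", "comment", "--comment", `"pod3_ns1 hostport 8443"`,
		"-s", "10.1.1.4", "-j", "KUBE-MARK-MASQ")
	writeLine(rules, "-A", "KUBE-HP-XXXXXXXXXXXXXXXX",
		"-m", "comment", "--comment", `"pod3_ns1 hostport 8443"`,
		"-m", "tcp", "-p", "tcp",
		"-j", "DNAT", "--to-destination=10.1.1.4:443")
	writeLine(rules, "COMMIT")
	fmt.Print(string(append(chains.Bytes(), rules.Bytes()...)))
}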
// cleanupHostportMap closes obsolete hostports
func (h *hostportSyncer) cleanupHostportMap(containerPortMap map[*PortMapping]targetPod) {
// compute hostports that are supposed to be open
currentHostports := make(map[hostport]bool)
for containerPort := range containerPortMap {
hp := hostport{
port: containerPort.HostPort,
protocol: strings.ToLower(string(containerPort.Protocol)),
}
currentHostports[hp] = true
}
// close and delete obsolete hostports
for hp, socket := range h.hostPortMap {
if _, ok := currentHostports[hp]; !ok {
socket.Close()
klog.V(3).Infof("Closed local port %s", hp.String())
delete(h.hostPortMap, hp)
}
}
}

View File

@@ -1,532 +0,0 @@
// +build !dockerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"net"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
type ruleMatch struct {
hostport int
chain string
match string
}
func TestOpenPodHostports(t *testing.T) {
fakeIPTables := NewFakeIPTables()
fakeOpener := NewFakeSocketManager()
h := &hostportSyncer{
hostPortMap: make(map[hostport]closeable),
iptables: fakeIPTables,
portOpener: fakeOpener.openFakeSocket,
}
tests := []struct {
mapping *PodPortMapping
matches []*ruleMatch
}{
// New pod that we are going to add
{
&PodPortMapping{
Name: "test-pod",
Namespace: v1.NamespaceDefault,
IP: net.ParseIP("10.1.1.2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 4567,
ContainerPort: 80,
Protocol: v1.ProtocolTCP,
},
{
HostPort: 5678,
ContainerPort: 81,
Protocol: v1.ProtocolUDP,
},
},
},
[]*ruleMatch{
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"test-pod_default hostport 4567\" -m tcp -p tcp --dport 4567",
},
{
4567,
"",
"-m comment --comment \"test-pod_default hostport 4567\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ",
},
{
4567,
"",
"-m comment --comment \"test-pod_default hostport 4567\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80",
},
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"test-pod_default hostport 5678\" -m udp -p udp --dport 5678",
},
{
5678,
"",
"-m comment --comment \"test-pod_default hostport 5678\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ",
},
{
5678,
"",
"-m comment --comment \"test-pod_default hostport 5678\" -m udp -p udp -j DNAT --to-destination 10.1.1.2:81",
},
},
},
// Already running pod
{
&PodPortMapping{
Name: "another-test-pod",
Namespace: v1.NamespaceDefault,
IP: net.ParseIP("10.1.1.5"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 123,
ContainerPort: 654,
Protocol: v1.ProtocolTCP,
},
},
},
[]*ruleMatch{
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"another-test-pod_default hostport 123\" -m tcp -p tcp --dport 123",
},
{
123,
"",
"-m comment --comment \"another-test-pod_default hostport 123\" -s 10.1.1.5/32 -j KUBE-MARK-MASQ",
},
{
123,
"",
"-m comment --comment \"another-test-pod_default hostport 123\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.5:654",
},
},
},
// IPv6 pod
{
&PodPortMapping{
Name: "ipv6-test-pod",
Namespace: v1.NamespaceDefault,
IP: net.ParseIP("2001:dead::5"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 123,
ContainerPort: 654,
Protocol: v1.ProtocolTCP,
},
},
},
[]*ruleMatch{},
},
}
activePodPortMapping := make([]*PodPortMapping, 0)
// Fill in any match rules missing chain names
for _, test := range tests {
for _, match := range test.matches {
if match.hostport >= 0 {
found := false
for _, pm := range test.mapping.PortMappings {
if int(pm.HostPort) == match.hostport {
match.chain = string(hostportChainName(pm, getPodFullName(test.mapping)))
found = true
break
}
}
if !found {
t.Fatalf("Failed to find ContainerPort for match %d/'%s'", match.hostport, match.match)
}
}
}
activePodPortMapping = append(activePodPortMapping, test.mapping)
}
// Already running pod's host port
hp := hostport{
tests[1].mapping.PortMappings[0].HostPort,
strings.ToLower(string(tests[1].mapping.PortMappings[0].Protocol)),
}
h.hostPortMap[hp] = &fakeSocket{
tests[1].mapping.PortMappings[0].HostPort,
strings.ToLower(string(tests[1].mapping.PortMappings[0].Protocol)),
false,
}
err := h.OpenPodHostportsAndSync(tests[0].mapping, "br0", activePodPortMapping)
if err != nil {
t.Fatalf("Failed to OpenPodHostportsAndSync: %v", err)
}
// Generic rules
genericRules := []*ruleMatch{
{-1, "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o br0 -s 127.0.0.0/8 -j MASQUERADE"},
{-1, "PREROUTING", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
{-1, "OUTPUT", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
}
for _, rule := range genericRules {
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(rule.chain))
if err != nil {
t.Fatalf("Expected NAT chain %s did not exist", rule.chain)
}
if !matchRule(chain, rule.match) {
t.Fatalf("Expected %s chain rule match '%s' not found", rule.chain, rule.match)
}
}
// Pod rules
for _, test := range tests {
for _, match := range test.matches {
// Ensure chain exists
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(match.chain))
if err != nil {
t.Fatalf("Expected NAT chain %s did not exist", match.chain)
}
if !matchRule(chain, match.match) {
t.Fatalf("Expected NAT chain %s rule containing '%s' not found", match.chain, match.match)
}
}
}
// Socket
hostPortMap := map[hostport]closeable{
{123, "tcp"}: &fakeSocket{123, "tcp", false},
{4567, "tcp"}: &fakeSocket{4567, "tcp", false},
{5678, "udp"}: &fakeSocket{5678, "udp", false},
}
if !reflect.DeepEqual(hostPortMap, h.hostPortMap) {
t.Fatalf("Mismatch in expected hostPortMap. Expected '%v', got '%v'", hostPortMap, h.hostPortMap)
}
}
func matchRule(chain *fakeChain, match string) bool {
for _, rule := range chain.rules {
if strings.Contains(rule, match) {
return true
}
}
return false
}
func TestOpenPodHostportsIPv6(t *testing.T) {
fakeIPTables := NewFakeIPTables()
fakeIPTables.protocol = utiliptables.ProtocolIPv6
fakeOpener := NewFakeSocketManager()
h := &hostportSyncer{
hostPortMap: make(map[hostport]closeable),
iptables: fakeIPTables,
portOpener: fakeOpener.openFakeSocket,
}
tests := []struct {
mapping *PodPortMapping
matches []*ruleMatch
}{
// New pod that we are going to add
{
&PodPortMapping{
Name: "test-pod",
Namespace: v1.NamespaceDefault,
IP: net.ParseIP("2001:beef::2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 4567,
ContainerPort: 80,
Protocol: v1.ProtocolTCP,
},
{
HostPort: 5678,
ContainerPort: 81,
Protocol: v1.ProtocolUDP,
},
},
},
[]*ruleMatch{
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"test-pod_default hostport 4567\" -m tcp -p tcp --dport 4567",
},
{
4567,
"",
"-m comment --comment \"test-pod_default hostport 4567\" -s 2001:beef::2/32 -j KUBE-MARK-MASQ",
},
{
4567,
"",
"-m comment --comment \"test-pod_default hostport 4567\" -m tcp -p tcp -j DNAT --to-destination [2001:beef::2]:80",
},
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"test-pod_default hostport 5678\" -m udp -p udp --dport 5678",
},
{
5678,
"",
"-m comment --comment \"test-pod_default hostport 5678\" -s 2001:beef::2/32 -j KUBE-MARK-MASQ",
},
{
5678,
"",
"-m comment --comment \"test-pod_default hostport 5678\" -m udp -p udp -j DNAT --to-destination [2001:beef::2]:81",
},
},
},
// Already running pod
{
&PodPortMapping{
Name: "another-test-pod",
Namespace: v1.NamespaceDefault,
IP: net.ParseIP("2001:beef::5"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 123,
ContainerPort: 654,
Protocol: v1.ProtocolTCP,
},
},
},
[]*ruleMatch{
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"another-test-pod_default hostport 123\" -m tcp -p tcp --dport 123",
},
{
123,
"",
"-m comment --comment \"another-test-pod_default hostport 123\" -s 2001:beef::5/32 -j KUBE-MARK-MASQ",
},
{
123,
"",
"-m comment --comment \"another-test-pod_default hostport 123\" -m tcp -p tcp -j DNAT --to-destination [2001:beef::5]:654",
},
},
},
// IPv4 pod
{
&PodPortMapping{
Name: "ipv4-test-pod",
Namespace: v1.NamespaceDefault,
IP: net.ParseIP("192.168.2.5"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 123,
ContainerPort: 654,
Protocol: v1.ProtocolTCP,
},
},
},
[]*ruleMatch{},
},
}
activePodPortMapping := make([]*PodPortMapping, 0)
// Fill in any match rules missing chain names
for _, test := range tests {
for _, match := range test.matches {
if match.hostport >= 0 {
found := false
for _, pm := range test.mapping.PortMappings {
if int(pm.HostPort) == match.hostport {
match.chain = string(hostportChainName(pm, getPodFullName(test.mapping)))
found = true
break
}
}
if !found {
t.Fatalf("Failed to find ContainerPort for match %d/'%s'", match.hostport, match.match)
}
}
}
activePodPortMapping = append(activePodPortMapping, test.mapping)
}
// Already running pod's host port
hp := hostport{
tests[1].mapping.PortMappings[0].HostPort,
strings.ToLower(string(tests[1].mapping.PortMappings[0].Protocol)),
}
h.hostPortMap[hp] = &fakeSocket{
tests[1].mapping.PortMappings[0].HostPort,
strings.ToLower(string(tests[1].mapping.PortMappings[0].Protocol)),
false,
}
err := h.OpenPodHostportsAndSync(tests[0].mapping, "br0", activePodPortMapping)
if err != nil {
t.Fatalf("Failed to OpenPodHostportsAndSync: %v", err)
}
// Generic rules
genericRules := []*ruleMatch{
{-1, "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o br0 -s ::1/128 -j MASQUERADE"},
{-1, "PREROUTING", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
{-1, "OUTPUT", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
}
for _, rule := range genericRules {
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(rule.chain))
if err != nil {
t.Fatalf("Expected NAT chain %s did not exist", rule.chain)
}
if !matchRule(chain, rule.match) {
t.Fatalf("Expected %s chain rule match '%s' not found", rule.chain, rule.match)
}
}
// Pod rules
for _, test := range tests {
for _, match := range test.matches {
// Ensure chain exists
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(match.chain))
if err != nil {
t.Fatalf("Expected NAT chain %s did not exist", match.chain)
}
if !matchRule(chain, match.match) {
t.Fatalf("Expected NAT chain %s rule containing '%s' not found", match.chain, match.match)
}
}
}
// Socket
hostPortMap := map[hostport]closeable{
{123, "tcp"}: &fakeSocket{123, "tcp", false},
{4567, "tcp"}: &fakeSocket{4567, "tcp", false},
{5678, "udp"}: &fakeSocket{5678, "udp", false},
}
if !reflect.DeepEqual(hostPortMap, h.hostPortMap) {
t.Fatalf("Mismatch in expected hostPortMap. Expected '%v', got '%v'", hostPortMap, h.hostPortMap)
}
}
func TestHostportChainName(t *testing.T) {
m := make(map[string]int)
chain := hostportChainName(&PortMapping{HostPort: 57119, Protocol: "TCP", ContainerPort: 57119}, "testrdma-2")
m[string(chain)] = 1
chain = hostportChainName(&PortMapping{HostPort: 55429, Protocol: "TCP", ContainerPort: 55429}, "testrdma-2")
m[string(chain)] = 1
chain = hostportChainName(&PortMapping{HostPort: 56833, Protocol: "TCP", ContainerPort: 56833}, "testrdma-2")
m[string(chain)] = 1
if len(m) != 3 {
t.Fatal(m)
}
}
func TestHostPortSyncerRemoveLegacyRules(t *testing.T) {
iptables := NewFakeIPTables()
legacyRules := [][]string{
{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp --dport 8443 -j KUBE-HP-5N7UH5JAXCVP5UJR"},
{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp --dport 8081 -j KUBE-HP-7THKRFSEH4GIIXK7"},
{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp --dport 8080 -j KUBE-HP-4YVONL46AKYWSKS3"},
{"-A", "OUTPUT", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
{"-A", "PREROUTING", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
{"-A", "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o cbr0 -s 127.0.0.0/8 -j MASQUERADE"},
{"-A", "KUBE-HP-4YVONL46AKYWSKS3", "-m comment --comment \"pod1_ns1 hostport 8080\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ"},
{"-A", "KUBE-HP-4YVONL46AKYWSKS3", "-m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80"},
{"-A", "KUBE-HP-7THKRFSEH4GIIXK7", "-m comment --comment \"pod1_ns1 hostport 8081\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ"},
{"-A", "KUBE-HP-7THKRFSEH4GIIXK7", "-m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp -j DNAT --to-destination 10.1.1.2:81"},
{"-A", "KUBE-HP-5N7UH5JAXCVP5UJR", "-m comment --comment \"pod3_ns1 hostport 8443\" -s 10.1.1.4/32 -j KUBE-MARK-MASQ"},
{"-A", "KUBE-HP-5N7UH5JAXCVP5UJR", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.4:443"},
}
for _, rule := range legacyRules {
_, err := iptables.EnsureChain(utiliptables.TableNAT, utiliptables.Chain(rule[1]))
assert.NoError(t, err)
_, err = iptables.ensureRule(utiliptables.RulePosition(rule[0]), utiliptables.TableNAT, utiliptables.Chain(rule[1]), rule[2])
assert.NoError(t, err)
}
portOpener := NewFakeSocketManager()
h := &hostportSyncer{
hostPortMap: make(map[hostport]closeable),
iptables: iptables,
portOpener: portOpener.openFakeSocket,
}
// check that pod3's rules are preserved and pod1's rules are removed
pod3PortMapping := &PodPortMapping{
Name: "pod3",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.4"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 8443,
ContainerPort: 443,
Protocol: v1.ProtocolTCP,
},
},
}
h.SyncHostports("cbr0", []*PodPortMapping{pod3PortMapping})
newChainName := string(hostportChainName(pod3PortMapping.PortMappings[0], getPodFullName(pod3PortMapping)))
expectRules := [][]string{
{"KUBE-HOSTPORTS", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp --dport 8443 -j " + newChainName},
{newChainName, "-m comment --comment \"pod3_ns1 hostport 8443\" -s 10.1.1.4/32 -j KUBE-MARK-MASQ"},
{newChainName, "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.4:443"},
}
natTable, ok := iptables.tables[string(utiliptables.TableNAT)]
assert.True(t, ok)
// check that pod1's rules in the KUBE-HOSTPORTS chain are cleaned up
hostportChain, ok := natTable.chains["KUBE-HOSTPORTS"]
assert.True(t, ok, string(hostportChain.name))
assert.Equal(t, 1, len(hostportChain.rules), "%v", hostportChain.rules)
// check that pod3's rules remain
assert.Equal(t, expectRules[0][1], hostportChain.rules[0])
chain, ok := natTable.chains[newChainName]
assert.True(t, ok)
assert.Equal(t, 2, len(chain.rules))
assert.Equal(t, expectRules[1][1], chain.rules[0])
assert.Equal(t, expectRules[2][1], chain.rules[1])
// check that the legacy KUBE-HP-* chains are deleted
for _, name := range []string{"KUBE-HP-4YVONL46AKYWSKS3", "KUBE-HP-7THKRFSEH4GIIXK7", "KUBE-HP-5N7UH5JAXCVP5UJR"} {
_, ok := natTable.chains[name]
assert.False(t, ok)
}
}

View File

@@ -1,26 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["fake.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/testing",
deps = ["//pkg/kubelet/dockershim/network/hostport:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,45 +0,0 @@
// +build !dockerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport"
)
type fakeSyncer struct{}
func NewFakeHostportSyncer() hostport.HostportSyncer {
return &fakeSyncer{}
}
func (h *fakeSyncer) OpenPodHostportsAndSync(newPortMapping *hostport.PodPortMapping, natInterfaceName string, activePortMapping []*hostport.PodPortMapping) error {
return h.SyncHostports(natInterfaceName, activePortMapping)
}
func (h *fakeSyncer) SyncHostports(natInterfaceName string, activePortMapping []*hostport.PodPortMapping) error {
for _, r := range activePortMapping {
if r.IP.To4() == nil {
return fmt.Errorf("invalid or missing pod %s/%s IP", r.Namespace, r.Name)
}
}
return nil
}

View File

@@ -140,7 +140,6 @@ go_test(
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/kubelet/dockershim/network/cni/testing:go_default_library",
"//pkg/kubelet/dockershim/network/hostport/testing:go_default_library",
"//pkg/kubelet/dockershim/network/testing:go_default_library",
"//pkg/util/bandwidth:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
@@ -158,7 +157,6 @@ go_test(
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/kubelet/dockershim/network/cni/testing:go_default_library",
"//pkg/kubelet/dockershim/network/hostport/testing:go_default_library",
"//pkg/kubelet/dockershim/network/testing:go_default_library",
"//pkg/util/bandwidth:go_default_library",
"//pkg/util/iptables/testing:go_default_library",

View File

@@ -90,22 +90,17 @@ var requiredCNIPlugins = [...]string{"bridge", "host-local", "loopback"}
type kubenetNetworkPlugin struct {
network.NoopNetworkPlugin
host network.Host
netConfig *libcni.NetworkConfig
loConfig *libcni.NetworkConfig
cniConfig libcni.CNI
bandwidthShaper bandwidth.Shaper
mu sync.Mutex //Mutex for protecting podIPs map, netConfig, and shaper initialization
podIPs map[kubecontainer.ContainerID]utilsets.String
mtu int
execer utilexec.Interface
nsenterPath string
hairpinMode kubeletconfig.HairpinMode
// kubenet can use either hostportSyncer or hostportManager to implement hostports.
// Currently, if the network host supports legacy features, hostportSyncer is used;
// otherwise, hostportManager is used.
hostportSyncer hostport.HostportSyncer
hostportSyncerv6 hostport.HostportSyncer
host network.Host
netConfig *libcni.NetworkConfig
loConfig *libcni.NetworkConfig
cniConfig libcni.CNI
bandwidthShaper bandwidth.Shaper
mu sync.Mutex //Mutex for protecting podIPs map, netConfig, and shaper initialization
podIPs map[kubecontainer.ContainerID]utilsets.String
mtu int
execer utilexec.Interface
nsenterPath string
hairpinMode kubeletconfig.HairpinMode
hostportManager hostport.HostPortManager
hostportManagerv6 hostport.HostPortManager
iptables utiliptables.Interface
@@ -131,8 +126,6 @@ func NewPlugin(networkPluginDirs []string, cacheDir string) network.NetworkPlugin {
iptablesv6: iptInterfacev6,
sysctl: utilsysctl.New(),
binDirs: append([]string{DefaultCNIDir}, networkPluginDirs...),
hostportSyncer: hostport.NewHostportSyncer(iptInterface),
hostportSyncerv6: hostport.NewHostportSyncer(iptInterfacev6),
hostportManager: hostport.NewHostportManager(iptInterface),
hostportManagerv6: hostport.NewHostportManager(iptInterfacev6),
nonMasqueradeCIDR: "10.0.0.0/8",
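With the syncer fields gone, the hostport manager is the only hostport path left in kubenet. For context, its interface in the same tree's hostport_manager.go looks roughly like the following (quoted from memory, not part of this diff; doc comments abbreviated):

type HostPortManager interface {
	// Add implements port mappings.
	// id should be a unique identifier for a pod, e.g. a pod sandbox ID.
	Add(id string, podPortMapping *PodPortMapping, natInterfaceName string) error
	// Remove cleans up the matching port mappings.
	Remove(id string, podPortMapping *PodPortMapping) error
}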

View File

@@ -34,7 +34,6 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
mockcni "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/testing"
hostporttest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/testing"
nettest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
"k8s.io/kubernetes/pkg/util/bandwidth"
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
@@ -179,7 +178,6 @@ func TestTeardownCallsShaper(t *testing.T) {
kubenet.cniConfig = mockcni
kubenet.iptables = ipttest.NewFake()
kubenet.bandwidthShaper = fshaper
kubenet.hostportSyncer = hostporttest.NewFakeHostportSyncer()
mockcni.On("DelNetwork", mock.AnythingOfType("*context.timerCtx"), mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)

View File

@@ -468,8 +468,6 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container
podName := volumeutil.GetUniquePodName(pod)
volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
opts.PortMappings = kubecontainer.MakePortMappings(container)
blkutil := volumepathhandler.NewBlockVolumePathHandler()
blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil)
if err != nil {