mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-24 20:24:09 +00:00
Merge pull request #48091 from rpothier/kubenet-ipv6
Automatic merge from submit-queue (batch tested with PRs 45467, 48091, 48033, 48498) Allow Kubenet with ipv6 When running kubenet with IPv6, there is a panic as there is IPv4-specific code in the Event function. With this change, Event will support IPv4 and IPv6. **What this PR does / why we need it**: This PR allows kubenet to use IPv6. Currently there is a panic in kubenet_linux.go as there is IPv4-specific code. **Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #48089 **Special notes for your reviewer**: **Release note**: ```release-note-NONE ```
This commit is contained in:
commit
e14d9a7ffd
@ -259,7 +259,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
setHairpin := plugin.hairpinMode == componentconfig.HairpinVeth
|
setHairpin := plugin.hairpinMode == componentconfig.HairpinVeth
|
||||||
// Set bridge address to first address in IPNet
|
// Set bridge address to first address in IPNet
|
||||||
cidr.IP.To4()[3] += 1
|
cidr.IP[len(cidr.IP)-1] += 1
|
||||||
|
|
||||||
json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, podCIDR, cidr.IP.String())
|
json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, podCIDR, cidr.IP.String())
|
||||||
glog.V(2).Infof("CNI network config set to %v", json)
|
glog.V(2).Infof("CNI network config set to %v", json)
|
||||||
|
@ -232,37 +232,61 @@ func TestGenerateMacAddress(t *testing.T) {
|
|||||||
// TestInvocationWithoutRuntime invokes the plugin without a runtime.
|
// TestInvocationWithoutRuntime invokes the plugin without a runtime.
|
||||||
// This is how kubenet is invoked from the cri.
|
// This is how kubenet is invoked from the cri.
|
||||||
func TestTearDownWithoutRuntime(t *testing.T) {
|
func TestTearDownWithoutRuntime(t *testing.T) {
|
||||||
fhost := nettest.NewFakeHost(nil)
|
testCases := []struct {
|
||||||
fhost.Legacy = false
|
podCIDR string
|
||||||
fhost.Runtime = nil
|
ip string
|
||||||
mockcni := &mock_cni.MockCNI{}
|
expectedGateway string
|
||||||
|
}{
|
||||||
fexec := &exec.FakeExec{
|
{
|
||||||
CommandScript: []exec.FakeCommandAction{},
|
podCIDR: "10.0.0.1/24",
|
||||||
LookPathFunc: func(file string) (string, error) {
|
ip: "10.0.0.1",
|
||||||
return fmt.Sprintf("/fake-bin/%s", file), nil
|
expectedGateway: "10.0.0.1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
podCIDR: "2001:beef::1/48",
|
||||||
|
ip: "2001:beef::1",
|
||||||
|
expectedGateway: "2001:beef::1",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
fhost := nettest.NewFakeHost(nil)
|
||||||
|
fhost.Legacy = false
|
||||||
|
fhost.Runtime = nil
|
||||||
|
mockcni := &mock_cni.MockCNI{}
|
||||||
|
|
||||||
kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
|
fexec := &exec.FakeExec{
|
||||||
kubenet.cniConfig = mockcni
|
CommandScript: []exec.FakeCommandAction{},
|
||||||
kubenet.iptables = ipttest.NewFake()
|
LookPathFunc: func(file string) (string, error) {
|
||||||
|
return fmt.Sprintf("/fake-bin/%s", file), nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
details := make(map[string]interface{})
|
kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
|
||||||
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24"
|
kubenet.cniConfig = mockcni
|
||||||
kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
|
kubenet.iptables = ipttest.NewFake()
|
||||||
|
|
||||||
existingContainerID := kubecontainer.BuildContainerID("docker", "123")
|
details := make(map[string]interface{})
|
||||||
kubenet.podIPs[existingContainerID] = "10.0.0.1"
|
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = tc.podCIDR
|
||||||
|
kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
|
||||||
|
|
||||||
mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
|
if kubenet.gateway.String() != tc.expectedGateway {
|
||||||
|
t.Errorf("generated gateway: %q, expecting: %q", kubenet.gateway.String(), tc.expectedGateway)
|
||||||
|
}
|
||||||
|
if kubenet.podCidr != tc.podCIDR {
|
||||||
|
t.Errorf("generated podCidr: %q, expecting: %q", kubenet.podCidr, tc.podCIDR)
|
||||||
|
}
|
||||||
|
existingContainerID := kubecontainer.BuildContainerID("docker", "123")
|
||||||
|
kubenet.podIPs[existingContainerID] = tc.ip
|
||||||
|
|
||||||
if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
|
mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
|
||||||
t.Fatalf("Unexpected error in TearDownPod: %v", err)
|
|
||||||
|
if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
|
||||||
|
t.Fatalf("Unexpected error in TearDownPod: %v", err)
|
||||||
|
}
|
||||||
|
// Assert that the CNI DelNetwork made it through and we didn't crash
|
||||||
|
// without a runtime.
|
||||||
|
mockcni.AssertExpectations(t)
|
||||||
}
|
}
|
||||||
// Assert that the CNI DelNetwork made it through and we didn't crash
|
|
||||||
// without a runtime.
|
|
||||||
mockcni.AssertExpectations(t)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: add unit test for each implementation of network plugin interface
|
//TODO: add unit test for each implementation of network plugin interface
|
||||||
|
Loading…
Reference in New Issue
Block a user