multus-cni/pkg/kubeletclient/kubeletclient_test.go
Miguel Duarte Barroso 8ba2accb9f
Replace entrypoint script with initcontainers (#718)
* build: install the multus binary in an init container

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>

* build: generate kubeconfig via go

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>

* build: generate multus cni configuration via golang

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>

* build: provide a docker img for daemon based deployments

We will have 2 different images (amd64 archs only):
- the legacy, entrypoint-script-based image
- the daemonized process image

The `image-build` docker action is updated to build these 2 images.

There will be 2 different deployment specs, along with e2e test
lanes, one for each of the aforementioned alternatives.

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>

* build: delegate CNI config watch loop via golang

For the thick-plugin alternative, provide the watch loop for
configuration regeneration via a golang binary.

Over time, this binary is expected to run the control loop that
watches for pod updates.

To let current multus users choose when to upgrade to this new
deployment setup, these changes are shipped in separate multus images,
each with its own YAML spec file. Both alternatives are tested e2e,
since a new lane is introduced.

The following libraries are introduced, along with the motivation for
adding them (see the sketch after the links below):
- dproxy: allows traversing the default network configuration arbitrarily,
  similar to what an XPath / JSONPath tool provides.
  Repo is available at [0].
- fsnotify: watch for changes in the default CNI configuration file.
  Repo is available at [1].

The config map providing the default network CNI configuration is not
carried over: originally, the user was not required to install a
default network CNI plugin first, but nowadays that is a required
step of multus.

As such, it is no longer necessary to provide a default CNI
configuration.

[0] - https://github.com/koron/go-dproxy
[1] - https://github.com/fsnotify/fsnotify
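
A minimal sketch of how these two libraries could combine in such a
watch loop follows. The conflist path, the top-level `name` lookup,
and the log-only "regeneration" step are illustrative assumptions,
not the actual multus implementation:

package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/fsnotify/fsnotify"
	dproxy "github.com/koron/go-dproxy"
)

// readNetworkName parses a CNI conflist and pulls the network name out
// of the arbitrary JSON document, dproxy-style (roughly a $.name query).
func readNetworkName(path string) (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	var conf interface{}
	if err := json.Unmarshal(data, &conf); err != nil {
		return "", err
	}
	return dproxy.New(conf).M("name").String()
}

func main() {
	// Assumed location of the default network configuration.
	const defaultCNIConf = "/etc/cni/net.d/00-default.conflist"

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add(defaultCNIConf); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			if event.Op&fsnotify.Write != 0 {
				name, err := readNetworkName(event.Name)
				if err != nil {
					log.Printf("could not parse %s: %v", event.Name, err)
					continue
				}
				// The real binary would regenerate the multus config here.
				log.Printf("default network %q changed; regenerating", name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Printf("watch error: %v", err)
		}
	}
}

Watching the parent directory instead of the file itself would also
catch atomic rename-based rewrites, which drop a file-level watch.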

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>

* run gofmt

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>

* refactor: make the builder pattern more idiomatic to golang

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>

* build: update github actions to release new imgs

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>
2021-10-27 08:42:37 -04:00


package kubeletclient

import (
	"context"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"google.golang.org/grpc"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sTypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/kubelet/util"

	mtypes "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)

var (
	socketDir  string
	socketName string
	fakeServer *fakeResourceServer
)

type fakeResourceServer struct {
	server *grpc.Server
}

/* This is for 1.21.x or later. Uncomment it once we update vendor here!
// TODO: This is stub code for the tests, but we may need to change it for future testing that uses this API...
func (m *fakeResourceServer) GetAllocatableResources(ctx context.Context, req *podresourcesapi.AllocatableResourcesRequest) (*podresourcesapi.AllocatableResourcesResponse, error) {
	return &podresourcesapi.AllocatableResourcesResponse{}, nil
}
*/

// List returns a canned response: a single "pod-name" pod with one
// container holding two devices of resource "resource".
func (m *fakeResourceServer) List(ctx context.Context, req *podresourcesapi.ListPodResourcesRequest) (*podresourcesapi.ListPodResourcesResponse, error) {
	podName := "pod-name"
	podNamespace := "pod-namespace"
	containerName := "container-name"

	devs := []*podresourcesapi.ContainerDevices{
		{
			ResourceName: "resource",
			DeviceIds:    []string{"dev0", "dev1"},
		},
	}

	resp := &podresourcesapi.ListPodResourcesResponse{
		PodResources: []*podresourcesapi.PodResources{
			{
				Name:      podName,
				Namespace: podNamespace,
				Containers: []*podresourcesapi.ContainerResources{
					{
						Name:    containerName,
						Devices: devs,
					},
				},
			},
		},
	}
	return resp, nil
}

func TestKubeletclient(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Kubeletclient Suite")
}

var testKubeletSocket string

// setUp creates a temporary pod-resources directory and starts a fake
// pod-resources gRPC server listening on a kubelet socket inside it.
func setUp() error {
	tempSocketDir, err := ioutil.TempDir("", "kubelet-resource-client")
	if err != nil {
		return err
	}
	testingPodResourcesPath := filepath.Join(tempSocketDir, defaultPodResourcesPath)

	// 0o700 (rather than the permission-less os.ModeDir) so the test can
	// create the socket inside the directory without running as root.
	if err := os.MkdirAll(testingPodResourcesPath, 0o700); err != nil {
		return err
	}

	socketDir = testingPodResourcesPath
	socketName = filepath.Join(socketDir, "kubelet.sock")
	testKubeletSocket = socketName

	fakeServer = &fakeResourceServer{server: grpc.NewServer()}
	podresourcesapi.RegisterPodResourcesListerServer(fakeServer.server, fakeServer)
	lis, err := util.CreateListener(socketName)
	if err != nil {
		return err
	}
	go fakeServer.server.Serve(lis)
	return nil
}

// tearDown stops the fake server and removes the temporary socket directory.
func tearDown(path string) error {
	if fakeServer != nil {
		fakeServer.server.Stop()
	}
	return os.RemoveAll(path)
}

var _ = BeforeSuite(func() {
	err := setUp()
	Expect(err).NotTo(HaveOccurred())
})

var _ = AfterSuite(func() {
	err := tearDown(socketDir)
	Expect(err).NotTo(HaveOccurred())
})

var _ = Describe("Kubelet resource endpoint data read operations", func() {
	Context("GetResourceClient()", func() {
		It("should return no error", func() {
			_, err := GetResourceClient(testKubeletSocket)
			Expect(err).NotTo(HaveOccurred())
		})

		It("should fail with missing file", func() {
			_, err := GetResourceClient("sampleSocketString")
			Expect(err).To(HaveOccurred())
		})
	})

	Context("GetPodResourceMap() with valid pod name and namespace", func() {
		It("should return no error", func() {
			podUID := k8sTypes.UID("970a395d-bb3b-11e8-89df-408d5c537d23")
			fakePod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pod-name",
					Namespace: "pod-namespace",
					UID:       podUID,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name: "container-name",
						},
					},
				},
			}
			client, err := getKubeletClient(testKubeletSocket)
			Expect(err).NotTo(HaveOccurred())

			outputRMap := map[string]*mtypes.ResourceInfo{
				"resource": {DeviceIDs: []string{"dev0", "dev1"}},
			}
			resourceMap, err := client.GetPodResourceMap(fakePod)
			Expect(err).NotTo(HaveOccurred())
			Expect(resourceMap).ShouldNot(BeNil())
			Expect(resourceMap).To(Equal(outputRMap))
		})

		It("should return an error with garbage socket value", func() {
			_, err := getKubeletClient("/badfilepath!?//")
			Expect(err).To(HaveOccurred())
		})
	})

	Context("GetPodResourceMap() with empty podname", func() {
		It("should return error", func() {
			podUID := k8sTypes.UID("970a395d-bb3b-11e8-89df-408d5c537d23")
			fakePod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "",
					Namespace: "pod-namespace",
					UID:       podUID,
				},
			}
			client, err := getKubeletClient(testKubeletSocket)
			Expect(err).NotTo(HaveOccurred())
			_, err = client.GetPodResourceMap(fakePod)
			Expect(err).To(HaveOccurred())
		})
	})

	Context("GetPodResourceMap() with empty namespace", func() {
		It("should return error", func() {
			podUID := k8sTypes.UID("970a395d-bb3b-11e8-89df-408d5c537d23")
			fakePod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pod-name",
					Namespace: "",
					UID:       podUID,
				},
			}
			client, err := getKubeletClient(testKubeletSocket)
			Expect(err).NotTo(HaveOccurred())
			_, err = client.GetPodResourceMap(fakePod)
			Expect(err).To(HaveOccurred())
		})
	})

	Context("GetPodResourceMap() with non-existent podname and namespace", func() {
		It("should return no error", func() {
			podUID := k8sTypes.UID("970a395d-bb3b-11e8-89df-408d5c537d23")
			fakePod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "whateverpod",
					Namespace: "whatevernamespace",
					UID:       podUID,
				},
			}
			client, err := getKubeletClient(testKubeletSocket)
			Expect(err).NotTo(HaveOccurred())

			emptyRMap := map[string]*mtypes.ResourceInfo{}
			resourceMap, err := client.GetPodResourceMap(fakePod)
			Expect(err).NotTo(HaveOccurred())
			Expect(resourceMap).ShouldNot(BeNil())
			Expect(resourceMap).To(Equal(emptyRMap))
		})
	})
})