Merge pull request #55692 from vikaschoudhary16/dpUT

Automatic merge from submit-queue (batch tested with PRs 56386, 57204, 55692, 57107, 57177). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Refactor TestPodContainerDeviceAllocation for extensibility/readability

**What this PR does / why we need it**:
This PR refactors and reorganizes TestPodContainerDeviceAllocation(). It converts the test to iterate over a slice of test cases and applies other cleanups, such as moving the test pod creation logic into a separate helper function.
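For context, this is the standard Go table-driven test pattern: each scenario becomes one entry in a slice of case structs, and a single loop runs the shared setup and assertions, so adding a scenario only means appending a struct literal. A minimal, self-contained sketch of the pattern follows; the `sum` helper and case names are illustrative only and are not part of this PR:

```go
package example

import "testing"

// sum is a stand-in for the function under test.
func sum(nums ...int) int {
	total := 0
	for _, n := range nums {
		total += n
	}
	return total
}

func TestSum(t *testing.T) {
	testCases := []struct {
		description string
		input       []int
		expected    int
	}{
		{description: "empty input", input: nil, expected: 0},
		{description: "two values", input: []int{1, 2}, expected: 3},
	}
	for _, tc := range testCases {
		// Every case reuses the same assertion logic; extending the
		// test never requires duplicating the setup or the checks.
		if got := sum(tc.input...); got != tc.expected {
			t.Errorf("%s: expected %d, got %d", tc.description, tc.expected, got)
		}
	}
}
```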

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
None
```
cc @sjenning @jeremyeder @RenaudWasTaken @vishh @mindprince @jiayingz @ScorpioCPH

/sig node
Commit 380c2b6d0b, authored by Kubernetes Submit Queue on 2017-12-17 04:19:42 -08:00 and committed by GitHub.

```diff
@@ -366,37 +366,25 @@ func (m *MockEndpoint) allocate(devs []string) (*pluginapi.AllocateResponse, err
     return nil, nil
 }
-func TestPodContainerDeviceAllocation(t *testing.T) {
-    flag.Set("alsologtostderr", fmt.Sprintf("%t", true))
-    var logLevel string
-    flag.StringVar(&logLevel, "logLevel", "4", "test")
-    flag.Lookup("v").Value.Set(logLevel)
-
-    resourceName1 := "domain1.com/resource1"
-    resourceQuantity1 := *resource.NewQuantity(int64(2), resource.DecimalSI)
-    devID1 := "dev1"
-    devID2 := "dev2"
-    resourceName2 := "domain2.com/resource2"
-    resourceQuantity2 := *resource.NewQuantity(int64(1), resource.DecimalSI)
-    devID3 := "dev3"
-    devID4 := "dev4"
-    as := require.New(t)
-    monitorCallback := func(resourceName string, added, updated, deleted []pluginapi.Device) {}
-    podsStub := activePodsStub{
-        activePods: []*v1.Pod{},
-    }
-    cachedNode := &v1.Node{
-        Status: v1.NodeStatus{
-            Allocatable: v1.ResourceList{},
-        },
-    }
-    nodeInfo := &schedulercache.NodeInfo{}
-    nodeInfo.SetNode(cachedNode)
-    tmpDir, err := ioutil.TempDir("", "checkpoint")
-    as.Nil(err)
-    defer os.RemoveAll(tmpDir)
+func makePod(limits v1.ResourceList) *v1.Pod {
+    return &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            UID: uuid.NewUUID(),
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Resources: v1.ResourceRequirements{
+                        Limits: limits,
+                    },
+                },
+            },
+        },
+    }
+}
+
+func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestResource) *ManagerImpl {
+    monitorCallback := func(resourceName string, added, updated, deleted []pluginapi.Device) {}
     testManager := &ManagerImpl{
         socketdir:        tmpDir,
         callback:         monitorCallback,
@@ -404,158 +392,221 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
         allocatedDevices: make(map[string]sets.String),
         endpoints:        make(map[string]endpoint),
         podDevices:       make(podDevices),
-        activePods:       podsStub.getActivePods,
+        activePods:       activePods,
         sourcesReady:     &sourcesReadyStub{},
     }
     testManager.store, _ = utilstore.NewFileStore("/tmp/", utilfs.DefaultFs{})
-    testManager.allDevices[resourceName1] = sets.NewString()
-    testManager.allDevices[resourceName1].Insert(devID1)
-    testManager.allDevices[resourceName1].Insert(devID2)
-    testManager.allDevices[resourceName2] = sets.NewString()
-    testManager.allDevices[resourceName2].Insert(devID3)
-    testManager.allDevices[resourceName2].Insert(devID4)
-    testManager.endpoints[resourceName1] = &MockEndpoint{
-        allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) {
-            resp := new(pluginapi.AllocateResponse)
-            resp.Envs = make(map[string]string)
-            for _, dev := range devs {
-                switch dev {
-                case "dev1":
-                    resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
-                        ContainerPath: "/dev/aaa",
-                        HostPath:      "/dev/aaa",
-                        Permissions:   "mrw",
-                    })
-                    resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
-                        ContainerPath: "/dev/bbb",
-                        HostPath:      "/dev/bbb",
-                        Permissions:   "mrw",
-                    })
-                    resp.Mounts = append(resp.Mounts, &pluginapi.Mount{
-                        ContainerPath: "/container_dir1/file1",
-                        HostPath:      "host_dir1/file1",
-                        ReadOnly:      true,
-                    })
-                case "dev2":
-                    resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
-                        ContainerPath: "/dev/ccc",
-                        HostPath:      "/dev/ccc",
-                        Permissions:   "mrw",
-                    })
-                    resp.Mounts = append(resp.Mounts, &pluginapi.Mount{
-                        ContainerPath: "/container_dir1/file2",
-                        HostPath:      "host_dir1/file2",
-                        ReadOnly:      true,
-                    })
-                    resp.Envs["key1"] = "val1"
-                }
-            }
-            return resp, nil
-        },
-    }
-    testManager.endpoints[resourceName2] = &MockEndpoint{
-        allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) {
-            resp := new(pluginapi.AllocateResponse)
-            resp.Envs = make(map[string]string)
-            for _, dev := range devs {
-                switch dev {
-                case "dev3":
-                    resp.Envs["key2"] = "val2"
-                case "dev4":
-                    resp.Envs["key2"] = "val3"
-                }
-            }
-            return resp, nil
-        },
-    }
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            UID: uuid.NewUUID(),
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name: string(uuid.NewUUID()),
-                    Resources: v1.ResourceRequirements{
-                        Limits: v1.ResourceList{
-                            v1.ResourceName(resourceName1): resourceQuantity1,
-                            v1.ResourceName("cpu"):         resourceQuantity1,
-                            v1.ResourceName(resourceName2): resourceQuantity2,
-                        },
-                    },
-                },
-            },
-        },
-    }
-    podsStub.updateActivePods([]*v1.Pod{pod})
-    err = testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod})
-    as.Nil(err)
-    runContainerOpts := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0])
-    as.NotNil(runContainerOpts)
-    as.Equal(len(runContainerOpts.Devices), 3)
-    as.Equal(len(runContainerOpts.Mounts), 2)
-    as.Equal(len(runContainerOpts.Envs), 2)
-    // Requesting to create a pod without enough resources should fail.
-    as.Equal(2, testManager.allocatedDevices[resourceName1].Len())
-    failPod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            UID: uuid.NewUUID(),
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name: string(uuid.NewUUID()),
-                    Resources: v1.ResourceRequirements{
-                        Limits: v1.ResourceList{
-                            v1.ResourceName(resourceName1): resourceQuantity2,
-                        },
-                    },
-                },
-            },
-        },
-    }
-    err = testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: failPod})
-    as.NotNil(err)
-    runContainerOpts2 := testManager.GetDeviceRunContainerOptions(failPod, &failPod.Spec.Containers[0])
-    as.Nil(runContainerOpts2)
-    // Requesting to create a new pod with a single resourceName2 should succeed.
-    newPod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            UID: uuid.NewUUID(),
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name: string(uuid.NewUUID()),
-                    Resources: v1.ResourceRequirements{
-                        Limits: v1.ResourceList{
-                            v1.ResourceName(resourceName2): resourceQuantity2,
-                        },
-                    },
-                },
-            },
-        },
-    }
-    err = testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: newPod})
-    as.Nil(err)
-    runContainerOpts3 := testManager.GetDeviceRunContainerOptions(newPod, &newPod.Spec.Containers[0])
-    as.Equal(1, len(runContainerOpts3.Envs))
+    for _, res := range testRes {
+        testManager.allDevices[res.resourceName] = sets.NewString()
+        for _, dev := range res.devs {
+            testManager.allDevices[res.resourceName].Insert(dev)
+        }
+        if res.resourceName == "domain1.com/resource1" {
+            testManager.endpoints[res.resourceName] = &MockEndpoint{
+                allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) {
+                    resp := new(pluginapi.AllocateResponse)
+                    resp.Envs = make(map[string]string)
+                    for _, dev := range devs {
+                        switch dev {
+                        case "dev1":
+                            resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
+                                ContainerPath: "/dev/aaa",
+                                HostPath:      "/dev/aaa",
+                                Permissions:   "mrw",
+                            })
+                            resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
+                                ContainerPath: "/dev/bbb",
+                                HostPath:      "/dev/bbb",
+                                Permissions:   "mrw",
+                            })
+                            resp.Mounts = append(resp.Mounts, &pluginapi.Mount{
+                                ContainerPath: "/container_dir1/file1",
+                                HostPath:      "host_dir1/file1",
+                                ReadOnly:      true,
+                            })
+                        case "dev2":
+                            resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
+                                ContainerPath: "/dev/ccc",
+                                HostPath:      "/dev/ccc",
+                                Permissions:   "mrw",
+                            })
+                            resp.Mounts = append(resp.Mounts, &pluginapi.Mount{
+                                ContainerPath: "/container_dir1/file2",
+                                HostPath:      "host_dir1/file2",
+                                ReadOnly:      true,
+                            })
+                            resp.Envs["key1"] = "val1"
+                        }
+                    }
+                    return resp, nil
+                },
+            }
+        }
+        if res.resourceName == "domain2.com/resource2" {
+            testManager.endpoints[res.resourceName] = &MockEndpoint{
+                allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) {
+                    resp := new(pluginapi.AllocateResponse)
+                    resp.Envs = make(map[string]string)
+                    for _, dev := range devs {
+                        switch dev {
+                        case "dev3":
+                            resp.Envs["key2"] = "val2"
+                        case "dev4":
+                            resp.Envs["key2"] = "val3"
+                        }
+                    }
+                    return resp, nil
+                },
+            }
+        }
+    }
+    return testManager
+}
+
+func getTestNodeInfo(allocatable v1.ResourceList) *schedulercache.NodeInfo {
+    cachedNode := &v1.Node{
+        Status: v1.NodeStatus{
+            Allocatable: allocatable,
+        },
+    }
+    nodeInfo := &schedulercache.NodeInfo{}
+    nodeInfo.SetNode(cachedNode)
+    return nodeInfo
+}
+
+type TestResource struct {
+    resourceName     string
+    resourceQuantity resource.Quantity
+    devs             []string
+}
+
+func TestPodContainerDeviceAllocation(t *testing.T) {
+    flag.Set("alsologtostderr", fmt.Sprintf("%t", true))
+    var logLevel string
+    flag.StringVar(&logLevel, "logLevel", "4", "test")
+    flag.Lookup("v").Value.Set(logLevel)
+    res1 := TestResource{
+        resourceName:     "domain1.com/resource1",
+        resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
+        devs:             []string{"dev1", "dev2"},
+    }
+    res2 := TestResource{
+        resourceName:     "domain2.com/resource2",
+        resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
+        devs:             []string{"dev3", "dev4"},
+    }
+    testResources := make([]TestResource, 2)
+    testResources = append(testResources, res1)
+    testResources = append(testResources, res2)
+    as := require.New(t)
+    podsStub := activePodsStub{
+        activePods: []*v1.Pod{},
+    }
+    tmpDir, err := ioutil.TempDir("", "checkpoint")
+    as.Nil(err)
+    defer os.RemoveAll(tmpDir)
+    nodeInfo := getTestNodeInfo(v1.ResourceList{})
+    testManager := getTestManager(tmpDir, podsStub.getActivePods, testResources)
+    testPods := []*v1.Pod{
+        makePod(v1.ResourceList{
+            v1.ResourceName(res1.resourceName): res1.resourceQuantity,
+            v1.ResourceName("cpu"):             res1.resourceQuantity,
+            v1.ResourceName(res2.resourceName): res2.resourceQuantity}),
+        makePod(v1.ResourceList{
+            v1.ResourceName(res1.resourceName): res2.resourceQuantity}),
+        makePod(v1.ResourceList{
+            v1.ResourceName(res2.resourceName): res2.resourceQuantity}),
+    }
+    testCases := []struct {
+        description               string
+        testPod                   *v1.Pod
+        expectedContainerOptsLen  []int
+        expectedAllocatedResName1 int
+        expectedAllocatedResName2 int
+        expErr                    error
+    }{
+        {
+            description:               "Successfull allocation of two Res1 resources and one Res2 resource",
+            testPod:                   testPods[0],
+            expectedContainerOptsLen:  []int{3, 2, 2},
+            expectedAllocatedResName1: 2,
+            expectedAllocatedResName2: 1,
+            expErr:                    nil,
+        },
+        {
+            description:               "Requesting to create a pod without enough resources should fail",
+            testPod:                   testPods[1],
+            expectedContainerOptsLen:  nil,
+            expectedAllocatedResName1: 2,
+            expectedAllocatedResName2: 1,
+            expErr:                    fmt.Errorf("requested number of devices unavailable for domain1.com/resource1. Requested: 1, Available: 0"),
+        },
+        {
+            description:               "Successfull allocation of all available Res1 resources and Res2 resources",
+            testPod:                   testPods[2],
+            expectedContainerOptsLen:  []int{0, 0, 1},
+            expectedAllocatedResName1: 2,
+            expectedAllocatedResName2: 2,
+            expErr:                    nil,
+        },
+    }
+    activePods := []*v1.Pod{}
+    for _, testCase := range testCases {
+        pod := testCase.testPod
+        activePods = append(activePods, pod)
+        podsStub.updateActivePods(activePods)
+        err := testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod})
+        if !reflect.DeepEqual(err, testCase.expErr) {
+            t.Errorf("DevicePluginManager error (%v). expected error: %v but got: %v",
+                testCase.description, testCase.expErr, err)
+        }
+        runContainerOpts := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0])
+        if testCase.expectedContainerOptsLen == nil {
+            as.Nil(runContainerOpts)
+        } else {
+            as.Equal(len(runContainerOpts.Devices), testCase.expectedContainerOptsLen[0])
+            as.Equal(len(runContainerOpts.Mounts), testCase.expectedContainerOptsLen[1])
+            as.Equal(len(runContainerOpts.Envs), testCase.expectedContainerOptsLen[2])
+        }
+        as.Equal(testCase.expectedAllocatedResName1, testManager.allocatedDevices[res1.resourceName].Len())
+        as.Equal(testCase.expectedAllocatedResName2, testManager.allocatedDevices[res2.resourceName].Len())
+    }
 }
+
+func TestInitContainerDeviceAllocation(t *testing.T) {
     // Requesting to create a pod that requests resourceName1 in init containers and normal containers
     // should succeed with devices allocated to init containers reallocated to normal containers.
+    res1 := TestResource{
+        resourceName:     "domain1.com/resource1",
+        resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
+        devs:             []string{"dev1", "dev2"},
+    }
+    res2 := TestResource{
+        resourceName:     "domain2.com/resource2",
+        resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
+        devs:             []string{"dev3", "dev4"},
+    }
+    testResources := make([]TestResource, 2)
+    testResources = append(testResources, res1)
+    testResources = append(testResources, res2)
+    as := require.New(t)
+    podsStub := activePodsStub{
+        activePods: []*v1.Pod{},
+    }
+    nodeInfo := getTestNodeInfo(v1.ResourceList{})
+    tmpDir, err := ioutil.TempDir("", "checkpoint")
+    as.Nil(err)
+    defer os.RemoveAll(tmpDir)
+    testManager := getTestManager(tmpDir, podsStub.getActivePods, testResources)
     podWithPluginResourcesInInitContainers := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             UID: uuid.NewUUID(),
@@ -566,7 +617,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
                     Name: string(uuid.NewUUID()),
                     Resources: v1.ResourceRequirements{
                         Limits: v1.ResourceList{
-                            v1.ResourceName(resourceName1): resourceQuantity2,
+                            v1.ResourceName(res1.resourceName): res2.resourceQuantity,
                         },
                     },
                 },
@@ -574,7 +625,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
                     Name: string(uuid.NewUUID()),
                     Resources: v1.ResourceRequirements{
                         Limits: v1.ResourceList{
-                            v1.ResourceName(resourceName1): resourceQuantity1,
+                            v1.ResourceName(res1.resourceName): res1.resourceQuantity,
                         },
                     },
                 },
@@ -584,8 +635,8 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
                     Name: string(uuid.NewUUID()),
                     Resources: v1.ResourceRequirements{
                         Limits: v1.ResourceList{
-                            v1.ResourceName(resourceName1): resourceQuantity2,
-                            v1.ResourceName(resourceName2): resourceQuantity2,
+                            v1.ResourceName(res1.resourceName): res2.resourceQuantity,
+                            v1.ResourceName(res2.resourceName): res2.resourceQuantity,
                         },
                     },
                 },
@@ -593,8 +644,8 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
                     Name: string(uuid.NewUUID()),
                     Resources: v1.ResourceRequirements{
                         Limits: v1.ResourceList{
-                            v1.ResourceName(resourceName1): resourceQuantity2,
-                            v1.ResourceName(resourceName2): resourceQuantity2,
+                            v1.ResourceName(res1.resourceName): res2.resourceQuantity,
+                            v1.ResourceName(res2.resourceName): res2.resourceQuantity,
                        },
                    },
                },
@@ -609,10 +660,10 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
     initCont2 := podWithPluginResourcesInInitContainers.Spec.InitContainers[1].Name
     normalCont1 := podWithPluginResourcesInInitContainers.Spec.Containers[0].Name
     normalCont2 := podWithPluginResourcesInInitContainers.Spec.Containers[1].Name
-    initCont1Devices := testManager.podDevices.containerDevices(podUID, initCont1, resourceName1)
-    initCont2Devices := testManager.podDevices.containerDevices(podUID, initCont2, resourceName1)
-    normalCont1Devices := testManager.podDevices.containerDevices(podUID, normalCont1, resourceName1)
-    normalCont2Devices := testManager.podDevices.containerDevices(podUID, normalCont2, resourceName1)
+    initCont1Devices := testManager.podDevices.containerDevices(podUID, initCont1, res1.resourceName)
+    initCont2Devices := testManager.podDevices.containerDevices(podUID, initCont2, res1.resourceName)
+    normalCont1Devices := testManager.podDevices.containerDevices(podUID, normalCont1, res1.resourceName)
+    normalCont2Devices := testManager.podDevices.containerDevices(podUID, normalCont2, res1.resourceName)
     as.True(initCont2Devices.IsSuperset(initCont1Devices))
     as.True(initCont2Devices.IsSuperset(normalCont1Devices))
     as.True(initCont2Devices.IsSuperset(normalCont2Devices))
```