mirror of https://github.com/k3s-io/kubernetes.git
Update existing tests in support of GetPreferredAllocation()
commit cbd405d85c
parent a780ccff5b
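This commit threads a new GetPreferredAllocation flag through the device-manager test fixtures: every NewDevicePluginStub call below gains a fifth boolean, TestDevicePluginReRegistration sweeps that flag alongside the existing preStartContainerFlag, MockEndpoint grows a getPreferredAllocation hook, and a version file moves from 1.1 to 1.2. As a reading aid (not part of the diff), here is a minimal sketch of how a test might drive the updated constructor; the flag names in the comments are assumptions, only the extra trailing boolean is confirmed by the call sites below:

	devs := []*pluginapi.Device{
		{ID: "Dev1", Health: pluginapi.Healthy},
		{ID: "Dev2", Health: pluginapi.Healthy},
	}
	p := NewDevicePluginStub(devs, pluginSocketName+".sketch", testResourceName,
		false, // assumed: pre-start-container flag, present before this commit
		true,  // assumed: getPreferredAllocation flag introduced by this commit
	)
	err := p.Start()
	require.NoError(t, err)
	defer p.Stop()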
@@ -160,7 +160,7 @@ func TestAllocate(t *testing.T) {
 }
 
 func esetup(t *testing.T, devs []*pluginapi.Device, socket, resourceName string, callback monitorCallback) (*Stub, *endpointImpl) {
-	p := NewDevicePluginStub(devs, socket, resourceName, false)
+	p := NewDevicePluginStub(devs, socket, resourceName, false, false)
 
 	err := p.Start()
 	require.NoError(t, err)
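For orientation (not part of the diff), a minimal use of the updated esetup helper might look like the sketch below; ecleanup and the exact monitorCallback signature are assumptions about the surrounding test file, and the socket path and resource name are placeholders:

	devs := []*pluginapi.Device{{ID: "Dev1", Health: pluginapi.Healthy}}
	callback := func(resourceName string, devices []*pluginapi.Device) {} // assumed monitorCallback shape
	p, e := esetup(t, devs, "/tmp/device-plugin-test.sock", "fake.com/resource", callback)
	defer ecleanup(t, p, e) // assumed teardown counterpart to esetup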
@@ -103,55 +103,57 @@ func TestDevicePluginReRegistration(t *testing.T) {
 		{ID: "Dev3", Health: pluginapi.Healthy},
 	}
 	for _, preStartContainerFlag := range []bool{false, true} {
-		m, ch, p1 := setup(t, devs, nil, socketName, pluginSocketName)
-		p1.Register(socketName, testResourceName, "")
-
-		select {
-		case <-ch:
-		case <-time.After(5 * time.Second):
-			t.Fatalf("timeout while waiting for manager update")
-		}
-		capacity, allocatable, _ := m.GetCapacity()
-		resourceCapacity := capacity[v1.ResourceName(testResourceName)]
-		resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
-		require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
-		require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
-
-		p2 := NewDevicePluginStub(devs, pluginSocketName+".new", testResourceName, preStartContainerFlag)
-		err = p2.Start()
-		require.NoError(t, err)
-		p2.Register(socketName, testResourceName, "")
-
-		select {
-		case <-ch:
-		case <-time.After(5 * time.Second):
-			t.Fatalf("timeout while waiting for manager update")
-		}
-		capacity, allocatable, _ = m.GetCapacity()
-		resourceCapacity = capacity[v1.ResourceName(testResourceName)]
-		resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
-		require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
-		require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices shouldn't change.")
-
-		// Test the scenario that a plugin re-registers with different devices.
-		p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third", testResourceName, preStartContainerFlag)
-		err = p3.Start()
-		require.NoError(t, err)
-		p3.Register(socketName, testResourceName, "")
-
-		select {
-		case <-ch:
-		case <-time.After(5 * time.Second):
-			t.Fatalf("timeout while waiting for manager update")
-		}
-		capacity, allocatable, _ = m.GetCapacity()
-		resourceCapacity = capacity[v1.ResourceName(testResourceName)]
-		resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
-		require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
-		require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of plugin previously registered should be removed.")
-		p2.Stop()
-		p3.Stop()
-		cleanup(t, m, p1)
+		for _, getPreferredAllocationFlag := range []bool{false, true} {
+			m, ch, p1 := setup(t, devs, nil, socketName, pluginSocketName)
+			p1.Register(socketName, testResourceName, "")
+
+			select {
+			case <-ch:
+			case <-time.After(5 * time.Second):
+				t.Fatalf("timeout while waiting for manager update")
+			}
+			capacity, allocatable, _ := m.GetCapacity()
+			resourceCapacity := capacity[v1.ResourceName(testResourceName)]
+			resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
+			require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
+			require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
+
+			p2 := NewDevicePluginStub(devs, pluginSocketName+".new", testResourceName, preStartContainerFlag, getPreferredAllocationFlag)
+			err = p2.Start()
+			require.NoError(t, err)
+			p2.Register(socketName, testResourceName, "")
+
+			select {
+			case <-ch:
+			case <-time.After(5 * time.Second):
+				t.Fatalf("timeout while waiting for manager update")
+			}
+			capacity, allocatable, _ = m.GetCapacity()
+			resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+			resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
+			require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
+			require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices shouldn't change.")
+
+			// Test the scenario that a plugin re-registers with different devices.
+			p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third", testResourceName, preStartContainerFlag, getPreferredAllocationFlag)
+			err = p3.Start()
+			require.NoError(t, err)
+			p3.Register(socketName, testResourceName, "")
+
+			select {
+			case <-ch:
+			case <-time.After(5 * time.Second):
+				t.Fatalf("timeout while waiting for manager update")
+			}
+			capacity, allocatable, _ = m.GetCapacity()
+			resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+			resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
+			require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
+			require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of plugin previously registered should be removed.")
+			p2.Stop()
+			p3.Stop()
+			cleanup(t, m, p1)
+		}
 	}
 }
 
@@ -186,7 +188,7 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
 
-	p2 := NewDevicePluginStub(devs, pluginSocketName+".new", testResourceName, false)
+	p2 := NewDevicePluginStub(devs, pluginSocketName+".new", testResourceName, false, false)
 	err = p2.Start()
 	require.NoError(t, err)
 	// Wait for the second callback to be issued.
@@ -203,7 +205,7 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
 
 	// Test the scenario that a plugin re-registers with different devices.
-	p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third", testResourceName, false)
+	p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third", testResourceName, false, false)
 	err = p3.Start()
 	require.NoError(t, err)
 	// Wait for the third callback to be issued.
@@ -249,7 +251,7 @@ func setupDeviceManager(t *testing.T, devs []*pluginapi.Device, callback monitor
 }
 
 func setupDevicePlugin(t *testing.T, devs []*pluginapi.Device, pluginSocketName string) *Stub {
-	p := NewDevicePluginStub(devs, pluginSocketName, testResourceName, false)
+	p := NewDevicePluginStub(devs, pluginSocketName, testResourceName, false, false)
 	err := p.Start()
 	require.NoError(t, err)
 	return p
@@ -549,8 +551,9 @@ func (a *activePodsStub) updateActivePods(newPods []*v1.Pod) {
 }
 
 type MockEndpoint struct {
-	allocateFunc func(devs []string) (*pluginapi.AllocateResponse, error)
-	initChan     chan []string
+	getPreferredAllocationFunc func(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error)
+	allocateFunc               func(devs []string) (*pluginapi.AllocateResponse, error)
+	initChan                   chan []string
 }
 
 func (m *MockEndpoint) stop() {}
@@ -563,6 +566,13 @@ func (m *MockEndpoint) preStartContainer(devs []string) (*pluginapi.PreStartCont
 	return &pluginapi.PreStartContainerResponse{}, nil
 }
 
+func (m *MockEndpoint) getPreferredAllocation(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error) {
+	if m.getPreferredAllocationFunc != nil {
+		return m.getPreferredAllocationFunc(available, mustInclude, size)
+	}
+	return nil, nil
+}
+
 func (m *MockEndpoint) allocate(devs []string) (*pluginapi.AllocateResponse, error) {
 	if m.allocateFunc != nil {
 		return m.allocateFunc(devs)
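With the hook above in place, a manager test can stub a preferred-allocation policy directly on MockEndpoint, mirroring the existing allocateFunc pattern. A sketch (not part of the diff); the response field names are assumptions about the v1beta1 device plugin API rather than anything shown in this commit:

	me := &MockEndpoint{
		getPreferredAllocationFunc: func(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error) {
			// Trivial policy for the sketch: prefer the first `size` available IDs.
			// (A real policy would also honor mustInclude.)
			if size > len(available) {
				size = len(available)
			}
			// Field names below are assumptions about pluginapi, not shown in this diff.
			return &pluginapi.PreferredAllocationResponse{
				ContainerResponses: []*pluginapi.ContainerPreferredAllocationResponse{
					{DeviceIDs: available[:size]},
				},
			}, nil
		},
	}
	resp, err := me.getPreferredAllocation([]string{"Dev1", "Dev2", "Dev3"}, nil, 2)
	require.NoError(t, err)
	require.Len(t, resp.ContainerResponses, 1)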
@@ -599,6 +599,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 		allDevices:       make(map[string]map[string]pluginapi.Device),
 		healthyDevices:   make(map[string]sets.String),
 		allocatedDevices: make(map[string]sets.String),
+		endpoints:        make(map[string]endpointInfo),
 		podDevices:       make(podDevices),
 		sourcesReady:     &sourcesReadyStub{},
 		activePods:       func() []*v1.Pod { return []*v1.Pod{} },
@@ -607,6 +608,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 
 		m.allDevices[tc.resource] = make(map[string]pluginapi.Device)
 		m.healthyDevices[tc.resource] = sets.NewString()
+		m.endpoints[tc.resource] = endpointInfo{}
 
 		for _, d := range tc.devices {
 			m.allDevices[tc.resource][d.ID] = d
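The empty endpointInfo{} registered above appears to be all the topology test needs; a GetPreferredAllocation-focused test would instead hang a mock endpoint off the same map. A sketch (not part of the diff); the endpointInfo field names (e, opts) and the DevicePluginOptions field are assumptions about the devicemanager and pluginapi packages:

	m.endpoints[tc.resource] = endpointInfo{
		e:    me, // the MockEndpoint sketched after the previous hunk
		opts: &pluginapi.DevicePluginOptions{GetPreferredAllocationAvailable: true}, // assumed option name
	}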
@@ -1 +1 @@
-1.1
+1.2
@@ -86,7 +86,7 @@ func main() {
 	}
 	socketPath := pluginSocksDir + "/dp." + fmt.Sprintf("%d", time.Now().Unix())
 
-	dp1 := dm.NewDevicePluginStub(devs, socketPath, resourceName, false)
+	dp1 := dm.NewDevicePluginStub(devs, socketPath, resourceName, false, false)
 	if err := dp1.Start(); err != nil {
 		panic(err)
 