mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-02 08:17:26 +00:00)
remove dead code in gpu manager
Signed-off-by: Vishnu kannan <vishnuk@google.com>
parent 46708be3e8
commit ad743a922a
@@ -102,10 +102,7 @@ func (ngm *nvidiaGPUManager) Start() error {
 		return err
 	}
 	// It's possible that the runtime isn't available now.
-	allocatedGPUs, err := ngm.gpusInUse()
-	if err == nil {
-		ngm.allocated = allocatedGPUs
-	}
+	ngm.allocated = ngm.gpusInUse()
 	// We ignore errors when identifying allocated GPUs because it is possible that the runtime interfaces may be not be logically up.
 	return nil
 }
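The hunk above drops the err check in Start() because gpusInUse() (changed further down in this commit) no longer returns an error. A minimal, self-contained Go sketch of that pattern, using hypothetical names rather than the real kubelet types:

package main

import "fmt"

// usage is a hypothetical stand-in for the real podGPUs bookkeeping type.
type usage struct{ devices map[string]bool }

type manager struct{ allocated *usage }

// gpusInUse used to return (*usage, error); since no code path could fail,
// the error result is dropped and callers assign the result directly.
func (m *manager) gpusInUse() *usage {
	return &usage{devices: map[string]bool{}}
}

// Start mirrors the shape of the new code: no err branch, one assignment.
func (m *manager) Start() error {
	m.allocated = m.gpusInUse()
	return nil
}

func main() {
	m := &manager{}
	if err := m.Start(); err != nil {
		fmt.Println("start failed:", err)
		return
	}
	fmt.Println("initialized:", m.allocated != nil)
}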
@@ -141,11 +138,7 @@ func (ngm *nvidiaGPUManager) AllocateGPU(pod *v1.Pod, container *v1.Container) (
 	defer ngm.Unlock()
 	if ngm.allocated == nil {
 		// Initialization is not complete. Try now. Failures can no longer be tolerated.
-		allocated, err := ngm.gpusInUse()
-		if err != nil {
-			return nil, fmt.Errorf("Failed to allocate GPUs because of issues identifying GPUs in use: %v", err)
-		}
-		ngm.allocated = allocated
+		ngm.allocated = ngm.gpusInUse()
 	} else {
 		// update internal list of GPUs in use prior to allocating new GPUs.
 		ngm.updateAllocatedGPUs()
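AllocateGPU applies the same simplification inside its lazily-initialized branch: if the in-use cache was never populated during Start(), populate it now while holding the manager lock. A sketch of that guard with hypothetical type and field names (the ngm.Lock() call and the actual allocation logic sit outside this hunk and are assumed here):

package main

import (
	"fmt"
	"sync"
)

// podGPUs here is a hypothetical stand-in with just enough structure to compile.
type podGPUs struct{ inUse map[string]bool }

type gpuManager struct {
	sync.Mutex
	allocated *podGPUs
}

func (m *gpuManager) gpusInUse() *podGPUs {
	return &podGPUs{inUse: map[string]bool{}}
}

// allocate shows the shape of the guard: take the lock, and if the cache is
// still nil, populate it in place. With no error return there is nothing to
// propagate, so the old if/return/assign block collapses to one line.
func (m *gpuManager) allocate() ([]string, error) {
	m.Lock()
	defer m.Unlock()
	if m.allocated == nil {
		m.allocated = m.gpusInUse()
	}
	// ... real allocation against m.allocated would follow here ...
	return nil, nil
}

func main() {
	m := &gpuManager{}
	if _, err := m.allocate(); err != nil {
		fmt.Println("allocate failed:", err)
		return
	}
	fmt.Println("cache ready:", m.allocated != nil)
}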
@@ -217,7 +210,7 @@ func (ngm *nvidiaGPUManager) discoverGPUs() error {
 }
 
 // gpusInUse returns a list of GPUs in use along with the respective pods that are using it.
-func (ngm *nvidiaGPUManager) gpusInUse() (*podGPUs, error) {
+func (ngm *nvidiaGPUManager) gpusInUse() *podGPUs {
 	pods := ngm.activePodsLister.GetActivePods()
 	type containerIdentifier struct {
 		id string
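Per its doc comment, gpusInUse reports the GPUs in use and the pods using them; the podGPUs type itself is not part of this diff. A hypothetical minimal reconstruction, only to illustrate the devices() accessor the tests rely on and why the function can now return the value directly:

package main

import "fmt"

// podGPUs is a hypothetical sketch: a mapping from pod UID to the set of GPU
// device paths that pod holds. The real kubelet type may differ.
type podGPUs struct {
	podGPUMapping map[string]map[string]bool
}

func newPodGPUs() *podGPUs {
	return &podGPUs{podGPUMapping: map[string]map[string]bool{}}
}

func (p *podGPUs) insert(podUID, device string) {
	if p.podGPUMapping[podUID] == nil {
		p.podGPUMapping[podUID] = map[string]bool{}
	}
	p.podGPUMapping[podUID][device] = true
}

// devices flattens the mapping into the set of devices currently in use; the
// tests in this commit assert len(gpusInUse.devices()) == 0 on a fresh manager.
func (p *podGPUs) devices() []string {
	var out []string
	for _, devs := range p.podGPUMapping {
		for d := range devs {
			out = append(out, d)
		}
	}
	return out
}

func main() {
	ret := newPodGPUs()
	ret.insert("pod-a", "/dev/nvidia0")
	fmt.Println("devices in use:", ret.devices())
}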
@@ -274,7 +267,7 @@ func (ngm *nvidiaGPUManager) gpusInUse() (*podGPUs, error) {
 			}
 		}
 	}
-	return ret, nil
+	return ret
 }
 
 func isValidPath(path string) bool {
@@ -70,9 +70,8 @@ func TestMultiContainerPodGPUAllocation(t *testing.T) {
 	}
 
 	// Expect that no devices are in use.
-	gpusInUse, err := testGpuManager.gpusInUse()
+	gpusInUse := testGpuManager.gpusInUse()
 	as := assert.New(t)
-	as.Nil(err)
 	as.Equal(len(gpusInUse.devices()), 0)
 
 	// Allocated GPUs for a pod with two containers.
@@ -121,9 +120,8 @@ func TestMultiPodGPUAllocation(t *testing.T) {
 	}
 
 	// Expect that no devices are in use.
-	gpusInUse, err := testGpuManager.gpusInUse()
+	gpusInUse := testGpuManager.gpusInUse()
 	as := assert.New(t)
-	as.Nil(err)
 	as.Equal(len(gpusInUse.devices()), 0)
 
 	// Allocated GPUs for a pod with two containers.
@@ -155,9 +153,8 @@ func TestPodContainerRestart(t *testing.T) {
 	}
 
 	// Expect that no devices are in use.
-	gpusInUse, err := testGpuManager.gpusInUse()
+	gpusInUse := testGpuManager.gpusInUse()
 	as := assert.New(t)
-	as.Nil(err)
 	as.Equal(len(gpusInUse.devices()), 0)
 
 	// Make a pod with one containers that requests two GPUs.
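All three test hunks make the same mechanical update: gpusInUse() now returns a single value, so the err variable and the as.Nil(err) assertion go away. A compact sketch of the new call-site shape, using hypothetical stubs and assuming github.com/stretchr/testify is available as in the real tests:

package gpusketch

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// podGPUs and fakeGPUManager are hypothetical stand-ins, just enough to show
// the call-site shape; the real tests construct a full test GPU manager.
type podGPUs struct{ inUse map[string]bool }

func (p *podGPUs) devices() []string {
	var out []string
	for d := range p.inUse {
		out = append(out, d)
	}
	return out
}

type fakeGPUManager struct{}

func (fakeGPUManager) gpusInUse() *podGPUs { return &podGPUs{inUse: map[string]bool{}} }

func TestNoDevicesInUseInitially(t *testing.T) {
	testGpuManager := fakeGPUManager{}

	// Before this commit: gpusInUse, err := testGpuManager.gpusInUse(); as.Nil(err)
	// After: a single return value, so the error assertion is gone.
	gpusInUse := testGpuManager.gpusInUse()
	as := assert.New(t)
	as.Equal(len(gpusInUse.devices()), 0)
}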